| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) |
|---|---|---|---|---|---|
tifler/linux-mainline | net/ipv6/netfilter/ip6table_mangle.c | 611 | 4119 | /*
* IPv6 packet mangling table, a port of the IPv4 mangle table to IPv6
*
* Copyright (C) 2000-2001 by Harald Welte <laforge@gnumonks.org>
* Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/slab.h>
#include <net/ipv6.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("ip6tables mangle table");
#define MANGLE_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \
(1 << NF_INET_LOCAL_IN) | \
(1 << NF_INET_FORWARD) | \
(1 << NF_INET_LOCAL_OUT) | \
(1 << NF_INET_POST_ROUTING))
static const struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV6,
.priority = NF_IP6_PRI_MANGLE,
};
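/*
 * Note on the function below: for locally generated packets the route is
 * chosen before the NF_INET_LOCAL_OUT hook runs, so if the mangle table
 * rewrites anything routing depends on (source or destination address,
 * mark, hop limit, or the flowlabel/priority word), the packet has to be
 * rerouted afterwards.  ip6t_mangle_out() snapshots those fields, runs the
 * table, and calls ip6_route_me_harder() when any of them changed.
 */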
static unsigned int
ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
{
unsigned int ret;
struct in6_addr saddr, daddr;
u_int8_t hop_limit;
u_int32_t flowlabel, mark;
int err;
#if 0
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr)) {
net_warn_ratelimited("ip6t_hook: happy cracking\n");
return NF_ACCEPT;
}
#endif
/* save source/dest address, mark, hoplimit, flowlabel, priority */
memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
mark = skb->mark;
hop_limit = ipv6_hdr(skb)->hop_limit;
/* flowlabel and prio (includes version, which shouldn't change either) */
flowlabel = *((u_int32_t *)ipv6_hdr(skb));
ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, state,
dev_net(state->out)->ipv6.ip6table_mangle);
if (ret != NF_DROP && ret != NF_STOLEN &&
(!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &daddr) ||
skb->mark != mark ||
ipv6_hdr(skb)->hop_limit != hop_limit ||
flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
err = ip6_route_me_harder(skb);
if (err < 0)
ret = NF_DROP_ERR(err);
}
return ret;
}
/* The work comes in here from netfilter.c. */
static unsigned int
ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct nf_hook_state *state)
{
if (ops->hooknum == NF_INET_LOCAL_OUT)
return ip6t_mangle_out(skb, state);
if (ops->hooknum == NF_INET_POST_ROUTING)
return ip6t_do_table(skb, ops->hooknum, state,
dev_net(state->out)->ipv6.ip6table_mangle);
/* INPUT/FORWARD */
return ip6t_do_table(skb, ops->hooknum, state,
dev_net(state->in)->ipv6.ip6table_mangle);
}
static struct nf_hook_ops *mangle_ops __read_mostly;
static int __net_init ip6table_mangle_net_init(struct net *net)
{
struct ip6t_replace *repl;
repl = ip6t_alloc_initial_table(&packet_mangler);
if (repl == NULL)
return -ENOMEM;
net->ipv6.ip6table_mangle =
ip6t_register_table(net, &packet_mangler, repl);
kfree(repl);
return PTR_ERR_OR_ZERO(net->ipv6.ip6table_mangle);
}
static void __net_exit ip6table_mangle_net_exit(struct net *net)
{
ip6t_unregister_table(net, net->ipv6.ip6table_mangle);
}
static struct pernet_operations ip6table_mangle_net_ops = {
.init = ip6table_mangle_net_init,
.exit = ip6table_mangle_net_exit,
};
static int __init ip6table_mangle_init(void)
{
int ret;
ret = register_pernet_subsys(&ip6table_mangle_net_ops);
if (ret < 0)
return ret;
/* Register hooks */
mangle_ops = xt_hook_link(&packet_mangler, ip6table_mangle_hook);
if (IS_ERR(mangle_ops)) {
ret = PTR_ERR(mangle_ops);
goto cleanup_table;
}
return ret;
cleanup_table:
unregister_pernet_subsys(&ip6table_mangle_net_ops);
return ret;
}
static void __exit ip6table_mangle_fini(void)
{
xt_hook_unlink(&packet_mangler, mangle_ops);
unregister_pernet_subsys(&ip6table_mangle_net_ops);
}
module_init(ip6table_mangle_init);
module_exit(ip6table_mangle_fini);
| gpl-2.0 |
arkusuma/mediapad_kernel_ics | sound/soc/msm/msm8x60-dai.c | 867 | 3844 | /* sound/soc/msm/msm-dai.c
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* Derived from msm-pcm.c and msm7201.c.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include "msm8x60-pcm.h"
static struct snd_soc_dai_driver msm_pcm_codec_dais[] = {
{
.name = "msm-codec-dai",
.playback = {
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_48000,
.rate_min = 8000,
.rate_max = 48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
.channels_max = 2,
.rate_min = 8000,
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
},
};
static struct snd_soc_dai_driver msm_pcm_cpu_dais[] = {
{
.name = "msm-cpu-dai",
.playback = {
.channels_min = 1,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_48000,
.rate_min = 8000,
.rate_max = 48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
.channels_min = 1,
.channels_max = 2,
.rate_min = 8000,
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
},
};
static struct snd_soc_codec_driver soc_codec_dev_msm = {
.compress_type = SND_SOC_FLAT_COMPRESSION,
};
static __devinit int asoc_msm_codec_probe(struct platform_device *pdev)
{
dev_info(&pdev->dev, "%s: dev name %s\n", __func__, dev_name(&pdev->dev));
return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_msm,
msm_pcm_codec_dais, ARRAY_SIZE(msm_pcm_codec_dais));
}
static int __devexit asoc_msm_codec_remove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&pdev->dev);
return 0;
}
static __devinit int asoc_msm_cpu_probe(struct platform_device *pdev)
{
dev_info(&pdev->dev, "%s: dev name %s\n", __func__, dev_name(&pdev->dev));
return snd_soc_register_dai(&pdev->dev, msm_pcm_cpu_dais);
}
static int __devexit asoc_msm_cpu_remove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&pdev->dev);
return 0;
}
static struct platform_driver asoc_msm_codec_driver = {
.probe = asoc_msm_codec_probe,
.remove = __devexit_p(asoc_msm_codec_remove),
.driver = {
.name = "msm-codec-dai",
.owner = THIS_MODULE,
},
};
static struct platform_driver asoc_msm_cpu_driver = {
.probe = asoc_msm_cpu_probe,
.remove = __devexit_p(asoc_msm_cpu_remove),
.driver = {
.name = "msm-cpu-dai",
.owner = THIS_MODULE,
},
};
static int __init msm_codec_dai_init(void)
{
return platform_driver_register(&asoc_msm_codec_driver);
}
static void __exit msm_codec_dai_exit(void)
{
platform_driver_unregister(&asoc_msm_codec_driver);
}
static int __init msm_cpu_dai_init(void)
{
return platform_driver_register(&asoc_msm_cpu_driver);
}
static void __exit msm_cpu_dai_exit(void)
{
platform_driver_unregister(&asoc_msm_cpu_driver);
}
module_init(msm_codec_dai_init);
module_exit(msm_codec_dai_exit);
module_init(msm_cpu_dai_init);
module_exit(msm_cpu_dai_exit);
/* Module information */
MODULE_DESCRIPTION("MSM Codec/Cpu Dai driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
koolkhel/linux | tools/perf/util/stat.c | 1379 | 1075 | #include <math.h>
#include "stat.h"
void update_stats(struct stats *stats, u64 val)
{
double delta;
stats->n++;
delta = val - stats->mean;
stats->mean += delta / stats->n;
stats->M2 += delta*(val - stats->mean);
if (val > stats->max)
stats->max = val;
if (val < stats->min)
stats->min = val;
}
double avg_stats(struct stats *stats)
{
return stats->mean;
}
/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                    n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *              s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
double variance, variance_mean;
if (stats->n < 2)
return 0.0;
variance = stats->M2 / (stats->n - 1);
variance_mean = variance / stats->n;
return sqrt(variance_mean);
}
double rel_stddev_stats(double stddev, double avg)
{
double pct = 0.0;
if (avg)
pct = 100.0 * stddev/avg;
return pct;
}
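/*
 * Minimal usage sketch (illustrative only, not part of this file): assuming
 * a struct stats whose fields start at zero except min, which starts at the
 * largest possible value, a caller would feed each measured value into
 * update_stats() and then report mean +/- stddev of the mean, e.g.
 *
 *	for (i = 0; i < nr_runs; i++)
 *		update_stats(&st, runtime[i]);
 *
 *	sd = stddev_stats(&st);
 *	printf("%.2f +- %.2f (%.1f%%)\n", avg_stats(&st), sd,
 *	       rel_stddev_stats(sd, avg_stats(&st)));
 */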
| gpl-2.0 |
mstfkaratas/kernel_htc_msm7227 | arch/sh/boards/mach-edosk7705/io.c | 1635 | 1812 | /*
* arch/sh/boards/renesas/edosk7705/io.c
*
* Copyright (C) 2001 Ian da Silva, Jeremy Siegel
* Based largely on io_se.c.
*
* I/O routines for Hitachi EDOSK7705 board.
*
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/io.h>
#include <mach/edosk7705.h>
#include <asm/addrspace.h>
#define SMC_IOADDR 0xA2000000
/* Map the Ethernet addresses as if it is at 0x300 - 0x320 */
static unsigned long sh_edosk7705_isa_port2addr(unsigned long port)
{
/*
* SMC91C96 registers are 4 byte aligned rather than the
* usual 2 byte!
*/
if (port >= 0x300 && port < 0x320)
return SMC_IOADDR + ((port - 0x300) * 2);
maybebadio(port);
return port;
}
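/*
 * Example of the mapping above (illustrative): ISA port 0x300 maps to
 * SMC_IOADDR + 0, port 0x302 to SMC_IOADDR + 4, port 0x304 to
 * SMC_IOADDR + 8, i.e. the 16-bit SMC91C96 registers sit on 4-byte
 * boundaries in the CPU address space, matching the comment in
 * sh_edosk7705_isa_port2addr() about 4-byte alignment.
 */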
/* Trying to read / write bytes on odd-byte boundaries to the Ethernet
* registers causes problems. So we bit-shift the value and read / write
* in 2 byte chunks. Setting the low byte to 0 does not cause problems
* now as odd byte writes are only made on the bit mask / interrupt
 * register. This may not be the case in the future. Mar-2003 SJD
*/
unsigned char sh_edosk7705_inb(unsigned long port)
{
if (port >= 0x300 && port < 0x320 && port & 0x01)
return __raw_readw(port - 1) >> 8;
return __raw_readb(sh_edosk7705_isa_port2addr(port));
}
void sh_edosk7705_outb(unsigned char value, unsigned long port)
{
if (port >= 0x300 && port < 0x320 && port & 0x01) {
__raw_writew(((unsigned short)value << 8), port - 1);
return;
}
__raw_writeb(value, sh_edosk7705_isa_port2addr(port));
}
void sh_edosk7705_insb(unsigned long port, void *addr, unsigned long count)
{
unsigned char *p = addr;
while (count--)
*p++ = sh_edosk7705_inb(port);
}
void sh_edosk7705_outsb(unsigned long port, const void *addr, unsigned long count)
{
unsigned char *p = (unsigned char *)addr;
while (count--)
sh_edosk7705_outb(*p++, port);
}
| gpl-2.0 |
bemolxd/android_kernel_x2xtreme-test | fs/dlm/config.c | 2147 | 27151 | /******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/configfs.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/dlmconstants.h>
#include <net/ipv6.h>
#include <net/sock.h>
#include "config.h"
#include "lowcomms.h"
/*
* /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid
* /config/dlm/<cluster>/spaces/<space>/nodes/<node>/weight
* /config/dlm/<cluster>/comms/<comm>/nodeid
* /config/dlm/<cluster>/comms/<comm>/local
* /config/dlm/<cluster>/comms/<comm>/addr (write only)
* /config/dlm/<cluster>/comms/<comm>/addr_list (read only)
* The <cluster> level is useless, but I haven't figured out how to avoid it.
*/
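/*
 * Concrete (hypothetical) instance of the hierarchy above, as userspace
 * (typically dlm_controld) would create it: for a cluster named "mycluster"
 * with a lockspace "myls" and a node with nodeid 1, the per-node attribute
 * would live at /config/dlm/mycluster/spaces/myls/nodes/1/nodeid and the
 * matching comm entry under /config/dlm/mycluster/comms/.
 */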
static struct config_group *space_list;
static struct config_group *comm_list;
static struct dlm_comm *local_comm;
static uint32_t dlm_comm_count;
struct dlm_clusters;
struct dlm_cluster;
struct dlm_spaces;
struct dlm_space;
struct dlm_comms;
struct dlm_comm;
struct dlm_nodes;
struct dlm_node;
static struct config_group *make_cluster(struct config_group *, const char *);
static void drop_cluster(struct config_group *, struct config_item *);
static void release_cluster(struct config_item *);
static struct config_group *make_space(struct config_group *, const char *);
static void drop_space(struct config_group *, struct config_item *);
static void release_space(struct config_item *);
static struct config_item *make_comm(struct config_group *, const char *);
static void drop_comm(struct config_group *, struct config_item *);
static void release_comm(struct config_item *);
static struct config_item *make_node(struct config_group *, const char *);
static void drop_node(struct config_group *, struct config_item *);
static void release_node(struct config_item *);
static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
char *buf);
static ssize_t store_cluster(struct config_item *i,
struct configfs_attribute *a,
const char *buf, size_t len);
static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
char *buf);
static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
const char *buf, size_t len);
static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
char *buf);
static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
const char *buf, size_t len);
static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf);
static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf,
size_t len);
static ssize_t comm_local_read(struct dlm_comm *cm, char *buf);
static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf,
size_t len);
static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf,
size_t len);
static ssize_t comm_addr_list_read(struct dlm_comm *cm, char *buf);
static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf);
static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
size_t len);
static ssize_t node_weight_read(struct dlm_node *nd, char *buf);
static ssize_t node_weight_write(struct dlm_node *nd, const char *buf,
size_t len);
struct dlm_cluster {
struct config_group group;
unsigned int cl_tcp_port;
unsigned int cl_buffer_size;
unsigned int cl_rsbtbl_size;
unsigned int cl_recover_timer;
unsigned int cl_toss_secs;
unsigned int cl_scan_secs;
unsigned int cl_log_debug;
unsigned int cl_protocol;
unsigned int cl_timewarn_cs;
unsigned int cl_waitwarn_us;
unsigned int cl_new_rsb_count;
unsigned int cl_recover_callbacks;
char cl_cluster_name[DLM_LOCKSPACE_LEN];
};
enum {
CLUSTER_ATTR_TCP_PORT = 0,
CLUSTER_ATTR_BUFFER_SIZE,
CLUSTER_ATTR_RSBTBL_SIZE,
CLUSTER_ATTR_RECOVER_TIMER,
CLUSTER_ATTR_TOSS_SECS,
CLUSTER_ATTR_SCAN_SECS,
CLUSTER_ATTR_LOG_DEBUG,
CLUSTER_ATTR_PROTOCOL,
CLUSTER_ATTR_TIMEWARN_CS,
CLUSTER_ATTR_WAITWARN_US,
CLUSTER_ATTR_NEW_RSB_COUNT,
CLUSTER_ATTR_RECOVER_CALLBACKS,
CLUSTER_ATTR_CLUSTER_NAME,
};
struct cluster_attribute {
struct configfs_attribute attr;
ssize_t (*show)(struct dlm_cluster *, char *);
ssize_t (*store)(struct dlm_cluster *, const char *, size_t);
};
static ssize_t cluster_cluster_name_read(struct dlm_cluster *cl, char *buf)
{
return sprintf(buf, "%s\n", cl->cl_cluster_name);
}
static ssize_t cluster_cluster_name_write(struct dlm_cluster *cl,
const char *buf, size_t len)
{
strncpy(dlm_config.ci_cluster_name, buf, DLM_LOCKSPACE_LEN);
strncpy(cl->cl_cluster_name, buf, DLM_LOCKSPACE_LEN);
return len;
}
static struct cluster_attribute cluster_attr_cluster_name = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "cluster_name",
.ca_mode = S_IRUGO | S_IWUSR },
.show = cluster_cluster_name_read,
.store = cluster_cluster_name_write,
};
static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
int *info_field, int check_zero,
const char *buf, size_t len)
{
unsigned int x;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
x = simple_strtoul(buf, NULL, 0);
if (check_zero && !x)
return -EINVAL;
*cl_field = x;
*info_field = x;
return len;
}
#define CLUSTER_ATTR(name, check_zero) \
static ssize_t name##_write(struct dlm_cluster *cl, const char *buf, size_t len) \
{ \
return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name, \
check_zero, buf, len); \
} \
static ssize_t name##_read(struct dlm_cluster *cl, char *buf) \
{ \
return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name); \
} \
static struct cluster_attribute cluster_attr_##name = \
__CONFIGFS_ATTR(name, 0644, name##_read, name##_write)
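/*
 * Illustrative expansion (sketch): CLUSTER_ATTR(tcp_port, 1) defines
 * tcp_port_write(), which funnels into cluster_set() with check_zero
 * enabled so a zero value is rejected, tcp_port_read(), which prints
 * cl->cl_tcp_port, and a struct cluster_attribute cluster_attr_tcp_port
 * with mode 0644 that the cluster_attrs[] table below points at.
 */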
CLUSTER_ATTR(tcp_port, 1);
CLUSTER_ATTR(buffer_size, 1);
CLUSTER_ATTR(rsbtbl_size, 1);
CLUSTER_ATTR(recover_timer, 1);
CLUSTER_ATTR(toss_secs, 1);
CLUSTER_ATTR(scan_secs, 1);
CLUSTER_ATTR(log_debug, 0);
CLUSTER_ATTR(protocol, 0);
CLUSTER_ATTR(timewarn_cs, 1);
CLUSTER_ATTR(waitwarn_us, 0);
CLUSTER_ATTR(new_rsb_count, 0);
CLUSTER_ATTR(recover_callbacks, 0);
static struct configfs_attribute *cluster_attrs[] = {
[CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr,
[CLUSTER_ATTR_BUFFER_SIZE] = &cluster_attr_buffer_size.attr,
[CLUSTER_ATTR_RSBTBL_SIZE] = &cluster_attr_rsbtbl_size.attr,
[CLUSTER_ATTR_RECOVER_TIMER] = &cluster_attr_recover_timer.attr,
[CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs.attr,
[CLUSTER_ATTR_SCAN_SECS] = &cluster_attr_scan_secs.attr,
[CLUSTER_ATTR_LOG_DEBUG] = &cluster_attr_log_debug.attr,
[CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol.attr,
[CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs.attr,
[CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us.attr,
[CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count.attr,
[CLUSTER_ATTR_RECOVER_CALLBACKS] = &cluster_attr_recover_callbacks.attr,
[CLUSTER_ATTR_CLUSTER_NAME] = &cluster_attr_cluster_name.attr,
NULL,
};
enum {
COMM_ATTR_NODEID = 0,
COMM_ATTR_LOCAL,
COMM_ATTR_ADDR,
COMM_ATTR_ADDR_LIST,
};
struct comm_attribute {
struct configfs_attribute attr;
ssize_t (*show)(struct dlm_comm *, char *);
ssize_t (*store)(struct dlm_comm *, const char *, size_t);
};
static struct comm_attribute comm_attr_nodeid = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "nodeid",
.ca_mode = S_IRUGO | S_IWUSR },
.show = comm_nodeid_read,
.store = comm_nodeid_write,
};
static struct comm_attribute comm_attr_local = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "local",
.ca_mode = S_IRUGO | S_IWUSR },
.show = comm_local_read,
.store = comm_local_write,
};
static struct comm_attribute comm_attr_addr = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "addr",
.ca_mode = S_IWUSR },
.store = comm_addr_write,
};
static struct comm_attribute comm_attr_addr_list = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "addr_list",
.ca_mode = S_IRUGO },
.show = comm_addr_list_read,
};
static struct configfs_attribute *comm_attrs[] = {
[COMM_ATTR_NODEID] = &comm_attr_nodeid.attr,
[COMM_ATTR_LOCAL] = &comm_attr_local.attr,
[COMM_ATTR_ADDR] = &comm_attr_addr.attr,
[COMM_ATTR_ADDR_LIST] = &comm_attr_addr_list.attr,
NULL,
};
enum {
NODE_ATTR_NODEID = 0,
NODE_ATTR_WEIGHT,
};
struct node_attribute {
struct configfs_attribute attr;
ssize_t (*show)(struct dlm_node *, char *);
ssize_t (*store)(struct dlm_node *, const char *, size_t);
};
static struct node_attribute node_attr_nodeid = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "nodeid",
.ca_mode = S_IRUGO | S_IWUSR },
.show = node_nodeid_read,
.store = node_nodeid_write,
};
static struct node_attribute node_attr_weight = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "weight",
.ca_mode = S_IRUGO | S_IWUSR },
.show = node_weight_read,
.store = node_weight_write,
};
static struct configfs_attribute *node_attrs[] = {
[NODE_ATTR_NODEID] = &node_attr_nodeid.attr,
[NODE_ATTR_WEIGHT] = &node_attr_weight.attr,
NULL,
};
struct dlm_clusters {
struct configfs_subsystem subsys;
};
struct dlm_spaces {
struct config_group ss_group;
};
struct dlm_space {
struct config_group group;
struct list_head members;
struct mutex members_lock;
int members_count;
};
struct dlm_comms {
struct config_group cs_group;
};
struct dlm_comm {
struct config_item item;
int seq;
int nodeid;
int local;
int addr_count;
struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
};
struct dlm_nodes {
struct config_group ns_group;
};
struct dlm_node {
struct config_item item;
struct list_head list; /* space->members */
int nodeid;
int weight;
int new;
int comm_seq; /* copy of cm->seq when nd->nodeid is set */
};
static struct configfs_group_operations clusters_ops = {
.make_group = make_cluster,
.drop_item = drop_cluster,
};
static struct configfs_item_operations cluster_ops = {
.release = release_cluster,
.show_attribute = show_cluster,
.store_attribute = store_cluster,
};
static struct configfs_group_operations spaces_ops = {
.make_group = make_space,
.drop_item = drop_space,
};
static struct configfs_item_operations space_ops = {
.release = release_space,
};
static struct configfs_group_operations comms_ops = {
.make_item = make_comm,
.drop_item = drop_comm,
};
static struct configfs_item_operations comm_ops = {
.release = release_comm,
.show_attribute = show_comm,
.store_attribute = store_comm,
};
static struct configfs_group_operations nodes_ops = {
.make_item = make_node,
.drop_item = drop_node,
};
static struct configfs_item_operations node_ops = {
.release = release_node,
.show_attribute = show_node,
.store_attribute = store_node,
};
static struct config_item_type clusters_type = {
.ct_group_ops = &clusters_ops,
.ct_owner = THIS_MODULE,
};
static struct config_item_type cluster_type = {
.ct_item_ops = &cluster_ops,
.ct_attrs = cluster_attrs,
.ct_owner = THIS_MODULE,
};
static struct config_item_type spaces_type = {
.ct_group_ops = &spaces_ops,
.ct_owner = THIS_MODULE,
};
static struct config_item_type space_type = {
.ct_item_ops = &space_ops,
.ct_owner = THIS_MODULE,
};
static struct config_item_type comms_type = {
.ct_group_ops = &comms_ops,
.ct_owner = THIS_MODULE,
};
static struct config_item_type comm_type = {
.ct_item_ops = &comm_ops,
.ct_attrs = comm_attrs,
.ct_owner = THIS_MODULE,
};
static struct config_item_type nodes_type = {
.ct_group_ops = &nodes_ops,
.ct_owner = THIS_MODULE,
};
static struct config_item_type node_type = {
.ct_item_ops = &node_ops,
.ct_attrs = node_attrs,
.ct_owner = THIS_MODULE,
};
static struct dlm_cluster *config_item_to_cluster(struct config_item *i)
{
return i ? container_of(to_config_group(i), struct dlm_cluster, group) :
NULL;
}
static struct dlm_space *config_item_to_space(struct config_item *i)
{
return i ? container_of(to_config_group(i), struct dlm_space, group) :
NULL;
}
static struct dlm_comm *config_item_to_comm(struct config_item *i)
{
return i ? container_of(i, struct dlm_comm, item) : NULL;
}
static struct dlm_node *config_item_to_node(struct config_item *i)
{
return i ? container_of(i, struct dlm_node, item) : NULL;
}
static struct config_group *make_cluster(struct config_group *g,
const char *name)
{
struct dlm_cluster *cl = NULL;
struct dlm_spaces *sps = NULL;
struct dlm_comms *cms = NULL;
void *gps = NULL;
cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS);
gps = kcalloc(3, sizeof(struct config_group *), GFP_NOFS);
sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS);
cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS);
if (!cl || !gps || !sps || !cms)
goto fail;
config_group_init_type_name(&cl->group, name, &cluster_type);
config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type);
config_group_init_type_name(&cms->cs_group, "comms", &comms_type);
cl->group.default_groups = gps;
cl->group.default_groups[0] = &sps->ss_group;
cl->group.default_groups[1] = &cms->cs_group;
cl->group.default_groups[2] = NULL;
cl->cl_tcp_port = dlm_config.ci_tcp_port;
cl->cl_buffer_size = dlm_config.ci_buffer_size;
cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size;
cl->cl_recover_timer = dlm_config.ci_recover_timer;
cl->cl_toss_secs = dlm_config.ci_toss_secs;
cl->cl_scan_secs = dlm_config.ci_scan_secs;
cl->cl_log_debug = dlm_config.ci_log_debug;
cl->cl_protocol = dlm_config.ci_protocol;
cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs;
cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us;
cl->cl_new_rsb_count = dlm_config.ci_new_rsb_count;
cl->cl_recover_callbacks = dlm_config.ci_recover_callbacks;
memcpy(cl->cl_cluster_name, dlm_config.ci_cluster_name,
DLM_LOCKSPACE_LEN);
space_list = &sps->ss_group;
comm_list = &cms->cs_group;
return &cl->group;
fail:
kfree(cl);
kfree(gps);
kfree(sps);
kfree(cms);
return ERR_PTR(-ENOMEM);
}
static void drop_cluster(struct config_group *g, struct config_item *i)
{
struct dlm_cluster *cl = config_item_to_cluster(i);
struct config_item *tmp;
int j;
for (j = 0; cl->group.default_groups[j]; j++) {
tmp = &cl->group.default_groups[j]->cg_item;
cl->group.default_groups[j] = NULL;
config_item_put(tmp);
}
space_list = NULL;
comm_list = NULL;
config_item_put(i);
}
static void release_cluster(struct config_item *i)
{
struct dlm_cluster *cl = config_item_to_cluster(i);
kfree(cl->group.default_groups);
kfree(cl);
}
static struct config_group *make_space(struct config_group *g, const char *name)
{
struct dlm_space *sp = NULL;
struct dlm_nodes *nds = NULL;
void *gps = NULL;
sp = kzalloc(sizeof(struct dlm_space), GFP_NOFS);
gps = kcalloc(2, sizeof(struct config_group *), GFP_NOFS);
nds = kzalloc(sizeof(struct dlm_nodes), GFP_NOFS);
if (!sp || !gps || !nds)
goto fail;
config_group_init_type_name(&sp->group, name, &space_type);
config_group_init_type_name(&nds->ns_group, "nodes", &nodes_type);
sp->group.default_groups = gps;
sp->group.default_groups[0] = &nds->ns_group;
sp->group.default_groups[1] = NULL;
INIT_LIST_HEAD(&sp->members);
mutex_init(&sp->members_lock);
sp->members_count = 0;
return &sp->group;
fail:
kfree(sp);
kfree(gps);
kfree(nds);
return ERR_PTR(-ENOMEM);
}
static void drop_space(struct config_group *g, struct config_item *i)
{
struct dlm_space *sp = config_item_to_space(i);
struct config_item *tmp;
int j;
/* assert list_empty(&sp->members) */
for (j = 0; sp->group.default_groups[j]; j++) {
tmp = &sp->group.default_groups[j]->cg_item;
sp->group.default_groups[j] = NULL;
config_item_put(tmp);
}
config_item_put(i);
}
static void release_space(struct config_item *i)
{
struct dlm_space *sp = config_item_to_space(i);
kfree(sp->group.default_groups);
kfree(sp);
}
static struct config_item *make_comm(struct config_group *g, const char *name)
{
struct dlm_comm *cm;
cm = kzalloc(sizeof(struct dlm_comm), GFP_NOFS);
if (!cm)
return ERR_PTR(-ENOMEM);
config_item_init_type_name(&cm->item, name, &comm_type);
cm->seq = dlm_comm_count++;
if (!cm->seq)
cm->seq = dlm_comm_count++;
cm->nodeid = -1;
cm->local = 0;
cm->addr_count = 0;
return &cm->item;
}
static void drop_comm(struct config_group *g, struct config_item *i)
{
struct dlm_comm *cm = config_item_to_comm(i);
if (local_comm == cm)
local_comm = NULL;
dlm_lowcomms_close(cm->nodeid);
while (cm->addr_count--)
kfree(cm->addr[cm->addr_count]);
config_item_put(i);
}
static void release_comm(struct config_item *i)
{
struct dlm_comm *cm = config_item_to_comm(i);
kfree(cm);
}
static struct config_item *make_node(struct config_group *g, const char *name)
{
struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
struct dlm_node *nd;
nd = kzalloc(sizeof(struct dlm_node), GFP_NOFS);
if (!nd)
return ERR_PTR(-ENOMEM);
config_item_init_type_name(&nd->item, name, &node_type);
nd->nodeid = -1;
nd->weight = 1; /* default weight of 1 if none is set */
nd->new = 1; /* set to 0 once it's been read by dlm_nodeid_list() */
mutex_lock(&sp->members_lock);
list_add(&nd->list, &sp->members);
sp->members_count++;
mutex_unlock(&sp->members_lock);
return &nd->item;
}
static void drop_node(struct config_group *g, struct config_item *i)
{
struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
struct dlm_node *nd = config_item_to_node(i);
mutex_lock(&sp->members_lock);
list_del(&nd->list);
sp->members_count--;
mutex_unlock(&sp->members_lock);
config_item_put(i);
}
static void release_node(struct config_item *i)
{
struct dlm_node *nd = config_item_to_node(i);
kfree(nd);
}
static struct dlm_clusters clusters_root = {
.subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "dlm",
.ci_type = &clusters_type,
},
},
},
};
int __init dlm_config_init(void)
{
config_group_init(&clusters_root.subsys.su_group);
mutex_init(&clusters_root.subsys.su_mutex);
return configfs_register_subsystem(&clusters_root.subsys);
}
void dlm_config_exit(void)
{
configfs_unregister_subsystem(&clusters_root.subsys);
}
/*
* Functions for user space to read/write attributes
*/
static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
char *buf)
{
struct dlm_cluster *cl = config_item_to_cluster(i);
struct cluster_attribute *cla =
container_of(a, struct cluster_attribute, attr);
return cla->show ? cla->show(cl, buf) : 0;
}
static ssize_t store_cluster(struct config_item *i,
struct configfs_attribute *a,
const char *buf, size_t len)
{
struct dlm_cluster *cl = config_item_to_cluster(i);
struct cluster_attribute *cla =
container_of(a, struct cluster_attribute, attr);
return cla->store ? cla->store(cl, buf, len) : -EINVAL;
}
static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
char *buf)
{
struct dlm_comm *cm = config_item_to_comm(i);
struct comm_attribute *cma =
container_of(a, struct comm_attribute, attr);
return cma->show ? cma->show(cm, buf) : 0;
}
static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
const char *buf, size_t len)
{
struct dlm_comm *cm = config_item_to_comm(i);
struct comm_attribute *cma =
container_of(a, struct comm_attribute, attr);
return cma->store ? cma->store(cm, buf, len) : -EINVAL;
}
static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf)
{
return sprintf(buf, "%d\n", cm->nodeid);
}
static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf,
size_t len)
{
cm->nodeid = simple_strtol(buf, NULL, 0);
return len;
}
static ssize_t comm_local_read(struct dlm_comm *cm, char *buf)
{
return sprintf(buf, "%d\n", cm->local);
}
static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf,
size_t len)
{
cm->local = simple_strtol(buf, NULL, 0);
if (cm->local && !local_comm)
local_comm = cm;
return len;
}
static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len)
{
struct sockaddr_storage *addr;
int rv;
if (len != sizeof(struct sockaddr_storage))
return -EINVAL;
if (cm->addr_count >= DLM_MAX_ADDR_COUNT)
return -ENOSPC;
addr = kzalloc(sizeof(*addr), GFP_NOFS);
if (!addr)
return -ENOMEM;
memcpy(addr, buf, len);
rv = dlm_lowcomms_addr(cm->nodeid, addr, len);
if (rv) {
kfree(addr);
return rv;
}
cm->addr[cm->addr_count++] = addr;
return len;
}
static ssize_t comm_addr_list_read(struct dlm_comm *cm, char *buf)
{
ssize_t s;
ssize_t allowance;
int i;
struct sockaddr_storage *addr;
struct sockaddr_in *addr_in;
struct sockaddr_in6 *addr_in6;
/* Taken from ip6_addr_string() defined in lib/vsprintf.c */
char buf0[sizeof("AF_INET6 xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255\n")];
/* Derived from SIMPLE_ATTR_SIZE of fs/configfs/file.c */
allowance = 4096;
buf[0] = '\0';
for (i = 0; i < cm->addr_count; i++) {
addr = cm->addr[i];
switch(addr->ss_family) {
case AF_INET:
addr_in = (struct sockaddr_in *)addr;
s = sprintf(buf0, "AF_INET %pI4\n", &addr_in->sin_addr.s_addr);
break;
case AF_INET6:
addr_in6 = (struct sockaddr_in6 *)addr;
s = sprintf(buf0, "AF_INET6 %pI6\n", &addr_in6->sin6_addr);
break;
default:
s = sprintf(buf0, "%s\n", "<UNKNOWN>");
break;
}
allowance -= s;
if (allowance >= 0)
strcat(buf, buf0);
else {
allowance += s;
break;
}
}
return 4096 - allowance;
}
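/*
 * Example of the addr_list output produced above (addresses hypothetical):
 * reading .../comms/<comm>/addr_list yields one line per configured
 * address, e.g.
 *
 *	AF_INET 192.168.1.10
 *	AF_INET6 fe80:0000:0000:0000:021c:42ff:fe00:0001
 *
 * Entries stop being appended once the 4096-byte allowance would be
 * exceeded, so the result always fits the configfs buffer.
 */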
static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
char *buf)
{
struct dlm_node *nd = config_item_to_node(i);
struct node_attribute *nda =
container_of(a, struct node_attribute, attr);
return nda->show ? nda->show(nd, buf) : 0;
}
static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
const char *buf, size_t len)
{
struct dlm_node *nd = config_item_to_node(i);
struct node_attribute *nda =
container_of(a, struct node_attribute, attr);
return nda->store ? nda->store(nd, buf, len) : -EINVAL;
}
static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf)
{
return sprintf(buf, "%d\n", nd->nodeid);
}
static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
size_t len)
{
uint32_t seq = 0;
nd->nodeid = simple_strtol(buf, NULL, 0);
dlm_comm_seq(nd->nodeid, &seq);
nd->comm_seq = seq;
return len;
}
static ssize_t node_weight_read(struct dlm_node *nd, char *buf)
{
return sprintf(buf, "%d\n", nd->weight);
}
static ssize_t node_weight_write(struct dlm_node *nd, const char *buf,
size_t len)
{
nd->weight = simple_strtol(buf, NULL, 0);
return len;
}
/*
* Functions for the dlm to get the info that's been configured
*/
static struct dlm_space *get_space(char *name)
{
struct config_item *i;
if (!space_list)
return NULL;
mutex_lock(&space_list->cg_subsys->su_mutex);
i = config_group_find_item(space_list, name);
mutex_unlock(&space_list->cg_subsys->su_mutex);
return config_item_to_space(i);
}
static void put_space(struct dlm_space *sp)
{
config_item_put(&sp->group.cg_item);
}
static struct dlm_comm *get_comm(int nodeid)
{
struct config_item *i;
struct dlm_comm *cm = NULL;
int found = 0;
if (!comm_list)
return NULL;
mutex_lock(&clusters_root.subsys.su_mutex);
list_for_each_entry(i, &comm_list->cg_children, ci_entry) {
cm = config_item_to_comm(i);
if (cm->nodeid != nodeid)
continue;
found = 1;
config_item_get(i);
break;
}
mutex_unlock(&clusters_root.subsys.su_mutex);
if (!found)
cm = NULL;
return cm;
}
static void put_comm(struct dlm_comm *cm)
{
config_item_put(&cm->item);
}
/* caller must free mem */
int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
int *count_out)
{
struct dlm_space *sp;
struct dlm_node *nd;
struct dlm_config_node *nodes, *node;
int rv, count;
sp = get_space(lsname);
if (!sp)
return -EEXIST;
mutex_lock(&sp->members_lock);
if (!sp->members_count) {
rv = -EINVAL;
printk(KERN_ERR "dlm: zero members_count\n");
goto out;
}
count = sp->members_count;
nodes = kcalloc(count, sizeof(struct dlm_config_node), GFP_NOFS);
if (!nodes) {
rv = -ENOMEM;
goto out;
}
node = nodes;
list_for_each_entry(nd, &sp->members, list) {
node->nodeid = nd->nodeid;
node->weight = nd->weight;
node->new = nd->new;
node->comm_seq = nd->comm_seq;
node++;
nd->new = 0;
}
*count_out = count;
*nodes_out = nodes;
rv = 0;
out:
mutex_unlock(&sp->members_lock);
put_space(sp);
return rv;
}
int dlm_comm_seq(int nodeid, uint32_t *seq)
{
struct dlm_comm *cm = get_comm(nodeid);
if (!cm)
return -EEXIST;
*seq = cm->seq;
put_comm(cm);
return 0;
}
int dlm_our_nodeid(void)
{
return local_comm ? local_comm->nodeid : 0;
}
/* num 0 is first addr, num 1 is second addr */
int dlm_our_addr(struct sockaddr_storage *addr, int num)
{
if (!local_comm)
return -1;
if (num + 1 > local_comm->addr_count)
return -1;
memcpy(addr, local_comm->addr[num], sizeof(*addr));
return 0;
}
/* Config file defaults */
#define DEFAULT_TCP_PORT 21064
#define DEFAULT_BUFFER_SIZE 4096
#define DEFAULT_RSBTBL_SIZE 1024
#define DEFAULT_RECOVER_TIMER 5
#define DEFAULT_TOSS_SECS 10
#define DEFAULT_SCAN_SECS 5
#define DEFAULT_LOG_DEBUG 0
#define DEFAULT_PROTOCOL 0
#define DEFAULT_TIMEWARN_CS 500 /* 5 sec = 500 centiseconds */
#define DEFAULT_WAITWARN_US 0
#define DEFAULT_NEW_RSB_COUNT 128
#define DEFAULT_RECOVER_CALLBACKS 0
#define DEFAULT_CLUSTER_NAME ""
struct dlm_config_info dlm_config = {
.ci_tcp_port = DEFAULT_TCP_PORT,
.ci_buffer_size = DEFAULT_BUFFER_SIZE,
.ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE,
.ci_recover_timer = DEFAULT_RECOVER_TIMER,
.ci_toss_secs = DEFAULT_TOSS_SECS,
.ci_scan_secs = DEFAULT_SCAN_SECS,
.ci_log_debug = DEFAULT_LOG_DEBUG,
.ci_protocol = DEFAULT_PROTOCOL,
.ci_timewarn_cs = DEFAULT_TIMEWARN_CS,
.ci_waitwarn_us = DEFAULT_WAITWARN_US,
.ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT,
.ci_recover_callbacks = DEFAULT_RECOVER_CALLBACKS,
.ci_cluster_name = DEFAULT_CLUSTER_NAME
};
| gpl-2.0 |
RichardWithnell/mptcp-rpi | arch/arm/mach-imx/mm-imx1.c | 2147 | 1883 | /*
* author: Sascha Hauer
* Created: april 20th, 2004
* Copyright: Synertronixx GmbH
*
* Common code for i.MX1 machines
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pinctrl/machine.h>
#include <asm/mach/map.h>
#include "common.h"
#include "devices/devices-common.h"
#include "hardware.h"
#include "iomux-v1.h"
static struct map_desc imx_io_desc[] __initdata = {
imx_map_entry(MX1, IO, MT_DEVICE),
};
void __init mx1_map_io(void)
{
iotable_init(imx_io_desc, ARRAY_SIZE(imx_io_desc));
}
void __init imx1_init_early(void)
{
mxc_set_cpu_type(MXC_CPU_MX1);
imx_iomuxv1_init(MX1_IO_ADDRESS(MX1_GPIO_BASE_ADDR),
MX1_NUM_GPIO_PORT);
}
void __init mx1_init_irq(void)
{
mxc_init_irq(MX1_IO_ADDRESS(MX1_AVIC_BASE_ADDR));
}
void __init imx1_soc_init(void)
{
mxc_arch_reset_init(MX1_IO_ADDRESS(MX1_WDT_BASE_ADDR));
mxc_device_init();
mxc_register_gpio("imx1-gpio", 0, MX1_GPIO1_BASE_ADDR, SZ_256,
MX1_GPIO_INT_PORTA, 0);
mxc_register_gpio("imx1-gpio", 1, MX1_GPIO2_BASE_ADDR, SZ_256,
MX1_GPIO_INT_PORTB, 0);
mxc_register_gpio("imx1-gpio", 2, MX1_GPIO3_BASE_ADDR, SZ_256,
MX1_GPIO_INT_PORTC, 0);
mxc_register_gpio("imx1-gpio", 3, MX1_GPIO4_BASE_ADDR, SZ_256,
MX1_GPIO_INT_PORTD, 0);
imx_add_imx_dma("imx1-dma", MX1_DMA_BASE_ADDR,
MX1_DMA_INT, MX1_DMA_ERR);
pinctrl_provide_dummies();
}
| gpl-2.0 |
AndroidGX/SimpleGX-MM-6.0_H815_20i | drivers/staging/bcm/InterfaceTx.c | 2403 | 6561 | #include "headers.h"
/* this is the transmit callback (BULK OUT) */
static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
{
struct bcm_usb_tcb *pTcb = (struct bcm_usb_tcb *)urb->context;
struct bcm_interface_adapter *psIntfAdapter = pTcb->psIntfAdapter;
struct bcm_link_request *pControlMsg = (struct bcm_link_request *)urb->transfer_buffer;
struct bcm_mini_adapter *psAdapter = psIntfAdapter->psAdapter;
BOOLEAN bpowerDownMsg = FALSE;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
if (unlikely(netif_msg_tx_done(Adapter)))
pr_info(PFX "%s: transmit status %d\n", Adapter->dev->name, urb->status);
if(urb->status != STATUS_SUCCESS)
{
if(urb->status == -EPIPE)
{
psIntfAdapter->psAdapter->bEndPointHalted = TRUE ;
wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
}
else
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Tx URB has got cancelled. status :%d", urb->status);
}
}
pTcb->bUsed = FALSE;
atomic_dec(&psIntfAdapter->uNumTcbUsed);
if(TRUE == psAdapter->bPreparingForLowPowerMode)
{
if(((pControlMsg->szData[0] == GO_TO_IDLE_MODE_PAYLOAD) &&
(pControlMsg->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE)))
{
bpowerDownMsg = TRUE ;
//This covers the bus err while Idle Request msg sent down.
if(urb->status != STATUS_SUCCESS)
{
psAdapter->bPreparingForLowPowerMode = FALSE ;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Idle Mode Request msg failed to reach to Modem");
//Signalling the cntrl pkt path in Ioctl
wake_up(&psAdapter->lowpower_mode_wait_queue);
StartInterruptUrb(psIntfAdapter);
goto err_exit;
}
if(psAdapter->bDoSuspend == FALSE)
{
psAdapter->IdleMode = TRUE;
//since going in Idle mode completed hence making this var false;
psAdapter->bPreparingForLowPowerMode = FALSE ;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Host Entered in Idle Mode State...");
//Signalling the cntrl pkt path in Ioctl
wake_up(&psAdapter->lowpower_mode_wait_queue);
}
}
else if((pControlMsg->Leader.Status == LINK_UP_CONTROL_REQ) &&
(pControlMsg->szData[0] == LINK_UP_ACK) &&
(pControlMsg->szData[1] == LINK_SHUTDOWN_REQ_FROM_FIRMWARE) &&
(pControlMsg->szData[2] == SHUTDOWN_ACK_FROM_DRIVER))
{
//This covers the bus err while shutdown Request msg sent down.
if(urb->status != STATUS_SUCCESS)
{
psAdapter->bPreparingForLowPowerMode = FALSE ;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Shutdown Request Msg failed to reach to Modem");
//Signalling the cntrl pkt path in Ioctl
wake_up(&psAdapter->lowpower_mode_wait_queue);
StartInterruptUrb(psIntfAdapter);
goto err_exit;
}
bpowerDownMsg = TRUE ;
if(psAdapter->bDoSuspend == FALSE)
{
psAdapter->bShutStatus = TRUE;
//since going in shutdown mode completed hence making this var false;
psAdapter->bPreparingForLowPowerMode = FALSE ;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Host Entered in shutdown Mode State...");
//Signalling the cntrl pkt path in Ioctl
wake_up(&psAdapter->lowpower_mode_wait_queue);
}
}
if(psAdapter->bDoSuspend && bpowerDownMsg)
{
//issuing bus suspend request
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Issuing the Bus suspend request to USB stack");
psIntfAdapter->bPreparingForBusSuspend = TRUE;
schedule_work(&psIntfAdapter->usbSuspendWork);
}
}
err_exit:
usb_free_coherent(urb->dev, urb->transfer_buffer_length,
urb->transfer_buffer, urb->transfer_dma);
}
static struct bcm_usb_tcb *GetBulkOutTcb(struct bcm_interface_adapter *psIntfAdapter)
{
struct bcm_usb_tcb *pTcb = NULL;
UINT index = 0;
if((atomic_read(&psIntfAdapter->uNumTcbUsed) < MAXIMUM_USB_TCB) &&
(psIntfAdapter->psAdapter->StopAllXaction == FALSE))
{
index = atomic_read(&psIntfAdapter->uCurrTcb);
pTcb = &psIntfAdapter->asUsbTcb[index];
pTcb->bUsed = TRUE;
pTcb->psIntfAdapter = psIntfAdapter;
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Got Tx desc %d used %d",
index, atomic_read(&psIntfAdapter->uNumTcbUsed));
index = (index + 1) % MAXIMUM_USB_TCB;
atomic_set(&psIntfAdapter->uCurrTcb, index);
atomic_inc(&psIntfAdapter->uNumTcbUsed);
}
return pTcb;
}
static int TransmitTcb(struct bcm_interface_adapter *psIntfAdapter, struct bcm_usb_tcb *pTcb, PVOID data, int len)
{
struct urb *urb = pTcb->urb;
int retval = 0;
urb->transfer_buffer = usb_alloc_coherent(psIntfAdapter->udev, len,
GFP_ATOMIC, &urb->transfer_dma);
if (!urb->transfer_buffer)
{
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Error allocating memory\n");
return -ENOMEM;
}
memcpy(urb->transfer_buffer, data, len);
urb->transfer_buffer_length = len;
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Sending Bulk out packet\n");
//For T3B,INT OUT end point will be used as bulk out end point
if((psIntfAdapter->psAdapter->chip_id == T3B) && (psIntfAdapter->bHighSpeedDevice == TRUE))
{
usb_fill_int_urb(urb, psIntfAdapter->udev,
psIntfAdapter->sBulkOut.bulk_out_pipe,
urb->transfer_buffer, len, write_bulk_callback, pTcb,
psIntfAdapter->sBulkOut.int_out_interval);
}
else
{
usb_fill_bulk_urb(urb, psIntfAdapter->udev,
psIntfAdapter->sBulkOut.bulk_out_pipe,
urb->transfer_buffer, len, write_bulk_callback, pTcb);
}
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* For DMA transfer */
if(FALSE == psIntfAdapter->psAdapter->device_removed &&
FALSE == psIntfAdapter->psAdapter->bEndPointHalted &&
FALSE == psIntfAdapter->bSuspended &&
FALSE == psIntfAdapter->bPreparingForBusSuspend)
{
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
{
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "failed submitting write urb, error %d", retval);
if(retval == -EPIPE)
{
psIntfAdapter->psAdapter->bEndPointHalted = TRUE ;
wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
}
}
}
return retval;
}
int InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len)
{
struct bcm_usb_tcb *pTcb = NULL;
struct bcm_interface_adapter *psIntfAdapter = (struct bcm_interface_adapter *)arg;
pTcb = GetBulkOutTcb(psIntfAdapter);
if(pTcb == NULL)
{
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "No URB to transmit packet, dropping packet");
return -EFAULT;
}
return TransmitTcb(psIntfAdapter, pTcb, data, len);
}
| gpl-2.0 |
brieuwers/N80001600Mhz | drivers/ata/pata_arasan_cf.c | 2915 | 26825 | /*
* drivers/ata/pata_arasan_cf.c
*
* Arasan Compact Flash host controller source file
*
* Copyright (C) 2011 ST Microelectronics
* Viresh Kumar <viresh.kumar@st.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
/*
* The Arasan CompactFlash Device Controller IP core has three basic modes of
* operation: PC card ATA using I/O mode, PC card ATA using memory mode, PC card
* ATA using true IDE modes. This driver supports only True IDE mode currently.
*
* Arasan CF Controller shares global irq register with Arasan XD Controller.
*
* Tested on arch/arm/mach-spear13xx
*/
#include <linux/ata.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/module.h>
#include <linux/pata_arasan_cf_data.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#define DRIVER_NAME "arasan_cf"
#define TIMEOUT msecs_to_jiffies(3000)
/* Registers */
/* CompactFlash Interface Status */
#define CFI_STS 0x000
#define STS_CHG (1)
#define BIN_AUDIO_OUT (1 << 1)
#define CARD_DETECT1 (1 << 2)
#define CARD_DETECT2 (1 << 3)
#define INP_ACK (1 << 4)
#define CARD_READY (1 << 5)
#define IO_READY (1 << 6)
#define B16_IO_PORT_SEL (1 << 7)
/* IRQ */
#define IRQ_STS 0x004
/* Interrupt Enable */
#define IRQ_EN 0x008
#define CARD_DETECT_IRQ (1)
#define STATUS_CHNG_IRQ (1 << 1)
#define MEM_MODE_IRQ (1 << 2)
#define IO_MODE_IRQ (1 << 3)
#define TRUE_IDE_MODE_IRQ (1 << 8)
#define PIO_XFER_ERR_IRQ (1 << 9)
#define BUF_AVAIL_IRQ (1 << 10)
#define XFER_DONE_IRQ (1 << 11)
#define IGNORED_IRQS (STATUS_CHNG_IRQ | MEM_MODE_IRQ | IO_MODE_IRQ |\
TRUE_IDE_MODE_IRQ)
#define TRUE_IDE_IRQS (CARD_DETECT_IRQ | PIO_XFER_ERR_IRQ |\
BUF_AVAIL_IRQ | XFER_DONE_IRQ)
/* Operation Mode */
#define OP_MODE 0x00C
#define CARD_MODE_MASK (0x3)
#define MEM_MODE (0x0)
#define IO_MODE (0x1)
#define TRUE_IDE_MODE (0x2)
#define CARD_TYPE_MASK (1 << 2)
#define CF_CARD (0)
#define CF_PLUS_CARD (1 << 2)
#define CARD_RESET (1 << 3)
#define CFHOST_ENB (1 << 4)
#define OUTPUTS_TRISTATE (1 << 5)
#define ULTRA_DMA_ENB (1 << 8)
#define MULTI_WORD_DMA_ENB (1 << 9)
#define DRQ_BLOCK_SIZE_MASK (0x3 << 11)
#define DRQ_BLOCK_SIZE_512 (0)
#define DRQ_BLOCK_SIZE_1024 (1 << 11)
#define DRQ_BLOCK_SIZE_2048 (2 << 11)
#define DRQ_BLOCK_SIZE_4096 (3 << 11)
/* CF Interface Clock Configuration */
#define CLK_CFG 0x010
#define CF_IF_CLK_MASK (0XF)
/* CF Timing Mode Configuration */
#define TM_CFG 0x014
#define MEM_MODE_TIMING_MASK (0x3)
#define MEM_MODE_TIMING_250NS (0x0)
#define MEM_MODE_TIMING_120NS (0x1)
#define MEM_MODE_TIMING_100NS (0x2)
#define MEM_MODE_TIMING_80NS (0x3)
#define IO_MODE_TIMING_MASK (0x3 << 2)
#define IO_MODE_TIMING_250NS (0x0 << 2)
#define IO_MODE_TIMING_120NS (0x1 << 2)
#define IO_MODE_TIMING_100NS (0x2 << 2)
#define IO_MODE_TIMING_80NS (0x3 << 2)
#define TRUEIDE_PIO_TIMING_MASK (0x7 << 4)
#define TRUEIDE_PIO_TIMING_SHIFT 4
#define TRUEIDE_MWORD_DMA_TIMING_MASK (0x7 << 7)
#define TRUEIDE_MWORD_DMA_TIMING_SHIFT 7
#define ULTRA_DMA_TIMING_MASK (0x7 << 10)
#define ULTRA_DMA_TIMING_SHIFT 10
/* CF Transfer Address */
#define XFER_ADDR 0x014
#define XFER_ADDR_MASK (0x7FF)
#define MAX_XFER_COUNT 0x20000u
/* Transfer Control */
#define XFER_CTR 0x01C
#define XFER_COUNT_MASK (0x3FFFF)
#define ADDR_INC_DISABLE (1 << 24)
#define XFER_WIDTH_MASK (1 << 25)
#define XFER_WIDTH_8B (0)
#define XFER_WIDTH_16B (1 << 25)
#define MEM_TYPE_MASK (1 << 26)
#define MEM_TYPE_COMMON (0)
#define MEM_TYPE_ATTRIBUTE (1 << 26)
#define MEM_IO_XFER_MASK (1 << 27)
#define MEM_XFER (0)
#define IO_XFER (1 << 27)
#define DMA_XFER_MODE (1 << 28)
#define AHB_BUS_NORMAL_PIO_OPRTN (~(1 << 29))
#define XFER_DIR_MASK (1 << 30)
#define XFER_READ (0)
#define XFER_WRITE (1 << 30)
#define XFER_START (1 << 31)
/* Write Data Port */
#define WRITE_PORT 0x024
/* Read Data Port */
#define READ_PORT 0x028
/* ATA Data Port */
#define ATA_DATA_PORT 0x030
#define ATA_DATA_PORT_MASK (0xFFFF)
/* ATA Error/Features */
#define ATA_ERR_FTR 0x034
/* ATA Sector Count */
#define ATA_SC 0x038
/* ATA Sector Number */
#define ATA_SN 0x03C
/* ATA Cylinder Low */
#define ATA_CL 0x040
/* ATA Cylinder High */
#define ATA_CH 0x044
/* ATA Select Card/Head */
#define ATA_SH 0x048
/* ATA Status-Command */
#define ATA_STS_CMD 0x04C
/* ATA Alternate Status/Device Control */
#define ATA_ASTS_DCTR 0x050
/* Extended Write Data Port 0x200-0x3FC */
#define EXT_WRITE_PORT 0x200
/* Extended Read Data Port 0x400-0x5FC */
#define EXT_READ_PORT 0x400
#define FIFO_SIZE 0x200u
/* Global Interrupt Status */
#define GIRQ_STS 0x800
/* Global Interrupt Status enable */
#define GIRQ_STS_EN 0x804
/* Global Interrupt Signal enable */
#define GIRQ_SGN_EN 0x808
#define GIRQ_CF (1)
#define GIRQ_XD (1 << 1)
/* Compact Flash Controller Dev Structure */
struct arasan_cf_dev {
/* pointer to ata_host structure */
struct ata_host *host;
/* clk structure, only if HAVE_CLK is defined */
#ifdef CONFIG_HAVE_CLK
struct clk *clk;
#endif
/* physical base address of controller */
dma_addr_t pbase;
/* virtual base address of controller */
void __iomem *vbase;
/* irq number*/
int irq;
/* status to be updated to framework regarding DMA transfer */
u8 dma_status;
/* Card is present or Not */
u8 card_present;
/* dma specific */
/* Completion for transfer complete interrupt from controller */
struct completion cf_completion;
/* Completion for DMA transfer complete. */
struct completion dma_completion;
/* Dma channel allocated */
struct dma_chan *dma_chan;
/* Mask for DMA transfers */
dma_cap_mask_t mask;
/* dma channel private data */
void *dma_priv;
/* DMA transfer work */
struct work_struct work;
/* DMA delayed finish work */
struct delayed_work dwork;
/* qc to be transferred using DMA */
struct ata_queued_cmd *qc;
};
static struct scsi_host_template arasan_cf_sht = {
ATA_BASE_SHT(DRIVER_NAME),
.sg_tablesize = SG_NONE,
.dma_boundary = 0xFFFFFFFFUL,
};
static void cf_dumpregs(struct arasan_cf_dev *acdev)
{
struct device *dev = acdev->host->dev;
dev_dbg(dev, ": =========== REGISTER DUMP ===========");
dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
dev_dbg(dev, ": =====================================");
}
/* Enable/Disable global interrupts shared between CF and XD ctrlr. */
static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
{
/* enable should be 0 or 1 */
writel(enable, acdev->vbase + GIRQ_STS_EN);
writel(enable, acdev->vbase + GIRQ_SGN_EN);
}
/* Enable/Disable CF interrupts */
static inline void
cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
{
u32 val = readl(acdev->vbase + IRQ_EN);
/* clear & enable/disable irqs */
if (enable) {
writel(mask, acdev->vbase + IRQ_STS);
writel(val | mask, acdev->vbase + IRQ_EN);
} else
writel(val & ~mask, acdev->vbase + IRQ_EN);
}
static inline void cf_card_reset(struct arasan_cf_dev *acdev)
{
u32 val = readl(acdev->vbase + OP_MODE);
writel(val | CARD_RESET, acdev->vbase + OP_MODE);
udelay(200);
writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
}
static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
{
writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
acdev->vbase + OP_MODE);
writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
acdev->vbase + OP_MODE);
}
static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
{
struct ata_port *ap = acdev->host->ports[0];
struct ata_eh_info *ehi = &ap->link.eh_info;
u32 val = readl(acdev->vbase + CFI_STS);
/* Both CD1 & CD2 should be low if card inserted completely */
if (!(val & (CARD_DETECT1 | CARD_DETECT2))) {
if (acdev->card_present)
return;
acdev->card_present = 1;
cf_card_reset(acdev);
} else {
if (!acdev->card_present)
return;
acdev->card_present = 0;
}
if (hotplugged) {
ata_ehi_hotplugged(ehi);
ata_port_freeze(ap);
}
}
static int cf_init(struct arasan_cf_dev *acdev)
{
struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
unsigned long flags;
int ret = 0;
#ifdef CONFIG_HAVE_CLK
ret = clk_enable(acdev->clk);
if (ret) {
dev_dbg(acdev->host->dev, "clock enable failed");
return ret;
}
#endif
spin_lock_irqsave(&acdev->host->lock, flags);
/* configure CF interface clock */
writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk :
CF_IF_CLK_166M, acdev->vbase + CLK_CFG);
writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
cf_ginterrupt_enable(acdev, 1);
spin_unlock_irqrestore(&acdev->host->lock, flags);
return ret;
}
static void cf_exit(struct arasan_cf_dev *acdev)
{
unsigned long flags;
spin_lock_irqsave(&acdev->host->lock, flags);
cf_ginterrupt_enable(acdev, 0);
cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
cf_card_reset(acdev);
writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
acdev->vbase + OP_MODE);
spin_unlock_irqrestore(&acdev->host->lock, flags);
#ifdef CONFIG_HAVE_CLK
clk_disable(acdev->clk);
#endif
}
static void dma_callback(void *dev)
{
struct arasan_cf_dev *acdev = (struct arasan_cf_dev *) dev;
complete(&acdev->dma_completion);
}
static bool filter(struct dma_chan *chan, void *slave)
{
chan->private = slave;
return true;
}
static inline void dma_complete(struct arasan_cf_dev *acdev)
{
struct ata_queued_cmd *qc = acdev->qc;
unsigned long flags;
acdev->qc = NULL;
ata_sff_interrupt(acdev->irq, acdev->host);
spin_lock_irqsave(&acdev->host->lock, flags);
if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
spin_unlock_irqrestore(&acdev->host->lock, flags);
}
static inline int wait4buf(struct arasan_cf_dev *acdev)
{
if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
return -ETIMEDOUT;
}
/* Check if PIO Error interrupt has occurred */
if (acdev->dma_status & ATA_DMA_ERR)
return -EAGAIN;
return 0;
}
static int
dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
{
struct dma_async_tx_descriptor *tx;
struct dma_chan *chan = acdev->dma_chan;
dma_cookie_t cookie;
unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
DMA_COMPL_SKIP_DEST_UNMAP;
int ret = 0;
tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
if (!tx) {
dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
return -EAGAIN;
}
tx->callback = dma_callback;
tx->callback_param = acdev;
cookie = tx->tx_submit(tx);
ret = dma_submit_error(cookie);
if (ret) {
dev_err(acdev->host->dev, "dma_submit_error\n");
return ret;
}
chan->device->device_issue_pending(chan);
/* Wait for DMA to complete */
if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
return -ETIMEDOUT;
}
return ret;
}
static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
{
dma_addr_t dest = 0, src = 0;
u32 xfer_cnt, sglen, dma_len, xfer_ctr;
u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
unsigned long flags;
int ret = 0;
sglen = sg_dma_len(sg);
if (write) {
src = sg_dma_address(sg);
dest = acdev->pbase + EXT_WRITE_PORT;
} else {
dest = sg_dma_address(sg);
src = acdev->pbase + EXT_READ_PORT;
}
/*
* For each sg:
* MAX_XFER_COUNT data will be transferred before we get transfer
* complete interrupt. In between, after every FIFO_SIZE bytes, a
* buffer available interrupt will be generated. At this time we will
* fill FIFO again: max FIFO_SIZE data.
*/
while (sglen) {
xfer_cnt = min(sglen, MAX_XFER_COUNT);
spin_lock_irqsave(&acdev->host->lock, flags);
xfer_ctr = readl(acdev->vbase + XFER_CTR) &
~XFER_COUNT_MASK;
writel(xfer_ctr | xfer_cnt | XFER_START,
acdev->vbase + XFER_CTR);
spin_unlock_irqrestore(&acdev->host->lock, flags);
/* continue dma xfers until current sg is completed */
while (xfer_cnt) {
/* wait for read to complete */
if (!write) {
ret = wait4buf(acdev);
if (ret)
goto fail;
}
/* read/write FIFO in chunk of FIFO_SIZE */
dma_len = min(xfer_cnt, FIFO_SIZE);
ret = dma_xfer(acdev, src, dest, dma_len);
if (ret) {
dev_err(acdev->host->dev, "dma failed");
goto fail;
}
if (write)
src += dma_len;
else
dest += dma_len;
sglen -= dma_len;
xfer_cnt -= dma_len;
/* wait for write to complete */
if (write) {
ret = wait4buf(acdev);
if (ret)
goto fail;
}
}
}
fail:
spin_lock_irqsave(&acdev->host->lock, flags);
writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
acdev->vbase + XFER_CTR);
spin_unlock_irqrestore(&acdev->host->lock, flags);
return ret;
}
/*
* This routine uses External DMA controller to read/write data to FIFO of CF
* controller. There are two xfer related interrupt supported by CF controller:
* - buf_avail: This interrupt is generated as soon as we have buffer of 512
* bytes available for reading or empty buffer available for writing.
* - xfer_done: This interrupt is generated on transfer of "xfer_size" amount of
* data to/from FIFO. xfer_size is programmed in XFER_CTR register.
*
* Max buffer size = FIFO_SIZE = 512 Bytes.
* Max xfer_size = MAX_XFER_COUNT = 256 KB.
*/
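/*
* Rough worked example using the limits above (sizes are hypothetical, for
* illustration only): a single 1 MB sg entry is handled by sg_xfer() as
* four 256 KB programmings of XFER_CTR, and each 256 KB window is drained
* or filled by dma_xfer() in 512-byte FIFO-sized chunks, i.e. 512
* buf_avail-paced DMA memcpys per window.
*/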
static void data_xfer(struct work_struct *work)
{
struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
work);
struct ata_queued_cmd *qc = acdev->qc;
struct scatterlist *sg;
unsigned long flags;
u32 temp;
int ret = 0;
/* request dma channels */
/* dma_request_channel may sleep, so calling from process context */
acdev->dma_chan = dma_request_channel(acdev->mask, filter,
acdev->dma_priv);
if (!acdev->dma_chan) {
dev_err(acdev->host->dev, "Unable to get dma_chan\n");
goto chan_request_fail;
}
for_each_sg(qc->sg, sg, qc->n_elem, temp) {
ret = sg_xfer(acdev, sg);
if (ret)
break;
}
dma_release_channel(acdev->dma_chan);
/* data xferred successfully */
if (!ret) {
u32 status;
spin_lock_irqsave(&acdev->host->lock, flags);
status = ioread8(qc->ap->ioaddr.altstatus_addr);
spin_unlock_irqrestore(&acdev->host->lock, flags);
if (status & (ATA_BUSY | ATA_DRQ)) {
ata_sff_queue_delayed_work(&acdev->dwork, 1);
return;
}
goto sff_intr;
}
cf_dumpregs(acdev);
chan_request_fail:
spin_lock_irqsave(&acdev->host->lock, flags);
/* error when transferring data to/from memory */
qc->err_mask |= AC_ERR_HOST_BUS;
qc->ap->hsm_task_state = HSM_ST_ERR;
cf_ctrl_reset(acdev);
spin_unlock_irqrestore(qc->ap->lock, flags);
sff_intr:
dma_complete(acdev);
}
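/*
* Re-check the device status from delayed work context: keep re-arming the
* delayed work while BSY/DRQ are still set, and finish the command once the
* device goes idle.
*/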
static void delayed_finish(struct work_struct *work)
{
struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
dwork.work);
struct ata_queued_cmd *qc = acdev->qc;
unsigned long flags;
u8 status;
spin_lock_irqsave(&acdev->host->lock, flags);
status = ioread8(qc->ap->ioaddr.altstatus_addr);
spin_unlock_irqrestore(&acdev->host->lock, flags);
if (status & (ATA_BUSY | ATA_DRQ))
ata_sff_queue_delayed_work(&acdev->dwork, 1);
else
dma_complete(acdev);
}
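/*
* Top-level interrupt handler: acknowledge the global and per-controller
* status bits, then dispatch card-detect, PIO transfer error, buffer
* available and transfer done events to the code waiting on them.
*/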
static irqreturn_t arasan_cf_interrupt(int irq, void *dev)
{
struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
unsigned long flags;
u32 irqsts;
irqsts = readl(acdev->vbase + GIRQ_STS);
if (!(irqsts & GIRQ_CF))
return IRQ_NONE;
spin_lock_irqsave(&acdev->host->lock, flags);
irqsts = readl(acdev->vbase + IRQ_STS);
writel(irqsts, acdev->vbase + IRQ_STS); /* clear irqs */
writel(GIRQ_CF, acdev->vbase + GIRQ_STS); /* clear girqs */
/* handle only relevant interrupts */
irqsts &= ~IGNORED_IRQS;
if (irqsts & CARD_DETECT_IRQ) {
cf_card_detect(acdev, 1);
spin_unlock_irqrestore(&acdev->host->lock, flags);
return IRQ_HANDLED;
}
if (irqsts & PIO_XFER_ERR_IRQ) {
acdev->dma_status = ATA_DMA_ERR;
writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
acdev->vbase + XFER_CTR);
spin_unlock_irqrestore(&acdev->host->lock, flags);
complete(&acdev->cf_completion);
dev_err(acdev->host->dev, "pio xfer err irq\n");
return IRQ_HANDLED;
}
spin_unlock_irqrestore(&acdev->host->lock, flags);
if (irqsts & BUF_AVAIL_IRQ) {
complete(&acdev->cf_completion);
return IRQ_HANDLED;
}
if (irqsts & XFER_DONE_IRQ) {
struct ata_queued_cmd *qc = acdev->qc;
/* Send Complete only for write */
if (qc->tf.flags & ATA_TFLAG_WRITE)
complete(&acdev->cf_completion);
}
return IRQ_HANDLED;
}
static void arasan_cf_freeze(struct ata_port *ap)
{
struct arasan_cf_dev *acdev = ap->host->private_data;
/* stop transfer and reset controller */
writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
acdev->vbase + XFER_CTR);
cf_ctrl_reset(acdev);
acdev->dma_status = ATA_DMA_ERR;
ata_sff_dma_pause(ap);
ata_sff_freeze(ap);
}
void arasan_cf_error_handler(struct ata_port *ap)
{
struct arasan_cf_dev *acdev = ap->host->private_data;
/*
* DMA transfers using an external DMA controller may be scheduled.
* Abort them before handling error. Refer data_xfer() for further
* details.
*/
cancel_work_sync(&acdev->work);
cancel_delayed_work_sync(&acdev->dwork);
return ata_sff_error_handler(ap);
}
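/*
* Kick off a DMA-protocol command: set the transfer direction in XFER_CTR,
* write the taskfile command to the device and queue data_xfer() so the
* actual FIFO <-> memory transfers run from process context.
*/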
static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
{
u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
xfer_ctr |= write ? XFER_WRITE : XFER_READ;
writel(xfer_ctr, acdev->vbase + XFER_CTR);
acdev->qc->ap->ops->sff_exec_command(acdev->qc->ap, &acdev->qc->tf);
ata_sff_queue_work(&acdev->work);
}
unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct arasan_cf_dev *acdev = ap->host->private_data;
/* defer PIO handling to sff_qc_issue */
if (!ata_is_dma(qc->tf.protocol))
return ata_sff_qc_issue(qc);
/* select the device */
ata_wait_idle(ap);
ata_sff_dev_select(ap, qc->dev->devno);
ata_wait_idle(ap);
/* start the command */
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
ap->ops->sff_tf_load(ap, &qc->tf);
acdev->dma_status = 0;
acdev->qc = qc;
arasan_cf_dma_start(acdev);
ap->hsm_task_state = HSM_ST_LAST;
break;
default:
WARN_ON(1);
return AC_ERR_SYSTEM;
}
return 0;
}
static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct arasan_cf_dev *acdev = ap->host->private_data;
u8 pio = adev->pio_mode - XFER_PIO_0;
unsigned long flags;
u32 val;
/* Arasan ctrl supports Mode0 -> Mode6 */
if (pio > 6) {
dev_err(ap->dev, "Unknown PIO mode\n");
return;
}
spin_lock_irqsave(&acdev->host->lock, flags);
val = readl(acdev->vbase + OP_MODE) &
~(ULTRA_DMA_ENB | MULTI_WORD_DMA_ENB | DRQ_BLOCK_SIZE_MASK);
writel(val, acdev->vbase + OP_MODE);
val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
val |= pio << TRUEIDE_PIO_TIMING_SHIFT;
writel(val, acdev->vbase + TM_CFG);
cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
spin_unlock_irqrestore(&acdev->host->lock, flags);
}
static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct arasan_cf_dev *acdev = ap->host->private_data;
u32 opmode, tmcfg, dma_mode = adev->dma_mode;
unsigned long flags;
spin_lock_irqsave(&acdev->host->lock, flags);
opmode = readl(acdev->vbase + OP_MODE) &
~(MULTI_WORD_DMA_ENB | ULTRA_DMA_ENB);
tmcfg = readl(acdev->vbase + TM_CFG);
if ((dma_mode >= XFER_UDMA_0) && (dma_mode <= XFER_UDMA_6)) {
opmode |= ULTRA_DMA_ENB;
tmcfg &= ~ULTRA_DMA_TIMING_MASK;
tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT;
} else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_4)) {
opmode |= MULTI_WORD_DMA_ENB;
tmcfg &= ~TRUEIDE_MWORD_DMA_TIMING_MASK;
tmcfg |= (dma_mode - XFER_MW_DMA_0) <<
TRUEIDE_MWORD_DMA_TIMING_SHIFT;
} else {
dev_err(ap->dev, "Unknown DMA mode\n");
spin_unlock_irqrestore(&acdev->host->lock, flags);
return;
}
writel(opmode, acdev->vbase + OP_MODE);
writel(tmcfg, acdev->vbase + TM_CFG);
writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);
cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
spin_unlock_irqrestore(&acdev->host->lock, flags);
}
static struct ata_port_operations arasan_cf_ops = {
.inherits = &ata_sff_port_ops,
.freeze = arasan_cf_freeze,
.error_handler = arasan_cf_error_handler,
.qc_issue = arasan_cf_qc_issue,
.set_piomode = arasan_cf_set_piomode,
.set_dmamode = arasan_cf_set_dmamode,
};
static int __devinit arasan_cf_probe(struct platform_device *pdev)
{
struct arasan_cf_dev *acdev;
struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
struct ata_host *host;
struct ata_port *ap;
struct resource *res;
irq_handler_t irq_handler = NULL;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
DRIVER_NAME)) {
dev_warn(&pdev->dev, "Failed to get memory region resource\n");
return -ENOENT;
}
acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
if (!acdev) {
dev_warn(&pdev->dev, "kzalloc fail\n");
return -ENOMEM;
}
/* if irq is 0, support only PIO */
acdev->irq = platform_get_irq(pdev, 0);
if (acdev->irq)
irq_handler = arasan_cf_interrupt;
else
pdata->quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
acdev->pbase = res->start;
acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));
if (!acdev->vbase) {
dev_warn(&pdev->dev, "ioremap fail\n");
return -ENOMEM;
}
#ifdef CONFIG_HAVE_CLK
acdev->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(acdev->clk)) {
dev_warn(&pdev->dev, "Clock not found\n");
return PTR_ERR(acdev->clk);
}
#endif
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
ret = -ENOMEM;
dev_warn(&pdev->dev, "alloc host fail\n");
goto free_clk;
}
ap = host->ports[0];
host->private_data = acdev;
acdev->host = host;
ap->ops = &arasan_cf_ops;
ap->pio_mask = ATA_PIO6;
ap->mwdma_mask = ATA_MWDMA4;
ap->udma_mask = ATA_UDMA6;
init_completion(&acdev->cf_completion);
init_completion(&acdev->dma_completion);
INIT_WORK(&acdev->work, data_xfer);
INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
dma_cap_set(DMA_MEMCPY, acdev->mask);
acdev->dma_priv = pdata->dma_priv;
/* Handle platform specific quirks */
if (pdata->quirk) {
if (pdata->quirk & CF_BROKEN_PIO) {
ap->ops->set_piomode = NULL;
ap->pio_mask = 0;
}
if (pdata->quirk & CF_BROKEN_MWDMA)
ap->mwdma_mask = 0;
if (pdata->quirk & CF_BROKEN_UDMA)
ap->udma_mask = 0;
}
ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;
ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;
ata_port_desc(ap, "phy_addr %llx virt_addr %p",
(unsigned long long) res->start, acdev->vbase);
ret = cf_init(acdev);
if (ret)
goto free_clk;
cf_card_detect(acdev, 0);
return ata_host_activate(host, acdev->irq, irq_handler, 0,
&arasan_cf_sht);
free_clk:
#ifdef CONFIG_HAVE_CLK
clk_put(acdev->clk);
#endif
return ret;
}
static int __devexit arasan_cf_remove(struct platform_device *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
ata_host_detach(host);
cf_exit(acdev);
#ifdef CONFIG_HAVE_CLK
clk_put(acdev->clk);
#endif
return 0;
}
#ifdef CONFIG_PM
static int arasan_cf_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
if (acdev->dma_chan) {
acdev->dma_chan->device->device_control(acdev->dma_chan,
DMA_TERMINATE_ALL, 0);
dma_release_channel(acdev->dma_chan);
}
cf_exit(acdev);
return ata_host_suspend(host, PMSG_SUSPEND);
}
static int arasan_cf_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
cf_init(acdev);
ata_host_resume(host);
return 0;
}
static const struct dev_pm_ops arasan_cf_pm_ops = {
.suspend = arasan_cf_suspend,
.resume = arasan_cf_resume,
};
#endif
static struct platform_driver arasan_cf_driver = {
.probe = arasan_cf_probe,
.remove = __devexit_p(arasan_cf_remove),
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &arasan_cf_pm_ops,
#endif
},
};
static int __init arasan_cf_init(void)
{
return platform_driver_register(&arasan_cf_driver);
}
module_init(arasan_cf_init);
static void __exit arasan_cf_exit(void)
{
platform_driver_unregister(&arasan_cf_driver);
}
module_exit(arasan_cf_exit);
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
| gpl-2.0 |
intel-linux-graphics/drm-intel | drivers/tty/serial/8250/8250_hp300.c | 2915 | 7843 | /*
* Driver for the 98626/98644/internal serial interface on hp300/hp400
* (based on the National Semiconductor INS8250/NS16550AF/WD16C552 UARTs)
*
* Ported from 2.2 and modified to use the normal 8250 driver
* by Kars de Jong <jongk@linux-m68k.org>, May 2004.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/delay.h>
#include <linux/dio.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <asm/io.h>
#include "8250.h"
#if !defined(CONFIG_HPDCA) && !defined(CONFIG_HPAPCI)
#warning CONFIG_8250 defined but neither CONFIG_HPDCA nor CONFIG_HPAPCI defined, are you sure?
#endif
#ifdef CONFIG_HPAPCI
struct hp300_port
{
struct hp300_port *next; /* next port */
int line; /* line (tty) number */
};
static struct hp300_port *hp300_ports;
#endif
#ifdef CONFIG_HPDCA
static int hpdca_init_one(struct dio_dev *d,
const struct dio_device_id *ent);
static void hpdca_remove_one(struct dio_dev *d);
static struct dio_device_id hpdca_dio_tbl[] = {
{ DIO_ID_DCA0 },
{ DIO_ID_DCA0REM },
{ DIO_ID_DCA1 },
{ DIO_ID_DCA1REM },
{ 0 }
};
static struct dio_driver hpdca_driver = {
.name = "hpdca",
.id_table = hpdca_dio_tbl,
.probe = hpdca_init_one,
.remove = hpdca_remove_one,
};
#endif
static unsigned int num_ports;
extern int hp300_uart_scode;
/* Offset to UART registers from base of DCA */
#define UART_OFFSET 17
#define DCA_ID 0x01 /* ID (read), reset (write) */
#define DCA_IC 0x03 /* Interrupt control */
/* Interrupt control */
#define DCA_IC_IE 0x80 /* Master interrupt enable */
#define HPDCA_BAUD_BASE 153600
/* Base address of the Frodo part */
#define FRODO_BASE (0x41c000)
/*
* Where we find the 8250-like APCI ports, and how far apart they are.
*/
#define FRODO_APCIBASE 0x0
#define FRODO_APCISPACE 0x20
#define FRODO_APCI_OFFSET(x) (FRODO_APCIBASE + ((x) * FRODO_APCISPACE))
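/* e.g. FRODO_APCI_OFFSET(1) = 0x0 + 1 * 0x20 = 0x20, so APCI port 1 sits at
* FRODO_BASE + 0x20 = 0x41c020 in physical address space. */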
#define HPAPCI_BAUD_BASE 500400
#ifdef CONFIG_SERIAL_8250_CONSOLE
/*
* Parse the bootinfo to find descriptions for headless console and
* debug serial ports and register them with the 8250 driver.
* This function should be called before serial_console_init() is called
* to make sure the serial console will be available for use. IA-64 kernel
* calls this function from setup_arch() after the EFI and ACPI tables have
* been parsed.
*/
int __init hp300_setup_serial_console(void)
{
int scode;
struct uart_port port;
memset(&port, 0, sizeof(port));
if (hp300_uart_scode < 0 || hp300_uart_scode > DIO_SCMAX)
return 0;
if (DIO_SCINHOLE(hp300_uart_scode))
return 0;
scode = hp300_uart_scode;
/* Memory mapped I/O */
port.iotype = UPIO_MEM;
port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF;
port.type = PORT_UNKNOWN;
/* Check for APCI console */
if (scode == 256) {
#ifdef CONFIG_HPAPCI
printk(KERN_INFO "Serial console is HP APCI 1\n");
port.uartclk = HPAPCI_BAUD_BASE * 16;
port.mapbase = (FRODO_BASE + FRODO_APCI_OFFSET(1));
port.membase = (char *)(port.mapbase + DIO_VIRADDRBASE);
port.regshift = 2;
add_preferred_console("ttyS", port.line, "9600n8");
#else
printk(KERN_WARNING "Serial console is APCI but support is disabled (CONFIG_HPAPCI)!\n");
return 0;
#endif
} else {
#ifdef CONFIG_HPDCA
unsigned long pa = dio_scodetophysaddr(scode);
if (!pa)
return 0;
printk(KERN_INFO "Serial console is HP DCA at select code %d\n", scode);
port.uartclk = HPDCA_BAUD_BASE * 16;
port.mapbase = (pa + UART_OFFSET);
port.membase = (char *)(port.mapbase + DIO_VIRADDRBASE);
port.regshift = 1;
port.irq = DIO_IPL(pa + DIO_VIRADDRBASE);
/* Enable board-interrupts */
out_8(pa + DIO_VIRADDRBASE + DCA_IC, DCA_IC_IE);
if (DIO_ID(pa + DIO_VIRADDRBASE) & 0x80)
add_preferred_console("ttyS", port.line, "9600n8");
#else
printk(KERN_WARNING "Serial console is DCA but support is disabled (CONFIG_HPDCA)!\n");
return 0;
#endif
}
if (early_serial_setup(&port) < 0)
printk(KERN_WARNING "hp300_setup_serial_console(): early_serial_setup() failed.\n");
return 0;
}
#endif /* CONFIG_SERIAL_8250_CONSOLE */
#ifdef CONFIG_HPDCA
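/*
* DIO probe for a DCA serial card: register an 8250 port at the card's UART
* window, enable its board interrupt and reset the DCA.
*/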
static int hpdca_init_one(struct dio_dev *d,
const struct dio_device_id *ent)
{
struct uart_8250_port uart;
int line;
#ifdef CONFIG_SERIAL_8250_CONSOLE
if (hp300_uart_scode == d->scode) {
/* Already got it. */
return 0;
}
#endif
memset(&uart, 0, sizeof(uart));
/* Memory mapped I/O */
uart.port.iotype = UPIO_MEM;
uart.port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF;
uart.port.irq = d->ipl;
uart.port.uartclk = HPDCA_BAUD_BASE * 16;
uart.port.mapbase = (d->resource.start + UART_OFFSET);
uart.port.membase = (char *)(uart.port.mapbase + DIO_VIRADDRBASE);
uart.port.regshift = 1;
uart.port.dev = &d->dev;
line = serial8250_register_8250_port(&uart);
if (line < 0) {
printk(KERN_NOTICE "8250_hp300: register_serial() DCA scode %d"
" irq %d failed\n", d->scode, uart.port.irq);
return -ENOMEM;
}
/* Enable board-interrupts */
out_8(d->resource.start + DIO_VIRADDRBASE + DCA_IC, DCA_IC_IE);
dio_set_drvdata(d, (void *)line);
/* Reset the DCA */
out_8(d->resource.start + DIO_VIRADDRBASE + DCA_ID, 0xff);
udelay(100);
num_ports++;
return 0;
}
#endif
static int __init hp300_8250_init(void)
{
static int called;
#ifdef CONFIG_HPAPCI
int line;
unsigned long base;
struct uart_8250_port uart;
struct hp300_port *port;
int i;
#endif
if (called)
return -ENODEV;
called = 1;
if (!MACH_IS_HP300)
return -ENODEV;
#ifdef CONFIG_HPDCA
dio_register_driver(&hpdca_driver);
#endif
#ifdef CONFIG_HPAPCI
if (hp300_model < HP_400) {
if (!num_ports)
return -ENODEV;
return 0;
}
/* These models have the Frodo chip.
* Port 0 is reserved for the Apollo Domain keyboard.
* Port 1 is either the console or the DCA.
*/
for (i = 1; i < 4; i++) {
/* Port 1 is the console on a 425e, on other machines it's
* mapped to DCA.
*/
#ifdef CONFIG_SERIAL_8250_CONSOLE
if (i == 1)
continue;
#endif
/* Create new serial device */
port = kmalloc(sizeof(struct hp300_port), GFP_KERNEL);
if (!port)
return -ENOMEM;
memset(&uart, 0, sizeof(uart));
base = (FRODO_BASE + FRODO_APCI_OFFSET(i));
/* Memory mapped I/O */
uart.port.iotype = UPIO_MEM;
uart.port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ \
| UPF_BOOT_AUTOCONF;
/* XXX - no interrupt support yet */
uart.port.irq = 0;
uart.port.uartclk = HPAPCI_BAUD_BASE * 16;
uart.port.mapbase = base;
uart.port.membase = (char *)(base + DIO_VIRADDRBASE);
uart.port.regshift = 2;
line = serial8250_register_8250_port(&uart);
if (line < 0) {
printk(KERN_NOTICE "8250_hp300: register_serial() APCI"
" %d irq %d failed\n", i, uart.port.irq);
kfree(port);
continue;
}
port->line = line;
port->next = hp300_ports;
hp300_ports = port;
num_ports++;
}
#endif
/* Any boards found? */
if (!num_ports)
return -ENODEV;
return 0;
}
#ifdef CONFIG_HPDCA
static void hpdca_remove_one(struct dio_dev *d)
{
int line;
line = (int) dio_get_drvdata(d);
if (d->resource.start) {
/* Disable board-interrupts */
out_8(d->resource.start + DIO_VIRADDRBASE + DCA_IC, 0);
}
serial8250_unregister_port(line);
}
#endif
static void __exit hp300_8250_exit(void)
{
#ifdef CONFIG_HPAPCI
struct hp300_port *port, *to_free;
for (port = hp300_ports; port; ) {
serial8250_unregister_port(port->line);
to_free = port;
port = port->next;
kfree(to_free);
}
hp300_ports = NULL;
#endif
#ifdef CONFIG_HPDCA
dio_unregister_driver(&hpdca_driver);
#endif
}
module_init(hp300_8250_init);
module_exit(hp300_8250_exit);
MODULE_DESCRIPTION("HP DCA/APCI serial driver");
MODULE_AUTHOR("Kars de Jong <jongk@linux-m68k.org>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Meticulus/android_kernel_samsung_codina | drivers/net/wireless/bcm4329/siutils.c | 4195 | 35763 | /*
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
* Copyright (C) 1999-2010, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: siutils.c,v 1.662.4.4.4.16.4.28 2010/06/23 21:37:54 Exp $
*/
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
#include <sbpcmcia.h>
#include <sbsocram.h>
#include <bcmsdh.h>
#include <sdio.h>
#include <sbsdio.h>
#include <sbhnddma.h>
#include <sbsdpcmdev.h>
#include <bcmsdpcm.h>
#include <hndpmu.h>
#include "siutils_priv.h"
/* local prototypes */
static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
uint bustype, void *sdh, char **vars, uint *varsz);
static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
uint *origidx, void *regs);
/* global variable to indicate reservation/release of gpio's */
static uint32 si_gpioreservation = 0;
static void *common_info_alloced = NULL;
/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
/*
* Allocate a si handle.
* devid - pci device id (used to determine chip#)
* osh - opaque OS handle
* regs - virtual address of initial core registers
* bustype - pci/pcmcia/sb/sdio/etc
* vars - pointer to a pointer area for "environment" variables
* varsz - pointer to int to return the size of the vars
*/
si_t *
si_attach(uint devid, osl_t *osh, void *regs,
uint bustype, void *sdh, char **vars, uint *varsz)
{
si_info_t *sii;
/* alloc si_info_t */
if ((sii = MALLOC(osh, sizeof (si_info_t))) == NULL) {
SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
return (NULL);
}
if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
if (NULL != sii->common_info)
MFREE(osh, sii->common_info, sizeof(si_common_info_t));
MFREE(osh, sii, sizeof(si_info_t));
return (NULL);
}
sii->vars = vars ? *vars : NULL;
sii->varsz = varsz ? *varsz : 0;
return (si_t *)sii;
}
/* global kernel resource */
static si_info_t ksii;
static uint32 wd_msticks; /* watchdog timer ticks normalized to ms */
/* generic kernel variant of si_attach() */
si_t *
si_kattach(osl_t *osh)
{
static bool ksii_attached = FALSE;
if (!ksii_attached) {
void *regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
SI_BUS, NULL,
osh != SI_OSH ? &ksii.vars : NULL,
osh != SI_OSH ? &ksii.varsz : NULL) == NULL) {
if (NULL != ksii.common_info)
MFREE(osh, ksii.common_info, sizeof(si_common_info_t));
SI_ERROR(("si_kattach: si_doattach failed\n"));
REG_UNMAP(regs);
return NULL;
}
REG_UNMAP(regs);
/* save ticks normalized to ms for si_watchdog_ms() */
if (PMUCTL_ENAB(&ksii.pub)) {
/* based on 32KHz ILP clock */
wd_msticks = 32;
} else {
wd_msticks = ALP_CLOCK / 1000;
}
ksii_attached = TRUE;
SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
ksii.pub.ccrev, wd_msticks));
}
return &ksii.pub;
}
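/*
* Prepare the bus core for backplane register access: flag the PCMCIA
* memseg requirement, and on SDIO force the ALP clock on via
* SBSDIO_FUNC1_CHIPCLKCSR and disable the extra SDIO pull-ups.
*/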
static bool
si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh)
{
/* need to set memseg flag for CF card first before any sb registers access */
if (BUSTYPE(bustype) == PCMCIA_BUS)
sii->memseg = TRUE;
if (BUSTYPE(bustype) == SDIO_BUS) {
int err;
uint8 clkset;
/* Try forcing SDIO core to do ALPAvail request only */
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
if (!err) {
uint8 clkval;
/* If register supported, wait for ALPAvail and then force ALP */
clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
if ((clkval & ~SBSDIO_AVBITS) == clkset) {
SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
PMU_MAX_TRANSITION_DLY);
if (!SBSDIO_ALPAV(clkval)) {
SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
clkval));
return FALSE;
}
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
clkset, &err);
OSL_DELAY(65);
}
}
/* Also, disable the extra SDIO pull-ups */
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
}
return TRUE;
}
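/*
* Walk the discovered cores: record chipcommon capabilities and PMU info,
* identify the host bus core (PCI/PCIe/PCMCIA/SDIO), remember the caller's
* original core index, and disable any on-chip ARM core before returning
* to that original core.
*/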
static bool
si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
uint *origidx, void *regs)
{
bool pci, pcie;
uint i;
uint pciidx, pcieidx, pcirev, pcierev;
cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
ASSERT((uintptr)cc);
/* get chipcommon rev */
sii->pub.ccrev = (int)si_corerev(&sii->pub);
/* get chipcommon chipstatus */
if (sii->pub.ccrev >= 11)
sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
/* get chipcommon capabilites */
sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
/* get pmu rev and caps */
if (sii->pub.cccaps & CC_CAP_PMU) {
sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
}
SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
sii->pub.pmucaps));
/* figure out bus/orignal core idx */
sii->pub.buscoretype = NODEV_CORE_ID;
sii->pub.buscorerev = NOREV;
sii->pub.buscoreidx = BADIDX;
pci = pcie = FALSE;
pcirev = pcierev = NOREV;
pciidx = pcieidx = BADIDX;
for (i = 0; i < sii->numcores; i++) {
uint cid, crev;
si_setcoreidx(&sii->pub, i);
cid = si_coreid(&sii->pub);
crev = si_corerev(&sii->pub);
/* Display cores found */
SI_MSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
i, cid, crev, sii->common_info->coresba[i], sii->common_info->regs[i]));
if (BUSTYPE(bustype) == PCI_BUS) {
if (cid == PCI_CORE_ID) {
pciidx = i;
pcirev = crev;
pci = TRUE;
} else if (cid == PCIE_CORE_ID) {
pcieidx = i;
pcierev = crev;
pcie = TRUE;
}
} else if ((BUSTYPE(bustype) == PCMCIA_BUS) &&
(cid == PCMCIA_CORE_ID)) {
sii->pub.buscorerev = crev;
sii->pub.buscoretype = cid;
sii->pub.buscoreidx = i;
}
else if (((BUSTYPE(bustype) == SDIO_BUS) ||
(BUSTYPE(bustype) == SPI_BUS)) &&
((cid == PCMCIA_CORE_ID) ||
(cid == SDIOD_CORE_ID))) {
sii->pub.buscorerev = crev;
sii->pub.buscoretype = cid;
sii->pub.buscoreidx = i;
}
/* find the core idx before entering this func. */
if ((savewin && (savewin == sii->common_info->coresba[i])) ||
(regs == sii->common_info->regs[i]))
*origidx = i;
}
SI_MSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
sii->pub.buscorerev));
if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) &&
(sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (sii->pub.chiprev <= 3))
OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
/* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was
* already running.
*/
if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
si_core_disable(&sii->pub, 0);
}
/* return to the original core */
si_setcoreidx(&sii->pub, *origidx);
return TRUE;
}
static si_info_t *
si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
uint bustype, void *sdh, char **vars, uint *varsz)
{
struct si_pub *sih = &sii->pub;
uint32 w, savewin;
chipcregs_t *cc;
char *pvars = NULL;
uint origidx;
ASSERT(GOODREGS(regs));
bzero((uchar*)sii, sizeof(si_info_t));
{
if (NULL == (common_info_alloced = (void *)MALLOC(osh, sizeof(si_common_info_t)))) {
SI_ERROR(("si_doattach: malloc failed! malloced %dbytes\n", MALLOCED(osh)));
return (NULL);
}
bzero((uchar*)(common_info_alloced), sizeof(si_common_info_t));
}
sii->common_info = (si_common_info_t *)common_info_alloced;
sii->common_info->attach_count++;
savewin = 0;
sih->buscoreidx = BADIDX;
sii->curmap = regs;
sii->sdh = sdh;
sii->osh = osh;
/* find Chipcommon address */
if (bustype == PCI_BUS) {
savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
savewin = SI_ENUM_BASE;
OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
cc = (chipcregs_t *)regs;
} else
if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
cc = (chipcregs_t *)sii->curmap;
} else {
cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
}
sih->bustype = bustype;
if (bustype != BUSTYPE(bustype)) {
SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
bustype, BUSTYPE(bustype)));
return NULL;
}
/* bus/core/clk setup for register access */
if (!si_buscore_prep(sii, bustype, devid, sdh)) {
SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype));
return NULL;
}
/* ChipID recognition.
* We assume we can read chipid at offset 0 from the regs arg.
* If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
* some way of recognizing them needs to be added here.
*/
w = R_REG(osh, &cc->chipid);
sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
/* Might as well fill in chip id rev & pkg */
sih->chip = w & CID_ID_MASK;
sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chippkg != BCM4329_289PIN_PKG_ID))
sih->chippkg = BCM4329_182PIN_PKG_ID;
sih->issim = IS_SIM(sih->chippkg);
/* scan for cores */
if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
SI_MSG(("Found chip type SB (0x%08x)\n", w));
sb_scan(&sii->pub, regs, devid);
} else if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) {
SI_MSG(("Found chip type AI (0x%08x)\n", w));
/* pass chipc address instead of original core base */
ai_scan(&sii->pub, (void *)cc, devid);
} else {
SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
return NULL;
}
/* no cores found, bail out */
if (sii->numcores == 0) {
SI_ERROR(("si_doattach: could not find any cores\n"));
return NULL;
}
/* bus/core/clk setup */
origidx = SI_CC_IDX;
if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
return NULL;
}
pvars = NULL;
if (sii->pub.ccrev >= 20) {
cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
W_REG(osh, &cc->gpiopullup, 0);
W_REG(osh, &cc->gpiopulldown, 0);
si_setcoreidx(sih, origidx);
}
/* Skip PMU initialization from the Dongle Host.
* Firmware will take care of it when it comes up.
*/
return (sii);
}
/* may be called with core in reset */
void
si_detach(si_t *sih)
{
si_info_t *sii;
uint idx;
sii = SI_INFO(sih);
if (sii == NULL)
return;
if (BUSTYPE(sih->bustype) == SI_BUS)
for (idx = 0; idx < SI_MAXCORES; idx++)
if (sii->common_info->regs[idx]) {
REG_UNMAP(sii->common_info->regs[idx]);
sii->common_info->regs[idx] = NULL;
}
if (1 == sii->common_info->attach_count--) {
MFREE(sii->osh, sii->common_info, sizeof(si_common_info_t));
common_info_alloced = NULL;
}
#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
if (sii != &ksii)
#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
MFREE(sii->osh, sii, sizeof(si_info_t));
}
void *
si_osh(si_t *sih)
{
si_info_t *sii;
sii = SI_INFO(sih);
return sii->osh;
}
void
si_setosh(si_t *sih, osl_t *osh)
{
si_info_t *sii;
sii = SI_INFO(sih);
if (sii->osh != NULL) {
SI_ERROR(("osh is already set....\n"));
ASSERT(!sii->osh);
}
sii->osh = osh;
}
/* register driver interrupt disabling and restoring callback functions */
void
si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
void *intrsenabled_fn, void *intr_arg)
{
si_info_t *sii;
sii = SI_INFO(sih);
sii->intr_arg = intr_arg;
sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
/* save current core id. when this function is called, the current core
* must be the core which provides driver functions (il, et, wl, etc.)
*/
sii->dev_coreid = sii->common_info->coreid[sii->curidx];
}
void
si_deregister_intr_callback(si_t *sih)
{
si_info_t *sii;
sii = SI_INFO(sih);
sii->intrsoff_fn = NULL;
}
uint
si_intflag(si_t *sih)
{
si_info_t *sii = SI_INFO(sih);
if (CHIPTYPE(sih->socitype) == SOCI_SB) {
sbconfig_t *ccsbr = (sbconfig_t *)((uintptr)((ulong)
(sii->common_info->coresba[SI_CC_IDX]) + SBCONFIGOFF));
return R_REG(sii->osh, &ccsbr->sbflagst);
} else if (CHIPTYPE(sih->socitype) == SOCI_AI)
return R_REG(sii->osh, ((uint32 *)(uintptr)
(sii->common_info->oob_router + OOB_STATUSA)));
else {
ASSERT(0);
return 0;
}
}
uint
si_flag(si_t *sih)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_flag(sih);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
return ai_flag(sih);
else {
ASSERT(0);
return 0;
}
}
void
si_setint(si_t *sih, int siflag)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
sb_setint(sih, siflag);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
ai_setint(sih, siflag);
else
ASSERT(0);
}
uint
si_coreid(si_t *sih)
{
si_info_t *sii;
sii = SI_INFO(sih);
return sii->common_info->coreid[sii->curidx];
}
uint
si_coreidx(si_t *sih)
{
si_info_t *sii;
sii = SI_INFO(sih);
return sii->curidx;
}
/* return the core-type instantiation # of the current core */
uint
si_coreunit(si_t *sih)
{
si_info_t *sii;
uint idx;
uint coreid;
uint coreunit;
uint i;
sii = SI_INFO(sih);
coreunit = 0;
idx = sii->curidx;
ASSERT(GOODREGS(sii->curmap));
coreid = si_coreid(sih);
/* count the cores of our type */
for (i = 0; i < idx; i++)
if (sii->common_info->coreid[i] == coreid)
coreunit++;
return (coreunit);
}
uint
si_corevendor(si_t *sih)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_corevendor(sih);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
return ai_corevendor(sih);
else {
ASSERT(0);
return 0;
}
}
bool
si_backplane64(si_t *sih)
{
return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
}
uint
si_corerev(si_t *sih)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_corerev(sih);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
return ai_corerev(sih);
else {
ASSERT(0);
return 0;
}
}
/* return index of coreid or BADIDX if not found */
uint
si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
{
si_info_t *sii;
uint found;
uint i;
sii = SI_INFO(sih);
found = 0;
for (i = 0; i < sii->numcores; i++)
if (sii->common_info->coreid[i] == coreid) {
if (found == coreunit)
return (i);
found++;
}
return (BADIDX);
}
/* return list of found cores */
uint
si_corelist(si_t *sih, uint coreid[])
{
si_info_t *sii;
sii = SI_INFO(sih);
bcopy((uchar*)sii->common_info->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
return (sii->numcores);
}
/* return current register mapping */
void *
si_coreregs(si_t *sih)
{
si_info_t *sii;
sii = SI_INFO(sih);
ASSERT(GOODREGS(sii->curmap));
return (sii->curmap);
}
/*
* This function changes logical "focus" to the indicated core;
* must be called with interrupts off.
* Moreover, callers should keep interrupts off during switching out of and back to d11 core
*/
void *
si_setcore(si_t *sih, uint coreid, uint coreunit)
{
uint idx;
idx = si_findcoreidx(sih, coreid, coreunit);
if (!GOODIDX(idx))
return (NULL);
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_setcoreidx(sih, idx);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
return ai_setcoreidx(sih, idx);
else {
ASSERT(0);
return NULL;
}
}
void *
si_setcoreidx(si_t *sih, uint coreidx)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_setcoreidx(sih, coreidx);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
return ai_setcoreidx(sih, coreidx);
else {
ASSERT(0);
return NULL;
}
}
/* Turn off interrupt as required by sb_setcore, before switch core */
void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
{
void *cc;
si_info_t *sii;
sii = SI_INFO(sih);
INTR_OFF(sii, *intr_val);
*origidx = sii->curidx;
cc = si_setcore(sih, coreid, 0);
ASSERT(cc != NULL);
return cc;
}
/* restore coreidx and restore interrupt */
void si_restore_core(si_t *sih, uint coreid, uint intr_val)
{
si_info_t *sii;
sii = SI_INFO(sih);
si_setcoreidx(sih, coreid);
INTR_RESTORE(sii, intr_val);
}
int
si_numaddrspaces(si_t *sih)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_numaddrspaces(sih);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
return ai_numaddrspaces(sih);
else {
ASSERT(0);
return 0;
}
}
uint32
si_addrspace(si_t *sih, uint asidx)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_addrspace(sih, asidx);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
return ai_addrspace(sih, asidx);
else {
ASSERT(0);
return 0;
}
}
uint32
si_addrspacesize(si_t *sih, uint asidx)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_addrspacesize(sih, asidx);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
return ai_addrspacesize(sih, asidx);
else {
ASSERT(0);
return 0;
}
}
uint32
si_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_core_cflags(sih, mask, val);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
return ai_core_cflags(sih, mask, val);
else {
ASSERT(0);
return 0;
}
}
void
si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
sb_core_cflags_wo(sih, mask, val);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
ai_core_cflags_wo(sih, mask, val);
else
ASSERT(0);
}
uint32
si_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_core_sflags(sih, mask, val);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
return ai_core_sflags(sih, mask, val);
else {
ASSERT(0);
return 0;
}
}
bool
si_iscoreup(si_t *sih)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_iscoreup(sih);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
return ai_iscoreup(sih);
else {
ASSERT(0);
return FALSE;
}
}
void
si_write_wrapperreg(si_t *sih, uint32 offset, uint32 val)
{
/* only for 4319, no requirement for SOCI_SB */
if (CHIPTYPE(sih->socitype) == SOCI_AI) {
ai_write_wrap_reg(sih, offset, val);
}
else
return;
return;
}
uint
si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_corereg(sih, coreidx, regoff, mask, val);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
return ai_corereg(sih, coreidx, regoff, mask, val);
else {
ASSERT(0);
return 0;
}
}
void
si_core_disable(si_t *sih, uint32 bits)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
sb_core_disable(sih, bits);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
ai_core_disable(sih, bits);
}
void
si_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
sb_core_reset(sih, bits, resetbits);
else if (CHIPTYPE(sih->socitype) == SOCI_AI)
ai_core_reset(sih, bits, resetbits);
}
void
si_core_tofixup(si_t *sih)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
sb_core_tofixup(sih);
}
/* Run bist on current core. Caller needs to take care of core-specific bist hazards */
int
si_corebist(si_t *sih)
{
uint32 cflags;
int result = 0;
/* Read core control flags */
cflags = si_core_cflags(sih, 0, 0);
/* Set bist & fgc */
si_core_cflags(sih, 0, (SICF_BIST_EN | SICF_FGC));
/* Wait for bist done */
SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
result = BCME_ERROR;
/* Reset core control flags */
si_core_cflags(sih, 0xffff, cflags);
return result;
}
static uint32
factor6(uint32 x)
{
switch (x) {
case CC_F6_2: return 2;
case CC_F6_3: return 3;
case CC_F6_4: return 4;
case CC_F6_5: return 5;
case CC_F6_6: return 6;
case CC_F6_7: return 7;
default: return 0;
}
}
/* calculate the speed the SI would run at given a set of clockcontrol values */
uint32
si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
{
uint32 n1, n2, clock, m1, m2, m3, mc;
n1 = n & CN_N1_MASK;
n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
if (pll_type == PLL_TYPE6) {
if (m & CC_T6_MMASK)
return CC_T6_M1;
else
return CC_T6_M0;
} else if ((pll_type == PLL_TYPE1) ||
(pll_type == PLL_TYPE3) ||
(pll_type == PLL_TYPE4) ||
(pll_type == PLL_TYPE7)) {
n1 = factor6(n1);
n2 += CC_F5_BIAS;
} else if (pll_type == PLL_TYPE2) {
n1 += CC_T2_BIAS;
n2 += CC_T2_BIAS;
ASSERT((n1 >= 2) && (n1 <= 7));
ASSERT((n2 >= 5) && (n2 <= 23));
} else if (pll_type == PLL_TYPE5) {
return (100000000);
} else
ASSERT(0);
/* PLL types 3 and 7 use BASE2 (25Mhz) */
if ((pll_type == PLL_TYPE3) ||
(pll_type == PLL_TYPE7)) {
clock = CC_CLOCK_BASE2 * n1 * n2;
} else
clock = CC_CLOCK_BASE1 * n1 * n2;
if (clock == 0)
return 0;
m1 = m & CC_M1_MASK;
m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
if ((pll_type == PLL_TYPE1) ||
(pll_type == PLL_TYPE3) ||
(pll_type == PLL_TYPE4) ||
(pll_type == PLL_TYPE7)) {
m1 = factor6(m1);
if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
m2 += CC_F5_BIAS;
else
m2 = factor6(m2);
m3 = factor6(m3);
switch (mc) {
case CC_MC_BYPASS: return (clock);
case CC_MC_M1: return (clock / m1);
case CC_MC_M1M2: return (clock / (m1 * m2));
case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3));
case CC_MC_M1M3: return (clock / (m1 * m3));
default: return (0);
}
} else {
ASSERT(pll_type == PLL_TYPE2);
m1 += CC_T2_BIAS;
m2 += CC_T2M2_BIAS;
m3 += CC_T2_BIAS;
ASSERT((m1 >= 2) && (m1 <= 7));
ASSERT((m2 >= 3) && (m2 <= 10));
ASSERT((m3 >= 2) && (m3 <= 7));
if ((mc & CC_T2MC_M1BYP) == 0)
clock /= m1;
if ((mc & CC_T2MC_M2BYP) == 0)
clock /= m2;
if ((mc & CC_T2MC_M3BYP) == 0)
clock /= m3;
return (clock);
}
}
/* set chip watchdog reset timer to fire in 'ticks' */
void
si_watchdog(si_t *sih, uint ticks)
{
if (PMUCTL_ENAB(sih)) {
if ((sih->chip == BCM4319_CHIP_ID) && (sih->chiprev == 0) && (ticks != 0)) {
si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2);
si_setcore(sih, USB20D_CORE_ID, 0);
si_core_disable(sih, 1);
si_setcore(sih, CC_CORE_ID, 0);
}
if (ticks == 1)
ticks = 2;
si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks);
} else {
/* instant NMI */
si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
}
}
#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
/* trigger watchdog reset after ms milliseconds */
void
si_watchdog_ms(si_t *sih, uint32 ms)
{
si_info_t *sii;
sii = SI_INFO(sih);
si_watchdog(sih, wd_msticks * ms);
}
#endif
/* initialize the sdio core */
void
si_sdio_init(si_t *sih)
{
si_info_t *sii = SI_INFO(sih);
if (((sih->buscoretype == PCMCIA_CORE_ID) && (sih->buscorerev >= 8)) ||
(sih->buscoretype == SDIOD_CORE_ID)) {
uint idx;
sdpcmd_regs_t *sdpregs;
/* get the current core index */
idx = sii->curidx;
ASSERT(idx == si_findcoreidx(sih, D11_CORE_ID, 0));
/* switch to sdio core */
if (!(sdpregs = (sdpcmd_regs_t *)si_setcore(sih, PCMCIA_CORE_ID, 0)))
sdpregs = (sdpcmd_regs_t *)si_setcore(sih, SDIOD_CORE_ID, 0);
ASSERT(sdpregs);
SI_MSG(("si_sdio_init: For PCMCIA/SDIO Corerev %d, enable ints from core %d "
"through SD core %d (%p)\n",
sih->buscorerev, idx, sii->curidx, sdpregs));
/* enable backplane error and core interrupts */
W_REG(sii->osh, &sdpregs->hostintmask, I_SBINT);
W_REG(sii->osh, &sdpregs->sbintmask, (I_SB_SERR | I_SB_RESPERR | (1 << idx)));
/* switch back to previous core */
si_setcoreidx(sih, idx);
}
/* enable interrupts */
bcmsdh_intr_enable(sii->sdh);
}
/* change logical "focus" to the gpio core for optimized access */
void *
si_gpiosetcore(si_t *sih)
{
return (si_setcoreidx(sih, SI_CC_IDX));
}
/* mask&set gpiocontrol bits */
uint32
si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority)
{
uint regoff;
regoff = 0;
/* gpios could be shared on router platforms
* ignore reservation if it's high priority (e.g., test apps)
*/
if ((priority != GPIO_HI_PRIORITY) &&
(BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
mask = priority ? (si_gpioreservation & mask) :
((si_gpioreservation | mask) & ~(si_gpioreservation));
val &= mask;
}
regoff = OFFSETOF(chipcregs_t, gpiocontrol);
return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
}
/* mask&set gpio output enable bits */
uint32
si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority)
{
uint regoff;
regoff = 0;
/* gpios could be shared on router platforms
* ignore reservation if it's high priority (e.g., test apps)
*/
if ((priority != GPIO_HI_PRIORITY) &&
(BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
mask = priority ? (si_gpioreservation & mask) :
((si_gpioreservation | mask) & ~(si_gpioreservation));
val &= mask;
}
regoff = OFFSETOF(chipcregs_t, gpioouten);
return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
}
/* mask&set gpio output bits */
uint32
si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority)
{
uint regoff;
regoff = 0;
/* gpios could be shared on router platforms
* ignore reservation if it's high priority (e.g., test apps)
*/
if ((priority != GPIO_HI_PRIORITY) &&
(BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
mask = priority ? (si_gpioreservation & mask) :
((si_gpioreservation | mask) & ~(si_gpioreservation));
val &= mask;
}
regoff = OFFSETOF(chipcregs_t, gpioout);
return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
}
/* reserve one gpio */
uint32
si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
{
si_info_t *sii;
sii = SI_INFO(sih);
/* only cores on SI_BUS share GPIO's and only application users need to
* reserve/release GPIO
*/
if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
return -1;
}
/* make sure only one bit is set */
if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
return -1;
}
/* already reserved */
if (si_gpioreservation & gpio_bitmask)
return -1;
/* set reservation */
si_gpioreservation |= gpio_bitmask;
return si_gpioreservation;
}
/* release one gpio */
/*
* releasing the gpio doesn't change the current value on the GPIO; the last
* written value persists until someone overwrites it
*/
uint32
si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
{
si_info_t *sii;
sii = SI_INFO(sih);
/* only cores on SI_BUS share GPIO's and only application users need to
* reserve/release GPIO
*/
if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
return -1;
}
/* make sure only one bit is set */
if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
return -1;
}
/* already released */
if (!(si_gpioreservation & gpio_bitmask))
return -1;
/* clear reservation */
si_gpioreservation &= ~gpio_bitmask;
return si_gpioreservation;
}
/* return the current gpioin register value */
uint32
si_gpioin(si_t *sih)
{
si_info_t *sii;
uint regoff;
sii = SI_INFO(sih);
regoff = 0;
regoff = OFFSETOF(chipcregs_t, gpioin);
return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
}
/* mask&set gpio interrupt polarity bits */
uint32
si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
{
si_info_t *sii;
uint regoff;
sii = SI_INFO(sih);
regoff = 0;
/* gpios could be shared on router platforms */
if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
mask = priority ? (si_gpioreservation & mask) :
((si_gpioreservation | mask) & ~(si_gpioreservation));
val &= mask;
}
regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
}
/* mask&set gpio interrupt mask bits */
uint32
si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
{
si_info_t *sii;
uint regoff;
sii = SI_INFO(sih);
regoff = 0;
/* gpios could be shared on router platforms */
if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
mask = priority ? (si_gpioreservation & mask) :
((si_gpioreservation | mask) & ~(si_gpioreservation));
val &= mask;
}
regoff = OFFSETOF(chipcregs_t, gpiointmask);
return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
}
/* assign the gpio to an led */
uint32
si_gpioled(si_t *sih, uint32 mask, uint32 val)
{
si_info_t *sii;
sii = SI_INFO(sih);
if (sih->ccrev < 16)
return -1;
/* gpio led powersave reg */
return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
}
/* mask&set gpio timer val */
uint32
si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
{
si_info_t *sii;
sii = SI_INFO(sih);
if (sih->ccrev < 16)
return -1;
return (si_corereg(sih, SI_CC_IDX,
OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
}
uint32
si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
{
si_info_t *sii;
uint offs;
sii = SI_INFO(sih);
if (sih->ccrev < 20)
return -1;
offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
}
uint32
si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
{
si_info_t *sii;
uint offs;
sii = SI_INFO(sih);
if (sih->ccrev < 11)
return -1;
if (regtype == GPIO_REGEVT)
offs = OFFSETOF(chipcregs_t, gpioevent);
else if (regtype == GPIO_REGEVT_INTMSK)
offs = OFFSETOF(chipcregs_t, gpioeventintmask);
else if (regtype == GPIO_REGEVT_INTPOL)
offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
else
return -1;
return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
}
void *
si_gpio_handler_register(si_t *sih, uint32 event,
bool level, gpio_handler_t cb, void *arg)
{
si_info_t *sii;
gpioh_item_t *gi;
ASSERT(event);
ASSERT(cb != NULL);
sii = SI_INFO(sih);
if (sih->ccrev < 11)
return NULL;
if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL)
return NULL;
bzero(gi, sizeof(gpioh_item_t));
gi->event = event;
gi->handler = cb;
gi->arg = arg;
gi->level = level;
gi->next = sii->gpioh_head;
sii->gpioh_head = gi;
return (void *)(gi);
}
void
si_gpio_handler_unregister(si_t *sih, void *gpioh)
{
si_info_t *sii;
gpioh_item_t *p, *n;
sii = SI_INFO(sih);
if (sih->ccrev < 11)
return;
ASSERT(sii->gpioh_head != NULL);
if ((void*)sii->gpioh_head == gpioh) {
sii->gpioh_head = sii->gpioh_head->next;
MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
return;
} else {
p = sii->gpioh_head;
n = p->next;
while (n) {
if ((void*)n == gpioh) {
p->next = n->next;
MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
return;
}
p = n;
n = n->next;
}
}
ASSERT(0); /* Not found in list */
}
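/*
* Invoke every registered GPIO handler whose event mask matches the current
* level or edge status, then clear the edge-triggered status bits.
*/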
void
si_gpio_handler_process(si_t *sih)
{
si_info_t *sii;
gpioh_item_t *h;
uint32 status;
uint32 level = si_gpioin(sih);
uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
sii = SI_INFO(sih);
for (h = sii->gpioh_head; h != NULL; h = h->next) {
if (h->handler) {
status = (h->level ? level : edge);
if (status & h->event)
h->handler(status, h->arg);
}
}
si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
}
uint32
si_gpio_int_enable(si_t *sih, bool enable)
{
si_info_t *sii;
uint offs;
sii = SI_INFO(sih);
if (sih->ccrev < 11)
return -1;
offs = OFFSETOF(chipcregs_t, intmask);
return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
}
/* Return the RAM size of the SOCRAM core */
uint32
si_socram_size(si_t *sih)
{
si_info_t *sii;
uint origidx;
uint intr_val = 0;
sbsocramregs_t *regs;
bool wasup;
uint corerev;
uint32 coreinfo;
uint memsize = 0;
sii = SI_INFO(sih);
/* Block ints and save current core */
INTR_OFF(sii, intr_val);
origidx = si_coreidx(sih);
/* Switch to SOCRAM core */
if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
goto done;
/* Get info for determining size */
if (!(wasup = si_iscoreup(sih)))
si_core_reset(sih, 0, 0);
corerev = si_corerev(sih);
coreinfo = R_REG(sii->osh, &regs->coreinfo);
/* Calculate size from coreinfo based on rev */
if (corerev == 0)
memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
else if (corerev < 3) {
memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
} else {
uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
if (lss != 0)
nb --;
memsize = nb * (1 << (bsz + SR_BSZ_BASE));
if (lss != 0)
memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
}
/* Return to previous state and core */
if (!wasup)
si_core_disable(sih, 0);
si_setcoreidx(sih, origidx);
done:
INTR_RESTORE(sii, intr_val);
return memsize;
}
void
si_btcgpiowar(si_t *sih)
{
si_info_t *sii;
uint origidx;
uint intr_val = 0;
chipcregs_t *cc;
sii = SI_INFO(sih);
/* Make sure that there is ChipCommon core present &&
* UART_TX is strapped to 1
*/
if (!(sih->cccaps & CC_CAP_UARTGPIO))
return;
/* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
INTR_OFF(sii, intr_val);
origidx = si_coreidx(sih);
cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
ASSERT(cc != NULL);
W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
/* restore the original index */
si_setcoreidx(sih, origidx);
INTR_RESTORE(sii, intr_val);
}
/* check if the device is removed */
bool
si_deviceremoved(si_t *sih)
{
uint32 w;
si_info_t *sii;
sii = SI_INFO(sih);
switch (BUSTYPE(sih->bustype)) {
case PCI_BUS:
ASSERT(sii->osh != NULL);
w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(uint32));
if ((w & 0xFFFF) != VENDOR_BROADCOM)
return TRUE;
else
return FALSE;
default:
return FALSE;
}
return FALSE;
}
| gpl-2.0 |
m7rom/android_kernel_htc_msm8960 | drivers/media/common/tuners/max2165.c | 4963 | 10282 | /*
* Driver for Maxim MAX2165 silicon tuner
*
* Copyright (c) 2009 David T. L. Wong <davidtlwong@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/videodev2.h>
#include <linux/delay.h>
#include <linux/dvb/frontend.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include "dvb_frontend.h"
#include "max2165.h"
#include "max2165_priv.h"
#include "tuner-i2c.h"
#define dprintk(args...) \
do { \
if (debug) \
printk(KERN_DEBUG "max2165: " args); \
} while (0)
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
static int max2165_write_reg(struct max2165_priv *priv, u8 reg, u8 data)
{
int ret;
u8 buf[] = { reg, data };
struct i2c_msg msg = { .flags = 0, .buf = buf, .len = 2 };
msg.addr = priv->config->i2c_address;
if (debug >= 2)
dprintk("%s: reg=0x%02X, data=0x%02X\n", __func__, reg, data);
ret = i2c_transfer(priv->i2c, &msg, 1);
if (ret != 1)
dprintk("%s: error reg=0x%x, data=0x%x, ret=%i\n",
__func__, reg, data, ret);
return (ret != 1) ? -EIO : 0;
}
static int max2165_read_reg(struct max2165_priv *priv, u8 reg, u8 *p_data)
{
int ret;
u8 dev_addr = priv->config->i2c_address;
u8 b0[] = { reg };
u8 b1[] = { 0 };
struct i2c_msg msg[] = {
{ .addr = dev_addr, .flags = 0, .buf = b0, .len = 1 },
{ .addr = dev_addr, .flags = I2C_M_RD, .buf = b1, .len = 1 },
};
ret = i2c_transfer(priv->i2c, msg, 2);
if (ret != 2) {
dprintk("%s: error reg=0x%x, ret=%i\n", __func__, reg, ret);
return -EIO;
}
*p_data = b1[0];
if (debug >= 2)
dprintk("%s: reg=0x%02X, data=0x%02X\n",
__func__, reg, b1[0]);
return 0;
}
static int max2165_mask_write_reg(struct max2165_priv *priv, u8 reg,
u8 mask, u8 data)
{
int ret;
u8 v;
data &= mask;
ret = max2165_read_reg(priv, reg, &v);
if (ret != 0)
return ret;
v &= ~mask;
v |= data;
ret = max2165_write_reg(priv, reg, v);
return ret;
}
static int max2165_read_rom_table(struct max2165_priv *priv)
{
u8 dat[3];
int i;
for (i = 0; i < 3; i++) {
max2165_write_reg(priv, REG_ROM_TABLE_ADDR, i + 1);
max2165_read_reg(priv, REG_ROM_TABLE_DATA, &dat[i]);
}
priv->tf_ntch_low_cfg = dat[0] >> 4;
priv->tf_ntch_hi_cfg = dat[0] & 0x0F;
priv->tf_balun_low_ref = dat[1] & 0x0F;
priv->tf_balun_hi_ref = dat[1] >> 4;
priv->bb_filter_7mhz_cfg = dat[2] & 0x0F;
priv->bb_filter_8mhz_cfg = dat[2] >> 4;
dprintk("tf_ntch_low_cfg = 0x%X\n", priv->tf_ntch_low_cfg);
dprintk("tf_ntch_hi_cfg = 0x%X\n", priv->tf_ntch_hi_cfg);
dprintk("tf_balun_low_ref = 0x%X\n", priv->tf_balun_low_ref);
dprintk("tf_balun_hi_ref = 0x%X\n", priv->tf_balun_hi_ref);
dprintk("bb_filter_7mhz_cfg = 0x%X\n", priv->bb_filter_7mhz_cfg);
dprintk("bb_filter_8mhz_cfg = 0x%X\n", priv->bb_filter_8mhz_cfg);
return 0;
}
static int max2165_set_osc(struct max2165_priv *priv, u8 osc /*MHz*/)
{
u8 v;
v = (osc / 2);
if (v == 2)
v = 0x7;
else
v -= 8;
max2165_mask_write_reg(priv, REG_PLL_CFG, 0x07, v);
return 0;
}
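/* Illustrative encodings from the arithmetic above, for a few assumed even
* crystal values (MHz -> low three bits of REG_PLL_CFG):
* 4 MHz -> v = 2, special-cased to 0x7; 16 MHz -> 0x0; 20 MHz -> 0x2. */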
static int max2165_set_bandwidth(struct max2165_priv *priv, u32 bw)
{
u8 val;
if (bw == 8000000)
val = priv->bb_filter_8mhz_cfg;
else
val = priv->bb_filter_7mhz_cfg;
max2165_mask_write_reg(priv, REG_BASEBAND_CTRL, 0xF0, val << 4);
return 0;
}
int fixpt_div32(u32 dividend, u32 divisor, u32 *quotient, u32 *fraction)
{
u32 remainder;
u32 q, f = 0;
int i;
if (0 == divisor)
return -EINVAL;
q = dividend / divisor;
remainder = dividend - q * divisor;
for (i = 0; i < 31; i++) {
remainder <<= 1;
if (remainder >= divisor) {
f += 1;
remainder -= divisor;
}
f <<= 1;
}
*quotient = q;
*fraction = f;
return 0;
}
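/* Worked example with hypothetical arguments: fixpt_div32(237, 10, &q, &f)
* gives q = 23 and f = 0xB3333332, i.e. the first 31 bits of the binary
* fraction 0.7 shifted up by one, roughly 0.7 * 2^32. max2165_set_rf()
* below keeps only the top 20 bits of that fraction. */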
static int max2165_set_rf(struct max2165_priv *priv, u32 freq)
{
u8 tf;
u8 tf_ntch;
u32 t;
u32 quotient, fraction;
int ret;
/* Set PLL divider according to RF frequency */
ret = fixpt_div32(freq / 1000, priv->config->osc_clk * 1000,
&quotient, &fraction);
if (ret != 0)
return ret;
/* 20-bit fraction */
fraction >>= 12;
max2165_write_reg(priv, REG_NDIV_INT, quotient);
max2165_mask_write_reg(priv, REG_NDIV_FRAC2, 0x0F, fraction >> 16);
max2165_write_reg(priv, REG_NDIV_FRAC1, fraction >> 8);
max2165_write_reg(priv, REG_NDIV_FRAC0, fraction);
/* Notch filter */
tf_ntch = (freq < 725000000) ?
priv->tf_ntch_low_cfg : priv->tf_ntch_hi_cfg;
/* Tracking filter balun */
t = priv->tf_balun_low_ref;
t += (priv->tf_balun_hi_ref - priv->tf_balun_low_ref)
* (freq / 1000 - 470000) / (780000 - 470000);
tf = t;
dprintk("tf = %X\n", tf);
tf |= tf_ntch << 4;
max2165_write_reg(priv, REG_TRACK_FILTER, tf);
return 0;
}
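/* The balun setting above is a plain linear interpolation between the
* factory values for 470 MHz and 780 MHz taken from the ROM table: at an
* assumed midpoint of 625 MHz it works out to tf_balun_low_ref plus half of
* the low-to-high span, with the notch-filter nibble packed into the upper
* four bits of the register. */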
static void max2165_debug_status(struct max2165_priv *priv)
{
u8 status, autotune;
u8 auto_vco_success, auto_vco_active;
u8 pll_locked;
u8 dc_offset_low, dc_offset_hi;
u8 signal_lv_over_threshold;
u8 vco, vco_sub_band, adc;
max2165_read_reg(priv, REG_STATUS, &status);
max2165_read_reg(priv, REG_AUTOTUNE, &autotune);
auto_vco_success = (status >> 6) & 0x01;
auto_vco_active = (status >> 5) & 0x01;
pll_locked = (status >> 4) & 0x01;
dc_offset_low = (status >> 3) & 0x01;
dc_offset_hi = (status >> 2) & 0x01;
signal_lv_over_threshold = status & 0x01;
vco = autotune >> 6;
vco_sub_band = (autotune >> 3) & 0x7;
adc = autotune & 0x7;
dprintk("auto VCO active: %d, auto VCO success: %d\n",
auto_vco_active, auto_vco_success);
dprintk("PLL locked: %d\n", pll_locked);
dprintk("DC offset low: %d, DC offset high: %d\n",
dc_offset_low, dc_offset_hi);
dprintk("Signal lvl over threshold: %d\n", signal_lv_over_threshold);
dprintk("VCO: %d, VCO Sub-band: %d, ADC: %d\n", vco, vco_sub_band, adc);
}
static int max2165_set_params(struct dvb_frontend *fe)
{
struct max2165_priv *priv = fe->tuner_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
switch (c->bandwidth_hz) {
case 7000000:
case 8000000:
priv->frequency = c->frequency;
break;
default:
printk(KERN_INFO "MAX2165: bandwidth %d Hz not supported.\n",
c->bandwidth_hz);
return -EINVAL;
}
dprintk("%s() frequency=%d\n", __func__, c->frequency);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
max2165_set_bandwidth(priv, c->bandwidth_hz);
ret = max2165_set_rf(priv, priv->frequency);
mdelay(50);
max2165_debug_status(priv);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
if (ret != 0)
return -EREMOTEIO;
return 0;
}
static int max2165_get_frequency(struct dvb_frontend *fe, u32 *freq)
{
struct max2165_priv *priv = fe->tuner_priv;
dprintk("%s()\n", __func__);
*freq = priv->frequency;
return 0;
}
static int max2165_get_bandwidth(struct dvb_frontend *fe, u32 *bw)
{
struct max2165_priv *priv = fe->tuner_priv;
dprintk("%s()\n", __func__);
*bw = priv->bandwidth;
return 0;
}
static int max2165_get_status(struct dvb_frontend *fe, u32 *status)
{
struct max2165_priv *priv = fe->tuner_priv;
u16 lock_status = 0;
dprintk("%s()\n", __func__);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
max2165_debug_status(priv);
*status = lock_status;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
return 0;
}
static int max2165_sleep(struct dvb_frontend *fe)
{
dprintk("%s()\n", __func__);
return 0;
}
static int max2165_init(struct dvb_frontend *fe)
{
struct max2165_priv *priv = fe->tuner_priv;
dprintk("%s()\n", __func__);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
/* Setup initial values */
/* Fractional Mode on */
max2165_write_reg(priv, REG_NDIV_FRAC2, 0x18);
/* LNA on */
max2165_write_reg(priv, REG_LNA, 0x01);
max2165_write_reg(priv, REG_PLL_CFG, 0x7A);
max2165_write_reg(priv, REG_TEST, 0x08);
max2165_write_reg(priv, REG_SHUTDOWN, 0x40);
max2165_write_reg(priv, REG_VCO_CTRL, 0x84);
max2165_write_reg(priv, REG_BASEBAND_CTRL, 0xC3);
max2165_write_reg(priv, REG_DC_OFFSET_CTRL, 0x75);
max2165_write_reg(priv, REG_DC_OFFSET_DAC, 0x00);
max2165_write_reg(priv, REG_ROM_TABLE_ADDR, 0x00);
max2165_set_osc(priv, priv->config->osc_clk);
max2165_read_rom_table(priv);
max2165_set_bandwidth(priv, 8000000);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
return 0;
}
static int max2165_release(struct dvb_frontend *fe)
{
struct max2165_priv *priv = fe->tuner_priv;
dprintk("%s()\n", __func__);
kfree(priv);
fe->tuner_priv = NULL;
return 0;
}
static const struct dvb_tuner_ops max2165_tuner_ops = {
.info = {
.name = "Maxim MAX2165",
.frequency_min = 470000000,
.frequency_max = 780000000,
.frequency_step = 50000,
},
.release = max2165_release,
.init = max2165_init,
.sleep = max2165_sleep,
.set_params = max2165_set_params,
.set_analog_params = NULL,
.get_frequency = max2165_get_frequency,
.get_bandwidth = max2165_get_bandwidth,
.get_status = max2165_get_status
};
struct dvb_frontend *max2165_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct max2165_config *cfg)
{
struct max2165_priv *priv = NULL;
dprintk("%s(%d-%04x)\n", __func__,
i2c ? i2c_adapter_id(i2c) : -1,
cfg ? cfg->i2c_address : -1);
priv = kzalloc(sizeof(struct max2165_priv), GFP_KERNEL);
if (priv == NULL)
return NULL;
memcpy(&fe->ops.tuner_ops, &max2165_tuner_ops,
sizeof(struct dvb_tuner_ops));
priv->config = cfg;
priv->i2c = i2c;
fe->tuner_priv = priv;
max2165_init(fe);
max2165_debug_status(priv);
return fe;
}
EXPORT_SYMBOL(max2165_attach);
MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
MODULE_DESCRIPTION("Maxim MAX2165 silicon tuner driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
slz/delidded-kernel-n900t-note3 | fs/hfsplus/catalog.c | 4963 | 11270 | /*
* linux/fs/hfsplus/catalog.c
*
* Copyright (C) 2001
* Brad Boyer (flar@allandria.com)
* (C) 2003 Ardis Technologies <roman@ardistech.com>
*
* Handling of catalog records
*/
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *k1,
const hfsplus_btree_key *k2)
{
__be32 k1p, k2p;
k1p = k1->cat.parent;
k2p = k2->cat.parent;
if (k1p != k2p)
return be32_to_cpu(k1p) < be32_to_cpu(k2p) ? -1 : 1;
return hfsplus_strcasecmp(&k1->cat.name, &k2->cat.name);
}
int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
const hfsplus_btree_key *k2)
{
__be32 k1p, k2p;
k1p = k1->cat.parent;
k2p = k2->cat.parent;
if (k1p != k2p)
return be32_to_cpu(k1p) < be32_to_cpu(k2p) ? -1 : 1;
return hfsplus_strcmp(&k1->cat.name, &k2->cat.name);
}
void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
u32 parent, struct qstr *str)
{
int len;
key->cat.parent = cpu_to_be32(parent);
if (str) {
hfsplus_asc2uni(sb, &key->cat.name, str->name, str->len);
len = be16_to_cpu(key->cat.name.length);
} else {
key->cat.name.length = 0;
len = 0;
}
key->key_len = cpu_to_be16(6 + 2 * len);
}
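/* The 6 in key_len is the fixed part of an HFS+ catalog key that follows
* the length field: a 4-byte parent CNID plus the 2-byte Unicode name
* length; each name character then adds two more bytes. */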
static void hfsplus_cat_build_key_uni(hfsplus_btree_key *key, u32 parent,
struct hfsplus_unistr *name)
{
int ustrlen;
ustrlen = be16_to_cpu(name->length);
key->cat.parent = cpu_to_be32(parent);
key->cat.name.length = cpu_to_be16(ustrlen);
ustrlen *= 2;
memcpy(key->cat.name.unicode, name->unicode, ustrlen);
key->key_len = cpu_to_be16(6 + ustrlen);
}
void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms)
{
if (inode->i_flags & S_IMMUTABLE)
perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
else
perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
if (inode->i_flags & S_APPEND)
perms->rootflags |= HFSPLUS_FLG_APPEND;
else
perms->rootflags &= ~HFSPLUS_FLG_APPEND;
perms->userflags = HFSPLUS_I(inode)->userflags;
perms->mode = cpu_to_be16(inode->i_mode);
perms->owner = cpu_to_be32(inode->i_uid);
perms->group = cpu_to_be32(inode->i_gid);
if (S_ISREG(inode->i_mode))
perms->dev = cpu_to_be32(inode->i_nlink);
else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode))
perms->dev = cpu_to_be32(inode->i_rdev);
else
perms->dev = 0;
}
static int hfsplus_cat_build_record(hfsplus_cat_entry *entry,
u32 cnid, struct inode *inode)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
if (S_ISDIR(inode->i_mode)) {
struct hfsplus_cat_folder *folder;
folder = &entry->folder;
memset(folder, 0, sizeof(*folder));
folder->type = cpu_to_be16(HFSPLUS_FOLDER);
folder->id = cpu_to_be32(inode->i_ino);
HFSPLUS_I(inode)->create_date =
folder->create_date =
folder->content_mod_date =
folder->attribute_mod_date =
folder->access_date = hfsp_now2mt();
hfsplus_cat_set_perms(inode, &folder->permissions);
if (inode == sbi->hidden_dir)
/* invisible and namelocked */
folder->user_info.frFlags = cpu_to_be16(0x5000);
return sizeof(*folder);
} else {
struct hfsplus_cat_file *file;
file = &entry->file;
memset(file, 0, sizeof(*file));
file->type = cpu_to_be16(HFSPLUS_FILE);
file->flags = cpu_to_be16(HFSPLUS_FILE_THREAD_EXISTS);
file->id = cpu_to_be32(cnid);
HFSPLUS_I(inode)->create_date =
file->create_date =
file->content_mod_date =
file->attribute_mod_date =
file->access_date = hfsp_now2mt();
if (cnid == inode->i_ino) {
hfsplus_cat_set_perms(inode, &file->permissions);
if (S_ISLNK(inode->i_mode)) {
file->user_info.fdType =
cpu_to_be32(HFSP_SYMLINK_TYPE);
file->user_info.fdCreator =
cpu_to_be32(HFSP_SYMLINK_CREATOR);
} else {
file->user_info.fdType =
cpu_to_be32(sbi->type);
file->user_info.fdCreator =
cpu_to_be32(sbi->creator);
}
if (HFSPLUS_FLG_IMMUTABLE &
(file->permissions.rootflags |
file->permissions.userflags))
file->flags |=
cpu_to_be16(HFSPLUS_FILE_LOCKED);
} else {
file->user_info.fdType =
cpu_to_be32(HFSP_HARDLINK_TYPE);
file->user_info.fdCreator =
cpu_to_be32(HFSP_HFSPLUS_CREATOR);
file->user_info.fdFlags =
cpu_to_be16(0x100);
file->create_date =
HFSPLUS_I(sbi->hidden_dir)->create_date;
file->permissions.dev =
cpu_to_be32(HFSPLUS_I(inode)->linkid);
}
return sizeof(*file);
}
}
static int hfsplus_fill_cat_thread(struct super_block *sb,
hfsplus_cat_entry *entry, int type,
u32 parentid, struct qstr *str)
{
entry->type = cpu_to_be16(type);
entry->thread.reserved = 0;
entry->thread.parentID = cpu_to_be32(parentid);
hfsplus_asc2uni(sb, &entry->thread.nodeName, str->name, str->len);
return 10 + be16_to_cpu(entry->thread.nodeName.length) * 2;
}
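/* The constant 10 is the fixed part of a thread record: 2-byte record type
* + 2-byte reserved field + 4-byte parentID + 2-byte nodeName length,
* followed by the UTF-16 name itself. */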
/* Try to get a catalog entry for given catalog id */
int hfsplus_find_cat(struct super_block *sb, u32 cnid,
struct hfs_find_data *fd)
{
hfsplus_cat_entry tmp;
int err;
u16 type;
hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
if (err)
return err;
type = be16_to_cpu(tmp.type);
if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) {
printk(KERN_ERR "hfs: found bad thread record in catalog\n");
return -EIO;
}
if (be16_to_cpu(tmp.thread.nodeName.length) > 255) {
printk(KERN_ERR "hfs: catalog name length corrupted\n");
return -EIO;
}
hfsplus_cat_build_key_uni(fd->search_key,
be32_to_cpu(tmp.thread.parentID),
&tmp.thread.nodeName);
return hfs_brec_find(fd);
}
int hfsplus_create_cat(u32 cnid, struct inode *dir,
struct qstr *str, struct inode *inode)
{
struct super_block *sb = dir->i_sb;
struct hfs_find_data fd;
hfsplus_cat_entry entry;
int entry_size;
int err;
dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
str->name, cnid, inode->i_nlink);
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
if (err)
return err;
hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
entry_size = hfsplus_fill_cat_thread(sb, &entry,
S_ISDIR(inode->i_mode) ?
HFSPLUS_FOLDER_THREAD : HFSPLUS_FILE_THREAD,
dir->i_ino, str);
err = hfs_brec_find(&fd);
if (err != -ENOENT) {
if (!err)
err = -EEXIST;
goto err2;
}
err = hfs_brec_insert(&fd, &entry, entry_size);
if (err)
goto err2;
hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
entry_size = hfsplus_cat_build_record(&entry, cnid, inode);
err = hfs_brec_find(&fd);
if (err != -ENOENT) {
/* panic? */
if (!err)
err = -EEXIST;
goto err1;
}
err = hfs_brec_insert(&fd, &entry, entry_size);
if (err)
goto err1;
dir->i_size++;
dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
hfs_find_exit(&fd);
return 0;
err1:
hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
if (!hfs_brec_find(&fd))
hfs_brec_remove(&fd);
err2:
hfs_find_exit(&fd);
return err;
}
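/* Each object thus gets two catalog records: a thread record keyed by
* (cnid, empty name) that points back at (parent, name), inserted first,
* and the actual file/folder record keyed by (parent, name). The err1 path
* removes the thread record again if inserting the second record fails. */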
int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
{
struct super_block *sb = dir->i_sb;
struct hfs_find_data fd;
struct hfsplus_fork_raw fork;
struct list_head *pos;
int err, off;
u16 type;
dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n",
str ? str->name : NULL, cnid);
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
if (err)
return err;
if (!str) {
int len;
hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
err = hfs_brec_find(&fd);
if (err)
goto out;
off = fd.entryoffset +
offsetof(struct hfsplus_cat_thread, nodeName);
fd.search_key->cat.parent = cpu_to_be32(dir->i_ino);
hfs_bnode_read(fd.bnode,
&fd.search_key->cat.name.length, off, 2);
len = be16_to_cpu(fd.search_key->cat.name.length) * 2;
hfs_bnode_read(fd.bnode,
&fd.search_key->cat.name.unicode,
off + 2, len);
fd.search_key->key_len = cpu_to_be16(6 + len);
} else
hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
err = hfs_brec_find(&fd);
if (err)
goto out;
type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
if (type == HFSPLUS_FILE) {
#if 0
off = fd.entryoffset + offsetof(hfsplus_cat_file, data_fork);
hfs_bnode_read(fd.bnode, &fork, off, sizeof(fork));
hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_DATA);
#endif
off = fd.entryoffset +
offsetof(struct hfsplus_cat_file, rsrc_fork);
hfs_bnode_read(fd.bnode, &fork, off, sizeof(fork));
hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC);
}
list_for_each(pos, &HFSPLUS_I(dir)->open_dir_list) {
struct hfsplus_readdir_data *rd =
list_entry(pos, struct hfsplus_readdir_data, list);
if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0)
rd->file->f_pos--;
}
err = hfs_brec_remove(&fd);
if (err)
goto out;
hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
err = hfs_brec_find(&fd);
if (err)
goto out;
err = hfs_brec_remove(&fd);
if (err)
goto out;
dir->i_size--;
dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
out:
hfs_find_exit(&fd);
return err;
}
int hfsplus_rename_cat(u32 cnid,
struct inode *src_dir, struct qstr *src_name,
struct inode *dst_dir, struct qstr *dst_name)
{
struct super_block *sb = src_dir->i_sb;
struct hfs_find_data src_fd, dst_fd;
hfsplus_cat_entry entry;
int entry_size, type;
int err;
dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
cnid, src_dir->i_ino, src_name->name,
dst_dir->i_ino, dst_name->name);
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
if (err)
return err;
dst_fd = src_fd;
/* find the old dir entry and read the data */
hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
err = hfs_brec_find(&src_fd);
if (err)
goto out;
if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
err = -EIO;
goto out;
}
hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
src_fd.entrylength);
/* create new dir entry with the data from the old entry */
hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name);
err = hfs_brec_find(&dst_fd);
if (err != -ENOENT) {
if (!err)
err = -EEXIST;
goto out;
}
err = hfs_brec_insert(&dst_fd, &entry, src_fd.entrylength);
if (err)
goto out;
dst_dir->i_size++;
dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC;
/* finally remove the old entry */
hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
err = hfs_brec_find(&src_fd);
if (err)
goto out;
err = hfs_brec_remove(&src_fd);
if (err)
goto out;
src_dir->i_size--;
src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC;
/* remove old thread entry */
hfsplus_cat_build_key(sb, src_fd.search_key, cnid, NULL);
err = hfs_brec_find(&src_fd);
if (err)
goto out;
type = hfs_bnode_read_u16(src_fd.bnode, src_fd.entryoffset);
err = hfs_brec_remove(&src_fd);
if (err)
goto out;
/* create new thread entry */
hfsplus_cat_build_key(sb, dst_fd.search_key, cnid, NULL);
entry_size = hfsplus_fill_cat_thread(sb, &entry, type,
dst_dir->i_ino, dst_name);
err = hfs_brec_find(&dst_fd);
if (err != -ENOENT) {
if (!err)
err = -EEXIST;
goto out;
}
err = hfs_brec_insert(&dst_fd, &entry, entry_size);
hfsplus_mark_inode_dirty(dst_dir, HFSPLUS_I_CAT_DIRTY);
hfsplus_mark_inode_dirty(src_dir, HFSPLUS_I_CAT_DIRTY);
out:
hfs_bnode_put(dst_fd.bnode);
hfs_find_exit(&src_fd);
return err;
}
| gpl-2.0 |
fat-tire/android_kernel_qcom_kfire-hdx-common | sound/pci/ctxfi/ctdaio.c | 8035 | 17649 | /**
* Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
*
* This source file is released under GPL v2 license (no other versions).
* See the COPYING file included in the main directory of this source
* distribution for the license terms and conditions.
*
* @File ctdaio.c
*
* @Brief
* This file contains the implementation of Digital Audio Input Output
* resource management object.
*
* @Author Liu Chun
* @Date May 23 2008
*
*/
#include "ctdaio.h"
#include "cthardware.h"
#include "ctimap.h"
#include <linux/slab.h>
#include <linux/kernel.h>
#define DAIO_OUT_MAX SPDIFOO
struct daio_usage {
unsigned short data;
};
struct daio_rsc_idx {
unsigned short left;
unsigned short right;
};
struct daio_rsc_idx idx_20k1[NUM_DAIOTYP] = {
[LINEO1] = {.left = 0x00, .right = 0x01},
[LINEO2] = {.left = 0x18, .right = 0x19},
[LINEO3] = {.left = 0x08, .right = 0x09},
[LINEO4] = {.left = 0x10, .right = 0x11},
[LINEIM] = {.left = 0x1b5, .right = 0x1bd},
[SPDIFOO] = {.left = 0x20, .right = 0x21},
[SPDIFIO] = {.left = 0x15, .right = 0x1d},
[SPDIFI1] = {.left = 0x95, .right = 0x9d},
};
struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
[LINEO1] = {.left = 0x40, .right = 0x41},
[LINEO2] = {.left = 0x60, .right = 0x61},
[LINEO3] = {.left = 0x50, .right = 0x51},
[LINEO4] = {.left = 0x70, .right = 0x71},
[LINEIM] = {.left = 0x45, .right = 0xc5},
[MIC] = {.left = 0x55, .right = 0xd5},
[SPDIFOO] = {.left = 0x00, .right = 0x01},
[SPDIFIO] = {.left = 0x05, .right = 0x85},
};
static int daio_master(struct rsc *rsc)
{
/* Actually, this is not the resource index of DAIO.
* For DAO, it is the input mapper index. And, for DAI,
* it is the output time-slot index. */
return rsc->conj = rsc->idx;
}
static int daio_index(const struct rsc *rsc)
{
return rsc->conj;
}
static int daio_out_next_conj(struct rsc *rsc)
{
return rsc->conj += 2;
}
static int daio_in_next_conj_20k1(struct rsc *rsc)
{
return rsc->conj += 0x200;
}
static int daio_in_next_conj_20k2(struct rsc *rsc)
{
return rsc->conj += 0x100;
}
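/* next_conj advances a resource to its conjugate for the next master-
* sample-rate channel: output mapper indices step by 2 (left and right
* occupy adjacent indices, see the idx tables above), while input time-slot
* indices step by 0x200 on 20K1 and 0x100 on 20K2 chips. */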
static struct rsc_ops daio_out_rsc_ops = {
.master = daio_master,
.next_conj = daio_out_next_conj,
.index = daio_index,
.output_slot = NULL,
};
static struct rsc_ops daio_in_rsc_ops_20k1 = {
.master = daio_master,
.next_conj = daio_in_next_conj_20k1,
.index = NULL,
.output_slot = daio_index,
};
static struct rsc_ops daio_in_rsc_ops_20k2 = {
.master = daio_master,
.next_conj = daio_in_next_conj_20k2,
.index = NULL,
.output_slot = daio_index,
};
static unsigned int daio_device_index(enum DAIOTYP type, struct hw *hw)
{
switch (hw->chip_type) {
case ATC20K1:
switch (type) {
case SPDIFOO: return 0;
case SPDIFIO: return 0;
case SPDIFI1: return 1;
case LINEO1: return 4;
case LINEO2: return 7;
case LINEO3: return 5;
case LINEO4: return 6;
case LINEIM: return 7;
default: return -EINVAL;
}
case ATC20K2:
switch (type) {
case SPDIFOO: return 0;
case SPDIFIO: return 0;
case LINEO1: return 4;
case LINEO2: return 7;
case LINEO3: return 5;
case LINEO4: return 6;
case LINEIM: return 4;
case MIC: return 5;
default: return -EINVAL;
}
default:
return -EINVAL;
}
}
static int dao_rsc_reinit(struct dao *dao, const struct dao_desc *desc);
static int dao_spdif_get_spos(struct dao *dao, unsigned int *spos)
{
((struct hw *)dao->hw)->dao_get_spos(dao->ctrl_blk, spos);
return 0;
}
static int dao_spdif_set_spos(struct dao *dao, unsigned int spos)
{
((struct hw *)dao->hw)->dao_set_spos(dao->ctrl_blk, spos);
return 0;
}
static int dao_commit_write(struct dao *dao)
{
((struct hw *)dao->hw)->dao_commit_write(dao->hw,
daio_device_index(dao->daio.type, dao->hw), dao->ctrl_blk);
return 0;
}
static int dao_set_left_input(struct dao *dao, struct rsc *input)
{
struct imapper *entry;
struct daio *daio = &dao->daio;
int i;
entry = kzalloc((sizeof(*entry) * daio->rscl.msr), GFP_KERNEL);
if (!entry)
return -ENOMEM;
dao->ops->clear_left_input(dao);
/* Program master and conjugate resources */
input->ops->master(input);
daio->rscl.ops->master(&daio->rscl);
for (i = 0; i < daio->rscl.msr; i++, entry++) {
entry->slot = input->ops->output_slot(input);
entry->user = entry->addr = daio->rscl.ops->index(&daio->rscl);
dao->mgr->imap_add(dao->mgr, entry);
dao->imappers[i] = entry;
input->ops->next_conj(input);
daio->rscl.ops->next_conj(&daio->rscl);
}
input->ops->master(input);
daio->rscl.ops->master(&daio->rscl);
return 0;
}
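/* Note that the msr imapper entries above come from one kzalloc'd array, so
* dao->imappers[0] (and imappers[rscl.msr] for the right channel) holds the
* base pointer of the whole block - which is why the clear functions below
* only kfree() that first slot. */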
static int dao_set_right_input(struct dao *dao, struct rsc *input)
{
struct imapper *entry;
struct daio *daio = &dao->daio;
int i;
entry = kzalloc((sizeof(*entry) * daio->rscr.msr), GFP_KERNEL);
if (!entry)
return -ENOMEM;
dao->ops->clear_right_input(dao);
/* Program master and conjugate resources */
input->ops->master(input);
daio->rscr.ops->master(&daio->rscr);
for (i = 0; i < daio->rscr.msr; i++, entry++) {
entry->slot = input->ops->output_slot(input);
entry->user = entry->addr = daio->rscr.ops->index(&daio->rscr);
dao->mgr->imap_add(dao->mgr, entry);
dao->imappers[daio->rscl.msr + i] = entry;
input->ops->next_conj(input);
daio->rscr.ops->next_conj(&daio->rscr);
}
input->ops->master(input);
daio->rscr.ops->master(&daio->rscr);
return 0;
}
static int dao_clear_left_input(struct dao *dao)
{
struct imapper *entry;
struct daio *daio = &dao->daio;
int i;
if (!dao->imappers[0])
return 0;
entry = dao->imappers[0];
dao->mgr->imap_delete(dao->mgr, entry);
/* Program conjugate resources */
for (i = 1; i < daio->rscl.msr; i++) {
entry = dao->imappers[i];
dao->mgr->imap_delete(dao->mgr, entry);
dao->imappers[i] = NULL;
}
kfree(dao->imappers[0]);
dao->imappers[0] = NULL;
return 0;
}
static int dao_clear_right_input(struct dao *dao)
{
struct imapper *entry;
struct daio *daio = &dao->daio;
int i;
if (!dao->imappers[daio->rscl.msr])
return 0;
entry = dao->imappers[daio->rscl.msr];
dao->mgr->imap_delete(dao->mgr, entry);
/* Program conjugate resources */
for (i = 1; i < daio->rscr.msr; i++) {
entry = dao->imappers[daio->rscl.msr + i];
dao->mgr->imap_delete(dao->mgr, entry);
dao->imappers[daio->rscl.msr + i] = NULL;
}
kfree(dao->imappers[daio->rscl.msr]);
dao->imappers[daio->rscl.msr] = NULL;
return 0;
}
static struct dao_rsc_ops dao_ops = {
.set_spos = dao_spdif_set_spos,
.commit_write = dao_commit_write,
.get_spos = dao_spdif_get_spos,
.reinit = dao_rsc_reinit,
.set_left_input = dao_set_left_input,
.set_right_input = dao_set_right_input,
.clear_left_input = dao_clear_left_input,
.clear_right_input = dao_clear_right_input,
};
static int dai_set_srt_srcl(struct dai *dai, struct rsc *src)
{
src->ops->master(src);
((struct hw *)dai->hw)->dai_srt_set_srcm(dai->ctrl_blk,
src->ops->index(src));
return 0;
}
static int dai_set_srt_srcr(struct dai *dai, struct rsc *src)
{
src->ops->master(src);
((struct hw *)dai->hw)->dai_srt_set_srco(dai->ctrl_blk,
src->ops->index(src));
return 0;
}
static int dai_set_srt_msr(struct dai *dai, unsigned int msr)
{
unsigned int rsr;
for (rsr = 0; msr > 1; msr >>= 1)
rsr++;
((struct hw *)dai->hw)->dai_srt_set_rsr(dai->ctrl_blk, rsr);
return 0;
}
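/* The loop above is an integer log2: an msr of 1, 2, 4 or 8 yields an rsr
* of 0, 1, 2 or 3, presumably the encoding the SRT rate register expects. */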
static int dai_set_enb_src(struct dai *dai, unsigned int enb)
{
((struct hw *)dai->hw)->dai_srt_set_ec(dai->ctrl_blk, enb);
return 0;
}
static int dai_set_enb_srt(struct dai *dai, unsigned int enb)
{
((struct hw *)dai->hw)->dai_srt_set_et(dai->ctrl_blk, enb);
return 0;
}
static int dai_commit_write(struct dai *dai)
{
((struct hw *)dai->hw)->dai_commit_write(dai->hw,
daio_device_index(dai->daio.type, dai->hw), dai->ctrl_blk);
return 0;
}
static struct dai_rsc_ops dai_ops = {
.set_srt_srcl = dai_set_srt_srcl,
.set_srt_srcr = dai_set_srt_srcr,
.set_srt_msr = dai_set_srt_msr,
.set_enb_src = dai_set_enb_src,
.set_enb_srt = dai_set_enb_srt,
.commit_write = dai_commit_write,
};
static int daio_rsc_init(struct daio *daio,
const struct daio_desc *desc,
void *hw)
{
int err;
unsigned int idx_l, idx_r;
switch (((struct hw *)hw)->chip_type) {
case ATC20K1:
idx_l = idx_20k1[desc->type].left;
idx_r = idx_20k1[desc->type].right;
break;
case ATC20K2:
idx_l = idx_20k2[desc->type].left;
idx_r = idx_20k2[desc->type].right;
break;
default:
return -EINVAL;
}
err = rsc_init(&daio->rscl, idx_l, DAIO, desc->msr, hw);
if (err)
return err;
err = rsc_init(&daio->rscr, idx_r, DAIO, desc->msr, hw);
if (err)
goto error1;
/* Set daio->rscl/r->ops to daio specific ones */
if (desc->type <= DAIO_OUT_MAX) {
daio->rscl.ops = daio->rscr.ops = &daio_out_rsc_ops;
} else {
switch (((struct hw *)hw)->chip_type) {
case ATC20K1:
daio->rscl.ops = daio->rscr.ops = &daio_in_rsc_ops_20k1;
break;
case ATC20K2:
daio->rscl.ops = daio->rscr.ops = &daio_in_rsc_ops_20k2;
break;
default:
break;
}
}
daio->type = desc->type;
return 0;
error1:
rsc_uninit(&daio->rscl);
return err;
}
static int daio_rsc_uninit(struct daio *daio)
{
rsc_uninit(&daio->rscl);
rsc_uninit(&daio->rscr);
return 0;
}
static int dao_rsc_init(struct dao *dao,
const struct daio_desc *desc,
struct daio_mgr *mgr)
{
struct hw *hw = mgr->mgr.hw;
unsigned int conf;
int err;
err = daio_rsc_init(&dao->daio, desc, mgr->mgr.hw);
if (err)
return err;
dao->imappers = kzalloc(sizeof(void *)*desc->msr*2, GFP_KERNEL);
if (!dao->imappers) {
err = -ENOMEM;
goto error1;
}
dao->ops = &dao_ops;
dao->mgr = mgr;
dao->hw = hw;
err = hw->dao_get_ctrl_blk(&dao->ctrl_blk);
if (err)
goto error2;
hw->daio_mgr_dsb_dao(mgr->mgr.ctrl_blk,
daio_device_index(dao->daio.type, hw));
hw->daio_mgr_commit_write(hw, mgr->mgr.ctrl_blk);
conf = (desc->msr & 0x7) | (desc->passthru << 3);
hw->daio_mgr_dao_init(mgr->mgr.ctrl_blk,
daio_device_index(dao->daio.type, hw), conf);
hw->daio_mgr_enb_dao(mgr->mgr.ctrl_blk,
daio_device_index(dao->daio.type, hw));
hw->daio_mgr_commit_write(hw, mgr->mgr.ctrl_blk);
return 0;
error2:
kfree(dao->imappers);
dao->imappers = NULL;
error1:
daio_rsc_uninit(&dao->daio);
return err;
}
static int dao_rsc_uninit(struct dao *dao)
{
if (dao->imappers) {
if (dao->imappers[0])
dao_clear_left_input(dao);
if (dao->imappers[dao->daio.rscl.msr])
dao_clear_right_input(dao);
kfree(dao->imappers);
dao->imappers = NULL;
}
((struct hw *)dao->hw)->dao_put_ctrl_blk(dao->ctrl_blk);
dao->hw = dao->ctrl_blk = NULL;
daio_rsc_uninit(&dao->daio);
return 0;
}
static int dao_rsc_reinit(struct dao *dao, const struct dao_desc *desc)
{
struct daio_mgr *mgr = dao->mgr;
struct daio_desc dsc = {0};
dsc.type = dao->daio.type;
dsc.msr = desc->msr;
dsc.passthru = desc->passthru;
dao_rsc_uninit(dao);
return dao_rsc_init(dao, &dsc, mgr);
}
static int dai_rsc_init(struct dai *dai,
const struct daio_desc *desc,
struct daio_mgr *mgr)
{
int err;
struct hw *hw = mgr->mgr.hw;
unsigned int rsr, msr;
err = daio_rsc_init(&dai->daio, desc, mgr->mgr.hw);
if (err)
return err;
dai->ops = &dai_ops;
dai->hw = mgr->mgr.hw;
err = hw->dai_get_ctrl_blk(&dai->ctrl_blk);
if (err)
goto error1;
for (rsr = 0, msr = desc->msr; msr > 1; msr >>= 1)
rsr++;
hw->dai_srt_set_rsr(dai->ctrl_blk, rsr);
hw->dai_srt_set_drat(dai->ctrl_blk, 0);
/* default to disabling control of a SRC */
hw->dai_srt_set_ec(dai->ctrl_blk, 0);
hw->dai_srt_set_et(dai->ctrl_blk, 0); /* default to disabling SRT */
hw->dai_commit_write(hw,
daio_device_index(dai->daio.type, dai->hw), dai->ctrl_blk);
return 0;
error1:
daio_rsc_uninit(&dai->daio);
return err;
}
static int dai_rsc_uninit(struct dai *dai)
{
((struct hw *)dai->hw)->dai_put_ctrl_blk(dai->ctrl_blk);
dai->hw = dai->ctrl_blk = NULL;
daio_rsc_uninit(&dai->daio);
return 0;
}
static int daio_mgr_get_rsc(struct rsc_mgr *mgr, enum DAIOTYP type)
{
if (((struct daio_usage *)mgr->rscs)->data & (0x1 << type))
return -ENOENT;
((struct daio_usage *)mgr->rscs)->data |= (0x1 << type);
return 0;
}
static int daio_mgr_put_rsc(struct rsc_mgr *mgr, enum DAIOTYP type)
{
((struct daio_usage *)mgr->rscs)->data &= ~(0x1 << type);
return 0;
}
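/* The daio_usage word is a one-bit-per-DAIO-type bitmask: get sets the bit
* and fails with -ENOENT if it was already set, so each physical port
* (LINEO1, SPDIFOO, ...) can only be claimed once; put clears it again. */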
static int get_daio_rsc(struct daio_mgr *mgr,
const struct daio_desc *desc,
struct daio **rdaio)
{
int err;
struct dai *dai = NULL;
struct dao *dao = NULL;
unsigned long flags;
*rdaio = NULL;
/* Check whether there are sufficient daio resources to meet request. */
spin_lock_irqsave(&mgr->mgr_lock, flags);
err = daio_mgr_get_rsc(&mgr->mgr, desc->type);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
if (err) {
printk(KERN_ERR "Can't meet DAIO resource request!\n");
return err;
}
/* Allocate mem for daio resource */
if (desc->type <= DAIO_OUT_MAX) {
dao = kzalloc(sizeof(*dao), GFP_KERNEL);
if (!dao) {
err = -ENOMEM;
goto error;
}
err = dao_rsc_init(dao, desc, mgr);
if (err)
goto error;
*rdaio = &dao->daio;
} else {
dai = kzalloc(sizeof(*dai), GFP_KERNEL);
if (!dai) {
err = -ENOMEM;
goto error;
}
err = dai_rsc_init(dai, desc, mgr);
if (err)
goto error;
*rdaio = &dai->daio;
}
mgr->daio_enable(mgr, *rdaio);
mgr->commit_write(mgr);
return 0;
error:
if (dao)
kfree(dao);
else if (dai)
kfree(dai);
spin_lock_irqsave(&mgr->mgr_lock, flags);
daio_mgr_put_rsc(&mgr->mgr, desc->type);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
return err;
}
static int put_daio_rsc(struct daio_mgr *mgr, struct daio *daio)
{
unsigned long flags;
mgr->daio_disable(mgr, daio);
mgr->commit_write(mgr);
spin_lock_irqsave(&mgr->mgr_lock, flags);
daio_mgr_put_rsc(&mgr->mgr, daio->type);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
if (daio->type <= DAIO_OUT_MAX) {
dao_rsc_uninit(container_of(daio, struct dao, daio));
kfree(container_of(daio, struct dao, daio));
} else {
dai_rsc_uninit(container_of(daio, struct dai, daio));
kfree(container_of(daio, struct dai, daio));
}
return 0;
}
static int daio_mgr_enb_daio(struct daio_mgr *mgr, struct daio *daio)
{
struct hw *hw = mgr->mgr.hw;
if (DAIO_OUT_MAX >= daio->type) {
hw->daio_mgr_enb_dao(mgr->mgr.ctrl_blk,
daio_device_index(daio->type, hw));
} else {
hw->daio_mgr_enb_dai(mgr->mgr.ctrl_blk,
daio_device_index(daio->type, hw));
}
return 0;
}
static int daio_mgr_dsb_daio(struct daio_mgr *mgr, struct daio *daio)
{
struct hw *hw = mgr->mgr.hw;
if (DAIO_OUT_MAX >= daio->type) {
hw->daio_mgr_dsb_dao(mgr->mgr.ctrl_blk,
daio_device_index(daio->type, hw));
} else {
hw->daio_mgr_dsb_dai(mgr->mgr.ctrl_blk,
daio_device_index(daio->type, hw));
}
return 0;
}
static int daio_map_op(void *data, struct imapper *entry)
{
struct rsc_mgr *mgr = &((struct daio_mgr *)data)->mgr;
struct hw *hw = mgr->hw;
hw->daio_mgr_set_imaparc(mgr->ctrl_blk, entry->slot);
hw->daio_mgr_set_imapnxt(mgr->ctrl_blk, entry->next);
hw->daio_mgr_set_imapaddr(mgr->ctrl_blk, entry->addr);
hw->daio_mgr_commit_write(mgr->hw, mgr->ctrl_blk);
return 0;
}
static int daio_imap_add(struct daio_mgr *mgr, struct imapper *entry)
{
unsigned long flags;
int err;
spin_lock_irqsave(&mgr->imap_lock, flags);
if (!entry->addr && mgr->init_imap_added) {
input_mapper_delete(&mgr->imappers, mgr->init_imap,
daio_map_op, mgr);
mgr->init_imap_added = 0;
}
err = input_mapper_add(&mgr->imappers, entry, daio_map_op, mgr);
spin_unlock_irqrestore(&mgr->imap_lock, flags);
return err;
}
static int daio_imap_delete(struct daio_mgr *mgr, struct imapper *entry)
{
unsigned long flags;
int err;
spin_lock_irqsave(&mgr->imap_lock, flags);
err = input_mapper_delete(&mgr->imappers, entry, daio_map_op, mgr);
if (list_empty(&mgr->imappers)) {
input_mapper_add(&mgr->imappers, mgr->init_imap,
daio_map_op, mgr);
mgr->init_imap_added = 1;
}
spin_unlock_irqrestore(&mgr->imap_lock, flags);
return err;
}
static int daio_mgr_commit_write(struct daio_mgr *mgr)
{
struct hw *hw = mgr->mgr.hw;
hw->daio_mgr_commit_write(hw, mgr->mgr.ctrl_blk);
return 0;
}
int daio_mgr_create(void *hw, struct daio_mgr **rdaio_mgr)
{
int err, i;
struct daio_mgr *daio_mgr;
struct imapper *entry;
*rdaio_mgr = NULL;
daio_mgr = kzalloc(sizeof(*daio_mgr), GFP_KERNEL);
if (!daio_mgr)
return -ENOMEM;
err = rsc_mgr_init(&daio_mgr->mgr, DAIO, NUM_DAIOTYP, hw);
if (err)
goto error1;
spin_lock_init(&daio_mgr->mgr_lock);
spin_lock_init(&daio_mgr->imap_lock);
INIT_LIST_HEAD(&daio_mgr->imappers);
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
err = -ENOMEM;
goto error2;
}
entry->slot = entry->addr = entry->next = entry->user = 0;
list_add(&entry->list, &daio_mgr->imappers);
daio_mgr->init_imap = entry;
daio_mgr->init_imap_added = 1;
daio_mgr->get_daio = get_daio_rsc;
daio_mgr->put_daio = put_daio_rsc;
daio_mgr->daio_enable = daio_mgr_enb_daio;
daio_mgr->daio_disable = daio_mgr_dsb_daio;
daio_mgr->imap_add = daio_imap_add;
daio_mgr->imap_delete = daio_imap_delete;
daio_mgr->commit_write = daio_mgr_commit_write;
for (i = 0; i < 8; i++) {
((struct hw *)hw)->daio_mgr_dsb_dao(daio_mgr->mgr.ctrl_blk, i);
((struct hw *)hw)->daio_mgr_dsb_dai(daio_mgr->mgr.ctrl_blk, i);
}
((struct hw *)hw)->daio_mgr_commit_write(hw, daio_mgr->mgr.ctrl_blk);
*rdaio_mgr = daio_mgr;
return 0;
error2:
rsc_mgr_uninit(&daio_mgr->mgr);
error1:
kfree(daio_mgr);
return err;
}
int daio_mgr_destroy(struct daio_mgr *daio_mgr)
{
unsigned long flags;
/* free daio input mapper list */
spin_lock_irqsave(&daio_mgr->imap_lock, flags);
free_input_mapper_list(&daio_mgr->imappers);
spin_unlock_irqrestore(&daio_mgr->imap_lock, flags);
rsc_mgr_uninit(&daio_mgr->mgr);
kfree(daio_mgr);
return 0;
}
| gpl-2.0 |
Pantech-Discover/android_kernel_pantech_magnus | drivers/rtc/rtc-generic.c | 10083 | 1759 | /* rtc-generic: RTC driver using the generic RTC abstraction
*
* Copyright (C) 2008 Kyle McMartin <kyle@mcmartin.ca>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <asm/rtc.h>
static int generic_get_time(struct device *dev, struct rtc_time *tm)
{
unsigned int ret = get_rtc_time(tm);
if (ret & RTC_BATT_BAD)
return -EOPNOTSUPP;
return rtc_valid_tm(tm);
}
static int generic_set_time(struct device *dev, struct rtc_time *tm)
{
if (set_rtc_time(tm) < 0)
return -EOPNOTSUPP;
return 0;
}
static const struct rtc_class_ops generic_rtc_ops = {
.read_time = generic_get_time,
.set_time = generic_set_time,
};
static int __init generic_rtc_probe(struct platform_device *dev)
{
struct rtc_device *rtc;
rtc = rtc_device_register("rtc-generic", &dev->dev, &generic_rtc_ops,
THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
platform_set_drvdata(dev, rtc);
return 0;
}
static int __exit generic_rtc_remove(struct platform_device *dev)
{
struct rtc_device *rtc = platform_get_drvdata(dev);
rtc_device_unregister(rtc);
return 0;
}
static struct platform_driver generic_rtc_driver = {
.driver = {
.name = "rtc-generic",
.owner = THIS_MODULE,
},
.remove = __exit_p(generic_rtc_remove),
};
static int __init generic_rtc_init(void)
{
return platform_driver_probe(&generic_rtc_driver, generic_rtc_probe);
}
static void __exit generic_rtc_fini(void)
{
platform_driver_unregister(&generic_rtc_driver);
}
module_init(generic_rtc_init);
module_exit(generic_rtc_fini);
MODULE_AUTHOR("Kyle McMartin <kyle@mcmartin.ca>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic RTC driver");
MODULE_ALIAS("platform:rtc-generic");
| gpl-2.0 |
joebonrichie/CAF_kernel-msm_M76XXUSNSKTLYA4145 | drivers/video/sysfillrect.c | 14691 | 7646 | /*
* Generic fillrect for frame buffers in system RAM with packed pixels of
* any depth.
*
* Based almost entirely from cfbfillrect.c (which is based almost entirely
* on Geert Uytterhoeven's fillrect routine)
*
* Copyright (C) 2007 Antonino Daplas <adaplas@pol.net>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <asm/types.h>
#include "fb_draw.h"
/*
* Aligned pattern fill using 32/64-bit memory accesses
*/
static void
bitfill_aligned(struct fb_info *p, unsigned long *dst, int dst_idx,
unsigned long pat, unsigned n, int bits)
{
unsigned long first, last;
if (!n)
return;
first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
if (dst_idx+n <= bits) {
/* Single word */
if (last)
first &= last;
*dst = comp(pat, *dst, first);
} else {
/* Multiple destination words */
/* Leading bits */
if (first != ~0UL) {
*dst = comp(pat, *dst, first);
dst++;
n -= bits - dst_idx;
}
/* Main chunk */
n /= bits;
while (n >= 8) {
*dst++ = pat;
*dst++ = pat;
*dst++ = pat;
*dst++ = pat;
*dst++ = pat;
*dst++ = pat;
*dst++ = pat;
*dst++ = pat;
n -= 8;
}
while (n--)
*dst++ = pat;
/* Trailing bits */
if (last)
*dst = comp(pat, *dst, last);
}
}
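/* In these helpers first/last are masks for the valid bits of the partial
* leading/trailing words, and comp() from fb_draw.h (essentially
* ((a ^ b) & mask) ^ b) merges the pattern into the destination only where
* the mask is set, leaving neighbouring pixels untouched. */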
/*
* Unaligned generic pattern fill using 32/64-bit memory accesses
* The pattern must have been expanded to a full 32/64-bit value
* Left/right are the appropriate shifts to convert to the pattern to be
* used for the next 32/64-bit word
*/
static void
bitfill_unaligned(struct fb_info *p, unsigned long *dst, int dst_idx,
unsigned long pat, int left, int right, unsigned n, int bits)
{
unsigned long first, last;
if (!n)
return;
first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
if (dst_idx+n <= bits) {
/* Single word */
if (last)
first &= last;
*dst = comp(pat, *dst, first);
} else {
/* Multiple destination words */
/* Leading bits */
if (first) {
*dst = comp(pat, *dst, first);
dst++;
pat = pat << left | pat >> right;
n -= bits - dst_idx;
}
/* Main chunk */
n /= bits;
while (n >= 4) {
*dst++ = pat;
pat = pat << left | pat >> right;
*dst++ = pat;
pat = pat << left | pat >> right;
*dst++ = pat;
pat = pat << left | pat >> right;
*dst++ = pat;
pat = pat << left | pat >> right;
n -= 4;
}
while (n--) {
*dst++ = pat;
pat = pat << left | pat >> right;
}
/* Trailing bits */
if (last)
*dst = comp(pat, *dst, last);
}
}
/*
* Aligned pattern invert using 32/64-bit memory accesses
*/
static void
bitfill_aligned_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
unsigned long pat, unsigned n, int bits)
{
unsigned long val = pat;
unsigned long first, last;
if (!n)
return;
first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
if (dst_idx+n <= bits) {
/* Single word */
if (last)
first &= last;
*dst = comp(*dst ^ val, *dst, first);
} else {
/* Multiple destination words */
/* Leading bits */
if (first != 0UL) {
*dst = comp(*dst ^ val, *dst, first);
dst++;
n -= bits - dst_idx;
}
/* Main chunk */
n /= bits;
while (n >= 8) {
*dst++ ^= val;
*dst++ ^= val;
*dst++ ^= val;
*dst++ ^= val;
*dst++ ^= val;
*dst++ ^= val;
*dst++ ^= val;
*dst++ ^= val;
n -= 8;
}
while (n--)
*dst++ ^= val;
/* Trailing bits */
if (last)
*dst = comp(*dst ^ val, *dst, last);
}
}
/*
* Unaligned generic pattern invert using 32/64-bit memory accesses
* The pattern must have been expanded to a full 32/64-bit value
* Left/right are the appropriate shifts to convert to the pattern to be
* used for the next 32/64-bit word
*/
static void
bitfill_unaligned_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
unsigned long pat, int left, int right, unsigned n,
int bits)
{
unsigned long first, last;
if (!n)
return;
first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
if (dst_idx+n <= bits) {
/* Single word */
if (last)
first &= last;
*dst = comp(*dst ^ pat, *dst, first);
} else {
/* Multiple destination words */
/* Leading bits */
if (first != 0UL) {
*dst = comp(*dst ^ pat, *dst, first);
dst++;
pat = pat << left | pat >> right;
n -= bits - dst_idx;
}
/* Main chunk */
n /= bits;
while (n >= 4) {
*dst++ ^= pat;
pat = pat << left | pat >> right;
*dst++ ^= pat;
pat = pat << left | pat >> right;
*dst++ ^= pat;
pat = pat << left | pat >> right;
*dst++ ^= pat;
pat = pat << left | pat >> right;
n -= 4;
}
while (n--) {
*dst ^= pat;
pat = pat << left | pat >> right;
}
/* Trailing bits */
if (last)
*dst = comp(*dst ^ pat, *dst, last);
}
}
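/* sys_fillrect() below picks between the two helper families: when the
* pixel depth divides BITS_PER_LONG evenly (left == 0) the expanded pattern
* is identical in every word and the aligned fillers apply; otherwise the
* pattern has to be rotated by bpp-dependent shifts per word and, per
* scanline, by dst_idx % bpp (for example an assumed bpp of 24 on a 32-bit
* build gives left = 32 % 24 = 8). */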
void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
unsigned long pat, pat2, fg;
unsigned long width = rect->width, height = rect->height;
int bits = BITS_PER_LONG, bytes = bits >> 3;
u32 bpp = p->var.bits_per_pixel;
unsigned long *dst;
int dst_idx, left;
if (p->state != FBINFO_STATE_RUNNING)
return;
if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p->fix.visual == FB_VISUAL_DIRECTCOLOR )
fg = ((u32 *) (p->pseudo_palette))[rect->color];
else
fg = rect->color;
pat = pixel_to_pat( bpp, fg);
dst = (unsigned long *)((unsigned long)p->screen_base & ~(bytes-1));
dst_idx = ((unsigned long)p->screen_base & (bytes - 1))*8;
dst_idx += rect->dy*p->fix.line_length*8+rect->dx*bpp;
/* FIXME For now we support 1-32 bpp only */
left = bits % bpp;
if (p->fbops->fb_sync)
p->fbops->fb_sync(p);
if (!left) {
void (*fill_op32)(struct fb_info *p, unsigned long *dst,
int dst_idx, unsigned long pat, unsigned n,
int bits) = NULL;
switch (rect->rop) {
case ROP_XOR:
fill_op32 = bitfill_aligned_rev;
break;
case ROP_COPY:
fill_op32 = bitfill_aligned;
break;
default:
printk( KERN_ERR "cfb_fillrect(): unknown rop, "
"defaulting to ROP_COPY\n");
fill_op32 = bitfill_aligned;
break;
}
while (height--) {
dst += dst_idx >> (ffs(bits) - 1);
dst_idx &= (bits - 1);
fill_op32(p, dst, dst_idx, pat, width*bpp, bits);
dst_idx += p->fix.line_length*8;
}
} else {
int right, r;
void (*fill_op)(struct fb_info *p, unsigned long *dst,
int dst_idx, unsigned long pat, int left,
int right, unsigned n, int bits) = NULL;
#ifdef __LITTLE_ENDIAN
right = left;
left = bpp - right;
#else
right = bpp - left;
#endif
switch (rect->rop) {
case ROP_XOR:
fill_op = bitfill_unaligned_rev;
break;
case ROP_COPY:
fill_op = bitfill_unaligned;
break;
default:
printk(KERN_ERR "sys_fillrect(): unknown rop, "
"defaulting to ROP_COPY\n");
fill_op = bitfill_unaligned;
break;
}
while (height--) {
dst += dst_idx / bits;
dst_idx &= (bits - 1);
r = dst_idx % bpp;
/* rotate pattern to the correct start position */
pat2 = le_long_to_cpu(rolx(cpu_to_le_long(pat), r, bpp));
fill_op(p, dst, dst_idx, pat2, left, right,
width*bpp, bits);
dst_idx += p->fix.line_length*8;
}
}
}
EXPORT_SYMBOL(sys_fillrect);
MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
MODULE_DESCRIPTION("Generic fill rectangle (sys-to-sys)");
MODULE_LICENSE("GPL");
| gpl-2.0 |
msduketown/xbmc | xbmc/windowing/egl/EGLNativeTypeWayland.cpp | 100 | 9742 | /*
* Copyright (C) 2011-2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "system.h"
#if defined(HAVE_WAYLAND)
#define WL_EGL_PLATFORM
#include <sstream>
#include <iostream>
#include <stdexcept>
#include <boost/noncopyable.hpp>
#include <boost/function.hpp>
#include <boost/bind.hpp>
#include <boost/scoped_ptr.hpp>
#include <boost/shared_ptr.hpp>
#include <cstdlib>
#include <wayland-client.h>
#include <wayland-version.h>
#include "windowing/DllWaylandClient.h"
#include "windowing/DllWaylandEgl.h"
#include "windowing/DllXKBCommon.h"
#include "windowing/WaylandProtocol.h"
#include "guilib/gui3d.h"
#include "utils/log.h"
#include "windowing/WinEvents.h"
#include "windowing/WinEventsWayland.h"
#include "wayland/WaylandLibraries.h"
#include "wayland/XBMCConnection.h"
#include "wayland/XBMCSurface.h"
#endif
#include "EGLNativeTypeWayland.h"
#if defined(HAVE_WAYLAND)
namespace xw = xbmc::wayland;
class CEGLNativeTypeWayland::Private
{
public:
boost::scoped_ptr<xw::Libraries> m_libraries;
boost::scoped_ptr<xw::XBMCConnection> m_connection;
boost::scoped_ptr<xw::XBMCSurface> m_surface;
bool LoadWaylandLibraries();
void UnloadWaylandLibraries();
};
bool CEGLNativeTypeWayland::Private::LoadWaylandLibraries()
{
try
{
m_libraries.reset(new xw::Libraries());
}
catch (const std::runtime_error &err)
{
CLog::Log(LOGWARNING, "%s: %s\n",
__FUNCTION__, err.what());
return false;
}
return true;
}
void CEGLNativeTypeWayland::Private::UnloadWaylandLibraries()
{
m_libraries.reset();
}
#else
class CEGLNativeTypeWayland::Private
{
};
#endif
CEGLNativeTypeWayland::CEGLNativeTypeWayland() :
priv(new Private())
{
}
CEGLNativeTypeWayland::~CEGLNativeTypeWayland()
{
}
bool CEGLNativeTypeWayland::CheckCompatibility()
{
#if defined(HAVE_WAYLAND)
if (!getenv("WAYLAND_DISPLAY"))
{
CLog::Log(LOGWARNING, "%s:, WAYLAND_DISPLAY is not set",
__FUNCTION__);
return false;
}
/* FIXME:
* There appears to be a bug in DllDynamic::CanLoad() which causes
* it to always return false. We are just loading the library
* directly at CheckCompatibility time now */
if (!priv->LoadWaylandLibraries())
return false;
return true;
#else
return false;
#endif
}
void CEGLNativeTypeWayland::Initialize()
{
}
void CEGLNativeTypeWayland::Destroy()
{
#if defined(HAVE_WAYLAND)
priv->UnloadWaylandLibraries();
#endif
}
int CEGLNativeTypeWayland::GetQuirks()
{
return EGL_QUIRK_DONT_TRUST_SURFACE_SIZE;
}
bool CEGLNativeTypeWayland::CreateNativeDisplay()
{
#if defined(HAVE_WAYLAND)
/* On CreateNativeDisplay we connect to the running wayland
* compositor on our current socket (as specified by WAYLAND_DISPLAY)
* and then do some initial set up like registering event handlers.
*
* xbmc::wayland::XBMCConnection is an encapsulation of all of our
* current global state with regards to a wayland connection. We
* need to give it access to the wayland client libraries and
* libxkbcommon for it to do its work.
*
* We also inject an xbmc::wayland::XBMCConnection::EventInjector
* which is basically just a table of function pointers to functions
* in CWinEventsWayland, which are all static. CWinEvents is still
* effectively a static, singleton class, and depending on it
* means that testing becomes substantially more difficult. As such
* we just inject the bits that we need here so that they can be
* stubbed out later in testing environments if need be.
*
* xbmc::wayland::XBMCConnection's constructor will throw an
* std::runtime_error in case it runs into any trouble in connecting
* to the wayland compositor or getting the initial global objects.
*
* The best we can do when that happens is just report the error
* and bail out, possibly to try another (fallback) windowing system.
*/
try
{
xw::XBMCConnection::EventInjector injector =
{
CWinEventsWayland::SetEventQueueStrategy,
CWinEventsWayland::DestroyEventQueueStrategy,
CWinEventsWayland::SetWaylandSeat,
CWinEventsWayland::DestroyWaylandSeat,
CWinEvents::MessagePump
};
priv->m_connection.reset(new xw::XBMCConnection(priv->m_libraries->ClientLibrary(),
priv->m_libraries->XKBCommonLibrary(),
injector));
}
catch (const std::runtime_error &err)
{
CLog::Log(LOGERROR, "%s: %s", __FUNCTION__, err.what());
return false;
}
return true;
#else
return false;
#endif
}
bool CEGLNativeTypeWayland::CreateNativeWindow()
{
#if defined(HAVE_WAYLAND)
/* CreateNativeWindow is where we allocate a new wayland surface
* using libwayland-egl and ask the compositor to display it by
* creating a new remote surface object.
*
* xbmc::wayland::XBMCSurface encapsulates all of this information. It
* needs access to various client libraries, as well as the compositor
* and shell global interfaces from xbmc::wayland::XBMCConnection
* in order to actually create the internal "surface" and "shell
* surface" representations.
*
* Once xbmc::wayland::XBMCSurface is created, an EGL bindable
* surface will be available for later use.
*
* The last two parameters are the requested width and height of
* the surface.
*
* If any problems are encountered in creating the surface
* an std::runtime_error is thrown. Like above, we catch it and
* report the error, since there's not much we can do about it.
*/
try
{
RESOLUTION_INFO info;
priv->m_connection->CurrentResolution(info);
xw::XBMCSurface::EventInjector injector =
{
CWinEventsWayland::SetXBMCSurface
};
priv->m_surface.reset(new xw::XBMCSurface(priv->m_libraries->ClientLibrary(),
priv->m_libraries->EGLLibrary(),
injector,
priv->m_connection->GetCompositor(),
priv->m_connection->GetShell(),
info.iScreenWidth,
info.iScreenHeight));
}
catch (const std::runtime_error &err)
{
CLog::Log(LOGERROR, "%s: %s", __FUNCTION__, err.what());
return false;
}
return true;
#else
return false;
#endif
}
bool CEGLNativeTypeWayland::GetNativeDisplay(XBNativeDisplayType **nativeDisplay) const
{
#if defined(HAVE_WAYLAND)
/* We need to return a pointer to the wl_display * (eg wl_display **),
* as EGLWrapper needs to dereference our return value to get the
* actual display and not its first member */
*nativeDisplay =
reinterpret_cast <XBNativeDisplayType *>(priv->m_connection->NativeDisplay());
return true;
#else
return false;
#endif
}
bool CEGLNativeTypeWayland::GetNativeWindow(XBNativeDisplayType **nativeWindow) const
{
#if defined(HAVE_WAYLAND)
*nativeWindow =
reinterpret_cast <XBNativeWindowType *>(priv->m_surface->EGLNativeWindow());
return true;
#else
return false;
#endif
}
/* DestroyNativeDisplay and DestroyNativeWindow simply just call
* reset on the relevant scoped_ptr. This will effectively destroy
* the encapsulating objects which cleans up all of the relevant
* connections and surfaces */
bool CEGLNativeTypeWayland::DestroyNativeDisplay()
{
#if defined(HAVE_WAYLAND)
priv->m_connection.reset();
return true;
#else
return false;
#endif
}
bool CEGLNativeTypeWayland::DestroyNativeWindow()
{
#if defined(HAVE_WAYLAND)
priv->m_surface.reset();
return true;
#else
return false;
#endif
}
/* The connection knows the current resolution, so we simply query it.
 * This information is cached locally inside the
 * xbmc::wayland::XBMCConnection object */
bool CEGLNativeTypeWayland::GetNativeResolution(RESOLUTION_INFO *res) const
{
#if defined(HAVE_WAYLAND)
priv->m_connection->CurrentResolution(*res);
return true;
#else
return false;
#endif
}
bool CEGLNativeTypeWayland::SetNativeResolution(const RESOLUTION_INFO &res)
{
#if defined(HAVE_WAYLAND)
priv->m_surface->Resize(res.iScreenWidth, res.iScreenHeight);
return true;
#else
return false;
#endif
}
bool CEGLNativeTypeWayland::ProbeResolutions(std::vector<RESOLUTION_INFO> &resolutions)
{
#if defined(HAVE_WAYLAND)
priv->m_connection->AvailableResolutions(resolutions);
return true;
#else
return false;
#endif
}
bool CEGLNativeTypeWayland::GetPreferredResolution(RESOLUTION_INFO *res) const
{
#if defined(HAVE_WAYLAND)
priv->m_connection->PreferredResolution(*res);
return true;
#else
return false;
#endif
}
bool CEGLNativeTypeWayland::ShowWindow(bool show)
{
#if defined(HAVE_WAYLAND)
/* XBMC lacks a way to select the output it should appear on,
* so we always appear on the first output */
if (show)
priv->m_surface->Show(priv->m_connection->GetFirstOutput());
else
return false;
return true;
#else
return false;
#endif
}
| gpl-2.0 |
remilia15/android_kernel_samsung_grandprimeve3g-RZ | drivers/staging/comedi/drivers/adl_pci7x3x.c | 356 | 8136 | /*
* COMEDI driver for the ADLINK PCI-723x/743x series boards.
* Copyright (C) 2012 H Hartley Sweeten <hsweeten@visionengravers.com>
*
* Based on the adl_pci7230 driver written by:
* David Fernandez <dfcastelao@gmail.com>
* and the adl_pci7432 driver written by:
* Michel Lachaine <mike@mikelachaine.ca>
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: adl_pci7x3x
Description: 32/64-Channel Isolated Digital I/O Boards
Devices: (ADLink) PCI-7230 [adl_pci7230] - 16 input / 16 output
(ADLink) PCI-7233 [adl_pci7233] - 32 input
(ADLink) PCI-7234 [adl_pci7234] - 32 output
(ADLink) PCI-7432 [adl_pci7432] - 32 input / 32 output
(ADLink) PCI-7433 [adl_pci7433] - 64 input
(ADLink) PCI-7434 [adl_pci7434] - 64 output
Author: H Hartley Sweeten <hsweeten@visionengravers.com>
Updated: Thu, 02 Aug 2012 14:27:46 -0700
Status: untested
The PCI-7230, PCI-7432 and PCI-7433 boards also support external
interrupt signals on digital input channels 0 and 1. The PCI-7233
has dual-interrupt sources for change-of-state (COS) on any 16
digital input channels of LSB and for COS on any 16 digital input
lines of MSB. Interrupts are not currently supported by this
driver.
Configuration Options: not applicable, uses comedi PCI auto config
*/
#include <linux/pci.h>
#include "../comedidev.h"
/*
* Register I/O map (32-bit access only)
*/
#define PCI7X3X_DIO_REG 0x00
#define PCI743X_DIO_REG 0x04
enum apci1516_boardid {
BOARD_PCI7230,
BOARD_PCI7233,
BOARD_PCI7234,
BOARD_PCI7432,
BOARD_PCI7433,
BOARD_PCI7434,
};
struct adl_pci7x3x_boardinfo {
const char *name;
int nsubdevs;
int di_nchan;
int do_nchan;
};
static const struct adl_pci7x3x_boardinfo adl_pci7x3x_boards[] = {
[BOARD_PCI7230] = {
.name = "adl_pci7230",
.nsubdevs = 2,
.di_nchan = 16,
.do_nchan = 16,
},
[BOARD_PCI7233] = {
.name = "adl_pci7233",
.nsubdevs = 1,
.di_nchan = 32,
},
[BOARD_PCI7234] = {
.name = "adl_pci7234",
.nsubdevs = 1,
.do_nchan = 32,
},
[BOARD_PCI7432] = {
.name = "adl_pci7432",
.nsubdevs = 2,
.di_nchan = 32,
.do_nchan = 32,
},
[BOARD_PCI7433] = {
.name = "adl_pci7433",
.nsubdevs = 2,
.di_nchan = 64,
},
[BOARD_PCI7434] = {
.name = "adl_pci7434",
.nsubdevs = 2,
.do_nchan = 64,
}
};
static int adl_pci7x3x_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned long reg = (unsigned long)s->private;
unsigned int mask = data[0];
unsigned int bits = data[1];
if (mask) {
unsigned int val;
s->state &= ~mask;
s->state |= (bits & mask);
val = s->state;
if (s->n_chan == 16) {
/*
* It seems the PCI-7230 needs the 16-bit DO state
* to be shifted left by 16 bits before being written
* to the 32-bit register. Set the value in both
* halves of the register to be sure.
*/
val |= val << 16;
}
outl(val, dev->iobase + reg);
}
/*
* NOTE: The output register is not readable.
* This returned state will not be correct until all the
* outputs have been updated.
*/
data[1] = s->state;
return insn->n;
}
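/* Example with a hypothetical state: under the comedi convention of
* data[0] as the channel mask and data[1] as the new bit values, a
* 16-channel board whose state becomes 0x1234 has 0x12341234 written to the
* 32-bit register by the duplication above; wider boards write the state
* unchanged. */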
static int adl_pci7x3x_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned long reg = (unsigned long)s->private;
data[1] = inl(dev->iobase + reg);
return insn->n;
}
static int adl_pci7x3x_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct adl_pci7x3x_boardinfo *board = NULL;
struct comedi_subdevice *s;
int subdev;
int nchan;
int ret;
if (context < ARRAY_SIZE(adl_pci7x3x_boards))
board = &adl_pci7x3x_boards[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
dev->iobase = pci_resource_start(pcidev, 2);
/*
* One or two subdevices are set up by this driver depending on
* the number of digital inputs and/or outputs provided by the
* board. Each subdevice has a maximum of 32 channels.
*
* PCI-7230 - 2 subdevices: 0 - 16 input, 1 - 16 output
* PCI-7233 - 1 subdevice: 0 - 32 input
* PCI-7234 - 1 subdevice: 0 - 32 output
* PCI-7432 - 2 subdevices: 0 - 32 input, 1 - 32 output
* PCI-7433 - 2 subdevices: 0 - 32 input, 1 - 32 input
* PCI-7434 - 2 subdevices: 0 - 32 output, 1 - 32 output
*/
ret = comedi_alloc_subdevices(dev, board->nsubdevs);
if (ret)
return ret;
subdev = 0;
if (board->di_nchan) {
nchan = min(board->di_nchan, 32);
s = &dev->subdevices[subdev];
/* Isolated digital inputs 0 to 15/31 */
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = nchan;
s->maxdata = 1;
s->insn_bits = adl_pci7x3x_di_insn_bits;
s->range_table = &range_digital;
s->private = (void *)PCI7X3X_DIO_REG;
subdev++;
nchan = board->di_nchan - nchan;
if (nchan) {
s = &dev->subdevices[subdev];
/* Isolated digital inputs 32 to 63 */
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = nchan;
s->maxdata = 1;
s->insn_bits = adl_pci7x3x_di_insn_bits;
s->range_table = &range_digital;
s->private = (void *)PCI743X_DIO_REG;
subdev++;
}
}
if (board->do_nchan) {
nchan = min(board->do_nchan, 32);
s = &dev->subdevices[subdev];
/* Isolated digital outputs 0 to 15/31 */
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = nchan;
s->maxdata = 1;
s->insn_bits = adl_pci7x3x_do_insn_bits;
s->range_table = &range_digital;
s->private = (void *)PCI7X3X_DIO_REG;
subdev++;
nchan = board->do_nchan - nchan;
if (nchan) {
s = &dev->subdevices[subdev];
/* Isolated digital outputs 32 to 63 */
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = nchan;
s->maxdata = 1;
s->insn_bits = adl_pci7x3x_do_insn_bits;
s->range_table = &range_digital;
s->private = (void *)PCI743X_DIO_REG;
subdev++;
}
}
dev_info(dev->class_dev, "%s attached (%d inputs/%d outputs)\n",
dev->board_name, board->di_nchan, board->do_nchan);
return 0;
}
static struct comedi_driver adl_pci7x3x_driver = {
.driver_name = "adl_pci7x3x",
.module = THIS_MODULE,
.auto_attach = adl_pci7x3x_auto_attach,
.detach = comedi_pci_disable,
};
static int adl_pci7x3x_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &adl_pci7x3x_driver,
id->driver_data);
}
static DEFINE_PCI_DEVICE_TABLE(adl_pci7x3x_pci_table) = {
{ PCI_VDEVICE(ADLINK, 0x7230), BOARD_PCI7230 },
{ PCI_VDEVICE(ADLINK, 0x7233), BOARD_PCI7233 },
{ PCI_VDEVICE(ADLINK, 0x7234), BOARD_PCI7234 },
{ PCI_VDEVICE(ADLINK, 0x7432), BOARD_PCI7432 },
{ PCI_VDEVICE(ADLINK, 0x7433), BOARD_PCI7433 },
{ PCI_VDEVICE(ADLINK, 0x7434), BOARD_PCI7434 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, adl_pci7x3x_pci_table);
static struct pci_driver adl_pci7x3x_pci_driver = {
.name = "adl_pci7x3x",
.id_table = adl_pci7x3x_pci_table,
.probe = adl_pci7x3x_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(adl_pci7x3x_driver, adl_pci7x3x_pci_driver);
MODULE_DESCRIPTION("ADLINK PCI-723x/743x Isolated Digital I/O boards");
MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
abhijeet-dev/ll-arndale-octa | drivers/staging/vt6656/tkip.c | 356 | 8336 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: tkip.c
*
* Purpose: Implement functions for 802.11i TKIP
*
* Author: Jerry Chen
*
* Date: Mar. 11, 2003
*
* Functions:
* TKIPvMixKey - Get TKIP RC4 Key from TK,TA, and TSC
*
* Revision History:
*
*/
#include "tmacro.h"
#include "tkip.h"
/* The Sbox is reduced to 2 16-bit wide tables, each with 256 entries. */
/* The 2nd table is the same as the 1st but with the upper and lower */
/* bytes swapped. To allow an endian tolerant implementation, the byte */
/* halves have been expressed independently here. */
static const u8 TKIP_Sbox_Lower[256] = {
0xA5,0x84,0x99,0x8D,0x0D,0xBD,0xB1,0x54,
0x50,0x03,0xA9,0x7D,0x19,0x62,0xE6,0x9A,
0x45,0x9D,0x40,0x87,0x15,0xEB,0xC9,0x0B,
0xEC,0x67,0xFD,0xEA,0xBF,0xF7,0x96,0x5B,
0xC2,0x1C,0xAE,0x6A,0x5A,0x41,0x02,0x4F,
0x5C,0xF4,0x34,0x08,0x93,0x73,0x53,0x3F,
0x0C,0x52,0x65,0x5E,0x28,0xA1,0x0F,0xB5,
0x09,0x36,0x9B,0x3D,0x26,0x69,0xCD,0x9F,
0x1B,0x9E,0x74,0x2E,0x2D,0xB2,0xEE,0xFB,
0xF6,0x4D,0x61,0xCE,0x7B,0x3E,0x71,0x97,
0xF5,0x68,0x00,0x2C,0x60,0x1F,0xC8,0xED,
0xBE,0x46,0xD9,0x4B,0xDE,0xD4,0xE8,0x4A,
0x6B,0x2A,0xE5,0x16,0xC5,0xD7,0x55,0x94,
0xCF,0x10,0x06,0x81,0xF0,0x44,0xBA,0xE3,
0xF3,0xFE,0xC0,0x8A,0xAD,0xBC,0x48,0x04,
0xDF,0xC1,0x75,0x63,0x30,0x1A,0x0E,0x6D,
0x4C,0x14,0x35,0x2F,0xE1,0xA2,0xCC,0x39,
0x57,0xF2,0x82,0x47,0xAC,0xE7,0x2B,0x95,
0xA0,0x98,0xD1,0x7F,0x66,0x7E,0xAB,0x83,
0xCA,0x29,0xD3,0x3C,0x79,0xE2,0x1D,0x76,
0x3B,0x56,0x4E,0x1E,0xDB,0x0A,0x6C,0xE4,
0x5D,0x6E,0xEF,0xA6,0xA8,0xA4,0x37,0x8B,
0x32,0x43,0x59,0xB7,0x8C,0x64,0xD2,0xE0,
0xB4,0xFA,0x07,0x25,0xAF,0x8E,0xE9,0x18,
0xD5,0x88,0x6F,0x72,0x24,0xF1,0xC7,0x51,
0x23,0x7C,0x9C,0x21,0xDD,0xDC,0x86,0x85,
0x90,0x42,0xC4,0xAA,0xD8,0x05,0x01,0x12,
0xA3,0x5F,0xF9,0xD0,0x91,0x58,0x27,0xB9,
0x38,0x13,0xB3,0x33,0xBB,0x70,0x89,0xA7,
0xB6,0x22,0x92,0x20,0x49,0xFF,0x78,0x7A,
0x8F,0xF8,0x80,0x17,0xDA,0x31,0xC6,0xB8,
0xC3,0xB0,0x77,0x11,0xCB,0xFC,0xD6,0x3A
};
static const u8 TKIP_Sbox_Upper[256] = {
0xC6,0xF8,0xEE,0xF6,0xFF,0xD6,0xDE,0x91,
0x60,0x02,0xCE,0x56,0xE7,0xB5,0x4D,0xEC,
0x8F,0x1F,0x89,0xFA,0xEF,0xB2,0x8E,0xFB,
0x41,0xB3,0x5F,0x45,0x23,0x53,0xE4,0x9B,
0x75,0xE1,0x3D,0x4C,0x6C,0x7E,0xF5,0x83,
0x68,0x51,0xD1,0xF9,0xE2,0xAB,0x62,0x2A,
0x08,0x95,0x46,0x9D,0x30,0x37,0x0A,0x2F,
0x0E,0x24,0x1B,0xDF,0xCD,0x4E,0x7F,0xEA,
0x12,0x1D,0x58,0x34,0x36,0xDC,0xB4,0x5B,
0xA4,0x76,0xB7,0x7D,0x52,0xDD,0x5E,0x13,
0xA6,0xB9,0x00,0xC1,0x40,0xE3,0x79,0xB6,
0xD4,0x8D,0x67,0x72,0x94,0x98,0xB0,0x85,
0xBB,0xC5,0x4F,0xED,0x86,0x9A,0x66,0x11,
0x8A,0xE9,0x04,0xFE,0xA0,0x78,0x25,0x4B,
0xA2,0x5D,0x80,0x05,0x3F,0x21,0x70,0xF1,
0x63,0x77,0xAF,0x42,0x20,0xE5,0xFD,0xBF,
0x81,0x18,0x26,0xC3,0xBE,0x35,0x88,0x2E,
0x93,0x55,0xFC,0x7A,0xC8,0xBA,0x32,0xE6,
0xC0,0x19,0x9E,0xA3,0x44,0x54,0x3B,0x0B,
0x8C,0xC7,0x6B,0x28,0xA7,0xBC,0x16,0xAD,
0xDB,0x64,0x74,0x14,0x92,0x0C,0x48,0xB8,
0x9F,0xBD,0x43,0xC4,0x39,0x31,0xD3,0xF2,
0xD5,0x8B,0x6E,0xDA,0x01,0xB1,0x9C,0x49,
0xD8,0xAC,0xF3,0xCF,0xCA,0xF4,0x47,0x10,
0x6F,0xF0,0x4A,0x5C,0x38,0x57,0x73,0x97,
0xCB,0xA1,0xE8,0x3E,0x96,0x61,0x0D,0x0F,
0xE0,0x7C,0x71,0xCC,0x90,0x06,0xF7,0x1C,
0xC2,0x6A,0xAE,0x69,0x17,0x99,0x3A,0x27,
0xD9,0xEB,0x2B,0x22,0xD2,0xA9,0x07,0x33,
0x2D,0x3C,0x15,0xC9,0x87,0xAA,0x50,0xA5,
0x03,0x59,0x09,0x1A,0x65,0xD7,0x84,0xD0,
0x82,0x29,0x5A,0x1E,0x7B,0xA8,0x6D,0x2C
};
//STKIPKeyManagement sTKIPKeyTable[MAX_TKIP_KEY];
/************************************************************/
/* tkip_sbox() */
/* Returns a 16 bit value from a 64K entry table. The Table */
/* is synthesized from two 256 entry byte wide tables. */
/************************************************************/
static unsigned int tkip_sbox(unsigned int index)
{
unsigned int index_low;
unsigned int index_high;
unsigned int left, right;
index_low = (index % 256);
index_high = ((index >> 8) % 256);
left = TKIP_Sbox_Lower[index_low] + (TKIP_Sbox_Upper[index_low] * 256);
right = TKIP_Sbox_Upper[index_high] + (TKIP_Sbox_Lower[index_high] * 256);
return (left ^ right);
}
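/*
 * Worked example (illustrative): for index 0x0001, index_low = 1 and
 * index_high = 0, so left = 0x84 + 0xF8 * 256 = 0xF884 and
 * right = 0xC6 + 0xA5 * 256 = 0xA5C6, giving
 * tkip_sbox(0x0001) = 0xF884 ^ 0xA5C6 = 0x5D42.
 */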
static unsigned int rotr1(unsigned int a)
{
unsigned int b;
if ((a & 0x01) == 0x01) {
b = (a >> 1) | 0x8000;
} else {
b = (a >> 1) & 0x7fff;
}
b = b % 65536;
return b;
}
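/*
 * Worked example (illustrative): rotr1() is a 16-bit rotate right by
 * one bit, so rotr1(0x0001) == 0x8000 and rotr1(0x8000) == 0x4000.
 */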
/*
* Description: Calculate RC4Key from TK, TA, and TSC
*
* Parameters:
* In:
* pbyTKey - TKey
* pbyTA - TA
* dwTSC - TSC
* Out:
* pbyRC4Key - RC4Key
*
* Return Value: none
*
*/
void TKIPvMixKey(
u8 * pbyTKey,
u8 * pbyTA,
u16 wTSC15_0,
u32 dwTSC47_16,
u8 * pbyRC4Key
)
{
u32 p1k[5];
u32 tsc0, tsc1, tsc2;
u32 ppk0, ppk1, ppk2, ppk3, ppk4, ppk5;
u32 pnl, pnh;
int i, j;
pnl = (u32)wTSC15_0;
pnh = (u32)(dwTSC47_16 & 0xffffffff);
tsc0 = (u32)((pnh >> 16) % 65536); /* msb */
tsc1 = (u32)(pnh % 65536);
tsc2 = (u32)(pnl % 65536); /* lsb */
/* Phase 1, step 1 */
p1k[0] = tsc1;
p1k[1] = tsc0;
p1k[2] = (u32)(pbyTA[0] + (pbyTA[1]*256));
p1k[3] = (u32)(pbyTA[2] + (pbyTA[3]*256));
p1k[4] = (u32)(pbyTA[4] + (pbyTA[5]*256));
/* Phase 1, step 2 */
for (i=0; i<8; i++) {
j = 2*(i & 1);
p1k[0] = (p1k[0] + tkip_sbox((p1k[4] ^ ((256*pbyTKey[1+j]) + pbyTKey[j])) % 65536)) % 65536;
p1k[1] = (p1k[1] + tkip_sbox((p1k[0] ^ ((256*pbyTKey[5+j]) + pbyTKey[4+j])) % 65536)) % 65536;
p1k[2] = (p1k[2] + tkip_sbox((p1k[1] ^ ((256*pbyTKey[9+j]) + pbyTKey[8+j])) % 65536)) % 65536;
p1k[3] = (p1k[3] + tkip_sbox((p1k[2] ^ ((256*pbyTKey[13+j]) + pbyTKey[12+j])) % 65536)) % 65536;
p1k[4] = (p1k[4] + tkip_sbox((p1k[3] ^ (((256*pbyTKey[1+j]) + pbyTKey[j]))) % 65536)) % 65536;
p1k[4] = (p1k[4] + i) % 65536;
}
/* Phase 2, Step 1 */
ppk0 = p1k[0];
ppk1 = p1k[1];
ppk2 = p1k[2];
ppk3 = p1k[3];
ppk4 = p1k[4];
ppk5 = (p1k[4] + tsc2) % 65536;
/* Phase2, Step 2 */
ppk0 = ppk0 + tkip_sbox((ppk5 ^ ((256*pbyTKey[1]) + pbyTKey[0])) % 65536);
ppk1 = ppk1 + tkip_sbox((ppk0 ^ ((256*pbyTKey[3]) + pbyTKey[2])) % 65536);
ppk2 = ppk2 + tkip_sbox((ppk1 ^ ((256*pbyTKey[5]) + pbyTKey[4])) % 65536);
ppk3 = ppk3 + tkip_sbox((ppk2 ^ ((256*pbyTKey[7]) + pbyTKey[6])) % 65536);
ppk4 = ppk4 + tkip_sbox((ppk3 ^ ((256*pbyTKey[9]) + pbyTKey[8])) % 65536);
ppk5 = ppk5 + tkip_sbox((ppk4 ^ ((256*pbyTKey[11]) + pbyTKey[10])) % 65536);
ppk0 = ppk0 + rotr1(ppk5 ^ ((256*pbyTKey[13]) + pbyTKey[12]));
ppk1 = ppk1 + rotr1(ppk0 ^ ((256*pbyTKey[15]) + pbyTKey[14]));
ppk2 = ppk2 + rotr1(ppk1);
ppk3 = ppk3 + rotr1(ppk2);
ppk4 = ppk4 + rotr1(ppk3);
ppk5 = ppk5 + rotr1(ppk4);
/* Phase 2, Step 3 */
pbyRC4Key[0] = (tsc2 >> 8) % 256;
pbyRC4Key[1] = (((tsc2 >> 8) % 256) | 0x20) & 0x7f;
pbyRC4Key[2] = tsc2 % 256;
pbyRC4Key[3] = ((ppk5 ^ ((256*pbyTKey[1]) + pbyTKey[0])) >> 1) % 256;
pbyRC4Key[4] = ppk0 % 256;
pbyRC4Key[5] = (ppk0 >> 8) % 256;
pbyRC4Key[6] = ppk1 % 256;
pbyRC4Key[7] = (ppk1 >> 8) % 256;
pbyRC4Key[8] = ppk2 % 256;
pbyRC4Key[9] = (ppk2 >> 8) % 256;
pbyRC4Key[10] = ppk3 % 256;
pbyRC4Key[11] = (ppk3 >> 8) % 256;
pbyRC4Key[12] = ppk4 % 256;
pbyRC4Key[13] = (ppk4 >> 8) % 256;
pbyRC4Key[14] = ppk5 % 256;
pbyRC4Key[15] = (ppk5 >> 8) % 256;
}
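/*
 * Illustrative usage sketch (not part of the original file): deriving a
 * per-packet RC4 key from a 128-bit temporal key (TK), a 6-byte
 * transmitter address (TA) and a 48-bit TSC split into its low 16 and
 * high 32 bits. The values below are placeholders.
 */
static void __maybe_unused tkip_mix_key_example(void)
{
	u8 tk[16] = { 0 };		/* temporal key */
	u8 ta[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };	/* transmitter address */
	u8 rc4key[16];			/* per-packet RC4 key */

	/* TSC = 0x000000000001: low 16 bits first, then high 32 bits */
	TKIPvMixKey(tk, ta, 0x0001, 0x00000000, rc4key);

	/* rc4key[0..2] now encode the IV bytes derived from the low TSC bits */
}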
| gpl-2.0 |
omkar062/linux-4.0.4 | drivers/gpu/drm/i915/i915_ioc32.c | 356 | 7195 | /**
* \file i915_ioc32.c
*
* 32-bit ioctl compatibility routines for the i915 DRM.
*
* \author Alan Hourihane <alanh@fairlite.demon.co.uk>
*
*
* Copyright (C) Paul Mackerras 2005
* Copyright (C) Alan Hourihane 2005
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
typedef struct _drm_i915_batchbuffer32 {
int start; /* agp offset */
int used; /* nr bytes in use */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
u32 cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer32_t;
static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_batchbuffer32_t batchbuffer32;
drm_i915_batchbuffer_t __user *batchbuffer;
if (copy_from_user
(&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
return -EFAULT;
batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
|| __put_user(batchbuffer32.start, &batchbuffer->start)
|| __put_user(batchbuffer32.used, &batchbuffer->used)
|| __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
|| __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
|| __put_user(batchbuffer32.num_cliprects,
&batchbuffer->num_cliprects)
|| __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
&batchbuffer->cliprects))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
(unsigned long)batchbuffer);
}
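/*
 * Illustrative note (not from the original file): the native
 * drm_i915_batchbuffer_t ends with a cliprects pointer that is 8 bytes
 * wide on a 64-bit kernel, while the 32-bit layout above stores it as a
 * u32. compat_alloc_user_space() reserves room on the 64-bit user stack
 * and __put_user() rebuilds the structure field by field, widening the
 * 32-bit pointer before the request is forwarded to drm_ioctl().
 */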
typedef struct _drm_i915_cmdbuffer32 {
u32 buf; /* pointer to userspace command buffer */
int sz; /* nr bytes in buf */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
u32 cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer32_t;
static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_cmdbuffer32_t cmdbuffer32;
drm_i915_cmdbuffer_t __user *cmdbuffer;
if (copy_from_user
(&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
return -EFAULT;
cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
|| __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
&cmdbuffer->buf)
|| __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
|| __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
|| __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
|| __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
|| __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
&cmdbuffer->cliprects))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
(unsigned long)cmdbuffer);
}
typedef struct drm_i915_irq_emit32 {
u32 irq_seq;
} drm_i915_irq_emit32_t;
static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_irq_emit32_t req32;
drm_i915_irq_emit_t __user *request;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user((int __user *)(unsigned long)req32.irq_seq,
&request->irq_seq))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
(unsigned long)request);
}
typedef struct drm_i915_getparam32 {
int param;
u32 value;
} drm_i915_getparam32_t;
static int compat_i915_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_getparam32_t req32;
drm_i915_getparam_t __user *request;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user(req32.param, &request->param)
|| __put_user((void __user *)(unsigned long)req32.value,
&request->value))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
(unsigned long)request);
}
typedef struct drm_i915_mem_alloc32 {
int region;
int alignment;
int size;
u32 region_offset; /* offset from start of fb or agp */
} drm_i915_mem_alloc32_t;
static int compat_i915_alloc(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_mem_alloc32_t req32;
drm_i915_mem_alloc_t __user *request;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user(req32.region, &request->region)
|| __put_user(req32.alignment, &request->alignment)
|| __put_user(req32.size, &request->size)
|| __put_user((void __user *)(unsigned long)req32.region_offset,
&request->region_offset))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
(unsigned long)request);
}
static drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
[DRM_I915_ALLOC] = compat_i915_alloc
};
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
*
* \param filp file pointer.
* \param cmd command.
* \param arg user argument.
* \return zero on success or negative number on failure.
*/
long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
drm_ioctl_compat_t *fn = NULL;
int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
ret = drm_ioctl(filp, cmd, arg);
return ret;
}
| gpl-2.0 |
Compulsion/linux-stable | net/ipv4/netfilter/nf_tables_arp.c | 612 | 2413 | /*
* Copyright (c) 2008-2010 Patrick McHardy <kaber@trash.net>
* Copyright (c) 2013 Pablo Neira Ayuso <pablo@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netfilter_arp.h>
#include <net/netfilter/nf_tables.h>
static unsigned int
nft_do_chain_arp(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, ops, skb, state);
return nft_do_chain(&pkt, ops);
}
static struct nft_af_info nft_af_arp __read_mostly = {
.family = NFPROTO_ARP,
.nhooks = NF_ARP_NUMHOOKS,
.owner = THIS_MODULE,
.nops = 1,
.hooks = {
[NF_ARP_IN] = nft_do_chain_arp,
[NF_ARP_OUT] = nft_do_chain_arp,
[NF_ARP_FORWARD] = nft_do_chain_arp,
},
};
static int nf_tables_arp_init_net(struct net *net)
{
net->nft.arp = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
if (net->nft.arp == NULL)
return -ENOMEM;
memcpy(net->nft.arp, &nft_af_arp, sizeof(nft_af_arp));
if (nft_register_afinfo(net, net->nft.arp) < 0)
goto err;
return 0;
err:
kfree(net->nft.arp);
return -ENOMEM;
}
static void nf_tables_arp_exit_net(struct net *net)
{
nft_unregister_afinfo(net->nft.arp);
kfree(net->nft.arp);
}
static struct pernet_operations nf_tables_arp_net_ops = {
.init = nf_tables_arp_init_net,
.exit = nf_tables_arp_exit_net,
};
static const struct nf_chain_type filter_arp = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_ARP,
.owner = THIS_MODULE,
.hook_mask = (1 << NF_ARP_IN) |
(1 << NF_ARP_OUT) |
(1 << NF_ARP_FORWARD),
};
static int __init nf_tables_arp_init(void)
{
int ret;
nft_register_chain_type(&filter_arp);
ret = register_pernet_subsys(&nf_tables_arp_net_ops);
if (ret < 0)
nft_unregister_chain_type(&filter_arp);
return ret;
}
static void __exit nf_tables_arp_exit(void)
{
unregister_pernet_subsys(&nf_tables_arp_net_ops);
nft_unregister_chain_type(&filter_arp);
}
module_init(nf_tables_arp_init);
module_exit(nf_tables_arp_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_FAMILY(3); /* NFPROTO_ARP */
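/*
 * Illustrative usage sketch (assumes the standard nft(8) userspace
 * tool): once this module is loaded, ARP tables and filter chains can
 * be created with e.g.
 *
 *	nft add table arp filter
 *	nft add chain arp filter input { type filter hook input priority 0 \; }
 *	nft add rule arp filter input counter
 */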
| gpl-2.0 |
Milad1993/linux | arch/arm/mach-shmobile/board-marzen-reference.c | 612 | 1575 | /*
* marzen board support - Reference DT implementation
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2011 Magnus Damm
* Copyright (C) 2013 Simon Horman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk/shmobile.h>
#include <linux/clocksource.h>
#include <linux/of_platform.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include "common.h"
#include "irqs.h"
#include "r8a7779.h"
static void __init marzen_init_timer(void)
{
r8a7779_clocks_init(r8a7779_read_mode_pins());
clocksource_of_init();
}
static void __init marzen_init(void)
{
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
r8a7779_init_irq_extpin_dt(1); /* IRQ1 as individual interrupt */
}
static const char *marzen_boards_compat_dt[] __initdata = {
"renesas,marzen",
"renesas,marzen-reference",
NULL,
};
DT_MACHINE_START(MARZEN, "marzen")
.smp = smp_ops(r8a7779_smp_ops),
.map_io = r8a7779_map_io,
.init_early = shmobile_init_delay,
.init_time = marzen_init_timer,
.init_irq = r8a7779_init_irq_dt,
.init_machine = marzen_init,
.init_late = shmobile_init_late,
.dt_compat = marzen_boards_compat_dt,
MACHINE_END
| gpl-2.0 |
ignacio28/android_kernel_lge_msm8610 | mm/bootmem.c | 1380 | 21224 | /*
* bootmem - A boot-time physical memory allocator and configurator
*
* Copyright (C) 1999 Ingo Molnar
* 1999 Kanoj Sarcar, SGI
* 2008 Johannes Weiner
*
* Access to this subsystem has to be serialized externally (which is true
* for the boot process anyway).
*/
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>
#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>
#include "internal.h"
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
.bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
static int bootmem_debug;
static int __init bootmem_debug_setup(char *buf)
{
bootmem_debug = 1;
return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);
#define bdebug(fmt, args...) ({ \
if (unlikely(bootmem_debug)) \
printk(KERN_INFO \
"bootmem::%s " fmt, \
__func__, ## args); \
})
static unsigned long __init bootmap_bytes(unsigned long pages)
{
unsigned long bytes = DIV_ROUND_UP(pages, 8);
return ALIGN(bytes, sizeof(long));
}
/**
* bootmem_bootmap_pages - calculate bitmap size in pages
* @pages: number of pages the bitmap has to represent
*/
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
unsigned long bytes = bootmap_bytes(pages);
return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
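/*
 * Worked example (illustrative): with 4 KiB pages, describing 1 GiB of
 * memory needs one bit per page, i.e. 262144 bits; bootmap_bytes()
 * rounds that to 32768 bytes and bootmem_bootmap_pages() turns it into
 * 8 bitmap pages.
 */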
/*
* link bdata in order
*/
static void __init link_bootmem(bootmem_data_t *bdata)
{
struct list_head *iter;
list_for_each(iter, &bdata_list) {
bootmem_data_t *ent;
ent = list_entry(iter, bootmem_data_t, list);
if (bdata->node_min_pfn < ent->node_min_pfn)
break;
}
list_add_tail(&bdata->list, iter);
}
/*
* Called once to set up the allocator itself.
*/
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
unsigned long mapstart, unsigned long start, unsigned long end)
{
unsigned long mapsize;
mminit_validate_memmodel_limits(&start, &end);
bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
bdata->node_min_pfn = start;
bdata->node_low_pfn = end;
link_bootmem(bdata);
/*
* Initially all pages are reserved - setup_arch() has to
* register free RAM areas explicitly.
*/
mapsize = bootmap_bytes(end - start);
memset(bdata->node_bootmem_map, 0xff, mapsize);
bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
bdata - bootmem_node_data, start, mapstart, end, mapsize);
return mapsize;
}
/**
* init_bootmem_node - register a node as boot memory
* @pgdat: node to register
* @freepfn: pfn where the bitmap for this node is to be placed
* @startpfn: first pfn on the node
* @endpfn: first pfn after the node
*
* Returns the number of bytes needed to hold the bitmap for this node.
*/
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
unsigned long startpfn, unsigned long endpfn)
{
return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}
/**
* init_bootmem - register boot memory
* @start: pfn where the bitmap is to be placed
* @pages: number of available physical pages
*
* Returns the number of bytes needed to hold the bitmap.
*/
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
max_low_pfn = pages;
min_low_pfn = start;
return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
/*
* free_bootmem_late - free bootmem pages directly to page allocator
* @addr: starting address of the range
* @size: size of the range in bytes
*
* This is only useful when the bootmem allocator has already been torn
* down, but we are still initializing the system. Pages are given directly
* to the page allocator, no bootmem metadata is updated because it is gone.
*/
void free_bootmem_late(unsigned long addr, unsigned long size)
{
unsigned long cursor, end;
kmemleak_free_part(__va(addr), size);
cursor = PFN_UP(addr);
end = PFN_DOWN(addr + size);
for (; cursor < end; cursor++) {
__free_pages_bootmem(pfn_to_page(cursor), 0);
totalram_pages++;
}
}
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
struct page *page;
unsigned long start, end, pages, count = 0;
if (!bdata->node_bootmem_map)
return 0;
start = bdata->node_min_pfn;
end = bdata->node_low_pfn;
bdebug("nid=%td start=%lx end=%lx\n",
bdata - bootmem_node_data, start, end);
while (start < end) {
unsigned long *map, idx, vec;
map = bdata->node_bootmem_map;
idx = start - bdata->node_min_pfn;
vec = ~map[idx / BITS_PER_LONG];
/*
* If we have a properly aligned and fully unreserved
* BITS_PER_LONG block of pages in front of us, free
* it in one go.
*/
if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
int order = ilog2(BITS_PER_LONG);
__free_pages_bootmem(pfn_to_page(start), order);
count += BITS_PER_LONG;
start += BITS_PER_LONG;
} else {
unsigned long off = 0;
while (vec && off < BITS_PER_LONG) {
if (vec & 1) {
page = pfn_to_page(start + off);
__free_pages_bootmem(page, 0);
count++;
}
vec >>= 1;
off++;
}
start = ALIGN(start + 1, BITS_PER_LONG);
}
}
page = virt_to_page(bdata->node_bootmem_map);
pages = bdata->node_low_pfn - bdata->node_min_pfn;
pages = bootmem_bootmap_pages(pages);
count += pages;
while (pages--)
__free_pages_bootmem(page++, 0);
bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
return count;
}
/**
* free_all_bootmem_node - release a node's free pages to the buddy allocator
* @pgdat: node to be released
*
* Returns the number of pages actually released.
*/
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
register_page_bootmem_info_node(pgdat);
return free_all_bootmem_core(pgdat->bdata);
}
/**
* free_all_bootmem - release free pages to the buddy allocator
*
* Returns the number of pages actually released.
*/
unsigned long __init free_all_bootmem(void)
{
unsigned long total_pages = 0;
bootmem_data_t *bdata;
list_for_each_entry(bdata, &bdata_list, list)
total_pages += free_all_bootmem_core(bdata);
return total_pages;
}
static void __init __free(bootmem_data_t *bdata,
unsigned long sidx, unsigned long eidx)
{
unsigned long idx;
bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
sidx + bdata->node_min_pfn,
eidx + bdata->node_min_pfn);
if (bdata->hint_idx > sidx)
bdata->hint_idx = sidx;
for (idx = sidx; idx < eidx; idx++)
if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
BUG();
}
static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
unsigned long eidx, int flags)
{
unsigned long idx;
int exclusive = flags & BOOTMEM_EXCLUSIVE;
bdebug("nid=%td start=%lx end=%lx flags=%x\n",
bdata - bootmem_node_data,
sidx + bdata->node_min_pfn,
eidx + bdata->node_min_pfn,
flags);
for (idx = sidx; idx < eidx; idx++)
if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
if (exclusive) {
__free(bdata, sidx, idx);
return -EBUSY;
}
bdebug("silent double reserve of PFN %lx\n",
idx + bdata->node_min_pfn);
}
return 0;
}
static int __init mark_bootmem_node(bootmem_data_t *bdata,
unsigned long start, unsigned long end,
int reserve, int flags)
{
unsigned long sidx, eidx;
bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
bdata - bootmem_node_data, start, end, reserve, flags);
BUG_ON(start < bdata->node_min_pfn);
BUG_ON(end > bdata->node_low_pfn);
sidx = start - bdata->node_min_pfn;
eidx = end - bdata->node_min_pfn;
if (reserve)
return __reserve(bdata, sidx, eidx, flags);
else
__free(bdata, sidx, eidx);
return 0;
}
static int __init mark_bootmem(unsigned long start, unsigned long end,
int reserve, int flags)
{
unsigned long pos;
bootmem_data_t *bdata;
pos = start;
list_for_each_entry(bdata, &bdata_list, list) {
int err;
unsigned long max;
if (pos < bdata->node_min_pfn ||
pos >= bdata->node_low_pfn) {
BUG_ON(pos != start);
continue;
}
max = min(bdata->node_low_pfn, end);
err = mark_bootmem_node(bdata, pos, max, reserve, flags);
if (reserve && err) {
mark_bootmem(start, pos, 0, 0);
return err;
}
if (max == end)
return 0;
pos = bdata->node_low_pfn;
}
BUG();
}
/**
* free_bootmem_node - mark a page range as usable
* @pgdat: node the range resides on
* @physaddr: starting address of the range
* @size: size of the range in bytes
*
* Partial pages will be considered reserved and left as they are.
*
* The range must reside completely on the specified node.
*/
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size)
{
unsigned long start, end;
kmemleak_free_part(__va(physaddr), size);
start = PFN_UP(physaddr);
end = PFN_DOWN(physaddr + size);
mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}
/**
* free_bootmem - mark a page range as usable
* @addr: starting address of the range
* @size: size of the range in bytes
*
* Partial pages will be considered reserved and left as they are.
*
* The range must be contiguous but may span node boundaries.
*/
void __init free_bootmem(unsigned long addr, unsigned long size)
{
unsigned long start, end;
kmemleak_free_part(__va(addr), size);
start = PFN_UP(addr);
end = PFN_DOWN(addr + size);
mark_bootmem(start, end, 0, 0);
}
/**
* reserve_bootmem_node - mark a page range as reserved
* @pgdat: node the range resides on
* @physaddr: starting address of the range
* @size: size of the range in bytes
* @flags: reservation flags (see linux/bootmem.h)
*
* Partial pages will be reserved.
*
* The range must reside completely on the specified node.
*/
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size, int flags)
{
unsigned long start, end;
start = PFN_DOWN(physaddr);
end = PFN_UP(physaddr + size);
return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}
/**
* reserve_bootmem - mark a page range as reserved
* @addr: starting address of the range
* @size: size of the range in bytes
* @flags: reservation flags (see linux/bootmem.h)
*
* Partial pages will be reserved.
*
* The range must be contiguous but may span node boundaries.
*/
int __init reserve_bootmem(unsigned long addr, unsigned long size,
int flags)
{
unsigned long start, end;
start = PFN_DOWN(addr);
end = PFN_UP(addr + size);
return mark_bootmem(start, end, 1, flags);
}
int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
int flags)
{
return reserve_bootmem(phys, len, flags);
}
static unsigned long __init align_idx(struct bootmem_data *bdata,
unsigned long idx, unsigned long step)
{
unsigned long base = bdata->node_min_pfn;
/*
* Align the index with respect to the node start so that the
* combination of both satisfies the requested alignment.
*/
return ALIGN(base + idx, step) - base;
}
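/*
 * Worked example (illustrative): with node_min_pfn = 5, step = 4 and
 * idx = 2, align_idx() returns ALIGN(5 + 2, 4) - 5 = 8 - 5 = 3, so the
 * absolute pfn 5 + 3 = 8 satisfies the requested alignment.
 */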
static unsigned long __init align_off(struct bootmem_data *bdata,
unsigned long off, unsigned long align)
{
unsigned long base = PFN_PHYS(bdata->node_min_pfn);
/* Same as align_idx for byte offsets */
return ALIGN(base + off, align) - base;
}
static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
{
unsigned long fallback = 0;
unsigned long min, max, start, sidx, midx, step;
bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
align, goal, limit);
BUG_ON(!size);
BUG_ON(align & (align - 1));
BUG_ON(limit && goal + size > limit);
if (!bdata->node_bootmem_map)
return NULL;
min = bdata->node_min_pfn;
max = bdata->node_low_pfn;
goal >>= PAGE_SHIFT;
limit >>= PAGE_SHIFT;
if (limit && max > limit)
max = limit;
if (max <= min)
return NULL;
step = max(align >> PAGE_SHIFT, 1UL);
if (goal && min < goal && goal < max)
start = ALIGN(goal, step);
else
start = ALIGN(min, step);
sidx = start - bdata->node_min_pfn;
midx = max - bdata->node_min_pfn;
if (bdata->hint_idx > sidx) {
/*
* Handle the valid case of sidx being zero and still
* catch the fallback below.
*/
fallback = sidx + 1;
sidx = align_idx(bdata, bdata->hint_idx, step);
}
while (1) {
int merge;
void *region;
unsigned long eidx, i, start_off, end_off;
find_block:
sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
sidx = align_idx(bdata, sidx, step);
eidx = sidx + PFN_UP(size);
if (sidx >= midx || eidx > midx)
break;
for (i = sidx; i < eidx; i++)
if (test_bit(i, bdata->node_bootmem_map)) {
sidx = align_idx(bdata, i, step);
if (sidx == i)
sidx += step;
goto find_block;
}
if (bdata->last_end_off & (PAGE_SIZE - 1) &&
PFN_DOWN(bdata->last_end_off) + 1 == sidx)
start_off = align_off(bdata, bdata->last_end_off, align);
else
start_off = PFN_PHYS(sidx);
merge = PFN_DOWN(start_off) < sidx;
end_off = start_off + size;
bdata->last_end_off = end_off;
bdata->hint_idx = PFN_UP(end_off);
/*
* Reserve the area now:
*/
if (__reserve(bdata, PFN_DOWN(start_off) + merge,
PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
BUG();
region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
start_off);
memset(region, 0, size);
/*
* The min_count is set to 0 so that bootmem allocated blocks
* are never reported as leaks.
*/
kmemleak_alloc(region, size, 0, 0);
return region;
}
if (fallback) {
sidx = align_idx(bdata, fallback - 1, step);
fallback = 0;
goto find_block;
}
return NULL;
}
static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
{
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc(size, GFP_NOWAIT);
#ifdef CONFIG_HAVE_ARCH_BOOTMEM
{
bootmem_data_t *p_bdata;
p_bdata = bootmem_arch_preferred_node(bdata, size, align,
goal, limit);
if (p_bdata)
return alloc_bootmem_core(p_bdata, size, align,
goal, limit);
}
#endif
return NULL;
}
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal,
unsigned long limit)
{
bootmem_data_t *bdata;
void *region;
restart:
region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
if (region)
return region;
list_for_each_entry(bdata, &bdata_list, list) {
if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
continue;
if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
break;
region = alloc_bootmem_core(bdata, size, align, goal, limit);
if (region)
return region;
}
if (goal) {
goal = 0;
goto restart;
}
return NULL;
}
/**
* __alloc_bootmem_nopanic - allocate boot memory without panicking
* @size: size of the request in bytes
* @align: alignment of the region
* @goal: preferred starting address of the region
*
* The goal is dropped if it can not be satisfied and the allocation will
* fall back to memory below @goal.
*
* Allocation may happen on any node in the system.
*
* Returns NULL on failure.
*/
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
unsigned long goal)
{
unsigned long limit = 0;
return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
{
void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
if (mem)
return mem;
/*
* Whoops, we cannot satisfy the allocation request.
*/
printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
panic("Out of memory");
return NULL;
}
/**
* __alloc_bootmem - allocate boot memory
* @size: size of the request in bytes
* @align: alignment of the region
* @goal: preferred starting address of the region
*
* The goal is dropped if it can not be satisfied and the allocation will
* fall back to memory below @goal.
*
* Allocation may happen on any node in the system.
*
* The function panics if the request can not be satisfied.
*/
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
unsigned long goal)
{
unsigned long limit = 0;
return ___alloc_bootmem(size, align, goal, limit);
}
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
{
void *ptr;
ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
if (ptr)
return ptr;
ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
if (ptr)
return ptr;
return ___alloc_bootmem(size, align, goal, limit);
}
/**
* __alloc_bootmem_node - allocate boot memory from a specific node
* @pgdat: node to allocate from
* @size: size of the request in bytes
* @align: alignment of the region
* @goal: preferred starting address of the region
*
* The goal is dropped if it can not be satisfied and the allocation will
* fall back to memory below @goal.
*
* Allocation may fall back to any node in the system if the specified node
* can not hold the requested memory.
*
* The function panics if the request can not be satisfied.
*/
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
}
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
unsigned long end_pfn;
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
/* update goal according to MAX_DMA32_PFN */
end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
(goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
void *ptr;
unsigned long new_goal;
new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
ptr = alloc_bootmem_core(pgdat->bdata, size, align,
new_goal, 0);
if (ptr)
return ptr;
}
#endif
return __alloc_bootmem_node(pgdat, size, align, goal);
}
#ifdef CONFIG_SPARSEMEM
/**
* alloc_bootmem_section - allocate boot memory from a specific section
* @size: size of the request in bytes
* @section_nr: sparse map section to allocate from
*
* Return NULL on failure.
*/
void * __init alloc_bootmem_section(unsigned long size,
unsigned long section_nr)
{
bootmem_data_t *bdata;
unsigned long pfn, goal;
pfn = section_nr_to_pfn(section_nr);
goal = pfn << PAGE_SHIFT;
bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, 0);
}
#endif
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
void *ptr;
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
if (ptr)
return ptr;
ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
if (ptr)
return ptr;
return __alloc_bootmem_nopanic(size, align, goal);
}
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
#endif
/**
* __alloc_bootmem_low - allocate low boot memory
* @size: size of the request in bytes
* @align: alignment of the region
* @goal: preferred starting address of the region
*
* The goal is dropped if it can not be satisfied and the allocation will
* fall back to memory below @goal.
*
* Allocation may happen on any node in the system.
*
* The function panics if the request can not be satisfied.
*/
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
unsigned long goal)
{
return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
/**
* __alloc_bootmem_low_node - allocate low boot memory from a specific node
* @pgdat: node to allocate from
* @size: size of the request in bytes
* @align: alignment of the region
* @goal: preferred starting address of the region
*
* The goal is dropped if it can not be satisfied and the allocation will
* fall back to memory below @goal.
*
* Allocation may fall back to any node in the system if the specified node
* can not hold the requested memory.
*
* The function panics if the request can not be satisfied.
*/
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
return ___alloc_bootmem_node(pgdat->bdata, size, align,
goal, ARCH_LOW_ADDRESS_LIMIT);
}
| gpl-2.0 |
rchicoli/linux | arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c | 2404 | 3532 | /*
* MPIC timer wakeup driver
*
* Copyright 2013 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <asm/mpic_timer.h>
#include <asm/mpic.h>
struct fsl_mpic_timer_wakeup {
struct mpic_timer *timer;
struct work_struct free_work;
};
static struct fsl_mpic_timer_wakeup *fsl_wakeup;
static DEFINE_MUTEX(sysfs_lock);
static void fsl_free_resource(struct work_struct *ws)
{
struct fsl_mpic_timer_wakeup *wakeup =
container_of(ws, struct fsl_mpic_timer_wakeup, free_work);
mutex_lock(&sysfs_lock);
if (wakeup->timer) {
disable_irq_wake(wakeup->timer->irq);
mpic_free_timer(wakeup->timer);
}
wakeup->timer = NULL;
mutex_unlock(&sysfs_lock);
}
static irqreturn_t fsl_mpic_timer_irq(int irq, void *dev_id)
{
struct fsl_mpic_timer_wakeup *wakeup = dev_id;
schedule_work(&wakeup->free_work);
return wakeup->timer ? IRQ_HANDLED : IRQ_NONE;
}
static ssize_t fsl_timer_wakeup_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct timeval interval;
int val = 0;
mutex_lock(&sysfs_lock);
if (fsl_wakeup->timer) {
mpic_get_remain_time(fsl_wakeup->timer, &interval);
val = interval.tv_sec + 1;
}
mutex_unlock(&sysfs_lock);
return sprintf(buf, "%d\n", val);
}
static ssize_t fsl_timer_wakeup_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct timeval interval;
int ret;
interval.tv_usec = 0;
if (kstrtol(buf, 0, &interval.tv_sec))
return -EINVAL;
mutex_lock(&sysfs_lock);
if (fsl_wakeup->timer) {
disable_irq_wake(fsl_wakeup->timer->irq);
mpic_free_timer(fsl_wakeup->timer);
fsl_wakeup->timer = NULL;
}
if (!interval.tv_sec) {
mutex_unlock(&sysfs_lock);
return count;
}
fsl_wakeup->timer = mpic_request_timer(fsl_mpic_timer_irq,
fsl_wakeup, &interval);
if (!fsl_wakeup->timer) {
mutex_unlock(&sysfs_lock);
return -EINVAL;
}
ret = enable_irq_wake(fsl_wakeup->timer->irq);
if (ret) {
mpic_free_timer(fsl_wakeup->timer);
fsl_wakeup->timer = NULL;
mutex_unlock(&sysfs_lock);
return ret;
}
mpic_start_timer(fsl_wakeup->timer);
mutex_unlock(&sysfs_lock);
return count;
}
static struct device_attribute mpic_attributes = __ATTR(timer_wakeup, 0644,
fsl_timer_wakeup_show, fsl_timer_wakeup_store);
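/*
 * Illustrative usage sketch (path assumes the "mpic" system subsystem
 * root at /sys/devices/system/mpic): writing a number of seconds arms
 * the wakeup timer, writing 0 cancels it, and reading returns the time
 * remaining, e.g.
 *
 *	echo 30 > /sys/devices/system/mpic/timer_wakeup
 *	cat /sys/devices/system/mpic/timer_wakeup
 */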
static int __init fsl_wakeup_sys_init(void)
{
int ret;
fsl_wakeup = kzalloc(sizeof(struct fsl_mpic_timer_wakeup), GFP_KERNEL);
if (!fsl_wakeup)
return -ENOMEM;
INIT_WORK(&fsl_wakeup->free_work, fsl_free_resource);
ret = device_create_file(mpic_subsys.dev_root, &mpic_attributes);
if (ret)
kfree(fsl_wakeup);
return ret;
}
static void __exit fsl_wakeup_sys_exit(void)
{
device_remove_file(mpic_subsys.dev_root, &mpic_attributes);
mutex_lock(&sysfs_lock);
if (fsl_wakeup->timer) {
disable_irq_wake(fsl_wakeup->timer->irq);
mpic_free_timer(fsl_wakeup->timer);
}
kfree(fsl_wakeup);
mutex_unlock(&sysfs_lock);
}
module_init(fsl_wakeup_sys_init);
module_exit(fsl_wakeup_sys_exit);
MODULE_DESCRIPTION("Freescale MPIC global timer wakeup driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Wang Dongsheng <dongsheng.wang@freescale.com>");
| gpl-2.0 |
Arc-Team/android_kernel_htc_ruby | drivers/power/bq27x00_battery.c | 2660 | 19952 | /*
* BQ27x00 battery driver
*
* Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
* Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
* Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
* Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com>
*
* Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
*/
/*
* Datasheets:
* http://focus.ti.com/docs/prod/folders/print/bq27000.html
* http://focus.ti.com/docs/prod/folders/print/bq27500.html
*/
#include <linux/module.h>
#include <linux/param.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/idr.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <linux/power/bq27x00_battery.h>
#define DRIVER_VERSION "1.2.0"
#define BQ27x00_REG_TEMP 0x06
#define BQ27x00_REG_VOLT 0x08
#define BQ27x00_REG_AI 0x14
#define BQ27x00_REG_FLAGS 0x0A
#define BQ27x00_REG_TTE 0x16
#define BQ27x00_REG_TTF 0x18
#define BQ27x00_REG_TTECP 0x26
#define BQ27x00_REG_NAC 0x0C /* Nominal available capacity */
#define BQ27x00_REG_LMD 0x12 /* Last measured discharge */
#define BQ27x00_REG_CYCT 0x2A /* Cycle count total */
#define BQ27x00_REG_AE 0x22 /* Available energy */
#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */
#define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */
#define BQ27000_FLAG_CHGS BIT(7)
#define BQ27000_FLAG_FC BIT(5)
#define BQ27500_REG_SOC 0x2C
#define BQ27500_REG_DCAP 0x3C /* Design capacity */
#define BQ27500_FLAG_DSC BIT(0)
#define BQ27500_FLAG_FC BIT(9)
#define BQ27000_RS 20 /* Resistor sense */
struct bq27x00_device_info;
struct bq27x00_access_methods {
int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
};
enum bq27x00_chip { BQ27000, BQ27500 };
struct bq27x00_reg_cache {
int temperature;
int time_to_empty;
int time_to_empty_avg;
int time_to_full;
int charge_full;
int cycle_count;
int capacity;
int flags;
int current_now;
};
struct bq27x00_device_info {
struct device *dev;
int id;
enum bq27x00_chip chip;
struct bq27x00_reg_cache cache;
int charge_design_full;
unsigned long last_update;
struct delayed_work work;
struct power_supply bat;
struct bq27x00_access_methods bus;
struct mutex lock;
};
static enum power_supply_property bq27x00_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_ENERGY_NOW,
};
static unsigned int poll_interval = 360;
module_param(poll_interval, uint, 0644);
MODULE_PARM_DESC(poll_interval, "battery poll interval in seconds - " \
"0 disables polling");
/*
* Common code for BQ27x00 devices
*/
static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg,
bool single)
{
return di->bus.read(di, reg, single);
}
/*
* Return the battery Relative State-of-Charge
* Or < 0 if something fails.
*/
static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di)
{
int rsoc;
if (di->chip == BQ27500)
rsoc = bq27x00_read(di, BQ27500_REG_SOC, false);
else
rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true);
if (rsoc < 0)
dev_err(di->dev, "error reading relative State-of-Charge\n");
return rsoc;
}
/*
* Return a battery charge value in µAh
* Or < 0 if something fails.
*/
static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg)
{
int charge;
charge = bq27x00_read(di, reg, false);
if (charge < 0) {
dev_err(di->dev, "error reading nominal available capacity\n");
return charge;
}
if (di->chip == BQ27500)
charge *= 1000;
else
charge = charge * 3570 / BQ27000_RS;
return charge;
}
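/*
 * Worked example (illustrative): a raw reading of 1000 becomes
 * 1000 * 1000 = 1000000 µAh on the bq27500, and 1000 * 3570 / 20 =
 * 178500 µAh on the bq27000 (assuming the default 20 mOhm sense
 * resistor that BQ27000_RS represents).
 */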
/*
* Return the battery Nominal available capacity in µAh
* Or < 0 if something fails.
*/
static inline int bq27x00_battery_read_nac(struct bq27x00_device_info *di)
{
return bq27x00_battery_read_charge(di, BQ27x00_REG_NAC);
}
/*
* Return the battery Last measured discharge in µAh
* Or < 0 if something fails.
*/
static inline int bq27x00_battery_read_lmd(struct bq27x00_device_info *di)
{
return bq27x00_battery_read_charge(di, BQ27x00_REG_LMD);
}
/*
* Return the battery Initial last measured discharge in µAh
* Or < 0 if something fails.
*/
static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
{
int ilmd;
if (di->chip == BQ27500)
ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false);
else
ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true);
if (ilmd < 0) {
dev_err(di->dev, "error reading initial last measured discharge\n");
return ilmd;
}
if (di->chip == BQ27500)
ilmd *= 1000;
else
ilmd = ilmd * 256 * 3570 / BQ27000_RS;
return ilmd;
}
/*
* Return the battery Cycle count total
* Or < 0 if something fails.
*/
static int bq27x00_battery_read_cyct(struct bq27x00_device_info *di)
{
int cyct;
cyct = bq27x00_read(di, BQ27x00_REG_CYCT, false);
if (cyct < 0)
dev_err(di->dev, "error reading cycle count total\n");
return cyct;
}
/*
* Read a time register.
* Return < 0 if something fails.
*/
static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg)
{
int tval;
tval = bq27x00_read(di, reg, false);
if (tval < 0) {
dev_err(di->dev, "error reading register %02x: %d\n", reg, tval);
return tval;
}
if (tval == 65535)
return -ENODATA;
return tval * 60;
}
static void bq27x00_update(struct bq27x00_device_info *di)
{
struct bq27x00_reg_cache cache = {0, };
bool is_bq27500 = di->chip == BQ27500;
cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, is_bq27500);
if (cache.flags >= 0) {
cache.capacity = bq27x00_battery_read_rsoc(di);
cache.temperature = bq27x00_read(di, BQ27x00_REG_TEMP, false);
cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE);
cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
cache.charge_full = bq27x00_battery_read_lmd(di);
cache.cycle_count = bq27x00_battery_read_cyct(di);
if (!is_bq27500)
cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false);
/* We only have to read charge design full once */
if (di->charge_design_full <= 0)
di->charge_design_full = bq27x00_battery_read_ilmd(di);
}
/* Ignore current_now which is a snapshot of the current battery state
* and is likely to be different even between two consecutive reads */
if (memcmp(&di->cache, &cache, sizeof(cache) - sizeof(int)) != 0) {
di->cache = cache;
power_supply_changed(&di->bat);
}
di->last_update = jiffies;
}
static void bq27x00_battery_poll(struct work_struct *work)
{
struct bq27x00_device_info *di =
container_of(work, struct bq27x00_device_info, work.work);
bq27x00_update(di);
if (poll_interval > 0) {
/* The timer does not have to be accurate. */
set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
schedule_delayed_work(&di->work, poll_interval * HZ);
}
}
/*
* Return the battery temperature in tenths of degree Celsius
* Or < 0 if something fails.
*/
static int bq27x00_battery_temperature(struct bq27x00_device_info *di,
union power_supply_propval *val)
{
if (di->cache.temperature < 0)
return di->cache.temperature;
if (di->chip == BQ27500)
val->intval = di->cache.temperature - 2731;
else
val->intval = ((di->cache.temperature * 5) - 5463) / 2;
return 0;
}
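/*
 * Worked example (illustrative): the bq27500 reports temperature in
 * 0.1 K units, so a raw reading of 2982 yields 2982 - 2731 = 251,
 * i.e. 25.1 degrees Celsius in tenths of a degree.
 */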
/*
* Return the battery average current in µA
* Note that current can be negative signed as well
* Or 0 if something fails.
*/
static int bq27x00_battery_current(struct bq27x00_device_info *di,
union power_supply_propval *val)
{
int curr;
if (di->chip == BQ27500)
curr = bq27x00_read(di, BQ27x00_REG_AI, false);
else
curr = di->cache.current_now;
if (curr < 0)
return curr;
if (di->chip == BQ27500) {
/* bq27500 returns signed value */
val->intval = (int)((s16)curr) * 1000;
} else {
if (di->cache.flags & BQ27000_FLAG_CHGS) {
dev_dbg(di->dev, "negative current!\n");
curr = -curr;
}
val->intval = curr * 3570 / BQ27000_RS;
}
return 0;
}
static int bq27x00_battery_status(struct bq27x00_device_info *di,
union power_supply_propval *val)
{
int status;
if (di->chip == BQ27500) {
if (di->cache.flags & BQ27500_FLAG_FC)
status = POWER_SUPPLY_STATUS_FULL;
else if (di->cache.flags & BQ27500_FLAG_DSC)
status = POWER_SUPPLY_STATUS_DISCHARGING;
else
status = POWER_SUPPLY_STATUS_CHARGING;
} else {
if (di->cache.flags & BQ27000_FLAG_FC)
status = POWER_SUPPLY_STATUS_FULL;
else if (di->cache.flags & BQ27000_FLAG_CHGS)
status = POWER_SUPPLY_STATUS_CHARGING;
else if (power_supply_am_i_supplied(&di->bat))
status = POWER_SUPPLY_STATUS_NOT_CHARGING;
else
status = POWER_SUPPLY_STATUS_DISCHARGING;
}
val->intval = status;
return 0;
}
/*
* Return the battery Voltage in milivolts
* Or < 0 if something fails.
*/
static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
union power_supply_propval *val)
{
int volt;
volt = bq27x00_read(di, BQ27x00_REG_VOLT, false);
if (volt < 0)
return volt;
val->intval = volt * 1000;
return 0;
}
/*
* Return the battery Available energy in µWh
* Or < 0 if something fails.
*/
static int bq27x00_battery_energy(struct bq27x00_device_info *di,
union power_supply_propval *val)
{
int ae;
ae = bq27x00_read(di, BQ27x00_REG_AE, false);
if (ae < 0) {
dev_err(di->dev, "error reading available energy\n");
return ae;
}
if (di->chip == BQ27500)
ae *= 1000;
else
ae = ae * 29200 / BQ27000_RS;
val->intval = ae;
return 0;
}
static int bq27x00_simple_value(int value,
union power_supply_propval *val)
{
if (value < 0)
return value;
val->intval = value;
return 0;
}
#define to_bq27x00_device_info(x) container_of((x), \
struct bq27x00_device_info, bat);
static int bq27x00_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
int ret = 0;
struct bq27x00_device_info *di = to_bq27x00_device_info(psy);
mutex_lock(&di->lock);
if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
cancel_delayed_work_sync(&di->work);
bq27x00_battery_poll(&di->work.work);
}
mutex_unlock(&di->lock);
if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
return -ENODEV;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
ret = bq27x00_battery_status(di, val);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
ret = bq27x00_battery_voltage(di, val);
break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = di->cache.flags < 0 ? 0 : 1;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
ret = bq27x00_battery_current(di, val);
break;
case POWER_SUPPLY_PROP_CAPACITY:
ret = bq27x00_simple_value(di->cache.capacity, val);
break;
case POWER_SUPPLY_PROP_TEMP:
ret = bq27x00_battery_temperature(di, val);
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
ret = bq27x00_simple_value(di->cache.time_to_empty, val);
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
ret = bq27x00_simple_value(di->cache.time_to_empty_avg, val);
break;
case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
ret = bq27x00_simple_value(di->cache.time_to_full, val);
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
ret = bq27x00_simple_value(bq27x00_battery_read_nac(di), val);
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
ret = bq27x00_simple_value(di->cache.charge_full, val);
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
ret = bq27x00_simple_value(di->charge_design_full, val);
break;
case POWER_SUPPLY_PROP_CYCLE_COUNT:
ret = bq27x00_simple_value(di->cache.cycle_count, val);
break;
case POWER_SUPPLY_PROP_ENERGY_NOW:
ret = bq27x00_battery_energy(di, val);
break;
default:
return -EINVAL;
}
return ret;
}
static void bq27x00_external_power_changed(struct power_supply *psy)
{
struct bq27x00_device_info *di = to_bq27x00_device_info(psy);
cancel_delayed_work_sync(&di->work);
schedule_delayed_work(&di->work, 0);
}
static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
{
int ret;
di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
di->bat.properties = bq27x00_battery_props;
di->bat.num_properties = ARRAY_SIZE(bq27x00_battery_props);
di->bat.get_property = bq27x00_battery_get_property;
di->bat.external_power_changed = bq27x00_external_power_changed;
INIT_DELAYED_WORK(&di->work, bq27x00_battery_poll);
mutex_init(&di->lock);
ret = power_supply_register(di->dev, &di->bat);
if (ret) {
dev_err(di->dev, "failed to register battery: %d\n", ret);
return ret;
}
dev_info(di->dev, "support ver. %s enabled\n", DRIVER_VERSION);
bq27x00_update(di);
return 0;
}
static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di)
{
cancel_delayed_work_sync(&di->work);
power_supply_unregister(&di->bat);
mutex_destroy(&di->lock);
}
/* i2c specific code */
#ifdef CONFIG_BATTERY_BQ27X00_I2C
/* If the system has several batteries we need a different name for each
* of them...
*/
static DEFINE_IDR(battery_id);
static DEFINE_MUTEX(battery_mutex);
static int bq27x00_read_i2c(struct bq27x00_device_info *di, u8 reg, bool single)
{
struct i2c_client *client = to_i2c_client(di->dev);
struct i2c_msg msg[2];
unsigned char data[2];
int ret;
if (!client->adapter)
return -ENODEV;
msg[0].addr = client->addr;
msg[0].flags = 0;
msg[0].buf = &reg;
msg[0].len = sizeof(reg);
msg[1].addr = client->addr;
msg[1].flags = I2C_M_RD;
msg[1].buf = data;
if (single)
msg[1].len = 1;
else
msg[1].len = 2;
ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
if (ret < 0)
return ret;
if (!single)
ret = get_unaligned_le16(data);
else
ret = data[0];
return ret;
}
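/*
 * For reference, the two-message transfer built above amounts to a
 * register read with a repeated start:
 *
 *   S <addr> W <reg>  Sr <addr> R <data0> [<data1>] P
 *
 * and for two-byte reads the gauge sends the low byte first, which is
 * why the result is assembled with get_unaligned_le16().
 */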
static int bq27x00_battery_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
char *name;
struct bq27x00_device_info *di;
int num;
int retval = 0;
/* Get new ID for the new battery device */
retval = idr_pre_get(&battery_id, GFP_KERNEL);
if (retval == 0)
return -ENOMEM;
mutex_lock(&battery_mutex);
retval = idr_get_new(&battery_id, client, &num);
mutex_unlock(&battery_mutex);
if (retval < 0)
return retval;
name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
if (!name) {
dev_err(&client->dev, "failed to allocate device name\n");
retval = -ENOMEM;
goto batt_failed_1;
}
di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di) {
dev_err(&client->dev, "failed to allocate device info data\n");
retval = -ENOMEM;
goto batt_failed_2;
}
di->id = num;
di->dev = &client->dev;
di->chip = id->driver_data;
di->bat.name = name;
di->bus.read = &bq27x00_read_i2c;
if (bq27x00_powersupply_init(di))
goto batt_failed_3;
i2c_set_clientdata(client, di);
return 0;
batt_failed_3:
kfree(di);
batt_failed_2:
kfree(name);
batt_failed_1:
mutex_lock(&battery_mutex);
idr_remove(&battery_id, num);
mutex_unlock(&battery_mutex);
return retval;
}
static int bq27x00_battery_remove(struct i2c_client *client)
{
struct bq27x00_device_info *di = i2c_get_clientdata(client);
bq27x00_powersupply_unregister(di);
kfree(di->bat.name);
mutex_lock(&battery_mutex);
idr_remove(&battery_id, di->id);
mutex_unlock(&battery_mutex);
kfree(di);
return 0;
}
static const struct i2c_device_id bq27x00_id[] = {
{ "bq27200", BQ27000 }, /* the bq27200 is the same as the bq27000, but with an i2c interface */
{ "bq27500", BQ27500 },
{},
};
MODULE_DEVICE_TABLE(i2c, bq27x00_id);
static struct i2c_driver bq27x00_battery_driver = {
.driver = {
.name = "bq27x00-battery",
},
.probe = bq27x00_battery_probe,
.remove = bq27x00_battery_remove,
.id_table = bq27x00_id,
};
static inline int bq27x00_battery_i2c_init(void)
{
int ret = i2c_add_driver(&bq27x00_battery_driver);
if (ret)
printk(KERN_ERR "Unable to register BQ27x00 i2c driver\n");
return ret;
}
static inline void bq27x00_battery_i2c_exit(void)
{
i2c_del_driver(&bq27x00_battery_driver);
}
#else
static inline int bq27x00_battery_i2c_init(void) { return 0; }
static inline void bq27x00_battery_i2c_exit(void) {};
#endif
/* platform specific code */
#ifdef CONFIG_BATTERY_BQ27X00_PLATFORM
static int bq27000_read_platform(struct bq27x00_device_info *di, u8 reg,
bool single)
{
struct device *dev = di->dev;
struct bq27000_platform_data *pdata = dev->platform_data;
unsigned int timeout = 3;
int upper, lower;
int temp;
if (!single) {
/* Make sure the value has not changed in between reading the
* lower and the upper part */
upper = pdata->read(dev, reg + 1);
do {
temp = upper;
if (upper < 0)
return upper;
lower = pdata->read(dev, reg);
if (lower < 0)
return lower;
upper = pdata->read(dev, reg + 1);
} while (temp != upper && --timeout);
if (timeout == 0)
return -EIO;
return (upper << 8) | lower;
}
return pdata->read(dev, reg);
}
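/*
 * Sketch of the race the loop above guards against (illustrative
 * values): a 16-bit reading such as 0x01FF may roll over to 0x0200
 * between the two single-byte reads, which would otherwise combine into
 * a bogus 0x0100 or 0x02FF.  Re-reading the upper byte until it is
 * stable (or the retry budget runs out) keeps the combined value
 * consistent.
 */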
static int __devinit bq27000_battery_probe(struct platform_device *pdev)
{
struct bq27x00_device_info *di;
struct bq27000_platform_data *pdata = pdev->dev.platform_data;
int ret;
if (!pdata) {
dev_err(&pdev->dev, "no platform_data supplied\n");
return -EINVAL;
}
if (!pdata->read) {
dev_err(&pdev->dev, "no hdq read callback supplied\n");
return -EINVAL;
}
di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di) {
dev_err(&pdev->dev, "failed to allocate device info data\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, di);
di->dev = &pdev->dev;
di->chip = BQ27000;
di->bat.name = pdata->name ?: dev_name(&pdev->dev);
di->bus.read = &bq27000_read_platform;
ret = bq27x00_powersupply_init(di);
if (ret)
goto err_free;
return 0;
err_free:
platform_set_drvdata(pdev, NULL);
kfree(di);
return ret;
}
static int __devexit bq27000_battery_remove(struct platform_device *pdev)
{
struct bq27x00_device_info *di = platform_get_drvdata(pdev);
bq27x00_powersupply_unregister(di);
platform_set_drvdata(pdev, NULL);
kfree(di);
return 0;
}
static struct platform_driver bq27000_battery_driver = {
.probe = bq27000_battery_probe,
.remove = __devexit_p(bq27000_battery_remove),
.driver = {
.name = "bq27000-battery",
.owner = THIS_MODULE,
},
};
static inline int bq27x00_battery_platform_init(void)
{
int ret = platform_driver_register(&bq27000_battery_driver);
if (ret)
printk(KERN_ERR "Unable to register BQ27000 platform driver\n");
return ret;
}
static inline void bq27x00_battery_platform_exit(void)
{
platform_driver_unregister(&bq27000_battery_driver);
}
#else
static inline int bq27x00_battery_platform_init(void) { return 0; }
static inline void bq27x00_battery_platform_exit(void) {};
#endif
/*
* Module stuff
*/
static int __init bq27x00_battery_init(void)
{
int ret;
ret = bq27x00_battery_i2c_init();
if (ret)
return ret;
ret = bq27x00_battery_platform_init();
if (ret)
bq27x00_battery_i2c_exit();
return ret;
}
module_init(bq27x00_battery_init);
static void __exit bq27x00_battery_exit(void)
{
bq27x00_battery_platform_exit();
bq27x00_battery_i2c_exit();
}
module_exit(bq27x00_battery_exit);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_DESCRIPTION("BQ27x00 battery monitor driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
animania260/android_ani-kernel_galaxy_reverb | drivers/media/video/tvp514x.c | 2916 | 32432 | /*
* drivers/media/video/tvp514x.c
*
* TI TVP5146/47 decoder driver
*
* Copyright (C) 2008 Texas Instruments Inc
* Author: Vaibhav Hiremath <hvaibhav@ti.com>
*
* Contributors:
* Sivaraj R <sivaraj@ti.com>
* Brijesh R Jadav <brijesh.j@ti.com>
* Hardik Shah <hardik.shah@ti.com>
* Manjunath Hadli <mrh@ti.com>
* Karicheri Muralidharan <m-karicheri2@ti.com>
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
#include <media/v4l2-mediabus.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-ctrls.h>
#include <media/tvp514x.h>
#include "tvp514x_regs.h"
/* Module Name */
#define TVP514X_MODULE_NAME "tvp514x"
/* Private macros for TVP */
#define I2C_RETRY_COUNT (5)
#define LOCK_RETRY_COUNT (5)
#define LOCK_RETRY_DELAY (200)
/* Debug functions */
static int debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("TVP514X linux decoder driver");
MODULE_LICENSE("GPL");
/* enum tvp514x_std - enum for supported standards */
enum tvp514x_std {
STD_NTSC_MJ = 0,
STD_PAL_BDGHIN,
STD_INVALID
};
/**
 * struct tvp514x_std_info - Structure to store standard information
 * @width: Line width in pixels
 * @height: Number of active lines
* @video_std: Value to write in REG_VIDEO_STD register
* @standard: v4l2 standard structure information
*/
struct tvp514x_std_info {
unsigned long width;
unsigned long height;
u8 video_std;
struct v4l2_standard standard;
};
static struct tvp514x_reg tvp514x_reg_list_default[0x40];
static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable);
/**
* struct tvp514x_decoder - TVP5146/47 decoder object
* @sd: Subdevice Slave handle
* @tvp514x_regs: copy of hw's regs with preset values.
* @pdata: Board specific
* @ver: Chip version
* @streaming: TVP5146/47 decoder streaming - enabled or disabled.
* @current_std: Current standard
* @num_stds: Number of standards
* @std_list: Standards list
* @input: Input routing at chip level
* @output: Output routing at chip level
*/
struct tvp514x_decoder {
struct v4l2_subdev sd;
struct v4l2_ctrl_handler hdl;
struct tvp514x_reg tvp514x_regs[ARRAY_SIZE(tvp514x_reg_list_default)];
const struct tvp514x_platform_data *pdata;
int ver;
int streaming;
enum tvp514x_std current_std;
int num_stds;
const struct tvp514x_std_info *std_list;
/* Input and Output Routing parameters */
u32 input;
u32 output;
};
/* TVP514x default register values */
static struct tvp514x_reg tvp514x_reg_list_default[] = {
/* Composite selected */
{TOK_WRITE, REG_INPUT_SEL, 0x05},
{TOK_WRITE, REG_AFE_GAIN_CTRL, 0x0F},
/* Auto mode */
{TOK_WRITE, REG_VIDEO_STD, 0x00},
{TOK_WRITE, REG_OPERATION_MODE, 0x00},
{TOK_SKIP, REG_AUTOSWITCH_MASK, 0x3F},
{TOK_WRITE, REG_COLOR_KILLER, 0x10},
{TOK_WRITE, REG_LUMA_CONTROL1, 0x00},
{TOK_WRITE, REG_LUMA_CONTROL2, 0x00},
{TOK_WRITE, REG_LUMA_CONTROL3, 0x02},
{TOK_WRITE, REG_BRIGHTNESS, 0x80},
{TOK_WRITE, REG_CONTRAST, 0x80},
{TOK_WRITE, REG_SATURATION, 0x80},
{TOK_WRITE, REG_HUE, 0x00},
{TOK_WRITE, REG_CHROMA_CONTROL1, 0x00},
{TOK_WRITE, REG_CHROMA_CONTROL2, 0x0E},
/* Reserved */
{TOK_SKIP, 0x0F, 0x00},
{TOK_WRITE, REG_COMP_PR_SATURATION, 0x80},
{TOK_WRITE, REG_COMP_Y_CONTRAST, 0x80},
{TOK_WRITE, REG_COMP_PB_SATURATION, 0x80},
/* Reserved */
{TOK_SKIP, 0x13, 0x00},
{TOK_WRITE, REG_COMP_Y_BRIGHTNESS, 0x80},
/* Reserved */
{TOK_SKIP, 0x15, 0x00},
/* NTSC timing */
{TOK_SKIP, REG_AVID_START_PIXEL_LSB, 0x55},
{TOK_SKIP, REG_AVID_START_PIXEL_MSB, 0x00},
{TOK_SKIP, REG_AVID_STOP_PIXEL_LSB, 0x25},
{TOK_SKIP, REG_AVID_STOP_PIXEL_MSB, 0x03},
/* NTSC timing */
{TOK_SKIP, REG_HSYNC_START_PIXEL_LSB, 0x00},
{TOK_SKIP, REG_HSYNC_START_PIXEL_MSB, 0x00},
{TOK_SKIP, REG_HSYNC_STOP_PIXEL_LSB, 0x40},
{TOK_SKIP, REG_HSYNC_STOP_PIXEL_MSB, 0x00},
/* NTSC timing */
{TOK_SKIP, REG_VSYNC_START_LINE_LSB, 0x04},
{TOK_SKIP, REG_VSYNC_START_LINE_MSB, 0x00},
{TOK_SKIP, REG_VSYNC_STOP_LINE_LSB, 0x07},
{TOK_SKIP, REG_VSYNC_STOP_LINE_MSB, 0x00},
/* NTSC timing */
{TOK_SKIP, REG_VBLK_START_LINE_LSB, 0x01},
{TOK_SKIP, REG_VBLK_START_LINE_MSB, 0x00},
{TOK_SKIP, REG_VBLK_STOP_LINE_LSB, 0x15},
{TOK_SKIP, REG_VBLK_STOP_LINE_MSB, 0x00},
/* Reserved */
{TOK_SKIP, 0x26, 0x00},
/* Reserved */
{TOK_SKIP, 0x27, 0x00},
{TOK_SKIP, REG_FAST_SWTICH_CONTROL, 0xCC},
/* Reserved */
{TOK_SKIP, 0x29, 0x00},
{TOK_SKIP, REG_FAST_SWTICH_SCART_DELAY, 0x00},
/* Reserved */
{TOK_SKIP, 0x2B, 0x00},
{TOK_SKIP, REG_SCART_DELAY, 0x00},
{TOK_SKIP, REG_CTI_DELAY, 0x00},
{TOK_SKIP, REG_CTI_CONTROL, 0x00},
/* Reserved */
{TOK_SKIP, 0x2F, 0x00},
/* Reserved */
{TOK_SKIP, 0x30, 0x00},
/* Reserved */
{TOK_SKIP, 0x31, 0x00},
/* HS, VS active high */
{TOK_WRITE, REG_SYNC_CONTROL, 0x00},
/* 10-bit BT.656 */
{TOK_WRITE, REG_OUTPUT_FORMATTER1, 0x00},
/* Enable clk & data */
{TOK_WRITE, REG_OUTPUT_FORMATTER2, 0x11},
/* Enable AVID & FLD */
{TOK_WRITE, REG_OUTPUT_FORMATTER3, 0xEE},
/* Enable VS & HS */
{TOK_WRITE, REG_OUTPUT_FORMATTER4, 0xAF},
{TOK_WRITE, REG_OUTPUT_FORMATTER5, 0xFF},
{TOK_WRITE, REG_OUTPUT_FORMATTER6, 0xFF},
/* Clear status */
{TOK_WRITE, REG_CLEAR_LOST_LOCK, 0x01},
{TOK_TERM, 0, 0},
};
/**
* Supported standards -
*
 * Currently only two standards are supported; support for the remaining
 * modes (e.g. SECAM) still needs to be added.
*/
static const struct tvp514x_std_info tvp514x_std_list[] = {
/* Standard: STD_NTSC_MJ */
[STD_NTSC_MJ] = {
.width = NTSC_NUM_ACTIVE_PIXELS,
.height = NTSC_NUM_ACTIVE_LINES,
.video_std = VIDEO_STD_NTSC_MJ_BIT,
.standard = {
.index = 0,
.id = V4L2_STD_NTSC,
.name = "NTSC",
.frameperiod = {1001, 30000},
.framelines = 525
},
/* Standard: STD_PAL_BDGHIN */
},
[STD_PAL_BDGHIN] = {
.width = PAL_NUM_ACTIVE_PIXELS,
.height = PAL_NUM_ACTIVE_LINES,
.video_std = VIDEO_STD_PAL_BDGHIN_BIT,
.standard = {
.index = 1,
.id = V4L2_STD_PAL,
.name = "PAL",
.frameperiod = {1, 25},
.framelines = 625
},
},
/* Standard: need to add for additional standard */
};
static inline struct tvp514x_decoder *to_decoder(struct v4l2_subdev *sd)
{
return container_of(sd, struct tvp514x_decoder, sd);
}
static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
return &container_of(ctrl->handler, struct tvp514x_decoder, hdl)->sd;
}
/**
* tvp514x_read_reg() - Read a value from a register in an TVP5146/47.
* @sd: ptr to v4l2_subdev struct
* @reg: TVP5146/47 register address
*
 * Returns the value read if successful, or a negative error code otherwise.
*/
static int tvp514x_read_reg(struct v4l2_subdev *sd, u8 reg)
{
int err, retry = 0;
struct i2c_client *client = v4l2_get_subdevdata(sd);
read_again:
err = i2c_smbus_read_byte_data(client, reg);
if (err < 0) {
if (retry <= I2C_RETRY_COUNT) {
v4l2_warn(sd, "Read: retry ... %d\n", retry);
retry++;
msleep_interruptible(10);
goto read_again;
}
}
return err;
}
/**
* dump_reg() - dump the register content of TVP5146/47.
* @sd: ptr to v4l2_subdev struct
* @reg: TVP5146/47 register address
*/
static void dump_reg(struct v4l2_subdev *sd, u8 reg)
{
u32 val;
val = tvp514x_read_reg(sd, reg);
v4l2_info(sd, "Reg(0x%.2X): 0x%.2X\n", reg, val);
}
/**
* tvp514x_write_reg() - Write a value to a register in TVP5146/47
* @sd: ptr to v4l2_subdev struct
* @reg: TVP5146/47 register address
* @val: value to be written to the register
*
* Write a value to a register in an TVP5146/47 decoder device.
* Returns zero if successful, or non-zero otherwise.
*/
static int tvp514x_write_reg(struct v4l2_subdev *sd, u8 reg, u8 val)
{
int err, retry = 0;
struct i2c_client *client = v4l2_get_subdevdata(sd);
write_again:
err = i2c_smbus_write_byte_data(client, reg, val);
if (err) {
if (retry <= I2C_RETRY_COUNT) {
v4l2_warn(sd, "Write: retry ... %d\n", retry);
retry++;
msleep_interruptible(10);
goto write_again;
}
}
return err;
}
/**
* tvp514x_write_regs() : Initializes a list of TVP5146/47 registers
* @sd: ptr to v4l2_subdev struct
* @reglist: list of TVP5146/47 registers and values
*
* Initializes a list of TVP5146/47 registers:-
* if token is TOK_TERM, then entire write operation terminates
* if token is TOK_DELAY, then a delay of 'val' msec is introduced
* if token is TOK_SKIP, then the register write is skipped
* if token is TOK_WRITE, then the register write is performed
* Returns zero if successful, or non-zero otherwise.
*/
static int tvp514x_write_regs(struct v4l2_subdev *sd,
const struct tvp514x_reg reglist[])
{
int err;
const struct tvp514x_reg *next = reglist;
for (; next->token != TOK_TERM; next++) {
if (next->token == TOK_DELAY) {
msleep(next->val);
continue;
}
if (next->token == TOK_SKIP)
continue;
err = tvp514x_write_reg(sd, next->reg, (u8) next->val);
if (err) {
v4l2_err(sd, "Write failed. Err[%d]\n", err);
return err;
}
}
return 0;
}
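/*
 * Minimal usage sketch (hypothetical list, not part of the driver): a
 * caller hands tvp514x_write_regs() a TOK_TERM-terminated array, e.g.
 *
 *   static const struct tvp514x_reg my_seq[] = {
 *           {TOK_WRITE, REG_BRIGHTNESS, 0x80},
 *           {TOK_DELAY, 0, 10},             (10 ms delay)
 *           {TOK_SKIP,  REG_CONTRAST, 0},   (left untouched)
 *           {TOK_TERM,  0, 0},
 *   };
 *   err = tvp514x_write_regs(sd, my_seq);
 */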
/**
* tvp514x_query_current_std() : Query the current standard detected by TVP5146/47
* @sd: ptr to v4l2_subdev struct
*
 * Returns the current standard detected by the TVP5146/47, or STD_INVALID
 * if no standard is detected.
*/
static enum tvp514x_std tvp514x_query_current_std(struct v4l2_subdev *sd)
{
u8 std, std_status;
std = tvp514x_read_reg(sd, REG_VIDEO_STD);
if ((std & VIDEO_STD_MASK) == VIDEO_STD_AUTO_SWITCH_BIT)
/* use the standard status register */
std_status = tvp514x_read_reg(sd, REG_VIDEO_STD_STATUS);
else
/* use the standard register itself */
std_status = std;
switch (std_status & VIDEO_STD_MASK) {
case VIDEO_STD_NTSC_MJ_BIT:
return STD_NTSC_MJ;
case VIDEO_STD_PAL_BDGHIN_BIT:
return STD_PAL_BDGHIN;
default:
return STD_INVALID;
}
return STD_INVALID;
}
/* TVP5146/47 register dump function */
static void tvp514x_reg_dump(struct v4l2_subdev *sd)
{
dump_reg(sd, REG_INPUT_SEL);
dump_reg(sd, REG_AFE_GAIN_CTRL);
dump_reg(sd, REG_VIDEO_STD);
dump_reg(sd, REG_OPERATION_MODE);
dump_reg(sd, REG_COLOR_KILLER);
dump_reg(sd, REG_LUMA_CONTROL1);
dump_reg(sd, REG_LUMA_CONTROL2);
dump_reg(sd, REG_LUMA_CONTROL3);
dump_reg(sd, REG_BRIGHTNESS);
dump_reg(sd, REG_CONTRAST);
dump_reg(sd, REG_SATURATION);
dump_reg(sd, REG_HUE);
dump_reg(sd, REG_CHROMA_CONTROL1);
dump_reg(sd, REG_CHROMA_CONTROL2);
dump_reg(sd, REG_COMP_PR_SATURATION);
dump_reg(sd, REG_COMP_Y_CONTRAST);
dump_reg(sd, REG_COMP_PB_SATURATION);
dump_reg(sd, REG_COMP_Y_BRIGHTNESS);
dump_reg(sd, REG_AVID_START_PIXEL_LSB);
dump_reg(sd, REG_AVID_START_PIXEL_MSB);
dump_reg(sd, REG_AVID_STOP_PIXEL_LSB);
dump_reg(sd, REG_AVID_STOP_PIXEL_MSB);
dump_reg(sd, REG_HSYNC_START_PIXEL_LSB);
dump_reg(sd, REG_HSYNC_START_PIXEL_MSB);
dump_reg(sd, REG_HSYNC_STOP_PIXEL_LSB);
dump_reg(sd, REG_HSYNC_STOP_PIXEL_MSB);
dump_reg(sd, REG_VSYNC_START_LINE_LSB);
dump_reg(sd, REG_VSYNC_START_LINE_MSB);
dump_reg(sd, REG_VSYNC_STOP_LINE_LSB);
dump_reg(sd, REG_VSYNC_STOP_LINE_MSB);
dump_reg(sd, REG_VBLK_START_LINE_LSB);
dump_reg(sd, REG_VBLK_START_LINE_MSB);
dump_reg(sd, REG_VBLK_STOP_LINE_LSB);
dump_reg(sd, REG_VBLK_STOP_LINE_MSB);
dump_reg(sd, REG_SYNC_CONTROL);
dump_reg(sd, REG_OUTPUT_FORMATTER1);
dump_reg(sd, REG_OUTPUT_FORMATTER2);
dump_reg(sd, REG_OUTPUT_FORMATTER3);
dump_reg(sd, REG_OUTPUT_FORMATTER4);
dump_reg(sd, REG_OUTPUT_FORMATTER5);
dump_reg(sd, REG_OUTPUT_FORMATTER6);
dump_reg(sd, REG_CLEAR_LOST_LOCK);
}
/**
* tvp514x_configure() - Configure the TVP5146/47 registers
* @sd: ptr to v4l2_subdev struct
* @decoder: ptr to tvp514x_decoder structure
*
* Returns zero if successful, or non-zero otherwise.
*/
static int tvp514x_configure(struct v4l2_subdev *sd,
struct tvp514x_decoder *decoder)
{
int err;
/* common register initialization */
err =
tvp514x_write_regs(sd, decoder->tvp514x_regs);
if (err)
return err;
if (debug)
tvp514x_reg_dump(sd);
return 0;
}
/**
* tvp514x_detect() - Detect if an tvp514x is present, and if so which revision.
* @sd: pointer to standard V4L2 sub-device structure
* @decoder: pointer to tvp514x_decoder structure
*
* A device is considered to be detected if the chip ID (LSB and MSB)
* registers match the expected values.
 * Any value of the ROM version register is accepted.
 * Returns -ENODEV if no device is detected, or zero if a device is
 * detected.
*/
static int tvp514x_detect(struct v4l2_subdev *sd,
struct tvp514x_decoder *decoder)
{
u8 chip_id_msb, chip_id_lsb, rom_ver;
struct i2c_client *client = v4l2_get_subdevdata(sd);
chip_id_msb = tvp514x_read_reg(sd, REG_CHIP_ID_MSB);
chip_id_lsb = tvp514x_read_reg(sd, REG_CHIP_ID_LSB);
rom_ver = tvp514x_read_reg(sd, REG_ROM_VERSION);
v4l2_dbg(1, debug, sd,
"chip id detected msb:0x%x lsb:0x%x rom version:0x%x\n",
chip_id_msb, chip_id_lsb, rom_ver);
if ((chip_id_msb != TVP514X_CHIP_ID_MSB)
|| ((chip_id_lsb != TVP5146_CHIP_ID_LSB)
&& (chip_id_lsb != TVP5147_CHIP_ID_LSB))) {
/* We didn't read the values we expected, so this must not be
 * a TVP5146/47.
*/
v4l2_err(sd, "chip id mismatch msb:0x%x lsb:0x%x\n",
chip_id_msb, chip_id_lsb);
return -ENODEV;
}
decoder->ver = rom_ver;
v4l2_info(sd, "%s (Version - 0x%.2x) found at 0x%x (%s)\n",
client->name, decoder->ver,
client->addr << 1, client->adapter->name);
return 0;
}
/**
* tvp514x_querystd() - V4L2 decoder interface handler for querystd
* @sd: pointer to standard V4L2 sub-device structure
* @std_id: standard V4L2 std_id ioctl enum
*
* Returns the current standard detected by TVP5146/47. If no active input is
* detected then *std_id is set to 0 and the function returns 0.
*/
static int tvp514x_querystd(struct v4l2_subdev *sd, v4l2_std_id *std_id)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
enum tvp514x_std current_std;
enum tvp514x_input input_sel;
u8 sync_lock_status, lock_mask;
if (std_id == NULL)
return -EINVAL;
*std_id = V4L2_STD_UNKNOWN;
/* query the current standard */
current_std = tvp514x_query_current_std(sd);
if (current_std == STD_INVALID)
return 0;
input_sel = decoder->input;
switch (input_sel) {
case INPUT_CVBS_VI1A:
case INPUT_CVBS_VI1B:
case INPUT_CVBS_VI1C:
case INPUT_CVBS_VI2A:
case INPUT_CVBS_VI2B:
case INPUT_CVBS_VI2C:
case INPUT_CVBS_VI3A:
case INPUT_CVBS_VI3B:
case INPUT_CVBS_VI3C:
case INPUT_CVBS_VI4A:
lock_mask = STATUS_CLR_SUBCAR_LOCK_BIT |
STATUS_HORZ_SYNC_LOCK_BIT |
STATUS_VIRT_SYNC_LOCK_BIT;
break;
case INPUT_SVIDEO_VI2A_VI1A:
case INPUT_SVIDEO_VI2B_VI1B:
case INPUT_SVIDEO_VI2C_VI1C:
case INPUT_SVIDEO_VI2A_VI3A:
case INPUT_SVIDEO_VI2B_VI3B:
case INPUT_SVIDEO_VI2C_VI3C:
case INPUT_SVIDEO_VI4A_VI1A:
case INPUT_SVIDEO_VI4A_VI1B:
case INPUT_SVIDEO_VI4A_VI1C:
case INPUT_SVIDEO_VI4A_VI3A:
case INPUT_SVIDEO_VI4A_VI3B:
case INPUT_SVIDEO_VI4A_VI3C:
lock_mask = STATUS_HORZ_SYNC_LOCK_BIT |
STATUS_VIRT_SYNC_LOCK_BIT;
break;
/*Need to add other interfaces*/
default:
return -EINVAL;
}
/* check whether signal is locked */
sync_lock_status = tvp514x_read_reg(sd, REG_STATUS1);
if (lock_mask != (sync_lock_status & lock_mask))
return 0; /* No input detected */
*std_id = decoder->std_list[current_std].standard.id;
v4l2_dbg(1, debug, sd, "Current STD: %s\n",
decoder->std_list[current_std].standard.name);
return 0;
}
/**
* tvp514x_s_std() - V4L2 decoder interface handler for s_std
* @sd: pointer to standard V4L2 sub-device structure
* @std_id: standard V4L2 v4l2_std_id ioctl enum
*
* If std_id is supported, sets the requested standard. Otherwise, returns
* -EINVAL
*/
static int tvp514x_s_std(struct v4l2_subdev *sd, v4l2_std_id std_id)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
int err, i;
for (i = 0; i < decoder->num_stds; i++)
if (std_id & decoder->std_list[i].standard.id)
break;
if ((i == decoder->num_stds) || (i == STD_INVALID))
return -EINVAL;
err = tvp514x_write_reg(sd, REG_VIDEO_STD,
decoder->std_list[i].video_std);
if (err)
return err;
decoder->current_std = i;
decoder->tvp514x_regs[REG_VIDEO_STD].val =
decoder->std_list[i].video_std;
v4l2_dbg(1, debug, sd, "Standard set to: %s\n",
decoder->std_list[i].standard.name);
return 0;
}
/**
* tvp514x_s_routing() - V4L2 decoder interface handler for s_routing
* @sd: pointer to standard V4L2 sub-device structure
* @input: input selector for routing the signal
* @output: output selector for routing the signal
* @config: config value. Not used
*
 * If the index is valid, selects the requested input. Returns -EINVAL
 * if the input is not supported or no active signal is present on the
 * selected input.
*/
static int tvp514x_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
int err;
enum tvp514x_input input_sel;
enum tvp514x_output output_sel;
u8 sync_lock_status, lock_mask;
int try_count = LOCK_RETRY_COUNT;
if ((input >= INPUT_INVALID) ||
(output >= OUTPUT_INVALID))
/* Index out of bound */
return -EINVAL;
/*
* For the sequence streamon -> streamoff and again s_input
* it fails to lock the signal, since streamoff puts TVP514x
* into power off state which leads to failure in sub-sequent s_input.
*
* So power up the TVP514x device here, since it is important to lock
* the signal at this stage.
*/
if (!decoder->streaming)
tvp514x_s_stream(sd, 1);
input_sel = input;
output_sel = output;
err = tvp514x_write_reg(sd, REG_INPUT_SEL, input_sel);
if (err)
return err;
output_sel |= tvp514x_read_reg(sd,
REG_OUTPUT_FORMATTER1) & 0x7;
err = tvp514x_write_reg(sd, REG_OUTPUT_FORMATTER1,
output_sel);
if (err)
return err;
decoder->tvp514x_regs[REG_INPUT_SEL].val = input_sel;
decoder->tvp514x_regs[REG_OUTPUT_FORMATTER1].val = output_sel;
/* Clear status */
msleep(LOCK_RETRY_DELAY);
err =
tvp514x_write_reg(sd, REG_CLEAR_LOST_LOCK, 0x01);
if (err)
return err;
switch (input_sel) {
case INPUT_CVBS_VI1A:
case INPUT_CVBS_VI1B:
case INPUT_CVBS_VI1C:
case INPUT_CVBS_VI2A:
case INPUT_CVBS_VI2B:
case INPUT_CVBS_VI2C:
case INPUT_CVBS_VI3A:
case INPUT_CVBS_VI3B:
case INPUT_CVBS_VI3C:
case INPUT_CVBS_VI4A:
lock_mask = STATUS_CLR_SUBCAR_LOCK_BIT |
STATUS_HORZ_SYNC_LOCK_BIT |
STATUS_VIRT_SYNC_LOCK_BIT;
break;
case INPUT_SVIDEO_VI2A_VI1A:
case INPUT_SVIDEO_VI2B_VI1B:
case INPUT_SVIDEO_VI2C_VI1C:
case INPUT_SVIDEO_VI2A_VI3A:
case INPUT_SVIDEO_VI2B_VI3B:
case INPUT_SVIDEO_VI2C_VI3C:
case INPUT_SVIDEO_VI4A_VI1A:
case INPUT_SVIDEO_VI4A_VI1B:
case INPUT_SVIDEO_VI4A_VI1C:
case INPUT_SVIDEO_VI4A_VI3A:
case INPUT_SVIDEO_VI4A_VI3B:
case INPUT_SVIDEO_VI4A_VI3C:
lock_mask = STATUS_HORZ_SYNC_LOCK_BIT |
STATUS_VIRT_SYNC_LOCK_BIT;
break;
/* Need to add other interfaces*/
default:
return -EINVAL;
}
while (try_count-- > 0) {
/* Allow decoder to sync up with new input */
msleep(LOCK_RETRY_DELAY);
sync_lock_status = tvp514x_read_reg(sd,
REG_STATUS1);
if (lock_mask == (sync_lock_status & lock_mask))
/* Input detected */
break;
}
if (try_count < 0)
return -EINVAL;
decoder->input = input;
decoder->output = output;
v4l2_dbg(1, debug, sd, "Input set to: %d\n", input_sel);
return 0;
}
/**
* tvp514x_s_ctrl() - V4L2 decoder interface handler for s_ctrl
* @ctrl: pointer to v4l2_ctrl structure
*
* If the requested control is supported, sets the control's current
* value in HW. Otherwise, returns -EINVAL if the control is not supported.
*/
static int tvp514x_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
struct tvp514x_decoder *decoder = to_decoder(sd);
int err = -EINVAL, value;
value = ctrl->val;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
err = tvp514x_write_reg(sd, REG_BRIGHTNESS, value);
if (!err)
decoder->tvp514x_regs[REG_BRIGHTNESS].val = value;
break;
case V4L2_CID_CONTRAST:
err = tvp514x_write_reg(sd, REG_CONTRAST, value);
if (!err)
decoder->tvp514x_regs[REG_CONTRAST].val = value;
break;
case V4L2_CID_SATURATION:
err = tvp514x_write_reg(sd, REG_SATURATION, value);
if (!err)
decoder->tvp514x_regs[REG_SATURATION].val = value;
break;
case V4L2_CID_HUE:
if (value == 180)
value = 0x7F;
else if (value == -180)
value = 0x80;
err = tvp514x_write_reg(sd, REG_HUE, value);
if (!err)
decoder->tvp514x_regs[REG_HUE].val = value;
break;
case V4L2_CID_AUTOGAIN:
err = tvp514x_write_reg(sd, REG_AFE_GAIN_CTRL, value ? 0x0f : 0x0c);
if (!err)
decoder->tvp514x_regs[REG_AFE_GAIN_CTRL].val = value;
break;
}
v4l2_dbg(1, debug, sd, "Set Control: ID - %d - %d\n",
ctrl->id, ctrl->val);
return err;
}
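/*
 * Illustrative note on the hue handling above: the control is created
 * below with a -180..180 range in steps of 180, so only -180, 0 and 180
 * ever reach this handler; +180 is mapped to register value 0x7F, -180
 * to 0x80, and 0 is written unchanged.
 */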
/**
* tvp514x_enum_mbus_fmt() - V4L2 decoder interface handler for enum_mbus_fmt
* @sd: pointer to standard V4L2 sub-device structure
* @index: index of pixelcode to retrieve
* @code: receives the pixelcode
*
* Enumerates supported mediabus formats
*/
static int
tvp514x_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
enum v4l2_mbus_pixelcode *code)
{
if (index)
return -EINVAL;
*code = V4L2_MBUS_FMT_YUYV10_2X10;
return 0;
}
/**
 * tvp514x_mbus_fmt() - V4L2 decoder interface handler for try/s/g_mbus_fmt
* @sd: pointer to standard V4L2 sub-device structure
* @f: pointer to the mediabus format structure
*
* Negotiates the image capture size and mediabus format.
*/
static int
tvp514x_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *f)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
enum tvp514x_std current_std;
if (f == NULL)
return -EINVAL;
/* Calculate height and width based on current standard */
current_std = decoder->current_std;
f->code = V4L2_MBUS_FMT_YUYV10_2X10;
f->width = decoder->std_list[current_std].width;
f->height = decoder->std_list[current_std].height;
f->field = V4L2_FIELD_INTERLACED;
f->colorspace = V4L2_COLORSPACE_SMPTE170M;
v4l2_dbg(1, debug, sd, "MBUS_FMT: Width - %d, Height - %d\n",
f->width, f->height);
return 0;
}
/**
* tvp514x_g_parm() - V4L2 decoder interface handler for g_parm
* @sd: pointer to standard V4L2 sub-device structure
* @a: pointer to standard V4L2 VIDIOC_G_PARM ioctl structure
*
* Returns the decoder's video CAPTURE parameters.
*/
static int
tvp514x_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
struct v4l2_captureparm *cparm;
enum tvp514x_std current_std;
if (a == NULL)
return -EINVAL;
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
/* only capture is supported */
return -EINVAL;
/* get the current standard */
current_std = decoder->current_std;
cparm = &a->parm.capture;
cparm->capability = V4L2_CAP_TIMEPERFRAME;
cparm->timeperframe =
decoder->std_list[current_std].standard.frameperiod;
return 0;
}
/**
* tvp514x_s_parm() - V4L2 decoder interface handler for s_parm
* @sd: pointer to standard V4L2 sub-device structure
* @a: pointer to standard V4L2 VIDIOC_S_PARM ioctl structure
*
* Configures the decoder to use the input parameters, if possible. If
* not possible, returns the appropriate error code.
*/
static int
tvp514x_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
struct v4l2_fract *timeperframe;
enum tvp514x_std current_std;
if (a == NULL)
return -EINVAL;
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
/* only capture is supported */
return -EINVAL;
timeperframe = &a->parm.capture.timeperframe;
/* get the current standard */
current_std = decoder->current_std;
*timeperframe =
decoder->std_list[current_std].standard.frameperiod;
return 0;
}
/**
* tvp514x_s_stream() - V4L2 decoder i/f handler for s_stream
* @sd: pointer to standard V4L2 sub-device structure
* @enable: streaming enable or disable
*
* Sets streaming to enable or disable, if possible.
*/
static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable)
{
int err = 0;
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tvp514x_decoder *decoder = to_decoder(sd);
if (decoder->streaming == enable)
return 0;
switch (enable) {
case 0:
{
/* Power Down Sequence */
err = tvp514x_write_reg(sd, REG_OPERATION_MODE, 0x01);
if (err) {
v4l2_err(sd, "Unable to turn off decoder\n");
return err;
}
decoder->streaming = enable;
break;
}
case 1:
{
struct tvp514x_reg *int_seq = (struct tvp514x_reg *)
client->driver->id_table->driver_data;
/* Power Up Sequence */
err = tvp514x_write_regs(sd, int_seq);
if (err) {
v4l2_err(sd, "Unable to turn on decoder\n");
return err;
}
/* Detect if not already detected */
err = tvp514x_detect(sd, decoder);
if (err) {
v4l2_err(sd, "Unable to detect decoder\n");
return err;
}
err = tvp514x_configure(sd, decoder);
if (err) {
v4l2_err(sd, "Unable to configure decoder\n");
return err;
}
decoder->streaming = enable;
break;
}
default:
err = -ENODEV;
break;
}
return err;
}
static const struct v4l2_ctrl_ops tvp514x_ctrl_ops = {
.s_ctrl = tvp514x_s_ctrl,
};
static const struct v4l2_subdev_core_ops tvp514x_core_ops = {
.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
.g_ctrl = v4l2_subdev_g_ctrl,
.s_ctrl = v4l2_subdev_s_ctrl,
.queryctrl = v4l2_subdev_queryctrl,
.querymenu = v4l2_subdev_querymenu,
.s_std = tvp514x_s_std,
};
static const struct v4l2_subdev_video_ops tvp514x_video_ops = {
.s_routing = tvp514x_s_routing,
.querystd = tvp514x_querystd,
.enum_mbus_fmt = tvp514x_enum_mbus_fmt,
.g_mbus_fmt = tvp514x_mbus_fmt,
.try_mbus_fmt = tvp514x_mbus_fmt,
.s_mbus_fmt = tvp514x_mbus_fmt,
.g_parm = tvp514x_g_parm,
.s_parm = tvp514x_s_parm,
.s_stream = tvp514x_s_stream,
};
static const struct v4l2_subdev_ops tvp514x_ops = {
.core = &tvp514x_core_ops,
.video = &tvp514x_video_ops,
};
static struct tvp514x_decoder tvp514x_dev = {
.streaming = 0,
.current_std = STD_NTSC_MJ,
.std_list = tvp514x_std_list,
.num_stds = ARRAY_SIZE(tvp514x_std_list),
};
/**
* tvp514x_probe() - decoder driver i2c probe handler
* @client: i2c driver client device structure
* @id: i2c driver id table
*
* Register decoder as an i2c client device and V4L2
* device.
*/
static int
tvp514x_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct tvp514x_decoder *decoder;
struct v4l2_subdev *sd;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
if (!client->dev.platform_data) {
v4l2_err(client, "No platform data!!\n");
return -ENODEV;
}
decoder = kzalloc(sizeof(*decoder), GFP_KERNEL);
if (!decoder)
return -ENOMEM;
/* Initialize the tvp514x_decoder with default configuration */
*decoder = tvp514x_dev;
/* Copy default register configuration */
memcpy(decoder->tvp514x_regs, tvp514x_reg_list_default,
sizeof(tvp514x_reg_list_default));
/* Copy board specific information here */
decoder->pdata = client->dev.platform_data;
/**
* Fetch platform specific data, and configure the
* tvp514x_reg_list[] accordingly. Since this is one
* time configuration, no need to preserve.
*/
decoder->tvp514x_regs[REG_OUTPUT_FORMATTER2].val |=
(decoder->pdata->clk_polarity << 1);
decoder->tvp514x_regs[REG_SYNC_CONTROL].val |=
((decoder->pdata->hs_polarity << 2) |
(decoder->pdata->vs_polarity << 3));
/* Set default standard to auto */
decoder->tvp514x_regs[REG_VIDEO_STD].val =
VIDEO_STD_AUTO_SWITCH_BIT;
/* Register with V4L2 layer as slave device */
sd = &decoder->sd;
v4l2_i2c_subdev_init(sd, client, &tvp514x_ops);
v4l2_ctrl_handler_init(&decoder->hdl, 5);
v4l2_ctrl_new_std(&decoder->hdl, &tvp514x_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
v4l2_ctrl_new_std(&decoder->hdl, &tvp514x_ctrl_ops,
V4L2_CID_CONTRAST, 0, 255, 1, 128);
v4l2_ctrl_new_std(&decoder->hdl, &tvp514x_ctrl_ops,
V4L2_CID_SATURATION, 0, 255, 1, 128);
v4l2_ctrl_new_std(&decoder->hdl, &tvp514x_ctrl_ops,
V4L2_CID_HUE, -180, 180, 180, 0);
v4l2_ctrl_new_std(&decoder->hdl, &tvp514x_ctrl_ops,
V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
sd->ctrl_handler = &decoder->hdl;
if (decoder->hdl.error) {
int err = decoder->hdl.error;
v4l2_ctrl_handler_free(&decoder->hdl);
kfree(decoder);
return err;
}
v4l2_ctrl_handler_setup(&decoder->hdl);
v4l2_info(sd, "%s decoder driver registered !!\n", sd->name);
return 0;
}
/**
* tvp514x_remove() - decoder driver i2c remove handler
* @client: i2c driver client device structure
*
* Unregister decoder as an i2c client device and V4L2
* device. Complement of tvp514x_probe().
*/
static int tvp514x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tvp514x_decoder *decoder = to_decoder(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&decoder->hdl);
kfree(decoder);
return 0;
}
/* TVP5146 Init/Power on Sequence */
static const struct tvp514x_reg tvp5146_init_reg_seq[] = {
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS1, 0x02},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS2, 0x00},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS3, 0x80},
{TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x01},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS1, 0x60},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS2, 0x00},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS3, 0xB0},
{TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x01},
{TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x00},
{TOK_WRITE, REG_OPERATION_MODE, 0x01},
{TOK_WRITE, REG_OPERATION_MODE, 0x00},
{TOK_TERM, 0, 0},
};
/* TVP5147 Init/Power on Sequence */
static const struct tvp514x_reg tvp5147_init_reg_seq[] = {
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS1, 0x02},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS2, 0x00},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS3, 0x80},
{TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x01},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS1, 0x60},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS2, 0x00},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS3, 0xB0},
{TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x01},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS1, 0x16},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS2, 0x00},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS3, 0xA0},
{TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x16},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS1, 0x60},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS2, 0x00},
{TOK_WRITE, REG_VBUS_ADDRESS_ACCESS3, 0xB0},
{TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x00},
{TOK_WRITE, REG_OPERATION_MODE, 0x01},
{TOK_WRITE, REG_OPERATION_MODE, 0x00},
{TOK_TERM, 0, 0},
};
/* TVP5146M2/TVP5147M1 Init/Power on Sequence */
static const struct tvp514x_reg tvp514xm_init_reg_seq[] = {
{TOK_WRITE, REG_OPERATION_MODE, 0x01},
{TOK_WRITE, REG_OPERATION_MODE, 0x00},
{TOK_TERM, 0, 0},
};
/**
* I2C Device Table -
*
* name - Name of the actual device/chip.
* driver_data - Driver data
*/
static const struct i2c_device_id tvp514x_id[] = {
{"tvp5146", (unsigned long)tvp5146_init_reg_seq},
{"tvp5146m2", (unsigned long)tvp514xm_init_reg_seq},
{"tvp5147", (unsigned long)tvp5147_init_reg_seq},
{"tvp5147m1", (unsigned long)tvp514xm_init_reg_seq},
{},
};
MODULE_DEVICE_TABLE(i2c, tvp514x_id);
static struct i2c_driver tvp514x_driver = {
.driver = {
.owner = THIS_MODULE,
.name = TVP514X_MODULE_NAME,
},
.probe = tvp514x_probe,
.remove = tvp514x_remove,
.id_table = tvp514x_id,
};
static int __init tvp514x_init(void)
{
return i2c_add_driver(&tvp514x_driver);
}
static void __exit tvp514x_exit(void)
{
i2c_del_driver(&tvp514x_driver);
}
module_init(tvp514x_init);
module_exit(tvp514x_exit);
| gpl-2.0 |
jwhitham/ppc_linux | arch/mips/jz4740/prom.c | 3428 | 1508 | /*
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
* JZ4740 SoC prom code
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/serial_reg.h>
#include <asm/bootinfo.h>
#include <asm/mach-jz4740/base.h>
static __init void jz4740_init_cmdline(int argc, char *argv[])
{
unsigned int count = COMMAND_LINE_SIZE - 1;
int i;
char *dst = &(arcs_cmdline[0]);
char *src;
for (i = 1; i < argc && count; ++i) {
src = argv[i];
while (*src && count) {
*dst++ = *src++;
--count;
}
*dst++ = ' ';
}
if (i > 1)
--dst;
*dst = 0;
}
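/*
 * Illustrative example (made-up bootloader arguments): with
 * argv[] = { "kernel", "console=ttyS0,57600", "mem=64M" } the loop
 * above skips argv[0] and concatenates the rest into arcs_cmdline as
 * "console=ttyS0,57600 mem=64M", truncating once COMMAND_LINE_SIZE - 1
 * characters have been copied.
 */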
void __init prom_init(void)
{
jz4740_init_cmdline((int)fw_arg0, (char **)fw_arg1);
mips_machtype = MACH_INGENIC_JZ4740;
}
void __init prom_free_prom_memory(void)
{
}
#define UART_REG(_reg) ((void __iomem *)CKSEG1ADDR(JZ4740_UART0_BASE_ADDR + (_reg << 2)))
void prom_putchar(char c)
{
uint8_t lsr;
do {
lsr = readb(UART_REG(UART_LSR));
} while ((lsr & UART_LSR_TEMT) == 0);
writeb(c, UART_REG(UART_TX));
}
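/*
 * Note on the early console path above: each character is pushed out by
 * polling the line status register until the transmitter is completely
 * empty (UART_LSR_TEMT), so the output needs no interrupts or driver
 * state -- slow, but exactly what is wanted this early in boot.
 */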
| gpl-2.0 |
jsr-d10/android_kernel_jsr_msm8226 | drivers/media/rc/user-rc-input.c | 3428 | 6543 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#include <media/user-rc-input.h>
#define MAX_RC_DEVICES 1
#define USER_RC_INPUT_DEV_NAME "user-rc-input"
#define USER_RC_INPUT_DRV_NAME "rc-user-input"
struct user_rc_input_dev {
struct cdev rc_input_cdev;
struct class *rc_input_class;
struct device *rc_input_dev;
struct rc_dev *rcdev;
dev_t rc_input_base_dev;
struct device *dev;
int in_use;
};
static int user_rc_input_open(struct inode *inode, struct file *file)
{
struct cdev *input_cdev = inode->i_cdev;
struct user_rc_input_dev *input_dev =
container_of(input_cdev, struct user_rc_input_dev, rc_input_cdev);
if (input_dev->in_use) {
dev_err(input_dev->dev,
"Device is already open..only one instance is allowed\n");
return -EBUSY;
}
input_dev->in_use++;
file->private_data = input_dev;
return 0;
}
static int user_rc_input_release(struct inode *inode, struct file *file)
{
struct user_rc_input_dev *input_dev = file->private_data;
input_dev->in_use--;
return 0;
}
static ssize_t user_rc_input_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
int ret;
struct user_rc_input_dev *input_dev = file->private_data;
__u8 *buf;
buf = kmalloc(count * sizeof(__u8), GFP_KERNEL);
if (!buf) {
dev_err(input_dev->dev,
"kmalloc failed...Insufficient memory\n");
ret = -ENOMEM;
goto out;
}
if (copy_from_user(buf, buffer, count)) {
dev_err(input_dev->dev, "Copy from user failed\n");
ret = -EFAULT;
goto out_free;
}
switch (buf[0]) {
case USER_CONTROL_PRESSED:
dev_dbg(input_dev->dev, "user controlled"
" pressed 0x%x\n", buf[1]);
rc_keydown(input_dev->rcdev, buf[1], 0);
break;
case USER_CONTROL_REPEATED:
dev_dbg(input_dev->dev, "user controlled"
" repeated 0x%x\n", buf[1]);
rc_repeat(input_dev->rcdev);
break;
case USER_CONTROL_RELEASED:
dev_dbg(input_dev->dev, "user controlled"
" released 0x%x\n", buf[1]);
rc_keyup(input_dev->rcdev);
break;
	}
	/* on success, report the whole buffer as consumed */
	ret = count;
out_free:
kfree(buf);
out:
return ret;
}
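/*
 * User-space usage sketch (hypothetical, assuming the character device
 * created in probe below): one event is delivered per write, the first
 * byte selecting USER_CONTROL_PRESSED/REPEATED/RELEASED and the second
 * carrying the scancode, e.g.
 *
 *   __u8 ev[2] = { USER_CONTROL_PRESSED, 0x2a };
 *   write(fd, ev, sizeof(ev));
 */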
const struct file_operations fops = {
.owner = THIS_MODULE,
.open = user_rc_input_open,
.write = user_rc_input_write,
.release = user_rc_input_release,
};
static int __devinit user_rc_input_probe(struct platform_device *pdev)
{
struct user_rc_input_dev *user_rc_dev;
struct rc_dev *rcdev;
int retval;
user_rc_dev = kzalloc(sizeof(struct user_rc_input_dev), GFP_KERNEL);
if (!user_rc_dev)
return -ENOMEM;
user_rc_dev->rc_input_class = class_create(THIS_MODULE,
"user-rc-input-loopback");
if (IS_ERR(user_rc_dev->rc_input_class)) {
retval = PTR_ERR(user_rc_dev->rc_input_class);
goto err;
}
retval = alloc_chrdev_region(&user_rc_dev->rc_input_base_dev, 0,
MAX_RC_DEVICES, USER_RC_INPUT_DEV_NAME);
if (retval) {
dev_err(&pdev->dev,
"alloc_chrdev_region failed\n");
goto alloc_chrdev_err;
}
dev_info(&pdev->dev, "User space report key event input "
"loopback driver registered, "
"major %d\n", MAJOR(user_rc_dev->rc_input_base_dev));
cdev_init(&user_rc_dev->rc_input_cdev, &fops);
retval = cdev_add(&user_rc_dev->rc_input_cdev,
user_rc_dev->rc_input_base_dev,
MAX_RC_DEVICES);
if (retval) {
dev_err(&pdev->dev, "cdev_add failed\n");
goto cdev_add_err;
}
user_rc_dev->rc_input_dev =
device_create(user_rc_dev->rc_input_class,
NULL,
MKDEV(MAJOR(user_rc_dev->rc_input_base_dev),
0), NULL, "user-rc-input-dev%d", 0);
if (IS_ERR(user_rc_dev->rc_input_dev)) {
retval = PTR_ERR(user_rc_dev->rc_input_dev);
dev_err(&pdev->dev, "device_create failed\n");
goto device_create_err;
}
rcdev = rc_allocate_device();
if (!rcdev) {
dev_err(&pdev->dev, "failed to allocate rc device");
retval = -ENOMEM;
goto err_allocate_device;
}
rcdev->driver_type = RC_DRIVER_SCANCODE;
rcdev->allowed_protos = RC_TYPE_OTHER;
rcdev->input_name = USER_RC_INPUT_DEV_NAME;
rcdev->input_id.bustype = BUS_HOST;
rcdev->driver_name = USER_RC_INPUT_DRV_NAME;
rcdev->map_name = RC_MAP_UE_RF4CE;
retval = rc_register_device(rcdev);
if (retval < 0) {
dev_err(&pdev->dev, "failed to register rc device\n");
goto rc_register_err;
}
user_rc_dev->rcdev = rcdev;
user_rc_dev->dev = &pdev->dev;
platform_set_drvdata(pdev, user_rc_dev);
user_rc_dev->in_use = 0;
return 0;
rc_register_err:
rc_free_device(rcdev);
err_allocate_device:
device_destroy(user_rc_dev->rc_input_class,
MKDEV(MAJOR(user_rc_dev->rc_input_base_dev), 0));
device_create_err:
	cdev_del(&user_rc_dev->rc_input_cdev);
cdev_add_err:
	unregister_chrdev_region(user_rc_dev->rc_input_base_dev,
				MAX_RC_DEVICES);
alloc_chrdev_err:
class_destroy(user_rc_dev->rc_input_class);
err:
kfree(user_rc_dev);
return retval;
}
static int __devexit user_rc_input_remove(struct platform_device *pdev)
{
struct user_rc_input_dev *user_rc_dev = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
rc_free_device(user_rc_dev->rcdev);
device_destroy(user_rc_dev->rc_input_class,
MKDEV(MAJOR(user_rc_dev->rc_input_base_dev), 0));
unregister_chrdev_region(user_rc_dev->rc_input_base_dev,
MAX_RC_DEVICES);
cdev_del(&user_rc_dev->rc_input_cdev);
class_destroy(user_rc_dev->rc_input_class);
kfree(user_rc_dev);
return 0;
}
static struct platform_driver user_rc_input_driver = {
.probe = user_rc_input_probe,
.remove = __devexit_p(user_rc_input_remove),
.driver = {
.name = USER_RC_INPUT_DRV_NAME,
.owner = THIS_MODULE,
},
};
static int __init user_rc_input_init(void)
{
return platform_driver_register(&user_rc_input_driver);
}
module_init(user_rc_input_init);
static void __exit user_rc_input_exit(void)
{
platform_driver_unregister(&user_rc_input_driver);
}
module_exit(user_rc_input_exit);
MODULE_DESCRIPTION("User RC Input driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
embeddedarm/linux-2.6.35-ts4800 | drivers/misc/c2port/core.c | 4196 | 22013 | /*
* Silicon Labs C2 port core Linux support
*
* Copyright (c) 2007 Rodolfo Giometti <giometti@linux.it>
* Copyright (c) 2007 Eurotech S.p.A. <info@eurotech.it>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/c2port.h>
#define DRIVER_NAME "c2port"
#define DRIVER_VERSION "0.51.0"
static DEFINE_SPINLOCK(c2port_idr_lock);
static DEFINE_IDR(c2port_idr);
/*
* Local variables
*/
static struct class *c2port_class;
/*
* C2 registers & commands defines
*/
/* C2 registers */
#define C2PORT_DEVICEID 0x00
#define C2PORT_REVID 0x01
#define C2PORT_FPCTL 0x02
#define C2PORT_FPDAT 0xB4
/* C2 interface commands */
#define C2PORT_GET_VERSION 0x01
#define C2PORT_DEVICE_ERASE 0x03
#define C2PORT_BLOCK_READ 0x06
#define C2PORT_BLOCK_WRITE 0x07
#define C2PORT_PAGE_ERASE 0x08
/* C2 status return codes */
#define C2PORT_INVALID_COMMAND 0x00
#define C2PORT_COMMAND_FAILED 0x02
#define C2PORT_COMMAND_OK 0x0d
/*
* C2 port low level signal managements
*/
static void c2port_reset(struct c2port_device *dev)
{
struct c2port_ops *ops = dev->ops;
/* To reset the device we have to keep the clock line low for at least
* 20us.
*/
local_irq_disable();
ops->c2ck_set(dev, 0);
udelay(25);
ops->c2ck_set(dev, 1);
local_irq_enable();
udelay(1);
}
static void c2port_strobe_ck(struct c2port_device *dev)
{
struct c2port_ops *ops = dev->ops;
/* During hi-low-hi transition we disable local IRQs to avoid
 * interruptions since the C2 port specification says that it must be
* shorter than 5us, otherwise the microcontroller may consider
* it as a reset signal!
*/
local_irq_disable();
ops->c2ck_set(dev, 0);
udelay(1);
ops->c2ck_set(dev, 1);
local_irq_enable();
udelay(1);
}
/*
* C2 port basic functions
*/
static void c2port_write_ar(struct c2port_device *dev, u8 addr)
{
struct c2port_ops *ops = dev->ops;
int i;
/* START field */
c2port_strobe_ck(dev);
/* INS field (11b, LSB first) */
ops->c2d_dir(dev, 0);
ops->c2d_set(dev, 1);
c2port_strobe_ck(dev);
ops->c2d_set(dev, 1);
c2port_strobe_ck(dev);
/* ADDRESS field */
for (i = 0; i < 8; i++) {
ops->c2d_set(dev, addr & 0x01);
c2port_strobe_ck(dev);
addr >>= 1;
}
/* STOP field */
ops->c2d_dir(dev, 1);
c2port_strobe_ck(dev);
}
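/*
 * For reference, the frame generated above is the C2 "Address Write"
 * instruction: one START strobe, the two-bit INS field (11b, LSB
 * first), the 8-bit address (LSB first) and a closing STOP strobe with
 * C2D released back to an input.  The read/data helpers below differ
 * only in the INS field and in the direction of the payload bits.
 */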
static int c2port_read_ar(struct c2port_device *dev, u8 *addr)
{
struct c2port_ops *ops = dev->ops;
int i;
/* START field */
c2port_strobe_ck(dev);
/* INS field (10b, LSB first) */
ops->c2d_dir(dev, 0);
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
ops->c2d_set(dev, 1);
c2port_strobe_ck(dev);
/* ADDRESS field */
ops->c2d_dir(dev, 1);
*addr = 0;
for (i = 0; i < 8; i++) {
*addr >>= 1; /* shift in 8-bit ADDRESS field LSB first */
c2port_strobe_ck(dev);
if (ops->c2d_get(dev))
*addr |= 0x80;
}
/* STOP field */
c2port_strobe_ck(dev);
return 0;
}
static int c2port_write_dr(struct c2port_device *dev, u8 data)
{
struct c2port_ops *ops = dev->ops;
int timeout, i;
/* START field */
c2port_strobe_ck(dev);
/* INS field (01b, LSB first) */
ops->c2d_dir(dev, 0);
ops->c2d_set(dev, 1);
c2port_strobe_ck(dev);
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
/* LENGTH field (00b, LSB first -> 1 byte) */
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
/* DATA field */
for (i = 0; i < 8; i++) {
ops->c2d_set(dev, data & 0x01);
c2port_strobe_ck(dev);
data >>= 1;
}
/* WAIT field */
ops->c2d_dir(dev, 1);
timeout = 20;
do {
c2port_strobe_ck(dev);
if (ops->c2d_get(dev))
break;
udelay(1);
} while (--timeout > 0);
if (timeout == 0)
return -EIO;
/* STOP field */
c2port_strobe_ck(dev);
return 0;
}
static int c2port_read_dr(struct c2port_device *dev, u8 *data)
{
struct c2port_ops *ops = dev->ops;
int timeout, i;
/* START field */
c2port_strobe_ck(dev);
/* INS field (00b, LSB first) */
ops->c2d_dir(dev, 0);
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
/* LENGTH field (00b, LSB first -> 1 byte) */
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
/* WAIT field */
ops->c2d_dir(dev, 1);
timeout = 20;
do {
c2port_strobe_ck(dev);
if (ops->c2d_get(dev))
break;
udelay(1);
} while (--timeout > 0);
if (timeout == 0)
return -EIO;
/* DATA field */
*data = 0;
for (i = 0; i < 8; i++) {
*data >>= 1; /* shift in 8-bit DATA field LSB first */
c2port_strobe_ck(dev);
if (ops->c2d_get(dev))
*data |= 0x80;
}
/* STOP field */
c2port_strobe_ck(dev);
return 0;
}
static int c2port_poll_in_busy(struct c2port_device *dev)
{
u8 addr;
int ret, timeout = 20;
do {
ret = (c2port_read_ar(dev, &addr));
if (ret < 0)
return -EIO;
if (!(addr & 0x02))
break;
udelay(1);
} while (--timeout > 0);
if (timeout == 0)
return -EIO;
return 0;
}
static int c2port_poll_out_ready(struct c2port_device *dev)
{
u8 addr;
int ret, timeout = 10000; /* a flash erase needs a long time... */
do {
ret = (c2port_read_ar(dev, &addr));
if (ret < 0)
return -EIO;
if (addr & 0x01)
break;
udelay(1);
} while (--timeout > 0);
if (timeout == 0)
return -EIO;
return 0;
}
/*
* sysfs methods
*/
static ssize_t c2port_show_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", c2dev->name);
}
static ssize_t c2port_show_flash_blocks_num(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
struct c2port_ops *ops = c2dev->ops;
return sprintf(buf, "%d\n", ops->blocks_num);
}
static ssize_t c2port_show_flash_block_size(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
struct c2port_ops *ops = c2dev->ops;
return sprintf(buf, "%d\n", ops->block_size);
}
static ssize_t c2port_show_flash_size(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
struct c2port_ops *ops = c2dev->ops;
return sprintf(buf, "%d\n", ops->blocks_num * ops->block_size);
}
static ssize_t c2port_show_access(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", c2dev->access);
}
static ssize_t c2port_store_access(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
struct c2port_ops *ops = c2dev->ops;
int status, ret;
ret = sscanf(buf, "%d", &status);
if (ret != 1)
return -EINVAL;
mutex_lock(&c2dev->mutex);
c2dev->access = !!status;
/* If access is "on", the clock line should be HIGH _before_ it is
 * configured as an output, and the data line should be set as an
 * input anyway */
if (c2dev->access)
ops->c2ck_set(c2dev, 1);
ops->access(c2dev, c2dev->access);
if (c2dev->access)
ops->c2d_dir(c2dev, 1);
mutex_unlock(&c2dev->mutex);
return count;
}
static ssize_t c2port_store_reset(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
/* Check the device access status */
if (!c2dev->access)
return -EBUSY;
mutex_lock(&c2dev->mutex);
c2port_reset(c2dev);
c2dev->flash_access = 0;
mutex_unlock(&c2dev->mutex);
return count;
}
static ssize_t __c2port_show_dev_id(struct c2port_device *dev, char *buf)
{
u8 data;
int ret;
/* Select DEVICEID register for C2 data register accesses */
c2port_write_ar(dev, C2PORT_DEVICEID);
/* Read and return the device ID register */
ret = c2port_read_dr(dev, &data);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", data);
}
static ssize_t c2port_show_dev_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
ssize_t ret;
/* Check the device access status */
if (!c2dev->access)
return -EBUSY;
mutex_lock(&c2dev->mutex);
ret = __c2port_show_dev_id(c2dev, buf);
mutex_unlock(&c2dev->mutex);
if (ret < 0)
dev_err(dev, "cannot read from %s\n", c2dev->name);
return ret;
}
static ssize_t __c2port_show_rev_id(struct c2port_device *dev, char *buf)
{
u8 data;
int ret;
/* Select REVID register for C2 data register accesses */
c2port_write_ar(dev, C2PORT_REVID);
/* Read and return the revision ID register */
ret = c2port_read_dr(dev, &data);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", data);
}
static ssize_t c2port_show_rev_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
ssize_t ret;
/* Check the device access status */
if (!c2dev->access)
return -EBUSY;
mutex_lock(&c2dev->mutex);
ret = __c2port_show_rev_id(c2dev, buf);
mutex_unlock(&c2dev->mutex);
if (ret < 0)
dev_err(c2dev->dev, "cannot read from %s\n", c2dev->name);
return ret;
}
static ssize_t c2port_show_flash_access(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", c2dev->flash_access);
}
static ssize_t __c2port_store_flash_access(struct c2port_device *dev,
int status)
{
int ret;
/* Check the device access status */
if (!dev->access)
return -EBUSY;
dev->flash_access = !!status;
/* If flash_access is off we have nothing to do... */
if (dev->flash_access == 0)
return 0;
/* Target the C2 flash programming control register for C2 data
* register access */
c2port_write_ar(dev, C2PORT_FPCTL);
/* Write the first keycode to enable C2 Flash programming */
ret = c2port_write_dr(dev, 0x02);
if (ret < 0)
return ret;
/* Write the second keycode to enable C2 Flash programming */
ret = c2port_write_dr(dev, 0x01);
if (ret < 0)
return ret;
/* Delay for at least 20ms to ensure the target is ready for
* C2 flash programming */
mdelay(25);
return 0;
}
static ssize_t c2port_store_flash_access(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
int status;
ssize_t ret;
ret = sscanf(buf, "%d", &status);
if (ret != 1)
return -EINVAL;
mutex_lock(&c2dev->mutex);
ret = __c2port_store_flash_access(c2dev, status);
mutex_unlock(&c2dev->mutex);
if (ret < 0) {
dev_err(c2dev->dev, "cannot enable %s flash programming\n",
c2dev->name);
return ret;
}
return count;
}
static ssize_t __c2port_write_flash_erase(struct c2port_device *dev)
{
u8 status;
int ret;
/* Target the C2 flash programming data register for C2 data register
* access.
*/
c2port_write_ar(dev, C2PORT_FPDAT);
/* Send device erase command */
c2port_write_dr(dev, C2PORT_DEVICE_ERASE);
/* Wait for input acknowledge */
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Should check status before starting FLASH access sequence */
/* Wait for status information */
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
/* Read flash programming interface status */
ret = c2port_read_dr(dev, &status);
if (ret < 0)
return ret;
if (status != C2PORT_COMMAND_OK)
return -EBUSY;
/* Send a three-byte arming sequence to enable the device erase.
* If the sequence is not received correctly, the command will be
* ignored.
* Sequence is: 0xde, 0xad, 0xa5.
*/
c2port_write_dr(dev, 0xde);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
c2port_write_dr(dev, 0xad);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
c2port_write_dr(dev, 0xa5);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
return 0;
}
static ssize_t c2port_store_flash_erase(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
int ret;
/* Check the device and flash access status */
if (!c2dev->access || !c2dev->flash_access)
return -EBUSY;
mutex_lock(&c2dev->mutex);
ret = __c2port_write_flash_erase(c2dev);
mutex_unlock(&c2dev->mutex);
if (ret < 0) {
dev_err(c2dev->dev, "cannot erase %s flash\n", c2dev->name);
return ret;
}
return count;
}
static ssize_t __c2port_read_flash_data(struct c2port_device *dev,
char *buffer, loff_t offset, size_t count)
{
struct c2port_ops *ops = dev->ops;
u8 status, nread = 128;
int i, ret;
/* Check for flash end */
if (offset >= ops->block_size * ops->blocks_num)
return 0;
if (ops->block_size * ops->blocks_num - offset < nread)
nread = ops->block_size * ops->blocks_num - offset;
if (count < nread)
nread = count;
if (nread == 0)
return nread;
/* Target the C2 flash programming data register for C2 data register
* access */
c2port_write_ar(dev, C2PORT_FPDAT);
/* Send flash block read command */
c2port_write_dr(dev, C2PORT_BLOCK_READ);
/* Wait for input acknowledge */
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Should check status before starting FLASH access sequence */
/* Wait for status information */
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
/* Read flash programming interface status */
ret = c2port_read_dr(dev, &status);
if (ret < 0)
return ret;
if (status != C2PORT_COMMAND_OK)
return -EBUSY;
/* Send address high byte */
c2port_write_dr(dev, offset >> 8);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Send address low byte */
c2port_write_dr(dev, offset & 0x00ff);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Send address block size */
c2port_write_dr(dev, nread);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Should check status before reading FLASH block */
/* Wait for status information */
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
/* Read flash programming interface status */
ret = c2port_read_dr(dev, &status);
if (ret < 0)
return ret;
if (status != C2PORT_COMMAND_OK)
return -EBUSY;
/* Read flash block */
for (i = 0; i < nread; i++) {
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
ret = c2port_read_dr(dev, buffer+i);
if (ret < 0)
return ret;
}
return nread;
}
static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
struct c2port_device *c2dev =
dev_get_drvdata(container_of(kobj,
struct device, kobj));
ssize_t ret;
/* Check the device and flash access status */
if (!c2dev->access || !c2dev->flash_access)
return -EBUSY;
mutex_lock(&c2dev->mutex);
ret = __c2port_read_flash_data(c2dev, buffer, offset, count);
mutex_unlock(&c2dev->mutex);
if (ret < 0)
dev_err(c2dev->dev, "cannot read %s flash\n", c2dev->name);
return ret;
}
static ssize_t __c2port_write_flash_data(struct c2port_device *dev,
char *buffer, loff_t offset, size_t count)
{
struct c2port_ops *ops = dev->ops;
u8 status, nwrite = 128;
int i, ret;
if (nwrite > count)
nwrite = count;
if (ops->block_size * ops->blocks_num - offset < nwrite)
nwrite = ops->block_size * ops->blocks_num - offset;
/* Check for flash end */
if (offset >= ops->block_size * ops->blocks_num)
return -EINVAL;
/* Target the C2 flash programming data register for C2 data register
* access */
c2port_write_ar(dev, C2PORT_FPDAT);
/* Send flash block write command */
c2port_write_dr(dev, C2PORT_BLOCK_WRITE);
/* Wait for input acknowledge */
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Should check status before starting FLASH access sequence */
/* Wait for status information */
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
/* Read flash programming interface status */
ret = c2port_read_dr(dev, &status);
if (ret < 0)
return ret;
if (status != C2PORT_COMMAND_OK)
return -EBUSY;
/* Send address high byte */
c2port_write_dr(dev, offset >> 8);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Send address low byte */
c2port_write_dr(dev, offset & 0x00ff);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Send address block size */
c2port_write_dr(dev, nwrite);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Should check status before writing FLASH block */
/* Wait for status information */
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
/* Read flash programming interface status */
ret = c2port_read_dr(dev, &status);
if (ret < 0)
return ret;
if (status != C2PORT_COMMAND_OK)
return -EBUSY;
/* Write flash block */
for (i = 0; i < nwrite; i++) {
ret = c2port_write_dr(dev, *(buffer+i));
if (ret < 0)
return ret;
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
}
/* Wait for last flash write to complete */
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
return nwrite;
}
static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
struct c2port_device *c2dev =
dev_get_drvdata(container_of(kobj,
struct device, kobj));
int ret;
/* Check the device access status */
if (!c2dev->access || !c2dev->flash_access)
return -EBUSY;
mutex_lock(&c2dev->mutex);
ret = __c2port_write_flash_data(c2dev, buffer, offset, count);
mutex_unlock(&c2dev->mutex);
if (ret < 0)
dev_err(c2dev->dev, "cannot write %s flash\n", c2dev->name);
return ret;
}
/*
* Class attributes
*/
static struct device_attribute c2port_attrs[] = {
__ATTR(name, 0444, c2port_show_name, NULL),
__ATTR(flash_blocks_num, 0444, c2port_show_flash_blocks_num, NULL),
__ATTR(flash_block_size, 0444, c2port_show_flash_block_size, NULL),
__ATTR(flash_size, 0444, c2port_show_flash_size, NULL),
__ATTR(access, 0644, c2port_show_access, c2port_store_access),
__ATTR(reset, 0200, NULL, c2port_store_reset),
__ATTR(dev_id, 0444, c2port_show_dev_id, NULL),
__ATTR(rev_id, 0444, c2port_show_rev_id, NULL),
__ATTR(flash_access, 0644, c2port_show_flash_access,
c2port_store_flash_access),
__ATTR(flash_erase, 0200, NULL, c2port_store_flash_erase),
__ATTR_NULL,
};
static struct bin_attribute c2port_bin_attrs = {
.attr = {
.name = "flash_data",
.mode = 0644
},
.read = c2port_read_flash_data,
.write = c2port_write_flash_data,
/* .size is computed at run-time */
};
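/* Illustrative usage sketch (not part of the original driver): with the
* attributes above, a hypothetical device "c2port0" could typically be
* driven from the shell roughly like this:
*
* echo 1 > /sys/class/c2port/c2port0/access # enable the C2 interface
* cat /sys/class/c2port/c2port0/dev_id # read the device ID
* echo 1 > /sys/class/c2port/c2port0/flash_access # unlock flash access
* echo 1 > /sys/class/c2port/c2port0/flash_erase # erase the flash
* cat /sys/class/c2port/c2port0/flash_data > dump.bin # dump the flash
*
* The actual device name depends on the registration order at runtime. */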
/*
* Exported functions
*/
struct c2port_device *c2port_device_register(char *name,
struct c2port_ops *ops, void *devdata)
{
struct c2port_device *c2dev;
int id, ret;
if (unlikely(!ops) || unlikely(!ops->access) || \
unlikely(!ops->c2d_dir) || unlikely(!ops->c2ck_set) || \
unlikely(!ops->c2d_get) || unlikely(!ops->c2d_set))
return ERR_PTR(-EINVAL);
c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
if (unlikely(!c2dev))
return ERR_PTR(-ENOMEM);
kmemcheck_annotate_bitfield(c2dev, flags);
ret = idr_pre_get(&c2port_idr, GFP_KERNEL);
if (!ret) {
ret = -ENOMEM;
goto error_idr_get_new;
}
spin_lock_irq(&c2port_idr_lock);
ret = idr_get_new(&c2port_idr, c2dev, &id);
spin_unlock_irq(&c2port_idr_lock);
if (ret < 0)
goto error_idr_get_new;
c2dev->id = id;
c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
"c2port%d", id);
if (unlikely(IS_ERR(c2dev->dev))) {
ret = PTR_ERR(c2dev->dev);
goto error_device_create;
}
dev_set_drvdata(c2dev->dev, c2dev);
strncpy(c2dev->name, name, C2PORT_NAME_LEN);
c2dev->ops = ops;
mutex_init(&c2dev->mutex);
/* Create binary file */
c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
if (unlikely(ret))
goto error_device_create_bin_file;
/* By default C2 port access is off */
c2dev->access = c2dev->flash_access = 0;
ops->access(c2dev, 0);
dev_info(c2dev->dev, "C2 port %s added\n", name);
dev_info(c2dev->dev, "%s flash has %d blocks x %d bytes "
"(%d bytes total)\n",
name, ops->blocks_num, ops->block_size,
ops->blocks_num * ops->block_size);
return c2dev;
error_device_create_bin_file:
device_destroy(c2port_class, 0);
error_device_create:
spin_lock_irq(&c2port_idr_lock);
idr_remove(&c2port_idr, id);
spin_unlock_irq(&c2port_idr_lock);
error_idr_get_new:
kfree(c2dev);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(c2port_device_register);
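/* Illustrative sketch (not part of the original file): a board driver would
* typically fill a struct c2port_ops with its GPIO callbacks and register it
* as below. All my_board_* names are hypothetical placeholders, so the block
* is kept under #if 0. */
#if 0
static struct c2port_ops my_board_c2port_ops = {
.block_size = 512, /* flash geometry of the target MCU */
.blocks_num = 30,
.access = my_board_c2port_access,
.c2d_dir = my_board_c2port_c2d_dir,
.c2d_get = my_board_c2port_c2d_get,
.c2d_set = my_board_c2port_c2d_set,
.c2ck_set = my_board_c2port_c2ck_set,
};
static struct c2port_device *my_c2dev;
static int __init my_board_c2port_init(void)
{
my_c2dev = c2port_device_register("uc", &my_board_c2port_ops, NULL);
return IS_ERR(my_c2dev) ? PTR_ERR(my_c2dev) : 0;
}
#endif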
void c2port_device_unregister(struct c2port_device *c2dev)
{
if (!c2dev)
return;
dev_info(c2dev->dev, "C2 port %s removed\n", c2dev->name);
device_remove_bin_file(c2dev->dev, &c2port_bin_attrs);
spin_lock_irq(&c2port_idr_lock);
idr_remove(&c2port_idr, c2dev->id);
spin_unlock_irq(&c2port_idr_lock);
device_destroy(c2port_class, c2dev->id);
kfree(c2dev);
}
EXPORT_SYMBOL(c2port_device_unregister);
/*
* Module stuff
*/
static int __init c2port_init(void)
{
printk(KERN_INFO "Silicon Labs C2 port support v. " DRIVER_VERSION
" - (C) 2007 Rodolfo Giometti\n");
c2port_class = class_create(THIS_MODULE, "c2port");
if (IS_ERR(c2port_class)) {
printk(KERN_ERR "c2port: failed to allocate class\n");
return PTR_ERR(c2port_class);
}
c2port_class->dev_attrs = c2port_attrs;
return 0;
}
static void __exit c2port_exit(void)
{
class_destroy(c2port_class);
}
module_init(c2port_init);
module_exit(c2port_exit);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_DESCRIPTION("Silicon Labs C2 port support v. " DRIVER_VERSION);
MODULE_LICENSE("GPL");
| gpl-2.0 |
HarveyHunt/CI20_linux | fs/hfs/bfind.c | 4452 | 4653 | /*
* linux/fs/hfs/bfind.c
*
* Copyright (C) 2001
* Brad Boyer (flar@allandria.com)
* (C) 2003 Ardis Technologies <roman@ardistech.com>
*
* Search routines for btrees
*/
#include <linux/slab.h>
#include "btree.h"
int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
{
void *ptr;
fd->tree = tree;
fd->bnode = NULL;
ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
fd->search_key = ptr;
fd->key = ptr + tree->max_key_len + 2;
hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
tree->cnid, __builtin_return_address(0));
mutex_lock(&tree->tree_lock);
return 0;
}
void hfs_find_exit(struct hfs_find_data *fd)
{
hfs_bnode_put(fd->bnode);
kfree(fd->search_key);
hfs_dbg(BNODE_REFS, "find_exit: %d (%p)\n",
fd->tree->cnid, __builtin_return_address(0));
mutex_unlock(&fd->tree->tree_lock);
fd->tree = NULL;
}
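/* Illustrative usage sketch (not part of the original file): callers bracket
* a lookup with hfs_find_init()/hfs_find_exit(), fill fd.search_key in
* between and then call hfs_brec_find() or hfs_brec_read(), roughly:
*
* struct hfs_find_data fd;
* int res;
*
* res = hfs_find_init(tree, &fd);
* if (res)
* return res;
* // ... build fd.search_key for the wanted record ...
* res = hfs_brec_find(&fd);
* // ... on success use fd.bnode, fd.entryoffset and fd.entrylength ...
* hfs_find_exit(&fd);
*/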
/* Find the record in bnode that best matches key (not greater than...)*/
int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
{
int cmpval;
u16 off, len, keylen;
int rec;
int b, e;
int res;
b = 0;
e = bnode->num_recs - 1;
res = -ENOENT;
do {
rec = (e + b) / 2;
len = hfs_brec_lenoff(bnode, rec, &off);
keylen = hfs_brec_keylen(bnode, rec);
if (keylen == 0) {
res = -EINVAL;
goto fail;
}
hfs_bnode_read(bnode, fd->key, off, keylen);
cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
if (!cmpval) {
e = rec;
res = 0;
goto done;
}
if (cmpval < 0)
b = rec + 1;
else
e = rec - 1;
} while (b <= e);
if (rec != e && e >= 0) {
len = hfs_brec_lenoff(bnode, e, &off);
keylen = hfs_brec_keylen(bnode, e);
if (keylen == 0) {
res = -EINVAL;
goto fail;
}
hfs_bnode_read(bnode, fd->key, off, keylen);
}
done:
fd->record = e;
fd->keyoffset = off;
fd->keylength = keylen;
fd->entryoffset = off + keylen;
fd->entrylength = len - keylen;
fail:
return res;
}
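/* Worked example (illustrative, not part of the original file): with keys
* 10, 20 and 30 in a node, searching for 25 leaves fd->record pointing at
* the record whose key is 20 -- the closest key not greater than the search
* key -- while the return value stays -ENOENT because no exact match was
* found. */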
/* Traverse a B*Tree from the root to a leaf finding best fit to key */
/* Return allocated copy of node found, set recnum to best record */
int hfs_brec_find(struct hfs_find_data *fd)
{
struct hfs_btree *tree;
struct hfs_bnode *bnode;
u32 nidx, parent;
__be32 data;
int height, res;
tree = fd->tree;
if (fd->bnode)
hfs_bnode_put(fd->bnode);
fd->bnode = NULL;
nidx = tree->root;
if (!nidx)
return -ENOENT;
height = tree->depth;
res = 0;
parent = 0;
for (;;) {
bnode = hfs_bnode_find(tree, nidx);
if (IS_ERR(bnode)) {
res = PTR_ERR(bnode);
bnode = NULL;
break;
}
if (bnode->height != height)
goto invalid;
if (bnode->type != (--height ? HFS_NODE_INDEX : HFS_NODE_LEAF))
goto invalid;
bnode->parent = parent;
res = __hfs_brec_find(bnode, fd);
if (!height)
break;
if (fd->record < 0)
goto release;
parent = nidx;
hfs_bnode_read(bnode, &data, fd->entryoffset, 4);
nidx = be32_to_cpu(data);
hfs_bnode_put(bnode);
}
fd->bnode = bnode;
return res;
invalid:
pr_err("inconsistency in B*Tree (%d,%d,%d,%u,%u)\n",
height, bnode->height, bnode->type, nidx, parent);
res = -EIO;
release:
hfs_bnode_put(bnode);
return res;
}
int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len)
{
int res;
res = hfs_brec_find(fd);
if (res)
return res;
if (fd->entrylength > rec_len)
return -EINVAL;
hfs_bnode_read(fd->bnode, rec, fd->entryoffset, fd->entrylength);
return 0;
}
int hfs_brec_goto(struct hfs_find_data *fd, int cnt)
{
struct hfs_btree *tree;
struct hfs_bnode *bnode;
int idx, res = 0;
u16 off, len, keylen;
bnode = fd->bnode;
tree = bnode->tree;
if (cnt < 0) {
cnt = -cnt;
while (cnt > fd->record) {
cnt -= fd->record + 1;
fd->record = bnode->num_recs - 1;
idx = bnode->prev;
if (!idx) {
res = -ENOENT;
goto out;
}
hfs_bnode_put(bnode);
bnode = hfs_bnode_find(tree, idx);
if (IS_ERR(bnode)) {
res = PTR_ERR(bnode);
bnode = NULL;
goto out;
}
}
fd->record -= cnt;
} else {
while (cnt >= bnode->num_recs - fd->record) {
cnt -= bnode->num_recs - fd->record;
fd->record = 0;
idx = bnode->next;
if (!idx) {
res = -ENOENT;
goto out;
}
hfs_bnode_put(bnode);
bnode = hfs_bnode_find(tree, idx);
if (IS_ERR(bnode)) {
res = PTR_ERR(bnode);
bnode = NULL;
goto out;
}
}
fd->record += cnt;
}
len = hfs_brec_lenoff(bnode, fd->record, &off);
keylen = hfs_brec_keylen(bnode, fd->record);
if (keylen == 0) {
res = -EINVAL;
goto out;
}
fd->keyoffset = off;
fd->keylength = keylen;
fd->entryoffset = off + keylen;
fd->entrylength = len - keylen;
hfs_bnode_read(bnode, fd->key, off, keylen);
out:
fd->bnode = bnode;
return res;
}
| gpl-2.0 |
theboleslaw/hammerhead_kernel | drivers/staging/iio/accel/adis16201_core.c | 4964 | 13229 | /*
* ADIS16201 Programmable Dual-Axis Inclinometer/Accelerometer driver
*
* Copyright 2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
#include "../buffer.h"
#include "adis16201.h"
enum adis16201_chan {
in_supply,
temp,
accel_x,
accel_y,
incli_x,
incli_y,
in_aux,
};
/**
* adis16201_spi_write_reg_8() - write single byte to a register
* @indio_dev: iio device associated with child of actual device (iio_dev or iio_trig)
* @reg_address: the address of the register to be written
* @val: the value to write
**/
static int adis16201_spi_write_reg_8(struct iio_dev *indio_dev,
u8 reg_address,
u8 val)
{
int ret;
struct adis16201_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADIS16201_WRITE_REG(reg_address);
st->tx[1] = val;
ret = spi_write(st->us, st->tx, 2);
mutex_unlock(&st->buf_lock);
return ret;
}
/**
* adis16201_spi_write_reg_16() - write 2 bytes to a pair of registers
* @indio_dev: iio device associated with child of actual device
* @reg_address: the address of the lower of the two registers. Second register
* is assumed to have address one greater.
* @val: value to be written
**/
static int adis16201_spi_write_reg_16(struct iio_dev *indio_dev,
u8 lower_reg_address,
u16 value)
{
int ret;
struct spi_message msg;
struct adis16201_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
{
.tx_buf = st->tx,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
}, {
.tx_buf = st->tx + 2,
.bits_per_word = 8,
.len = 2,
},
};
mutex_lock(&st->buf_lock);
st->tx[0] = ADIS16201_WRITE_REG(lower_reg_address);
st->tx[1] = value & 0xFF;
st->tx[2] = ADIS16201_WRITE_REG(lower_reg_address + 1);
st->tx[3] = (value >> 8) & 0xFF;
spi_message_init(&msg);
spi_message_add_tail(&xfers[0], &msg);
spi_message_add_tail(&xfers[1], &msg);
ret = spi_sync(st->us, &msg);
mutex_unlock(&st->buf_lock);
return ret;
}
/**
* adis16201_spi_read_reg_16() - read 2 bytes from a 16-bit register
* @indio_dev: iio device associated with child of actual device
* @reg_address: the address of the lower of the two registers. Second register
* is assumed to have address one greater.
* @val: somewhere to pass back the value read
**/
static int adis16201_spi_read_reg_16(struct iio_dev *indio_dev,
u8 lower_reg_address,
u16 *val)
{
struct spi_message msg;
struct adis16201_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
.tx_buf = st->tx,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay_usecs = 20,
}, {
.rx_buf = st->rx,
.bits_per_word = 8,
.len = 2,
.delay_usecs = 20,
},
};
mutex_lock(&st->buf_lock);
st->tx[0] = ADIS16201_READ_REG(lower_reg_address);
st->tx[1] = 0;
spi_message_init(&msg);
spi_message_add_tail(&xfers[0], &msg);
spi_message_add_tail(&xfers[1], &msg);
ret = spi_sync(st->us, &msg);
if (ret) {
dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
lower_reg_address);
goto error_ret;
}
*val = (st->rx[0] << 8) | st->rx[1];
error_ret:
mutex_unlock(&st->buf_lock);
return ret;
}
static int adis16201_reset(struct iio_dev *indio_dev)
{
int ret;
struct adis16201_state *st = iio_priv(indio_dev);
ret = adis16201_spi_write_reg_8(indio_dev,
ADIS16201_GLOB_CMD,
ADIS16201_GLOB_CMD_SW_RESET);
if (ret)
dev_err(&st->us->dev, "problem resetting device");
return ret;
}
static ssize_t adis16201_write_reset(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
int ret;
bool res;
if (len < 1)
return -EINVAL;
ret = strtobool(buf, &res);
if (ret || !res)
return ret;
return adis16201_reset(dev_get_drvdata(dev));
}
int adis16201_set_irq(struct iio_dev *indio_dev, bool enable)
{
int ret = 0;
u16 msc;
ret = adis16201_spi_read_reg_16(indio_dev, ADIS16201_MSC_CTRL, &msc);
if (ret)
goto error_ret;
msc |= ADIS16201_MSC_CTRL_ACTIVE_HIGH;
msc &= ~ADIS16201_MSC_CTRL_DATA_RDY_DIO1;
if (enable)
msc |= ADIS16201_MSC_CTRL_DATA_RDY_EN;
else
msc &= ~ADIS16201_MSC_CTRL_DATA_RDY_EN;
ret = adis16201_spi_write_reg_16(indio_dev, ADIS16201_MSC_CTRL, msc);
error_ret:
return ret;
}
static int adis16201_check_status(struct iio_dev *indio_dev)
{
u16 status;
int ret;
ret = adis16201_spi_read_reg_16(indio_dev,
ADIS16201_DIAG_STAT, &status);
if (ret < 0) {
dev_err(&indio_dev->dev, "Reading status failed\n");
goto error_ret;
}
ret = status & 0xF;
if (ret)
ret = -EFAULT;
if (status & ADIS16201_DIAG_STAT_SPI_FAIL)
dev_err(&indio_dev->dev, "SPI failure\n");
if (status & ADIS16201_DIAG_STAT_FLASH_UPT)
dev_err(&indio_dev->dev, "Flash update failed\n");
if (status & ADIS16201_DIAG_STAT_POWER_HIGH)
dev_err(&indio_dev->dev, "Power supply above 3.625V\n");
if (status & ADIS16201_DIAG_STAT_POWER_LOW)
dev_err(&indio_dev->dev, "Power supply below 3.15V\n");
error_ret:
return ret;
}
static int adis16201_self_test(struct iio_dev *indio_dev)
{
int ret;
ret = adis16201_spi_write_reg_16(indio_dev,
ADIS16201_MSC_CTRL,
ADIS16201_MSC_CTRL_SELF_TEST_EN);
if (ret) {
dev_err(&indio_dev->dev, "problem starting self test");
goto err_ret;
}
ret = adis16201_check_status(indio_dev);
err_ret:
return ret;
}
static int adis16201_initial_setup(struct iio_dev *indio_dev)
{
int ret;
struct device *dev = &indio_dev->dev;
/* Disable IRQ */
ret = adis16201_set_irq(indio_dev, false);
if (ret) {
dev_err(dev, "disable irq failed");
goto err_ret;
}
/* Do self test */
ret = adis16201_self_test(indio_dev);
if (ret) {
dev_err(dev, "self test failure");
goto err_ret;
}
/* Read status register to check the result */
ret = adis16201_check_status(indio_dev);
if (ret) {
adis16201_reset(indio_dev);
dev_err(dev, "device not playing ball -> reset");
msleep(ADIS16201_STARTUP_DELAY);
ret = adis16201_check_status(indio_dev);
if (ret) {
dev_err(dev, "giving up");
goto err_ret;
}
}
err_ret:
return ret;
}
static u8 adis16201_addresses[7][2] = {
[in_supply] = { ADIS16201_SUPPLY_OUT, },
[temp] = { ADIS16201_TEMP_OUT },
[accel_x] = { ADIS16201_XACCL_OUT, ADIS16201_XACCL_OFFS },
[accel_y] = { ADIS16201_YACCL_OUT, ADIS16201_YACCL_OFFS },
[in_aux] = { ADIS16201_AUX_ADC },
[incli_x] = { ADIS16201_XINCL_OUT },
[incli_y] = { ADIS16201_YINCL_OUT },
};
static int adis16201_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2,
long mask)
{
int ret;
int bits;
u8 addr;
s16 val16;
switch (mask) {
case 0:
mutex_lock(&indio_dev->mlock);
addr = adis16201_addresses[chan->address][0];
ret = adis16201_spi_read_reg_16(indio_dev, addr, (u16 *)&val16);
if (ret) {
mutex_unlock(&indio_dev->mlock);
return ret;
}
if (val16 & ADIS16201_ERROR_ACTIVE) {
ret = adis16201_check_status(indio_dev);
if (ret) {
mutex_unlock(&indio_dev->mlock);
return ret;
}
}
val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
if (chan->scan_type.sign == 's')
val16 = (s16)(val16 <<
(16 - chan->scan_type.realbits)) >>
(16 - chan->scan_type.realbits);
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
if (chan->channel == 0)
*val2 = 1220;
else
*val2 = 610;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_TEMP:
*val = 0;
*val2 = -470000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_ACCEL:
*val = 0;
*val2 = 462500;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_INCLI:
*val = 0;
*val2 = 100000;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
break;
case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 12;
break;
case IIO_INCLI:
bits = 9;
break;
default:
return -EINVAL;
};
mutex_lock(&indio_dev->mlock);
addr = adis16201_addresses[chan->address][1];
ret = adis16201_spi_read_reg_16(indio_dev, addr, (u16 *)&val16);
if (ret) {
mutex_unlock(&indio_dev->mlock);
return ret;
}
val16 &= (1 << bits) - 1;
val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
}
return -EINVAL;
}
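/* Illustrative note (not part of the original file): following the usual IIO
* sysfs convention, userspace combines the values reported above as
*
* processed = (raw + offset) * scale
*
* e.g. reading in_incli_x_raw and in_incli_x_scale (attribute names assumed
* from the channel table defined later in this file) and multiplying them to
* obtain the inclination in degrees. */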
static int adis16201_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
int val2,
long mask)
{
int bits;
s16 val16;
u8 addr;
switch (mask) {
case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 12;
break;
case IIO_INCLI:
bits = 9;
break;
default:
return -EINVAL;
};
val16 = val & ((1 << bits) - 1);
addr = adis16201_addresses[chan->address][1];
return adis16201_spi_write_reg_16(indio_dev, addr, val16);
}
return -EINVAL;
}
static struct iio_chan_spec adis16201_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16201_SCAN_SUPPLY,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
temp, ADIS16201_SCAN_TEMP,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
IIO_CHAN_INFO_SCALE_SHARED_BIT |
IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_x, ADIS16201_SCAN_ACC_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
IIO_CHAN_INFO_SCALE_SHARED_BIT |
IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_y, ADIS16201_SCAN_ACC_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_aux, ADIS16201_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
IIO_CHAN_INFO_SCALE_SHARED_BIT |
IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
incli_x, ADIS16201_SCAN_INCLI_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
IIO_CHAN_INFO_SCALE_SHARED_BIT |
IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
incli_y, ADIS16201_SCAN_INCLI_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(7)
};
static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16201_write_reset, 0);
static struct attribute *adis16201_attributes[] = {
&iio_dev_attr_reset.dev_attr.attr,
NULL
};
static const struct attribute_group adis16201_attribute_group = {
.attrs = adis16201_attributes,
};
static const struct iio_info adis16201_info = {
.attrs = &adis16201_attribute_group,
.read_raw = &adis16201_read_raw,
.write_raw = &adis16201_write_raw,
.driver_module = THIS_MODULE,
};
static int __devinit adis16201_probe(struct spi_device *spi)
{
int ret;
struct adis16201_state *st;
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
indio_dev = iio_allocate_device(sizeof(*st));
if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
st->us = spi;
mutex_init(&st->buf_lock);
indio_dev->name = spi->dev.driver->name;
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &adis16201_info;
indio_dev->channels = adis16201_channels;
indio_dev->num_channels = ARRAY_SIZE(adis16201_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
ret = adis16201_configure_ring(indio_dev);
if (ret)
goto error_free_dev;
ret = iio_buffer_register(indio_dev,
adis16201_channels,
ARRAY_SIZE(adis16201_channels));
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
}
if (spi->irq) {
ret = adis16201_probe_trigger(indio_dev);
if (ret)
goto error_uninitialize_ring;
}
/* Get the device into a sane initial state */
ret = adis16201_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
ret = iio_device_register(indio_dev);
if (ret < 0)
goto error_remove_trigger;
return 0;
error_remove_trigger:
adis16201_remove_trigger(indio_dev);
error_uninitialize_ring:
iio_buffer_unregister(indio_dev);
error_unreg_ring_funcs:
adis16201_unconfigure_ring(indio_dev);
error_free_dev:
iio_free_device(indio_dev);
error_ret:
return ret;
}
static int adis16201_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
iio_device_unregister(indio_dev);
adis16201_remove_trigger(indio_dev);
iio_buffer_unregister(indio_dev);
adis16201_unconfigure_ring(indio_dev);
iio_free_device(indio_dev);
return 0;
}
static struct spi_driver adis16201_driver = {
.driver = {
.name = "adis16201",
.owner = THIS_MODULE,
},
.probe = adis16201_probe,
.remove = __devexit_p(adis16201_remove),
};
module_spi_driver(adis16201_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16201 Programmable Digital Vibration Sensor driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:adis16201");
| gpl-2.0 |
chadouming/canuck-3.10 | arch/powerpc/oprofile/common.c | 6756 | 6329 | /*
* PPC 64 oprofile support:
* Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
* PPC 32 oprofile support: (based on PPC 64 support)
* Copyright (C) Freescale Semiconductor, Inc 2004
* Author: Andy Fleming
*
* Based on alpha version.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <asm/ptrace.h>
#include <asm/pmc.h>
#include <asm/cputable.h>
#include <asm/oprofile_impl.h>
#include <asm/firmware.h>
static struct op_powerpc_model *model;
static struct op_counter_config ctr[OP_MAX_COUNTER];
static struct op_system_config sys;
static int op_per_cpu_rc;
static void op_handle_interrupt(struct pt_regs *regs)
{
model->handle_interrupt(regs, ctr);
}
static void op_powerpc_cpu_setup(void *dummy)
{
int ret;
ret = model->cpu_setup(ctr);
if (ret != 0)
op_per_cpu_rc = ret;
}
static int op_powerpc_setup(void)
{
int err;
op_per_cpu_rc = 0;
/* Grab the hardware */
err = reserve_pmc_hardware(op_handle_interrupt);
if (err)
return err;
/* Pre-compute the values to stuff in the hardware registers. */
op_per_cpu_rc = model->reg_setup(ctr, &sys, model->num_counters);
if (op_per_cpu_rc)
goto out;
/* Configure the registers on all cpus. If an error occurs on one
* of the cpus, op_per_cpu_rc will be set to the error */
on_each_cpu(op_powerpc_cpu_setup, NULL, 1);
out: if (op_per_cpu_rc) {
/* error on setup release the performance counter hardware */
release_pmc_hardware();
}
return op_per_cpu_rc;
}
static void op_powerpc_shutdown(void)
{
release_pmc_hardware();
}
static void op_powerpc_cpu_start(void *dummy)
{
/* If any of the cpus returned an error, set the
* global flag to the error so it can be returned
* to the generic OProfile caller.
*/
int ret;
ret = model->start(ctr);
if (ret != 0)
op_per_cpu_rc = ret;
}
static int op_powerpc_start(void)
{
op_per_cpu_rc = 0;
if (model->global_start)
return model->global_start(ctr);
if (model->start) {
on_each_cpu(op_powerpc_cpu_start, NULL, 1);
return op_per_cpu_rc;
}
return -EIO; /* No start function is defined for this
power architecture */
}
static inline void op_powerpc_cpu_stop(void *dummy)
{
model->stop();
}
static void op_powerpc_stop(void)
{
if (model->stop)
on_each_cpu(op_powerpc_cpu_stop, NULL, 1);
if (model->global_stop)
model->global_stop();
}
static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
{
int i;
#ifdef CONFIG_PPC64
/*
* There is one mmcr0, mmcr1 and mmcra for setting the events for
* all of the counters.
*/
oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
#ifdef CONFIG_OPROFILE_CELL
/* create a file the user tool can check to see what level of profiling
* support exists with this kernel. Initialize bit mask to indicate
* what support the kernel has:
* bit 0 - Supports SPU event profiling in addition to PPU
* event and cycles; and SPU cycle profiling
* bits 1-31 - Currently unused.
*
* If the file does not exist, then the kernel only supports SPU
* cycle profiling, PPU event and cycle profiling.
*/
oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support);
sys.cell_support = 0x1; /* Note, the user OProfile tool must check
* that this bit is set before attempting to
* use SPU event profiling. Older kernels
* will not have this file, hence the user
* tool is not allowed to do SPU event
* profiling on older kernels. Older kernels
* will accept SPU events but collected data
* is garbage.
*/
#endif
#endif
for (i = 0; i < model->num_counters; ++i) {
struct dentry *dir;
char buf[4];
snprintf(buf, sizeof buf, "%d", i);
dir = oprofilefs_mkdir(sb, root, buf);
oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
/*
* Classic PowerPC doesn't support per-counter
* control like this, but the options are
* expected, so they remain. For Freescale
* Book-E style performance monitors, we do
* support them.
*/
oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
}
oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
/* Default to tracing both kernel and user */
sys.enable_kernel = 1;
sys.enable_user = 1;
return 0;
}
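/* Illustrative sketch (not part of the original file): with oprofilefs
* mounted at its conventional location, the files created above typically
* appear as
*
* /dev/oprofile/mmcr0, mmcr1, mmcra
* /dev/oprofile/0/enabled, event, count, kernel, user, unit_mask
* (one directory per counter: 0, 1, ...)
* /dev/oprofile/enable_kernel
* /dev/oprofile/enable_user
*
* The mount point is chosen by the userspace daemon; /dev/oprofile is only
* the customary one. */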
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
if (!cur_cpu_spec->oprofile_cpu_type)
return -ENODEV;
switch (cur_cpu_spec->oprofile_type) {
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_OPROFILE_CELL
case PPC_OPROFILE_CELL:
if (firmware_has_feature(FW_FEATURE_LPAR))
return -ENODEV;
model = &op_model_cell;
ops->sync_start = model->sync_start;
ops->sync_stop = model->sync_stop;
break;
#endif
case PPC_OPROFILE_RS64:
model = &op_model_rs64;
break;
case PPC_OPROFILE_POWER4:
model = &op_model_power4;
break;
case PPC_OPROFILE_PA6T:
model = &op_model_pa6t;
break;
#endif
#ifdef CONFIG_6xx
case PPC_OPROFILE_G4:
model = &op_model_7450;
break;
#endif
#if defined(CONFIG_FSL_EMB_PERFMON)
case PPC_OPROFILE_FSL_EMB:
model = &op_model_fsl_emb;
break;
#endif
default:
return -ENODEV;
}
model->num_counters = cur_cpu_spec->num_pmcs;
ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
ops->create_files = op_powerpc_create_files;
ops->setup = op_powerpc_setup;
ops->shutdown = op_powerpc_shutdown;
ops->start = op_powerpc_start;
ops->stop = op_powerpc_stop;
ops->backtrace = op_powerpc_backtrace;
printk(KERN_DEBUG "oprofile: using %s performance monitoring.\n",
ops->cpu_type);
return 0;
}
void oprofile_arch_exit(void)
{
}
| gpl-2.0 |
xjljian/android_kernel_huawei_msm8226 | arch/powerpc/lib/locks.c | 6756 | 2225 | /*
* Spin and read/write lock operations.
*
* Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
* Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
* Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
* Rework to support virtual processors
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/stringify.h>
#include <linux/smp.h>
/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR)
#include <asm/hvcall.h>
#include <asm/smp.h>
void __spin_yield(arch_spinlock_t *lock)
{
unsigned int lock_value, holder_cpu, yield_count;
lock_value = lock->slock;
if (lock_value == 0)
return;
holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS);
yield_count = lppaca_of(holder_cpu).yield_count;
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
rmb();
if (lock->slock != lock_value)
return; /* something has changed */
plpar_hcall_norets(H_CONFER,
get_hard_smp_processor_id(holder_cpu), yield_count);
}
/*
* Waiting for a read lock or a write lock on a rwlock...
* This turns out to be the same for read and write locks, since
* we only know the holder if it is write-locked.
*/
void __rw_yield(arch_rwlock_t *rw)
{
int lock_value;
unsigned int holder_cpu, yield_count;
lock_value = rw->lock;
if (lock_value >= 0)
return; /* no write lock at present */
holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS);
yield_count = lppaca_of(holder_cpu).yield_count;
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
rmb();
if (rw->lock != lock_value)
return; /* something has changed */
plpar_hcall_norets(H_CONFER,
get_hard_smp_processor_id(holder_cpu), yield_count);
}
#endif
void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (lock->slock) {
HMT_low();
if (SHARED_PROCESSOR)
__spin_yield(lock);
}
HMT_medium();
}
EXPORT_SYMBOL(arch_spin_unlock_wait);
| gpl-2.0 |
HighwindONE/Kernel_GoldStar | arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c | 7012 | 9447 | /***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Support functions for managing command queues used for
* various hardware blocks.
*/
#include <linux/kernel.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-cmd-queue.h>
#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>
/**
* This application uses this pointer to access the global queue
* state. It points to a bootmem named block.
*/
__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
/**
* Initialize the Global queue state pointer.
*
* Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
char *alloc_name = "cvmx_cmd_queues";
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
extern uint64_t octeon_reserve32_memory;
#endif
if (likely(__cvmx_cmd_queue_state_ptr))
return CVMX_CMD_QUEUE_SUCCESS;
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
if (octeon_reserve32_memory)
__cvmx_cmd_queue_state_ptr =
cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
octeon_reserve32_memory,
octeon_reserve32_memory +
(CONFIG_CAVIUM_RESERVE32 <<
20) - 1, 128, alloc_name);
else
#endif
__cvmx_cmd_queue_state_ptr =
cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
128,
alloc_name);
if (__cvmx_cmd_queue_state_ptr)
memset(__cvmx_cmd_queue_state_ptr, 0,
sizeof(*__cvmx_cmd_queue_state_ptr));
else {
struct cvmx_bootmem_named_block_desc *block_desc =
cvmx_bootmem_find_named_block(alloc_name);
if (block_desc)
__cvmx_cmd_queue_state_ptr =
cvmx_phys_to_ptr(block_desc->base_addr);
else {
cvmx_dprintf
("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
alloc_name);
return CVMX_CMD_QUEUE_NO_MEMORY;
}
}
return CVMX_CMD_QUEUE_SUCCESS;
}
/**
* Initialize a command queue for use. The initial FPA buffer is
* allocated and the hardware unit is configured to point to the
* new command queue.
*
* @queue_id: Hardware command queue to initialize.
* @max_depth: Maximum outstanding commands that can be queued.
* @fpa_pool: FPA pool the command queues should come from.
* @pool_size: Size of each buffer in the FPA pool (bytes)
*
* Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
int max_depth, int fpa_pool,
int pool_size)
{
__cvmx_cmd_queue_state_t *qstate;
cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
if (result != CVMX_CMD_QUEUE_SUCCESS)
return result;
qstate = __cvmx_cmd_queue_get_state(queue_id);
if (qstate == NULL)
return CVMX_CMD_QUEUE_INVALID_PARAM;
/*
* We artificially limit max_depth to 1<<20 words. It is an
* arbitrary limit.
*/
if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
if ((max_depth < 0) || (max_depth > 1 << 20))
return CVMX_CMD_QUEUE_INVALID_PARAM;
} else if (max_depth != 0)
return CVMX_CMD_QUEUE_INVALID_PARAM;
if ((fpa_pool < 0) || (fpa_pool > 7))
return CVMX_CMD_QUEUE_INVALID_PARAM;
if ((pool_size < 128) || (pool_size > 65536))
return CVMX_CMD_QUEUE_INVALID_PARAM;
/* See if someone else has already initialized the queue */
if (qstate->base_ptr_div128) {
if (max_depth != (int)qstate->max_depth) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
"Queue already initialized with different "
"max_depth (%d).\n",
(int)qstate->max_depth);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
if (fpa_pool != qstate->fpa_pool) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
"Queue already initialized with different "
"FPA pool (%u).\n",
qstate->fpa_pool);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
"Queue already initialized with different "
"FPA pool size (%u).\n",
(qstate->pool_size_m1 + 1) << 3);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
CVMX_SYNCWS;
return CVMX_CMD_QUEUE_ALREADY_SETUP;
} else {
union cvmx_fpa_ctl_status status;
void *buffer;
status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
if (!status.s.enb) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
"FPA is not enabled.\n");
return CVMX_CMD_QUEUE_NO_MEMORY;
}
buffer = cvmx_fpa_alloc(fpa_pool);
if (buffer == NULL) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
"Unable to allocate initial buffer.\n");
return CVMX_CMD_QUEUE_NO_MEMORY;
}
memset(qstate, 0, sizeof(*qstate));
qstate->max_depth = max_depth;
qstate->fpa_pool = fpa_pool;
qstate->pool_size_m1 = (pool_size >> 3) - 1;
qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
/*
* We zeroed the now serving field so we need to also
* zero the ticket.
*/
__cvmx_cmd_queue_state_ptr->
ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
CVMX_SYNCWS;
return CVMX_CMD_QUEUE_SUCCESS;
}
}
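/*
* Illustrative usage sketch (not part of the original file): a typical
* caller sets up a PKO command queue once and tears it down on shutdown.
* The pool macros below come from the usual Octeon configuration headers
* and may differ per board, so the block is kept under #if 0.
*/
#if 0
static int example_setup_pko_queue(int port)
{
cvmx_cmd_queue_result_t res;
res = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(port),
0, /* no depth limit */
CVMX_FPA_OUTPUT_BUFFER_POOL,
CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
if (res != CVMX_CMD_QUEUE_SUCCESS)
return -ENOMEM;
/* ... use the queue; on teardown call cvmx_cmd_queue_shutdown() ... */
return 0;
}
#endif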
/**
* Shutdown a queue and free its command buffers to the FPA. The
* hardware connected to the queue must be stopped before this
* function is called.
*
* @queue_id: Queue to shutdown
*
* Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
{
__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
if (qptr == NULL) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
"get queue information.\n");
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
if (cvmx_cmd_queue_length(queue_id) > 0) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
"has data in it.\n");
return CVMX_CMD_QUEUE_FULL;
}
__cvmx_cmd_queue_lock(queue_id, qptr);
if (qptr->base_ptr_div128) {
cvmx_fpa_free(cvmx_phys_to_ptr
((uint64_t) qptr->base_ptr_div128 << 7),
qptr->fpa_pool, 0);
qptr->base_ptr_div128 = 0;
}
__cvmx_cmd_queue_unlock(qptr);
return CVMX_CMD_QUEUE_SUCCESS;
}
/**
* Return the number of command words pending in the queue. This
* function may be relatively slow for some hardware units.
*
* @queue_id: Hardware command queue to query
*
* Returns Number of outstanding commands
*/
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
if (CVMX_ENABLE_PARAMETER_CHECKING) {
if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
/*
* The cast is here so gcc will check that all values in the
* cvmx_cmd_queue_id_t enumeration are here.
*/
switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
case CVMX_CMD_QUEUE_PKO_BASE:
/*
* FIXME: Need atomic lock on
* CVMX_PKO_REG_READ_IDX. Right now we are normally
* called with the queue lock, so that is a SLIGHT
* amount of protection.
*/
cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
union cvmx_pko_mem_debug9 debug9;
debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
return debug9.cn38xx.doorbell;
} else {
union cvmx_pko_mem_debug8 debug8;
debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
return debug8.cn58xx.doorbell;
}
case CVMX_CMD_QUEUE_ZIP:
case CVMX_CMD_QUEUE_DFA:
case CVMX_CMD_QUEUE_RAID:
/* FIXME: Implement other lengths */
return 0;
case CVMX_CMD_QUEUE_DMA_BASE:
{
union cvmx_npei_dmax_counts dmax_counts;
dmax_counts.u64 =
cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
(queue_id & 0x7));
return dmax_counts.s.dbell;
}
case CVMX_CMD_QUEUE_END:
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
/**
* Return the command buffer to be written to. The purpose of this
* function is to allow CVMX routines access to the low-level buffer
* for initial hardware setup. User applications should not call this
* function directly.
*
* @queue_id: Command queue to query
*
* Returns Command buffer or NULL on failure
*/
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
{
__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
if (qptr && qptr->base_ptr_div128)
return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
else
return NULL;
}
| gpl-2.0 |
nobodyAtall/msm7x27-2.6.32.x-nAa | arch/mips/nxp/pnx833x/stb22x/board.c | 9060 | 3926 | /*
* board.c: STB225 board support.
*
* Copyright 2008 NXP Semiconductors
* Chris Steel <chris.steel@nxp.com>
* Daniel Laird <daniel.j.laird@nxp.com>
*
* Based on software written by:
* Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <asm/bootinfo.h>
#include <linux/mm.h>
#include <pnx833x.h>
#include <gpio.h>
/* endianness twiddlers */
#define PNX8335_DEBUG0 0x4400
#define PNX8335_DEBUG1 0x4404
#define PNX8335_DEBUG2 0x4408
#define PNX8335_DEBUG3 0x440c
#define PNX8335_DEBUG4 0x4410
#define PNX8335_DEBUG5 0x4414
#define PNX8335_DEBUG6 0x4418
#define PNX8335_DEBUG7 0x441c
int prom_argc;
char **prom_argv, **prom_envp;
extern void prom_init_cmdline(void);
extern char *prom_getenv(char *envname);
const char *get_system_type(void)
{
return "NXP STB22x";
}
static inline unsigned long env_or_default(char *env, unsigned long dfl)
{
char *str = prom_getenv(env);
return str ? simple_strtol(str, 0, 0) : dfl;
}
void __init prom_init(void)
{
unsigned long memsize;
prom_argc = fw_arg0;
prom_argv = (char **)fw_arg1;
prom_envp = (char **)fw_arg2;
prom_init_cmdline();
memsize = env_or_default("memsize", 0x02000000);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
void __init pnx833x_board_setup(void)
{
pnx833x_gpio_select_function_alt(4);
pnx833x_gpio_select_output(4);
pnx833x_gpio_select_function_alt(5);
pnx833x_gpio_select_input(5);
pnx833x_gpio_select_function_alt(6);
pnx833x_gpio_select_input(6);
pnx833x_gpio_select_function_alt(7);
pnx833x_gpio_select_output(7);
pnx833x_gpio_select_function_alt(25);
pnx833x_gpio_select_function_alt(26);
pnx833x_gpio_select_function_alt(27);
pnx833x_gpio_select_function_alt(28);
pnx833x_gpio_select_function_alt(29);
pnx833x_gpio_select_function_alt(30);
pnx833x_gpio_select_function_alt(31);
pnx833x_gpio_select_function_alt(32);
pnx833x_gpio_select_function_alt(33);
#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
/* Setup MIU for NAND access on CS0...
*
* (it seems that we must also configure CS1 for reliable operation,
* otherwise the first read ID command will fail if it's read as 4 bytes
* but pass if it's read as 1 word.)
*/
/* Setup MIU CS0 & CS1 timing */
PNX833X_MIU_SEL0 = 0;
PNX833X_MIU_SEL1 = 0;
PNX833X_MIU_SEL0_TIMING = 0x50003081;
PNX833X_MIU_SEL1_TIMING = 0x50003081;
/* Setup GPIO 00 for use as MIU CS1 (CS0 is not multiplexed, so does not need this) */
pnx833x_gpio_select_function_alt(0);
/* Setup GPIO 04 to input NAND read/busy signal */
pnx833x_gpio_select_function_io(4);
pnx833x_gpio_select_input(4);
/* Setup GPIO 05 to disable NAND write protect */
pnx833x_gpio_select_function_io(5);
pnx833x_gpio_select_output(5);
pnx833x_gpio_write(1, 5);
#elif defined(CONFIG_MTD_CFI) || defined(CONFIG_MTD_CFI_MODULE)
/* Set up MIU for 16-bit NOR access on CS0 and CS1... */
/* Setup MIU CS0 & CS1 timing */
PNX833X_MIU_SEL0 = 1;
PNX833X_MIU_SEL1 = 1;
PNX833X_MIU_SEL0_TIMING = 0x6A08D082;
PNX833X_MIU_SEL1_TIMING = 0x6A08D082;
/* Setup GPIO 00 for use as MIU CS1 (CS0 is not multiplexed, so does not need this) */
pnx833x_gpio_select_function_alt(0);
#endif
}
| gpl-2.0 |
jbott/android_kernel_rpi_rpi | Documentation/prctl/disable-tsc-test.c | 12900 | 2121 | /*
* Tests for prctl(PR_GET_TSC, ...) / prctl(PR_SET_TSC, ...)
*
* Basic test to test behaviour of PR_GET_TSC and PR_SET_TSC
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <sys/prctl.h>
#include <linux/prctl.h>
/* Get/set the process' ability to use the timestamp counter instruction */
#ifndef PR_GET_TSC
#define PR_GET_TSC 25
#define PR_SET_TSC 26
# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */
# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */
#endif
const char *tsc_names[] =
{
[0] = "[not set]",
[PR_TSC_ENABLE] = "PR_TSC_ENABLE",
[PR_TSC_SIGSEGV] = "PR_TSC_SIGSEGV",
};
uint64_t rdtsc() {
uint32_t lo, hi;
/* We cannot use "=A", since this would use %rax on x86_64 */
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
void sigsegv_cb(int sig)
{
int tsc_val = 0;
printf("[ SIG_SEGV ]\n");
printf("prctl(PR_GET_TSC, &tsc_val); ");
fflush(stdout);
if ( prctl(PR_GET_TSC, &tsc_val) == -1)
perror("prctl");
printf("tsc_val == %s\n", tsc_names[tsc_val]);
printf("prctl(PR_SET_TSC, PR_TSC_ENABLE)\n");
fflush(stdout);
if ( prctl(PR_SET_TSC, PR_TSC_ENABLE) == -1)
perror("prctl");
printf("rdtsc() == ");
}
int main(int argc, char **argv)
{
int tsc_val = 0;
signal(SIGSEGV, sigsegv_cb);
printf("rdtsc() == %llu\n", (unsigned long long)rdtsc());
printf("prctl(PR_GET_TSC, &tsc_val); ");
fflush(stdout);
if ( prctl(PR_GET_TSC, &tsc_val) == -1)
perror("prctl");
printf("tsc_val == %s\n", tsc_names[tsc_val]);
printf("rdtsc() == %llu\n", (unsigned long long)rdtsc());
printf("prctl(PR_SET_TSC, PR_TSC_ENABLE)\n");
fflush(stdout);
if ( prctl(PR_SET_TSC, PR_TSC_ENABLE) == -1)
perror("prctl");
printf("rdtsc() == %llu\n", (unsigned long long)rdtsc());
printf("prctl(PR_SET_TSC, PR_TSC_SIGSEGV)\n");
fflush(stdout);
if ( prctl(PR_SET_TSC, PR_TSC_SIGSEGV) == -1)
perror("prctl");
printf("rdtsc() == ");
fflush(stdout);
printf("%llu\n", (unsigned long long)rdtsc());
fflush(stdout);
exit(EXIT_SUCCESS);
}
| gpl-2.0 |
hyuh/kernel-dlx | drivers/s390/cio/fcx.c | 13924 | 9488 | /*
* Functions for assembling fcx enabled I/O control blocks.
*
* Copyright IBM Corp. 2008
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <asm/fcx.h>
#include "cio.h"
/**
* tcw_get_intrg - return pointer to associated interrogate tcw
* @tcw: pointer to the original tcw
*
* Return a pointer to the interrogate tcw associated with the specified tcw
* or %NULL if there is no associated interrogate tcw.
*/
struct tcw *tcw_get_intrg(struct tcw *tcw)
{
return (struct tcw *) ((addr_t) tcw->intrg);
}
EXPORT_SYMBOL(tcw_get_intrg);
/**
* tcw_get_data - return pointer to input/output data associated with tcw
* @tcw: pointer to the tcw
*
* Return the input or output data address specified in the tcw depending
* on whether the r-bit or the w-bit is set. If neither bit is set, return
* %NULL.
*/
void *tcw_get_data(struct tcw *tcw)
{
if (tcw->r)
return (void *) ((addr_t) tcw->input);
if (tcw->w)
return (void *) ((addr_t) tcw->output);
return NULL;
}
EXPORT_SYMBOL(tcw_get_data);
/**
* tcw_get_tccb - return pointer to tccb associated with tcw
* @tcw: pointer to the tcw
*
* Return pointer to the tccb associated with this tcw.
*/
struct tccb *tcw_get_tccb(struct tcw *tcw)
{
return (struct tccb *) ((addr_t) tcw->tccb);
}
EXPORT_SYMBOL(tcw_get_tccb);
/**
* tcw_get_tsb - return pointer to tsb associated with tcw
* @tcw: pointer to the tcw
*
* Return pointer to the tsb associated with this tcw.
*/
struct tsb *tcw_get_tsb(struct tcw *tcw)
{
return (struct tsb *) ((addr_t) tcw->tsb);
}
EXPORT_SYMBOL(tcw_get_tsb);
/**
* tcw_init - initialize tcw data structure
* @tcw: pointer to the tcw to be initialized
* @r: initial value of the r-bit
* @w: initial value of the w-bit
*
* Initialize all fields of the specified tcw data structure with zero and
* fill in the format, flags, r and w fields.
*/
void tcw_init(struct tcw *tcw, int r, int w)
{
memset(tcw, 0, sizeof(struct tcw));
tcw->format = TCW_FORMAT_DEFAULT;
tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
if (r)
tcw->r = 1;
if (w)
tcw->w = 1;
}
EXPORT_SYMBOL(tcw_init);
static inline size_t tca_size(struct tccb *tccb)
{
return tccb->tcah.tcal - 12;
}
static u32 calc_dcw_count(struct tccb *tccb)
{
int offset;
struct dcw *dcw;
u32 count = 0;
size_t size;
size = tca_size(tccb);
for (offset = 0; offset < size;) {
dcw = (struct dcw *) &tccb->tca[offset];
count += dcw->count;
if (!(dcw->flags & DCW_FLAGS_CC))
break;
offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
}
return count;
}
static u32 calc_cbc_size(struct tidaw *tidaw, int num)
{
int i;
u32 cbc_data;
u32 cbc_count = 0;
u64 data_count = 0;
for (i = 0; i < num; i++) {
if (tidaw[i].flags & TIDAW_FLAGS_LAST)
break;
/* TODO: find out if padding applies to total of data
* transferred or data transferred by this tidaw. Assumption:
* applies to total. */
data_count += tidaw[i].count;
if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
cbc_data = 4 + ALIGN(data_count, 4) - data_count;
cbc_count += cbc_data;
data_count += cbc_data;
}
}
return cbc_count;
}
/**
* tcw_finalize - finalize tcw length fields and tidaw list
* @tcw: pointer to the tcw
* @num_tidaws: the number of tidaws used to address input/output data or zero
* if no tida is used
*
* Calculate the input-/output-count and tccbl field in the tcw, add a
* tcat to the tccb and terminate the data tidaw list if used.
*
* Note: in case input- or output-tida is used, the tidaw-list must be stored
* in contiguous storage (no ttic). The tcal field in the tccb must be
* up-to-date.
*/
void tcw_finalize(struct tcw *tcw, int num_tidaws)
{
struct tidaw *tidaw;
struct tccb *tccb;
struct tccb_tcat *tcat;
u32 count;
/* Terminate tidaw list. */
tidaw = tcw_get_data(tcw);
if (num_tidaws > 0)
tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
/* Add tcat to tccb. */
tccb = tcw_get_tccb(tcw);
tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
memset(tcat, 0, sizeof(*tcat));
/* Calculate tcw input/output count and tcat transport count. */
count = calc_dcw_count(tccb);
if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
count += calc_cbc_size(tidaw, num_tidaws);
if (tcw->r)
tcw->input_count = count;
else if (tcw->w)
tcw->output_count = count;
tcat->count = ALIGN(count, 4) + 4;
/* Calculate tccbl. */
tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
sizeof(struct tccb_tcat) - 20) >> 2;
}
EXPORT_SYMBOL(tcw_finalize);
/**
* tcw_set_intrg - set the interrogate tcw address of a tcw
* @tcw: the tcw address
* @intrg_tcw: the address of the interrogate tcw
*
* Set the address of the interrogate tcw in the specified tcw.
*/
void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
{
tcw->intrg = (u32) ((addr_t) intrg_tcw);
}
EXPORT_SYMBOL(tcw_set_intrg);
/**
* tcw_set_data - set data address and tida flag of a tcw
* @tcw: the tcw address
* @data: the data address
* @use_tidal: zero if the data address specifies a contiguous block of data,
* non-zero if it specifies a list of tidaws.
*
* Set the input/output data address of a tcw (depending on the value of the
* r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
* is set as well.
*/
void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
{
if (tcw->r) {
tcw->input = (u64) ((addr_t) data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_INPUT_TIDA;
} else if (tcw->w) {
tcw->output = (u64) ((addr_t) data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
}
}
EXPORT_SYMBOL(tcw_set_data);
/**
* tcw_set_tccb - set tccb address of a tcw
* @tcw: the tcw address
* @tccb: the tccb address
*
* Set the address of the tccb in the specified tcw.
*/
void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
{
tcw->tccb = (u64) ((addr_t) tccb);
}
EXPORT_SYMBOL(tcw_set_tccb);
/**
* tcw_set_tsb - set tsb address of a tcw
* @tcw: the tcw address
* @tsb: the tsb address
*
* Set the address of the tsb in the specified tcw.
*/
void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
{
tcw->tsb = (u64) ((addr_t) tsb);
}
EXPORT_SYMBOL(tcw_set_tsb);
/**
* tccb_init - initialize tccb
* @tccb: the tccb address
* @size: the maximum size of the tccb
 * @sac: the service-action-code to be used
*
* Initialize the header of the specified tccb by resetting all values to zero
* and filling in defaults for format, sac and initial tcal fields.
*/
void tccb_init(struct tccb *tccb, size_t size, u32 sac)
{
memset(tccb, 0, size);
tccb->tcah.format = TCCB_FORMAT_DEFAULT;
tccb->tcah.sac = sac;
tccb->tcah.tcal = 12;
}
EXPORT_SYMBOL(tccb_init);
/**
* tsb_init - initialize tsb
* @tsb: the tsb address
*
* Initialize the specified tsb by resetting all values to zero.
*/
void tsb_init(struct tsb *tsb)
{
memset(tsb, 0, sizeof(*tsb));
}
EXPORT_SYMBOL(tsb_init);
/**
* tccb_add_dcw - add a dcw to the tccb
* @tccb: the tccb address
* @tccb_size: the maximum tccb size
* @cmd: the dcw command
* @flags: flags for the dcw
* @cd: pointer to control data for this dcw or NULL if none is required
* @cd_count: number of control data bytes for this dcw
* @count: number of data bytes for this dcw
*
* Add a new dcw to the specified tccb by writing the dcw information specified
* by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
* a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
* would exceed the available space as defined by @tccb_size.
*
 * Note: the tcal field of the tccb header will be updated to reflect the
 * added content.
*/
struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
void *cd, u8 cd_count, u32 count)
{
struct dcw *dcw;
int size;
int tca_offset;
/* Check for space. */
tca_offset = tca_size(tccb);
size = ALIGN(sizeof(struct dcw) + cd_count, 4);
if (sizeof(struct tccb_tcah) + tca_offset + size +
sizeof(struct tccb_tcat) > tccb_size)
return ERR_PTR(-ENOSPC);
/* Add dcw to tca. */
dcw = (struct dcw *) &tccb->tca[tca_offset];
memset(dcw, 0, size);
dcw->cmd = cmd;
dcw->flags = flags;
dcw->count = count;
dcw->cd_count = cd_count;
if (cd)
memcpy(&dcw->cd[0], cd, cd_count);
tccb->tcah.tcal += size;
return dcw;
}
EXPORT_SYMBOL(tccb_add_dcw);
/**
* tcw_add_tidaw - add a tidaw to a tcw
* @tcw: the tcw address
* @num_tidaws: the current number of tidaws
* @flags: flags for the new tidaw
* @addr: address value for the new tidaw
* @count: count value for the new tidaw
*
* Add a new tidaw to the input/output data tidaw-list of the specified tcw
* (depending on the value of the r-flag and w-flag) and return a pointer to
* the new tidaw.
*
* Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
* must ensure that there is enough space for the new tidaw. The last-tidaw
* flag for the last tidaw in the list will be set by tcw_finalize.
*/
struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
void *addr, u32 count)
{
struct tidaw *tidaw;
/* Add tidaw to tidaw-list. */
tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws;
memset(tidaw, 0, sizeof(struct tidaw));
tidaw->flags = flags;
tidaw->count = count;
tidaw->addr = (u64) ((addr_t) addr);
return tidaw;
}
EXPORT_SYMBOL(tcw_add_tidaw);
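/*
 * Illustrative usage sketch (not part of the original source and compiled
 * out). It shows the intended call order for building a transport command
 * word with the helpers above; the buffers, sizes, sac value and the dcw
 * command code (0x02) are hypothetical placeholders.
 */
#if 0
static void example_build_read_tcw(struct tcw *tcw, struct tccb *tccb,
				   size_t tccb_size, u32 sac, struct tsb *tsb,
				   struct tidaw *tidaws, void *buf, u32 len)
{
	int num_tidaws = 0;

	tcw_init(tcw, 1, 0);		/* r=1: read request */
	tccb_init(tccb, tccb_size, sac);
	tsb_init(tsb);
	tcw_set_tccb(tcw, tccb);
	tcw_set_tsb(tcw, tsb);
	/* one dcw describing len bytes of data, no control data */
	tccb_add_dcw(tccb, tccb_size, 0x02, 0, NULL, 0, len);
	/* address the data through a tidaw list */
	tcw_set_data(tcw, tidaws, 1);
	tcw_add_tidaw(tcw, num_tidaws++, 0, buf, len);
	tcw_finalize(tcw, num_tidaws);
}
#endif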
| gpl-2.0 |
Think-Silicon/linux-thinksilicon | net/sched/cls_cgroup.c | 101 | 7370 | /*
* net/sched/cls_cgroup.c Control Group Classifier
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Thomas Graf <tgraf@suug.ch>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/fdtable.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>
static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
struct cgroup_cls_state, css);
}
static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
return container_of(task_subsys_state(p, net_cls_subsys_id),
struct cgroup_cls_state, css);
}
static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
{
struct cgroup_cls_state *cs;
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM);
return &cs->css;
}
static int cgrp_css_online(struct cgroup *cgrp)
{
if (cgrp->parent)
cgrp_cls_state(cgrp)->classid =
cgrp_cls_state(cgrp->parent)->classid;
return 0;
}
static void cgrp_css_free(struct cgroup *cgrp)
{
kfree(cgrp_cls_state(cgrp));
}
static int update_classid(const void *v, struct file *file, unsigned n)
{
int err;
struct socket *sock = sock_from_file(file, &err);
if (sock)
sock->sk->sk_classid = (u32)(unsigned long)v;
return 0;
}
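/*
 * cgrp_attach() walks the open files of each task being moved into the
 * cgroup and updates sk_classid on any sockets found (via update_classid
 * above), so that sockets opened before the move also pick up the new
 * cgroup's classid.
 */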
static void cgrp_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
struct task_struct *p;
void *v;
cgroup_taskset_for_each(p, cgrp, tset) {
task_lock(p);
v = (void *)(unsigned long)task_cls_classid(p);
iterate_fd(p->files, 0, update_classid, v);
task_unlock(p);
}
}
static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
return cgrp_cls_state(cgrp)->classid;
}
static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
{
cgrp_cls_state(cgrp)->classid = (u32) value;
return 0;
}
static struct cftype ss_files[] = {
{
.name = "classid",
.read_u64 = read_classid,
.write_u64 = write_classid,
},
{ } /* terminate */
};
struct cgroup_subsys net_cls_subsys = {
.name = "net_cls",
.css_alloc = cgrp_css_alloc,
.css_online = cgrp_css_online,
.css_free = cgrp_css_free,
.attach = cgrp_attach,
.subsys_id = net_cls_subsys_id,
.base_cftypes = ss_files,
.module = THIS_MODULE,
};
struct cls_cgroup_head {
u32 handle;
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
};
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
struct cls_cgroup_head *head = tp->root;
u32 classid;
rcu_read_lock();
classid = task_cls_state(current)->classid;
rcu_read_unlock();
/*
* Due to the nature of the classifier it is required to ignore all
* packets originating from softirq context as accessing `current'
* would lead to false results.
*
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq-based
	 * calls by looking at the number of nested bh disable calls, because
	 * softirqs always disable bh.
*/
if (in_serving_softirq()) {
/* If there is an sk_classid we'll use that. */
if (!skb->sk)
return -1;
classid = skb->sk->sk_classid;
}
if (!classid)
return -1;
if (!tcf_em_tree_match(skb, &head->ematches, NULL))
return -1;
res->classid = classid;
res->class = 0;
return tcf_exts_exec(skb, &head->exts, res);
}
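/*
 * Typical configuration of the classify callback above, for illustration
 * only (device names, paths and handles are example values, not taken from
 * this file): the per-cgroup value is written through the "net_cls.classid"
 * control file and matched with the "cgroup" tc filter, e.g.
 *
 *   mkdir /sys/fs/cgroup/net_cls/foo
 *   echo 0x00100001 > /sys/fs/cgroup/net_cls/foo/net_cls.classid    # 10:1
 *   tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 *
 * Packets sent by tasks in that cgroup are then classified to handle 10:1.
 */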
static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
return 0UL;
}
static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}
static int cls_cgroup_init(struct tcf_proto *tp)
{
return 0;
}
static const struct tcf_ext_map cgroup_ext_map = {
.action = TCA_CGROUP_ACT,
.police = TCA_CGROUP_POLICE,
};
static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
[TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
};
static int cls_cgroup_change(struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
unsigned long *arg)
{
struct nlattr *tb[TCA_CGROUP_MAX + 1];
struct cls_cgroup_head *head = tp->root;
struct tcf_ematch_tree t;
struct tcf_exts e;
int err;
if (!tca[TCA_OPTIONS])
return -EINVAL;
if (head == NULL) {
if (!handle)
return -EINVAL;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (head == NULL)
return -ENOBUFS;
head->handle = handle;
tcf_tree_lock(tp);
tp->root = head;
tcf_tree_unlock(tp);
}
if (handle != head->handle)
return -ENOENT;
err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
cgroup_policy);
if (err < 0)
return err;
err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &cgroup_ext_map);
if (err < 0)
return err;
err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
if (err < 0)
return err;
tcf_exts_change(tp, &head->exts, &e);
tcf_em_tree_change(tp, &head->ematches, &t);
return 0;
}
static void cls_cgroup_destroy(struct tcf_proto *tp)
{
struct cls_cgroup_head *head = tp->root;
if (head) {
tcf_exts_destroy(tp, &head->exts);
tcf_em_tree_destroy(tp, &head->ematches);
kfree(head);
}
}
static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
return -EOPNOTSUPP;
}
static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct cls_cgroup_head *head = tp->root;
if (arg->count < arg->skip)
goto skip;
if (arg->fn(tp, (unsigned long) head, arg) < 0) {
arg->stop = 1;
return;
}
skip:
arg->count++;
}
static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
struct cls_cgroup_head *head = tp->root;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
t->tcm_handle = head->handle;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
goto nla_put_failure;
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
.kind = "cgroup",
.init = cls_cgroup_init,
.change = cls_cgroup_change,
.classify = cls_cgroup_classify,
.destroy = cls_cgroup_destroy,
.get = cls_cgroup_get,
.put = cls_cgroup_put,
.delete = cls_cgroup_delete,
.walk = cls_cgroup_walk,
.dump = cls_cgroup_dump,
.owner = THIS_MODULE,
};
static int __init init_cgroup_cls(void)
{
int ret;
ret = cgroup_load_subsys(&net_cls_subsys);
if (ret)
goto out;
ret = register_tcf_proto_ops(&cls_cgroup_ops);
if (ret)
cgroup_unload_subsys(&net_cls_subsys);
out:
return ret;
}
static void __exit exit_cgroup_cls(void)
{
unregister_tcf_proto_ops(&cls_cgroup_ops);
cgroup_unload_subsys(&net_cls_subsys);
}
module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");
| gpl-2.0 |
bingfengxiaokai/u-boot-2012.04.01-tranplate | arch/arm/cpu/arm926ejs/kirkwood/cpu.c | 101 | 11301 | /*
* (C) Copyright 2009
* Marvell Semiconductor <www.marvell.com>
* Written-by: Prafulla Wadaskar <prafulla@marvell.com>
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA
*/
#include <common.h>
#include <netdev.h>
#include <asm/cache.h>
#include <u-boot/md5.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/kirkwood.h>
#include <hush.h>
#define BUFLEN 16
void reset_cpu(unsigned long ignored)
{
struct kwcpu_registers *cpureg =
(struct kwcpu_registers *)KW_CPU_REG_BASE;
writel(readl(&cpureg->rstoutn_mask) | (1 << 2),
&cpureg->rstoutn_mask);
writel(readl(&cpureg->sys_soft_rst) | 1,
&cpureg->sys_soft_rst);
while (1) ;
}
/*
 * Generates a random hex number by reading some time-variant system
 * registers and using the md5 algorithm
*/
unsigned char get_random_hex(void)
{
int i;
u32 inbuf[BUFLEN];
u8 outbuf[BUFLEN];
/*
* in case of 88F6281/88F6282/88F6192 A0,
	 * Bit7 needs to be reset to generate random values in KW_REG_UNDOC_0x1470.
	 * SoC reg offsets KW_REG_UNDOC_0x1470 and KW_REG_UNDOC_0x1478 are
	 * reserved regs and do not have names at this moment
* (no errata available)
*/
writel(readl(KW_REG_UNDOC_0x1478) & ~(1 << 7), KW_REG_UNDOC_0x1478);
for (i = 0; i < BUFLEN; i++) {
inbuf[i] = readl(KW_REG_UNDOC_0x1470);
}
md5((u8 *) inbuf, (BUFLEN * sizeof(u32)), outbuf);
return outbuf[outbuf[7] % 0x0f];
}
/*
* Window Size
* Used with the Base register to set the address window size and location.
* Must be programmed from LSB to MSB as sequence of ones followed by
* sequence of zeros. The number of ones specifies the size of the window in
* 64 KByte granularity (e.g., a value of 0x00FF specifies 256 = 16 MByte).
* NOTE: A value of 0x0 specifies 64-KByte size.
*/
unsigned int kw_winctrl_calcsize(unsigned int sizeval)
{
int i;
unsigned int j = 0;
u32 val = sizeval >> 1;
for (i = 0; val >= 0x10000; i++) {
j |= (1 << i);
val = val >> 1;
}
return (0x0000ffff & j);
}
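/*
 * Illustrative examples for kw_winctrl_calcsize() (not from the original
 * source):
 *   kw_winctrl_calcsize(0x10000)   -> 0x0000  (one 64 KByte unit)
 *   kw_winctrl_calcsize(0x1000000) -> 0x00ff  (256 units = 16 MByte)
 * matching the 0x00FF example in the comment above.
 */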
/*
* kw_config_adr_windows - Configure address Windows
*
 * There are 8 address windows supported by the Kirkwood SoC to address
 * different devices. Each window can be configured for size, BAR and remap addr.
* Below configuration is standard for most of the cases
*
* If remap function not used, remap_lo must be set as base
*
* Reference Documentation:
* Mbus-L to Mbus Bridge Registers Configuration.
* (Sec 25.1 and 25.3 of Datasheet)
*/
int kw_config_adr_windows(void)
{
struct kwwin_registers *winregs =
(struct kwwin_registers *)KW_CPU_WIN_BASE;
/* Window 0: PCIE MEM address space */
writel(KWCPU_WIN_CTRL_DATA(1024 * 1024 * 256, KWCPU_TARGET_PCIE,
KWCPU_ATTR_PCIE_MEM, KWCPU_WIN_ENABLE), &winregs[0].ctrl);
writel(KW_DEFADR_PCI_MEM, &winregs[0].base);
writel(KW_DEFADR_PCI_MEM, &winregs[0].remap_lo);
writel(0x0, &winregs[0].remap_hi);
/* Window 1: PCIE IO address space */
writel(KWCPU_WIN_CTRL_DATA(1024 * 64, KWCPU_TARGET_PCIE,
KWCPU_ATTR_PCIE_IO, KWCPU_WIN_ENABLE), &winregs[1].ctrl);
writel(KW_DEFADR_PCI_IO, &winregs[1].base);
writel(KW_DEFADR_PCI_IO_REMAP, &winregs[1].remap_lo);
writel(0x0, &winregs[1].remap_hi);
/* Window 2: NAND Flash address space */
writel(KWCPU_WIN_CTRL_DATA(1024 * 1024 * 128, KWCPU_TARGET_MEMORY,
KWCPU_ATTR_NANDFLASH, KWCPU_WIN_ENABLE), &winregs[2].ctrl);
writel(KW_DEFADR_NANDF, &winregs[2].base);
writel(KW_DEFADR_NANDF, &winregs[2].remap_lo);
writel(0x0, &winregs[2].remap_hi);
/* Window 3: SPI Flash address space */
writel(KWCPU_WIN_CTRL_DATA(1024 * 1024 * 128, KWCPU_TARGET_MEMORY,
KWCPU_ATTR_SPIFLASH, KWCPU_WIN_ENABLE), &winregs[3].ctrl);
writel(KW_DEFADR_SPIF, &winregs[3].base);
writel(KW_DEFADR_SPIF, &winregs[3].remap_lo);
writel(0x0, &winregs[3].remap_hi);
/* Window 4: BOOT Memory address space */
writel(KWCPU_WIN_CTRL_DATA(1024 * 1024 * 128, KWCPU_TARGET_MEMORY,
KWCPU_ATTR_BOOTROM, KWCPU_WIN_ENABLE), &winregs[4].ctrl);
writel(KW_DEFADR_BOOTROM, &winregs[4].base);
/* Window 5: Security SRAM address space */
writel(KWCPU_WIN_CTRL_DATA(1024 * 64, KWCPU_TARGET_SASRAM,
KWCPU_ATTR_SASRAM, KWCPU_WIN_ENABLE), &winregs[5].ctrl);
writel(KW_DEFADR_SASRAM, &winregs[5].base);
/* Window 6-7: Disabled */
writel(KWCPU_WIN_DISABLE, &winregs[6].ctrl);
writel(KWCPU_WIN_DISABLE, &winregs[7].ctrl);
return 0;
}
/*
* kw_config_gpio - GPIO configuration
*/
void kw_config_gpio(u32 gpp0_oe_val, u32 gpp1_oe_val, u32 gpp0_oe, u32 gpp1_oe)
{
struct kwgpio_registers *gpio0reg =
(struct kwgpio_registers *)KW_GPIO0_BASE;
struct kwgpio_registers *gpio1reg =
(struct kwgpio_registers *)KW_GPIO1_BASE;
/* Init GPIOS to default values as per board requirement */
writel(gpp0_oe_val, &gpio0reg->dout);
writel(gpp1_oe_val, &gpio1reg->dout);
writel(gpp0_oe, &gpio0reg->oe);
writel(gpp1_oe, &gpio1reg->oe);
}
/*
* kw_config_mpp - Multi-Purpose Pins Functionality configuration
*
* Each MPP can be configured to different functionality through
* MPP control register, ref (sec 6.1 of kirkwood h/w specification)
*
 * There are a maximum of 64 Multi-Purpose Pins on Kirkwood.
 * Each MPP's functionality can be configured by a 4-bit value
 * of the MPP control reg; the value and associated functionality depend
 * upon the SoC variant used.
*/
int kw_config_mpp(u32 mpp0_7, u32 mpp8_15, u32 mpp16_23, u32 mpp24_31,
u32 mpp32_39, u32 mpp40_47, u32 mpp48_55)
{
u32 *mppreg = (u32 *) KW_MPP_BASE;
/* program mpp registers */
writel(mpp0_7, &mppreg[0]);
writel(mpp8_15, &mppreg[1]);
writel(mpp16_23, &mppreg[2]);
writel(mpp24_31, &mppreg[3]);
writel(mpp32_39, &mppreg[4]);
writel(mpp40_47, &mppreg[5]);
writel(mpp48_55, &mppreg[6]);
return 0;
}
/*
* SYSRSTn Duration Counter Support
*
* Kirkwood SoC implements a hardware-based SYSRSTn duration counter.
* When SYSRSTn is asserted low, a SYSRSTn duration counter is running.
* The SYSRSTn duration counter is useful for implementing a manufacturer
 * or factory reset: upon a reset assertion held for longer than the
 * pre-configured value of the sysrstdelay environment variable, the
 * command in the sysrstcmd environment variable is executed.
 * The counter value is stored in the SYSRSTn Length Counter Register.
 * The counter is based on the 25-MHz reference clock (40 ns).
 * It is a 29-bit counter, yielding a maximum counting duration of
 * 2^29/25 MHz (21.4 seconds). When the counter reaches its maximum value,
 * it remains at this value until a counter reset is triggered by setting
* bit 31 of KW_REG_SYSRST_CNT
*/
static void kw_sysrst_action(void)
{
int ret;
char *s = getenv("sysrstcmd");
if (!s) {
debug("Error.. %s failed, check sysrstcmd\n",
__FUNCTION__);
return;
}
debug("Starting %s process...\n", __FUNCTION__);
ret = run_command(s, 0);
if (ret < 0)
debug("Error.. %s failed\n", __FUNCTION__);
else
debug("%s process finished\n", __FUNCTION__);
}
static void kw_sysrst_check(void)
{
u32 sysrst_cnt, sysrst_dly;
char *s;
/*
* no action if sysrstdelay environment variable is not defined
*/
s = getenv("sysrstdelay");
if (s == NULL)
return;
/* read sysrstdelay value */
sysrst_dly = (u32) simple_strtoul(s, NULL, 10);
/* read SysRst Length counter register (bits 28:0) */
sysrst_cnt = (0x1fffffff & readl(KW_REG_SYSRST_CNT));
debug("H/w Rst hold time: %d.%d secs\n",
sysrst_cnt / SYSRST_CNT_1SEC_VAL,
sysrst_cnt % SYSRST_CNT_1SEC_VAL);
/* clear the counter for next valid read*/
writel(1 << 31, KW_REG_SYSRST_CNT);
/*
* sysrst_action:
	 * if the H/w Reset key is pressed and held for longer
	 * than sysrst_dly seconds
*/
if (sysrst_cnt >= SYSRST_CNT_1SEC_VAL * sysrst_dly)
kw_sysrst_action();
}
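/*
 * Illustrative environment setup (example values only; "factory_reset" is a
 * hypothetical script name, not taken from this file):
 *
 *   setenv sysrstdelay 5
 *   setenv sysrstcmd 'run factory_reset'
 *   saveenv
 *
 * With this, holding the reset line low for 5 seconds or more makes
 * kw_sysrst_check() run the sysrstcmd command during the next boot.
 */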
#if defined(CONFIG_DISPLAY_CPUINFO)
int print_cpuinfo(void)
{
char *rev;
u16 devid = (readl(KW_REG_PCIE_DEVID) >> 16) & 0xffff;
u8 revid = readl(KW_REG_PCIE_REVID) & 0xff;
if ((readl(KW_REG_DEVICE_ID) & 0x03) > 2) {
printf("Error.. %s:Unsupported Kirkwood SoC 88F%04x\n", __FUNCTION__, devid);
return -1;
}
switch (revid) {
case 0:
rev = "Z0";
break;
case 2:
rev = "A0";
break;
case 3:
rev = "A1";
break;
default:
rev = "??";
break;
}
printf("SoC: Kirkwood 88F%04x_%s\n", devid, rev);
return 0;
}
#endif /* CONFIG_DISPLAY_CPUINFO */
#ifdef CONFIG_ARCH_CPU_INIT
int arch_cpu_init(void)
{
u32 reg;
struct kwcpu_registers *cpureg =
(struct kwcpu_registers *)KW_CPU_REG_BASE;
	/* Linux expects the internal registers to be at 0xf1000000 */
writel(KW_REGS_PHY_BASE, KW_OFFSET_REG);
/* Enable and invalidate L2 cache in write through mode */
writel(readl(&cpureg->l2_cfg) | 0x18, &cpureg->l2_cfg);
invalidate_l2_cache();
kw_config_adr_windows();
#ifdef CONFIG_KIRKWOOD_RGMII_PAD_1V8
/*
	 * Configures the I/O voltage of the pads connected to the Gigabit
	 * Ethernet interface to 1.8V
	 * By default it is set to 3.3V
*/
reg = readl(KW_REG_MPP_OUT_DRV_REG);
reg |= (1 << 7);
writel(reg, KW_REG_MPP_OUT_DRV_REG);
#endif
#ifdef CONFIG_KIRKWOOD_EGIGA_INIT
/*
	 * Set egiga port0/1 in normal functional mode.
	 * This is required because on Kirkwood the ports are in reset mode by default.
	 * The OS egiga driver may not have provision to set them in normal mode,
	 * and if u-boot is built without network support, networking may fail at OS level.
*/
reg = readl(KWGBE_PORT_SERIAL_CONTROL1_REG(0));
reg &= ~(1 << 4); /* Clear PortReset Bit */
writel(reg, (KWGBE_PORT_SERIAL_CONTROL1_REG(0)));
reg = readl(KWGBE_PORT_SERIAL_CONTROL1_REG(1));
reg &= ~(1 << 4); /* Clear PortReset Bit */
writel(reg, (KWGBE_PORT_SERIAL_CONTROL1_REG(1)));
#endif
#ifdef CONFIG_KIRKWOOD_PCIE_INIT
/*
* Enable PCI Express Port0
*/
reg = readl(&cpureg->ctrl_stat);
reg |= (1 << 0); /* Set PEX0En Bit */
writel(reg, &cpureg->ctrl_stat);
#endif
return 0;
}
#endif /* CONFIG_ARCH_CPU_INIT */
/*
* SOC specific misc init
*/
#if defined(CONFIG_ARCH_MISC_INIT)
int arch_misc_init(void)
{
volatile u32 temp;
	/* CPU streaming & write allocate */
temp = readfr_extra_feature_reg();
temp &= ~(1 << 28); /* disable wr alloc */
writefr_extra_feature_reg(temp);
temp = readfr_extra_feature_reg();
temp &= ~(1 << 29); /* streaming disabled */
writefr_extra_feature_reg(temp);
/* L2Cache settings */
temp = readfr_extra_feature_reg();
/* Disable L2C pre fetch - Set bit 24 */
temp |= (1 << 24);
/* enable L2C - Set bit 22 */
temp |= (1 << 22);
writefr_extra_feature_reg(temp);
icache_enable();
/* Change reset vector to address 0x0 */
temp = get_cr();
set_cr(temp & ~CR_V);
	/* check and execute reset-to-factory event */
kw_sysrst_check();
return 0;
}
#endif /* CONFIG_ARCH_MISC_INIT */
#ifdef CONFIG_MVGBE
int cpu_eth_init(bd_t *bis)
{
mvgbe_initialize(bis);
return 0;
}
#endif
| gpl-2.0 |
koying/buildroot-linux-kernel-m3-pivos | drivers/amlogic/wifi/nano_c047_12/WiFiEngine/wifi_drv/src/we_lqm.c | 101 | 2600 | /* $Id: $ */
/** @defgroup we_lqm WiFiEngine link quality management
*
* \brief This module implements WiFiEngine link quality management
*
 * On receiving PEER_STATUS warnings, lqm will issue probes to the AP to
 * force the AP to send responses. This makes the link less vulnerable
 * to APs that sometimes 'forget' to send beacons (mostly D-Link).
*
*/
#include "driverenv.h"
#include "m80211_stddefs.h"
#include "driverenv_kref.h"
#include "we_ind.h"
#include "wifi_engine_internal.h"
#include "wei_netlist.h"
#include "hmg_traffic.h"
#include "registry.h"
#include "registryAccess.h"
#ifdef FILE_WE_LQM_C
#undef FILE_NUMBER
#define FILE_NUMBER FILE_WE_LQM_C
#endif
/* this has been deprecated in fw */
#if 0
static uint32_t lqm_enabled = FALSE;
static uint32_t lqm_scan_job_id;
static int we_lqm_create_scan_job(WiFiEngine_net_t* net, uint32_t *lqm_job_id)
{
int r;
channel_list_t ch_list;
if (net)
{
ch_list.reserved = 0;
ch_list.no_channels = 1;
ch_list.channelList[0] = net->bss_p->bss_description_p->ie.ds_parameter_set.channel;
r = WiFiEngine_AddScanJob(lqm_job_id,
net->bss_p->bss_description_p->ie.ssid,
net->bssId_AP,
1,
ch_list,
CONNECTED_MODE,
1,
0,
-1,
1,
NULL);
return r;
}
return WIFI_ENGINE_FAILURE;
}
#endif
/*
* Configure a scan job to be used if mlme_peer_status_ind is received
* with status warning. The job will initiate sending of a probe
* request to the associated AP.
*/
int we_lqm_configure_lqm_job(void)
{
#if 0
int r;
// TODO: make it optional to use a pre configured scan job
r = we_lqm_create_scan_job(
wei_netlist_get_current_net(),
&lqm_scan_job_id);
if(r==WIFI_ENGINE_SUCCESS) lqm_enabled = TRUE;
// TODO: add indication for connected/disconnected to avoid scanning when not connected
#endif
return WIFI_ENGINE_SUCCESS;
}
int wei_lqm_shutdown(void)
{
#if 0
if(lqm_enabled == TRUE) {
WiFiEngine_RemoveScanJob(lqm_scan_job_id, NULL);
lqm_enabled = FALSE;
}
#endif
return 0;
}
/*
* Activate the lqm job
*/
int we_lqm_trigger_scan(int peer_status)
{
#if 0
/* TODO: check if connected */
if(lqm_enabled == TRUE) {
wei_send_scan_req(lqm_scan_job_id, 0, 0, 0, 0);
}
#endif
return WIFI_ENGINE_SUCCESS;
}
/** @} */ /* End of we_lqm group */
| gpl-2.0 |
FrozenCow/FIRE-ICE | drivers/net/wireless/bcmdhd/hnd_pktq.c | 357 | 10681 | /*
* HND generic pktq operation primitives
*
* Copyright (C) 1999-2014, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: $
*/
#include <typedefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <hnd_pktq.h>
/*
* osl multiple-precedence packet queue
* hi_prec is always >= the number of the highest non-empty precedence
*/
void * BCMFASTPATH
pktq_penq(struct pktq *pq, int prec, void *p)
{
struct pktq_prec *q;
ASSERT(prec >= 0 && prec < pq->num_prec);
ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
ASSERT(!pktq_full(pq));
ASSERT(!pktq_pfull(pq, prec));
q = &pq->q[prec];
if (q->head)
PKTSETLINK(q->tail, p);
else
q->head = p;
q->tail = p;
q->len++;
pq->len++;
if (pq->hi_prec < prec)
pq->hi_prec = (uint8)prec;
return p;
}
void * BCMFASTPATH
pktq_penq_head(struct pktq *pq, int prec, void *p)
{
struct pktq_prec *q;
ASSERT(prec >= 0 && prec < pq->num_prec);
ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
ASSERT(!pktq_full(pq));
ASSERT(!pktq_pfull(pq, prec));
q = &pq->q[prec];
if (q->head == NULL)
q->tail = p;
PKTSETLINK(p, q->head);
q->head = p;
q->len++;
pq->len++;
if (pq->hi_prec < prec)
pq->hi_prec = (uint8)prec;
return p;
}
/*
* Append spktq 'list' to the tail of pktq 'pq'
*/
void BCMFASTPATH
pktq_append(struct pktq *pq, int prec, struct spktq *list)
{
struct pktq_prec *q;
struct pktq_prec *list_q;
list_q = &list->q[0];
/* empty list check */
if (list_q->head == NULL)
return;
ASSERT(prec >= 0 && prec < pq->num_prec);
ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */
ASSERT(!pktq_full(pq));
ASSERT(!pktq_pfull(pq, prec));
q = &pq->q[prec];
if (q->head)
PKTSETLINK(q->tail, list_q->head);
else
q->head = list_q->head;
q->tail = list_q->tail;
q->len += list_q->len;
pq->len += list_q->len;
if (pq->hi_prec < prec)
pq->hi_prec = (uint8)prec;
list_q->head = NULL;
list_q->tail = NULL;
list_q->len = 0;
list->len = 0;
}
/*
* Prepend spktq 'list' to the head of pktq 'pq'
*/
void BCMFASTPATH
pktq_prepend(struct pktq *pq, int prec, struct spktq *list)
{
struct pktq_prec *q;
struct pktq_prec *list_q;
list_q = &list->q[0];
/* empty list check */
if (list_q->head == NULL)
return;
ASSERT(prec >= 0 && prec < pq->num_prec);
ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */
ASSERT(!pktq_full(pq));
ASSERT(!pktq_pfull(pq, prec));
q = &pq->q[prec];
/* set the tail packet of list to point at the former pq head */
PKTSETLINK(list_q->tail, q->head);
/* the new q head is the head of list */
q->head = list_q->head;
/* If the q tail was non-null, then it stays as is.
* If the q tail was null, it is now the tail of list
*/
if (q->tail == NULL) {
q->tail = list_q->tail;
}
q->len += list_q->len;
pq->len += list_q->len;
if (pq->hi_prec < prec)
pq->hi_prec = (uint8)prec;
list_q->head = NULL;
list_q->tail = NULL;
list_q->len = 0;
list->len = 0;
}
void * BCMFASTPATH
pktq_pdeq(struct pktq *pq, int prec)
{
struct pktq_prec *q;
void *p;
ASSERT(prec >= 0 && prec < pq->num_prec);
q = &pq->q[prec];
if ((p = q->head) == NULL)
return NULL;
if ((q->head = PKTLINK(p)) == NULL)
q->tail = NULL;
q->len--;
pq->len--;
PKTSETLINK(p, NULL);
return p;
}
void * BCMFASTPATH
pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p)
{
struct pktq_prec *q;
void *p;
ASSERT(prec >= 0 && prec < pq->num_prec);
q = &pq->q[prec];
if (prev_p == NULL)
return NULL;
if ((p = PKTLINK(prev_p)) == NULL)
return NULL;
q->len--;
pq->len--;
PKTSETLINK(prev_p, PKTLINK(p));
PKTSETLINK(p, NULL);
return p;
}
void * BCMFASTPATH
pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg)
{
struct pktq_prec *q;
void *p, *prev = NULL;
ASSERT(prec >= 0 && prec < pq->num_prec);
q = &pq->q[prec];
p = q->head;
while (p) {
if (fn == NULL || (*fn)(p, arg)) {
break;
} else {
prev = p;
p = PKTLINK(p);
}
}
if (p == NULL)
return NULL;
if (prev == NULL) {
if ((q->head = PKTLINK(p)) == NULL) {
q->tail = NULL;
}
} else {
PKTSETLINK(prev, PKTLINK(p));
if (q->tail == p) {
q->tail = prev;
}
}
q->len--;
pq->len--;
PKTSETLINK(p, NULL);
return p;
}
void * BCMFASTPATH
pktq_pdeq_tail(struct pktq *pq, int prec)
{
struct pktq_prec *q;
void *p, *prev;
ASSERT(prec >= 0 && prec < pq->num_prec);
q = &pq->q[prec];
if ((p = q->head) == NULL)
return NULL;
for (prev = NULL; p != q->tail; p = PKTLINK(p))
prev = p;
if (prev)
PKTSETLINK(prev, NULL);
else
q->head = NULL;
q->tail = prev;
q->len--;
pq->len--;
return p;
}
void
pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg)
{
struct pktq_prec *q;
void *p, *prev = NULL;
q = &pq->q[prec];
p = q->head;
while (p) {
if (fn == NULL || (*fn)(p, arg)) {
bool head = (p == q->head);
if (head)
q->head = PKTLINK(p);
else
PKTSETLINK(prev, PKTLINK(p));
PKTSETLINK(p, NULL);
PKTFREE(osh, p, dir);
q->len--;
pq->len--;
p = (head ? q->head : PKTLINK(prev));
} else {
prev = p;
p = PKTLINK(p);
}
}
if (q->head == NULL) {
ASSERT(q->len == 0);
q->tail = NULL;
}
}
bool BCMFASTPATH
pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
{
struct pktq_prec *q;
void *p;
ASSERT(prec >= 0 && prec < pq->num_prec);
/* Should this just assert pktbuf? */
if (!pktbuf)
return FALSE;
q = &pq->q[prec];
if (q->head == pktbuf) {
if ((q->head = PKTLINK(pktbuf)) == NULL)
q->tail = NULL;
} else {
for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
;
if (p == NULL)
return FALSE;
PKTSETLINK(p, PKTLINK(pktbuf));
if (q->tail == pktbuf)
q->tail = p;
}
q->len--;
pq->len--;
PKTSETLINK(pktbuf, NULL);
return TRUE;
}
void
pktq_init(struct pktq *pq, int num_prec, int max_len)
{
int prec;
ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
/* pq is variable size; only zero out what's requested */
bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
pq->num_prec = (uint16)num_prec;
pq->max = (uint16)max_len;
for (prec = 0; prec < num_prec; prec++)
pq->q[prec].max = pq->max;
}
void
pktq_set_max_plen(struct pktq *pq, int prec, int max_len)
{
ASSERT(prec >= 0 && prec < pq->num_prec);
if (prec < pq->num_prec)
pq->q[prec].max = (uint16)max_len;
}
void * BCMFASTPATH
pktq_deq(struct pktq *pq, int *prec_out)
{
struct pktq_prec *q;
void *p;
int prec;
if (pq->len == 0)
return NULL;
while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
pq->hi_prec--;
q = &pq->q[prec];
if ((p = q->head) == NULL)
return NULL;
if ((q->head = PKTLINK(p)) == NULL)
q->tail = NULL;
q->len--;
pq->len--;
if (prec_out)
*prec_out = prec;
PKTSETLINK(p, NULL);
return p;
}
void * BCMFASTPATH
pktq_deq_tail(struct pktq *pq, int *prec_out)
{
struct pktq_prec *q;
void *p, *prev;
int prec;
if (pq->len == 0)
return NULL;
for (prec = 0; prec < pq->hi_prec; prec++)
if (pq->q[prec].head)
break;
q = &pq->q[prec];
if ((p = q->head) == NULL)
return NULL;
for (prev = NULL; p != q->tail; p = PKTLINK(p))
prev = p;
if (prev)
PKTSETLINK(prev, NULL);
else
q->head = NULL;
q->tail = prev;
q->len--;
pq->len--;
if (prec_out)
*prec_out = prec;
PKTSETLINK(p, NULL);
return p;
}
void *
pktq_peek(struct pktq *pq, int *prec_out)
{
int prec;
if (pq->len == 0)
return NULL;
while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
pq->hi_prec--;
if (prec_out)
*prec_out = prec;
return (pq->q[prec].head);
}
void *
pktq_peek_tail(struct pktq *pq, int *prec_out)
{
int prec;
if (pq->len == 0)
return NULL;
for (prec = 0; prec < pq->hi_prec; prec++)
if (pq->q[prec].head)
break;
if (prec_out)
*prec_out = prec;
return (pq->q[prec].tail);
}
void
pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg)
{
int prec;
/* Optimize flush, if pktq len = 0, just return.
* pktq len of 0 means pktq's prec q's are all empty.
*/
if (pq->len == 0) {
return;
}
for (prec = 0; prec < pq->num_prec; prec++)
pktq_pflush(osh, pq, prec, dir, fn, arg);
if (fn == NULL)
ASSERT(pq->len == 0);
}
/* Return sum of lengths of a specific set of precedences */
int
pktq_mlen(struct pktq *pq, uint prec_bmp)
{
int prec, len;
len = 0;
for (prec = 0; prec <= pq->hi_prec; prec++)
if (prec_bmp & (1 << prec))
len += pq->q[prec].len;
return len;
}
/* Priority peek from a specific set of precedences */
void * BCMFASTPATH
pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out)
{
struct pktq_prec *q;
void *p;
int prec;
if (pq->len == 0)
{
return NULL;
}
while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
pq->hi_prec--;
while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
if (prec-- == 0)
return NULL;
q = &pq->q[prec];
if ((p = q->head) == NULL)
return NULL;
if (prec_out)
*prec_out = prec;
return p;
}
/* Priority dequeue from a specific set of precedences */
void * BCMFASTPATH
pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
{
struct pktq_prec *q;
void *p;
int prec;
if (pq->len == 0)
return NULL;
while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
pq->hi_prec--;
while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0))
if (prec-- == 0)
return NULL;
q = &pq->q[prec];
if ((p = q->head) == NULL)
return NULL;
if ((q->head = PKTLINK(p)) == NULL)
q->tail = NULL;
q->len--;
if (prec_out)
*prec_out = prec;
pq->len--;
PKTSETLINK(p, NULL);
return p;
}
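/*
 * Illustrative usage sketch (not part of the original source and compiled
 * out). The osl handle, packet pointers and precedence values are
 * placeholders.
 */
#if 0
static void example_pktq_usage(osl_t *osh, void *pkt_lo, void *pkt_hi)
{
	struct pktq q;
	int prec;
	void *p;

	pktq_init(&q, 4, 128);		/* 4 precedences, 128 packets max */
	pktq_penq(&q, 0, pkt_lo);	/* low precedence */
	pktq_penq(&q, 3, pkt_hi);	/* high precedence */

	/* pktq_deq() returns the highest non-empty precedence first */
	p = pktq_deq(&q, &prec);	/* returns pkt_hi, prec == 3 */
	p = pktq_deq(&q, &prec);	/* returns pkt_lo, prec == 0 */

	/* free anything left over */
	pktq_flush(osh, &q, FALSE, NULL, 0);
}
#endif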
| gpl-2.0 |
vmayoral/snappy-kernel | drivers/gpu/drm/msm/msm_gem_submit.c | 869 | 10078 | /*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
/*
* Cmdstream submission:
*/
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID 0x8000
#define BO_LOCKED 0x4000
#define BO_PINNED 0x2000
static inline void __user *to_user_ptr(u64 address)
{
return (void __user *)(uintptr_t)address;
}
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, int nr)
{
struct msm_gem_submit *submit;
int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (submit) {
submit->dev = dev;
submit->gpu = gpu;
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
submit->nr_cmds = 0;
INIT_LIST_HEAD(&submit->bo_list);
ww_acquire_init(&submit->ticket, &reservation_ww_class);
}
return submit;
}
static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
unsigned i;
int ret = 0;
spin_lock(&file->table_lock);
for (i = 0; i < args->nr_bos; i++) {
struct drm_msm_gem_submit_bo submit_bo;
struct drm_gem_object *obj;
struct msm_gem_object *msm_obj;
void __user *userptr =
to_user_ptr(args->bos + (i * sizeof(submit_bo)));
ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
if (ret) {
ret = -EFAULT;
goto out_unlock;
}
if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
}
submit->bos[i].flags = submit_bo.flags;
/* in validate_objects() we figure out if this is true: */
submit->bos[i].iova = submit_bo.presumed;
/* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
*/
obj = idr_find(&file->object_idr, submit_bo.handle);
if (!obj) {
DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
ret = -EINVAL;
goto out_unlock;
}
msm_obj = to_msm_bo(obj);
if (!list_empty(&msm_obj->submit_entry)) {
DRM_ERROR("handle %u at index %u already on submit list\n",
submit_bo.handle, i);
ret = -EINVAL;
goto out_unlock;
}
drm_gem_object_reference(obj);
submit->bos[i].obj = msm_obj;
list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
}
out_unlock:
submit->nr_bos = i;
spin_unlock(&file->table_lock);
return ret;
}
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
if (!(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0;
submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_validate_objects(struct msm_gem_submit *submit)
{
int contended, slow_locked = -1, i, ret = 0;
retry:
submit->valid = true;
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
uint32_t iova;
if (slow_locked == i)
slow_locked = -1;
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
&submit->ticket);
if (ret)
goto fail;
submit->bos[i].flags |= BO_LOCKED;
}
/* if locking succeeded, pin bo: */
ret = msm_gem_get_iova_locked(&msm_obj->base,
submit->gpu->id, &iova);
/* this would break the logic in the fail path.. there is no
* reason for this to happen, but just to be on the safe side
* let's notice if this starts happening in the future:
*/
WARN_ON(ret == -EDEADLK);
if (ret)
goto fail;
submit->bos[i].flags |= BO_PINNED;
if (iova == submit->bos[i].iova) {
submit->bos[i].flags |= BO_VALID;
} else {
submit->bos[i].iova = iova;
submit->bos[i].flags &= ~BO_VALID;
submit->valid = false;
}
}
ww_acquire_done(&submit->ticket);
return 0;
fail:
for (; i >= 0; i--)
submit_unlock_unpin_bo(submit, i);
if (slow_locked > 0)
submit_unlock_unpin_bo(submit, slow_locked);
if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj;
/* we lost out in a seqno race, lock and retry.. */
ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
&submit->ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
slow_locked = contended;
goto retry;
}
}
return ret;
}
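/*
 * Note on the retry path above: when ww_mutex_lock_interruptible() returns
 * -EDEADLK, all locks taken so far are dropped in the fail loop, the
 * contended object is then locked with the _slow variant and kept held
 * (slow_locked), and the whole validation pass restarts from the retry
 * label.
 */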
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
struct msm_gem_object **obj, uint32_t *iova, bool *valid)
{
if (idx >= submit->nr_bos) {
DRM_ERROR("invalid buffer index: %u (out of %u)\n",
idx, submit->nr_bos);
return -EINVAL;
}
if (obj)
*obj = submit->bos[idx].obj;
if (iova)
*iova = submit->bos[idx].iova;
if (valid)
*valid = !!(submit->bos[idx].flags & BO_VALID);
return 0;
}
/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
uint32_t i, last_offset = 0;
uint32_t *ptr;
int ret;
if (offset % 4) {
DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
return -EINVAL;
}
	/* For now, just map the entire thing. Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
*/
ptr = msm_gem_vaddr_locked(&obj->base);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
DBG("failed to map: %d", ret);
return ret;
}
for (i = 0; i < nr_relocs; i++) {
struct drm_msm_gem_submit_reloc submit_reloc;
void __user *userptr =
to_user_ptr(relocs + (i * sizeof(submit_reloc)));
uint32_t iova, off;
bool valid;
ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
if (ret)
return -EFAULT;
if (submit_reloc.submit_offset % 4) {
DRM_ERROR("non-aligned reloc offset: %u\n",
submit_reloc.submit_offset);
return -EINVAL;
}
/* offset in dwords: */
off = submit_reloc.submit_offset / 4;
if ((off >= (obj->base.size / 4)) ||
(off < last_offset)) {
DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
return -EINVAL;
}
ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
if (ret)
return ret;
if (valid)
continue;
iova += submit_reloc.reloc_offset;
if (submit_reloc.shift < 0)
iova >>= -submit_reloc.shift;
else
iova <<= submit_reloc.shift;
ptr[off] = iova | submit_reloc.or;
last_offset = off;
}
return 0;
}
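/*
 * Worked example for the patching above, for illustration only (the values
 * are hypothetical): a reloc entry with submit_offset == 8, reloc_idx == 2,
 * reloc_offset == 0x100, shift == 0 and or == 0 patches cmdstream dword
 * ptr[2] to submit->bos[2].iova + 0x100, provided that bo was not already
 * mapped at its presumed (BO_VALID) address.
 */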
static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
{
unsigned i;
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i);
list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base);
}
ww_acquire_fini(&submit->ticket);
kfree(submit);
}
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit;
struct msm_gpu *gpu;
unsigned i;
int ret;
	/* for now, we just have the 3D pipe.. eventually this would need to
	 * be more clever to dispatch to the appropriate gpu module:
*/
if (args->pipe != MSM_PIPE_3D0)
return -EINVAL;
gpu = priv->gpu;
if (args->nr_cmds > MAX_CMDS)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
submit = submit_create(dev, gpu, args->nr_bos);
if (!submit) {
ret = -ENOMEM;
goto out;
}
ret = submit_lookup_objects(submit, args, file);
if (ret)
goto out;
ret = submit_validate_objects(submit);
if (ret)
goto out;
for (i = 0; i < args->nr_cmds; i++) {
struct drm_msm_gem_submit_cmd submit_cmd;
void __user *userptr =
to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
struct msm_gem_object *msm_obj;
uint32_t iova;
ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
if (ret) {
ret = -EFAULT;
goto out;
}
/* validate input from userspace: */
switch (submit_cmd.type) {
case MSM_SUBMIT_CMD_BUF:
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
break;
default:
DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
ret = -EINVAL;
goto out;
}
ret = submit_bo(submit, submit_cmd.submit_idx,
&msm_obj, &iova, NULL);
if (ret)
goto out;
if (submit_cmd.size % 4) {
DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
submit_cmd.size);
ret = -EINVAL;
goto out;
}
if ((submit_cmd.size + submit_cmd.submit_offset) >=
msm_obj->base.size) {
DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
ret = -EINVAL;
goto out;
}
submit->cmd[i].type = submit_cmd.type;
submit->cmd[i].size = submit_cmd.size / 4;
submit->cmd[i].iova = iova + submit_cmd.submit_offset;
submit->cmd[i].idx = submit_cmd.submit_idx;
if (submit->valid)
continue;
ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
submit_cmd.nr_relocs, submit_cmd.relocs);
if (ret)
goto out;
}
submit->nr_cmds = i;
ret = msm_gpu_submit(gpu, submit, ctx);
args->fence = submit->fence;
out:
if (submit)
submit_cleanup(submit, !!ret);
mutex_unlock(&dev->struct_mutex);
return ret;
}
| gpl-2.0 |
thiz11/kernel_mediatek_wiko | drivers/usb/serial/visor.c | 869 | 21696 | /*
* USB HandSpring Visor, Palm m50x, and Sony Clie driver
* (supports all of the Palm OS USB devices)
*
* Copyright (C) 1999 - 2004
* Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/usb/cdc.h>
#include "visor.h"
/*
* Version Information
*/
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>"
#define DRIVER_DESC "USB HandSpring Visor / Palm OS driver"
/* function prototypes for a handspring visor */
static int visor_open(struct tty_struct *tty, struct usb_serial_port *port);
static void visor_close(struct usb_serial_port *port);
static int visor_probe(struct usb_serial *serial,
const struct usb_device_id *id);
static int visor_calc_num_ports(struct usb_serial *serial);
static void visor_read_int_callback(struct urb *urb);
static int clie_3_5_startup(struct usb_serial *serial);
static int treo_attach(struct usb_serial *serial);
static int clie_5_attach(struct usb_serial *serial);
static int palm_os_3_probe(struct usb_serial *serial,
const struct usb_device_id *id);
static int palm_os_4_probe(struct usb_serial *serial,
const struct usb_device_id *id);
/* Parameters that may be passed into the module. */
static bool debug;
static __u16 vendor;
static __u16 product;
static struct usb_device_id id_table [] = {
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID),
.driver_info = (kernel_ulong_t)&palm_os_3_probe },
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO600_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(GSPDA_VENDOR_ID, GSPDA_XPLORE_M68_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M500_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M505_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M515_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_I705_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M100_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M125_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M130_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_TREO_650),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_S360_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_1_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NX60_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NZ90V_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_TJ25_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(TAPWAVE_VENDOR_ID, TAPWAVE_ZODIAC_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(GARMIN_VENDOR_ID, GARMIN_IQUE_3600_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(ACEECA_VENDOR_ID, ACEECA_MEZ1000_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_7135_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(FOSSIL_VENDOR_ID, FOSSIL_ABACUS_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ }, /* optional parameter entry */
{ } /* Terminating entry */
};
static struct usb_device_id clie_id_5_table [] = {
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_UX50_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ }, /* optional parameter entry */
{ } /* Terminating entry */
};
static struct usb_device_id clie_id_3_5_table [] = {
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) },
{ } /* Terminating entry */
};
static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID) },
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO_ID) },
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO600_ID) },
{ USB_DEVICE(GSPDA_VENDOR_ID, GSPDA_XPLORE_M68_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M500_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M505_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M515_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_I705_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M100_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M125_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M130_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_TREO_650) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_S360_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_1_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NX60_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NZ90V_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_UX50_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_TJ25_ID) },
{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID) },
{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID) },
{ USB_DEVICE(TAPWAVE_VENDOR_ID, TAPWAVE_ZODIAC_ID) },
{ USB_DEVICE(GARMIN_VENDOR_ID, GARMIN_IQUE_3600_ID) },
{ USB_DEVICE(ACEECA_VENDOR_ID, ACEECA_MEZ1000_ID) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_7135_ID) },
{ USB_DEVICE(FOSSIL_VENDOR_ID, FOSSIL_ABACUS_ID) },
{ }, /* optional parameter entry */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
static struct usb_driver visor_driver = {
.name = "visor",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table_combined,
};
/* All of the device info needed for the Handspring Visor,
and Palm 4.0 devices */
static struct usb_serial_driver handspring_device = {
.driver = {
.owner = THIS_MODULE,
.name = "visor",
},
.description = "Handspring Visor / Palm OS",
.id_table = id_table,
.num_ports = 2,
.bulk_out_size = 256,
.open = visor_open,
.close = visor_close,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.attach = treo_attach,
.probe = visor_probe,
.calc_num_ports = visor_calc_num_ports,
.read_int_callback = visor_read_int_callback,
};
/* All of the device info needed for the Clie UX50, TH55 Palm 5.0 devices */
static struct usb_serial_driver clie_5_device = {
.driver = {
.owner = THIS_MODULE,
.name = "clie_5",
},
.description = "Sony Clie 5.0",
.id_table = clie_id_5_table,
.num_ports = 2,
.bulk_out_size = 256,
.open = visor_open,
.close = visor_close,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.attach = clie_5_attach,
.probe = visor_probe,
.calc_num_ports = visor_calc_num_ports,
.read_int_callback = visor_read_int_callback,
};
/* device info for the Sony Clie OS version 3.5 */
static struct usb_serial_driver clie_3_5_device = {
.driver = {
.owner = THIS_MODULE,
.name = "clie_3.5",
},
.description = "Sony Clie 3.5",
.id_table = clie_id_3_5_table,
.num_ports = 1,
.bulk_out_size = 256,
.open = visor_open,
.close = visor_close,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.attach = clie_3_5_startup,
};
static struct usb_serial_driver * const serial_drivers[] = {
&handspring_device, &clie_5_device, &clie_3_5_device, NULL
};
/******************************************************************************
* Handspring Visor specific driver functions
******************************************************************************/
static int visor_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int result = 0;
dbg("%s - port %d", __func__, port->number);
if (!port->read_urb) {
/* this is needed for some brain dead Sony devices */
dev_err(&port->dev, "Device lied about number of ports, please use a lower one.\n");
return -ENODEV;
}
/* Start reading from the device */
result = usb_serial_generic_open(tty, port);
if (result)
goto exit;
if (port->interrupt_in_urb) {
dbg("%s - adding interrupt input for treo", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result)
dev_err(&port->dev,
"%s - failed submitting interrupt urb, error %d\n",
__func__, result);
}
exit:
return result;
}
static void visor_close(struct usb_serial_port *port)
{
unsigned char *transfer_buffer;
dbg("%s - port %d", __func__, port->number);
/* shutdown our urbs */
usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
mutex_lock(&port->serial->disc_mutex);
if (!port->serial->disconnected) {
/* Try to send shutdown message, unless the device is gone */
transfer_buffer = kmalloc(0x12, GFP_KERNEL);
if (transfer_buffer) {
usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
VISOR_CLOSE_NOTIFICATION, 0xc2,
0x0000, 0x0000,
transfer_buffer, 0x12, 300);
kfree(transfer_buffer);
}
}
mutex_unlock(&port->serial->disc_mutex);
}
static void visor_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int status = urb->status;
int result;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d",
__func__, status);
return;
default:
dbg("%s - nonzero urb status received: %d",
__func__, status);
goto exit;
}
/*
	 * It is still unknown what this information can be used for.
* If anyone has an idea, please let the author know...
*
* Rumor has it this endpoint is used to notify when data
* is ready to be read from the bulk ones.
*/
usb_serial_debug_data(debug, &port->dev, __func__,
urb->actual_length, urb->transfer_buffer);
exit:
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev,
"%s - Error %d submitting interrupt urb\n",
__func__, result);
}
static int palm_os_3_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
struct device *dev = &serial->dev->dev;
struct visor_connection_info *connection_info;
unsigned char *transfer_buffer;
char *string;
int retval = 0;
int i;
int num_ports = 0;
dbg("%s", __func__);
transfer_buffer = kmalloc(sizeof(*connection_info), GFP_KERNEL);
if (!transfer_buffer) {
dev_err(dev, "%s - kmalloc(%Zd) failed.\n", __func__,
sizeof(*connection_info));
return -ENOMEM;
}
/* send a get connection info request */
retval = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
VISOR_GET_CONNECTION_INFORMATION,
0xc2, 0x0000, 0x0000, transfer_buffer,
sizeof(*connection_info), 300);
if (retval < 0) {
dev_err(dev, "%s - error %d getting connection information\n",
__func__, retval);
goto exit;
}
if (retval == sizeof(*connection_info)) {
connection_info = (struct visor_connection_info *)
transfer_buffer;
num_ports = le16_to_cpu(connection_info->num_ports);
for (i = 0; i < num_ports; ++i) {
switch (
connection_info->connections[i].port_function_id) {
case VISOR_FUNCTION_GENERIC:
string = "Generic";
break;
case VISOR_FUNCTION_DEBUGGER:
string = "Debugger";
break;
case VISOR_FUNCTION_HOTSYNC:
string = "HotSync";
break;
case VISOR_FUNCTION_CONSOLE:
string = "Console";
break;
case VISOR_FUNCTION_REMOTE_FILE_SYS:
string = "Remote File System";
break;
default:
string = "unknown";
break;
}
dev_info(dev, "%s: port %d, is for %s use\n",
serial->type->description,
connection_info->connections[i].port, string);
}
}
/*
* Handle devices that report invalid stuff here.
*/
if (num_ports == 0 || num_ports > 2) {
dev_warn(dev, "%s: No valid connect info available\n",
serial->type->description);
num_ports = 2;
}
dev_info(dev, "%s: Number of ports: %d\n", serial->type->description,
num_ports);
/*
* save off our num_ports info so that we can use it in the
* calc_num_ports callback
*/
usb_set_serial_data(serial, (void *)(long)num_ports);
/* ask for the number of bytes available, but ignore the
response as it is broken */
retval = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
VISOR_REQUEST_BYTES_AVAILABLE,
0xc2, 0x0000, 0x0005, transfer_buffer,
0x02, 300);
if (retval < 0)
dev_err(dev, "%s - error %d getting bytes available request\n",
__func__, retval);
retval = 0;
exit:
kfree(transfer_buffer);
return retval;
}
static int palm_os_4_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
struct device *dev = &serial->dev->dev;
struct palm_ext_connection_info *connection_info;
unsigned char *transfer_buffer;
int retval;
dbg("%s", __func__);
transfer_buffer = kmalloc(sizeof(*connection_info), GFP_KERNEL);
if (!transfer_buffer) {
dev_err(dev, "%s - kmalloc(%Zd) failed.\n", __func__,
sizeof(*connection_info));
return -ENOMEM;
}
retval = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
PALM_GET_EXT_CONNECTION_INFORMATION,
0xc2, 0x0000, 0x0000, transfer_buffer,
sizeof(*connection_info), 300);
if (retval < 0)
dev_err(dev, "%s - error %d getting connection info\n",
__func__, retval);
else
usb_serial_debug_data(debug, &serial->dev->dev, __func__,
retval, transfer_buffer);
kfree(transfer_buffer);
return 0;
}
static int visor_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
int retval = 0;
int (*startup)(struct usb_serial *serial,
const struct usb_device_id *id);
dbg("%s", __func__);
/*
* Some Samsung Android phones in modem mode have the same ID
* as the SPH-I500, but they are ACM devices, so don't bind to them
*/
if (id->idVendor == SAMSUNG_VENDOR_ID &&
id->idProduct == SAMSUNG_SPH_I500_ID &&
serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
serial->dev->descriptor.bDeviceSubClass ==
USB_CDC_SUBCLASS_ACM)
return -ENODEV;
if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
serial->dev->actconfig->desc.bConfigurationValue);
return -ENODEV;
}
if (id->driver_info) {
startup = (void *)id->driver_info;
retval = startup(serial, id);
}
return retval;
}
static int visor_calc_num_ports(struct usb_serial *serial)
{
int num_ports = (int)(long)(usb_get_serial_data(serial));
if (num_ports)
usb_set_serial_data(serial, NULL);
return num_ports;
}
static int clie_3_5_startup(struct usb_serial *serial)
{
struct device *dev = &serial->dev->dev;
int result;
u8 *data;
dbg("%s", __func__);
data = kmalloc(1, GFP_KERNEL);
if (!data)
return -ENOMEM;
/*
* Note that PEG-300 series devices expect the following two calls.
*/
/* get the config number */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQ_GET_CONFIGURATION, USB_DIR_IN,
0, 0, data, 1, 3000);
if (result < 0) {
dev_err(dev, "%s: get config number failed: %d\n",
__func__, result);
goto out;
}
if (result != 1) {
dev_err(dev, "%s: get config number bad return length: %d\n",
__func__, result);
result = -EIO;
goto out;
}
/* get the interface number */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQ_GET_INTERFACE,
USB_DIR_IN | USB_RECIP_INTERFACE,
0, 0, data, 1, 3000);
if (result < 0) {
dev_err(dev, "%s: get interface number failed: %d\n",
__func__, result);
goto out;
}
if (result != 1) {
dev_err(dev,
"%s: get interface number bad return length: %d\n",
__func__, result);
result = -EIO;
goto out;
}
result = 0;
out:
kfree(data);
return result;
}
static int treo_attach(struct usb_serial *serial)
{
struct usb_serial_port *swap_port;
/* Only do this endpoint hack for the Handspring devices with
* interrupt in endpoints, which for now are the Treo devices. */
if (!((le16_to_cpu(serial->dev->descriptor.idVendor)
== HANDSPRING_VENDOR_ID) ||
(le16_to_cpu(serial->dev->descriptor.idVendor)
== KYOCERA_VENDOR_ID)) ||
(serial->num_interrupt_in == 0))
return 0;
dbg("%s", __func__);
/*
* It appears that Treos and Kyoceras want to use the
* 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
* so let's swap the 1st and 2nd bulk in and interrupt endpoints.
* Note that swapping the bulk out endpoints would break lots of
* apps that want to communicate on the second port.
*/
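/*
 * COPY_PORT() below shallow-copies the bulk-in and interrupt-in state
 * (URBs, buffers, endpoint addresses) from src to dest and re-points
 * the copied URBs' context at dest, so completions are delivered to
 * the right port after the swap.
 */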
#define COPY_PORT(dest, src) \
do { \
int i; \
\
for (i = 0; i < ARRAY_SIZE(src->read_urbs); ++i) { \
dest->read_urbs[i] = src->read_urbs[i]; \
dest->read_urbs[i]->context = dest; \
dest->bulk_in_buffers[i] = src->bulk_in_buffers[i]; \
} \
dest->read_urb = src->read_urb; \
dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\
dest->bulk_in_buffer = src->bulk_in_buffer; \
dest->bulk_in_size = src->bulk_in_size; \
dest->interrupt_in_urb = src->interrupt_in_urb; \
dest->interrupt_in_urb->context = dest; \
dest->interrupt_in_endpointAddress = \
src->interrupt_in_endpointAddress;\
dest->interrupt_in_buffer = src->interrupt_in_buffer; \
} while (0)
swap_port = kmalloc(sizeof(*swap_port), GFP_KERNEL);
if (!swap_port)
return -ENOMEM;
COPY_PORT(swap_port, serial->port[0]);
COPY_PORT(serial->port[0], serial->port[1]);
COPY_PORT(serial->port[1], swap_port);
kfree(swap_port);
return 0;
}
static int clie_5_attach(struct usb_serial *serial)
{
struct usb_serial_port *port;
unsigned int pipe;
int j;
dbg("%s", __func__);
/* TH55 registers 2 ports.
Communication in from the UX50/TH55 uses bulk_in_endpointAddress
from port 0. Communication out to the UX50/TH55 uses
bulk_out_endpointAddress from port 1.
Let's do a quick and dirty mapping.
*/
/* some sanity check */
if (serial->num_ports < 2)
return -1;
/* port 0 now uses the modified endpoint Address */
port = serial->port[0];
port->bulk_out_endpointAddress =
serial->port[1]->bulk_out_endpointAddress;
pipe = usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress);
for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j)
port->write_urbs[j]->pipe = pipe;
return 0;
}
static int __init visor_init(void)
{
int i, retval;
/* Only if parameters were passed to us */
if (vendor > 0 && product > 0) {
struct usb_device_id usb_dev_temp[] = {
{
USB_DEVICE(vendor, product),
.driver_info =
(kernel_ulong_t) &palm_os_4_probe
}
};
/* Find the last entry in id_table */
for (i = 0;; i++) {
if (id_table[i].idVendor == 0) {
id_table[i] = usb_dev_temp[0];
break;
}
}
/* Find the last entry in id_table_combined */
for (i = 0;; i++) {
if (id_table_combined[i].idVendor == 0) {
id_table_combined[i] = usb_dev_temp[0];
break;
}
}
printk(KERN_INFO KBUILD_MODNAME
": Untested USB device specified at time of module insertion\n");
printk(KERN_INFO KBUILD_MODNAME
": Warning: This is not guaranteed to work\n");
printk(KERN_INFO KBUILD_MODNAME
": Using a newer kernel is preferred to this method\n");
printk(KERN_INFO KBUILD_MODNAME
": Adding Palm OS protocol 4.x support for unknown device: 0x%x/0x%x\n",
vendor, product);
}
retval = usb_serial_register_drivers(&visor_driver, serial_drivers);
if (retval == 0)
printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");
return retval;
}
static void __exit visor_exit (void)
{
usb_serial_deregister_drivers(&visor_driver, serial_drivers);
}
module_init(visor_init);
module_exit(visor_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "User specified vendor ID");
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "User specified product ID");
| gpl-2.0 |
WaRP7/linux-fslc | drivers/net/wan/pci200syn.c | 1637 | 11475 | /*
* Goramo PCI200SYN synchronous serial card driver for Linux
*
* Copyright (C) 2002-2008 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
*
* Sources of information:
* Hitachi HD64572 SCA-II User's Manual
* PLX Technology Inc. PCI9052 Data Book
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <asm/io.h>
#include "hd64572.h"
#undef DEBUG_PKT
#define DEBUG_RINGS
#define PCI200SYN_PLX_SIZE 0x80 /* PLX control window size (128b) */
#define PCI200SYN_SCA_SIZE 0x400 /* SCA window size (1Kb) */
#define MAX_TX_BUFFERS 10
static int pci_clock_freq = 33000000;
#define CLOCK_BASE pci_clock_freq
/*
* PLX PCI9052 local configuration and shared runtime registers.
* This structure can be used to access 9052 registers (memory mapped).
*/
typedef struct {
u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
u32 loc_rom_range; /* 10h : Local ROM Range */
u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
u32 loc_rom_base; /* 24h : Local ROM Base */
u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */
u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
}plx9052;
typedef struct port_s {
struct napi_struct napi;
struct net_device *netdev;
struct card_s *card;
spinlock_t lock; /* TX lock */
sync_serial_settings settings;
int rxpart; /* partial frame received, next frame invalid*/
unsigned short encoding;
unsigned short parity;
u16 rxin; /* rx ring buffer 'in' pointer */
u16 txin; /* tx ring buffer 'in' and 'last' pointers */
u16 txlast;
u8 rxs, txs, tmc; /* SCA registers */
u8 chan; /* physical port # - 0 or 1 */
}port_t;
typedef struct card_s {
u8 __iomem *rambase; /* buffer memory base (virtual) */
u8 __iomem *scabase; /* SCA memory base (virtual) */
plx9052 __iomem *plxbase;/* PLX registers memory base (virtual) */
u16 rx_ring_buffers; /* number of buffers in a ring */
u16 tx_ring_buffers;
u16 buff_offset; /* offset of first buffer of first channel */
u8 irq; /* interrupt request level */
port_t ports[2];
}card_t;
#define get_port(card, port) (&card->ports[port])
#define sca_flush(card) (sca_in(IER0, card))
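/*
 * new_memcpy_toio() below copies to card memory in chunks of at most
 * 256 bytes and reads a byte back from the card after each chunk; the
 * read-back presumably flushes posted writes on the PLX bridge (an
 * assumption - the original code gives no rationale). hd64572.c,
 * included further down after memcpy_toio is redefined, then uses this
 * chunked variant.
 */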
static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
{
int len;
do {
len = length > 256 ? 256 : length;
memcpy_toio(dest, src, len);
dest += len;
src += len;
length -= len;
readb(dest);
} while (len);
}
#undef memcpy_toio
#define memcpy_toio new_memcpy_toio
#include "hd64572.c"
static void pci200_set_iface(port_t *port)
{
card_t *card = port->card;
u16 msci = get_msci(port);
u8 rxs = port->rxs & CLK_BRG_MASK;
u8 txs = port->txs & CLK_BRG_MASK;
sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
port->card);
switch(port->settings.clock_type) {
case CLOCK_INT:
rxs |= CLK_BRG; /* BRG output */
txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
break;
case CLOCK_TXINT:
rxs |= CLK_LINE; /* RXC input */
txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */
break;
case CLOCK_TXFROMRX:
rxs |= CLK_LINE; /* RXC input */
txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
break;
default: /* EXTernal clock */
rxs |= CLK_LINE; /* RXC input */
txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */
break;
}
port->rxs = rxs;
port->txs = txs;
sca_out(rxs, msci + RXS, card);
sca_out(txs, msci + TXS, card);
sca_set_port(port);
}
static int pci200_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
int result = hdlc_open(dev);
if (result)
return result;
sca_open(dev);
pci200_set_iface(port);
sca_flush(port->card);
return 0;
}
static int pci200_close(struct net_device *dev)
{
sca_close(dev);
sca_flush(dev_to_port(dev)->card);
hdlc_close(dev);
return 0;
}
static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
port_t *port = dev_to_port(dev);
#ifdef DEBUG_RINGS
if (cmd == SIOCDEVPRIVATE) {
sca_dump_rings(dev);
return 0;
}
#endif
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
switch(ifr->ifr_settings.type) {
case IF_GET_IFACE:
ifr->ifr_settings.type = IF_IFACE_V35;
if (ifr->ifr_settings.size < size) {
ifr->ifr_settings.size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(line, &port->settings, size))
return -EFAULT;
return 0;
case IF_IFACE_V35:
case IF_IFACE_SYNC_SERIAL:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&new_line, line, size))
return -EFAULT;
if (new_line.clock_type != CLOCK_EXT &&
new_line.clock_type != CLOCK_TXFROMRX &&
new_line.clock_type != CLOCK_INT &&
new_line.clock_type != CLOCK_TXINT)
return -EINVAL; /* No such clock setting */
if (new_line.loopback != 0 && new_line.loopback != 1)
return -EINVAL;
memcpy(&port->settings, &new_line, size); /* Update settings */
pci200_set_iface(port);
sca_flush(port->card);
return 0;
default:
return hdlc_ioctl(dev, ifr, cmd);
}
}
static void pci200_pci_remove_one(struct pci_dev *pdev)
{
int i;
card_t *card = pci_get_drvdata(pdev);
for (i = 0; i < 2; i++)
if (card->ports[i].card)
unregister_hdlc_device(card->ports[i].netdev);
if (card->irq)
free_irq(card->irq, card);
if (card->rambase)
iounmap(card->rambase);
if (card->scabase)
iounmap(card->scabase);
if (card->plxbase)
iounmap(card->plxbase);
pci_release_regions(pdev);
pci_disable_device(pdev);
if (card->ports[0].netdev)
free_netdev(card->ports[0].netdev);
if (card->ports[1].netdev)
free_netdev(card->ports[1].netdev);
kfree(card);
}
static const struct net_device_ops pci200_ops = {
.ndo_open = pci200_open,
.ndo_stop = pci200_close,
.ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = pci200_ioctl,
};
static int pci200_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
card_t *card;
u32 __iomem *p;
int i;
u32 ramsize;
u32 ramphys; /* buffer memory base */
u32 scaphys; /* SCA memory base */
u32 plxphys; /* PLX registers memory base */
i = pci_enable_device(pdev);
if (i)
return i;
i = pci_request_regions(pdev, "PCI200SYN");
if (i) {
pci_disable_device(pdev);
return i;
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
}
pci_set_drvdata(pdev, card);
card->ports[0].netdev = alloc_hdlcdev(&card->ports[0]);
card->ports[1].netdev = alloc_hdlcdev(&card->ports[1]);
if (!card->ports[0].netdev || !card->ports[1].netdev) {
pr_err("unable to allocate memory\n");
pci200_pci_remove_one(pdev);
return -ENOMEM;
}
if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE ||
pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE ||
pci_resource_len(pdev, 3) < 16384) {
pr_err("invalid card EEPROM parameters\n");
pci200_pci_remove_one(pdev);
return -EFAULT;
}
plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE);
scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE);
ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
card->rambase = pci_ioremap_bar(pdev, 3);
if (card->plxbase == NULL ||
card->scabase == NULL ||
card->rambase == NULL) {
pr_err("ioremap() failed\n");
pci200_pci_remove_one(pdev);
return -EFAULT;
}
/* Reset PLX */
p = &card->plxbase->init_ctrl;
writel(readl(p) | 0x40000000, p);
readl(p); /* Flush the write - do not use sca_flush */
udelay(1);
writel(readl(p) & ~0x40000000, p);
readl(p); /* Flush the write - do not use sca_flush */
udelay(1);
ramsize = sca_detect_ram(card, card->rambase,
pci_resource_len(pdev, 3));
/* number of TX + RX buffers for one port - this is dual port card */
i = ramsize / (2 * (sizeof(pkt_desc) + HDLC_MAX_MRU));
card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
card->rx_ring_buffers = i - card->tx_ring_buffers;
card->buff_offset = 2 * sizeof(pkt_desc) * (card->tx_ring_buffers +
card->rx_ring_buffers);
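/*
 * i above is the number of descriptor+buffer slots that fit per port
 * (the detected RAM is split between the two ports); at most
 * MAX_TX_BUFFERS (and at most half) of them go to TX, the remainder to
 * RX, and the packet buffers start right after the two channels'
 * descriptor rings.
 */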
pr_info("%u KB RAM at 0x%x, IRQ%u, using %u TX + %u RX packets rings\n",
ramsize / 1024, ramphys,
pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers);
if (card->tx_ring_buffers < 1) {
pr_err("RAM test failed\n");
pci200_pci_remove_one(pdev);
return -EFAULT;
}
/* Enable interrupts on the PCI bridge */
p = &card->plxbase->intr_ctrl_stat;
writew(readw(p) | 0x0040, p);
/* Allocate IRQ */
if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pci200syn", card)) {
pr_warn("could not allocate IRQ%d\n", pdev->irq);
pci200_pci_remove_one(pdev);
return -EBUSY;
}
card->irq = pdev->irq;
sca_init(card, 0);
for (i = 0; i < 2; i++) {
port_t *port = &card->ports[i];
struct net_device *dev = port->netdev;
hdlc_device *hdlc = dev_to_hdlc(dev);
port->chan = i;
spin_lock_init(&port->lock);
dev->irq = card->irq;
dev->mem_start = ramphys;
dev->mem_end = ramphys + ramsize - 1;
dev->tx_queue_len = 50;
dev->netdev_ops = &pci200_ops;
hdlc->attach = sca_attach;
hdlc->xmit = sca_xmit;
port->settings.clock_type = CLOCK_EXT;
port->card = card;
sca_init_port(port);
if (register_hdlc_device(dev)) {
pr_err("unable to register hdlc device\n");
port->card = NULL;
pci200_pci_remove_one(pdev);
return -ENOBUFS;
}
netdev_info(dev, "PCI200SYN channel %d\n", port->chan);
}
sca_flush(card);
return 0;
}
static const struct pci_device_id pci200_pci_tbl[] = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
{ 0, }
};
static struct pci_driver pci200_pci_driver = {
.name = "PCI200SYN",
.id_table = pci200_pci_tbl,
.probe = pci200_pci_init_one,
.remove = pci200_pci_remove_one,
};
static int __init pci200_init_module(void)
{
if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
pr_err("Invalid PCI clock frequency\n");
return -EINVAL;
}
return pci_register_driver(&pci200_pci_driver);
}
static void __exit pci200_cleanup_module(void)
{
pci_unregister_driver(&pci200_pci_driver);
}
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Goramo PCI200SYN serial port driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pci200_pci_tbl);
module_param(pci_clock_freq, int, 0444);
MODULE_PARM_DESC(pci_clock_freq, "System PCI clock frequency in Hz");
module_init(pci200_init_module);
module_exit(pci200_cleanup_module);
| gpl-2.0 |
qkdxorjs1002/nov_kernel_razr | sound/soc/codecs/ak4104.c | 2917 | 7467 | /*
* AK4104 ALSA SoC (ASoC) driver
*
* Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <linux/spi/spi.h>
#include <sound/asoundef.h>
/* AK4104 registers addresses */
#define AK4104_REG_CONTROL1 0x00
#define AK4104_REG_RESERVED 0x01
#define AK4104_REG_CONTROL2 0x02
#define AK4104_REG_TX 0x03
#define AK4104_REG_CHN_STATUS(x) ((x) + 0x04)
#define AK4104_NUM_REGS 10
#define AK4104_REG_MASK 0x1f
#define AK4104_READ 0xc0
#define AK4104_WRITE 0xe0
#define AK4104_RESERVED_VAL 0x5b
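/*
 * SPI command byte layout as used below: the 5-bit register address
 * (AK4104_REG_MASK) is OR'd with AK4104_READ for reads via spi_w8r8(),
 * or with AK4104_WRITE for writes, in which case the value to write
 * follows in a second byte.
 */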
/* Bit masks for AK4104 registers */
#define AK4104_CONTROL1_RSTN (1 << 0)
#define AK4104_CONTROL1_PW (1 << 1)
#define AK4104_CONTROL1_DIF0 (1 << 2)
#define AK4104_CONTROL1_DIF1 (1 << 3)
#define AK4104_CONTROL2_SEL0 (1 << 0)
#define AK4104_CONTROL2_SEL1 (1 << 1)
#define AK4104_CONTROL2_MODE (1 << 2)
#define AK4104_TX_TXE (1 << 0)
#define AK4104_TX_V (1 << 1)
#define DRV_NAME "ak4104-codec"
struct ak4104_private {
enum snd_soc_control_type control_type;
void *control_data;
};
static int ak4104_fill_cache(struct snd_soc_codec *codec)
{
int i;
u8 *reg_cache = codec->reg_cache;
struct spi_device *spi = codec->control_data;
for (i = 0; i < codec->driver->reg_cache_size; i++) {
int ret = spi_w8r8(spi, i | AK4104_READ);
if (ret < 0) {
dev_err(&spi->dev, "SPI write failure\n");
return ret;
}
reg_cache[i] = ret;
}
return 0;
}
static unsigned int ak4104_read_reg_cache(struct snd_soc_codec *codec,
unsigned int reg)
{
u8 *reg_cache = codec->reg_cache;
if (reg >= codec->driver->reg_cache_size)
return -EINVAL;
return reg_cache[reg];
}
static int ak4104_spi_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
u8 *cache = codec->reg_cache;
struct spi_device *spi = codec->control_data;
if (reg >= codec->driver->reg_cache_size)
return -EINVAL;
/* only write to the hardware if value has changed */
if (cache[reg] != value) {
u8 tmp[2] = { (reg & AK4104_REG_MASK) | AK4104_WRITE, value };
if (spi_write(spi, tmp, sizeof(tmp))) {
dev_err(&spi->dev, "SPI write failed\n");
return -EIO;
}
cache[reg] = value;
}
return 0;
}
static int ak4104_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int format)
{
struct snd_soc_codec *codec = codec_dai->codec;
int val = 0;
val = ak4104_read_reg_cache(codec, AK4104_REG_CONTROL1);
if (val < 0)
return val;
val &= ~(AK4104_CONTROL1_DIF0 | AK4104_CONTROL1_DIF1);
/* set DAI format */
switch (format & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_RIGHT_J:
break;
case SND_SOC_DAIFMT_LEFT_J:
val |= AK4104_CONTROL1_DIF0;
break;
case SND_SOC_DAIFMT_I2S:
val |= AK4104_CONTROL1_DIF0 | AK4104_CONTROL1_DIF1;
break;
default:
dev_err(codec->dev, "invalid dai format\n");
return -EINVAL;
}
/* This device can only be slave */
if ((format & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS)
return -EINVAL;
return ak4104_spi_write(codec, AK4104_REG_CONTROL1, val);
}
static int ak4104_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_codec *codec = rtd->codec;
int val = 0;
/* set the IEC958 bits: consumer mode, no copyright bit */
val |= IEC958_AES0_CON_NOT_COPYRIGHT;
ak4104_spi_write(codec, AK4104_REG_CHN_STATUS(0), val);
val = 0;
switch (params_rate(params)) {
case 44100:
val |= IEC958_AES3_CON_FS_44100;
break;
case 48000:
val |= IEC958_AES3_CON_FS_48000;
break;
case 32000:
val |= IEC958_AES3_CON_FS_32000;
break;
default:
dev_err(codec->dev, "unsupported sampling rate\n");
return -EINVAL;
}
return ak4104_spi_write(codec, AK4104_REG_CHN_STATUS(3), val);
}
static struct snd_soc_dai_ops ak4101_dai_ops = {
.hw_params = ak4104_hw_params,
.set_fmt = ak4104_set_dai_fmt,
};
static struct snd_soc_dai_driver ak4104_dai = {
.name = "ak4104-hifi",
.playback = {
.stream_name = "Playback",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_3LE |
SNDRV_PCM_FMTBIT_S24_LE
},
.ops = &ak4101_dai_ops,
};
static int ak4104_probe(struct snd_soc_codec *codec)
{
struct ak4104_private *ak4104 = snd_soc_codec_get_drvdata(codec);
int ret, val;
codec->control_data = ak4104->control_data;
/* read all regs and fill the cache */
ret = ak4104_fill_cache(codec);
if (ret < 0) {
dev_err(codec->dev, "failed to fill register cache\n");
return ret;
}
/* read the 'reserved' register - according to the datasheet, it
* should contain 0x5b. Not a good way to verify the presence of
* the device, but there is no hardware ID register. */
if (ak4104_read_reg_cache(codec, AK4104_REG_RESERVED) !=
AK4104_RESERVED_VAL)
return -ENODEV;
/* set power-up and non-reset bits */
val = ak4104_read_reg_cache(codec, AK4104_REG_CONTROL1);
val |= AK4104_CONTROL1_PW | AK4104_CONTROL1_RSTN;
ret = ak4104_spi_write(codec, AK4104_REG_CONTROL1, val);
if (ret < 0)
return ret;
/* enable transmitter */
val = ak4104_read_reg_cache(codec, AK4104_REG_TX);
val |= AK4104_TX_TXE;
ret = ak4104_spi_write(codec, AK4104_REG_TX, val);
if (ret < 0)
return ret;
dev_info(codec->dev, "SPI device initialized\n");
return 0;
}
static int ak4104_remove(struct snd_soc_codec *codec)
{
int val, ret;
val = ak4104_read_reg_cache(codec, AK4104_REG_CONTROL1);
if (val < 0)
return val;
/* clear power-up and non-reset bits */
val &= ~(AK4104_CONTROL1_PW | AK4104_CONTROL1_RSTN);
ret = ak4104_spi_write(codec, AK4104_REG_CONTROL1, val);
return ret;
}
static struct snd_soc_codec_driver soc_codec_device_ak4104 = {
.probe = ak4104_probe,
.remove = ak4104_remove,
.reg_cache_size = AK4104_NUM_REGS,
.reg_word_size = sizeof(u16),
};
static int ak4104_spi_probe(struct spi_device *spi)
{
struct ak4104_private *ak4104;
int ret;
spi->bits_per_word = 8;
spi->mode = SPI_MODE_0;
ret = spi_setup(spi);
if (ret < 0)
return ret;
ak4104 = kzalloc(sizeof(struct ak4104_private), GFP_KERNEL);
if (ak4104 == NULL)
return -ENOMEM;
ak4104->control_data = spi;
ak4104->control_type = SND_SOC_SPI;
spi_set_drvdata(spi, ak4104);
ret = snd_soc_register_codec(&spi->dev,
&soc_codec_device_ak4104, &ak4104_dai, 1);
if (ret < 0)
kfree(ak4104);
return ret;
}
static int __devexit ak4104_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
kfree(spi_get_drvdata(spi));
return 0;
}
static struct spi_driver ak4104_spi_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.probe = ak4104_spi_probe,
.remove = __devexit_p(ak4104_spi_remove),
};
static int __init ak4104_init(void)
{
return spi_register_driver(&ak4104_spi_driver);
}
module_init(ak4104_init);
static void __exit ak4104_exit(void)
{
spi_unregister_driver(&ak4104_spi_driver);
}
module_exit(ak4104_exit);
MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
MODULE_DESCRIPTION("Asahi Kasei AK4104 ALSA SoC driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
emceethemouth/kernel_android | drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c | 3173 | 6933 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/gpuobj.h>
#include <core/option.h>
#include <subdev/timer.h>
#include <subdev/vm.h>
#include "nv04.h"
#define NV44_GART_SIZE (512 * 1024 * 1024)
#define NV44_GART_PAGE ( 4 * 1024)
/*******************************************************************************
* VM map/unmap callbacks
******************************************************************************/
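/*
 * PTE packing used below: each aligned group of four PTEs shares a
 * 16-byte block in the page table. The four 27-bit page addresses
 * (dma_addr >> 12, i.e. 4 KiB pages) are packed back to back across
 * the four 32-bit words, and 0x40000000 is OR'd into the last word
 * (presumably a valid/enable bit - the code does not say).
 */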
static void
nv44_vm_fill(struct nouveau_gpuobj *pgt, dma_addr_t null,
dma_addr_t *list, u32 pte, u32 cnt)
{
u32 base = (pte << 2) & ~0x0000000f;
u32 tmp[4];
tmp[0] = nv_ro32(pgt, base + 0x0);
tmp[1] = nv_ro32(pgt, base + 0x4);
tmp[2] = nv_ro32(pgt, base + 0x8);
tmp[3] = nv_ro32(pgt, base + 0xc);
while (cnt--) {
u32 addr = list ? (*list++ >> 12) : (null >> 12);
switch (pte++ & 0x3) {
case 0:
tmp[0] &= ~0x07ffffff;
tmp[0] |= addr;
break;
case 1:
tmp[0] &= ~0xf8000000;
tmp[0] |= addr << 27;
tmp[1] &= ~0x003fffff;
tmp[1] |= addr >> 5;
break;
case 2:
tmp[1] &= ~0xffc00000;
tmp[1] |= addr << 22;
tmp[2] &= ~0x0001ffff;
tmp[2] |= addr >> 10;
break;
case 3:
tmp[2] &= ~0xfffe0000;
tmp[2] |= addr << 17;
tmp[3] &= ~0x00000fff;
tmp[3] |= addr >> 15;
break;
}
}
nv_wo32(pgt, base + 0x0, tmp[0]);
nv_wo32(pgt, base + 0x4, tmp[1]);
nv_wo32(pgt, base + 0x8, tmp[2]);
nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
}
static void
nv44_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
struct nv04_vmmgr_priv *priv = (void *)vma->vm->vmm;
u32 tmp[4];
int i;
if (pte & 3) {
u32 max = 4 - (pte & 3);
u32 part = (cnt > max) ? max : cnt;
nv44_vm_fill(pgt, priv->null, list, pte, part);
pte += part;
list += part;
cnt -= part;
}
while (cnt >= 4) {
for (i = 0; i < 4; i++)
tmp[i] = *list++ >> 12;
nv_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
nv_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
cnt -= 4;
}
if (cnt)
nv44_vm_fill(pgt, priv->null, list, pte, cnt);
}
static void
nv44_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
struct nv04_vmmgr_priv *priv = (void *)nouveau_vmmgr(pgt);
if (pte & 3) {
u32 max = 4 - (pte & 3);
u32 part = (cnt > max) ? max : cnt;
nv44_vm_fill(pgt, priv->null, NULL, pte, part);
pte += part;
cnt -= part;
}
while (cnt >= 4) {
nv_wo32(pgt, pte++ * 4, 0x00000000);
nv_wo32(pgt, pte++ * 4, 0x00000000);
nv_wo32(pgt, pte++ * 4, 0x00000000);
nv_wo32(pgt, pte++ * 4, 0x00000000);
cnt -= 4;
}
if (cnt)
nv44_vm_fill(pgt, priv->null, NULL, pte, cnt);
}
static void
nv44_vm_flush(struct nouveau_vm *vm)
{
struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE);
nv_wr32(priv, 0x100808, 0x00000020);
if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001))
nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808));
nv_wr32(priv, 0x100808, 0x00000000);
}
/*******************************************************************************
* VMMGR subdev
******************************************************************************/
static int
nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nouveau_device *device = nv_device(parent);
struct nv04_vmmgr_priv *priv;
int ret;
if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
data, size, pobject);
}
ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
"pciegart", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
priv->base.create = nv04_vm_create;
priv->base.limit = NV44_GART_SIZE;
priv->base.dma_bits = 39;
priv->base.pgt_bits = 32 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 12;
priv->base.map_sg = nv44_vm_map_sg;
priv->base.unmap = nv44_vm_unmap;
priv->base.flush = nv44_vm_flush;
priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null);
if (!priv->nullp) {
nv_error(priv, "unable to allocate dummy pages\n");
return -ENOMEM;
}
ret = nouveau_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
&priv->vm);
if (ret)
return ret;
ret = nouveau_gpuobj_new(nv_object(priv), NULL,
(NV44_GART_SIZE / NV44_GART_PAGE) * 4,
512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
&priv->vm->pgt[0].obj[0]);
priv->vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
return 0;
}
static int
nv44_vmmgr_init(struct nouveau_object *object)
{
struct nv04_vmmgr_priv *priv = (void *)object;
struct nouveau_gpuobj *gart = priv->vm->pgt[0].obj[0];
u32 addr;
int ret;
ret = nouveau_vmmgr_init(&priv->base);
if (ret)
return ret;
/* calculate the VRAM address of this PRAMIN block; the object must be
* allocated with 512KiB alignment and must not exceed a total size
* of 512KiB for this to work correctly
*/
addr = nv_rd32(priv, 0x10020c);
addr -= ((gart->addr >> 19) + 1) << 19;
nv_wr32(priv, 0x100850, 0x80000000);
nv_wr32(priv, 0x100818, priv->null);
nv_wr32(priv, 0x100804, NV44_GART_SIZE);
nv_wr32(priv, 0x100850, 0x00008000);
nv_mask(priv, 0x10008c, 0x00000200, 0x00000200);
nv_wr32(priv, 0x100820, 0x00000000);
nv_wr32(priv, 0x10082c, 0x00000001);
nv_wr32(priv, 0x100800, addr | 0x00000010);
return 0;
}
struct nouveau_oclass
nv44_vmmgr_oclass = {
.handle = NV_SUBDEV(VM, 0x44),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv44_vmmgr_ctor,
.dtor = nv04_vmmgr_dtor,
.init = nv44_vmmgr_init,
.fini = _nouveau_vmmgr_fini,
},
};
| gpl-2.0 |
Samsung-BCM/android_kernel_samsung_bcm | arch/x86/um/sys_call_table_64.c | 4197 | 1635 | /*
* System call table for UML/x86-64, copied from arch/x86/kernel/syscall_*.c
* with some changes for UML.
*/
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <generated/user_constants.h>
#define __NO_STUBS
/*
* Below you can see, in terms of #define's, the differences between the x86-64
* and the UML syscall table.
*/
/* Not going to be implemented by UML, since we have no hardware. */
#define stub_iopl sys_ni_syscall
#define sys_ioperm sys_ni_syscall
/*
* The UML TLS problem. Note that x86_64 does not implement this, so the below
* is needed only for ia32 compatibility.
*/
/* On UML we call it this way ("old" means it's not mmap2) */
#define sys_mmap old_mmap
#define stub_clone sys_clone
#define stub_fork sys_fork
#define stub_vfork sys_vfork
#define stub_execve sys_execve
#define stub_rt_sigsuspend sys_rt_sigsuspend
#define stub_sigaltstack sys_sigaltstack
#define stub_rt_sigreturn sys_rt_sigreturn
#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
#include <asm/syscalls_64.h>
#undef __SYSCALL_64
#define __SYSCALL_64(nr, sym, compat) [ nr ] = sym,
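/*
 * asm/syscalls_64.h is included twice: the first pass (above) expands
 * every entry into an extern declaration via __SYSCALL_64, and the
 * second pass (inside the array below) expands the same entries into
 * designated initializers of the form [nr] = sym.
 */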
typedef void (*sys_call_ptr_t)(void);
extern void sys_ni_syscall(void);
const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
[0 ... __NR_syscall_max] = &sys_ni_syscall,
#include <asm/syscalls_64.h>
};
int syscall_table_size = sizeof(sys_call_table);
| gpl-2.0 |
gerard87/kernel_angler_n_mr1 | arch/ia64/kernel/sys_ia64.c | 4197 | 4501 | /*
* This file contains various system calls that have different calling
* conventions on different platforms.
*
* Copyright (C) 1999-2000, 2002-2003, 2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/shm.h>
#include <linux/file.h> /* doh, must come after sched.h... */
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/highuid.h>
#include <linux/hugetlb.h>
#include <asm/shmparam.h>
#include <asm/uaccess.h>
unsigned long
arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
long map_shared = (flags & MAP_SHARED);
unsigned long align_mask = 0;
struct mm_struct *mm = current->mm;
struct vm_unmapped_area_info info;
if (len > RGN_MAP_LIMIT)
return -ENOMEM;
/* handle fixed mapping: prevent overlap with huge pages */
if (flags & MAP_FIXED) {
if (is_hugepage_only_range(mm, addr, len))
return -EINVAL;
return addr;
}
#ifdef CONFIG_HUGETLB_PAGE
if (REGION_NUMBER(addr) == RGN_HPAGE)
addr = 0;
#endif
if (!addr)
addr = TASK_UNMAPPED_BASE;
if (map_shared && (TASK_SIZE > 0xfffffffful))
/*
* For 64-bit tasks, align shared segments to 1MB to avoid potential
* performance penalty due to virtual aliasing (see ASDM). For 32-bit
* tasks, we prefer to avoid exhausting the address space too quickly by
* limiting alignment to a single page.
*/
align_mask = PAGE_MASK & (SHMLBA - 1);
info.flags = 0;
info.length = len;
info.low_limit = addr;
info.high_limit = TASK_SIZE;
info.align_mask = align_mask;
info.align_offset = 0;
return vm_unmapped_area(&info);
}
asmlinkage long
ia64_getpriority (int which, int who)
{
long prio;
prio = sys_getpriority(which, who);
if (prio >= 0) {
force_successful_syscall_return();
prio = 20 - prio;
}
return prio;
}
/* XXX obsolete, but leave it here until the old libc is gone... */
asmlinkage unsigned long
sys_getpagesize (void)
{
return PAGE_SIZE;
}
asmlinkage unsigned long
ia64_brk (unsigned long brk)
{
unsigned long retval = sys_brk(brk);
force_successful_syscall_return();
return retval;
}
/*
* On IA-64, we return the two file descriptors in ret0 and ret1 (r8
* and r9) as this is faster than doing a copy_to_user().
*/
asmlinkage long
sys_ia64_pipe (void)
{
struct pt_regs *regs = task_pt_regs(current);
int fd[2];
int retval;
retval = do_pipe_flags(fd, 0);
if (retval)
goto out;
retval = fd[0];
regs->r9 = fd[1];
out:
return retval;
}
int ia64_mmap_check(unsigned long addr, unsigned long len,
unsigned long flags)
{
unsigned long roff;
/*
* Don't permit mappings into unmapped space, the virtual page table
* of a region, or across a region boundary. Note: RGN_MAP_LIMIT is
* equal to 2^n-PAGE_SIZE (for some integer n <= 61) and len > 0.
*/
roff = REGION_OFFSET(addr);
if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len)))
return -EINVAL;
return 0;
}
/*
* mmap2() is like mmap() except that the offset is expressed in units
* of PAGE_SIZE (instead of bytes). This makes it possible to mmap2()
* (pieces of) files that are larger than the address space of the CPU.
*/
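/*
 * e.g. a byte offset off corresponds to pgoff = off >> PAGE_SHIFT,
 * which is exactly the conversion sys_mmap() below performs before
 * delegating to sys_mmap_pgoff().
 */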
asmlinkage unsigned long
sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
{
addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
if (!IS_ERR((void *) addr))
force_successful_syscall_return();
return addr;
}
asmlinkage unsigned long
sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, long off)
{
if (offset_in_page(off) != 0)
return -EINVAL;
addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
if (!IS_ERR((void *) addr))
force_successful_syscall_return();
return addr;
}
asmlinkage unsigned long
ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags,
unsigned long new_addr)
{
addr = sys_mremap(addr, old_len, new_len, flags, new_addr);
if (!IS_ERR((void *) addr))
force_successful_syscall_return();
return addr;
}
#ifndef CONFIG_PCI
asmlinkage long
sys_pciconfig_read (unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len,
void *buf)
{
return -ENOSYS;
}
asmlinkage long
sys_pciconfig_write (unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len,
void *buf)
{
return -ENOSYS;
}
#endif /* CONFIG_PCI */
| gpl-2.0 |
samarthp/sam-tenderloin-kernel-3.4 | drivers/staging/iio/dds/ad5930.c | 4965 | 3539 | /*
* Driver for ADI Direct Digital Synthesis ad5930
*
* Copyright (c) 2010-2010 Analog Devices Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
#define DRV_NAME "ad5930"
#define value_mask (u16)0xf000
#define addr_shift 12
/* Register format: 4 bits addr + 12 bits value */
struct ad5903_config {
u16 control;
u16 incnum;
u16 frqdelt[2];
u16 incitvl;
u16 buritvl;
u16 strtfrq[2];
};
struct ad5930_state {
struct mutex lock;
struct spi_device *sdev;
};
static ssize_t ad5930_set_parameter(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct spi_message msg;
struct spi_transfer xfer;
int ret;
struct ad5903_config *config = (struct ad5903_config *)buf;
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad5930_state *st = iio_priv(idev);
config->control = (config->control & ~value_mask);
config->incnum = (config->control & ~value_mask) | (1 << addr_shift);
config->frqdelt[0] = (config->control & ~value_mask) | (2 << addr_shift);
config->frqdelt[1] = (config->control & ~value_mask) | 3 << addr_shift;
config->incitvl = (config->control & ~value_mask) | 4 << addr_shift;
config->buritvl = (config->control & ~value_mask) | 8 << addr_shift;
config->strtfrq[0] = (config->control & ~value_mask) | 0xc << addr_shift;
config->strtfrq[1] = (config->control & ~value_mask) | 0xd << addr_shift;
xfer.len = len;
xfer.tx_buf = config;
mutex_lock(&st->lock);
spi_message_init(&msg);
spi_message_add_tail(&xfer, &msg);
ret = spi_sync(st->sdev, &msg);
if (ret)
goto error_ret;
error_ret:
mutex_unlock(&st->lock);
return ret ? ret : len;
}
static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad5930_set_parameter, 0);
static struct attribute *ad5930_attributes[] = {
&iio_dev_attr_dds.dev_attr.attr,
NULL,
};
static const struct attribute_group ad5930_attribute_group = {
.attrs = ad5930_attributes,
};
static const struct iio_info ad5930_info = {
.attrs = &ad5930_attribute_group,
.driver_module = THIS_MODULE,
};
static int __devinit ad5930_probe(struct spi_device *spi)
{
struct ad5930_state *st;
struct iio_dev *idev;
int ret = 0;
idev = iio_allocate_device(sizeof(*st));
if (idev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
spi_set_drvdata(spi, idev);
st = iio_priv(idev);
mutex_init(&st->lock);
st->sdev = spi;
idev->dev.parent = &spi->dev;
idev->info = &ad5930_info;
idev->modes = INDIO_DIRECT_MODE;
ret = iio_device_register(idev);
if (ret)
goto error_free_dev;
spi->max_speed_hz = 2000000;
spi->mode = SPI_MODE_3;
spi->bits_per_word = 16;
spi_setup(spi);
return 0;
error_free_dev:
iio_free_device(idev);
error_ret:
return ret;
}
static int __devexit ad5930_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
iio_free_device(spi_get_drvdata(spi));
return 0;
}
static struct spi_driver ad5930_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.probe = ad5930_probe,
.remove = __devexit_p(ad5930_remove),
};
module_spi_driver(ad5930_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad5930 driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:" DRV_NAME);
| gpl-2.0 |
Split-Screen/android_kernel_htc_msm8960 | drivers/input/misc/gp2ap002a00f.c | 4965 | 6166 | /*
* Copyright (C) 2011 Sony Ericsson Mobile Communications Inc.
*
* Author: Courtney Cavin <courtney.cavin@sonyericsson.com>
* Prepared for up-stream by: Oskar Andero <oskar.andero@sonyericsson.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2, as
* published by the Free Software Foundation.
*/
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/input/gp2ap002a00f.h>
struct gp2a_data {
struct input_dev *input;
const struct gp2a_platform_data *pdata;
struct i2c_client *i2c_client;
};
enum gp2a_addr {
GP2A_ADDR_PROX = 0x0,
GP2A_ADDR_GAIN = 0x1,
GP2A_ADDR_HYS = 0x2,
GP2A_ADDR_CYCLE = 0x3,
GP2A_ADDR_OPMOD = 0x4,
GP2A_ADDR_CON = 0x6
};
enum gp2a_controls {
/* Software Shutdown control: 0 = shutdown, 1 = normal operation */
GP2A_CTRL_SSD = 0x01
};
static int gp2a_report(struct gp2a_data *dt)
{
int vo = gpio_get_value(dt->pdata->vout_gpio);
input_report_switch(dt->input, SW_FRONT_PROXIMITY, !vo);
input_sync(dt->input);
return 0;
}
static irqreturn_t gp2a_irq(int irq, void *handle)
{
struct gp2a_data *dt = handle;
gp2a_report(dt);
return IRQ_HANDLED;
}
static int gp2a_enable(struct gp2a_data *dt)
{
return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
GP2A_CTRL_SSD);
}
static int gp2a_disable(struct gp2a_data *dt)
{
return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
0x00);
}
static int gp2a_device_open(struct input_dev *dev)
{
struct gp2a_data *dt = input_get_drvdata(dev);
int error;
error = gp2a_enable(dt);
if (error < 0) {
dev_err(&dt->i2c_client->dev,
"unable to activate, err %d\n", error);
return error;
}
gp2a_report(dt);
return 0;
}
static void gp2a_device_close(struct input_dev *dev)
{
struct gp2a_data *dt = input_get_drvdata(dev);
int error;
error = gp2a_disable(dt);
if (error < 0)
dev_err(&dt->i2c_client->dev,
"unable to deactivate, err %d\n", error);
}
static int __devinit gp2a_initialize(struct gp2a_data *dt)
{
int error;
error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_GAIN,
0x08);
if (error < 0)
return error;
error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_HYS,
0xc2);
if (error < 0)
return error;
error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_CYCLE,
0x04);
if (error < 0)
return error;
error = gp2a_disable(dt);
return error;
}
static int __devinit gp2a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct gp2a_platform_data *pdata = client->dev.platform_data;
struct gp2a_data *dt;
int error;
if (!pdata)
return -EINVAL;
if (pdata->hw_setup) {
error = pdata->hw_setup(client);
if (error < 0)
return error;
}
error = gpio_request_one(pdata->vout_gpio, GPIOF_IN, GP2A_I2C_NAME);
if (error)
goto err_hw_shutdown;
dt = kzalloc(sizeof(struct gp2a_data), GFP_KERNEL);
if (!dt) {
error = -ENOMEM;
goto err_free_gpio;
}
dt->pdata = pdata;
dt->i2c_client = client;
error = gp2a_initialize(dt);
if (error < 0)
goto err_free_mem;
dt->input = input_allocate_device();
if (!dt->input) {
error = -ENOMEM;
goto err_free_mem;
}
input_set_drvdata(dt->input, dt);
dt->input->open = gp2a_device_open;
dt->input->close = gp2a_device_close;
dt->input->name = GP2A_I2C_NAME;
dt->input->id.bustype = BUS_I2C;
dt->input->dev.parent = &client->dev;
input_set_capability(dt->input, EV_SW, SW_FRONT_PROXIMITY);
error = request_threaded_irq(client->irq, NULL, gp2a_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
GP2A_I2C_NAME, dt);
if (error) {
dev_err(&client->dev, "irq request failed\n");
goto err_free_input_dev;
}
error = input_register_device(dt->input);
if (error) {
dev_err(&client->dev, "device registration failed\n");
goto err_free_irq;
}
device_init_wakeup(&client->dev, pdata->wakeup);
i2c_set_clientdata(client, dt);
return 0;
err_free_irq:
free_irq(client->irq, dt);
err_free_input_dev:
input_free_device(dt->input);
err_free_mem:
kfree(dt);
err_free_gpio:
gpio_free(pdata->vout_gpio);
err_hw_shutdown:
if (pdata->hw_shutdown)
pdata->hw_shutdown(client);
return error;
}
static int __devexit gp2a_remove(struct i2c_client *client)
{
struct gp2a_data *dt = i2c_get_clientdata(client);
const struct gp2a_platform_data *pdata = dt->pdata;
device_init_wakeup(&client->dev, false);
free_irq(client->irq, dt);
input_unregister_device(dt->input);
kfree(dt);
gpio_free(pdata->vout_gpio);
if (pdata->hw_shutdown)
pdata->hw_shutdown(client);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int gp2a_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct gp2a_data *dt = i2c_get_clientdata(client);
int retval = 0;
if (device_may_wakeup(&client->dev)) {
enable_irq_wake(client->irq);
} else {
mutex_lock(&dt->input->mutex);
if (dt->input->users)
retval = gp2a_disable(dt);
mutex_unlock(&dt->input->mutex);
}
return retval;
}
static int gp2a_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct gp2a_data *dt = i2c_get_clientdata(client);
int retval = 0;
if (device_may_wakeup(&client->dev)) {
disable_irq_wake(client->irq);
} else {
mutex_lock(&dt->input->mutex);
if (dt->input->users)
retval = gp2a_enable(dt);
mutex_unlock(&dt->input->mutex);
}
return retval;
}
#endif
static SIMPLE_DEV_PM_OPS(gp2a_pm, gp2a_suspend, gp2a_resume);
static const struct i2c_device_id gp2a_i2c_id[] = {
{ GP2A_I2C_NAME, 0 },
{ }
};
static struct i2c_driver gp2a_i2c_driver = {
.driver = {
.name = GP2A_I2C_NAME,
.owner = THIS_MODULE,
.pm = &gp2a_pm,
},
.probe = gp2a_probe,
.remove = __devexit_p(gp2a_remove),
.id_table = gp2a_i2c_id,
};
module_i2c_driver(gp2a_i2c_driver);
MODULE_AUTHOR("Courtney Cavin <courtney.cavin@sonyericsson.com>");
MODULE_DESCRIPTION("Sharp GP2AP002A00F I2C Proximity/Opto sensor driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
SOKP/kernel_samsung_hlte | arch/sparc/kernel/cpumap.c | 7013 | 11009 | /* cpumap.c: used for optimizing CPU assignment
*
* Copyright (C) 2009 Hong H. Pham <hong.pham@windriver.com>
*/
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <asm/cpudata.h>
#include "cpumap.h"
enum {
CPUINFO_LVL_ROOT = 0,
CPUINFO_LVL_NODE,
CPUINFO_LVL_CORE,
CPUINFO_LVL_PROC,
CPUINFO_LVL_MAX,
};
enum {
ROVER_NO_OP = 0,
/* Increment rover every time level is visited */
ROVER_INC_ON_VISIT = 1 << 0,
/* Increment parent's rover every time rover wraps around */
ROVER_INC_PARENT_ON_LOOP = 1 << 1,
};
struct cpuinfo_node {
int id;
int level;
int num_cpus; /* Number of CPUs in this hierarchy */
int parent_index;
int child_start; /* Array index of the first child node */
int child_end; /* Array index of the last child node */
int rover; /* Child node iterator */
};
struct cpuinfo_level {
int start_index; /* Index of first node of a level in a cpuinfo tree */
int end_index; /* Index of last node of a level in a cpuinfo tree */
int num_nodes; /* Number of nodes in a level in a cpuinfo tree */
};
struct cpuinfo_tree {
int total_nodes;
/* Offsets into nodes[] for each level of the tree */
struct cpuinfo_level level[CPUINFO_LVL_MAX];
struct cpuinfo_node nodes[0];
};
static struct cpuinfo_tree *cpuinfo_tree;
static u16 cpu_distribution_map[NR_CPUS];
static DEFINE_SPINLOCK(cpu_map_lock);
/* Niagara optimized cpuinfo tree traversal. */
static const int niagara_iterate_method[] = {
[CPUINFO_LVL_ROOT] = ROVER_NO_OP,
/* Strands (or virtual CPUs) within a core may not run concurrently
* on the Niagara, as instruction pipeline(s) are shared. Distribute
* work to strands in different cores first for better concurrency.
* Go to next NUMA node when all cores are used.
*/
[CPUINFO_LVL_NODE] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
/* Strands are grouped together by proc_id in cpuinfo_sparc, i.e.
* a proc_id represents an instruction pipeline. Distribute work to
* strands in different proc_id groups if the core has multiple
* instruction pipelines (e.g. the Niagara 2/2+ has two).
*/
[CPUINFO_LVL_CORE] = ROVER_INC_ON_VISIT,
/* Pick the next strand in the proc_id group. */
[CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT,
};
/* Generic cpuinfo tree traversal. Distribute work round robin across NUMA
* nodes.
*/
static const int generic_iterate_method[] = {
[CPUINFO_LVL_ROOT] = ROVER_INC_ON_VISIT,
[CPUINFO_LVL_NODE] = ROVER_NO_OP,
[CPUINFO_LVL_CORE] = ROVER_INC_PARENT_ON_LOOP,
[CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
};
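/*
 * With the Niagara policy above, each call of iterate_cpu() starting
 * at the root walks root -> node -> core -> proc and returns the CPU
 * stored at the proc-level rover; the per-level rovers then advance so
 * that work is handed to strands in different cores before a core's
 * other strands are reused, moving to the next NUMA node only once all
 * cores have been visited.
 */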
static int cpuinfo_id(int cpu, int level)
{
int id;
switch (level) {
case CPUINFO_LVL_ROOT:
id = 0;
break;
case CPUINFO_LVL_NODE:
id = cpu_to_node(cpu);
break;
case CPUINFO_LVL_CORE:
id = cpu_data(cpu).core_id;
break;
case CPUINFO_LVL_PROC:
id = cpu_data(cpu).proc_id;
break;
default:
id = -EINVAL;
}
return id;
}
/*
* Enumerate the CPU information in __cpu_data to determine the start index,
* end index, and number of nodes for each level in the cpuinfo tree. The
* total number of cpuinfo nodes required to build the tree is returned.
*/
static int enumerate_cpuinfo_nodes(struct cpuinfo_level *tree_level)
{
int prev_id[CPUINFO_LVL_MAX];
int i, n, num_nodes;
for (i = CPUINFO_LVL_ROOT; i < CPUINFO_LVL_MAX; i++) {
struct cpuinfo_level *lv = &tree_level[i];
prev_id[i] = -1;
lv->start_index = lv->end_index = lv->num_nodes = 0;
}
num_nodes = 1; /* Include the root node */
for (i = 0; i < num_possible_cpus(); i++) {
if (!cpu_online(i))
continue;
n = cpuinfo_id(i, CPUINFO_LVL_NODE);
if (n > prev_id[CPUINFO_LVL_NODE]) {
tree_level[CPUINFO_LVL_NODE].num_nodes++;
prev_id[CPUINFO_LVL_NODE] = n;
num_nodes++;
}
n = cpuinfo_id(i, CPUINFO_LVL_CORE);
if (n > prev_id[CPUINFO_LVL_CORE]) {
tree_level[CPUINFO_LVL_CORE].num_nodes++;
prev_id[CPUINFO_LVL_CORE] = n;
num_nodes++;
}
n = cpuinfo_id(i, CPUINFO_LVL_PROC);
if (n > prev_id[CPUINFO_LVL_PROC]) {
tree_level[CPUINFO_LVL_PROC].num_nodes++;
prev_id[CPUINFO_LVL_PROC] = n;
num_nodes++;
}
}
tree_level[CPUINFO_LVL_ROOT].num_nodes = 1;
n = tree_level[CPUINFO_LVL_NODE].num_nodes;
tree_level[CPUINFO_LVL_NODE].start_index = 1;
tree_level[CPUINFO_LVL_NODE].end_index = n;
n++;
tree_level[CPUINFO_LVL_CORE].start_index = n;
n += tree_level[CPUINFO_LVL_CORE].num_nodes;
tree_level[CPUINFO_LVL_CORE].end_index = n - 1;
tree_level[CPUINFO_LVL_PROC].start_index = n;
n += tree_level[CPUINFO_LVL_PROC].num_nodes;
tree_level[CPUINFO_LVL_PROC].end_index = n - 1;
return num_nodes;
}
/* Build a tree representation of the CPU hierarchy using the per CPU
* information in __cpu_data. Entries in __cpu_data[0..NR_CPUS] are
* assumed to be sorted in ascending order based on node, core_id, and
* proc_id (in order of significance).
*/
static struct cpuinfo_tree *build_cpuinfo_tree(void)
{
struct cpuinfo_tree *new_tree;
struct cpuinfo_node *node;
struct cpuinfo_level tmp_level[CPUINFO_LVL_MAX];
int num_cpus[CPUINFO_LVL_MAX];
int level_rover[CPUINFO_LVL_MAX];
int prev_id[CPUINFO_LVL_MAX];
int n, id, cpu, prev_cpu, last_cpu, level;
n = enumerate_cpuinfo_nodes(tmp_level);
new_tree = kzalloc(sizeof(struct cpuinfo_tree) +
(sizeof(struct cpuinfo_node) * n), GFP_ATOMIC);
if (!new_tree)
return NULL;
new_tree->total_nodes = n;
memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));
prev_cpu = cpu = cpumask_first(cpu_online_mask);
/* Initialize all levels in the tree with the first CPU */
for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) {
n = new_tree->level[level].start_index;
level_rover[level] = n;
node = &new_tree->nodes[n];
id = cpuinfo_id(cpu, level);
if (unlikely(id < 0)) {
kfree(new_tree);
return NULL;
}
node->id = id;
node->level = level;
node->num_cpus = 1;
node->parent_index = (level > CPUINFO_LVL_ROOT)
? new_tree->level[level - 1].start_index : -1;
node->child_start = node->child_end = node->rover =
(level == CPUINFO_LVL_PROC)
? cpu : new_tree->level[level + 1].start_index;
prev_id[level] = node->id;
num_cpus[level] = 1;
}
for (last_cpu = (num_possible_cpus() - 1); last_cpu >= 0; last_cpu--) {
if (cpu_online(last_cpu))
break;
}
while (++cpu <= last_cpu) {
if (!cpu_online(cpu))
continue;
for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT;
level--) {
id = cpuinfo_id(cpu, level);
if (unlikely(id < 0)) {
kfree(new_tree);
return NULL;
}
if ((id != prev_id[level]) || (cpu == last_cpu)) {
prev_id[level] = id;
node = &new_tree->nodes[level_rover[level]];
node->num_cpus = num_cpus[level];
num_cpus[level] = 1;
if (cpu == last_cpu)
node->num_cpus++;
/* Connect tree node to parent */
if (level == CPUINFO_LVL_ROOT)
node->parent_index = -1;
else
node->parent_index =
level_rover[level - 1];
if (level == CPUINFO_LVL_PROC) {
node->child_end =
(cpu == last_cpu) ? cpu : prev_cpu;
} else {
node->child_end =
level_rover[level + 1] - 1;
}
/* Initialize the next node in the same level */
n = ++level_rover[level];
if (n <= new_tree->level[level].end_index) {
node = &new_tree->nodes[n];
node->id = id;
node->level = level;
/* Connect node to child */
node->child_start = node->child_end =
node->rover =
(level == CPUINFO_LVL_PROC)
? cpu : level_rover[level + 1];
}
} else
num_cpus[level]++;
}
prev_cpu = cpu;
}
return new_tree;
}
static void increment_rover(struct cpuinfo_tree *t, int node_index,
int root_index, const int *rover_inc_table)
{
struct cpuinfo_node *node = &t->nodes[node_index];
int top_level, level;
top_level = t->nodes[root_index].level;
for (level = node->level; level >= top_level; level--) {
node->rover++;
if (node->rover <= node->child_end)
return;
node->rover = node->child_start;
/* If parent's rover does not need to be adjusted, stop here. */
if ((level == top_level) ||
!(rover_inc_table[level] & ROVER_INC_PARENT_ON_LOOP))
return;
node = &t->nodes[node->parent_index];
}
}
static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
{
const int *rover_inc_table;
int level, new_index, index = root_index;
switch (sun4v_chip_type) {
case SUN4V_CHIP_NIAGARA1:
case SUN4V_CHIP_NIAGARA2:
case SUN4V_CHIP_NIAGARA3:
case SUN4V_CHIP_NIAGARA4:
case SUN4V_CHIP_NIAGARA5:
rover_inc_table = niagara_iterate_method;
break;
default:
rover_inc_table = generic_iterate_method;
}
for (level = t->nodes[root_index].level; level < CPUINFO_LVL_MAX;
level++) {
new_index = t->nodes[index].rover;
if (rover_inc_table[level] & ROVER_INC_ON_VISIT)
increment_rover(t, index, root_index, rover_inc_table);
index = new_index;
}
return index;
}
static void _cpu_map_rebuild(void)
{
int i;
if (cpuinfo_tree) {
kfree(cpuinfo_tree);
cpuinfo_tree = NULL;
}
cpuinfo_tree = build_cpuinfo_tree();
if (!cpuinfo_tree)
return;
/* Build CPU distribution map that spans all online CPUs. No need
* to check if the CPU is online, as that is done when the cpuinfo
* tree is being built.
*/
for (i = 0; i < cpuinfo_tree->nodes[0].num_cpus; i++)
cpu_distribution_map[i] = iterate_cpu(cpuinfo_tree, 0);
}
/* Fallback if the cpuinfo tree could not be built. CPU mapping is linear
* round robin.
*/
static int simple_map_to_cpu(unsigned int index)
{
int i, end, cpu_rover;
cpu_rover = 0;
end = index % num_online_cpus();
for (i = 0; i < num_possible_cpus(); i++) {
if (cpu_online(cpu_rover)) {
if (cpu_rover >= end)
return cpu_rover;
cpu_rover++;
}
}
/* Impossible, since num_online_cpus() <= num_possible_cpus() */
return cpumask_first(cpu_online_mask);
}
static int _map_to_cpu(unsigned int index)
{
struct cpuinfo_node *root_node;
if (unlikely(!cpuinfo_tree)) {
_cpu_map_rebuild();
if (!cpuinfo_tree)
return simple_map_to_cpu(index);
}
root_node = &cpuinfo_tree->nodes[0];
#ifdef CONFIG_HOTPLUG_CPU
if (unlikely(root_node->num_cpus != num_online_cpus())) {
_cpu_map_rebuild();
if (!cpuinfo_tree)
return simple_map_to_cpu(index);
}
#endif
return cpu_distribution_map[index % root_node->num_cpus];
}
int map_to_cpu(unsigned int index)
{
int mapped_cpu;
unsigned long flag;
spin_lock_irqsave(&cpu_map_lock, flag);
mapped_cpu = _map_to_cpu(index);
#ifdef CONFIG_HOTPLUG_CPU
while (unlikely(!cpu_online(mapped_cpu)))
mapped_cpu = _map_to_cpu(index);
#endif
spin_unlock_irqrestore(&cpu_map_lock, flag);
return mapped_cpu;
}
EXPORT_SYMBOL(map_to_cpu);
void cpu_map_rebuild(void)
{
unsigned long flag;
spin_lock_irqsave(&cpu_map_lock, flag);
_cpu_map_rebuild();
spin_unlock_irqrestore(&cpu_map_lock, flag);
}
| gpl-2.0 |
AOKP/kernel_lge_mako | arch/x86/um/user-offsets.c | 9061 | 2490 | #include <stdio.h>
#include <stddef.h>
#include <signal.h>
#include <sys/poll.h>
#include <sys/mman.h>
#include <sys/user.h>
#define __FRAME_OFFSETS
#include <asm/ptrace.h>
#include <asm/types.h>
#ifdef __i386__
#define __SYSCALL_I386(nr, sym, compat) [nr] = 1,
static char syscalls[] = {
#include <asm/syscalls_32.h>
};
#else
#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
static char syscalls[] = {
#include <asm/syscalls_64.h>
};
#endif
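/* Note on the arrays above: syscalls[] is populated with designated
* initializers of the form [nr] = 1, so its size ends up being the
* highest syscall number present in the included table plus one.
* __NR_syscall_max and NR_syscalls at the end of foo() are then
* derived purely from sizeof(syscalls) at compile time.
*/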
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define DEFINE_LONGS(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
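/* Rough sketch of how these markers are consumed (an assumption about
* the surrounding build machinery, which is not part of this file):
* each DEFINE() emits a line such as "->HOST_IP $12 EIP" into the
* assembly generated for this file, and a build-time script rewrites
* the "->" lines into "#define HOST_IP 12"-style constants that the
* user-mode-linux host code can include.
*/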
void foo(void)
{
#ifdef __i386__
DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_fpregs_struct));
DEFINE_LONGS(HOST_FPX_SIZE, sizeof(struct user_fpxregs_struct));
DEFINE(HOST_IP, EIP);
DEFINE(HOST_SP, UESP);
DEFINE(HOST_EFLAGS, EFL);
DEFINE(HOST_AX, EAX);
DEFINE(HOST_BX, EBX);
DEFINE(HOST_CX, ECX);
DEFINE(HOST_DX, EDX);
DEFINE(HOST_SI, ESI);
DEFINE(HOST_DI, EDI);
DEFINE(HOST_BP, EBP);
DEFINE(HOST_CS, CS);
DEFINE(HOST_SS, SS);
DEFINE(HOST_DS, DS);
DEFINE(HOST_FS, FS);
DEFINE(HOST_ES, ES);
DEFINE(HOST_GS, GS);
DEFINE(HOST_ORIG_AX, ORIG_EAX);
#else
DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
DEFINE_LONGS(HOST_BX, RBX);
DEFINE_LONGS(HOST_CX, RCX);
DEFINE_LONGS(HOST_DI, RDI);
DEFINE_LONGS(HOST_SI, RSI);
DEFINE_LONGS(HOST_DX, RDX);
DEFINE_LONGS(HOST_BP, RBP);
DEFINE_LONGS(HOST_AX, RAX);
DEFINE_LONGS(HOST_R8, R8);
DEFINE_LONGS(HOST_R9, R9);
DEFINE_LONGS(HOST_R10, R10);
DEFINE_LONGS(HOST_R11, R11);
DEFINE_LONGS(HOST_R12, R12);
DEFINE_LONGS(HOST_R13, R13);
DEFINE_LONGS(HOST_R14, R14);
DEFINE_LONGS(HOST_R15, R15);
DEFINE_LONGS(HOST_ORIG_AX, ORIG_RAX);
DEFINE_LONGS(HOST_CS, CS);
DEFINE_LONGS(HOST_SS, SS);
DEFINE_LONGS(HOST_EFLAGS, EFLAGS);
#if 0
DEFINE_LONGS(HOST_FS, FS);
DEFINE_LONGS(HOST_GS, GS);
DEFINE_LONGS(HOST_DS, DS);
DEFINE_LONGS(HOST_ES, ES);
#endif
DEFINE_LONGS(HOST_IP, RIP);
DEFINE_LONGS(HOST_SP, RSP);
#endif
DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
DEFINE(UM_POLLIN, POLLIN);
DEFINE(UM_POLLPRI, POLLPRI);
DEFINE(UM_POLLOUT, POLLOUT);
DEFINE(UM_PROT_READ, PROT_READ);
DEFINE(UM_PROT_WRITE, PROT_WRITE);
DEFINE(UM_PROT_EXEC, PROT_EXEC);
DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
DEFINE(NR_syscalls, sizeof(syscalls));
}
| gpl-2.0 |
Team-Hydra/android_kernel_htc_msm8660-caf | Documentation/prctl/disable-tsc-on-off-stress-test.c | 12901 | 1717 | /*
* Tests for prctl(PR_GET_TSC, ...) / prctl(PR_SET_TSC, ...)
*
* Tests if the control register is updated correctly
* when set with prctl()
*
* Warning: this test will cause a very high load for a few seconds
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <wait.h>
#include <sys/prctl.h>
#include <linux/prctl.h>
/* Get/set the process' ability to use the timestamp counter instruction */
#ifndef PR_GET_TSC
#define PR_GET_TSC 25
#define PR_SET_TSC 26
# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */
# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */
#endif
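/* Minimal illustrative usage of these requests (not part of the test
* proper, shown only for reference):
*
* int state;
* prctl(PR_GET_TSC, &state); // read the current TSC policy
* prctl(PR_SET_TSC, PR_TSC_SIGSEGV); // make rdtsc raise SIGSEGV
* prctl(PR_SET_TSC, PR_TSC_ENABLE); // allow rdtsc again
*
* The stress test below toggles between the last two and checks that
* rdtsc() faults exactly when it is supposed to.
*/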
/* snippet from wikipedia :-) */
uint64_t rdtsc() {
uint32_t lo, hi;
/* We cannot use "=A", since this would use %rax on x86_64 */
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
int should_segv = 0;
void sigsegv_cb(int sig)
{
if (!should_segv)
{
fprintf(stderr, "FATAL ERROR, rdtsc() failed while enabled\n");
exit(0);
}
if (prctl(PR_SET_TSC, PR_TSC_ENABLE) < 0)
{
perror("prctl");
exit(0);
}
should_segv = 0;
rdtsc();
}
void task(void)
{
signal(SIGSEGV, sigsegv_cb);
alarm(10);
for(;;)
{
rdtsc();
if (should_segv)
{
fprintf(stderr, "FATAL ERROR, rdtsc() succeeded while disabled\n");
exit(0);
}
if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV) < 0)
{
perror("prctl");
exit(0);
}
should_segv = 1;
}
}
int main(int argc, char **argv)
{
int n_tasks = 100, i;
fprintf(stderr, "[No further output means we're all right]\n");
for (i=0; i<n_tasks; i++)
if (fork() == 0)
task();
for (i=0; i<n_tasks; i++)
wait(NULL);
exit(0);
}
| gpl-2.0 |
iains/darwin-gcc-5 | libgomp/testsuite/libgomp.fortran/udr8.f90 | 102 | 1074 | ! { dg-do run }
module udr8m1
integer, parameter :: a = 6
integer :: b
!$omp declare reduction (foo : integer : omp_out = omp_out + omp_in)
!$omp declare reduction (.add. : integer : &
!$omp & omp_out = omp_out .add. iand (omp_in, -4)) &
!$omp & initializer (omp_priv = 3)
interface operator (.add.)
module procedure f1
end interface
contains
integer function f1 (x, y)
integer, intent (in) :: x, y
f1 = x + y
end function f1
end module udr8m1
module udr8m2
use udr8m1
type dt
integer :: x
end type
!$omp declare reduction (+ : dt : omp_out = omp_out + omp_in) &
!$omp & initializer (omp_priv = dt (0))
interface operator (+)
module procedure f2
end interface
contains
type(dt) function f2 (x, y)
type(dt), intent (in) :: x, y
f2%x = x%x + y%x
end function f2
end module udr8m2
use udr8m2
integer :: i, j
type(dt) :: d
j = 3
d%x = 0
!$omp parallel do reduction (.add.: j) reduction (+ : d)
do i = 1, 100
j = j.add.iand (i, -4)
d = d + dt(i)
end do
if (d%x /= 5050 .or. j /= 4903) call abort
end
| gpl-2.0 |
sudosurootdev/linux | drivers/usb/core/hub.c | 102 | 165602 | /*
* USB hub driver.
*
* (C) Copyright 1999 Linus Torvalds
* (C) Copyright 1999 Johannes Erdfelt
* (C) Copyright 1999 Gregory P. Smith
* (C) Copyright 2001 Brad Hards (bhards@bigpond.net.au)
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
#include <linux/usb/hcd.h>
#include <linux/usb/otg.h>
#include <linux/usb/quirks.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/pm_qos.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include "hub.h"
#include "otg_whitelist.h"
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
static DEFINE_SPINLOCK(device_state_lock);
/* workqueue to process hub events */
static struct workqueue_struct *hub_wq;
static void hub_event(struct work_struct *work);
/* synchronize hub-port add/remove and peering operations */
DEFINE_MUTEX(usb_port_peer_mutex);
/* cycle leds on hubs that aren't blinking for attention */
static bool blinkenlights = 0;
module_param (blinkenlights, bool, S_IRUGO);
MODULE_PARM_DESC (blinkenlights, "true to cycle leds on hubs");
/*
* Device SATA8000 FW1.0 from DATAST0R Technology Corp requires about
* 10 seconds to send reply for the initial 64-byte descriptor request.
*/
/* define initial 64-byte descriptor request timeout in milliseconds */
static int initial_descriptor_timeout = USB_CTRL_GET_TIMEOUT;
module_param(initial_descriptor_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(initial_descriptor_timeout,
"initial 64-byte descriptor request timeout in milliseconds "
"(default 5000 - 5.0 seconds)");
/*
* As of 2.6.10 we introduce a new USB device initialization scheme which
* closely resembles the way Windows works. Hopefully it will be compatible
* with a wider range of devices than the old scheme. However some previously
* working devices may start giving rise to "device not accepting address"
* errors; if that happens the user can try the old scheme by adjusting the
* following module parameters.
*
* For maximum flexibility there are two boolean parameters to control the
* hub driver's behavior. On the first initialization attempt, if the
* "old_scheme_first" parameter is set then the old scheme will be used,
* otherwise the new scheme is used. If that fails and "use_both_schemes"
* is set, then the driver will make another attempt, using the other scheme.
*/
static bool old_scheme_first = 0;
module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(old_scheme_first,
"start with the old device initialization scheme");
static bool use_both_schemes = 1;
module_param(use_both_schemes, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_both_schemes,
"try the other device initialization scheme if the "
"first one fails");
/* Mutual exclusion for EHCI CF initialization. This interferes with
* port reset on some companion controllers.
*/
DECLARE_RWSEM(ehci_cf_port_reset_rwsem);
EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
#define HUB_DEBOUNCE_TIMEOUT 2000
#define HUB_DEBOUNCE_STEP 25
#define HUB_DEBOUNCE_STABLE 100
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
if (hub_is_superspeed(hub->hdev))
return "5.0 Gb/s";
if (portstatus & USB_PORT_STAT_HIGH_SPEED)
return "480 Mb/s";
else if (portstatus & USB_PORT_STAT_LOW_SPEED)
return "1.5 Mb/s";
else
return "12 Mb/s";
}
/* Note that hdev or one of its children must be locked! */
struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
{
if (!hdev || !hdev->actconfig || !hdev->maxchild)
return NULL;
return usb_get_intfdata(hdev->actconfig->interface[0]);
}
static int usb_device_supports_lpm(struct usb_device *udev)
{
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
*/
if (udev->speed == USB_SPEED_HIGH) {
if (udev->bos->ext_cap &&
(USB_LPM_SUPPORT &
le32_to_cpu(udev->bos->ext_cap->bmAttributes)))
return 1;
return 0;
}
/*
* According to the USB 3.0 spec, all USB 3.0 devices must support LPM.
* However, there are some that don't, and they set the U1/U2 exit
* latencies to zero.
*/
if (!udev->bos->ss_cap) {
dev_info(&udev->dev, "No LPM exit latency info found, disabling LPM.\n");
return 0;
}
if (udev->bos->ss_cap->bU1devExitLat == 0 &&
udev->bos->ss_cap->bU2DevExitLat == 0) {
if (udev->parent)
dev_info(&udev->dev, "LPM exit latency is zeroed, disabling LPM.\n");
else
dev_info(&udev->dev, "We don't know the algorithms for LPM for this host, disabling LPM.\n");
return 0;
}
if (!udev->parent || udev->parent->lpm_capable)
return 1;
return 0;
}
/*
* Set the Maximum Exit Latency (MEL) for the host to initiate a transition from
* either U1 or U2.
*/
static void usb_set_lpm_mel(struct usb_device *udev,
struct usb3_lpm_parameters *udev_lpm_params,
unsigned int udev_exit_latency,
struct usb_hub *hub,
struct usb3_lpm_parameters *hub_lpm_params,
unsigned int hub_exit_latency)
{
unsigned int total_mel;
unsigned int device_mel;
unsigned int hub_mel;
/*
* Calculate the time it takes to transition all links from the roothub
* to the parent hub into U0. The parent hub must then decode the
* packet (hub header decode latency) to figure out which port it was
* bound for.
*
* The Hub Header decode latency is expressed in 0.1us intervals (0x1
* means 0.1us). Multiply that by 100 to get nanoseconds.
*/
total_mel = hub_lpm_params->mel +
(hub->descriptor->u.ss.bHubHdrDecLat * 100);
/*
* How long will it take to transition the downstream hub's port into
* U0? The greater of either the hub exit latency or the device exit
* latency.
*
* The BOS U1/U2 exit latencies are expressed in 1us intervals.
* Multiply that by 1000 to get nanoseconds.
*/
device_mel = udev_exit_latency * 1000;
hub_mel = hub_exit_latency * 1000;
if (device_mel > hub_mel)
total_mel += device_mel;
else
total_mel += hub_mel;
udev_lpm_params->mel = total_mel;
}
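/* Worked example with illustrative numbers (not taken from any real
* descriptor): if the parent hub's own MEL is 1000 ns, its
* bHubHdrDecLat is 0x2 (0.2 us -> 200 ns), the hub's U1 exit latency
* is 2 us and the device's U1 exit latency is 3 us, the device's U1
* MEL becomes 1000 + 200 + max(3000, 2000) = 4200 ns.
*/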
/*
* Set the maximum Device to Host Exit Latency (PEL) for the device to initiate
* a transition from either U1 or U2.
*/
static void usb_set_lpm_pel(struct usb_device *udev,
struct usb3_lpm_parameters *udev_lpm_params,
unsigned int udev_exit_latency,
struct usb_hub *hub,
struct usb3_lpm_parameters *hub_lpm_params,
unsigned int hub_exit_latency,
unsigned int port_to_port_exit_latency)
{
unsigned int first_link_pel;
unsigned int hub_pel;
/*
* First, the device sends an LFPS to transition the link between the
* device and the parent hub into U0. The exit latency is the bigger of
* the device exit latency or the hub exit latency.
*/
if (udev_exit_latency > hub_exit_latency)
first_link_pel = udev_exit_latency * 1000;
else
first_link_pel = hub_exit_latency * 1000;
/*
* When the hub starts to receive the LFPS, there is a slight delay for
* it to figure out that one of the ports is sending an LFPS. Then it
* will forward the LFPS to its upstream link. The exit latency is the
* delay, plus the PEL that we calculated for this hub.
*/
hub_pel = port_to_port_exit_latency * 1000 + hub_lpm_params->pel;
/*
* According to figure C-7 in the USB 3.0 spec, the PEL for this device
* is the greater of the two exit latencies.
*/
if (first_link_pel > hub_pel)
udev_lpm_params->pel = first_link_pel;
else
udev_lpm_params->pel = hub_pel;
}
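/* Worked example with illustrative numbers: with a device U1 exit
* latency of 3 us and a hub U1 exit latency of 2 us, first_link_pel is
* max(3000, 2000) = 3000 ns; with a 1 us port-to-port delay and a hub
* PEL of 1500 ns, hub_pel is 1000 + 1500 = 2500 ns, so the device's
* PEL is max(3000, 2500) = 3000 ns.
*/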
/*
* Set the System Exit Latency (SEL) to indicate the total worst-case time from
* when a device initiates a transition to U0, until when it will receive the
* first packet from the host controller.
*
* Section C.1.5.1 describes the four components to this:
* - t1: device PEL
* - t2: time for the ERDY to make it from the device to the host.
* - t3: a host-specific delay to process the ERDY.
* - t4: time for the packet to make it from the host to the device.
*
* t3 is specific to both the xHCI host and the platform the host is integrated
* into. The Intel HW folks have said it's negligible, FIXME if a different
* vendor says otherwise.
*/
static void usb_set_lpm_sel(struct usb_device *udev,
struct usb3_lpm_parameters *udev_lpm_params)
{
struct usb_device *parent;
unsigned int num_hubs;
unsigned int total_sel;
/* t1 = device PEL */
total_sel = udev_lpm_params->pel;
/* How many external hubs are in between the device & the root port. */
for (parent = udev->parent, num_hubs = 0; parent->parent;
parent = parent->parent)
num_hubs++;
/* t2 = 2.1us + 250ns * (num_hubs - 1) */
if (num_hubs > 0)
total_sel += 2100 + 250 * (num_hubs - 1);
/* t4 = 250ns * num_hubs */
total_sel += 250 * num_hubs;
udev_lpm_params->sel = total_sel;
}
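/* Worked example: for a device behind a single external hub
* (num_hubs = 1), t2 is 2100 ns and t4 is 250 ns, so
* SEL = PEL + 2100 + 250 = PEL + 2350 ns. A device attached directly
* to a root port (num_hubs = 0) simply gets SEL = PEL.
*/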
static void usb_set_lpm_parameters(struct usb_device *udev)
{
struct usb_hub *hub;
unsigned int port_to_port_delay;
unsigned int udev_u1_del;
unsigned int udev_u2_del;
unsigned int hub_u1_del;
unsigned int hub_u2_del;
if (!udev->lpm_capable || udev->speed != USB_SPEED_SUPER)
return;
hub = usb_hub_to_struct_hub(udev->parent);
/* It doesn't take time to transition the roothub into U0, since it
* doesn't have an upstream link.
*/
if (!hub)
return;
udev_u1_del = udev->bos->ss_cap->bU1devExitLat;
udev_u2_del = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat);
hub_u1_del = udev->parent->bos->ss_cap->bU1devExitLat;
hub_u2_del = le16_to_cpu(udev->parent->bos->ss_cap->bU2DevExitLat);
usb_set_lpm_mel(udev, &udev->u1_params, udev_u1_del,
hub, &udev->parent->u1_params, hub_u1_del);
usb_set_lpm_mel(udev, &udev->u2_params, udev_u2_del,
hub, &udev->parent->u2_params, hub_u2_del);
/*
* Appendix C, section C.2.2.2, says that there is a slight delay from
* when the parent hub notices the downstream port is trying to
* transition to U0 to when the hub initiates a U0 transition on its
* upstream port. The section says the delays are tPort2PortU1EL and
* tPort2PortU2EL, but it doesn't define what they are.
*
* The hub chapter, sections 10.4.2.4 and 10.4.2.5 seem to be talking
* about the same delays. Use the maximum delay calculations from those
* sections. For U1, it's tHubPort2PortExitLat, which is 1us max. For
* U2, it's tHubPort2PortExitLat + U2DevExitLat - U1DevExitLat. I
* assume the device exit latencies they are talking about are the hub
* exit latencies.
*
* What do we do if the U2 exit latency is less than the U1 exit
* latency? It's possible, although not likely...
*/
port_to_port_delay = 1;
usb_set_lpm_pel(udev, &udev->u1_params, udev_u1_del,
hub, &udev->parent->u1_params, hub_u1_del,
port_to_port_delay);
if (hub_u2_del > hub_u1_del)
port_to_port_delay = 1 + hub_u2_del - hub_u1_del;
else
port_to_port_delay = 1 + hub_u1_del;
usb_set_lpm_pel(udev, &udev->u2_params, udev_u2_del,
hub, &udev->parent->u2_params, hub_u2_del,
port_to_port_delay);
/* Now that we've got PEL, calculate SEL. */
usb_set_lpm_sel(udev, &udev->u1_params);
usb_set_lpm_sel(udev, &udev->u2_params);
}
/* USB 2.0 spec Section 11.24.4.5 */
static int get_hub_descriptor(struct usb_device *hdev, void *data)
{
int i, ret, size;
unsigned dtype;
if (hub_is_superspeed(hdev)) {
dtype = USB_DT_SS_HUB;
size = USB_DT_SS_HUB_SIZE;
} else {
dtype = USB_DT_HUB;
size = sizeof(struct usb_hub_descriptor);
}
for (i = 0; i < 3; i++) {
ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
dtype << 8, 0, data, size,
USB_CTRL_GET_TIMEOUT);
if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2))
return ret;
}
return -EINVAL;
}
/*
* USB 2.0 spec Section 11.24.2.1
*/
static int clear_hub_feature(struct usb_device *hdev, int feature)
{
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
USB_REQ_CLEAR_FEATURE, USB_RT_HUB, feature, 0, NULL, 0, 1000);
}
/*
* USB 2.0 spec Section 11.24.2.2
*/
int usb_clear_port_feature(struct usb_device *hdev, int port1, int feature)
{
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
USB_REQ_CLEAR_FEATURE, USB_RT_PORT, feature, port1,
NULL, 0, 1000);
}
/*
* USB 2.0 spec Section 11.24.2.13
*/
static int set_port_feature(struct usb_device *hdev, int port1, int feature)
{
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1,
NULL, 0, 1000);
}
static char *to_led_name(int selector)
{
switch (selector) {
case HUB_LED_AMBER:
return "amber";
case HUB_LED_GREEN:
return "green";
case HUB_LED_OFF:
return "off";
case HUB_LED_AUTO:
return "auto";
default:
return "??";
}
}
/*
* USB 2.0 spec Section 11.24.2.7.1.10 and table 11-7
* for info about using port indicators
*/
static void set_port_led(struct usb_hub *hub, int port1, int selector)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
int status;
status = set_port_feature(hub->hdev, (selector << 8) | port1,
USB_PORT_FEAT_INDICATOR);
dev_dbg(&port_dev->dev, "indicator %s status %d\n",
to_led_name(selector), status);
}
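/* Note: the SetPortFeature(PORT_INDICATOR) request carries the
* indicator selector in the high byte of wIndex and the port number in
* the low byte, which is why the call above packs
* (selector << 8) | port1 into the index argument.
*/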
#define LED_CYCLE_PERIOD ((2*HZ)/3)
static void led_work (struct work_struct *work)
{
struct usb_hub *hub =
container_of(work, struct usb_hub, leds.work);
struct usb_device *hdev = hub->hdev;
unsigned i;
unsigned changed = 0;
int cursor = -1;
if (hdev->state != USB_STATE_CONFIGURED || hub->quiescing)
return;
for (i = 0; i < hdev->maxchild; i++) {
unsigned selector, mode;
/* 30%-50% duty cycle */
switch (hub->indicator[i]) {
/* cycle marker */
case INDICATOR_CYCLE:
cursor = i;
selector = HUB_LED_AUTO;
mode = INDICATOR_AUTO;
break;
/* blinking green = sw attention */
case INDICATOR_GREEN_BLINK:
selector = HUB_LED_GREEN;
mode = INDICATOR_GREEN_BLINK_OFF;
break;
case INDICATOR_GREEN_BLINK_OFF:
selector = HUB_LED_OFF;
mode = INDICATOR_GREEN_BLINK;
break;
/* blinking amber = hw attention */
case INDICATOR_AMBER_BLINK:
selector = HUB_LED_AMBER;
mode = INDICATOR_AMBER_BLINK_OFF;
break;
case INDICATOR_AMBER_BLINK_OFF:
selector = HUB_LED_OFF;
mode = INDICATOR_AMBER_BLINK;
break;
/* blink green/amber = reserved */
case INDICATOR_ALT_BLINK:
selector = HUB_LED_GREEN;
mode = INDICATOR_ALT_BLINK_OFF;
break;
case INDICATOR_ALT_BLINK_OFF:
selector = HUB_LED_AMBER;
mode = INDICATOR_ALT_BLINK;
break;
default:
continue;
}
if (selector != HUB_LED_AUTO)
changed = 1;
set_port_led(hub, i + 1, selector);
hub->indicator[i] = mode;
}
if (!changed && blinkenlights) {
cursor++;
cursor %= hdev->maxchild;
set_port_led(hub, cursor + 1, HUB_LED_GREEN);
hub->indicator[cursor] = INDICATOR_CYCLE;
changed++;
}
if (changed)
queue_delayed_work(system_power_efficient_wq,
&hub->leds, LED_CYCLE_PERIOD);
}
/* use a short timeout for hub/port status fetches */
#define USB_STS_TIMEOUT 1000
#define USB_STS_RETRIES 5
/*
* USB 2.0 spec Section 11.24.2.6
*/
static int get_hub_status(struct usb_device *hdev,
struct usb_hub_status *data)
{
int i, status = -ETIMEDOUT;
for (i = 0; i < USB_STS_RETRIES &&
(status == -ETIMEDOUT || status == -EPIPE); i++) {
status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_HUB, 0, 0,
data, sizeof(*data), USB_STS_TIMEOUT);
}
return status;
}
/*
* USB 2.0 spec Section 11.24.2.7
*/
static int get_port_status(struct usb_device *hdev, int port1,
struct usb_port_status *data)
{
int i, status = -ETIMEDOUT;
for (i = 0; i < USB_STS_RETRIES &&
(status == -ETIMEDOUT || status == -EPIPE); i++) {
status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1,
data, sizeof(*data), USB_STS_TIMEOUT);
}
return status;
}
static int hub_port_status(struct usb_hub *hub, int port1,
u16 *status, u16 *change)
{
int ret;
mutex_lock(&hub->status_mutex);
ret = get_port_status(hub->hdev, port1, &hub->status->port);
if (ret < 4) {
if (ret != -ENODEV)
dev_err(hub->intfdev,
"%s failed (err = %d)\n", __func__, ret);
if (ret >= 0)
ret = -EIO;
} else {
*status = le16_to_cpu(hub->status->port.wPortStatus);
*change = le16_to_cpu(hub->status->port.wPortChange);
ret = 0;
}
mutex_unlock(&hub->status_mutex);
return ret;
}
static void kick_hub_wq(struct usb_hub *hub)
{
struct usb_interface *intf;
if (hub->disconnected || work_pending(&hub->events))
return;
/*
* Suppress autosuspend until the event is processed.
*
* Be careful and make sure that the symmetric operation is
* always called. We are here only when there is no pending
* work for this hub. Therefore put the interface either when
* the new work is called or when it is canceled.
*/
intf = to_usb_interface(hub->intfdev);
usb_autopm_get_interface_no_resume(intf);
kref_get(&hub->kref);
if (queue_work(hub_wq, &hub->events))
return;
/* the work has already been scheduled */
usb_autopm_put_interface_async(intf);
kref_put(&hub->kref, hub_release);
}
void usb_kick_hub_wq(struct usb_device *hdev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
if (hub)
kick_hub_wq(hub);
}
/*
* Let the USB core know that a USB 3.0 device has sent a Function Wake Device
* Notification, which indicates it had initiated remote wakeup.
*
* USB 3.0 hubs do not report the port link state change from U3 to U0 when the
* device initiates resume, so the USB core will not receive notice of the
* resume through the normal hub interrupt URB.
*/
void usb_wakeup_notification(struct usb_device *hdev,
unsigned int portnum)
{
struct usb_hub *hub;
if (!hdev)
return;
hub = usb_hub_to_struct_hub(hdev);
if (hub) {
set_bit(portnum, hub->wakeup_bits);
kick_hub_wq(hub);
}
}
EXPORT_SYMBOL_GPL(usb_wakeup_notification);
/* completion function, fires on port status changes and various faults */
static void hub_irq(struct urb *urb)
{
struct usb_hub *hub = urb->context;
int status = urb->status;
unsigned i;
unsigned long bits;
switch (status) {
case -ENOENT: /* synchronous unlink */
case -ECONNRESET: /* async unlink */
case -ESHUTDOWN: /* hardware going away */
return;
default: /* presumably an error */
/* Cause a hub reset after 10 consecutive errors */
dev_dbg (hub->intfdev, "transfer --> %d\n", status);
if ((++hub->nerrors < 10) || hub->error)
goto resubmit;
hub->error = status;
/* FALL THROUGH */
/* let hub_wq handle things */
case 0: /* we got data: port status changed */
bits = 0;
for (i = 0; i < urb->actual_length; ++i)
bits |= ((unsigned long) ((*hub->buffer)[i]))
<< (i*8);
hub->event_bits[0] = bits;
break;
}
hub->nerrors = 0;
/* Something happened, let hub_wq figure it out */
kick_hub_wq(hub);
resubmit:
if (hub->quiescing)
return;
if ((status = usb_submit_urb (hub->urb, GFP_ATOMIC)) != 0
&& status != -ENODEV && status != -EPERM)
dev_err (hub->intfdev, "resubmit --> %d\n", status);
}
/* USB 2.0 spec Section 11.24.2.3 */
static inline int
hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
{
/* Need to clear both directions for control ep */
if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_CONTROL) {
int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
HUB_CLEAR_TT_BUFFER, USB_RT_PORT,
devinfo ^ 0x8000, tt, NULL, 0, 1000);
if (status)
return status;
}
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
tt, NULL, 0, 1000);
}
/*
* enumeration blocks hub_wq for a long time. we use keventd instead, since
* long blocking there is the exception, not the rule. accordingly, HCDs
* talking to TTs must queue control transfers (not just bulk and iso), so
* both can talk to the same hub concurrently.
*/
static void hub_tt_work(struct work_struct *work)
{
struct usb_hub *hub =
container_of(work, struct usb_hub, tt.clear_work);
unsigned long flags;
spin_lock_irqsave (&hub->tt.lock, flags);
while (!list_empty(&hub->tt.clear_list)) {
struct list_head *next;
struct usb_tt_clear *clear;
struct usb_device *hdev = hub->hdev;
const struct hc_driver *drv;
int status;
next = hub->tt.clear_list.next;
clear = list_entry (next, struct usb_tt_clear, clear_list);
list_del (&clear->clear_list);
/* drop lock so HCD can concurrently report other TT errors */
spin_unlock_irqrestore (&hub->tt.lock, flags);
status = hub_clear_tt_buffer (hdev, clear->devinfo, clear->tt);
if (status && status != -ENODEV)
dev_err (&hdev->dev,
"clear tt %d (%04x) error %d\n",
clear->tt, clear->devinfo, status);
/* Tell the HCD, even if the operation failed */
drv = clear->hcd->driver;
if (drv->clear_tt_buffer_complete)
(drv->clear_tt_buffer_complete)(clear->hcd, clear->ep);
kfree(clear);
spin_lock_irqsave(&hub->tt.lock, flags);
}
spin_unlock_irqrestore (&hub->tt.lock, flags);
}
/**
* usb_hub_set_port_power - control hub port's power state
* @hdev: USB device belonging to the usb hub
* @hub: target hub
* @port1: port index
* @set: true to enable port power, false to disable it
*
* Call this function to control a port's power state by setting or
* clearing the port's PORT_POWER feature.
*
* Return: 0 if successful. A negative error code otherwise.
*/
int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub,
int port1, bool set)
{
int ret;
if (set)
ret = set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
else
ret = usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
if (ret)
return ret;
if (set)
set_bit(port1, hub->power_bits);
else
clear_bit(port1, hub->power_bits);
return 0;
}
/**
* usb_hub_clear_tt_buffer - clear control/bulk TT state in high speed hub
* @urb: an URB associated with the failed or incomplete split transaction
*
* High speed HCDs use this to tell the hub driver that some split control or
* bulk transaction failed in a way that requires clearing internal state of
* a transaction translator. This is normally detected (and reported) from
* interrupt context.
*
* It may not be possible for that hub to handle additional full (or low)
* speed transactions until that state is fully cleared out.
*
* Return: 0 if successful. A negative error code otherwise.
*/
int usb_hub_clear_tt_buffer(struct urb *urb)
{
struct usb_device *udev = urb->dev;
int pipe = urb->pipe;
struct usb_tt *tt = udev->tt;
unsigned long flags;
struct usb_tt_clear *clear;
/* we've got to cope with an arbitrary number of pending TT clears,
* since each TT has "at least two" buffers that can need it (and
* there can be many TTs per hub). even if they're uncommon.
*/
if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) {
dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
/* FIXME recover somehow ... RESET_TT? */
return -ENOMEM;
}
/* info that CLEAR_TT_BUFFER needs */
clear->tt = tt->multi ? udev->ttport : 1;
clear->devinfo = usb_pipeendpoint (pipe);
clear->devinfo |= udev->devnum << 4;
clear->devinfo |= usb_pipecontrol (pipe)
? (USB_ENDPOINT_XFER_CONTROL << 11)
: (USB_ENDPOINT_XFER_BULK << 11);
if (usb_pipein (pipe))
clear->devinfo |= 1 << 15;
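/* The assembled devinfo follows the Clear_TT_Buffer wValue layout from
* USB 2.0 section 11.24.2.3: bits 3:0 endpoint number, bits 10:4
* device address, bits 12:11 endpoint type (control or bulk), and
* bit 15 the direction (set for IN). hub_clear_tt_buffer() above
* toggles bit 15 (devinfo ^ 0x8000) so that both directions of a
* control endpoint get cleared.
*/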
/* info for completion callback */
clear->hcd = bus_to_hcd(udev->bus);
clear->ep = urb->ep;
/* tell keventd to clear state for this TT */
spin_lock_irqsave (&tt->lock, flags);
list_add_tail (&clear->clear_list, &tt->clear_list);
schedule_work(&tt->clear_work);
spin_unlock_irqrestore (&tt->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer);
static void hub_power_on(struct usb_hub *hub, bool do_delay)
{
int port1;
/* Enable power on each port. Some hubs have reserved values
* of LPSM (> 2) in their descriptors, even though they are
* USB 2.0 hubs. Some hubs do not implement port-power switching
* but only emulate it. In all cases, the ports won't work
* unless we send these messages to the hub.
*/
if (hub_is_port_power_switchable(hub))
dev_dbg(hub->intfdev, "enabling power on all ports\n");
else
dev_dbg(hub->intfdev, "trying to enable port power on "
"non-switchable hub\n");
for (port1 = 1; port1 <= hub->hdev->maxchild; port1++)
if (test_bit(port1, hub->power_bits))
set_port_feature(hub->hdev, port1, USB_PORT_FEAT_POWER);
else
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_POWER);
if (do_delay)
msleep(hub_power_on_good_delay(hub));
}
static int hub_hub_status(struct usb_hub *hub,
u16 *status, u16 *change)
{
int ret;
mutex_lock(&hub->status_mutex);
ret = get_hub_status(hub->hdev, &hub->status->hub);
if (ret < 0) {
if (ret != -ENODEV)
dev_err(hub->intfdev,
"%s failed (err = %d)\n", __func__, ret);
} else {
*status = le16_to_cpu(hub->status->hub.wHubStatus);
*change = le16_to_cpu(hub->status->hub.wHubChange);
ret = 0;
}
mutex_unlock(&hub->status_mutex);
return ret;
}
static int hub_set_port_link_state(struct usb_hub *hub, int port1,
unsigned int link_status)
{
return set_port_feature(hub->hdev,
port1 | (link_status << 3),
USB_PORT_FEAT_LINK_STATE);
}
/*
* If USB 3.0 ports are placed into the Disabled state, they will no longer
* detect any device connects or disconnects. This is generally not what the
* USB core wants, since it expects a disabled port to produce a port status
* change event when a new device connects.
*
* Instead, set the link state to Disabled, wait for the link to settle into
* that state, clear any change bits, and then put the port into the RxDetect
* state.
*/
static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
{
int ret;
int total_time;
u16 portchange, portstatus;
if (!hub_is_superspeed(hub->hdev))
return -EINVAL;
ret = hub_port_status(hub, port1, &portstatus, &portchange);
if (ret < 0)
return ret;
/*
* USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
* Controller [1022:7814] can report a spurious result that makes a
* subsequently hot-plugged USB 3.0 device route to the 2.0 root hub
* and be recognized as a high-speed device if we set the USB 3.0 port
* link state to Disabled. Since the port is already in the
* USB_SS_PORT_LS_RX_DETECT state, check the state here to avoid the
* bug.
*/
if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
USB_SS_PORT_LS_RX_DETECT) {
dev_dbg(&hub->ports[port1 - 1]->dev,
"Not disabling port; link state is RxDetect\n");
return ret;
}
ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
if (ret)
return ret;
/* Wait for the link to enter the disabled state. */
for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
ret = hub_port_status(hub, port1, &portstatus, &portchange);
if (ret < 0)
return ret;
if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
USB_SS_PORT_LS_SS_DISABLED)
break;
if (total_time >= HUB_DEBOUNCE_TIMEOUT)
break;
msleep(HUB_DEBOUNCE_STEP);
}
if (total_time >= HUB_DEBOUNCE_TIMEOUT)
dev_warn(&hub->ports[port1 - 1]->dev,
"Could not disable after %d ms\n", total_time);
return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
}
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *hdev = hub->hdev;
int ret = 0;
if (port_dev->child && set_state)
usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
if (!hub->error) {
if (hub_is_superspeed(hub->hdev))
ret = hub_usb3_port_disable(hub, port1);
else
ret = usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_ENABLE);
}
if (ret && ret != -ENODEV)
dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
return ret;
}
/*
* Disable a port and mark a logical connect-change event, so that some
* time later hub_wq will disconnect() any existing usb_device on the port
* and will re-enumerate if there actually is a device attached.
*/
static void hub_port_logical_disconnect(struct usb_hub *hub, int port1)
{
dev_dbg(&hub->ports[port1 - 1]->dev, "logical disconnect\n");
hub_port_disable(hub, port1, 1);
/* FIXME let caller ask to power down the port:
* - some devices won't enumerate without a VBUS power cycle
* - SRP saves power that way
* - ... new call, TBD ...
* That's easy if this hub can switch power per-port, and
* hub_wq reactivates the port later (timer, SRP, etc).
* Powerdown must be optional, because of reset/DFU.
*/
set_bit(port1, hub->change_bits);
kick_hub_wq(hub);
}
/**
* usb_remove_device - disable a device's port on its parent hub
* @udev: device to be disabled and removed
* Context: @udev locked, must be able to sleep.
*
* After @udev's port has been disabled, hub_wq is notified and it will
* see that the device has been disconnected. When the device is
* physically unplugged and something is plugged in, the events will
* be received and processed normally.
*
* Return: 0 if successful. A negative error code otherwise.
*/
int usb_remove_device(struct usb_device *udev)
{
struct usb_hub *hub;
struct usb_interface *intf;
if (!udev->parent) /* Can't remove a root hub */
return -EINVAL;
hub = usb_hub_to_struct_hub(udev->parent);
intf = to_usb_interface(hub->intfdev);
usb_autopm_get_interface(intf);
set_bit(udev->portnum, hub->removed_bits);
hub_port_logical_disconnect(hub, udev->portnum);
usb_autopm_put_interface(intf);
return 0;
}
enum hub_activation_type {
HUB_INIT, HUB_INIT2, HUB_INIT3, /* INITs must come first */
HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME,
};
static void hub_init_func2(struct work_struct *ws);
static void hub_init_func3(struct work_struct *ws);
static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
{
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd;
int ret;
int port1;
int status;
bool need_debounce_delay = false;
unsigned delay;
/* Continue a partial initialization */
if (type == HUB_INIT2)
goto init2;
if (type == HUB_INIT3)
goto init3;
/* A superspeed hub (other than the root hub) has to use the Hub Depth
* value as an offset into the route string to locate the bits it uses
* to determine the downstream port number, so the hub driver should
* send a Set Hub Depth request to a superspeed hub after the hub has
* been configured, during initialization or the reset procedure.
*
* After a resume, port power should still be on.
* For any other type of activation, turn it on.
*/
if (type != HUB_RESUME) {
if (hdev->parent && hub_is_superspeed(hdev)) {
ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
HUB_SET_DEPTH, USB_RT_HUB,
hdev->level - 1, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
if (ret < 0)
dev_err(hub->intfdev,
"set hub depth failed\n");
}
/* Speed up system boot by using a delayed_work for the
* hub's initial power-up delays. This is pretty awkward
* and the implementation looks like a home-brewed sort of
* setjmp/longjmp, but it saves at least 100 ms for each
* root hub (assuming usbcore is compiled into the kernel
* rather than as a module). It adds up.
*
* This can't be done for HUB_RESUME or HUB_RESET_RESUME
* because for those activation types the ports have to be
* operational when we return. In theory this could be done
* for HUB_POST_RESET, but it's easier not to.
*/
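/* Summary of the staged flow implemented below: HUB_INIT powers the
* ports on and schedules hub_init_func2() after the power-good delay;
* the HUB_INIT2 pass may in turn schedule hub_init_func3() after the
* debounce delay; the HUB_INIT3 pass finally submits the status URB
* and kicks hub_wq.
*/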
if (type == HUB_INIT) {
unsigned delay = hub_power_on_good_delay(hub);
hub_power_on(hub, false);
INIT_DELAYED_WORK(&hub->init_work, hub_init_func2);
queue_delayed_work(system_power_efficient_wq,
&hub->init_work,
msecs_to_jiffies(delay));
/* Suppress autosuspend until init is done */
usb_autopm_get_interface_no_resume(
to_usb_interface(hub->intfdev));
return; /* Continues at init2: below */
} else if (type == HUB_RESET_RESUME) {
/* The internal host controller state for the hub device
* may be gone after a host power loss on system resume.
* Update the device's info so the HW knows it's a hub.
*/
hcd = bus_to_hcd(hdev->bus);
if (hcd->driver->update_hub_device) {
ret = hcd->driver->update_hub_device(hcd, hdev,
&hub->tt, GFP_NOIO);
if (ret < 0) {
dev_err(hub->intfdev, "Host not "
"accepting hub info "
"update.\n");
dev_err(hub->intfdev, "LS/FS devices "
"and hubs may not work "
"under this hub.\n");
}
}
hub_power_on(hub, true);
} else {
hub_power_on(hub, true);
}
}
init2:
/*
* Check each port and set hub->change_bits to let hub_wq know
* which ports need attention.
*/
for (port1 = 1; port1 <= hdev->maxchild; ++port1) {
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
u16 portstatus, portchange;
portstatus = portchange = 0;
status = hub_port_status(hub, port1, &portstatus, &portchange);
if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
dev_dbg(&port_dev->dev, "status %04x change %04x\n",
portstatus, portchange);
/*
* After anything other than HUB_RESUME (i.e., initialization
* or any sort of reset), every port should be disabled.
* Unconnected ports should likewise be disabled (paranoia),
* and so should ports for which we have no usb_device.
*/
if ((portstatus & USB_PORT_STAT_ENABLE) && (
type != HUB_RESUME ||
!(portstatus & USB_PORT_STAT_CONNECTION) ||
!udev ||
udev->state == USB_STATE_NOTATTACHED)) {
/*
* USB3 protocol ports will automatically transition to the
* Enabled state when they detect a USB 3.0 device attach.
* Do not disable USB3 protocol ports; just pretend the
* power was lost.
*/
portstatus &= ~USB_PORT_STAT_ENABLE;
if (!hub_is_superspeed(hdev))
usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_ENABLE);
}
/* Clear status-change flags; we'll debounce later */
if (portchange & USB_PORT_STAT_C_CONNECTION) {
need_debounce_delay = true;
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
}
if (portchange & USB_PORT_STAT_C_ENABLE) {
need_debounce_delay = true;
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_ENABLE);
}
if (portchange & USB_PORT_STAT_C_RESET) {
need_debounce_delay = true;
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_RESET);
}
if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
hub_is_superspeed(hub->hdev)) {
need_debounce_delay = true;
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_BH_PORT_RESET);
}
/* We can forget about a "removed" device when there's a
* physical disconnect or the connect status changes.
*/
if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
(portchange & USB_PORT_STAT_C_CONNECTION))
clear_bit(port1, hub->removed_bits);
if (!udev || udev->state == USB_STATE_NOTATTACHED) {
/* Tell hub_wq to disconnect the device or
* check for a new connection
*/
if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
(portstatus & USB_PORT_STAT_OVERCURRENT))
set_bit(port1, hub->change_bits);
} else if (portstatus & USB_PORT_STAT_ENABLE) {
bool port_resumed = (portstatus &
USB_PORT_STAT_LINK_STATE) ==
USB_SS_PORT_LS_U0;
/* The power session apparently survived the resume.
* If there was an overcurrent or suspend change
* (i.e., remote wakeup request), have hub_wq
* take care of it. Look at the port link state
* for USB 3.0 hubs, since they don't have a suspend
* change bit, and they don't set the port link change
* bit on device-initiated resume.
*/
if (portchange || (hub_is_superspeed(hub->hdev) &&
port_resumed))
set_bit(port1, hub->change_bits);
} else if (udev->persist_enabled) {
#ifdef CONFIG_PM
udev->reset_resume = 1;
#endif
/* Don't set the change_bits when the device
* was powered off.
*/
if (test_bit(port1, hub->power_bits))
set_bit(port1, hub->change_bits);
} else {
/* The power session is gone; tell hub_wq */
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
set_bit(port1, hub->change_bits);
}
}
/* If no port-status-change flags were set, we don't need any
* debouncing. If flags were set we can try to debounce the
* ports all at once right now, instead of letting hub_wq do them
* one at a time later on.
*
* If any port-status changes do occur during this delay, hub_wq
* will see them later and handle them normally.
*/
if (need_debounce_delay) {
delay = HUB_DEBOUNCE_STABLE;
/* Don't do a long sleep inside a workqueue routine */
if (type == HUB_INIT2) {
INIT_DELAYED_WORK(&hub->init_work, hub_init_func3);
queue_delayed_work(system_power_efficient_wq,
&hub->init_work,
msecs_to_jiffies(delay));
return; /* Continues at init3: below */
} else {
msleep(delay);
}
}
init3:
hub->quiescing = 0;
status = usb_submit_urb(hub->urb, GFP_NOIO);
if (status < 0)
dev_err(hub->intfdev, "activate --> %d\n", status);
if (hub->has_indicators && blinkenlights)
queue_delayed_work(system_power_efficient_wq,
&hub->leds, LED_CYCLE_PERIOD);
/* Scan all ports that need attention */
kick_hub_wq(hub);
/* Allow autosuspend if it was suppressed */
if (type <= HUB_INIT3)
usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
}
/* Implement the continuations for the delays above */
static void hub_init_func2(struct work_struct *ws)
{
struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
hub_activate(hub, HUB_INIT2);
}
static void hub_init_func3(struct work_struct *ws)
{
struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
hub_activate(hub, HUB_INIT3);
}
enum hub_quiescing_type {
HUB_DISCONNECT, HUB_PRE_RESET, HUB_SUSPEND
};
static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
{
struct usb_device *hdev = hub->hdev;
int i;
cancel_delayed_work_sync(&hub->init_work);
/* hub_wq and related activity won't re-trigger */
hub->quiescing = 1;
if (type != HUB_SUSPEND) {
/* Disconnect all the children */
for (i = 0; i < hdev->maxchild; ++i) {
if (hub->ports[i]->child)
usb_disconnect(&hub->ports[i]->child);
}
}
/* Stop hub_wq and related activity */
usb_kill_urb(hub->urb);
if (hub->has_indicators)
cancel_delayed_work_sync(&hub->leds);
if (hub->tt.hub)
flush_work(&hub->tt.clear_work);
}
static void hub_pm_barrier_for_all_ports(struct usb_hub *hub)
{
int i;
for (i = 0; i < hub->hdev->maxchild; ++i)
pm_runtime_barrier(&hub->ports[i]->dev);
}
/* caller has locked the hub device */
static int hub_pre_reset(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
hub_quiesce(hub, HUB_PRE_RESET);
hub->in_reset = 1;
hub_pm_barrier_for_all_ports(hub);
return 0;
}
/* caller has locked the hub device */
static int hub_post_reset(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
hub->in_reset = 0;
hub_pm_barrier_for_all_ports(hub);
hub_activate(hub, HUB_POST_RESET);
return 0;
}
static int hub_configure(struct usb_hub *hub,
struct usb_endpoint_descriptor *endpoint)
{
struct usb_hcd *hcd;
struct usb_device *hdev = hub->hdev;
struct device *hub_dev = hub->intfdev;
u16 hubstatus, hubchange;
u16 wHubCharacteristics;
unsigned int pipe;
int maxp, ret, i;
char *message = "out of memory";
unsigned unit_load;
unsigned full_load;
unsigned maxchild;
hub->buffer = kmalloc(sizeof(*hub->buffer), GFP_KERNEL);
if (!hub->buffer) {
ret = -ENOMEM;
goto fail;
}
hub->status = kmalloc(sizeof(*hub->status), GFP_KERNEL);
if (!hub->status) {
ret = -ENOMEM;
goto fail;
}
mutex_init(&hub->status_mutex);
hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL);
if (!hub->descriptor) {
ret = -ENOMEM;
goto fail;
}
/* Request the entire hub descriptor.
* hub->descriptor can handle USB_MAXCHILDREN ports,
* but the hub can/will return fewer bytes here.
*/
ret = get_hub_descriptor(hdev, hub->descriptor);
if (ret < 0) {
message = "can't read hub descriptor";
goto fail;
} else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) {
message = "hub has too many ports!";
ret = -ENODEV;
goto fail;
} else if (hub->descriptor->bNbrPorts == 0) {
message = "hub doesn't have any ports!";
ret = -ENODEV;
goto fail;
}
maxchild = hub->descriptor->bNbrPorts;
dev_info(hub_dev, "%d port%s detected\n", maxchild,
(maxchild == 1) ? "" : "s");
hub->ports = kzalloc(maxchild * sizeof(struct usb_port *), GFP_KERNEL);
if (!hub->ports) {
ret = -ENOMEM;
goto fail;
}
wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics);
if (hub_is_superspeed(hdev)) {
unit_load = 150;
full_load = 900;
} else {
unit_load = 100;
full_load = 500;
}
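/* Background: these values mirror the bus-power rules of the specs.
* USB 2.0 defines a unit load as 100 mA with at most five unit loads
* (500 mA) per port, while USB 3.0 raises a unit load to 150 mA and
* allows up to 900 mA for a configured SuperSpeed port.
*/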
/* FIXME for USB 3.0, skip for now */
if ((wHubCharacteristics & HUB_CHAR_COMPOUND) &&
!(hub_is_superspeed(hdev))) {
int i;
char portstr[USB_MAXCHILDREN + 1];
for (i = 0; i < maxchild; i++)
portstr[i] = hub->descriptor->u.hs.DeviceRemovable
[((i + 1) / 8)] & (1 << ((i + 1) % 8))
? 'F' : 'R';
portstr[maxchild] = 0;
dev_dbg(hub_dev, "compound device; port removable status: %s\n", portstr);
} else
dev_dbg(hub_dev, "standalone hub\n");
switch (wHubCharacteristics & HUB_CHAR_LPSM) {
case HUB_CHAR_COMMON_LPSM:
dev_dbg(hub_dev, "ganged power switching\n");
break;
case HUB_CHAR_INDV_PORT_LPSM:
dev_dbg(hub_dev, "individual port power switching\n");
break;
case HUB_CHAR_NO_LPSM:
case HUB_CHAR_LPSM:
dev_dbg(hub_dev, "no power switching (usb 1.0)\n");
break;
}
switch (wHubCharacteristics & HUB_CHAR_OCPM) {
case HUB_CHAR_COMMON_OCPM:
dev_dbg(hub_dev, "global over-current protection\n");
break;
case HUB_CHAR_INDV_PORT_OCPM:
dev_dbg(hub_dev, "individual port over-current protection\n");
break;
case HUB_CHAR_NO_OCPM:
case HUB_CHAR_OCPM:
dev_dbg(hub_dev, "no over-current protection\n");
break;
}
spin_lock_init (&hub->tt.lock);
INIT_LIST_HEAD (&hub->tt.clear_list);
INIT_WORK(&hub->tt.clear_work, hub_tt_work);
switch (hdev->descriptor.bDeviceProtocol) {
case USB_HUB_PR_FS:
break;
case USB_HUB_PR_HS_SINGLE_TT:
dev_dbg(hub_dev, "Single TT\n");
hub->tt.hub = hdev;
break;
case USB_HUB_PR_HS_MULTI_TT:
ret = usb_set_interface(hdev, 0, 1);
if (ret == 0) {
dev_dbg(hub_dev, "TT per port\n");
hub->tt.multi = 1;
} else
dev_err(hub_dev, "Using single TT (err %d)\n",
ret);
hub->tt.hub = hdev;
break;
case USB_HUB_PR_SS:
/* USB 3.0 hubs don't have a TT */
break;
default:
dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
hdev->descriptor.bDeviceProtocol);
break;
}
/* Note 8 FS bit times == (8 bits / 12000000 bps) ~= 666ns */
switch (wHubCharacteristics & HUB_CHAR_TTTT) {
case HUB_TTTT_8_BITS:
if (hdev->descriptor.bDeviceProtocol != 0) {
hub->tt.think_time = 666;
dev_dbg(hub_dev, "TT requires at most %d "
"FS bit times (%d ns)\n",
8, hub->tt.think_time);
}
break;
case HUB_TTTT_16_BITS:
hub->tt.think_time = 666 * 2;
dev_dbg(hub_dev, "TT requires at most %d "
"FS bit times (%d ns)\n",
16, hub->tt.think_time);
break;
case HUB_TTTT_24_BITS:
hub->tt.think_time = 666 * 3;
dev_dbg(hub_dev, "TT requires at most %d "
"FS bit times (%d ns)\n",
24, hub->tt.think_time);
break;
case HUB_TTTT_32_BITS:
hub->tt.think_time = 666 * 4;
dev_dbg(hub_dev, "TT requires at most %d "
"FS bit times (%d ns)\n",
32, hub->tt.think_time);
break;
}
/* probe() zeroes hub->indicator[] */
if (wHubCharacteristics & HUB_CHAR_PORTIND) {
hub->has_indicators = 1;
dev_dbg(hub_dev, "Port indicators are supported\n");
}
dev_dbg(hub_dev, "power on to power good time: %dms\n",
hub->descriptor->bPwrOn2PwrGood * 2);
/* power budgeting mostly matters with bus-powered hubs,
* and battery-powered root hubs (may provide just 8 mA).
*/
ret = usb_get_status(hdev, USB_RECIP_DEVICE, 0, &hubstatus);
if (ret) {
message = "can't get hub status";
goto fail;
}
hcd = bus_to_hcd(hdev->bus);
if (hdev == hdev->bus->root_hub) {
if (hcd->power_budget > 0)
hdev->bus_mA = hcd->power_budget;
else
hdev->bus_mA = full_load * maxchild;
if (hdev->bus_mA >= full_load)
hub->mA_per_port = full_load;
else {
hub->mA_per_port = hdev->bus_mA;
hub->limited_power = 1;
}
} else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
int remaining = hdev->bus_mA -
hub->descriptor->bHubContrCurrent;
dev_dbg(hub_dev, "hub controller current requirement: %dmA\n",
hub->descriptor->bHubContrCurrent);
hub->limited_power = 1;
if (remaining < maxchild * unit_load)
dev_warn(hub_dev,
"insufficient power available "
"to use all downstream ports\n");
hub->mA_per_port = unit_load; /* 7.2.1 */
} else { /* Self-powered external hub */
/* FIXME: What about battery-powered external hubs that
* provide less current per port? */
hub->mA_per_port = full_load;
}
if (hub->mA_per_port < full_load)
dev_dbg(hub_dev, "%umA bus power budget for each child\n",
hub->mA_per_port);
ret = hub_hub_status(hub, &hubstatus, &hubchange);
if (ret < 0) {
message = "can't get hub status";
goto fail;
}
/* local power status reports aren't always correct */
if (hdev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_SELFPOWER)
dev_dbg(hub_dev, "local power source is %s\n",
(hubstatus & HUB_STATUS_LOCAL_POWER)
? "lost (inactive)" : "good");
if ((wHubCharacteristics & HUB_CHAR_OCPM) == 0)
dev_dbg(hub_dev, "%sover-current condition exists\n",
(hubstatus & HUB_STATUS_OVERCURRENT) ? "" : "no ");
/* set up the interrupt endpoint
* We use the EP's maxpacket size instead of (PORTS+1+7)/8
* bytes as USB2.0[11.12.3] says because some hubs are known
* to send more data (and thus cause overflow). For root hubs,
* maxpktsize is defined in hcd.c's fake endpoint descriptors
* to be big enough for at least USB_MAXCHILDREN ports. */
pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
if (maxp > sizeof(*hub->buffer))
maxp = sizeof(*hub->buffer);
hub->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!hub->urb) {
ret = -ENOMEM;
goto fail;
}
usb_fill_int_urb(hub->urb, hdev, pipe, *hub->buffer, maxp, hub_irq,
hub, endpoint->bInterval);
/* maybe cycle the hub leds */
if (hub->has_indicators && blinkenlights)
hub->indicator[0] = INDICATOR_CYCLE;
mutex_lock(&usb_port_peer_mutex);
for (i = 0; i < maxchild; i++) {
ret = usb_hub_create_port_device(hub, i + 1);
if (ret < 0) {
dev_err(hub->intfdev,
"couldn't create port%d device.\n", i + 1);
break;
}
}
hdev->maxchild = i;
for (i = 0; i < hdev->maxchild; i++) {
struct usb_port *port_dev = hub->ports[i];
pm_runtime_put(&port_dev->dev);
}
mutex_unlock(&usb_port_peer_mutex);
if (ret < 0)
goto fail;
/* Update the HCD's internal representation of this hub before hub_wq
* starts getting port status changes for devices under the hub.
*/
if (hcd->driver->update_hub_device) {
ret = hcd->driver->update_hub_device(hcd, hdev,
&hub->tt, GFP_KERNEL);
if (ret < 0) {
message = "can't update HCD hub info";
goto fail;
}
}
usb_hub_adjust_deviceremovable(hdev, hub->descriptor);
hub_activate(hub, HUB_INIT);
return 0;
fail:
dev_err (hub_dev, "config failed, %s (err %d)\n",
message, ret);
/* hub_disconnect() frees urb and descriptor */
return ret;
}
static void hub_release(struct kref *kref)
{
struct usb_hub *hub = container_of(kref, struct usb_hub, kref);
usb_put_dev(hub->hdev);
usb_put_intf(to_usb_interface(hub->intfdev));
kfree(hub);
}
static unsigned highspeed_hubs;
static void hub_disconnect(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
struct usb_device *hdev = interface_to_usbdev(intf);
int port1;
/*
* Stop adding new hub events. We do not want to block here and thus
* will not try to remove any pending work item.
*/
hub->disconnected = 1;
/* Disconnect all children and quiesce the hub */
hub->error = 0;
hub_quiesce(hub, HUB_DISCONNECT);
mutex_lock(&usb_port_peer_mutex);
/* Avoid races with recursively_mark_NOTATTACHED() */
spin_lock_irq(&device_state_lock);
port1 = hdev->maxchild;
hdev->maxchild = 0;
usb_set_intfdata(intf, NULL);
spin_unlock_irq(&device_state_lock);
for (; port1 > 0; --port1)
usb_hub_remove_port_device(hub, port1);
mutex_unlock(&usb_port_peer_mutex);
if (hub->hdev->speed == USB_SPEED_HIGH)
highspeed_hubs--;
usb_free_urb(hub->urb);
kfree(hub->ports);
kfree(hub->descriptor);
kfree(hub->status);
kfree(hub->buffer);
pm_suspend_ignore_children(&intf->dev, false);
kref_put(&hub->kref, hub_release);
}
static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_host_interface *desc;
struct usb_endpoint_descriptor *endpoint;
struct usb_device *hdev;
struct usb_hub *hub;
desc = intf->cur_altsetting;
hdev = interface_to_usbdev(intf);
/*
* Set the default autosuspend delay to 0 to speed up bus suspend,
* based on the following considerations:
*
* - Unlike other drivers, the hub driver does not rely on the
* autosuspend delay to provide enough time to handle a wakeup
* event, and the submitted status URB is only used to check for
* future changes on the hub's downstream ports, so it is safe to
* do this.
*
* - The patch might cause one or more auto suspend/resume cycles
* for the very rare devices below when they are plugged into a
* hub for the first time:
*
* devices that have trouble initializing, disconnect
* themselves from the bus, and then reconnect a second
* or so later
*
* devices that exist just to download firmware, and
* disconnect themselves after completing it
*
* For these quite rare devices, their drivers may change the
* autosuspend delay of their parent hub in probe() to an
* appropriate value to avoid the subtle problem if someone
* does care about it.
*
* - The patch may cause one or more auto suspend/resume cycles on
* the hub while running 'lsusb', but that is probably too
* infrequent to worry about.
*
* - Changing the autosuspend delay of the hub avoids an unnecessary
* autosuspend timer for the hub, and may also decrease the power
* consumption of the USB bus.
*
* - If the user has asked to prevent autosuspend by passing
* usbcore.autosuspend = -1, then keep autosuspend disabled.
*/
#ifdef CONFIG_PM
if (hdev->dev.power.autosuspend_delay >= 0)
pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
#endif
/*
* Hubs have proper suspend/resume support, except for root hubs
* where the controller driver doesn't have bus_suspend and
* bus_resume methods.
*/
if (hdev->parent) { /* normal device */
usb_enable_autosuspend(hdev);
} else { /* root hub */
const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver;
if (drv->bus_suspend && drv->bus_resume)
usb_enable_autosuspend(hdev);
}
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
"Unsupported bus topology: hub nested too deep\n");
return -E2BIG;
}
#ifdef CONFIG_USB_OTG_BLACKLIST_HUB
if (hdev->parent) {
dev_warn(&intf->dev, "ignoring external hub\n");
return -ENODEV;
}
#endif
/* Some hubs have a subclass of 1, which AFAICT according to the */
/* specs is not defined, but it works */
if ((desc->desc.bInterfaceSubClass != 0) &&
(desc->desc.bInterfaceSubClass != 1)) {
descriptor_error:
dev_err (&intf->dev, "bad descriptor, ignoring hub\n");
return -EIO;
}
/* Multiple endpoints? What kind of mutant ninja-hub is this? */
if (desc->desc.bNumEndpoints != 1)
goto descriptor_error;
endpoint = &desc->endpoint[0].desc;
/* If it's not an interrupt in endpoint, we'd better punt! */
if (!usb_endpoint_is_int_in(endpoint))
goto descriptor_error;
/* We found a hub */
dev_info (&intf->dev, "USB hub found\n");
hub = kzalloc(sizeof(*hub), GFP_KERNEL);
if (!hub) {
dev_dbg (&intf->dev, "couldn't kmalloc hub struct\n");
return -ENOMEM;
}
kref_init(&hub->kref);
hub->intfdev = &intf->dev;
hub->hdev = hdev;
INIT_DELAYED_WORK(&hub->leds, led_work);
INIT_DELAYED_WORK(&hub->init_work, NULL);
INIT_WORK(&hub->events, hub_event);
usb_get_intf(intf);
usb_get_dev(hdev);
usb_set_intfdata (intf, hub);
intf->needs_remote_wakeup = 1;
pm_suspend_ignore_children(&intf->dev, true);
if (hdev->speed == USB_SPEED_HIGH)
highspeed_hubs++;
if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND)
hub->quirk_check_port_auto_suspend = 1;
if (hub_configure(hub, endpoint) >= 0)
return 0;
hub_disconnect (intf);
return -ENODEV;
}
static int
hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
{
struct usb_device *hdev = interface_to_usbdev (intf);
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
/* assert ifno == 0 (part of hub spec) */
switch (code) {
case USBDEVFS_HUB_PORTINFO: {
struct usbdevfs_hub_portinfo *info = user_data;
int i;
spin_lock_irq(&device_state_lock);
if (hdev->devnum <= 0)
info->nports = 0;
else {
info->nports = hdev->maxchild;
for (i = 0; i < info->nports; i++) {
if (hub->ports[i]->child == NULL)
info->port[i] = 0;
else
info->port[i] =
hub->ports[i]->child->devnum;
}
}
spin_unlock_irq(&device_state_lock);
return info->nports + 1;
}
default:
return -ENOSYS;
}
}
/*
* Allow user programs to claim ports on a hub. When a device is attached
* to one of these "claimed" ports, the program will "own" the device.
*/
static int find_port_owner(struct usb_device *hdev, unsigned port1,
struct usb_dev_state ***ppowner)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
if (hdev->state == USB_STATE_NOTATTACHED)
return -ENODEV;
if (port1 == 0 || port1 > hdev->maxchild)
return -EINVAL;
/* Devices not managed by the hub driver
* will always have maxchild equal to 0.
*/
*ppowner = &(hub->ports[port1 - 1]->port_owner);
return 0;
}
/* In the following three functions, the caller must hold hdev's lock */
int usb_hub_claim_port(struct usb_device *hdev, unsigned port1,
struct usb_dev_state *owner)
{
int rc;
struct usb_dev_state **powner;
rc = find_port_owner(hdev, port1, &powner);
if (rc)
return rc;
if (*powner)
return -EBUSY;
*powner = owner;
return rc;
}
EXPORT_SYMBOL_GPL(usb_hub_claim_port);
int usb_hub_release_port(struct usb_device *hdev, unsigned port1,
struct usb_dev_state *owner)
{
int rc;
struct usb_dev_state **powner;
rc = find_port_owner(hdev, port1, &powner);
if (rc)
return rc;
if (*powner != owner)
return -ENOENT;
*powner = NULL;
return rc;
}
EXPORT_SYMBOL_GPL(usb_hub_release_port);
void usb_hub_release_all_ports(struct usb_device *hdev, struct usb_dev_state *owner)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
int n;
for (n = 0; n < hdev->maxchild; n++) {
if (hub->ports[n]->port_owner == owner)
hub->ports[n]->port_owner = NULL;
}
}
/* The caller must hold udev's lock */
bool usb_device_is_owned(struct usb_device *udev)
{
struct usb_hub *hub;
if (udev->state == USB_STATE_NOTATTACHED || !udev->parent)
return false;
hub = usb_hub_to_struct_hub(udev->parent);
return !!hub->ports[udev->portnum - 1]->port_owner;
}
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
int i;
for (i = 0; i < udev->maxchild; ++i) {
if (hub->ports[i]->child)
recursively_mark_NOTATTACHED(hub->ports[i]->child);
}
if (udev->state == USB_STATE_SUSPENDED)
udev->active_duration -= jiffies;
udev->state = USB_STATE_NOTATTACHED;
}
/**
* usb_set_device_state - change a device's current state (usbcore, hcds)
* @udev: pointer to device whose state should be changed
* @new_state: new state value to be stored
*
* udev->state is _not_ fully protected by the device lock. Although
* most transitions are made only while holding the lock, the state can
* change to USB_STATE_NOTATTACHED at almost any time. This
* is so that devices can be marked as disconnected as soon as possible,
* without having to wait for any semaphores to be released. As a result,
* all changes to any device's state must be protected by the
* device_state_lock spinlock.
*
* Once a device has been added to the device tree, all changes to its state
* should be made using this routine. The state should _not_ be set directly.
*
* If udev->state is already USB_STATE_NOTATTACHED then no change is made.
* Otherwise udev->state is set to new_state, and if new_state is
* USB_STATE_NOTATTACHED then all of udev's descendants' states are also set
* to USB_STATE_NOTATTACHED.
*/
void usb_set_device_state(struct usb_device *udev,
enum usb_device_state new_state)
{
unsigned long flags;
int wakeup = -1;
spin_lock_irqsave(&device_state_lock, flags);
if (udev->state == USB_STATE_NOTATTACHED)
; /* do nothing */
else if (new_state != USB_STATE_NOTATTACHED) {
/* root hub wakeup capabilities are managed out-of-band
* and may involve silicon errata ... ignore them here.
*/
if (udev->parent) {
if (udev->state == USB_STATE_SUSPENDED
|| new_state == USB_STATE_SUSPENDED)
; /* No change to wakeup settings */
else if (new_state == USB_STATE_CONFIGURED)
wakeup = (udev->quirks &
USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 :
udev->actconfig->desc.bmAttributes &
USB_CONFIG_ATT_WAKEUP;
else
wakeup = 0;
}
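/* active_duration is kept as a running offset: while the device
* is active it holds (accumulated active time - jiffies at the
* last resume), so adding the current jiffies yields the total;
* while suspended it holds the plain accumulated total.
*/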
if (udev->state == USB_STATE_SUSPENDED &&
new_state != USB_STATE_SUSPENDED)
udev->active_duration -= jiffies;
else if (new_state == USB_STATE_SUSPENDED &&
udev->state != USB_STATE_SUSPENDED)
udev->active_duration += jiffies;
udev->state = new_state;
} else
recursively_mark_NOTATTACHED(udev);
spin_unlock_irqrestore(&device_state_lock, flags);
if (wakeup >= 0)
device_set_wakeup_capable(&udev->dev, wakeup);
}
EXPORT_SYMBOL_GPL(usb_set_device_state);
/*
* Choose a device number.
*
* Device numbers are used as filenames in usbfs. On USB-1.1 and
* USB-2.0 buses they are also used as device addresses, however on
* USB-3.0 buses the address is assigned by the controller hardware
* and it usually is not the same as the device number.
*
* WUSB devices are simple: they have no hubs behind them, so the
* mapping device <-> virtual port number becomes 1:1. Why? To
* simplify the life of the device connection logic in
* drivers/usb/wusbcore/devconnect.c. When we do the initial secret
* handshake we need to assign a temporary address in the unauthorized
* space. For simplicity we use the first virtual port number found to
* be free [drivers/usb/wusbcore/devconnect.c:wusbhc_devconnect_ack()]
* and that becomes its address [X < 128] or its unauthorized address
* [X | 0x80].
*
* We add 1 as an offset to the one-based USB-stack port number
* (zero-based wusb virtual port index) for two reasons: (a) dev addr
* 0 is reserved by USB for the default address; (b) Linux's USB stack
* always uses #1 for the root hub of the controller. So the USB
* stack's port #1, which is wusb virtual-port #0, has address #2.
*
* Devices connected under xHCI are not as simple. The host controller
* supports virtualization, so the hardware assigns device addresses and
* the HCD must setup data structures before issuing a set address
* command to the hardware.
*/
static void choose_devnum(struct usb_device *udev)
{
int devnum;
struct usb_bus *bus = udev->bus;
/* be safe when more hub events are processed in parallel */
mutex_lock(&bus->usb_address0_mutex);
if (udev->wusb) {
devnum = udev->portnum + 1;
BUG_ON(test_bit(devnum, bus->devmap.devicemap));
} else {
/* Try to allocate the next devnum beginning at
* bus->devnum_next. */
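/* Valid device numbers run from 1 to 127; bit 0 of the map stays
* reserved for the default address, and the search wraps back to
* 1 once the top of the map is reached.
*/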
devnum = find_next_zero_bit(bus->devmap.devicemap, 128,
bus->devnum_next);
if (devnum >= 128)
devnum = find_next_zero_bit(bus->devmap.devicemap,
128, 1);
bus->devnum_next = (devnum >= 127 ? 1 : devnum + 1);
}
if (devnum < 128) {
set_bit(devnum, bus->devmap.devicemap);
udev->devnum = devnum;
}
mutex_unlock(&bus->usb_address0_mutex);
}
static void release_devnum(struct usb_device *udev)
{
if (udev->devnum > 0) {
clear_bit(udev->devnum, udev->bus->devmap.devicemap);
udev->devnum = -1;
}
}
static void update_devnum(struct usb_device *udev, int devnum)
{
/* The address for a WUSB device is managed by wusbcore. */
if (!udev->wusb)
udev->devnum = devnum;
}
static void hub_free_dev(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
/* Root hubs aren't real devices, so don't free HCD resources */
if (hcd->driver->free_dev && udev->parent)
hcd->driver->free_dev(hcd, udev);
}
static void hub_disconnect_children(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
int i;
/* Free up all the children before we remove this device */
for (i = 0; i < udev->maxchild; i++) {
if (hub->ports[i]->child)
usb_disconnect(&hub->ports[i]->child);
}
}
/**
* usb_disconnect - disconnect a device (usbcore-internal)
* @pdev: pointer to device being disconnected
* Context: !in_interrupt ()
*
* Something got disconnected. Get rid of it and all of its children.
*
* If *pdev is a normal device then the parent hub must already be locked.
* If *pdev is a root hub then the caller must hold the usb_bus_list_lock,
* which protects the set of root hubs as well as the list of buses.
*
* Only hub drivers (including virtual root hub drivers for host
* controllers) should ever call this.
*
* This call is synchronous, and may not be used in an interrupt context.
*/
void usb_disconnect(struct usb_device **pdev)
{
struct usb_port *port_dev = NULL;
struct usb_device *udev = *pdev;
struct usb_hub *hub = NULL;
int port1 = 1;
/* mark the device as inactive, so any further urb submissions for
* this device (and any of its children) will fail immediately.
* this quiesces everything except pending urbs.
*/
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
dev_info(&udev->dev, "USB disconnect, device number %d\n",
udev->devnum);
usb_lock_device(udev);
hub_disconnect_children(udev);
/* deallocate hcd/hardware state ... nuking all pending urbs and
* cleaning up all state associated with the current configuration
* so that the hardware is now fully quiesced.
*/
dev_dbg (&udev->dev, "unregistering device\n");
usb_disable_device(udev, 0);
usb_hcd_synchronize_unlinks(udev);
if (udev->parent) {
port1 = udev->portnum;
hub = usb_hub_to_struct_hub(udev->parent);
port_dev = hub->ports[port1 - 1];
sysfs_remove_link(&udev->dev.kobj, "port");
sysfs_remove_link(&port_dev->dev.kobj, "device");
/*
* As usb_port_runtime_resume() de-references udev, make
* sure no resumes occur during removal
*/
if (!test_and_set_bit(port1, hub->child_usage_bits))
pm_runtime_get_sync(&port_dev->dev);
}
usb_remove_ep_devs(&udev->ep0);
usb_unlock_device(udev);
/* Unregister the device. The device driver is responsible
* for de-configuring the device and invoking the remove-device
* notifier chain (used by usbfs and possibly others).
*/
device_del(&udev->dev);
/* Free the device number and delete the parent's children[]
* (or root_hub) pointer.
*/
release_devnum(udev);
/* Avoid races with recursively_mark_NOTATTACHED() */
spin_lock_irq(&device_state_lock);
*pdev = NULL;
spin_unlock_irq(&device_state_lock);
if (port_dev && test_and_clear_bit(port1, hub->child_usage_bits))
pm_runtime_put(&port_dev->dev);
hub_free_dev(udev);
put_device(&udev->dev);
}
#ifdef CONFIG_USB_ANNOUNCE_NEW_DEVICES
static void show_string(struct usb_device *udev, char *id, char *string)
{
if (!string)
return;
dev_info(&udev->dev, "%s: %s\n", id, string);
}
static void announce_device(struct usb_device *udev)
{
dev_info(&udev->dev, "New USB device found, idVendor=%04x, idProduct=%04x\n",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
dev_info(&udev->dev,
"New USB device strings: Mfr=%d, Product=%d, SerialNumber=%d\n",
udev->descriptor.iManufacturer,
udev->descriptor.iProduct,
udev->descriptor.iSerialNumber);
show_string(udev, "Product", udev->product);
show_string(udev, "Manufacturer", udev->manufacturer);
show_string(udev, "SerialNumber", udev->serial);
}
#else
static inline void announce_device(struct usb_device *udev) { }
#endif
/**
* usb_enumerate_device_otg - FIXME (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
* Finish enumeration for On-The-Go devices
*
* Return: 0 if successful. A negative error code otherwise.
*/
static int usb_enumerate_device_otg(struct usb_device *udev)
{
int err = 0;
#ifdef CONFIG_USB_OTG
/*
* OTG-aware devices on OTG-capable root hubs may be able to use SRP,
* to wake us after we've powered off VBUS; and HNP, switching roles
* "host" to "peripheral". The OTG descriptor helps figure this out.
*/
if (!udev->bus->is_b_host
&& udev->config
&& udev->parent == udev->bus->root_hub) {
struct usb_otg_descriptor *desc = NULL;
struct usb_bus *bus = udev->bus;
/* descriptor may appear anywhere in config */
if (__usb_get_extra_descriptor (udev->rawdescriptors[0],
le16_to_cpu(udev->config[0].desc.wTotalLength),
USB_DT_OTG, (void **) &desc) == 0) {
if (desc->bmAttributes & USB_OTG_HNP) {
unsigned port1 = udev->portnum;
dev_info(&udev->dev,
"Dual-Role OTG device on %sHNP port\n",
(port1 == bus->otg_port)
? "" : "non-");
/* enable HNP before suspend, it's simpler */
if (port1 == bus->otg_port)
bus->b_hnp_enable = 1;
err = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE, 0,
bus->b_hnp_enable
? USB_DEVICE_B_HNP_ENABLE
: USB_DEVICE_A_ALT_HNP_SUPPORT,
0, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (err < 0) {
/* OTG MESSAGE: report errors here,
* customize to match your product.
*/
dev_info(&udev->dev,
"can't set HNP mode: %d\n",
err);
bus->b_hnp_enable = 0;
}
}
}
}
#endif
return err;
}
/**
* usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
* This is only called by usb_new_device() and usb_authorize_device()
* and FIXME -- all comments that apply to them apply here with
* regard to the environment.
*
* If the device is WUSB and not authorized, we don't attempt to read
* the string descriptors, as they will be errored out by the device
* until it has been authorized.
*
* Return: 0 if successful. A negative error code otherwise.
*/
static int usb_enumerate_device(struct usb_device *udev)
{
int err;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (udev->config == NULL) {
err = usb_get_configuration(udev);
if (err < 0) {
if (err != -ENODEV)
dev_err(&udev->dev, "can't read configurations, error %d\n",
err);
return err;
}
}
/* read the standard strings and cache them if present */
udev->product = usb_cache_string(udev, udev->descriptor.iProduct);
udev->manufacturer = usb_cache_string(udev,
udev->descriptor.iManufacturer);
udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
err = usb_enumerate_device_otg(udev);
if (err < 0)
return err;
if (IS_ENABLED(CONFIG_USB_OTG_WHITELIST) && hcd->tpl_support &&
!is_targeted(udev)) {
/* Maybe it can talk to us, though we can't talk to it.
* (Includes HNP test device.)
*/
if (IS_ENABLED(CONFIG_USB_OTG) && (udev->bus->b_hnp_enable
|| udev->bus->is_b_host)) {
err = usb_port_suspend(udev, PMSG_AUTO_SUSPEND);
if (err < 0)
dev_dbg(&udev->dev, "HNP fail, %d\n", err);
}
return -ENOTSUPP;
}
usb_detect_interface_quirks(udev);
return 0;
}
static void set_usb_port_removable(struct usb_device *udev)
{
struct usb_device *hdev = udev->parent;
struct usb_hub *hub;
u8 port = udev->portnum;
u16 wHubCharacteristics;
bool removable = true;
if (!hdev)
return;
hub = usb_hub_to_struct_hub(udev->parent);
wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics);
if (!(wHubCharacteristics & HUB_CHAR_COMPOUND))
return;
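/* DeviceRemovable is a per-port bitmap with bit 0 reserved, so the
* 1-based port number indexes it directly.
*/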
if (hub_is_superspeed(hdev)) {
if (le16_to_cpu(hub->descriptor->u.ss.DeviceRemovable)
& (1 << port))
removable = false;
} else {
if (hub->descriptor->u.hs.DeviceRemovable[port / 8] & (1 << (port % 8)))
removable = false;
}
if (removable)
udev->removable = USB_DEVICE_REMOVABLE;
else
udev->removable = USB_DEVICE_FIXED;
/*
* Platform firmware may have populated an alternative value for
* removable. If the parent port has a known connect_type use
* that instead.
*/
switch (hub->ports[udev->portnum - 1]->connect_type) {
case USB_PORT_CONNECT_TYPE_HOT_PLUG:
udev->removable = USB_DEVICE_REMOVABLE;
break;
case USB_PORT_CONNECT_TYPE_HARD_WIRED:
udev->removable = USB_DEVICE_FIXED;
break;
default: /* use what was set above */
break;
}
}
/**
* usb_new_device - perform initial device setup (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
* This is called with devices which have been detected but not fully
* enumerated. The device descriptor is available, but not descriptors
* for any device configuration. The caller must have locked either
* the parent hub (if udev is a normal device) or else the
* usb_bus_list_lock (if udev is a root hub). The parent's pointer to
* udev has already been installed, but udev is not yet visible through
* sysfs or other filesystem code.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Only the hub driver or root-hub registrar should ever call this.
*
* Return: Whether the device is configured properly or not. Zero if the
* interface was registered with the driver core; else a negative errno
* value.
*
*/
int usb_new_device(struct usb_device *udev)
{
int err;
if (udev->parent) {
/* Initialize non-root-hub device wakeup to disabled;
* device (un)configuration controls wakeup capable
* sysfs power/wakeup controls wakeup enabled/disabled
*/
device_init_wakeup(&udev->dev, 0);
}
/* Tell the runtime-PM framework the device is active */
pm_runtime_set_active(&udev->dev);
pm_runtime_get_noresume(&udev->dev);
pm_runtime_use_autosuspend(&udev->dev);
pm_runtime_enable(&udev->dev);
/* By default, forbid autosuspend for all devices. It will be
* allowed for hubs during binding.
*/
usb_disable_autosuspend(udev);
err = usb_enumerate_device(udev); /* Read descriptors */
if (err < 0)
goto fail;
dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
udev->devnum, udev->bus->busnum,
(((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
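/* usbfs minor numbers pack 128 device slots per bus:
* minor = (busnum - 1) * 128 + (devnum - 1)
*/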
/* export the usbdev device-node for libusb */
udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
(((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
/* Tell the world! */
announce_device(udev);
if (udev->serial)
add_device_randomness(udev->serial, strlen(udev->serial));
if (udev->product)
add_device_randomness(udev->product, strlen(udev->product));
if (udev->manufacturer)
add_device_randomness(udev->manufacturer,
strlen(udev->manufacturer));
device_enable_async_suspend(&udev->dev);
/* check whether the hub or firmware marks this port as non-removable */
if (udev->parent)
set_usb_port_removable(udev);
/* Register the device. The device driver is responsible
* for configuring the device and invoking the add-device
* notifier chain (used by usbfs and possibly others).
*/
err = device_add(&udev->dev);
if (err) {
dev_err(&udev->dev, "can't device_add, error %d\n", err);
goto fail;
}
/* Create link files between child device and usb port device. */
if (udev->parent) {
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
int port1 = udev->portnum;
struct usb_port *port_dev = hub->ports[port1 - 1];
err = sysfs_create_link(&udev->dev.kobj,
&port_dev->dev.kobj, "port");
if (err)
goto fail;
err = sysfs_create_link(&port_dev->dev.kobj,
&udev->dev.kobj, "device");
if (err) {
sysfs_remove_link(&udev->dev.kobj, "port");
goto fail;
}
if (!test_and_set_bit(port1, hub->child_usage_bits))
pm_runtime_get_sync(&port_dev->dev);
}
(void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev);
usb_mark_last_busy(udev);
pm_runtime_put_sync_autosuspend(&udev->dev);
return err;
fail:
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
pm_runtime_disable(&udev->dev);
pm_runtime_set_suspended(&udev->dev);
return err;
}
/**
* usb_deauthorize_device - deauthorize a device (usbcore-internal)
* @usb_dev: USB device
*
* Move the USB device to a very basic state where interfaces are disabled
* and the device is in fact unconfigured and unusable.
*
* We share a lock (that we have) with device_del(), so we need to
* defer its call.
*
* Return: 0.
*/
int usb_deauthorize_device(struct usb_device *usb_dev)
{
usb_lock_device(usb_dev);
if (usb_dev->authorized == 0)
goto out_unauthorized;
usb_dev->authorized = 0;
usb_set_configuration(usb_dev, -1);
out_unauthorized:
usb_unlock_device(usb_dev);
return 0;
}
int usb_authorize_device(struct usb_device *usb_dev)
{
int result = 0, c;
usb_lock_device(usb_dev);
if (usb_dev->authorized == 1)
goto out_authorized;
result = usb_autoresume_device(usb_dev);
if (result < 0) {
dev_err(&usb_dev->dev,
"can't autoresume for authorization: %d\n", result);
goto error_autoresume;
}
if (usb_dev->wusb) {
result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor));
if (result < 0) {
dev_err(&usb_dev->dev, "can't re-read device descriptor for "
"authorization: %d\n", result);
goto error_device_descriptor;
}
}
usb_dev->authorized = 1;
/* Choose and set the configuration. This registers the interfaces
* with the driver core and lets interface drivers bind to them.
*/
c = usb_choose_configuration(usb_dev);
if (c >= 0) {
result = usb_set_configuration(usb_dev, c);
if (result) {
dev_err(&usb_dev->dev,
"can't set config #%d, error %d\n", c, result);
/* This need not be fatal. The user can try to
* set other configurations. */
}
}
dev_info(&usb_dev->dev, "authorized to connect\n");
error_device_descriptor:
usb_autosuspend_device(usb_dev);
error_autoresume:
out_authorized:
usb_unlock_device(usb_dev); /* complements locktree */
return result;
}
/* Returns 1 if @hub is a WUSB root hub, 0 otherwise */
static unsigned hub_is_wusb(struct usb_hub *hub)
{
struct usb_hcd *hcd;
if (hub->hdev->parent != NULL) /* not a root hub? */
return 0;
hcd = container_of(hub->hdev->bus, struct usb_hcd, self);
return hcd->wireless;
}
#define PORT_RESET_TRIES 5
#define SET_ADDRESS_TRIES 2
#define GET_DESCRIPTOR_TRIES 2
#define SET_CONFIG_TRIES (2 * (use_both_schemes + 1))
#define USE_NEW_SCHEME(i) ((i) / 2 == (int)old_scheme_first)
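/*
* USE_NEW_SCHEME(): with old_scheme_first clear (the default), retry
* counters 0-1 use the "new" enumeration scheme (read part of the device
* descriptor before SET_ADDRESS) and counters 2-3 fall back to the old
* scheme; setting old_scheme_first reverses that order.
*/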
#define HUB_ROOT_RESET_TIME 50 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
#define HUB_BH_RESET_TIME 50
#define HUB_LONG_RESET_TIME 200
#define HUB_RESET_TIMEOUT 800
/*
* "New scheme" enumeration causes an extra state transition to be
* exposed to an xhci host and causes USB3 devices to receive control
* commands in the default state. This has been seen to cause
* enumeration failures, so disable this enumeration scheme for USB3
* devices.
*/
static bool use_new_scheme(struct usb_device *udev, int retry)
{
if (udev->speed == USB_SPEED_SUPER)
return false;
return USE_NEW_SCHEME(retry);
}
static int hub_port_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay, bool warm);
/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
* A port warm reset is required to recover
*/
static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
u16 portstatus)
{
u16 link_state;
if (!hub_is_superspeed(hub->hdev))
return false;
if (test_bit(port1, hub->warm_reset_bits))
return true;
link_state = portstatus & USB_PORT_STAT_LINK_STATE;
return link_state == USB_SS_PORT_LS_SS_INACTIVE
|| link_state == USB_SS_PORT_LS_COMP_MOD;
}
static int hub_port_wait_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay, bool warm)
{
int delay_time, ret;
u16 portstatus;
u16 portchange;
for (delay_time = 0;
delay_time < HUB_RESET_TIMEOUT;
delay_time += delay) {
/* wait to give the device a chance to reset */
msleep(delay);
/* read and decode port status */
ret = hub_port_status(hub, port1, &portstatus, &portchange);
if (ret < 0)
return ret;
/* The port state is unknown until the reset completes. */
if (!(portstatus & USB_PORT_STAT_RESET))
break;
/* switch to the long delay after two short delay failures */
if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
delay = HUB_LONG_RESET_TIME;
dev_dbg(&hub->ports[port1 - 1]->dev,
"not %sreset yet, waiting %dms\n",
warm ? "warm " : "", delay);
}
if ((portstatus & USB_PORT_STAT_RESET))
return -EBUSY;
if (hub_port_warm_reset_required(hub, port1, portstatus))
return -ENOTCONN;
/* Device went away? */
if (!(portstatus & USB_PORT_STAT_CONNECTION))
return -ENOTCONN;
/* bomb out completely if the connection bounced. A USB 3.0
* connection may bounce if multiple warm resets were issued,
* but the device may have successfully re-connected. Ignore it.
*/
if (!hub_is_superspeed(hub->hdev) &&
(portchange & USB_PORT_STAT_C_CONNECTION))
return -ENOTCONN;
if (!(portstatus & USB_PORT_STAT_ENABLE))
return -EBUSY;
if (!udev)
return 0;
if (hub_is_wusb(hub))
udev->speed = USB_SPEED_WIRELESS;
else if (hub_is_superspeed(hub->hdev))
udev->speed = USB_SPEED_SUPER;
else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
udev->speed = USB_SPEED_HIGH;
else if (portstatus & USB_PORT_STAT_LOW_SPEED)
udev->speed = USB_SPEED_LOW;
else
udev->speed = USB_SPEED_FULL;
return 0;
}
static void hub_port_finish_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, int *status)
{
switch (*status) {
case 0:
/* TRSTRCY = 10 ms; plus some extra */
msleep(10 + 40);
if (udev) {
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
update_devnum(udev, 0);
/* The xHC may think the device is already reset,
* so ignore the status.
*/
if (hcd->driver->reset_device)
hcd->driver->reset_device(hcd, udev);
}
/* FALL THROUGH */
case -ENOTCONN:
case -ENODEV:
usb_clear_port_feature(hub->hdev,
port1, USB_PORT_FEAT_C_RESET);
if (hub_is_superspeed(hub->hdev)) {
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_BH_PORT_RESET);
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_PORT_LINK_STATE);
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
}
if (udev)
usb_set_device_state(udev, *status
? USB_STATE_NOTATTACHED
: USB_STATE_DEFAULT);
break;
}
}
/* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */
static int hub_port_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay, bool warm)
{
int i, status;
u16 portchange, portstatus;
struct usb_port *port_dev = hub->ports[port1 - 1];
if (!hub_is_superspeed(hub->hdev)) {
if (warm) {
dev_err(hub->intfdev, "only USB3 hub support "
"warm reset\n");
return -EINVAL;
}
/* Block EHCI CF initialization during the port reset.
* Some companion controllers don't like it when they mix.
*/
down_read(&ehci_cf_port_reset_rwsem);
} else if (!warm) {
/*
* If the caller hasn't explicitly requested a warm reset,
* double check and see if one is needed.
*/
status = hub_port_status(hub, port1,
&portstatus, &portchange);
if (status < 0)
goto done;
if (hub_port_warm_reset_required(hub, port1, portstatus))
warm = true;
}
clear_bit(port1, hub->warm_reset_bits);
/* Reset the port */
for (i = 0; i < PORT_RESET_TRIES; i++) {
status = set_port_feature(hub->hdev, port1, (warm ?
USB_PORT_FEAT_BH_PORT_RESET :
USB_PORT_FEAT_RESET));
if (status == -ENODEV) {
; /* The hub is gone */
} else if (status) {
dev_err(&port_dev->dev,
"cannot %sreset (err = %d)\n",
warm ? "warm " : "", status);
} else {
status = hub_port_wait_reset(hub, port1, udev, delay,
warm);
if (status && status != -ENOTCONN && status != -ENODEV)
dev_dbg(hub->intfdev,
"port_wait_reset: err = %d\n",
status);
}
/* Check for disconnect or reset */
if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
hub_port_finish_reset(hub, port1, udev, &status);
if (!hub_is_superspeed(hub->hdev))
goto done;
/*
* If a USB 3.0 device migrates from reset to an error
* state, re-issue the warm reset.
*/
if (hub_port_status(hub, port1,
&portstatus, &portchange) < 0)
goto done;
if (!hub_port_warm_reset_required(hub, port1,
portstatus))
goto done;
/*
* If the port is in SS.Inactive or Compliance Mode, the
* hot or warm reset failed. Try another warm reset.
*/
if (!warm) {
dev_dbg(&port_dev->dev,
"hot reset failed, warm reset\n");
warm = true;
}
}
dev_dbg(&port_dev->dev,
"not enabled, trying %sreset again...\n",
warm ? "warm " : "");
delay = HUB_LONG_RESET_TIME;
}
dev_err(&port_dev->dev, "Cannot enable. Maybe the USB cable is bad?\n");
done:
if (!hub_is_superspeed(hub->hdev))
up_read(&ehci_cf_port_reset_rwsem);
return status;
}
/* Check if a port is powered on */
static int port_is_power_on(struct usb_hub *hub, unsigned portstatus)
{
int ret = 0;
if (hub_is_superspeed(hub->hdev)) {
if (portstatus & USB_SS_PORT_STAT_POWER)
ret = 1;
} else {
if (portstatus & USB_PORT_STAT_POWER)
ret = 1;
}
return ret;
}
static void usb_lock_port(struct usb_port *port_dev)
__acquires(&port_dev->status_lock)
{
mutex_lock(&port_dev->status_lock);
__acquire(&port_dev->status_lock);
}
static void usb_unlock_port(struct usb_port *port_dev)
__releases(&port_dev->status_lock)
{
mutex_unlock(&port_dev->status_lock);
__release(&port_dev->status_lock);
}
#ifdef CONFIG_PM
/* Check if a port is suspended (USB 2.0 port) or in the U3 state (USB 3.0 port) */
static int port_is_suspended(struct usb_hub *hub, unsigned portstatus)
{
int ret = 0;
if (hub_is_superspeed(hub->hdev)) {
if ((portstatus & USB_PORT_STAT_LINK_STATE)
== USB_SS_PORT_LS_U3)
ret = 1;
} else {
if (portstatus & USB_PORT_STAT_SUSPEND)
ret = 1;
}
return ret;
}
/* Determine whether the device on a port is ready for a normal resume,
* is ready for a reset-resume, or should be disconnected.
*/
static int check_port_resume_type(struct usb_device *udev,
struct usb_hub *hub, int port1,
int status, u16 portchange, u16 portstatus)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
int retries = 3;
retry:
/* Is a warm reset needed to recover the connection? */
if (status == 0 && udev->reset_resume
&& hub_port_warm_reset_required(hub, port1, portstatus)) {
/* pass */;
}
/* Is the device still present? */
else if (status || port_is_suspended(hub, portstatus) ||
!port_is_power_on(hub, portstatus)) {
if (status >= 0)
status = -ENODEV;
} else if (!(portstatus & USB_PORT_STAT_CONNECTION)) {
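/* Give the port a brief grace period to report the connection
* before declaring the device gone.
*/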
if (retries--) {
usleep_range(200, 300);
status = hub_port_status(hub, port1, &portstatus,
&portchange);
goto retry;
}
status = -ENODEV;
}
/* Can't do a normal resume if the port isn't enabled,
* so try a reset-resume instead.
*/
else if (!(portstatus & USB_PORT_STAT_ENABLE) && !udev->reset_resume) {
if (udev->persist_enabled)
udev->reset_resume = 1;
else
status = -ENODEV;
}
if (status) {
dev_dbg(&port_dev->dev, "status %04x.%04x after resume, %d\n",
portchange, portstatus, status);
} else if (udev->reset_resume) {
/* Late port handoff can set status-change bits */
if (portchange & USB_PORT_STAT_C_CONNECTION)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
if (portchange & USB_PORT_STAT_C_ENABLE)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_ENABLE);
}
return status;
}
int usb_disable_ltm(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
/* Check if the roothub and device support LTM. */
if (!usb_device_supports_ltm(hcd->self.root_hub) ||
!usb_device_supports_ltm(udev))
return 0;
/* Clear Feature LTM Enable can only be sent if the device is
* configured.
*/
if (!udev->actconfig)
return 0;
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
USB_DEVICE_LTM_ENABLE, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
EXPORT_SYMBOL_GPL(usb_disable_ltm);
void usb_enable_ltm(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
/* Check if the roothub and device support LTM. */
if (!usb_device_supports_ltm(hcd->self.root_hub) ||
!usb_device_supports_ltm(udev))
return;
/* Set Feature LTM Enable can only be sent if the device is
* configured.
*/
if (!udev->actconfig)
return;
usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE, USB_RECIP_DEVICE,
USB_DEVICE_LTM_ENABLE, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
EXPORT_SYMBOL_GPL(usb_enable_ltm);
/*
* usb_enable_remote_wakeup - enable remote wakeup for a device
* @udev: target device
*
* For USB-2 devices: Set the device's remote wakeup feature.
*
* For USB-3 devices: Assume there's only one function on the device and
* enable remote wake for the first interface. FIXME if the interface
* association descriptor shows there's more than one function.
*/
static int usb_enable_remote_wakeup(struct usb_device *udev)
{
if (udev->speed < USB_SPEED_SUPER)
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE, USB_RECIP_DEVICE,
USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
else
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE,
USB_INTRF_FUNC_SUSPEND,
USB_INTRF_FUNC_SUSPEND_RW |
USB_INTRF_FUNC_SUSPEND_LP,
NULL, 0, USB_CTRL_SET_TIMEOUT);
}
/*
* usb_disable_remote_wakeup - disable remote wakeup for a device
* @udev: target device
*
* For USB-2 devices: Clear the device's remote wakeup feature.
*
* For USB-3 devices: Assume there's only one function on the device and
* disable remote wake for the first interface. FIXME if the interface
* association descriptor shows there's more than one function.
*/
static int usb_disable_remote_wakeup(struct usb_device *udev)
{
if (udev->speed < USB_SPEED_SUPER)
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
else
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE,
USB_INTRF_FUNC_SUSPEND, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
/* Count of wakeup-enabled devices at or below udev */
static unsigned wakeup_enabled_descendants(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
return udev->do_remote_wakeup +
(hub ? hub->wakeup_enabled_descendants : 0);
}
/*
* usb_port_suspend - suspend a usb device's upstream port
* @udev: device that's no longer in active use, not a root hub
* Context: must be able to sleep; device not locked; pm locks held
*
* Suspends a USB device that isn't in active use, conserving power.
* Devices may wake out of a suspend, if anything important happens,
* using the remote wakeup mechanism. They may also be taken out of
* suspend by the host, using usb_port_resume(). It's also routine
* to disconnect devices while they are suspended.
*
* This only affects the USB hardware for a device; its interfaces
* (and, for hubs, child devices) must already have been suspended.
*
* Selective port suspend reduces power; most suspended devices draw
* less than 500 uA. It's also used in OTG, along with remote wakeup.
* All devices below the suspended port are also suspended.
*
* Devices leave suspend state when the host wakes them up. Some devices
* also support "remote wakeup", where the device can activate the USB
* tree above them to deliver data, such as a keypress or packet. In
* some cases, this wakes the USB host.
*
* Suspending OTG devices may trigger HNP, if that's been enabled
* between a pair of dual-role devices. That will change roles, such
* as from A-Host to A-Peripheral or from B-Host back to B-Peripheral.
*
* Devices on USB hub ports have only one "suspend" state, corresponding
* to ACPI D2, "may cause the device to lose some context".
* State transitions include:
*
* - suspend, resume ... when the VBUS power link stays live
* - suspend, disconnect ... VBUS lost
*
* Once VBUS drop breaks the circuit, the port it's using has to go through
* normal re-enumeration procedures, starting with enabling VBUS power.
* Other than re-initializing the hub (plug/unplug, except for root hubs),
* Linux (2.6) currently has NO mechanisms to initiate that: no hub_wq
* timer, no SRP, no requests through sysfs.
*
* If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get
* suspended until their bus goes into global suspend (i.e., the root
* hub is suspended). Nevertheless, we change @udev->state to
* USB_STATE_SUSPENDED as this is the device's "logical" state. The actual
* upstream port setting is stored in @udev->port_is_suspended.
*
* Returns 0 on success, else negative errno.
*/
int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
struct usb_port *port_dev = hub->ports[udev->portnum - 1];
int port1 = udev->portnum;
int status;
bool really_suspend = true;
usb_lock_port(port_dev);
/* enable remote wakeup when appropriate; this lets the device
* wake up the upstream hub (including maybe the root hub).
*
* NOTE: OTG devices may issue remote wakeup (or SRP) even when
* we don't explicitly enable it here.
*/
if (udev->do_remote_wakeup) {
status = usb_enable_remote_wakeup(udev);
if (status) {
dev_dbg(&udev->dev, "won't remote wakeup, status %d\n",
status);
/* bail if autosuspend is requested */
if (PMSG_IS_AUTO(msg))
goto err_wakeup;
}
}
/* disable USB2 hardware LPM */
if (udev->usb2_hw_lpm_enabled == 1)
usb_set_usb2_hardware_lpm(udev, 0);
if (usb_disable_ltm(udev)) {
dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
status = -ENOMEM;
if (PMSG_IS_AUTO(msg))
goto err_ltm;
}
if (usb_unlocked_disable_lpm(udev)) {
dev_err(&udev->dev, "Failed to disable LPM before suspend\n.");
status = -ENOMEM;
if (PMSG_IS_AUTO(msg))
goto err_lpm3;
}
/* see 7.1.7.6 */
if (hub_is_superspeed(hub->hdev))
status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
/*
* For system suspend, we do not need to enable the suspend feature
* on individual USB-2 ports. The devices will automatically go
* into suspend a few ms after the root hub stops sending packets.
* The USB 2.0 spec calls this "global suspend".
*
* However, many USB hubs have a bug: They don't relay wakeup requests
* from a downstream port if the port's suspend feature isn't on.
* Therefore we will turn on the suspend feature if udev or any of its
* descendants is enabled for remote wakeup.
*/
else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
status = set_port_feature(hub->hdev, port1,
USB_PORT_FEAT_SUSPEND);
else {
really_suspend = false;
status = 0;
}
if (status) {
dev_dbg(&port_dev->dev, "can't suspend, status %d\n", status);
/* Try to enable USB3 LPM and LTM again */
usb_unlocked_enable_lpm(udev);
err_lpm3:
usb_enable_ltm(udev);
err_ltm:
/* Try to enable USB2 hardware LPM again */
if (udev->usb2_hw_lpm_capable == 1)
usb_set_usb2_hardware_lpm(udev, 1);
if (udev->do_remote_wakeup)
(void) usb_disable_remote_wakeup(udev);
err_wakeup:
/* System sleep transitions should never fail */
if (!PMSG_IS_AUTO(msg))
status = 0;
} else {
dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""),
udev->do_remote_wakeup);
if (really_suspend) {
udev->port_is_suspended = 1;
/* device has up to 10 msec to fully suspend */
msleep(10);
}
usb_set_device_state(udev, USB_STATE_SUSPENDED);
}
if (status == 0 && !udev->do_remote_wakeup && udev->persist_enabled
&& test_and_clear_bit(port1, hub->child_usage_bits))
pm_runtime_put_sync(&port_dev->dev);
usb_mark_last_busy(hub->hdev);
usb_unlock_port(port_dev);
return status;
}
/*
* If the USB "suspend" state is in use (rather than "global suspend"),
* many devices will be individually taken out of suspend state using
* special "resume" signaling. This routine kicks in shortly after
* hardware resume signaling is finished, either because of selective
* resume (by host) or remote wakeup (by device) ... now see what changed
* in the tree that's rooted at this device.
*
* If @udev->reset_resume is set then the device is reset before the
* status check is done.
*/
static int finish_port_resume(struct usb_device *udev)
{
int status = 0;
u16 devstatus = 0;
/* caller owns the udev device lock */
dev_dbg(&udev->dev, "%s\n",
udev->reset_resume ? "finish reset-resume" : "finish resume");
/* usb ch9 identifies four variants of SUSPENDED, based on what
* state the device resumes to. Linux currently won't see the
* first two on the host side; they'd be inside hub_port_init()
* during many timeouts, but hub_wq can't suspend until later.
*/
usb_set_device_state(udev, udev->actconfig
? USB_STATE_CONFIGURED
: USB_STATE_ADDRESS);
/* 10.5.4.5 says not to reset a suspended port if the attached
* device is enabled for remote wakeup. Hence the reset
* operation is carried out here, after the port has been
* resumed.
*/
if (udev->reset_resume) {
/*
* If the device morphs or switches modes when it is reset,
* we don't want to perform a reset-resume. We'll fail the
* resume, which will cause a logical disconnect, and then
* the device will be rediscovered.
*/
retry_reset_resume:
if (udev->quirks & USB_QUIRK_RESET)
status = -ENODEV;
else
status = usb_reset_and_verify_device(udev);
}
/* 10.5.4.5 says be sure devices in the tree are still there.
* For now let's assume the device didn't go crazy on resume,
* and device drivers will know about any resume quirks.
*/
if (status == 0) {
devstatus = 0;
status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
/* If a normal resume failed, try doing a reset-resume */
if (status && !udev->reset_resume && udev->persist_enabled) {
dev_dbg(&udev->dev, "retry with reset-resume\n");
udev->reset_resume = 1;
goto retry_reset_resume;
}
}
if (status) {
dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
status);
/*
* There are a few quirky devices which violate the standard
* by claiming to have remote wakeup enabled after a reset,
* which crash if the feature is cleared, hence check for
* udev->reset_resume
*/
} else if (udev->actconfig && !udev->reset_resume) {
if (udev->speed < USB_SPEED_SUPER) {
if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
status = usb_disable_remote_wakeup(udev);
} else {
status = usb_get_status(udev, USB_RECIP_INTERFACE, 0,
&devstatus);
if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP
| USB_INTRF_STAT_FUNC_RW))
status = usb_disable_remote_wakeup(udev);
}
if (status)
dev_dbg(&udev->dev,
"disable remote wakeup, status %d\n",
status);
status = 0;
}
return status;
}
/*
* Some SuperSpeed USB devices take a long time for link training.
* xHCI spec section 4.19.4 says that when link training is successful,
* the port sets the CSC bit to 1. So if software reads the port status
* before link training has succeeded, it will not find the device
* present.
* USB analyzer logs with such buggy devices show that in some cases the
* device switches on its RX termination only after a long delay from
* the host enabling VBUS. In a few other cases the device fails to
* negotiate link training on the first attempt. Devices have been
* reported to take as long as 2000 ms to train the link after the host
* enables VBUS and termination. The following routine implements a
* 2000 ms timeout for link training; if the link trains sooner, the
* loop exits earlier.
*
* FIXME: If a device was connected before suspend, but was removed
* while system was asleep, then the loop in the following routine will
* only exit at timeout.
*
* This routine should only be called when persist is enabled for a SS
* device.
*/
static int wait_for_ss_port_enable(struct usb_device *udev,
struct usb_hub *hub, int *port1,
u16 *portchange, u16 *portstatus)
{
int status = 0, delay_ms = 0;
while (delay_ms < 2000) {
if (status || *portstatus & USB_PORT_STAT_CONNECTION)
break;
msleep(20);
delay_ms += 20;
status = hub_port_status(hub, *port1, portstatus, portchange);
}
return status;
}
/*
* usb_port_resume - re-activate a suspended usb device's upstream port
* @udev: device to re-activate, not a root hub
* Context: must be able to sleep; device not locked; pm locks held
*
* This will re-activate the suspended device, increasing power usage
* while letting drivers communicate again with its endpoints.
* USB resume explicitly guarantees that the power session between
* the host and the device is the same as it was when the device
* suspended.
*
* If @udev->reset_resume is set then this routine won't check that the
* port is still enabled. Furthermore, finish_port_resume() above will
* reset @udev. The end result is that a broken power session can be
* recovered and @udev will appear to persist across a loss of VBUS power.
*
* For example, if a host controller doesn't maintain VBUS suspend current
* during a system sleep or is reset when the system wakes up, all the USB
* power sessions below it will be broken. This is especially troublesome
* for mass-storage devices containing mounted filesystems, since the
* device will appear to have disconnected and all the memory mappings
* to it will be lost. Using the USB_PERSIST facility, the device can be
* made to appear as if it had not disconnected.
*
* This facility can be dangerous. Although usb_reset_and_verify_device() makes
* every effort to insure that the same device is present after the
* reset as before, it cannot provide a 100% guarantee. Furthermore it's
* quite possible for a device to remain unaltered but its media to be
* changed. If the user replaces a flash memory card while the system is
* asleep, he will have only himself to blame when the filesystem on the
* new card is corrupted and the system crashes.
*
* Returns 0 on success, else negative errno.
*/
int usb_port_resume(struct usb_device *udev, pm_message_t msg)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
struct usb_port *port_dev = hub->ports[udev->portnum - 1];
int port1 = udev->portnum;
int status;
u16 portchange, portstatus;
if (!test_and_set_bit(port1, hub->child_usage_bits)) {
status = pm_runtime_get_sync(&port_dev->dev);
if (status < 0) {
dev_dbg(&udev->dev, "can't resume usb port, status %d\n",
status);
return status;
}
}
usb_lock_port(port_dev);
/* Skip the initial Clear-Suspend step for a remote wakeup */
status = hub_port_status(hub, port1, &portstatus, &portchange);
if (status == 0 && !port_is_suspended(hub, portstatus))
goto SuspendCleared;
/* see 7.1.7.7; affects power usage, but not budgeting */
if (hub_is_superspeed(hub->hdev))
status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U0);
else
status = usb_clear_port_feature(hub->hdev,
port1, USB_PORT_FEAT_SUSPEND);
if (status) {
dev_dbg(&port_dev->dev, "can't resume, status %d\n", status);
} else {
/* drive resume for at least 20 msec */
dev_dbg(&udev->dev, "usb %sresume\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""));
msleep(25);
/* Virtual root hubs can trigger on GET_PORT_STATUS to
* stop resume signaling. Then finish the resume
* sequence.
*/
status = hub_port_status(hub, port1, &portstatus, &portchange);
/* TRSMRCY = 10 msec */
msleep(10);
}
SuspendCleared:
if (status == 0) {
udev->port_is_suspended = 0;
if (hub_is_superspeed(hub->hdev)) {
if (portchange & USB_PORT_STAT_C_LINK_STATE)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_PORT_LINK_STATE);
} else {
if (portchange & USB_PORT_STAT_C_SUSPEND)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_SUSPEND);
}
}
if (udev->persist_enabled && hub_is_superspeed(hub->hdev))
status = wait_for_ss_port_enable(udev, hub, &port1, &portchange,
&portstatus);
status = check_port_resume_type(udev,
hub, port1, status, portchange, portstatus);
if (status == 0)
status = finish_port_resume(udev);
if (status < 0) {
dev_dbg(&udev->dev, "can't resume, status %d\n", status);
hub_port_logical_disconnect(hub, port1);
} else {
/* Try to enable USB2 hardware LPM */
if (udev->usb2_hw_lpm_capable == 1)
usb_set_usb2_hardware_lpm(udev, 1);
/* Try to enable USB3 LTM and LPM */
usb_enable_ltm(udev);
usb_unlocked_enable_lpm(udev);
}
usb_unlock_port(port_dev);
return status;
}
int usb_remote_wakeup(struct usb_device *udev)
{
int status = 0;
usb_lock_device(udev);
if (udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
status = usb_autoresume_device(udev);
if (status == 0) {
/* Let the drivers do their thing, then... */
usb_autosuspend_device(udev);
}
}
usb_unlock_device(udev);
return status;
}
/* Returns 1 if there was a remote wakeup and a connect status change. */
static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
u16 portstatus, u16 portchange)
__must_hold(&port_dev->status_lock)
{
struct usb_port *port_dev = hub->ports[port - 1];
struct usb_device *hdev;
struct usb_device *udev;
int connect_change = 0;
int ret;
hdev = hub->hdev;
udev = port_dev->child;
if (!hub_is_superspeed(hdev)) {
if (!(portchange & USB_PORT_STAT_C_SUSPEND))
return 0;
usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND);
} else {
if (!udev || udev->state != USB_STATE_SUSPENDED ||
(portstatus & USB_PORT_STAT_LINK_STATE) !=
USB_SS_PORT_LS_U0)
return 0;
}
if (udev) {
/* TRSMRCY = 10 msec */
msleep(10);
usb_unlock_port(port_dev);
ret = usb_remote_wakeup(udev);
usb_lock_port(port_dev);
if (ret < 0)
connect_change = 1;
} else {
ret = -ENODEV;
hub_port_disable(hub, port, 1);
}
dev_dbg(&port_dev->dev, "resume, status %d\n", ret);
return connect_change;
}
static int check_ports_changed(struct usb_hub *hub)
{
int port1;
for (port1 = 1; port1 <= hub->hdev->maxchild; ++port1) {
u16 portstatus, portchange;
int status;
status = hub_port_status(hub, port1, &portstatus, &portchange);
if (!status && portchange)
return 1;
}
return 0;
}
static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
{
struct usb_hub *hub = usb_get_intfdata (intf);
struct usb_device *hdev = hub->hdev;
unsigned port1;
int status;
/*
* Warn if children aren't already suspended.
* Also, add up the number of wakeup-enabled descendants.
*/
hub->wakeup_enabled_descendants = 0;
for (port1 = 1; port1 <= hdev->maxchild; port1++) {
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
if (udev && udev->can_submit) {
dev_warn(&port_dev->dev, "device %s not suspended yet\n",
dev_name(&udev->dev));
if (PMSG_IS_AUTO(msg))
return -EBUSY;
}
if (udev)
hub->wakeup_enabled_descendants +=
wakeup_enabled_descendants(udev);
}
if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) {
/* check if there are changes pending on hub ports */
if (check_ports_changed(hub)) {
if (PMSG_IS_AUTO(msg))
return -EBUSY;
pm_wakeup_event(&hdev->dev, 2000);
}
}
if (hub_is_superspeed(hdev) && hdev->do_remote_wakeup) {
/* Enable hub to send remote wakeup for all ports. */
for (port1 = 1; port1 <= hdev->maxchild; port1++) {
status = set_port_feature(hdev,
port1 |
USB_PORT_FEAT_REMOTE_WAKE_CONNECT |
USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT |
USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT,
USB_PORT_FEAT_REMOTE_WAKE_MASK);
}
}
dev_dbg(&intf->dev, "%s\n", __func__);
/* stop hub_wq and related activity */
hub_quiesce(hub, HUB_SUSPEND);
return 0;
}
static int hub_resume(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
dev_dbg(&intf->dev, "%s\n", __func__);
hub_activate(hub, HUB_RESUME);
return 0;
}
static int hub_reset_resume(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
dev_dbg(&intf->dev, "%s\n", __func__);
hub_activate(hub, HUB_RESET_RESUME);
return 0;
}
/**
* usb_root_hub_lost_power - called by HCD if the root hub lost Vbus power
* @rhdev: struct usb_device for the root hub
*
* The USB host controller driver calls this function when its root hub
* is resumed and Vbus power has been interrupted or the controller
* has been reset. The routine marks @rhdev as having lost power.
* When the hub driver is resumed it will take notice and carry out
* power-session recovery for all the "USB-PERSIST"-enabled child devices;
* the others will be disconnected.
*/
void usb_root_hub_lost_power(struct usb_device *rhdev)
{
dev_warn(&rhdev->dev, "root hub lost power or was reset\n");
rhdev->reset_resume = 1;
}
EXPORT_SYMBOL_GPL(usb_root_hub_lost_power);
static const char * const usb3_lpm_names[] = {
"U0",
"U1",
"U2",
"U3",
};
/*
* Send a Set SEL control transfer to the device, prior to enabling
* device-initiated U1 or U2. This lets the device know the exit latencies from
* the time the device initiates a U1 or U2 exit, to the time it will receive a
* packet from the host.
*
* This function will fail if the SEL or PEL values for udev are greater than
* the maximum allowed values for the link state to be enabled.
*/
static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
{
struct usb_set_sel_req *sel_values;
unsigned long long u1_sel;
unsigned long long u1_pel;
unsigned long long u2_sel;
unsigned long long u2_pel;
int ret;
if (udev->state != USB_STATE_CONFIGURED)
return 0;
/* Convert SEL and PEL stored in ns to us */
u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
u2_sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
u2_pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
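/* e.g. a stored u1_params.sel of 2300 ns becomes a 3 us SEL value */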
/*
* Make sure that the calculated SEL and PEL values for the link
* state we're enabling aren't bigger than the max SEL/PEL
* value that will fit in the SET SEL control transfer.
* Otherwise the device would get an incorrect idea of the exit
* latency for the link state, and could start a device-initiated
* U1/U2 when the exit latencies are too high.
*/
if ((state == USB3_LPM_U1 &&
(u1_sel > USB3_LPM_MAX_U1_SEL_PEL ||
u1_pel > USB3_LPM_MAX_U1_SEL_PEL)) ||
(state == USB3_LPM_U2 &&
(u2_sel > USB3_LPM_MAX_U2_SEL_PEL ||
u2_pel > USB3_LPM_MAX_U2_SEL_PEL))) {
dev_dbg(&udev->dev, "Device-initiated %s disabled due to long SEL %llu us or PEL %llu us\n",
usb3_lpm_names[state], u1_sel, u1_pel);
return -EINVAL;
}
/*
* If we're enabling device-initiated LPM for one link state,
* but the other link state has a too high SEL or PEL value,
* just set those values to the max in the Set SEL request.
*/
if (u1_sel > USB3_LPM_MAX_U1_SEL_PEL)
u1_sel = USB3_LPM_MAX_U1_SEL_PEL;
if (u1_pel > USB3_LPM_MAX_U1_SEL_PEL)
u1_pel = USB3_LPM_MAX_U1_SEL_PEL;
if (u2_sel > USB3_LPM_MAX_U2_SEL_PEL)
u2_sel = USB3_LPM_MAX_U2_SEL_PEL;
if (u2_pel > USB3_LPM_MAX_U2_SEL_PEL)
u2_pel = USB3_LPM_MAX_U2_SEL_PEL;
/*
* usb_enable_lpm() can be called as part of a failed device reset,
* which may be initiated by an error path of a mass storage driver.
* Therefore, use GFP_NOIO.
*/
sel_values = kmalloc(sizeof *(sel_values), GFP_NOIO);
if (!sel_values)
return -ENOMEM;
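/* u1 exit latencies are single bytes; the u2 values are 16-bit LE */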
sel_values->u1_sel = u1_sel;
sel_values->u1_pel = u1_pel;
sel_values->u2_sel = cpu_to_le16(u2_sel);
sel_values->u2_pel = cpu_to_le16(u2_pel);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_SEL,
USB_RECIP_DEVICE,
0, 0,
sel_values, sizeof *(sel_values),
USB_CTRL_SET_TIMEOUT);
kfree(sel_values);
return ret;
}
/*
* Enable or disable device-initiated U1 or U2 transitions.
*/
static int usb_set_device_initiated_lpm(struct usb_device *udev,
enum usb3_link_state state, bool enable)
{
int ret;
int feature;
switch (state) {
case USB3_LPM_U1:
feature = USB_DEVICE_U1_ENABLE;
break;
case USB3_LPM_U2:
feature = USB_DEVICE_U2_ENABLE;
break;
default:
dev_warn(&udev->dev, "%s: Can't %s non-U1 or U2 state.\n",
__func__, enable ? "enable" : "disable");
return -EINVAL;
}
if (udev->state != USB_STATE_CONFIGURED) {
dev_dbg(&udev->dev, "%s: Can't %s %s state "
"for unconfigured device.\n",
__func__, enable ? "enable" : "disable",
usb3_lpm_names[state]);
return 0;
}
if (enable) {
/*
* Now send the control transfer to enable device-initiated LPM
* for either U1 or U2.
*/
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE,
USB_RECIP_DEVICE,
feature,
0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
} else {
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE,
USB_RECIP_DEVICE,
feature,
0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
if (ret < 0) {
dev_warn(&udev->dev, "%s of device-initiated %s failed.\n",
enable ? "Enable" : "Disable",
usb3_lpm_names[state]);
return -EBUSY;
}
return 0;
}
static int usb_set_lpm_timeout(struct usb_device *udev,
enum usb3_link_state state, int timeout)
{
int ret;
int feature;
switch (state) {
case USB3_LPM_U1:
feature = USB_PORT_FEAT_U1_TIMEOUT;
break;
case USB3_LPM_U2:
feature = USB_PORT_FEAT_U2_TIMEOUT;
break;
default:
dev_warn(&udev->dev, "%s: Can't set timeout for non-U1 or U2 state.\n",
__func__);
return -EINVAL;
}
if (state == USB3_LPM_U1 && timeout > USB3_LPM_U1_MAX_TIMEOUT &&
timeout != USB3_LPM_DEVICE_INITIATED) {
dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x, "
"which is a reserved value.\n",
usb3_lpm_names[state], timeout);
return -EINVAL;
}
ret = set_port_feature(udev->parent,
USB_PORT_LPM_TIMEOUT(timeout) | udev->portnum,
feature);
if (ret < 0) {
dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x,"
"error code %i\n", usb3_lpm_names[state],
timeout, ret);
return -EBUSY;
}
if (state == USB3_LPM_U1)
udev->u1_params.timeout = timeout;
else
udev->u2_params.timeout = timeout;
return 0;
}
/*
* Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated
* U1/U2 entry.
*
* We will attempt to enable U1 or U2, but there are no guarantees that the
* control transfers to set the hub timeout or enable device-initiated U1/U2
* will be successful.
*
* If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI
* driver know about it. If that call fails, it should be harmless, and just
* take up slightly more bus bandwidth for unnecessary U1/U2 exit latency.
*/
static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
enum usb3_link_state state)
{
int timeout, ret;
__u8 u1_mel = udev->bos->ss_cap->bU1devExitLat;
__le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat;
/* If the device says it doesn't have *any* exit latency to come out of
* U1 or U2, it's probably lying. Assume it doesn't implement that link
* state.
*/
if ((state == USB3_LPM_U1 && u1_mel == 0) ||
(state == USB3_LPM_U2 && u2_mel == 0))
return;
/*
* First, let the device know about the exit latencies
* associated with the link state we're about to enable.
*/
ret = usb_req_set_sel(udev, state);
if (ret < 0) {
dev_warn(&udev->dev, "Set SEL for device-initiated %s failed.\n",
usb3_lpm_names[state]);
return;
}
/* We allow the host controller to set the U1/U2 timeout internally
* first, so that it can change its schedule to account for the
* additional latency to send data to a device in a lower power
* link state.
*/
timeout = hcd->driver->enable_usb3_lpm_timeout(hcd, udev, state);
/* xHCI host controller doesn't want to enable this LPM state. */
if (timeout == 0)
return;
if (timeout < 0) {
dev_warn(&udev->dev, "Could not enable %s link state, "
"xHCI error %i.\n", usb3_lpm_names[state],
timeout);
return;
}
if (usb_set_lpm_timeout(udev, state, timeout))
/* If we can't set the parent hub U1/U2 timeout,
* device-initiated LPM won't be allowed either, so let the xHCI
* host know that this link state won't be enabled.
*/
hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
/* Only a configured device will accept the Set Feature U1/U2_ENABLE */
else if (udev->actconfig)
usb_set_device_initiated_lpm(udev, state, true);
}
/*
* Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated
* U1/U2 entry.
*
* If this function returns -EBUSY, the parent hub will still allow U1/U2 entry.
* If zero is returned, the parent will not allow the link to go into U1/U2.
*
* If zero is returned, device-initiated U1/U2 entry may still be enabled, but
* it won't have an effect on the bus link state because the parent hub will
* still disallow device-initiated U1/U2 entry.
*
* If zero is returned, the xHCI host controller may still think U1/U2 entry is
* possible. The result will be slightly more bus bandwidth will be taken up
* (to account for U1/U2 exit latency), but it should be harmless.
*/
static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
enum usb3_link_state state)
{
switch (state) {
case USB3_LPM_U1:
case USB3_LPM_U2:
break;
default:
dev_warn(&udev->dev, "%s: Can't disable non-U1 or U2 state.\n",
__func__);
return -EINVAL;
}
if (usb_set_lpm_timeout(udev, state, 0))
return -EBUSY;
usb_set_device_initiated_lpm(udev, state, false);
if (hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state))
dev_warn(&udev->dev, "Could not disable xHCI %s timeout, "
"bus schedule bandwidth may be impacted.\n",
usb3_lpm_names[state]);
return 0;
}
/*
* Disable hub-initiated and device-initiated U1 and U2 entry.
* Caller must own the bandwidth_mutex.
*
* This will call usb_enable_lpm() on failure, which will decrement
* lpm_disable_count, and will re-enable LPM if lpm_disable_count reaches zero.
*/
int usb_disable_lpm(struct usb_device *udev)
{
struct usb_hcd *hcd;
if (!udev || !udev->parent ||
udev->speed != USB_SPEED_SUPER ||
!udev->lpm_capable ||
udev->state < USB_STATE_DEFAULT)
return 0;
hcd = bus_to_hcd(udev->bus);
if (!hcd || !hcd->driver->disable_usb3_lpm_timeout)
return 0;
udev->lpm_disable_count++;
if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0))
return 0;
/* If LPM is enabled, attempt to disable it. */
if (usb_disable_link_state(hcd, udev, USB3_LPM_U1))
goto enable_lpm;
if (usb_disable_link_state(hcd, udev, USB3_LPM_U2))
goto enable_lpm;
return 0;
enable_lpm:
usb_enable_lpm(udev);
return -EBUSY;
}
EXPORT_SYMBOL_GPL(usb_disable_lpm);
/* Grab the bandwidth_mutex before calling usb_disable_lpm() */
int usb_unlocked_disable_lpm(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
int ret;
if (!hcd)
return -EINVAL;
mutex_lock(hcd->bandwidth_mutex);
ret = usb_disable_lpm(udev);
mutex_unlock(hcd->bandwidth_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm);
/*
* Attempt to enable device-initiated and hub-initiated U1 and U2 entry. The
* xHCI host policy may prevent U1 or U2 from being enabled.
*
* Other callers may have disabled link PM, so U1 and U2 entry will be disabled
* until the lpm_disable_count drops to zero. Caller must own the
* bandwidth_mutex.
*/
void usb_enable_lpm(struct usb_device *udev)
{
struct usb_hcd *hcd;
if (!udev || !udev->parent ||
udev->speed != USB_SPEED_SUPER ||
!udev->lpm_capable ||
udev->state < USB_STATE_DEFAULT)
return;
udev->lpm_disable_count--;
hcd = bus_to_hcd(udev->bus);
/* Double check that we can both enable and disable LPM.
* Device must be configured to accept set feature U1/U2 timeout.
*/
if (!hcd || !hcd->driver->enable_usb3_lpm_timeout ||
!hcd->driver->disable_usb3_lpm_timeout)
return;
if (udev->lpm_disable_count > 0)
return;
usb_enable_link_state(hcd, udev, USB3_LPM_U1);
usb_enable_link_state(hcd, udev, USB3_LPM_U2);
}
EXPORT_SYMBOL_GPL(usb_enable_lpm);
/* Grab the bandwidth_mutex before calling usb_enable_lpm() */
void usb_unlocked_enable_lpm(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (!hcd)
return;
mutex_lock(hcd->bandwidth_mutex);
usb_enable_lpm(udev);
mutex_unlock(hcd->bandwidth_mutex);
}
EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
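/*
 * Illustrative sketch (an assumption, not part of this file): the
 * usb_(unlocked_)disable_lpm()/usb_(unlocked_)enable_lpm() pair behaves like
 * a reference count through lpm_disable_count, so a caller that must keep
 * the link out of U1/U2 for a while simply brackets the critical section.
 * The helper names in the sketch are hypothetical.
 */
#if 0
static int example_lpm_sensitive_operation(struct usb_device *udev)
{
	int ret;

	ret = usb_unlocked_disable_lpm(udev);	/* bumps lpm_disable_count */
	if (ret)
		return ret;

	ret = example_do_work(udev);		/* hypothetical helper */

	/* drops lpm_disable_count; LPM is re-enabled only when it reaches 0 */
	usb_unlocked_enable_lpm(udev);
	return ret;
}
#endif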
#else /* CONFIG_PM */
#define hub_suspend NULL
#define hub_resume NULL
#define hub_reset_resume NULL
int usb_disable_lpm(struct usb_device *udev)
{
return 0;
}
EXPORT_SYMBOL_GPL(usb_disable_lpm);
void usb_enable_lpm(struct usb_device *udev) { }
EXPORT_SYMBOL_GPL(usb_enable_lpm);
int usb_unlocked_disable_lpm(struct usb_device *udev)
{
return 0;
}
EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm);
void usb_unlocked_enable_lpm(struct usb_device *udev) { }
EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
int usb_disable_ltm(struct usb_device *udev)
{
return 0;
}
EXPORT_SYMBOL_GPL(usb_disable_ltm);
void usb_enable_ltm(struct usb_device *udev) { }
EXPORT_SYMBOL_GPL(usb_enable_ltm);
static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
u16 portstatus, u16 portchange)
{
return 0;
}
#endif /* CONFIG_PM */
/* USB 2.0 spec, 7.1.7.3 / fig 7-29:
*
* Between connect detection and reset signaling there must be a delay
* of 100ms at least for debounce and power-settling. The corresponding
* timer shall restart whenever the downstream port detects a disconnect.
*
* Apparently there are some bluetooth and irda-dongles and a number of
* low-speed devices for which this debounce period may last over a second.
* Not covered by the spec - but easy to deal with.
*
* This implementation uses a 1500ms total debounce timeout; if the
* connection isn't stable by then it returns -ETIMEDOUT. It checks
* every 25ms for transient disconnects. When the port status has been
* unchanged for 100ms it returns the port status.
*/
int hub_port_debounce(struct usb_hub *hub, int port1, bool must_be_connected)
{
int ret;
u16 portchange, portstatus;
unsigned connection = 0xffff;
int total_time, stable_time = 0;
struct usb_port *port_dev = hub->ports[port1 - 1];
for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
ret = hub_port_status(hub, port1, &portstatus, &portchange);
if (ret < 0)
return ret;
if (!(portchange & USB_PORT_STAT_C_CONNECTION) &&
(portstatus & USB_PORT_STAT_CONNECTION) == connection) {
if (!must_be_connected ||
(connection == USB_PORT_STAT_CONNECTION))
stable_time += HUB_DEBOUNCE_STEP;
if (stable_time >= HUB_DEBOUNCE_STABLE)
break;
} else {
stable_time = 0;
connection = portstatus & USB_PORT_STAT_CONNECTION;
}
if (portchange & USB_PORT_STAT_C_CONNECTION) {
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
}
if (total_time >= HUB_DEBOUNCE_TIMEOUT)
break;
msleep(HUB_DEBOUNCE_STEP);
}
dev_dbg(&port_dev->dev, "debounce total %dms stable %dms status 0x%x\n",
total_time, stable_time, portstatus);
if (stable_time < HUB_DEBOUNCE_STABLE)
return -ETIMEDOUT;
return portstatus;
}
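/*
 * Illustrative sketch (hedged; the real wrappers live elsewhere, e.g. in the
 * hub header, and may differ): callers usually pick the must_be_connected
 * policy through thin helpers such as hub_port_debounce_be_stable(), which
 * is what the connect handling below uses.
 */
#if 0
static inline int hub_port_debounce_be_connected(struct usb_hub *hub,
		int port1)
{
	/* wait until a device is stably connected on port1 */
	return hub_port_debounce(hub, port1, true);
}

static inline int hub_port_debounce_be_stable(struct usb_hub *hub, int port1)
{
	/* wait until the connect status, connected or not, is stable */
	return hub_port_debounce(hub, port1, false);
}
#endif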
void usb_ep0_reinit(struct usb_device *udev)
{
usb_disable_endpoint(udev, 0 + USB_DIR_IN, true);
usb_disable_endpoint(udev, 0 + USB_DIR_OUT, true);
usb_enable_endpoint(udev, &udev->ep0, true);
}
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
static int hub_set_address(struct usb_device *udev, int devnum)
{
int retval;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
/*
* The host controller will choose the device address,
* instead of the core having chosen it earlier
*/
if (!hcd->driver->address_device && devnum <= 1)
return -EINVAL;
if (udev->state == USB_STATE_ADDRESS)
return 0;
if (udev->state != USB_STATE_DEFAULT)
return -EINVAL;
if (hcd->driver->address_device)
retval = hcd->driver->address_device(hcd, udev);
else
retval = usb_control_msg(udev, usb_sndaddr0pipe(),
USB_REQ_SET_ADDRESS, 0, devnum, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval == 0) {
update_devnum(udev, devnum);
/* Device now using proper address. */
usb_set_device_state(udev, USB_STATE_ADDRESS);
usb_ep0_reinit(udev);
}
return retval;
}
/*
* There are reports of USB 3.0 devices that say they support USB 2.0 Link PM
* when they're plugged into a USB 2.0 port, but they don't work when LPM is
* enabled.
*
* Only enable USB 2.0 Link PM if the port is internal (hardwired), or the
* device says it supports the new USB 2.0 Link PM errata by setting the BESL
* support bit in the BOS descriptor.
*/
static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
if (!udev->usb2_hw_lpm_capable)
return;
if (hub)
connect_type = hub->ports[udev->portnum - 1]->connect_type;
if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
udev->usb2_hw_lpm_allowed = 1;
usb_set_usb2_hardware_lpm(udev, 1);
}
}
static int hub_enable_device(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (!hcd->driver->enable_device)
return 0;
if (udev->state == USB_STATE_ADDRESS)
return 0;
if (udev->state != USB_STATE_DEFAULT)
return -EINVAL;
return hcd->driver->enable_device(hcd, udev);
}
/* Reset device, (re)assign address, get device descriptor.
* Device connection must be stable, no more debouncing needed.
* Returns device in USB_STATE_ADDRESS, except on error.
*
* If this is called for an already-existing device (as part of
* usb_reset_and_verify_device), the caller must own the device lock and
* the port lock. For a newly detected device that is not accessible
* through any global pointers, it's not necessary to lock the device,
* but it is still necessary to lock the port.
*/
static int
hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
int retry_counter)
{
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
int i, j, retval;
unsigned delay = HUB_SHORT_RESET_TIME;
enum usb_device_speed oldspeed = udev->speed;
const char *speed;
int devnum = udev->devnum;
/* root hub ports have a slightly longer reset period
* (from USB 2.0 spec, section 7.1.7.5)
*/
if (!hdev->parent) {
delay = HUB_ROOT_RESET_TIME;
if (port1 == hdev->bus->otg_port)
hdev->bus->b_hnp_enable = 0;
}
/* Some low speed devices have problems with the quick delay, so */
/* be a bit pessimistic with those devices. RHbug #23670 */
if (oldspeed == USB_SPEED_LOW)
delay = HUB_LONG_RESET_TIME;
mutex_lock(&hdev->bus->usb_address0_mutex);
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
retval = hub_port_reset(hub, port1, udev, delay, false);
if (retval < 0) /* error or disconnect */
goto fail;
/* success, speed is known */
retval = -ENODEV;
if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
dev_dbg(&udev->dev, "device reset changed speed!\n");
goto fail;
}
oldspeed = udev->speed;
/* USB 2.0 section 5.5.3 talks about ep0 maxpacket ...
* it's fixed size except for full speed devices.
* For Wireless USB devices, ep0 max packet is always 512 (tho
* reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
*/
switch (udev->speed) {
case USB_SPEED_SUPER:
case USB_SPEED_WIRELESS: /* fixed at 512 */
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
break;
case USB_SPEED_HIGH: /* fixed at 64 */
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
break;
case USB_SPEED_FULL: /* 8, 16, 32, or 64 */
/* to determine the ep0 maxpacket size, try to read
* the device descriptor to get bMaxPacketSize0 and
* then correct our initial guess.
*/
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
break;
case USB_SPEED_LOW: /* fixed at 8 */
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8);
break;
default:
goto fail;
}
if (udev->speed == USB_SPEED_WIRELESS)
speed = "variable speed Wireless";
else
speed = usb_speed_string(udev->speed);
if (udev->speed != USB_SPEED_SUPER)
dev_info(&udev->dev,
"%s %s USB device number %d using %s\n",
(udev->config) ? "reset" : "new", speed,
devnum, udev->bus->controller->driver->name);
/* Set up TT records, if needed */
if (hdev->tt) {
udev->tt = hdev->tt;
udev->ttport = hdev->ttport;
} else if (udev->speed != USB_SPEED_HIGH
&& hdev->speed == USB_SPEED_HIGH) {
if (!hub->tt.hub) {
dev_err(&udev->dev, "parent hub has no TT\n");
retval = -EINVAL;
goto fail;
}
udev->tt = &hub->tt;
udev->ttport = port1;
}
/* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
* Because device hardware and firmware is sometimes buggy in
* this area, and this is how Linux has done it for ages.
* Change it cautiously.
*
* NOTE: If use_new_scheme() is true we will start by issuing
* a 64-byte GET_DESCRIPTOR request. This is what Windows does,
* so it may help with some non-standards-compliant devices.
* Otherwise we start with SET_ADDRESS and then try to read the
* first 8 bytes of the device descriptor to get the ep0 maxpacket
* value.
*/
for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
bool did_new_scheme = false;
if (use_new_scheme(udev, retry_counter)) {
struct usb_device_descriptor *buf;
int r = 0;
did_new_scheme = true;
retval = hub_enable_device(udev);
if (retval < 0) {
dev_err(&udev->dev,
"hub failed to enable device, error %d\n",
retval);
goto fail;
}
#define GET_DESCRIPTOR_BUFSIZE 64
buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO);
if (!buf) {
retval = -ENOMEM;
continue;
}
/* Retry on all errors; some devices are flakey.
* 255 is for WUSB devices, we actually need to use
* 512 (WUSB1.0[4.8.1]).
*/
for (j = 0; j < 3; ++j) {
buf->bMaxPacketSize0 = 0;
r = usb_control_msg(udev, usb_rcvaddr0pipe(),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
USB_DT_DEVICE << 8, 0,
buf, GET_DESCRIPTOR_BUFSIZE,
initial_descriptor_timeout);
switch (buf->bMaxPacketSize0) {
case 8: case 16: case 32: case 64: case 255:
if (buf->bDescriptorType ==
USB_DT_DEVICE) {
r = 0;
break;
}
/* FALL THROUGH */
default:
if (r == 0)
r = -EPROTO;
break;
}
if (r == 0)
break;
}
udev->descriptor.bMaxPacketSize0 =
buf->bMaxPacketSize0;
kfree(buf);
retval = hub_port_reset(hub, port1, udev, delay, false);
if (retval < 0) /* error or disconnect */
goto fail;
if (oldspeed != udev->speed) {
dev_dbg(&udev->dev,
"device reset changed speed!\n");
retval = -ENODEV;
goto fail;
}
if (r) {
if (r != -ENODEV)
dev_err(&udev->dev, "device descriptor read/64, error %d\n",
r);
retval = -EMSGSIZE;
continue;
}
#undef GET_DESCRIPTOR_BUFSIZE
}
/*
* If device is WUSB, we already assigned an
* unauthorized address in the Connect Ack sequence;
* authorization will assign the final address.
*/
if (udev->wusb == 0) {
for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
retval = hub_set_address(udev, devnum);
if (retval >= 0)
break;
msleep(200);
}
if (retval < 0) {
if (retval != -ENODEV)
dev_err(&udev->dev, "device not accepting address %d, error %d\n",
devnum, retval);
goto fail;
}
if (udev->speed == USB_SPEED_SUPER) {
devnum = udev->devnum;
dev_info(&udev->dev,
"%s SuperSpeed USB device number %d using %s\n",
(udev->config) ? "reset" : "new",
devnum, udev->bus->controller->driver->name);
}
/* cope with hardware quirkiness:
* - let SET_ADDRESS settle, some device hardware wants it
* - read ep0 maxpacket even for high and low speed,
*/
msleep(10);
/* use_new_scheme() checks the speed which may have
* changed since the initial look so we cache the result
* in did_new_scheme
*/
if (did_new_scheme)
break;
}
retval = usb_get_device_descriptor(udev, 8);
if (retval < 8) {
if (retval != -ENODEV)
dev_err(&udev->dev,
"device descriptor read/8, error %d\n",
retval);
if (retval >= 0)
retval = -EMSGSIZE;
} else {
retval = 0;
break;
}
}
if (retval)
goto fail;
/*
* Some superspeed devices have finished the link training process
* and are attached to a superspeed hub port, but the device descriptor
* retrieved from those devices shows they aren't superspeed devices.
* A warm reset of the port they are attached to can fix them.
*/
if ((udev->speed == USB_SPEED_SUPER) &&
(le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
dev_err(&udev->dev, "got a wrong device descriptor, "
"warm reset device\n");
hub_port_reset(hub, port1, udev,
HUB_BH_RESET_TIME, true);
retval = -EINVAL;
goto fail;
}
if (udev->descriptor.bMaxPacketSize0 == 0xff ||
udev->speed == USB_SPEED_SUPER)
i = 512;
else
i = udev->descriptor.bMaxPacketSize0;
if (usb_endpoint_maxp(&udev->ep0.desc) != i) {
if (udev->speed == USB_SPEED_LOW ||
!(i == 8 || i == 16 || i == 32 || i == 64)) {
dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i);
retval = -EMSGSIZE;
goto fail;
}
if (udev->speed == USB_SPEED_FULL)
dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
else
dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i);
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
usb_ep0_reinit(udev);
}
retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
if (retval < (signed)sizeof(udev->descriptor)) {
if (retval != -ENODEV)
dev_err(&udev->dev, "device descriptor read/all, error %d\n",
retval);
if (retval >= 0)
retval = -ENOMSG;
goto fail;
}
if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
retval = usb_get_bos_descriptor(udev);
if (!retval) {
udev->lpm_capable = usb_device_supports_lpm(udev);
usb_set_lpm_parameters(udev);
}
}
retval = 0;
/* notify HCD that we have a device connected and addressed */
if (hcd->driver->update_device)
hcd->driver->update_device(hcd, udev);
hub_set_initial_usb2_lpm_policy(udev);
fail:
if (retval) {
hub_port_disable(hub, port1, 0);
update_devnum(udev, devnum); /* for disconnect processing */
}
mutex_unlock(&hdev->bus->usb_address0_mutex);
return retval;
}
static void
check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
{
struct usb_qualifier_descriptor *qual;
int status;
if (udev->quirks & USB_QUIRK_DEVICE_QUALIFIER)
return;
qual = kmalloc (sizeof *qual, GFP_KERNEL);
if (qual == NULL)
return;
status = usb_get_descriptor (udev, USB_DT_DEVICE_QUALIFIER, 0,
qual, sizeof *qual);
if (status == sizeof *qual) {
dev_info(&udev->dev, "not running at top speed; "
"connect to a high speed hub\n");
/* hub LEDs are probably harder to miss than syslog */
if (hub->has_indicators) {
hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
queue_delayed_work(system_power_efficient_wq,
&hub->leds, 0);
}
}
kfree(qual);
}
static unsigned
hub_power_remaining (struct usb_hub *hub)
{
struct usb_device *hdev = hub->hdev;
int remaining;
int port1;
if (!hub->limited_power)
return 0;
remaining = hdev->bus_mA - hub->descriptor->bHubContrCurrent;
for (port1 = 1; port1 <= hdev->maxchild; ++port1) {
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
unsigned unit_load;
int delta;
if (!udev)
continue;
if (hub_is_superspeed(udev))
unit_load = 150;
else
unit_load = 100;
/*
* Unconfigured devices may not use more than one unit load,
* or 8mA for OTG ports
*/
if (udev->actconfig)
delta = usb_get_max_power(udev, udev->actconfig);
else if (port1 != udev->bus->otg_port || hdev->parent)
delta = unit_load;
else
delta = 8;
if (delta > hub->mA_per_port)
dev_warn(&port_dev->dev, "%dmA is over %umA budget!\n",
delta, hub->mA_per_port);
remaining -= delta;
}
if (remaining < 0) {
dev_warn(hub->intfdev, "%dmA over power budget!\n",
-remaining);
remaining = 0;
}
return remaining;
}
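/*
 * Worked example (illustrative figures only, not taken from this file): a
 * bus-powered high-speed hub with limited_power set, bus_mA = 500 and
 * bHubContrCurrent = 100 starts with 500 - 100 = 400 mA to distribute. Two
 * attached but unconfigured full-speed devices count one unit load (100 mA)
 * each, so hub_power_remaining() reports 400 - 100 - 100 = 200 mA left.
 */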
static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
u16 portchange)
{
int status, i;
unsigned unit_load;
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
static int unreliable_port = -1;
/* Disconnect any existing devices under this port */
if (udev) {
if (hcd->usb_phy && !hdev->parent)
usb_phy_notify_disconnect(hcd->usb_phy, udev->speed);
usb_disconnect(&port_dev->child);
}
/* We can forget about a "removed" device when there's a physical
* disconnect or the connect status changes.
*/
if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
(portchange & USB_PORT_STAT_C_CONNECTION))
clear_bit(port1, hub->removed_bits);
if (portchange & (USB_PORT_STAT_C_CONNECTION |
USB_PORT_STAT_C_ENABLE)) {
status = hub_port_debounce_be_stable(hub, port1);
if (status < 0) {
if (status != -ENODEV &&
port1 != unreliable_port &&
printk_ratelimit())
dev_err(&port_dev->dev, "connect-debounce failed\n");
portstatus &= ~USB_PORT_STAT_CONNECTION;
unreliable_port = port1;
} else {
portstatus = status;
}
}
/* Return now if debouncing failed or nothing is connected or
* the device was "removed".
*/
if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
test_bit(port1, hub->removed_bits)) {
/*
* maybe switch power back on (e.g. root hub was reset)
* but only if the port isn't owned by someone else.
*/
if (hub_is_port_power_switchable(hub)
&& !port_is_power_on(hub, portstatus)
&& !port_dev->port_owner)
set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
if (portstatus & USB_PORT_STAT_ENABLE)
goto done;
return;
}
if (hub_is_superspeed(hub->hdev))
unit_load = 150;
else
unit_load = 100;
status = 0;
for (i = 0; i < SET_CONFIG_TRIES; i++) {
/* reallocate for each attempt, since references
* to the previous one can escape in various ways
*/
udev = usb_alloc_dev(hdev, hdev->bus, port1);
if (!udev) {
dev_err(&port_dev->dev,
"couldn't allocate usb_device\n");
goto done;
}
usb_set_device_state(udev, USB_STATE_POWERED);
udev->bus_mA = hub->mA_per_port;
udev->level = hdev->level + 1;
udev->wusb = hub_is_wusb(hub);
/* Only USB 3.0 devices are connected to SuperSpeed hubs. */
if (hub_is_superspeed(hub->hdev))
udev->speed = USB_SPEED_SUPER;
else
udev->speed = USB_SPEED_UNKNOWN;
choose_devnum(udev);
if (udev->devnum <= 0) {
status = -ENOTCONN; /* Don't retry */
goto loop;
}
/* reset (non-USB 3.0 devices) and get descriptor */
usb_lock_port(port_dev);
status = hub_port_init(hub, udev, port1, i);
usb_unlock_port(port_dev);
if (status < 0)
goto loop;
usb_detect_quirks(udev);
if (udev->quirks & USB_QUIRK_DELAY_INIT)
msleep(1000);
/* consecutive bus-powered hubs aren't reliable; they can
* violate the voltage drop budget. If the new child has
* a "powered" LED, users should notice we didn't enable it
* (without reading syslog), even without per-port LEDs
* on the parent.
*/
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB
&& udev->bus_mA <= unit_load) {
u16 devstat;
status = usb_get_status(udev, USB_RECIP_DEVICE, 0,
&devstat);
if (status) {
dev_dbg(&udev->dev, "get status %d ?\n", status);
goto loop_disable;
}
if ((devstat & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
dev_err(&udev->dev,
"can't connect bus-powered hub "
"to this port\n");
if (hub->has_indicators) {
hub->indicator[port1-1] =
INDICATOR_AMBER_BLINK;
queue_delayed_work(
system_power_efficient_wq,
&hub->leds, 0);
}
status = -ENOTCONN; /* Don't retry */
goto loop_disable;
}
}
/* check for devices running slower than they could */
if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200
&& udev->speed == USB_SPEED_FULL
&& highspeed_hubs != 0)
check_highspeed (hub, udev, port1);
/* Store the parent's children[] pointer. At this point
* udev becomes globally accessible, although presumably
* no one will look at it until hdev is unlocked.
*/
status = 0;
mutex_lock(&usb_port_peer_mutex);
/* We mustn't add new devices if the parent hub has
* been disconnected; we would race with the
* recursively_mark_NOTATTACHED() routine.
*/
spin_lock_irq(&device_state_lock);
if (hdev->state == USB_STATE_NOTATTACHED)
status = -ENOTCONN;
else
port_dev->child = udev;
spin_unlock_irq(&device_state_lock);
mutex_unlock(&usb_port_peer_mutex);
/* Run it through the hoops (find a driver, etc) */
if (!status) {
status = usb_new_device(udev);
if (status) {
mutex_lock(&usb_port_peer_mutex);
spin_lock_irq(&device_state_lock);
port_dev->child = NULL;
spin_unlock_irq(&device_state_lock);
mutex_unlock(&usb_port_peer_mutex);
} else {
if (hcd->usb_phy && !hdev->parent)
usb_phy_notify_connect(hcd->usb_phy,
udev->speed);
}
}
if (status)
goto loop_disable;
status = hub_power_remaining(hub);
if (status)
dev_dbg(hub->intfdev, "%dmA power budget left\n", status);
return;
loop_disable:
hub_port_disable(hub, port1, 1);
loop:
usb_ep0_reinit(udev);
release_devnum(udev);
hub_free_dev(udev);
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
}
if (hub->hdev->parent ||
!hcd->driver->port_handed_over ||
!(hcd->driver->port_handed_over)(hcd, port1)) {
if (status != -ENOTCONN && status != -ENODEV)
dev_err(&port_dev->dev,
"unable to enumerate USB device\n");
}
done:
hub_port_disable(hub, port1, 1);
if (hcd->driver->relinquish_port && !hub->hdev->parent)
hcd->driver->relinquish_port(hcd, port1);
}
/* Handle physical or logical connection change events.
* This routine is called when:
* a port connection-change occurs;
* a port enable-change occurs (often caused by EMI);
* usb_reset_and_verify_device() encounters changed descriptors (as from
* a firmware download)
* caller already locked the hub
*/
static void hub_port_connect_change(struct usb_hub *hub, int port1,
u16 portstatus, u16 portchange)
__must_hold(&port_dev->status_lock)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
int status = -ENODEV;
dev_dbg(&port_dev->dev, "status %04x, change %04x, %s\n", portstatus,
portchange, portspeed(hub, portstatus));
if (hub->has_indicators) {
set_port_led(hub, port1, HUB_LED_AUTO);
hub->indicator[port1-1] = INDICATOR_AUTO;
}
#ifdef CONFIG_USB_OTG
/* during HNP, don't repeat the debounce */
if (hub->hdev->bus->is_b_host)
portchange &= ~(USB_PORT_STAT_C_CONNECTION |
USB_PORT_STAT_C_ENABLE);
#endif
/* Try to resuscitate an existing device */
if ((portstatus & USB_PORT_STAT_CONNECTION) && udev &&
udev->state != USB_STATE_NOTATTACHED) {
if (portstatus & USB_PORT_STAT_ENABLE) {
status = 0; /* Nothing to do */
#ifdef CONFIG_PM
} else if (udev->state == USB_STATE_SUSPENDED &&
udev->persist_enabled) {
/* For a suspended device, treat this as a
* remote wakeup event.
*/
usb_unlock_port(port_dev);
status = usb_remote_wakeup(udev);
usb_lock_port(port_dev);
#endif
} else {
/* Don't resuscitate */;
}
}
clear_bit(port1, hub->change_bits);
/* successfully revalidated the connection */
if (status == 0)
return;
usb_unlock_port(port_dev);
hub_port_connect(hub, port1, portstatus, portchange);
usb_lock_port(port_dev);
}
static void port_event(struct usb_hub *hub, int port1)
__must_hold(&port_dev->status_lock)
{
int connect_change;
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
struct usb_device *hdev = hub->hdev;
u16 portstatus, portchange;
connect_change = test_bit(port1, hub->change_bits);
clear_bit(port1, hub->event_bits);
clear_bit(port1, hub->wakeup_bits);
if (hub_port_status(hub, port1, &portstatus, &portchange) < 0)
return;
if (portchange & USB_PORT_STAT_C_CONNECTION) {
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
connect_change = 1;
}
if (portchange & USB_PORT_STAT_C_ENABLE) {
if (!connect_change)
dev_dbg(&port_dev->dev, "enable change, status %08x\n",
portstatus);
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE);
/*
* EM interference sometimes causes badly shielded USB devices
* to be shut down by the hub; this hack enables them again.
* Works at least with the mouse driver.
*/
if (!(portstatus & USB_PORT_STAT_ENABLE)
&& !connect_change && udev) {
dev_err(&port_dev->dev, "disabled by hub (EMI?), re-enabling...\n");
connect_change = 1;
}
}
if (portchange & USB_PORT_STAT_C_OVERCURRENT) {
u16 status = 0, unused;
dev_dbg(&port_dev->dev, "over-current change\n");
usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_C_OVER_CURRENT);
msleep(100); /* Cool down */
hub_power_on(hub, true);
hub_port_status(hub, port1, &status, &unused);
if (status & USB_PORT_STAT_OVERCURRENT)
dev_err(&port_dev->dev, "over-current condition\n");
}
if (portchange & USB_PORT_STAT_C_RESET) {
dev_dbg(&port_dev->dev, "reset change\n");
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_RESET);
}
if ((portchange & USB_PORT_STAT_C_BH_RESET)
&& hub_is_superspeed(hdev)) {
dev_dbg(&port_dev->dev, "warm reset change\n");
usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_C_BH_PORT_RESET);
}
if (portchange & USB_PORT_STAT_C_LINK_STATE) {
dev_dbg(&port_dev->dev, "link state change\n");
usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_C_PORT_LINK_STATE);
}
if (portchange & USB_PORT_STAT_C_CONFIG_ERROR) {
dev_warn(&port_dev->dev, "config error\n");
usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_C_PORT_CONFIG_ERROR);
}
/* skip port actions that require the port to be powered on */
if (!pm_runtime_active(&port_dev->dev))
return;
if (hub_handle_remote_wakeup(hub, port1, portstatus, portchange))
connect_change = 1;
/*
* Warm reset a USB3 protocol port if it's in
* SS.Inactive state.
*/
if (hub_port_warm_reset_required(hub, port1, portstatus)) {
dev_dbg(&port_dev->dev, "do warm reset\n");
if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION)
|| udev->state == USB_STATE_NOTATTACHED) {
if (hub_port_reset(hub, port1, NULL,
HUB_BH_RESET_TIME, true) < 0)
hub_port_disable(hub, port1, 1);
} else {
usb_unlock_port(port_dev);
usb_lock_device(udev);
usb_reset_device(udev);
usb_unlock_device(udev);
usb_lock_port(port_dev);
connect_change = 0;
}
}
if (connect_change)
hub_port_connect_change(hub, port1, portstatus, portchange);
}
static void hub_event(struct work_struct *work)
{
struct usb_device *hdev;
struct usb_interface *intf;
struct usb_hub *hub;
struct device *hub_dev;
u16 hubstatus;
u16 hubchange;
int i, ret;
hub = container_of(work, struct usb_hub, events);
hdev = hub->hdev;
hub_dev = hub->intfdev;
intf = to_usb_interface(hub_dev);
dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
hdev->state, hdev->maxchild,
/* NOTE: expects max 15 ports... */
(u16) hub->change_bits[0],
(u16) hub->event_bits[0]);
/* Lock the device, then check to see if we were
* disconnected while waiting for the lock to succeed. */
usb_lock_device(hdev);
if (unlikely(hub->disconnected))
goto out_hdev_lock;
/* If the hub has died, clean up after it */
if (hdev->state == USB_STATE_NOTATTACHED) {
hub->error = -ENODEV;
hub_quiesce(hub, HUB_DISCONNECT);
goto out_hdev_lock;
}
/* Autoresume */
ret = usb_autopm_get_interface(intf);
if (ret) {
dev_dbg(hub_dev, "Can't autoresume: %d\n", ret);
goto out_hdev_lock;
}
/* If this is an inactive hub, do nothing */
if (hub->quiescing)
goto out_autopm;
if (hub->error) {
dev_dbg(hub_dev, "resetting for error %d\n", hub->error);
ret = usb_reset_device(hdev);
if (ret) {
dev_dbg(hub_dev, "error resetting hub: %d\n", ret);
goto out_autopm;
}
hub->nerrors = 0;
hub->error = 0;
}
/* deal with port status changes */
for (i = 1; i <= hdev->maxchild; i++) {
struct usb_port *port_dev = hub->ports[i - 1];
if (test_bit(i, hub->event_bits)
|| test_bit(i, hub->change_bits)
|| test_bit(i, hub->wakeup_bits)) {
/*
* The get_noresume and barrier ensure that if
* the port was in the process of resuming, we
* flush that work and keep the port active for
* the duration of the port_event(). However,
* if the port is runtime pm suspended
* (powered-off), we leave it in that state, run
* an abbreviated port_event(), and move on.
*/
pm_runtime_get_noresume(&port_dev->dev);
pm_runtime_barrier(&port_dev->dev);
usb_lock_port(port_dev);
port_event(hub, i);
usb_unlock_port(port_dev);
pm_runtime_put_sync(&port_dev->dev);
}
}
/* deal with hub status changes */
if (test_and_clear_bit(0, hub->event_bits) == 0)
; /* do nothing */
else if (hub_hub_status(hub, &hubstatus, &hubchange) < 0)
dev_err(hub_dev, "get_hub_status failed\n");
else {
if (hubchange & HUB_CHANGE_LOCAL_POWER) {
dev_dbg(hub_dev, "power change\n");
clear_hub_feature(hdev, C_HUB_LOCAL_POWER);
if (hubstatus & HUB_STATUS_LOCAL_POWER)
/* FIXME: Is this always true? */
hub->limited_power = 1;
else
hub->limited_power = 0;
}
if (hubchange & HUB_CHANGE_OVERCURRENT) {
u16 status = 0;
u16 unused;
dev_dbg(hub_dev, "over-current change\n");
clear_hub_feature(hdev, C_HUB_OVER_CURRENT);
msleep(500); /* Cool down */
hub_power_on(hub, true);
hub_hub_status(hub, &status, &unused);
if (status & HUB_STATUS_OVERCURRENT)
dev_err(hub_dev, "over-current condition\n");
}
}
out_autopm:
/* Balance the usb_autopm_get_interface() above */
usb_autopm_put_interface_no_suspend(intf);
out_hdev_lock:
usb_unlock_device(hdev);
/* Balance the stuff in kick_hub_wq() and allow autosuspend */
usb_autopm_put_interface(intf);
kref_put(&hub->kref, hub_release);
}
static const struct usb_device_id hub_id_table[] = {
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_GENESYS_LOGIC,
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
.bDeviceClass = USB_CLASS_HUB},
{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
.bInterfaceClass = USB_CLASS_HUB},
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, hub_id_table);
static struct usb_driver hub_driver = {
.name = "hub",
.probe = hub_probe,
.disconnect = hub_disconnect,
.suspend = hub_suspend,
.resume = hub_resume,
.reset_resume = hub_reset_resume,
.pre_reset = hub_pre_reset,
.post_reset = hub_post_reset,
.unlocked_ioctl = hub_ioctl,
.id_table = hub_id_table,
.supports_autosuspend = 1,
};
int usb_hub_init(void)
{
if (usb_register(&hub_driver) < 0) {
printk(KERN_ERR "%s: can't register hub driver\n",
usbcore_name);
return -1;
}
/*
* The workqueue needs to be freezable to avoid interfering with
* USB-PERSIST port handover. Otherwise it might see that a full-speed
* device was gone before the EHCI controller had handed its port
* over to the companion full-speed controller.
*/
hub_wq = alloc_workqueue("usb_hub_wq", WQ_FREEZABLE, 0);
if (hub_wq)
return 0;
/* Fall through if workqueue allocation failed */
usb_deregister(&hub_driver);
pr_err("%s: can't allocate workqueue for usb hub\n", usbcore_name);
return -1;
}
void usb_hub_cleanup(void)
{
destroy_workqueue(hub_wq);
/*
* Hub resources are freed for us by usb_deregister. It calls
* usb_driver_purge on every device which in turn calls that
* device's disconnect function if it is using this driver.
* The hub_disconnect function takes care of releasing the
* individual hub resources. -greg
*/
usb_deregister(&hub_driver);
} /* usb_hub_cleanup() */
static int descriptors_changed(struct usb_device *udev,
struct usb_device_descriptor *old_device_descriptor,
struct usb_host_bos *old_bos)
{
int changed = 0;
unsigned index;
unsigned serial_len = 0;
unsigned len;
unsigned old_length;
int length;
char *buf;
if (memcmp(&udev->descriptor, old_device_descriptor,
sizeof(*old_device_descriptor)) != 0)
return 1;
if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
return 1;
if (udev->bos) {
len = le16_to_cpu(udev->bos->desc->wTotalLength);
if (len != le16_to_cpu(old_bos->desc->wTotalLength))
return 1;
if (memcmp(udev->bos->desc, old_bos->desc, len))
return 1;
}
/* Since the idVendor, idProduct, and bcdDevice values in the
* device descriptor haven't changed, we will assume the
* Manufacturer and Product strings haven't changed either.
* But the SerialNumber string could be different (e.g., a
* different flash card of the same brand).
*/
if (udev->serial)
serial_len = strlen(udev->serial) + 1;
len = serial_len;
for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
len = max(len, old_length);
}
buf = kmalloc(len, GFP_NOIO);
if (buf == NULL) {
dev_err(&udev->dev, "no mem to re-read configs after reset\n");
/* assume the worst */
return 1;
}
for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf,
old_length);
if (length != old_length) {
dev_dbg(&udev->dev, "config index %d, error %d\n",
index, length);
changed = 1;
break;
}
if (memcmp (buf, udev->rawdescriptors[index], old_length)
!= 0) {
dev_dbg(&udev->dev, "config index %d changed (#%d)\n",
index,
((struct usb_config_descriptor *) buf)->
bConfigurationValue);
changed = 1;
break;
}
}
if (!changed && serial_len) {
length = usb_string(udev, udev->descriptor.iSerialNumber,
buf, serial_len);
if (length + 1 != serial_len) {
dev_dbg(&udev->dev, "serial string error %d\n",
length);
changed = 1;
} else if (memcmp(buf, udev->serial, length) != 0) {
dev_dbg(&udev->dev, "serial string changed\n");
changed = 1;
}
}
kfree(buf);
return changed;
}
/**
* usb_reset_and_verify_device - perform a USB port reset to reinitialize a device
* @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
*
* WARNING - don't use this routine to reset a composite device
* (one with multiple interfaces owned by separate drivers)!
* Use usb_reset_device() instead.
*
* Do a port reset, reassign the device's address, and establish its
* former operating configuration. If the reset fails, or the device's
* descriptors change from their values before the reset, or the original
* configuration and altsettings cannot be restored, a flag will be set
* telling hub_wq to pretend the device has been disconnected and then
* re-connected. All drivers will be unbound, and the device will be
* re-enumerated and probed all over again.
*
* Return: 0 if the reset succeeded, -ENODEV if the device has been
* flagged for logical disconnection, or some other negative error code
* if the reset wasn't even attempted.
*
* Note:
* The caller must own the device lock and the port lock; the latter is
* taken by usb_reset_device(). For example, it's safe to use
* usb_reset_device() from a driver probe() routine after downloading
* new firmware. For calls that might not occur during probe(), drivers
* should lock the device using usb_lock_device_for_reset().
*
* Locking exception: This routine may also be called from within an
* autoresume handler. Such usage won't conflict with other tasks
* holding the device lock because these tasks should always call
* usb_autopm_resume_device(), thereby preventing any unwanted
* autoresume. The autoresume handler is expected to have already
* acquired the port lock before calling this routine.
*/
static int usb_reset_and_verify_device(struct usb_device *udev)
{
struct usb_device *parent_hdev = udev->parent;
struct usb_hub *parent_hub;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
struct usb_device_descriptor descriptor = udev->descriptor;
struct usb_host_bos *bos;
int i, j, ret = 0;
int port1 = udev->portnum;
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
udev->state);
return -EINVAL;
}
if (!parent_hdev)
return -EISDIR;
parent_hub = usb_hub_to_struct_hub(parent_hdev);
/* Disable USB2 hardware LPM.
* It will be re-enabled by the enumeration process.
*/
if (udev->usb2_hw_lpm_enabled == 1)
usb_set_usb2_hardware_lpm(udev, 0);
bos = udev->bos;
udev->bos = NULL;
/* Disable LPM and LTM while we reset the device and reinstall the alt
* settings. Device-initiated LPM settings, and system exit latency
* settings are cleared when the device is reset, so we have to set
* them up again.
*/
ret = usb_unlocked_disable_lpm(udev);
if (ret) {
dev_err(&udev->dev, "%s Failed to disable LPM.\n", __func__);
goto re_enumerate;
}
ret = usb_disable_ltm(udev);
if (ret) {
dev_err(&udev->dev, "%s Failed to disable LTM.\n",
__func__);
goto re_enumerate;
}
for (i = 0; i < SET_CONFIG_TRIES; ++i) {
/* ep0 maxpacket size may change; let the HCD know about it.
* Other endpoints will be handled by re-enumeration. */
usb_ep0_reinit(udev);
ret = hub_port_init(parent_hub, udev, port1, i);
if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
break;
}
if (ret < 0)
goto re_enumerate;
/* Device might have changed firmware (DFU or similar) */
if (descriptors_changed(udev, &descriptor, bos)) {
dev_info(&udev->dev, "device firmware changed\n");
udev->descriptor = descriptor; /* for disconnect() calls */
goto re_enumerate;
}
/* Restore the device's previous configuration */
if (!udev->actconfig)
goto done;
mutex_lock(hcd->bandwidth_mutex);
ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL);
if (ret < 0) {
dev_warn(&udev->dev,
"Busted HC? Not enough HCD resources for "
"old configuration.\n");
mutex_unlock(hcd->bandwidth_mutex);
goto re_enumerate;
}
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_CONFIGURATION, 0,
udev->actconfig->desc.bConfigurationValue, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (ret < 0) {
dev_err(&udev->dev,
"can't restore configuration #%d (error=%d)\n",
udev->actconfig->desc.bConfigurationValue, ret);
mutex_unlock(hcd->bandwidth_mutex);
goto re_enumerate;
}
mutex_unlock(hcd->bandwidth_mutex);
usb_set_device_state(udev, USB_STATE_CONFIGURED);
/* Put interfaces back into the same altsettings as before.
* Don't bother to send the Set-Interface request for interfaces
* that were already in altsetting 0; besides being unnecessary,
* many devices can't handle it. Instead just reset the host-side
* endpoint state.
*/
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
struct usb_host_config *config = udev->actconfig;
struct usb_interface *intf = config->interface[i];
struct usb_interface_descriptor *desc;
desc = &intf->cur_altsetting->desc;
if (desc->bAlternateSetting == 0) {
usb_disable_interface(udev, intf, true);
usb_enable_interface(udev, intf, true);
ret = 0;
} else {
/* Let the bandwidth allocation function know that this
* device has been reset, and it will have to use
* alternate setting 0 as the current alternate setting.
*/
intf->resetting_device = 1;
ret = usb_set_interface(udev, desc->bInterfaceNumber,
desc->bAlternateSetting);
intf->resetting_device = 0;
}
if (ret < 0) {
dev_err(&udev->dev, "failed to restore interface %d "
"altsetting %d (error=%d)\n",
desc->bInterfaceNumber,
desc->bAlternateSetting,
ret);
goto re_enumerate;
}
/* Resetting also frees any allocated streams */
for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++)
intf->cur_altsetting->endpoint[j].streams = 0;
}
done:
/* Now that the alt settings are re-installed, enable LTM and LPM. */
usb_set_usb2_hardware_lpm(udev, 1);
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
usb_release_bos_descriptor(udev);
udev->bos = bos;
return 0;
re_enumerate:
/* LPM state doesn't matter when we're about to destroy the device. */
hub_port_logical_disconnect(parent_hub, port1);
usb_release_bos_descriptor(udev);
udev->bos = bos;
return -ENODEV;
}
/**
* usb_reset_device - warn interface drivers and perform a USB port reset
* @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
*
* Warns all drivers bound to registered interfaces (using their pre_reset
* method), performs the port reset, and then lets the drivers know that
* the reset is over (using their post_reset method).
*
* Return: The same as for usb_reset_and_verify_device().
*
* Note:
* The caller must own the device lock. For example, it's safe to use
* this from a driver probe() routine after downloading new firmware.
* For calls that might not occur during probe(), drivers should lock
* the device using usb_lock_device_for_reset().
*
* If an interface is currently being probed or disconnected, we assume
* its driver knows how to handle resets. For all other interfaces,
* if the driver doesn't have pre_reset and post_reset methods then
* we attempt to unbind it and rebind afterward.
*/
int usb_reset_device(struct usb_device *udev)
{
int ret;
int i;
unsigned int noio_flag;
struct usb_port *port_dev;
struct usb_host_config *config = udev->actconfig;
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
udev->state);
return -EINVAL;
}
if (!udev->parent) {
/* this requires hcd-specific logic; see ohci_restart() */
dev_dbg(&udev->dev, "%s for root hub!\n", __func__);
return -EISDIR;
}
port_dev = hub->ports[udev->portnum - 1];
/*
* Don't allocate memory with GFP_KERNEL in current
* context to avoid a possible deadlock if a usb mass
* storage interface or usbnet interface (iSCSI case)
* is included in the current configuration. The easiest
* approach is to do it for every device reset,
* because the device 'memalloc_noio' flag may not have
* been set before resetting the usb device.
*/
noio_flag = memalloc_noio_save();
/* Prevent autosuspend during the reset */
usb_autoresume_device(udev);
if (config) {
for (i = 0; i < config->desc.bNumInterfaces; ++i) {
struct usb_interface *cintf = config->interface[i];
struct usb_driver *drv;
int unbind = 0;
if (cintf->dev.driver) {
drv = to_usb_driver(cintf->dev.driver);
if (drv->pre_reset && drv->post_reset)
unbind = (drv->pre_reset)(cintf);
else if (cintf->condition ==
USB_INTERFACE_BOUND)
unbind = 1;
if (unbind)
usb_forced_unbind_intf(cintf);
}
}
}
usb_lock_port(port_dev);
ret = usb_reset_and_verify_device(udev);
usb_unlock_port(port_dev);
if (config) {
for (i = config->desc.bNumInterfaces - 1; i >= 0; --i) {
struct usb_interface *cintf = config->interface[i];
struct usb_driver *drv;
int rebind = cintf->needs_binding;
if (!rebind && cintf->dev.driver) {
drv = to_usb_driver(cintf->dev.driver);
if (drv->post_reset)
rebind = (drv->post_reset)(cintf);
else if (cintf->condition ==
USB_INTERFACE_BOUND)
rebind = 1;
if (rebind)
cintf->needs_binding = 1;
}
}
usb_unbind_and_rebind_marked_interfaces(udev);
}
usb_autosuspend_device(udev);
memalloc_noio_restore(noio_flag);
return ret;
}
EXPORT_SYMBOL_GPL(usb_reset_device);
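/*
 * Illustrative usage sketch (an assumption, not part of this file): a driver
 * resetting its device outside of probe(), e.g. after a firmware download,
 * must take the device lock via usb_lock_device_for_reset() first. The
 * driver function name is hypothetical.
 */
#if 0
static int example_reset_after_firmware(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	int ret;

	ret = usb_lock_device_for_reset(udev, intf);
	if (ret < 0)
		return ret;

	ret = usb_reset_device(udev);
	usb_unlock_device(udev);
	return ret;
}
#endif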
/**
* usb_queue_reset_device - Reset a USB device from an atomic context
* @iface: USB interface belonging to the device to reset
*
* This function can be used to reset a USB device from an atomic
* context, where usb_reset_device() won't work (as it blocks).
*
* Doing a reset via this method is functionally equivalent to calling
* usb_reset_device(), except for the fact that it is delayed to a
* workqueue. This means that any drivers bound to other interfaces
* might be unbound, as well as users from usbfs in user space.
*
* Corner cases:
*
* - Scheduling two resets at the same time from two different drivers
* attached to two different interfaces of the same device is
* possible; depending on how the driver attached to each interface
* handles ->pre_reset(), the second reset might happen or not.
*
* - If the reset is delayed so long that the interface is unbound from
* its driver, the reset will be skipped.
*
* - This function can be called during .probe(). It can also be called
* during .disconnect(), but doing so is pointless because the reset
* will not occur. If you really want to reset the device during
* .disconnect(), call usb_reset_device() directly -- but watch out
* for nested unbinding issues!
*/
void usb_queue_reset_device(struct usb_interface *iface)
{
if (schedule_work(&iface->reset_ws))
usb_get_intf(iface);
}
EXPORT_SYMBOL_GPL(usb_queue_reset_device);
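/*
 * Illustrative usage sketch (an assumption): from atomic context, such as an
 * URB completion handler that notices a wedged device, a driver can only
 * queue the reset. The completion handler and its context layout below are
 * hypothetical.
 */
#if 0
static void example_urb_complete(struct urb *urb)
{
	struct usb_interface *intf = urb->context;	/* hypothetical layout */

	if (urb->status == -EPROTO || urb->status == -EILSEQ)
		usb_queue_reset_device(intf);	/* safe in interrupt context */
}
#endif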
/**
* usb_hub_find_child - Get the pointer of child device
* attached to the port which is specified by @port1.
* @hdev: USB device belonging to the usb hub
* @port1: port num to indicate which port the child device
* is attached to.
*
* USB drivers call this function to get hub's child device
* pointer.
*
* Return: %NULL if the input parameters are invalid, otherwise a pointer
* to the child's usb_device.
*/
struct usb_device *usb_hub_find_child(struct usb_device *hdev,
int port1)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
if (port1 < 1 || port1 > hdev->maxchild)
return NULL;
return hub->ports[port1 - 1]->child;
}
EXPORT_SYMBOL_GPL(usb_hub_find_child);
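/*
 * Illustrative usage sketch (an assumption, not part of this file): walking
 * all children of a hub by probing every valid port number; ports without an
 * attached device simply return NULL.
 */
#if 0
static void example_list_children(struct usb_device *hdev)
{
	struct usb_device *child;
	int port1;

	for (port1 = 1; port1 <= hdev->maxchild; port1++) {
		child = usb_hub_find_child(hdev, port1);
		if (child)
			dev_info(&hdev->dev, "port %d: device number %d\n",
				 port1, child->devnum);
	}
}
#endif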
void usb_hub_adjust_deviceremovable(struct usb_device *hdev,
struct usb_hub_descriptor *desc)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
enum usb_port_connect_type connect_type;
int i;
if (!hub)
return;
if (!hub_is_superspeed(hdev)) {
for (i = 1; i <= hdev->maxchild; i++) {
struct usb_port *port_dev = hub->ports[i - 1];
connect_type = port_dev->connect_type;
if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
u8 mask = 1 << (i%8);
if (!(desc->u.hs.DeviceRemovable[i/8] & mask)) {
dev_dbg(&port_dev->dev, "DeviceRemovable is changed to 1 according to platform information.\n");
desc->u.hs.DeviceRemovable[i/8] |= mask;
}
}
}
} else {
u16 port_removable = le16_to_cpu(desc->u.ss.DeviceRemovable);
for (i = 1; i <= hdev->maxchild; i++) {
struct usb_port *port_dev = hub->ports[i - 1];
connect_type = port_dev->connect_type;
if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
u16 mask = 1 << i;
if (!(port_removable & mask)) {
dev_dbg(&port_dev->dev, "DeviceRemovable is changed to 1 according to platform information.\n");
port_removable |= mask;
}
}
}
desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable);
}
}
#ifdef CONFIG_ACPI
/**
* usb_get_hub_port_acpi_handle - Get the usb port's acpi handle
* @hdev: USB device belonging to the usb hub
* @port1: port num of the port
*
* Return: Port's acpi handle if successful, %NULL if params are
* invalid.
*/
acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev,
int port1)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
if (!hub)
return NULL;
return ACPI_HANDLE(&hub->ports[port1 - 1]->dev);
}
#endif
| gpl-2.0 |
TeamWin/android_kernel_oppo_r7f | arch/arm64/mm/hugetlbpage.c | 358 | 1861 | /*
* arch/arm64/mm/hugetlbpage.c
*
* Copyright (C) 2013 Linaro Ltd.
*
* Based on arch/x86/mm/hugetlbpage.c.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#ifndef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
return 0;
}
#endif
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write)
{
return ERR_PTR(-EINVAL);
}
int pmd_huge(pmd_t pmd)
{
return !(pmd_val(pmd) & PMD_TABLE_BIT);
}
int pud_huge(pud_t pud)
{
return !(pud_val(pud) & PUD_TABLE_BIT);
}
int pmd_huge_support(void)
{
return 1;
}
static __init int setup_hugepagesz(char *opt)
{
unsigned long ps = memparse(opt, &opt);
if (ps == PMD_SIZE) {
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
} else {
pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20);
return 0;
}
return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
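/*
 * Illustrative usage note (assumes a 4K base page configuration): on arm64
 * with 4K pages PMD_SIZE is 2M and PUD_SIZE is 1G, so booting with e.g.
 * "hugepagesz=2M hugepages=64" registers the 2M hstate here and lets the
 * generic hugetlb code pre-allocate 64 such pages; any other size is
 * rejected with the pr_err() above.
 */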
| gpl-2.0 |
Hundsbuah/tf300t_10_6_1_8_4 | fs/logfs/dir.c | 358 | 21375 | /*
* fs/logfs/dir.c - directory-related code
*
* As should be obvious for Linux kernel code, license is GPLv2
*
* Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
*/
#include "logfs.h"
#include <linux/slab.h>
/*
* Atomic dir operations
*
* Directory operations are by default not atomic. Dentries and Inodes are
* created/removed/altered in separate operations. Therefore we need to do
* a small amount of journaling.
*
* Create, link, mkdir, mknod and symlink all share the same function to do
* the work: __logfs_create. This function works in two atomic steps:
* 1. allocate inode (remember in journal)
* 2. allocate dentry (clear journal)
*
* We can only get interrupted between the two steps; in that window the
* inode we just created is simply stored in the anchor. On next mount, if
* we were interrupted, we delete the inode. From a user's point of view the
* operation never happened.
*
* Unlink and rmdir also share the same function: unlink. Again, this
* function works in two atomic steps
* 1. remove dentry (remember inode in journal)
* 2. unlink inode (clear journal)
*
* And again, on the next mount, if we were interrupted, we delete the inode.
* From a user's point of view the operation succeeded.
*
* Rename is the real pain to deal with, harder than all the other methods
* combined. Depending on the circumstances we can run into three cases.
* A "target rename" where the target dentry already existed, a "local
* rename" where both parent directories are identical or a "cross-directory
* rename" in the remaining case.
*
* Local rename is atomic, as the old dentry is simply rewritten with a new
* name.
*
* Cross-directory rename works in two steps, similar to __logfs_create and
* logfs_unlink:
* 1. Write new dentry (remember old dentry in journal)
* 2. Remove old dentry (clear journal)
*
* Here we remember a dentry instead of an inode. On next mount, if we were
* interrupted, we delete the dentry. From a user's point of view, the
* operation succeeded.
*
* Target rename works in three atomic steps:
* 1. Attach old inode to new dentry (remember old dentry and new inode)
* 2. Remove old dentry (still remember the new inode)
* 3. Remove victim inode
*
* Here we remember both an inode and a dentry. If we get interrupted
* between steps 1 and 2, we delete both the dentry and the inode. If
* we get interrupted between steps 2 and 3, we delete just the inode.
* In either case, the remaining objects are deleted on next mount. From
* a user's point of view, the operation succeeded.
*/
static int write_dir(struct inode *dir, struct logfs_disk_dentry *dd,
loff_t pos)
{
return logfs_inode_write(dir, dd, sizeof(*dd), pos, WF_LOCK, NULL);
}
static int write_inode(struct inode *inode)
{
return __logfs_write_inode(inode, WF_LOCK);
}
static s64 dir_seek_data(struct inode *inode, s64 pos)
{
s64 new_pos = logfs_seek_data(inode, pos);
return max(pos, new_pos - 1);
}
static int beyond_eof(struct inode *inode, loff_t bix)
{
loff_t pos = bix << inode->i_sb->s_blocksize_bits;
return pos >= i_size_read(inode);
}
/*
* Prime value was chosen to be roughly 256 + 26. r5 hash uses 11,
* so short names (len <= 9) don't even occupy the complete 32bit name
* space. A prime >256 ensures short names quickly spread the 32bit
* name space. Add about 26 for the estimated amount of information
* of each character and pick a prime nearby, preferably a bit-sparse
* one.
*/
static u32 hash_32(const char *s, int len, u32 seed)
{
u32 hash = seed;
int i;
for (i = 0; i < len; i++)
hash = hash * 293 + s[i];
return hash;
}
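/*
 * Worked example (illustrative): hash_32("ab", 2, 0) evaluates to
 * (0 * 293 + 'a') * 293 + 'b' = 97 * 293 + 98 = 28519; hash_index() below
 * then folds such a value into a block index for each probing round.
 */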
/*
* We have to satisfy several conflicting requirements here. Small
* directories should stay fairly compact and not require too many
* indirect blocks. The number of possible locations for a given hash
* should be small to make lookup() fast. And we should try hard not
* to overflow the 32bit name space or nfs and 32bit host systems will
* be unhappy.
*
* So we use the following scheme. First we reduce the hash to 0..15
* and try a direct block. If that is occupied we reduce the hash to
* 16..255 and try an indirect block. Same for 2x and 3x indirect
* blocks. Lastly we reduce the hash to 0x800_0000 .. 0xffff_ffff,
* but use buckets containing eight entries instead of a single one.
*
* Using 16 entries should allow for a reasonable amount of hash
* collisions, so the 32bit name space can be packed fairly tight
* before overflowing. Oh, and currently we don't overflow but return
* an error.
*
* How likely are collisions? Doing the appropriate math is beyond me
* and the Bronstein textbook. But running a test program to brute
* force collisions for a couple of days showed that on average the
* first collision occurs after 598M entries, with 290M being the
* smallest result. Obviously 21 entries could already cause a
* collision if all entries are carefully chosen.
*/
static pgoff_t hash_index(u32 hash, int round)
{
u32 i0_blocks = I0_BLOCKS;
u32 i1_blocks = I1_BLOCKS;
u32 i2_blocks = I2_BLOCKS;
u32 i3_blocks = I3_BLOCKS;
switch (round) {
case 0:
return hash % i0_blocks;
case 1:
return i0_blocks + hash % (i1_blocks - i0_blocks);
case 2:
return i1_blocks + hash % (i2_blocks - i1_blocks);
case 3:
return i2_blocks + hash % (i3_blocks - i2_blocks);
case 4 ... 19:
return i3_blocks + 16 * (hash % (((1<<31) - i3_blocks) / 16))
+ round - 4;
}
BUG();
}
static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
{
struct qstr *name = &dentry->d_name;
struct page *page;
struct logfs_disk_dentry *dd;
u32 hash = hash_32(name->name, name->len, 0);
pgoff_t index;
int round;
if (name->len > LOGFS_MAX_NAMELEN)
return ERR_PTR(-ENAMETOOLONG);
for (round = 0; round < 20; round++) {
index = hash_index(hash, round);
if (beyond_eof(dir, index))
return NULL;
if (!logfs_exist_block(dir, index))
continue;
page = read_cache_page(dir->i_mapping, index,
(filler_t *)logfs_readpage, NULL);
if (IS_ERR(page))
return page;
dd = kmap_atomic(page, KM_USER0);
BUG_ON(dd->namelen == 0);
if (name->len != be16_to_cpu(dd->namelen) ||
memcmp(name->name, dd->name, name->len)) {
kunmap_atomic(dd, KM_USER0);
page_cache_release(page);
continue;
}
kunmap_atomic(dd, KM_USER0);
return page;
}
return NULL;
}
static int logfs_remove_inode(struct inode *inode)
{
int ret;
inode->i_nlink--;
ret = write_inode(inode);
LOGFS_BUG_ON(ret, inode->i_sb);
return ret;
}
static void abort_transaction(struct inode *inode, struct logfs_transaction *ta)
{
if (logfs_inode(inode)->li_block)
logfs_inode(inode)->li_block->ta = NULL;
kfree(ta);
}
static int logfs_unlink(struct inode *dir, struct dentry *dentry)
{
struct logfs_super *super = logfs_super(dir->i_sb);
struct inode *inode = dentry->d_inode;
struct logfs_transaction *ta;
struct page *page;
pgoff_t index;
int ret;
ta = kzalloc(sizeof(*ta), GFP_KERNEL);
if (!ta)
return -ENOMEM;
ta->state = UNLINK_1;
ta->ino = inode->i_ino;
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
page = logfs_get_dd_page(dir, dentry);
if (!page) {
kfree(ta);
return -ENOENT;
}
if (IS_ERR(page)) {
kfree(ta);
return PTR_ERR(page);
}
index = page->index;
page_cache_release(page);
mutex_lock(&super->s_dirop_mutex);
logfs_add_transaction(dir, ta);
ret = logfs_delete(dir, index, NULL);
if (!ret)
ret = write_inode(dir);
if (ret) {
abort_transaction(dir, ta);
printk(KERN_ERR"LOGFS: unable to delete inode\n");
goto out;
}
ta->state = UNLINK_2;
logfs_add_transaction(inode, ta);
ret = logfs_remove_inode(inode);
out:
mutex_unlock(&super->s_dirop_mutex);
return ret;
}
static inline int logfs_empty_dir(struct inode *dir)
{
u64 data;
data = logfs_seek_data(dir, 0) << dir->i_sb->s_blocksize_bits;
return data >= i_size_read(dir);
}
static int logfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
if (!logfs_empty_dir(inode))
return -ENOTEMPTY;
return logfs_unlink(dir, dentry);
}
/* FIXME: readdir currently has its own dir_walk code. I don't see a good
* way to combine the two copies */
#define IMPLICIT_NODES 2
static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
{
struct inode *dir = file->f_dentry->d_inode;
loff_t pos = file->f_pos - IMPLICIT_NODES;
struct page *page;
struct logfs_disk_dentry *dd;
int full;
BUG_ON(pos < 0);
for (;; pos++) {
if (beyond_eof(dir, pos))
break;
if (!logfs_exist_block(dir, pos)) {
/* deleted dentry */
pos = dir_seek_data(dir, pos);
continue;
}
page = read_cache_page(dir->i_mapping, pos,
(filler_t *)logfs_readpage, NULL);
if (IS_ERR(page))
return PTR_ERR(page);
dd = kmap(page);
BUG_ON(dd->namelen == 0);
full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen),
pos, be64_to_cpu(dd->ino), dd->type);
kunmap(page);
page_cache_release(page);
if (full)
break;
}
file->f_pos = pos + IMPLICIT_NODES;
return 0;
}
static int logfs_readdir(struct file *file, void *buf, filldir_t filldir)
{
struct inode *inode = file->f_dentry->d_inode;
ino_t pino = parent_ino(file->f_dentry);
int err;
if (file->f_pos < 0)
return -EINVAL;
if (file->f_pos == 0) {
if (filldir(buf, ".", 1, 1, inode->i_ino, DT_DIR) < 0)
return 0;
file->f_pos++;
}
if (file->f_pos == 1) {
if (filldir(buf, "..", 2, 2, pino, DT_DIR) < 0)
return 0;
file->f_pos++;
}
err = __logfs_readdir(file, buf, filldir);
return err;
}
static void logfs_set_name(struct logfs_disk_dentry *dd, struct qstr *name)
{
dd->namelen = cpu_to_be16(name->len);
memcpy(dd->name, name->name, name->len);
}
static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
struct page *page;
struct logfs_disk_dentry *dd;
pgoff_t index;
u64 ino = 0;
struct inode *inode;
page = logfs_get_dd_page(dir, dentry);
if (IS_ERR(page))
return ERR_CAST(page);
if (!page) {
d_add(dentry, NULL);
return NULL;
}
index = page->index;
dd = kmap_atomic(page, KM_USER0);
ino = be64_to_cpu(dd->ino);
kunmap_atomic(dd, KM_USER0);
page_cache_release(page);
inode = logfs_iget(dir->i_sb, ino);
if (IS_ERR(inode))
printk(KERN_ERR"LogFS: Cannot read inode #%llx for dentry (%lx, %lx)n",
ino, dir->i_ino, index);
return d_splice_alias(inode, dentry);
}
static void grow_dir(struct inode *dir, loff_t index)
{
index = (index + 1) << dir->i_sb->s_blocksize_bits;
if (i_size_read(dir) < index)
i_size_write(dir, index);
}
static int logfs_write_dir(struct inode *dir, struct dentry *dentry,
struct inode *inode)
{
struct page *page;
struct logfs_disk_dentry *dd;
u32 hash = hash_32(dentry->d_name.name, dentry->d_name.len, 0);
pgoff_t index;
int round, err;
for (round = 0; round < 20; round++) {
index = hash_index(hash, round);
if (logfs_exist_block(dir, index))
continue;
page = find_or_create_page(dir->i_mapping, index, GFP_KERNEL);
if (!page)
return -ENOMEM;
dd = kmap_atomic(page, KM_USER0);
memset(dd, 0, sizeof(*dd));
dd->ino = cpu_to_be64(inode->i_ino);
dd->type = logfs_type(inode);
logfs_set_name(dd, &dentry->d_name);
kunmap_atomic(dd, KM_USER0);
err = logfs_write_buf(dir, page, WF_LOCK);
unlock_page(page);
page_cache_release(page);
if (!err)
grow_dir(dir, index);
return err;
}
/* FIXME: Is there a better return value? In most cases neither
* the filesystem nor the directory are full. But we have had
* too many collisions for this particular hash and no fallback.
*/
return -ENOSPC;
}
static int __logfs_create(struct inode *dir, struct dentry *dentry,
struct inode *inode, const char *dest, long destlen)
{
struct logfs_super *super = logfs_super(dir->i_sb);
struct logfs_inode *li = logfs_inode(inode);
struct logfs_transaction *ta;
int ret;
ta = kzalloc(sizeof(*ta), GFP_KERNEL);
if (!ta) {
inode->i_nlink--;
iput(inode);
return -ENOMEM;
}
ta->state = CREATE_1;
ta->ino = inode->i_ino;
mutex_lock(&super->s_dirop_mutex);
logfs_add_transaction(inode, ta);
if (dest) {
/* symlink */
ret = logfs_inode_write(inode, dest, destlen, 0, WF_LOCK, NULL);
if (!ret)
ret = write_inode(inode);
} else {
/* creat/mkdir/mknod */
ret = write_inode(inode);
}
if (ret) {
abort_transaction(inode, ta);
li->li_flags |= LOGFS_IF_STILLBORN;
/* FIXME: truncate symlink */
inode->i_nlink--;
iput(inode);
goto out;
}
ta->state = CREATE_2;
logfs_add_transaction(dir, ta);
ret = logfs_write_dir(dir, dentry, inode);
/* sync directory */
if (!ret)
ret = write_inode(dir);
if (ret) {
logfs_del_transaction(dir, ta);
ta->state = CREATE_2;
logfs_add_transaction(inode, ta);
logfs_remove_inode(inode);
iput(inode);
goto out;
}
d_instantiate(dentry, inode);
out:
mutex_unlock(&super->s_dirop_mutex);
return ret;
}
static int logfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
struct inode *inode;
/*
* FIXME: why do we have to fill in S_IFDIR, while the mode is
* correct for mknod, creat, etc.? Smells like the vfs *should*
* do it for us but for some reason fails to do so.
*/
inode = logfs_new_inode(dir, S_IFDIR | mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_op = &logfs_dir_iops;
inode->i_fop = &logfs_dir_fops;
return __logfs_create(dir, dentry, inode, NULL, 0);
}
static int logfs_create(struct inode *dir, struct dentry *dentry, int mode,
struct nameidata *nd)
{
struct inode *inode;
inode = logfs_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_op = &logfs_reg_iops;
inode->i_fop = &logfs_reg_fops;
inode->i_mapping->a_ops = &logfs_reg_aops;
return __logfs_create(dir, dentry, inode, NULL, 0);
}
static int logfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
dev_t rdev)
{
struct inode *inode;
if (dentry->d_name.len > LOGFS_MAX_NAMELEN)
return -ENAMETOOLONG;
inode = logfs_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
init_special_inode(inode, mode, rdev);
return __logfs_create(dir, dentry, inode, NULL, 0);
}
static int logfs_symlink(struct inode *dir, struct dentry *dentry,
const char *target)
{
struct inode *inode;
size_t destlen = strlen(target) + 1;
if (destlen > dir->i_sb->s_blocksize)
return -ENAMETOOLONG;
inode = logfs_new_inode(dir, S_IFLNK | 0777);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_op = &logfs_symlink_iops;
inode->i_mapping->a_ops = &logfs_reg_aops;
return __logfs_create(dir, dentry, inode, target, destlen);
}
static int logfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
struct inode *inode = old_dentry->d_inode;
if (inode->i_nlink >= LOGFS_LINK_MAX)
return -EMLINK;
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
ihold(inode);
inode->i_nlink++;
mark_inode_dirty_sync(inode);
return __logfs_create(dir, dentry, inode, NULL, 0);
}
static int logfs_get_dd(struct inode *dir, struct dentry *dentry,
struct logfs_disk_dentry *dd, loff_t *pos)
{
struct page *page;
void *map;
page = logfs_get_dd_page(dir, dentry);
if (IS_ERR(page))
return PTR_ERR(page);
*pos = page->index;
map = kmap_atomic(page, KM_USER0);
memcpy(dd, map, sizeof(*dd));
kunmap_atomic(map, KM_USER0);
page_cache_release(page);
return 0;
}
static int logfs_delete_dd(struct inode *dir, loff_t pos)
{
/*
* Getting called with pos somewhere beyond eof is either a goofup
* within this file or means someone maliciously edited the
* (crc-protected) journal.
*/
BUG_ON(beyond_eof(dir, pos));
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
log_dir(" Delete dentry (%lx, %llx)\n", dir->i_ino, pos);
return logfs_delete(dir, pos, NULL);
}
/*
* Cross-directory rename, target does not exist. Just a little nasty.
* Create a new dentry in the target dir, then remove the old dentry,
* all the while taking care to remember our operation in the journal.
*/
static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct logfs_super *super = logfs_super(old_dir->i_sb);
struct logfs_disk_dentry dd;
struct logfs_transaction *ta;
loff_t pos;
int err;
/* 1. locate source dd */
err = logfs_get_dd(old_dir, old_dentry, &dd, &pos);
if (err)
return err;
ta = kzalloc(sizeof(*ta), GFP_KERNEL);
if (!ta)
return -ENOMEM;
ta->state = CROSS_RENAME_1;
ta->dir = old_dir->i_ino;
ta->pos = pos;
/* 2. write target dd */
mutex_lock(&super->s_dirop_mutex);
logfs_add_transaction(new_dir, ta);
err = logfs_write_dir(new_dir, new_dentry, old_dentry->d_inode);
if (!err)
err = write_inode(new_dir);
if (err) {
super->s_rename_dir = 0;
super->s_rename_pos = 0;
abort_transaction(new_dir, ta);
goto out;
}
/* 3. remove source dd */
ta->state = CROSS_RENAME_2;
logfs_add_transaction(old_dir, ta);
err = logfs_delete_dd(old_dir, pos);
if (!err)
err = write_inode(old_dir);
LOGFS_BUG_ON(err, old_dir->i_sb);
out:
mutex_unlock(&super->s_dirop_mutex);
return err;
}
static int logfs_replace_inode(struct inode *dir, struct dentry *dentry,
struct logfs_disk_dentry *dd, struct inode *inode)
{
loff_t pos;
int err;
err = logfs_get_dd(dir, dentry, dd, &pos);
if (err)
return err;
dd->ino = cpu_to_be64(inode->i_ino);
dd->type = logfs_type(inode);
err = write_dir(dir, dd, pos);
if (err)
return err;
log_dir("Replace dentry (%lx, %llx) %s -> %llx\n", dir->i_ino, pos,
dd->name, be64_to_cpu(dd->ino));
return write_inode(dir);
}
/* Target dentry exists - the worst case. We need to attach the source
* inode to the target dentry, then remove the orphaned target inode and
* source dentry.
*/
static int logfs_rename_target(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct logfs_super *super = logfs_super(old_dir->i_sb);
struct inode *old_inode = old_dentry->d_inode;
struct inode *new_inode = new_dentry->d_inode;
int isdir = S_ISDIR(old_inode->i_mode);
struct logfs_disk_dentry dd;
struct logfs_transaction *ta;
loff_t pos;
int err;
BUG_ON(isdir != S_ISDIR(new_inode->i_mode));
if (isdir) {
if (!logfs_empty_dir(new_inode))
return -ENOTEMPTY;
}
/* 1. locate source dd */
err = logfs_get_dd(old_dir, old_dentry, &dd, &pos);
if (err)
return err;
ta = kzalloc(sizeof(*ta), GFP_KERNEL);
if (!ta)
return -ENOMEM;
ta->state = TARGET_RENAME_1;
ta->dir = old_dir->i_ino;
ta->pos = pos;
ta->ino = new_inode->i_ino;
/* 2. attach source inode to target dd */
mutex_lock(&super->s_dirop_mutex);
logfs_add_transaction(new_dir, ta);
err = logfs_replace_inode(new_dir, new_dentry, &dd, old_inode);
if (err) {
super->s_rename_dir = 0;
super->s_rename_pos = 0;
super->s_victim_ino = 0;
abort_transaction(new_dir, ta);
goto out;
}
/* 3. remove source dd */
ta->state = TARGET_RENAME_2;
logfs_add_transaction(old_dir, ta);
err = logfs_delete_dd(old_dir, pos);
if (!err)
err = write_inode(old_dir);
LOGFS_BUG_ON(err, old_dir->i_sb);
/* 4. remove target inode */
ta->state = TARGET_RENAME_3;
logfs_add_transaction(new_inode, ta);
err = logfs_remove_inode(new_inode);
out:
mutex_unlock(&super->s_dirop_mutex);
return err;
}
static int logfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
if (new_dentry->d_inode)
return logfs_rename_target(old_dir, old_dentry,
new_dir, new_dentry);
return logfs_rename_cross(old_dir, old_dentry, new_dir, new_dentry);
}
/* No locking done here, as this is called before .get_sb() returns. */
int logfs_replay_journal(struct super_block *sb)
{
struct logfs_super *super = logfs_super(sb);
struct inode *inode;
u64 ino, pos;
int err;
if (super->s_victim_ino) {
/* delete victim inode */
ino = super->s_victim_ino;
printk(KERN_INFO"LogFS: delete unmapped inode #%llx\n", ino);
inode = logfs_iget(sb, ino);
if (IS_ERR(inode))
goto fail;
LOGFS_BUG_ON(i_size_read(inode) > 0, sb);
super->s_victim_ino = 0;
err = logfs_remove_inode(inode);
iput(inode);
if (err) {
super->s_victim_ino = ino;
goto fail;
}
}
if (super->s_rename_dir) {
/* delete old dd from rename */
ino = super->s_rename_dir;
pos = super->s_rename_pos;
printk(KERN_INFO"LogFS: delete unbacked dentry (%llx, %llx)\n",
ino, pos);
inode = logfs_iget(sb, ino);
if (IS_ERR(inode))
goto fail;
super->s_rename_dir = 0;
super->s_rename_pos = 0;
err = logfs_delete_dd(inode, pos);
iput(inode);
if (err) {
super->s_rename_dir = ino;
super->s_rename_pos = pos;
goto fail;
}
}
return 0;
fail:
LOGFS_BUG(sb);
return -EIO;
}
const struct inode_operations logfs_symlink_iops = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
};
const struct inode_operations logfs_dir_iops = {
.create = logfs_create,
.link = logfs_link,
.lookup = logfs_lookup,
.mkdir = logfs_mkdir,
.mknod = logfs_mknod,
.rename = logfs_rename,
.rmdir = logfs_rmdir,
.symlink = logfs_symlink,
.unlink = logfs_unlink,
};
const struct file_operations logfs_dir_fops = {
.fsync = logfs_fsync,
.unlocked_ioctl = logfs_ioctl,
.readdir = logfs_readdir,
.read = generic_read_dir,
.llseek = default_llseek,
};
| gpl-2.0 |
Cryptoo/kernel | drivers/net/ethernet/packetengines/hamachi.c | 614 | 64048 | /* hamachi.c: A Packet Engines GNIC-II Gigabit Ethernet driver for Linux. */
/*
Written 1998-2000 by Donald Becker.
Updates 2000 by Keith Underwood.
This software may be used and distributed according to the terms of
the GNU General Public License (GPL), incorporated herein by reference.
Drivers based on or derived from this code fall under the GPL and must
retain the authorship, copyright and license notice. This file is not
a complete program and may only be used when the entire operating
system is licensed under the GPL.
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
Annapolis MD 21403
This driver is for the Packet Engines GNIC-II PCI Gigabit Ethernet
adapter.
Support and updates available at
http://www.scyld.com/network/hamachi.html
[link no longer provides useful info -jgarzik]
or
http://www.parl.clemson.edu/~keithu/hamachi.html
*/
#define DRV_NAME "hamachi"
#define DRV_VERSION "2.1"
#define DRV_RELDATE "Sept 11, 2006"
/* A few user-configurable values. */
static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
#define final_version
#define hamachi_debug debug
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 40;
static int mtu;
/* Default values selected by testing on a dual processor PIII-450 */
/* These six interrupt control parameters may be set directly when loading the
* module, or through the rx_params and tx_params variables
*/
static int max_rx_latency = 0x11;
static int max_rx_gap = 0x05;
static int min_rx_pkt = 0x18;
static int max_tx_latency = 0x00;
static int max_tx_gap = 0x00;
static int min_tx_pkt = 0x30;
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
-Setting to > 1518 causes all frames to be copied
-Setting to 0 disables copies
*/
static int rx_copybreak;
/* An override for the hardware detection of bus width.
Set to 1 to force 32 bit PCI bus detection. Set to 4 to force 64 bit.
Add 2 to disable parity detection.
*/
static int force32;
/* Used to pass the media type, etc.
These exist for driver interoperability.
No media types are currently defined.
- The lower 4 bits are reserved for the media type.
- The next three bits may be set to one of the following:
0x00000000 : Autodetect PCI bus
0x00000010 : Force 32 bit PCI bus
0x00000020 : Disable parity detection
0x00000040 : Force 64 bit PCI bus
Default is autodetect
- The next bit can be used to force half-duplex. This is a bad
idea since no known implementations implement half-duplex, and,
in general, half-duplex for gigabit ethernet is a bad idea.
0x00000080 : Force half-duplex
Default is full-duplex.
- In the original driver, the ninth bit could be used to force
full-duplex. Maintain that for compatibility
0x00000200 : Force full-duplex
*/
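/* Example, derived from the bit definitions above: options=0x0210 requests
 * a forced 32 bit PCI bus (0x00000010) plus forced full-duplex (0x00000200),
 * while leaving the option unset keeps everything on autodetect.
 */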
#define MAX_UNITS 8 /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* The Hamachi chipset supports 3 parameters each for Rx and Tx
 * interrupt management. Parameters will be loaded as specified into
* the TxIntControl and RxIntControl registers.
*
* The registers are arranged as follows:
* 23 - 16 15 - 8 7 - 0
* _________________________________
* | min_pkt | max_gap | max_latency |
* ---------------------------------
* min_pkt : The minimum number of packets processed between
* interrupts.
* max_gap : The maximum inter-packet gap in units of 8.192 us
* max_latency : The absolute time between interrupts in units of 8.192 us
*
*/
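/* Worked example (illustration only, using the default module parameters
 * above): hamachi_init_one() packs the Rx side as
 *	rx_int_var = (min_rx_pkt << 16) | (max_rx_gap << 8) | max_rx_latency
 *		   = (0x18 << 16) | (0x05 << 8) | 0x11 = 0x00180511
 * unless rx_params[] supplies a pre-packed value.
 */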
static int rx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency.
The compiler will convert <unsigned>'%'<2^N> into a bit mask.
Making the Tx ring too large decreases the effectiveness of channel
bonding and packet priority.
There are no ill effects from too-large receive rings, except for
excessive memory usage */
/* Empirically it appears that the Tx ring needs to be a little bigger
for these Gbit adapters or you get into an overrun condition really
easily. Also, things appear to work a bit better in back-to-back
configurations if the Rx ring is 8 times the size of the Tx ring
*/
#define TX_RING_SIZE 64
#define RX_RING_SIZE 512
#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct hamachi_desc)
#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct hamachi_desc)
/*
* Enable netdev_ioctl. Added interrupt coalescing parameter adjustment.
* 2/19/99 Pete Wyckoff <wyckoff@ca.sandia.gov>
*/
/* play with 64-bit addrlen; seems to be a teensy bit slower --pw */
/* #define ADDRLEN 64 */
/*
* RX_CHECKSUM turns on card-generated receive checksum generation for
* TCP and UDP packets. Otherwise the upper layers do the calculation.
* 3/10/1999 Pete Wyckoff <wyckoff@ca.sandia.gov>
*/
#define RX_CHECKSUM
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5*HZ)
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/unaligned.h>
#include <asm/cache.h>
static const char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
" Some modifications by Eric kasten <kasten@nscl.msu.edu>\n"
" Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n";
/* IP_MF appears to be only defined in <netinet/ip.h>, however,
we need it for hardware checksumming support. FYI... some of
the definitions in <netinet/ip.h> conflict/duplicate those in
other linux headers causing many compiler warnings.
*/
#ifndef IP_MF
#define IP_MF 0x2000 /* IP more frags from <netinet/ip.h> */
#endif
/* Define IP_OFFSET to be IPOPT_OFFSET */
#ifndef IP_OFFSET
#ifdef IPOPT_OFFSET
#define IP_OFFSET IPOPT_OFFSET
#else
#define IP_OFFSET 2
#endif
#endif
#define RUN_AT(x) (jiffies + (x))
#ifndef ADDRLEN
#define ADDRLEN 32
#endif
/* Condensed bus+endian portability operations. */
#if ADDRLEN == 64
#define cpu_to_leXX(addr) cpu_to_le64(addr)
#define leXX_to_cpu(addr) le64_to_cpu(addr)
#else
#define cpu_to_leXX(addr) cpu_to_le32(addr)
#define leXX_to_cpu(addr) le32_to_cpu(addr)
#endif
/*
Theory of Operation
I. Board Compatibility
This device driver is designed for the Packet Engines "Hamachi"
Gigabit Ethernet chip. The only PCA currently supported is the GNIC-II 64-bit
66Mhz PCI card.
II. Board-specific settings
No jumpers exist on the board. The chip supports software correction of
various motherboard wiring errors, however this driver does not support
that feature.
III. Driver operation
IIIa. Ring buffers
The Hamachi uses a typical descriptor based bus-master architecture.
The descriptor list is similar to that used by the Digital Tulip.
This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
This driver uses a zero-copy receive and transmit scheme similar to my other
network drivers.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the Hamachi as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack and replaced by a newly allocated skbuff.
The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. Gigabit cards are typically used on generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.
IIIb/c. Transmit/Receive Structure
The Rx and Tx descriptor structure are straight-forward, with no historical
baggage that must be explained. Unlike the awkward DBDMA structure, there
are no unused fields or option bits that had only one allowable setting.
Two details should be noted about the descriptors: The chip supports both 32
bit and 64 bit address structures, and the length field is overwritten on
the receive descriptors. The descriptor length is set in the control word
for each channel. The development driver uses 32 bit addresses only, however
64 bit addresses may be enabled for 64 bit architectures e.g. the Alpha.
IIId. Synchronization
This driver is very similar to my other network drivers.
The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and other software.
The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished otherwise it sets
the 'hmp->tx_full' flag.
The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the 'hmp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
IV. Notes
Thanks to Kim Stearns of Packet Engines for providing a pair of GNIC-II boards.
IVb. References
Hamachi Engineering Design Specification, 5/15/97
(Note: This version was marked "Confidential".)
IVc. Errata
None noted.
V. Recent Changes
01/15/1999 EPK Enlargement of the TX and RX ring sizes. This appears
to help avoid some stall conditions -- this needs further research.
01/15/1999 EPK Creation of the hamachi_tx function. This function cleans
the Tx ring and is called from hamachi_start_xmit (this used to be
called from hamachi_interrupt but it tends to delay execution of the
interrupt handler and thus reduce bandwidth by reducing the latency
between hamachi_rx()'s). Notably, some modification has been made so
that the cleaning loop checks only to make sure that the DescOwn bit
isn't set in the status flag since the card is not required
to set the entire flag to zero after processing.
01/15/1999 EPK In the hamachi_start_tx function, the Tx ring full flag is
checked before attempting to add a buffer to the ring. If the ring is full
an attempt is made to free any dirty buffers and thus find space for
the new buffer or the function returns non-zero which should cause the
scheduler to reschedule the buffer later.
01/15/1999 EPK Some adjustments were made to the chip initialization.
End-to-end flow control should now be fully active and the interrupt
algorithm vars have been changed. These could probably use further tuning.
01/15/1999 EPK Added the max_{rx,tx}_latency options. These are used to
set the rx and tx latencies for the Hamachi interrupts. If you're having
problems with network stalls, try setting these to higher values.
Valid values are 0x00 through 0xff.
01/15/1999 EPK In general, the overall bandwidth has increased and
latencies are better (sometimes by a factor of 2). Stalls are rare at
this point, however there still appears to be a bug somewhere between the
hardware and driver. TCP checksum errors under load also appear to be
eliminated at this point.
01/18/1999 EPK Ensured that the DescEndRing bit was being set on both the
Rx and Tx rings. This appears to have been affecting whether a particular
peer-to-peer connection would hang under high load. I believe the Rx
rings was typically getting set correctly, but the Tx ring wasn't getting
the DescEndRing bit set during initialization. ??? Does this mean the
hamachi card is using the DescEndRing in processing even if a particular
slot isn't in use -- hypothetically, the card might be searching the
entire Tx ring for slots with the DescOwn bit set and then processing
them. If the DescEndRing bit isn't set, then it might just wander off
through memory until it hits a chunk of data with that bit set
and then looping back.
02/09/1999 EPK Added Michel Mueller's TxDMA Interrupt and Tx-timeout
problem (TxCmd and RxCmd need only to be set when idle or stopped).
02/09/1999 EPK Added code to check/reset dev->tbusy in hamachi_interrupt.
(Michel Mueller pointed out the ``permanently busy'' potential
problem here).
02/22/1999 EPK Added Pete Wyckoff's ioctl to control the Tx/Rx latencies.
02/23/1999 EPK Verified that the interrupt status field bits for Tx were
incorrectly defined and corrected (as per Michel Mueller).
02/23/1999 EPK Corrected the Tx full check to check that at least 4 slots
were available before resetting the tbusy and tx_full flags
(as per Michel Mueller).
03/11/1999 EPK Added Pete Wyckoff's hardware checksumming support.
12/31/1999 KDU Cleaned up assorted things and added Don's code to force
32 bit.
02/20/2000 KDU Some of the control was just plain odd. Cleaned up the
hamachi_start_xmit() and hamachi_interrupt() code. There is still some
re-structuring I would like to do.
03/01/2000 KDU Experimenting with a WIDE range of interrupt mitigation
parameters on a dual P3-450 setup yielded the new default interrupt
mitigation parameters. Tx should interrupt VERY infrequently due to
Eric's scheme. Rx should be more often...
03/13/2000 KDU Added a patch to make the Rx Checksum code interact
nicely with non-linux machines.
03/13/2000 KDU Experimented with some of the configuration values:
-It seems that enabling PCI performance commands for descriptors
(changing RxDMACtrl and TxDMACtrl lower nibble from 5 to D) has minimal
performance impact for any of my tests. (ttcp, netpipe, netperf) I will
leave them that way until I hear further feedback.
-Increasing the PCI_LATENCY_TIMER to 130
(2 + (burst size of 128 * (0 wait states + 1))) seems to slightly
degrade performance. Leaving default at 64 pending further information.
03/14/2000 KDU Further tuning:
-adjusted boguscnt in hamachi_rx() to depend on interrupt
mitigation parameters chosen.
-Selected a set of interrupt parameters based on some extensive testing.
These may change with more testing.
TO DO:
-Consider borrowing from the acenic driver code to check PCI_COMMAND for
PCI_COMMAND_INVALIDATE. Set maximum burst size to cache line size in
that case.
-fix the reset procedure. It doesn't quite work.
*/
/* A few values that may be tweaked. */
/* Size of each temporary Rx buffer, calculated as:
* 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for
* the card) + 8 bytes of status info + 8 bytes for the Rx Checksum
*/
#define PKT_BUF_SZ 1536
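/* I.e. 1518 + 2 + 8 + 8 == 1536 bytes, matching the breakdown above. */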
/* For now, this is going to be set to the maximum size of an ethernet
* packet. Eventually, we may want to make it a variable that is
* related to the MTU
*/
#define MAX_FRAME_SIZE 1518
/* The rest of these values should never change. */
static void hamachi_timer(unsigned long data);
enum capability_flags {CanHaveMII=1, };
static const struct chip_info {
u16 vendor_id, device_id, device_id_mask, pad;
const char *name;
void (*media_timer)(unsigned long data);
int flags;
} chip_tbl[] = {
{0x1318, 0x0911, 0xffff, 0, "Hamachi GNIC-II", hamachi_timer, 0},
{0,},
};
/* Offsets to the Hamachi registers. Various sizes. */
enum hamachi_offsets {
TxDMACtrl=0x00, TxCmd=0x04, TxStatus=0x06, TxPtr=0x08, TxCurPtr=0x10,
RxDMACtrl=0x20, RxCmd=0x24, RxStatus=0x26, RxPtr=0x28, RxCurPtr=0x30,
PCIClkMeas=0x060, MiscStatus=0x066, ChipRev=0x68, ChipReset=0x06B,
LEDCtrl=0x06C, VirtualJumpers=0x06D, GPIO=0x6E,
TxChecksum=0x074, RxChecksum=0x076,
TxIntrCtrl=0x078, RxIntrCtrl=0x07C,
InterruptEnable=0x080, InterruptClear=0x084, IntrStatus=0x088,
EventStatus=0x08C,
MACCnfg=0x0A0, FrameGap0=0x0A2, FrameGap1=0x0A4,
/* See enum MII_offsets below. */
MACCnfg2=0x0B0, RxDepth=0x0B8, FlowCtrl=0x0BC, MaxFrameSize=0x0CE,
AddrMode=0x0D0, StationAddr=0x0D2,
/* Gigabit AutoNegotiation. */
ANCtrl=0x0E0, ANStatus=0x0E2, ANXchngCtrl=0x0E4, ANAdvertise=0x0E8,
ANLinkPartnerAbility=0x0EA,
EECmdStatus=0x0F0, EEData=0x0F1, EEAddr=0x0F2,
FIFOcfg=0x0F8,
};
/* Offsets to the MII-mode registers. */
enum MII_offsets {
MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
MII_Status=0xAE,
};
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
IntrRxDone=0x01, IntrRxPCIFault=0x02, IntrRxPCIErr=0x04,
IntrTxDone=0x100, IntrTxPCIFault=0x200, IntrTxPCIErr=0x400,
LinkChange=0x10000, NegotiationChange=0x20000, StatsMax=0x40000, };
/* The Hamachi Rx and Tx buffer descriptors. */
struct hamachi_desc {
__le32 status_n_length;
#if ADDRLEN == 64
u32 pad;
__le64 addr;
#else
__le32 addr;
#endif
};
/* Bits in hamachi_desc.status_n_length */
enum desc_status_bits {
DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000,
DescIntr=0x10000000,
};
#define PRIV_ALIGN 15 /* Required alignment mask */
#define MII_CNT 4
struct hamachi_private {
/* Descriptor rings first for alignment. Tx requires a second descriptor
for status. */
struct hamachi_desc *rx_ring;
struct hamachi_desc *tx_ring;
struct sk_buff* rx_skbuff[RX_RING_SIZE];
struct sk_buff* tx_skbuff[TX_RING_SIZE];
dma_addr_t tx_ring_dma;
dma_addr_t rx_ring_dma;
struct timer_list timer; /* Media selection timer. */
/* Frequently used and paired value: keep adjacent for cache effect. */
spinlock_t lock;
int chip_id;
unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
unsigned int cur_tx, dirty_tx;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
unsigned int tx_full:1; /* The Tx queue is full. */
unsigned int duplex_lock:1;
unsigned int default_port:4; /* Last dev->if_port value. */
/* MII transceiver section. */
int mii_cnt; /* MII device addresses. */
struct mii_if_info mii_if; /* MII lib hooks/info */
unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
u32 rx_int_var, tx_int_var; /* interrupt control variables */
u32 option; /* Hold on to a copy of the options */
struct pci_dev *pci_dev;
void __iomem *base;
};
MODULE_AUTHOR("Donald Becker <becker@scyld.com>, Eric Kasten <kasten@nscl.msu.edu>, Keith Underwood <keithu@parl.clemson.edu>");
MODULE_DESCRIPTION("Packet Engines 'Hamachi' GNIC-II Gigabit Ethernet driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(min_rx_pkt, int, 0);
module_param(max_rx_gap, int, 0);
module_param(max_rx_latency, int, 0);
module_param(min_tx_pkt, int, 0);
module_param(max_tx_gap, int, 0);
module_param(max_tx_latency, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(rx_params, int, NULL, 0);
module_param_array(tx_params, int, NULL, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(force32, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "GNIC-II maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "GNIC-II MTU (all boards)");
MODULE_PARM_DESC(debug, "GNIC-II debug level (0-7)");
MODULE_PARM_DESC(min_rx_pkt, "GNIC-II minimum Rx packets processed between interrupts");
MODULE_PARM_DESC(max_rx_gap, "GNIC-II maximum Rx inter-packet gap in 8.192 microsecond units");
MODULE_PARM_DESC(max_rx_latency, "GNIC-II time between Rx interrupts in 8.192 microsecond units");
MODULE_PARM_DESC(min_tx_pkt, "GNIC-II minimum Tx packets processed between interrupts");
MODULE_PARM_DESC(max_tx_gap, "GNIC-II maximum Tx inter-packet gap in 8.192 microsecond units");
MODULE_PARM_DESC(max_tx_latency, "GNIC-II time between Tx interrupts in 8.192 microsecond units");
MODULE_PARM_DESC(rx_copybreak, "GNIC-II copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(rx_params, "GNIC-II min_rx_pkt+max_rx_gap+max_rx_latency");
MODULE_PARM_DESC(tx_params, "GNIC-II min_tx_pkt+max_tx_gap+max_tx_latency");
MODULE_PARM_DESC(options, "GNIC-II Bits 0-3: media type, bits 4-6: as force32, bit 7: half duplex, bit 9 full duplex");
MODULE_PARM_DESC(full_duplex, "GNIC-II full duplex setting(s) (1)");
MODULE_PARM_DESC(force32, "GNIC-II: Bit 0: 32 bit PCI, bit 1: disable parity, bit 2: 64 bit PCI (all boards)");
static int read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int hamachi_open(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void hamachi_timer(unsigned long data);
static void hamachi_tx_timeout(struct net_device *dev);
static void hamachi_init_ring(struct net_device *dev);
static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t hamachi_interrupt(int irq, void *dev_instance);
static int hamachi_rx(struct net_device *dev);
static inline int hamachi_tx(struct net_device *dev);
static void hamachi_error(struct net_device *dev, int intr_status);
static int hamachi_close(struct net_device *dev);
static struct net_device_stats *hamachi_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;
static const struct ethtool_ops ethtool_ops_no_mii;
static const struct net_device_ops hamachi_netdev_ops = {
.ndo_open = hamachi_open,
.ndo_stop = hamachi_close,
.ndo_start_xmit = hamachi_start_xmit,
.ndo_get_stats = hamachi_get_stats,
.ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = hamachi_tx_timeout,
.ndo_do_ioctl = netdev_ioctl,
};
static int hamachi_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct hamachi_private *hmp;
int option, i, rx_int_var, tx_int_var, boguscnt;
int chip_id = ent->driver_data;
int irq;
void __iomem *ioaddr;
unsigned long base;
static int card_idx;
struct net_device *dev;
void *ring_space;
dma_addr_t ring_dma;
int ret = -ENOMEM;
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
static int printed_version;
if (!printed_version++)
printk(version);
#endif
if (pci_enable_device(pdev)) {
ret = -EIO;
goto err_out;
}
base = pci_resource_start(pdev, 0);
#ifdef __alpha__ /* Really "64 bit addrs" */
base |= (pci_resource_start(pdev, 1) << 32);
#endif
pci_set_master(pdev);
i = pci_request_regions(pdev, DRV_NAME);
if (i)
return i;
irq = pdev->irq;
ioaddr = ioremap(base, 0x400);
if (!ioaddr)
goto err_out_release;
dev = alloc_etherdev(sizeof(struct hamachi_private));
if (!dev)
goto err_out_iounmap;
SET_NETDEV_DEV(dev, &pdev->dev);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = 1 ? read_eeprom(ioaddr, 4 + i)
: readb(ioaddr + StationAddr + i);
#if ! defined(final_version)
if (hamachi_debug > 4)
for (i = 0; i < 0x10; i++)
printk("%2.2x%s",
read_eeprom(ioaddr, i), i % 16 != 15 ? " " : "\n");
#endif
hmp = netdev_priv(dev);
spin_lock_init(&hmp->lock);
hmp->mii_if.dev = dev;
hmp->mii_if.mdio_read = mdio_read;
hmp->mii_if.mdio_write = mdio_write;
hmp->mii_if.phy_id_mask = 0x1f;
hmp->mii_if.reg_num_mask = 0x1f;
ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_cleardev;
hmp->tx_ring = ring_space;
hmp->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
hmp->rx_ring = ring_space;
hmp->rx_ring_dma = ring_dma;
/* Check for options being passed in */
option = card_idx < MAX_UNITS ? options[card_idx] : 0;
if (dev->mem_start)
option = dev->mem_start;
/* If the bus size is misidentified, do the following. */
force32 = force32 ? force32 :
((option >= 0) ? ((option & 0x00000070) >> 4) : 0 );
if (force32)
writeb(force32, ioaddr + VirtualJumpers);
/* Hmmm, do we really need to reset the chip???. */
writeb(0x01, ioaddr + ChipReset);
/* After a reset, the clock speed measurement of the PCI bus will not
* be valid for a moment. Wait for a little while until it is. If
* it takes more than 10ms, forget it.
*/
udelay(10);
i = readb(ioaddr + PCIClkMeas);
for (boguscnt = 0; (!(i & 0x080)) && boguscnt < 1000; boguscnt++){
udelay(10);
i = readb(ioaddr + PCIClkMeas);
}
hmp->base = ioaddr;
pci_set_drvdata(pdev, dev);
hmp->chip_id = chip_id;
hmp->pci_dev = pdev;
/* The lower four bits are the media type. */
if (option > 0) {
hmp->option = option;
if (option & 0x200)
hmp->mii_if.full_duplex = 1;
else if (option & 0x080)
hmp->mii_if.full_duplex = 0;
hmp->default_port = option & 15;
if (hmp->default_port)
hmp->mii_if.force_media = 1;
}
if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
hmp->mii_if.full_duplex = 1;
/* lock the duplex mode if someone specified a value */
if (hmp->mii_if.full_duplex || (option & 0x080))
hmp->duplex_lock = 1;
/* Set interrupt tuning parameters */
max_rx_latency = max_rx_latency & 0x00ff;
max_rx_gap = max_rx_gap & 0x00ff;
min_rx_pkt = min_rx_pkt & 0x00ff;
max_tx_latency = max_tx_latency & 0x00ff;
max_tx_gap = max_tx_gap & 0x00ff;
min_tx_pkt = min_tx_pkt & 0x00ff;
rx_int_var = card_idx < MAX_UNITS ? rx_params[card_idx] : -1;
tx_int_var = card_idx < MAX_UNITS ? tx_params[card_idx] : -1;
hmp->rx_int_var = rx_int_var >= 0 ? rx_int_var :
(min_rx_pkt << 16 | max_rx_gap << 8 | max_rx_latency);
hmp->tx_int_var = tx_int_var >= 0 ? tx_int_var :
(min_tx_pkt << 16 | max_tx_gap << 8 | max_tx_latency);
/* The Hamachi-specific entries in the device structure. */
dev->netdev_ops = &hamachi_netdev_ops;
dev->ethtool_ops = (chip_tbl[hmp->chip_id].flags & CanHaveMII) ?
ðtool_ops : ðtool_ops_no_mii;
dev->watchdog_timeo = TX_TIMEOUT;
if (mtu)
dev->mtu = mtu;
i = register_netdev(dev);
if (i) {
ret = i;
goto err_out_unmap_rx;
}
printk(KERN_INFO "%s: %s type %x at %p, %pM, IRQ %d.\n",
dev->name, chip_tbl[chip_id].name, readl(ioaddr + ChipRev),
ioaddr, dev->dev_addr, irq);
i = readb(ioaddr + PCIClkMeas);
printk(KERN_INFO "%s: %d-bit %d Mhz PCI bus (%d), Virtual Jumpers "
"%2.2x, LPA %4.4x.\n",
dev->name, readw(ioaddr + MiscStatus) & 1 ? 64 : 32,
i ? 2000/(i&0x7f) : 0, i&0x7f, (int)readb(ioaddr + VirtualJumpers),
readw(ioaddr + ANLinkPartnerAbility));
if (chip_tbl[hmp->chip_id].flags & CanHaveMII) {
int phy, phy_idx = 0;
for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
int mii_status = mdio_read(dev, phy, MII_BMSR);
if (mii_status != 0xffff &&
mii_status != 0x0000) {
hmp->phys[phy_idx++] = phy;
hmp->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
printk(KERN_INFO "%s: MII PHY found at address %d, status "
"0x%4.4x advertising %4.4x.\n",
dev->name, phy, mii_status, hmp->mii_if.advertising);
}
}
hmp->mii_cnt = phy_idx;
if (hmp->mii_cnt > 0)
hmp->mii_if.phy_id = hmp->phys[0];
else
memset(&hmp->mii_if, 0, sizeof(hmp->mii_if));
}
/* Configure gigabit autonegotiation. */
writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
writew(0x08e0, ioaddr + ANAdvertise); /* Set our advertise word. */
writew(0x1000, ioaddr + ANCtrl); /* Enable negotiation */
card_idx++;
return 0;
err_out_unmap_rx:
pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
hmp->rx_ring_dma);
err_out_unmap_tx:
pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
hmp->tx_ring_dma);
err_out_cleardev:
free_netdev (dev);
err_out_iounmap:
iounmap(ioaddr);
err_out_release:
pci_release_regions(pdev);
err_out:
return ret;
}
static int read_eeprom(void __iomem *ioaddr, int location)
{
int bogus_cnt = 1000;
/* We should check busy first - per docs -KDU */
while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0);
writew(location, ioaddr + EEAddr);
writeb(0x02, ioaddr + EECmdStatus);
bogus_cnt = 1000;
while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0);
if (hamachi_debug > 5)
printk(" EEPROM status is %2.2x after %d ticks.\n",
(int)readb(ioaddr + EECmdStatus), 1000- bogus_cnt);
return readb(ioaddr + EEData);
}
/* MII Management Data I/O accesses.
These routines assume the MDIO controller is idle, and do not exit until
the command is finished. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
int i;
/* We should check busy first - per docs -KDU */
for (i = 10000; i >= 0; i--)
if ((readw(ioaddr + MII_Status) & 1) == 0)
break;
writew((phy_id<<8) + location, ioaddr + MII_Addr);
writew(0x0001, ioaddr + MII_Cmd);
for (i = 10000; i >= 0; i--)
if ((readw(ioaddr + MII_Status) & 1) == 0)
break;
return readw(ioaddr + MII_Rd_Data);
}
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
int i;
/* We should check busy first - per docs -KDU */
for (i = 10000; i >= 0; i--)
if ((readw(ioaddr + MII_Status) & 1) == 0)
break;
writew((phy_id<<8) + location, ioaddr + MII_Addr);
writew(value, ioaddr + MII_Wr_Data);
/* Wait for the command to finish. */
for (i = 10000; i >= 0; i--)
if ((readw(ioaddr + MII_Status) & 1) == 0)
break;
}
static int hamachi_open(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
int i;
u32 rx_int_var, tx_int_var;
u16 fifo_info;
i = request_irq(hmp->pci_dev->irq, hamachi_interrupt, IRQF_SHARED,
dev->name, dev);
if (i)
return i;
hamachi_init_ring(dev);
#if ADDRLEN == 64
/* writellll anyone ? */
writel(hmp->rx_ring_dma, ioaddr + RxPtr);
writel(hmp->rx_ring_dma >> 32, ioaddr + RxPtr + 4);
writel(hmp->tx_ring_dma, ioaddr + TxPtr);
writel(hmp->tx_ring_dma >> 32, ioaddr + TxPtr + 4);
#else
writel(hmp->rx_ring_dma, ioaddr + RxPtr);
writel(hmp->tx_ring_dma, ioaddr + TxPtr);
#endif
/* TODO: It would make sense to organize this as words since the card
* documentation does. -KDU
*/
for (i = 0; i < 6; i++)
writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
/* Initialize other registers: with so many, this will eventually be
converted to an offset/value list. */
/* Configure the FIFO */
fifo_info = (readw(ioaddr + GPIO) & 0x00C0) >> 6;
switch (fifo_info){
case 0 :
/* No FIFO */
writew(0x0000, ioaddr + FIFOcfg);
break;
case 1 :
/* Configure the FIFO for 512K external, 16K used for Tx. */
writew(0x0028, ioaddr + FIFOcfg);
break;
case 2 :
/* Configure the FIFO for 1024 external, 32K used for Tx. */
writew(0x004C, ioaddr + FIFOcfg);
break;
case 3 :
/* Configure the FIFO for 2048 external, 32K used for Tx. */
writew(0x006C, ioaddr + FIFOcfg);
break;
default :
printk(KERN_WARNING "%s: Unsupported external memory config!\n",
dev->name);
/* Default to no FIFO */
writew(0x0000, ioaddr + FIFOcfg);
break;
}
if (dev->if_port == 0)
dev->if_port = hmp->default_port;
/* Setting the Rx mode will start the Rx process. */
/* If someone didn't choose a duplex, default to full-duplex */
if (hmp->duplex_lock != 1)
hmp->mii_if.full_duplex = 1;
/* always 1, takes no more time to do it */
writew(0x0001, ioaddr + RxChecksum);
writew(0x0000, ioaddr + TxChecksum);
writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */
writew(0x215F, ioaddr + MACCnfg);
writew(0x000C, ioaddr + FrameGap0);
/* WHAT?!?!? Why isn't this documented somewhere? -KDU */
writew(0x1018, ioaddr + FrameGap1);
/* Why do we enable receives/transmits here? -KDU */
writew(0x0780, ioaddr + MACCnfg2); /* Upper 16 bits control LEDs. */
/* Enable automatic generation of flow control frames, period 0xffff. */
writel(0x0030FFFF, ioaddr + FlowCtrl);
writew(MAX_FRAME_SIZE, ioaddr + MaxFrameSize); /* dev->mtu+14 ??? */
/* Enable legacy links. */
writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
/* Initial Link LED to blinking red. */
writeb(0x03, ioaddr + LEDCtrl);
/* Configure interrupt mitigation. This has a great effect on
performance, so system tuning should start here! */
rx_int_var = hmp->rx_int_var;
tx_int_var = hmp->tx_int_var;
if (hamachi_debug > 1) {
printk("max_tx_latency: %d, max_tx_gap: %d, min_tx_pkt: %d\n",
tx_int_var & 0x00ff, (tx_int_var & 0x00ff00) >> 8,
(tx_int_var & 0x00ff0000) >> 16);
printk("max_rx_latency: %d, max_rx_gap: %d, min_rx_pkt: %d\n",
rx_int_var & 0x00ff, (rx_int_var & 0x00ff00) >> 8,
(rx_int_var & 0x00ff0000) >> 16);
printk("rx_int_var: %x, tx_int_var: %x\n", rx_int_var, tx_int_var);
}
writel(tx_int_var, ioaddr + TxIntrCtrl);
writel(rx_int_var, ioaddr + RxIntrCtrl);
set_rx_mode(dev);
netif_start_queue(dev);
/* Enable interrupts by setting the interrupt mask. */
writel(0x80878787, ioaddr + InterruptEnable);
writew(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
/* Configure and start the DMA channels. */
/* Burst sizes are in the low three bits: size = 4<<(val&7) */
#if ADDRLEN == 64
writew(0x005D, ioaddr + RxDMACtrl); /* 128 dword bursts */
writew(0x005D, ioaddr + TxDMACtrl);
#else
writew(0x001D, ioaddr + RxDMACtrl);
writew(0x001D, ioaddr + TxDMACtrl);
#endif
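/* E.g. 0x005D: 4 << (0x5D & 7) == 4 << 5 == 128-dword bursts, per the
 * formula above; the 32 bit value 0x001D selects the same burst size. */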
writew(0x0001, ioaddr + RxCmd);
if (hamachi_debug > 2) {
printk(KERN_DEBUG "%s: Done hamachi_open(), status: Rx %x Tx %x.\n",
dev->name, readw(ioaddr + RxStatus), readw(ioaddr + TxStatus));
}
/* Set the timer to check for link beat. */
init_timer(&hmp->timer);
hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
hmp->timer.data = (unsigned long)dev;
hmp->timer.function = hamachi_timer; /* timer handler */
add_timer(&hmp->timer);
return 0;
}
static inline int hamachi_tx(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
/* Update the dirty pointer until we find an entry that is
still owned by the card */
for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) {
int entry = hmp->dirty_tx % TX_RING_SIZE;
struct sk_buff *skb;
if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
break;
/* Free the original skb. */
skb = hmp->tx_skbuff[entry];
if (skb) {
pci_unmap_single(hmp->pci_dev,
leXX_to_cpu(hmp->tx_ring[entry].addr),
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
hmp->tx_skbuff[entry] = NULL;
}
hmp->tx_ring[entry].status_n_length = 0;
if (entry >= TX_RING_SIZE-1)
hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
cpu_to_le32(DescEndRing);
dev->stats.tx_packets++;
}
return 0;
}
static void hamachi_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
int next_tick = 10*HZ;
if (hamachi_debug > 2) {
printk(KERN_INFO "%s: Hamachi Autonegotiation status %4.4x, LPA "
"%4.4x.\n", dev->name, readw(ioaddr + ANStatus),
readw(ioaddr + ANLinkPartnerAbility));
printk(KERN_INFO "%s: Autonegotiation regs %4.4x %4.4x %4.4x "
"%4.4x %4.4x %4.4x.\n", dev->name,
readw(ioaddr + 0x0e0),
readw(ioaddr + 0x0e2),
readw(ioaddr + 0x0e4),
readw(ioaddr + 0x0e6),
readw(ioaddr + 0x0e8),
readw(ioaddr + 0x0eA));
}
/* We could do something here... nah. */
hmp->timer.expires = RUN_AT(next_tick);
add_timer(&hmp->timer);
}
static void hamachi_tx_timeout(struct net_device *dev)
{
int i;
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
printk(KERN_WARNING "%s: Hamachi transmit timed out, status %8.8x,"
" resetting...\n", dev->name, (int)readw(ioaddr + TxStatus));
{
printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
printk(KERN_CONT " %8.8x",
le32_to_cpu(hmp->rx_ring[i].status_n_length));
printk(KERN_CONT "\n");
printk(KERN_DEBUG" Tx ring %p: ", hmp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
printk(KERN_CONT " %4.4x",
le32_to_cpu(hmp->tx_ring[i].status_n_length));
printk(KERN_CONT "\n");
}
/* Reinit the hardware and make sure the Rx and Tx processes
are up and running.
*/
dev->if_port = 0;
/* The right way to do Reset. -KDU
* -Clear OWN bit in all Rx/Tx descriptors
* -Wait 50 uS for channels to go idle
* -Turn off MAC receiver
* -Issue Reset
*/
for (i = 0; i < RX_RING_SIZE; i++)
hmp->rx_ring[i].status_n_length &= cpu_to_le32(~DescOwn);
/* Presume that all packets in the Tx queue are gone if we have to
* re-init the hardware.
*/
for (i = 0; i < TX_RING_SIZE; i++){
struct sk_buff *skb;
if (i >= TX_RING_SIZE - 1)
hmp->tx_ring[i].status_n_length =
cpu_to_le32(DescEndRing) |
(hmp->tx_ring[i].status_n_length &
cpu_to_le32(0x0000ffff));
else
hmp->tx_ring[i].status_n_length &= cpu_to_le32(0x0000ffff);
skb = hmp->tx_skbuff[i];
if (skb){
pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->tx_ring[i].addr),
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
hmp->tx_skbuff[i] = NULL;
}
}
udelay(60); /* Sleep 60 us just for safety sake */
writew(0x0002, ioaddr + RxCmd); /* STOP Rx */
writeb(0x01, ioaddr + ChipReset); /* Reinit the hardware */
hmp->tx_full = 0;
hmp->cur_rx = hmp->cur_tx = 0;
hmp->dirty_rx = hmp->dirty_tx = 0;
/* Rx packets are also presumed lost; however, we need to make sure a
 * ring of buffers is intact. -KDU
*/
for (i = 0; i < RX_RING_SIZE; i++){
struct sk_buff *skb = hmp->rx_skbuff[i];
if (skb){
pci_unmap_single(hmp->pci_dev,
leXX_to_cpu(hmp->rx_ring[i].addr),
hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
hmp->rx_skbuff[i] = NULL;
}
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz);
hmp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
}
hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
/* Mark the last entry as wrapping the ring. */
hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
/* Trigger an immediate transmit demand. */
dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
/* Restart the chip's Tx/Rx processes . */
writew(0x0002, ioaddr + TxCmd); /* STOP Tx */
writew(0x0001, ioaddr + TxCmd); /* START Tx */
writew(0x0001, ioaddr + RxCmd); /* START Rx */
netif_wake_queue(dev);
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void hamachi_init_ring(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
int i;
hmp->tx_full = 0;
hmp->cur_rx = hmp->cur_tx = 0;
hmp->dirty_rx = hmp->dirty_tx = 0;
/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
* card needs room to do 8 byte alignment, +2 so we can reserve
* the first 2 bytes, and +16 gets room for the status word from the
* card. -KDU
*/
hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
(((dev->mtu+26+7) & ~7) + 16));
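/* Worked example (illustration only): a jumbo mtu of 4000 yields
 * ((4000 + 26 + 7) & ~7) + 16 == 4032 + 16 == 4048 bytes, while any
 * mtu at or below 1492 keeps the fixed PKT_BUF_SZ of 1536. */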
/* Initialize all Rx descriptors. */
for (i = 0; i < RX_RING_SIZE; i++) {
hmp->rx_ring[i].status_n_length = 0;
hmp->rx_skbuff[i] = NULL;
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
hmp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header. */
hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
/* -2 because it doesn't REALLY have that first 2 bytes -KDU */
hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
DescEndPacket | DescIntr | (hmp->rx_buf_sz -2));
}
hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
for (i = 0; i < TX_RING_SIZE; i++) {
hmp->tx_skbuff[i] = NULL;
hmp->tx_ring[i].status_n_length = 0;
}
/* Mark the last entry of the ring */
hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
}
static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
unsigned entry;
u16 status;
/* Ok, now make sure that the queue has space before trying to
add another skbuff. if we return non-zero the scheduler
should interpret this as a queue full and requeue the buffer
for later.
*/
if (hmp->tx_full) {
/* We should NEVER reach this point -KDU */
printk(KERN_WARNING "%s: Hamachi transmit queue full at slot %d.\n",dev->name, hmp->cur_tx);
/* Wake the potentially-idle transmit channel. */
/* If we don't need to read status, DON'T -KDU */
status=readw(hmp->base + TxStatus);
if( !(status & 0x0001) || (status & 0x0002))
writew(0x0001, hmp->base + TxCmd);
return NETDEV_TX_BUSY;
}
/* Caution: the write order is important here, set the field
with the "ownership" bits last. */
/* Calculate the next Tx descriptor entry. */
entry = hmp->cur_tx % TX_RING_SIZE;
hmp->tx_skbuff[entry] = skb;
hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, skb->len, PCI_DMA_TODEVICE));
/* Hmmmm, could probably put a DescIntr on these, but the way
the driver is currently coded makes Tx interrupts unnecessary
since the clearing of the Tx ring is handled by the start_xmit
routine. This organization helps mitigate the interrupts a
bit and probably renders the max_tx_latency param useless.
Update: Putting a DescIntr bit on all of the descriptors and
mitigating interrupt frequency with the tx_min_pkt parameter. -KDU
*/
if (entry >= TX_RING_SIZE-1) /* Wrap ring */
hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
DescEndPacket | DescEndRing | DescIntr | skb->len);
else
hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
DescEndPacket | DescIntr | skb->len);
hmp->cur_tx++;
/* Non-x86 Todo: explicitly flush cache lines here. */
/* Wake the potentially-idle transmit channel. */
/* If we don't need to read status, DON'T -KDU */
status=readw(hmp->base + TxStatus);
if( !(status & 0x0001) || (status & 0x0002))
writew(0x0001, hmp->base + TxCmd);
/* Immediately before returning, let's clear as many entries as we can. */
hamachi_tx(dev);
/* We should kick the bottom half here, since we are not accepting
* interrupts with every packet. i.e. realize that Gigabit ethernet
* can transmit faster than ordinary machines can load packets;
* hence, any packet that got put off because we were in the transmit
* routine should IMMEDIATELY get a chance to be re-queued. -KDU
*/
if ((hmp->cur_tx - hmp->dirty_tx) < (TX_RING_SIZE - 4))
netif_wake_queue(dev); /* Typical path */
else {
hmp->tx_full = 1;
netif_stop_queue(dev);
}
if (hamachi_debug > 4) {
printk(KERN_DEBUG "%s: Hamachi transmit frame #%d queued in slot %d.\n",
dev->name, hmp->cur_tx, entry);
}
return NETDEV_TX_OK;
}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
static irqreturn_t hamachi_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
long boguscnt = max_interrupt_work;
int handled = 0;
#ifndef final_version /* Can never occur. */
if (dev == NULL) {
printk (KERN_ERR "hamachi_interrupt(): irq %d for unknown device.\n", irq);
return IRQ_NONE;
}
#endif
spin_lock(&hmp->lock);
do {
u32 intr_status = readl(ioaddr + InterruptClear);
if (hamachi_debug > 4)
printk(KERN_DEBUG "%s: Hamachi interrupt, status %4.4x.\n",
dev->name, intr_status);
if (intr_status == 0)
break;
handled = 1;
if (intr_status & IntrRxDone)
hamachi_rx(dev);
if (intr_status & IntrTxDone){
/* This code should RARELY need to execute. After all, this is
* a gigabit link, it should consume packets as fast as we put
* them in AND we clear the Tx ring in hamachi_start_xmit().
*/
if (hmp->tx_full){
for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++){
int entry = hmp->dirty_tx % TX_RING_SIZE;
struct sk_buff *skb;
if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
break;
skb = hmp->tx_skbuff[entry];
/* Free the original skb. */
if (skb){
pci_unmap_single(hmp->pci_dev,
leXX_to_cpu(hmp->tx_ring[entry].addr),
skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
hmp->tx_skbuff[entry] = NULL;
}
hmp->tx_ring[entry].status_n_length = 0;
if (entry >= TX_RING_SIZE-1)
hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
cpu_to_le32(DescEndRing);
dev->stats.tx_packets++;
}
if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4){
/* The ring is no longer full */
hmp->tx_full = 0;
netif_wake_queue(dev);
}
} else {
netif_wake_queue(dev);
}
}
/* Abnormal error summary/uncommon events handlers. */
if (intr_status &
(IntrTxPCIFault | IntrTxPCIErr | IntrRxPCIFault | IntrRxPCIErr |
LinkChange | NegotiationChange | StatsMax))
hamachi_error(dev, intr_status);
if (--boguscnt < 0) {
printk(KERN_WARNING "%s: Too much work at interrupt, status=0x%4.4x.\n",
dev->name, intr_status);
break;
}
} while (1);
if (hamachi_debug > 3)
printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
dev->name, readl(ioaddr + IntrStatus));
#ifndef final_version
/* Code that should never be run! Perhaps remove after testing.. */
{
static int stopit = 10;
if (dev->start == 0 && --stopit < 0) {
printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
dev->name);
free_irq(irq, dev);
}
}
#endif
spin_unlock(&hmp->lock);
return IRQ_RETVAL(handled);
}
/* This routine is logically part of the interrupt handler, but separated
for clarity and better register allocation. */
static int hamachi_rx(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
int entry = hmp->cur_rx % RX_RING_SIZE;
int boguscnt = (hmp->dirty_rx + RX_RING_SIZE) - hmp->cur_rx;
if (hamachi_debug > 4) {
printk(KERN_DEBUG " In hamachi_rx(), entry %d status %4.4x.\n",
entry, hmp->rx_ring[entry].status_n_length);
}
/* If EOP is set on the next entry, it's a new packet. Send it up. */
while (1) {
struct hamachi_desc *desc = &(hmp->rx_ring[entry]);
u32 desc_status = le32_to_cpu(desc->status_n_length);
u16 data_size = desc_status; /* Implicit truncate */
u8 *buf_addr;
s32 frame_status;
if (desc_status & DescOwn)
break;
pci_dma_sync_single_for_cpu(hmp->pci_dev,
leXX_to_cpu(desc->addr),
hmp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
frame_status = get_unaligned_le32(&(buf_addr[data_size - 12]));
if (hamachi_debug > 4)
printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n",
frame_status);
if (--boguscnt < 0)
break;
if (!(desc_status & DescEndPacket)) {
printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
"multiple buffers, entry %#x length %d status %4.4x!\n",
dev->name, hmp->cur_rx, data_size, desc_status);
printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
dev->name, desc, &hmp->rx_ring[hmp->cur_rx % RX_RING_SIZE]);
printk(KERN_WARNING "%s: Oversized Ethernet frame -- next status %x/%x last status %x.\n",
dev->name,
le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0xffff0000,
le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0x0000ffff,
le32_to_cpu(hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length));
dev->stats.rx_length_errors++;
} /* else Omit for prototype errata??? */
if (frame_status & 0x00380000) {
/* There was an error. */
if (hamachi_debug > 2)
printk(KERN_DEBUG " hamachi_rx() Rx error was %8.8x.\n",
frame_status);
dev->stats.rx_errors++;
if (frame_status & 0x00600000)
dev->stats.rx_length_errors++;
if (frame_status & 0x00080000)
dev->stats.rx_frame_errors++;
if (frame_status & 0x00100000)
dev->stats.rx_crc_errors++;
if (frame_status < 0)
dev->stats.rx_dropped++;
} else {
struct sk_buff *skb;
/* Omit CRC */
u16 pkt_len = (frame_status & 0x07ff) - 4;
#ifdef RX_CHECKSUM
u32 pfck = *(u32 *) &buf_addr[data_size - 8];
#endif
#ifndef final_version
if (hamachi_debug > 4)
printk(KERN_DEBUG " hamachi_rx() normal Rx pkt length %d"
" of %d, bogus_cnt %d.\n",
pkt_len, data_size, boguscnt);
if (hamachi_debug > 5)
printk(KERN_DEBUG"%s: rx status %8.8x %8.8x %8.8x %8.8x %8.8x.\n",
dev->name,
*(s32*)&(buf_addr[data_size - 20]),
*(s32*)&(buf_addr[data_size - 16]),
*(s32*)&(buf_addr[data_size - 12]),
*(s32*)&(buf_addr[data_size - 8]),
*(s32*)&(buf_addr[data_size - 4]));
#endif
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
#ifdef RX_CHECKSUM
printk(KERN_ERR "%s: rx_copybreak non-zero "
"not good with RX_CHECKSUM\n", dev->name);
#endif
skb_reserve(skb, 2); /* 16 byte align the IP header */
pci_dma_sync_single_for_cpu(hmp->pci_dev,
leXX_to_cpu(hmp->rx_ring[entry].addr),
hmp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
/* Call copy + cksum if available. */
#if 1 || USE_IP_COPYSUM
skb_copy_to_linear_data(skb,
hmp->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
+ entry*sizeof(*desc), pkt_len);
#endif
pci_dma_sync_single_for_device(hmp->pci_dev,
leXX_to_cpu(hmp->rx_ring[entry].addr),
hmp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
} else {
pci_unmap_single(hmp->pci_dev,
leXX_to_cpu(hmp->rx_ring[entry].addr),
hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb_put(skb = hmp->rx_skbuff[entry], pkt_len);
hmp->rx_skbuff[entry] = NULL;
}
skb->protocol = eth_type_trans(skb, dev);
#ifdef RX_CHECKSUM
/* TCP or UDP on ipv4, DIX encoding */
if (pfck>>24 == 0x91 || pfck>>24 == 0x51) {
struct iphdr *ih = (struct iphdr *) skb->data;
/* Check that IP packet is at least 46 bytes, otherwise,
* there may be pad bytes included in the hardware checksum.
* This wouldn't happen if everyone padded with 0.
*/
if (ntohs(ih->tot_len) >= 46){
/* don't worry about frags */
if (!(ih->frag_off & cpu_to_be16(IP_MF|IP_OFFSET))) {
u32 inv = *(u32 *) &buf_addr[data_size - 16];
u32 *p = (u32 *) &buf_addr[data_size - 20];
register u32 crc, p_r, p_r1;
if (inv & 4) {
inv &= ~4;
--p;
}
p_r = *p;
p_r1 = *(p-1);
switch (inv) {
case 0:
crc = (p_r & 0xffff) + (p_r >> 16);
break;
case 1:
crc = (p_r >> 16) + (p_r & 0xffff)
+ (p_r1 >> 16 & 0xff00);
break;
case 2:
crc = p_r + (p_r1 >> 16);
break;
case 3:
crc = p_r + (p_r1 & 0xff00) + (p_r1 >> 16);
break;
default: /*NOTREACHED*/ crc = 0;
}
if (crc & 0xffff0000) {
crc &= 0xffff;
++crc;
}
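/* The fold above is an end-around-carry style step: a carry out of the
* low 16 bits is wrapped back in so the running checksum stays a 16-bit
* ones-complement style sum before it is compared against the value the
* hardware reported.
*/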
/* tcp/udp will add in pseudo */
skb->csum = ntohs(pfck & 0xffff);
if (skb->csum > crc)
skb->csum -= crc;
else
skb->csum += (~crc & 0xffff);
/*
* could do the pseudo myself and return
* CHECKSUM_UNNECESSARY
*/
skb->ip_summed = CHECKSUM_COMPLETE;
}
}
}
#endif /* RX_CHECKSUM */
netif_rx(skb);
dev->stats.rx_packets++;
}
entry = (++hmp->cur_rx) % RX_RING_SIZE;
}
/* Refill the Rx ring buffers. */
for (; hmp->cur_rx - hmp->dirty_rx > 0; hmp->dirty_rx++) {
struct hamachi_desc *desc;
entry = hmp->dirty_rx % RX_RING_SIZE;
desc = &(hmp->rx_ring[entry]);
if (hmp->rx_skbuff[entry] == NULL) {
struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
hmp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
}
desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
if (entry >= RX_RING_SIZE-1)
desc->status_n_length |= cpu_to_le32(DescOwn |
DescEndPacket | DescEndRing | DescIntr);
else
desc->status_n_length |= cpu_to_le32(DescOwn |
DescEndPacket | DescIntr);
}
/* Restart Rx engine if stopped. */
/* If we don't need to check status, don't. -KDU */
if (readw(hmp->base + RxStatus) & 0x0002)
writew(0x0001, hmp->base + RxCmd);
return 0;
}
/* This is more properly named "uncommon interrupt events", as it covers more
than just errors. */
static void hamachi_error(struct net_device *dev, int intr_status)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
if (intr_status & (LinkChange|NegotiationChange)) {
if (hamachi_debug > 1)
printk(KERN_INFO "%s: Link changed: AutoNegotiation Ctrl"
" %4.4x, Status %4.4x %4.4x Intr status %4.4x.\n",
dev->name, readw(ioaddr + 0x0E0), readw(ioaddr + 0x0E2),
readw(ioaddr + ANLinkPartnerAbility),
readl(ioaddr + IntrStatus));
if (readw(ioaddr + ANStatus) & 0x20)
writeb(0x01, ioaddr + LEDCtrl);
else
writeb(0x03, ioaddr + LEDCtrl);
}
if (intr_status & StatsMax) {
hamachi_get_stats(dev);
/* Read the overflow bits to clear. */
readl(ioaddr + 0x370);
readl(ioaddr + 0x3F0);
}
if ((intr_status & ~(LinkChange|StatsMax|NegotiationChange|IntrRxDone|IntrTxDone)) &&
hamachi_debug)
printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
dev->name, intr_status);
/* Hmmmmm, it's not clear how to recover from PCI faults. */
if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
dev->stats.tx_fifo_errors++;
if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
dev->stats.rx_fifo_errors++;
}
static int hamachi_close(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
struct sk_buff *skb;
int i;
netif_stop_queue(dev);
if (hamachi_debug > 1) {
printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x Rx %4.4x Int %2.2x.\n",
dev->name, readw(ioaddr + TxStatus),
readw(ioaddr + RxStatus), readl(ioaddr + IntrStatus));
printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
dev->name, hmp->cur_tx, hmp->dirty_tx, hmp->cur_rx, hmp->dirty_rx);
}
/* Disable interrupts by clearing the interrupt mask. */
writel(0x0000, ioaddr + InterruptEnable);
/* Stop the chip's Tx and Rx processes. */
writel(2, ioaddr + RxCmd);
writew(2, ioaddr + TxCmd);
#ifdef __i386__
if (hamachi_debug > 2) {
printk(KERN_DEBUG " Tx ring at %8.8x:\n",
(int)hmp->tx_ring_dma);
for (i = 0; i < TX_RING_SIZE; i++)
printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x.\n",
readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ',
i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr);
printk(KERN_DEBUG " Rx ring %8.8x:\n",
(int)hmp->rx_ring_dma);
for (i = 0; i < RX_RING_SIZE; i++) {
printk(KERN_DEBUG " %c #%d desc. %4.4x %8.8x\n",
readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
if (hamachi_debug > 6) {
if (*(u8*)hmp->rx_skbuff[i]->data != 0x69) {
u16 *addr = (u16 *)
hmp->rx_skbuff[i]->data;
int j;
printk(KERN_DEBUG "Addr: ");
for (j = 0; j < 0x50; j++)
printk(" %4.4x", addr[j]);
printk("\n");
}
}
}
}
#endif /* __i386__ debugging only */
free_irq(hmp->pci_dev->irq, dev);
del_timer_sync(&hmp->timer);
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
skb = hmp->rx_skbuff[i];
hmp->rx_ring[i].status_n_length = 0;
if (skb) {
pci_unmap_single(hmp->pci_dev,
leXX_to_cpu(hmp->rx_ring[i].addr),
hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
hmp->rx_skbuff[i] = NULL;
}
hmp->rx_ring[i].addr = cpu_to_leXX(0xBADF00D0); /* An invalid address. */
}
for (i = 0; i < TX_RING_SIZE; i++) {
skb = hmp->tx_skbuff[i];
if (skb) {
pci_unmap_single(hmp->pci_dev,
leXX_to_cpu(hmp->tx_ring[i].addr),
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
hmp->tx_skbuff[i] = NULL;
}
}
writeb(0x00, ioaddr + LEDCtrl);
return 0;
}
static struct net_device_stats *hamachi_get_stats(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
/* We should lock this segment of code for SMP eventually, although
the vulnerability window is very small and statistics are
non-critical. */
/* Ok, what goes here? This appears to be stuck at 21 packets
according to ifconfig. It does get incremented in hamachi_tx(),
so I think I'll comment it out here and see if better things
happen.
*/
/* dev->stats.tx_packets = readl(ioaddr + 0x000); */
/* Total Uni+Brd+Multi */
dev->stats.rx_bytes = readl(ioaddr + 0x330);
/* Total Uni+Brd+Multi */
dev->stats.tx_bytes = readl(ioaddr + 0x3B0);
/* Multicast Rx */
dev->stats.multicast = readl(ioaddr + 0x320);
/* Over+Undersized */
dev->stats.rx_length_errors = readl(ioaddr + 0x368);
/* Jabber */
dev->stats.rx_over_errors = readl(ioaddr + 0x35C);
/* Jabber */
dev->stats.rx_crc_errors = readl(ioaddr + 0x360);
/* Symbol Errs */
dev->stats.rx_frame_errors = readl(ioaddr + 0x364);
/* Dropped */
dev->stats.rx_missed_errors = readl(ioaddr + 0x36C);
return &dev->stats;
}
static void set_rx_mode(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
writew(0x000F, ioaddr + AddrMode);
} else if ((netdev_mc_count(dev) > 63) || (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
writew(0x000B, ioaddr + AddrMode);
} else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
struct netdev_hw_addr *ha;
int i = 0;
netdev_for_each_mc_addr(ha, dev) {
writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8);
writel(0x20000 | (*(u16 *)&ha->addr[4]),
ioaddr + 0x104 + i*8);
i++;
}
/* Clear remaining entries. */
for (; i < 64; i++)
writel(0, ioaddr + 0x104 + i*8);
writew(0x0003, ioaddr + AddrMode);
} else { /* Normal, unicast/broadcast-only mode. */
writew(0x0001, ioaddr + AddrMode);
}
}
static int check_if_running(struct net_device *dev)
{
if (!netif_running(dev))
return -EINVAL;
return 0;
}
static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct hamachi_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct hamachi_private *np = netdev_priv(dev);
spin_lock_irq(&np->lock);
mii_ethtool_gset(&np->mii_if, ecmd);
spin_unlock_irq(&np->lock);
return 0;
}
static int hamachi_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct hamachi_private *np = netdev_priv(dev);
int res;
spin_lock_irq(&np->lock);
res = mii_ethtool_sset(&np->mii_if, ecmd);
spin_unlock_irq(&np->lock);
return res;
}
static int hamachi_nway_reset(struct net_device *dev)
{
struct hamachi_private *np = netdev_priv(dev);
return mii_nway_restart(&np->mii_if);
}
static u32 hamachi_get_link(struct net_device *dev)
{
struct hamachi_private *np = netdev_priv(dev);
return mii_link_ok(&np->mii_if);
}
static const struct ethtool_ops ethtool_ops = {
.begin = check_if_running,
.get_drvinfo = hamachi_get_drvinfo,
.get_settings = hamachi_get_settings,
.set_settings = hamachi_set_settings,
.nway_reset = hamachi_nway_reset,
.get_link = hamachi_get_link,
};
static const struct ethtool_ops ethtool_ops_no_mii = {
.begin = check_if_running,
.get_drvinfo = hamachi_get_drvinfo,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct hamachi_private *np = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
int rc;
if (!netif_running(dev))
return -EINVAL;
if (cmd == (SIOCDEVPRIVATE+3)) { /* set rx,tx intr params */
u32 *d = (u32 *)&rq->ifr_ifru;
/* Should add this check here or an ordinary user can do nasty
* things. -KDU
*
* TODO: Shut down the Rx and Tx engines while doing this.
*/
if (!capable(CAP_NET_ADMIN))
return -EPERM;
writel(d[0], np->base + TxIntrCtrl);
writel(d[1], np->base + RxIntrCtrl);
printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name,
(u32) readl(np->base + TxIntrCtrl),
(u32) readl(np->base + RxIntrCtrl));
rc = 0;
}
else {
spin_lock_irq(&np->lock);
rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
spin_unlock_irq(&np->lock);
}
return rc;
}
static void hamachi_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (dev) {
struct hamachi_private *hmp = netdev_priv(dev);
pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
hmp->rx_ring_dma);
pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
hmp->tx_ring_dma);
unregister_netdev(dev);
iounmap(hmp->base);
free_netdev(dev);
pci_release_regions(pdev);
}
}
static const struct pci_device_id hamachi_pci_tbl[] = {
{ 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, hamachi_pci_tbl);
static struct pci_driver hamachi_driver = {
.name = DRV_NAME,
.id_table = hamachi_pci_tbl,
.probe = hamachi_init_one,
.remove = hamachi_remove_one,
};
static int __init hamachi_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
printk(version);
#endif
return pci_register_driver(&hamachi_driver);
}
static void __exit hamachi_exit (void)
{
pci_unregister_driver(&hamachi_driver);
}
module_init(hamachi_init);
module_exit(hamachi_exit);
| gpl-2.0 |
sajal/mptcp-rpi | net/nfc/rawsock.c | 614 | 7815 | /*
* Copyright (C) 2011 Instituto Nokia de Tecnologia
*
* Authors:
* Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
* Lauro Ramos Venancio <lauro.venancio@openbossa.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the
* Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
#include <net/tcp_states.h>
#include <linux/nfc.h>
#include <linux/export.h>
#include "nfc.h"
static void rawsock_write_queue_purge(struct sock *sk)
{
pr_debug("sk=%p\n", sk);
spin_lock_bh(&sk->sk_write_queue.lock);
__skb_queue_purge(&sk->sk_write_queue);
nfc_rawsock(sk)->tx_work_scheduled = false;
spin_unlock_bh(&sk->sk_write_queue.lock);
}
static void rawsock_report_error(struct sock *sk, int err)
{
pr_debug("sk=%p err=%d\n", sk, err);
sk->sk_shutdown = SHUTDOWN_MASK;
sk->sk_err = -err;
sk->sk_error_report(sk);
rawsock_write_queue_purge(sk);
}
static int rawsock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
pr_debug("sock=%p sk=%p\n", sock, sk);
if (!sk)
return 0;
sock_orphan(sk);
sock_put(sk);
return 0;
}
static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
int len, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr;
struct nfc_dev *dev;
int rc = 0;
pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags);
if (!addr || len < sizeof(struct sockaddr_nfc) ||
addr->sa_family != AF_NFC)
return -EINVAL;
pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n",
addr->dev_idx, addr->target_idx, addr->nfc_protocol);
lock_sock(sk);
if (sock->state == SS_CONNECTED) {
rc = -EISCONN;
goto error;
}
dev = nfc_get_device(addr->dev_idx);
if (!dev) {
rc = -ENODEV;
goto error;
}
if (addr->target_idx > dev->target_next_idx - 1 ||
addr->target_idx < dev->target_next_idx - dev->n_targets) {
rc = -EINVAL;
goto error;
}
rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
if (rc)
goto put_dev;
nfc_rawsock(sk)->dev = dev;
nfc_rawsock(sk)->target_idx = addr->target_idx;
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
sk->sk_state_change(sk);
release_sock(sk);
return 0;
put_dev:
nfc_put_device(dev);
error:
release_sock(sk);
return rc;
}
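/*
* Illustrative (hypothetical) userspace use of this connect path, assuming a
* device/target pair was already discovered; the indices and protocol below
* are placeholders, not fixed constants:
*
*	struct sockaddr_nfc addr = {
*		.sa_family    = AF_NFC,
*		.dev_idx      = 0,
*		.target_idx   = 1,
*		.nfc_protocol = NFC_PROTO_MIFARE,
*	};
*	int fd = socket(AF_NFC, SOCK_SEQPACKET, NFC_SOCKPROTO_RAW);
*	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
*/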
static int rawsock_add_header(struct sk_buff *skb)
{
*skb_push(skb, NFC_HEADER_SIZE) = 0;
return 0;
}
static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
int err)
{
struct sock *sk = (struct sock *) context;
BUG_ON(in_irq());
pr_debug("sk=%p err=%d\n", sk, err);
if (err)
goto error;
err = rawsock_add_header(skb);
if (err)
goto error;
err = sock_queue_rcv_skb(sk, skb);
if (err)
goto error;
spin_lock_bh(&sk->sk_write_queue.lock);
if (!skb_queue_empty(&sk->sk_write_queue))
schedule_work(&nfc_rawsock(sk)->tx_work);
else
nfc_rawsock(sk)->tx_work_scheduled = false;
spin_unlock_bh(&sk->sk_write_queue.lock);
sock_put(sk);
return;
error:
rawsock_report_error(sk, err);
sock_put(sk);
}
static void rawsock_tx_work(struct work_struct *work)
{
struct sock *sk = to_rawsock_sk(work);
struct nfc_dev *dev = nfc_rawsock(sk)->dev;
u32 target_idx = nfc_rawsock(sk)->target_idx;
struct sk_buff *skb;
int rc;
pr_debug("sk=%p target_idx=%u\n", sk, target_idx);
if (sk->sk_shutdown & SEND_SHUTDOWN) {
rawsock_write_queue_purge(sk);
return;
}
skb = skb_dequeue(&sk->sk_write_queue);
sock_hold(sk);
rc = nfc_data_exchange(dev, target_idx, skb,
rawsock_data_exchange_complete, sk);
if (rc) {
rawsock_report_error(sk, rc);
sock_put(sk);
}
}
static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct nfc_dev *dev = nfc_rawsock(sk)->dev;
struct sk_buff *skb;
int rc;
pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len);
if (msg->msg_namelen)
return -EOPNOTSUPP;
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc);
if (skb == NULL)
return rc;
rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (rc < 0) {
kfree_skb(skb);
return rc;
}
spin_lock_bh(&sk->sk_write_queue.lock);
__skb_queue_tail(&sk->sk_write_queue, skb);
if (!nfc_rawsock(sk)->tx_work_scheduled) {
schedule_work(&nfc_rawsock(sk)->tx_work);
nfc_rawsock(sk)->tx_work_scheduled = true;
}
spin_unlock_bh(&sk->sk_write_queue.lock);
return len;
}
static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied;
int rc;
pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);
skb = skb_recv_datagram(sk, flags, noblock, &rc);
if (!skb)
return rc;
msg->msg_namelen = 0;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
skb_free_datagram(sk, skb);
return rc ? : copied;
}
static const struct proto_ops rawsock_ops = {
.family = PF_NFC,
.owner = THIS_MODULE,
.release = rawsock_release,
.bind = sock_no_bind,
.connect = rawsock_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
.poll = datagram_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = rawsock_sendmsg,
.recvmsg = rawsock_recvmsg,
.mmap = sock_no_mmap,
};
static void rawsock_destruct(struct sock *sk)
{
pr_debug("sk=%p\n", sk);
if (sk->sk_state == TCP_ESTABLISHED) {
nfc_deactivate_target(nfc_rawsock(sk)->dev,
nfc_rawsock(sk)->target_idx);
nfc_put_device(nfc_rawsock(sk)->dev);
}
skb_queue_purge(&sk->sk_receive_queue);
if (!sock_flag(sk, SOCK_DEAD)) {
pr_err("Freeing alive NFC raw socket %p\n", sk);
return;
}
}
static int rawsock_create(struct net *net, struct socket *sock,
const struct nfc_protocol *nfc_proto)
{
struct sock *sk;
pr_debug("sock=%p\n", sock);
if (sock->type != SOCK_SEQPACKET)
return -ESOCKTNOSUPPORT;
sock->ops = &rawsock_ops;
sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sk->sk_protocol = nfc_proto->id;
sk->sk_destruct = rawsock_destruct;
sock->state = SS_UNCONNECTED;
INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work);
nfc_rawsock(sk)->tx_work_scheduled = false;
return 0;
}
static struct proto rawsock_proto = {
.name = "NFC_RAW",
.owner = THIS_MODULE,
.obj_size = sizeof(struct nfc_rawsock),
};
static const struct nfc_protocol rawsock_nfc_proto = {
.id = NFC_SOCKPROTO_RAW,
.proto = &rawsock_proto,
.owner = THIS_MODULE,
.create = rawsock_create
};
int __init rawsock_init(void)
{
int rc;
rc = nfc_proto_register(&rawsock_nfc_proto);
return rc;
}
void rawsock_exit(void)
{
nfc_proto_unregister(&rawsock_nfc_proto);
}
| gpl-2.0 |
hallovveen31/ICED_COLD_Hercules_JB_Kernel | drivers/md/dm-flakey.c | 1638 | 4953 | /*
* Copyright (C) 2003 Sistina Software (UK) Limited.
* Copyright (C) 2004, 2010 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
#define DM_MSG_PREFIX "flakey"
/*
* Flakey: Used for testing only, simulates intermittent,
* catastrophic device failure.
*/
struct flakey_c {
struct dm_dev *dev;
unsigned long start_time;
sector_t start;
unsigned up_interval;
unsigned down_interval;
};
/*
* Construct a flakey mapping: <dev_path> <offset> <up interval> <down interval>
*/
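/*
* For illustration only (device name and sizes are made up): a 200 MiB flakey
* target that behaves normally for 30 s and then errors all I/O for 5 s could
* be created with something like
*
*	dmsetup create flaky-disk --table "0 409600 flakey /dev/sdb1 0 30 5"
*
* where 409600 is the target length in 512-byte sectors and 0 is the offset
* into /dev/sdb1.
*/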
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct flakey_c *fc;
unsigned long long tmp;
if (argc != 4) {
ti->error = "dm-flakey: Invalid argument count";
return -EINVAL;
}
fc = kmalloc(sizeof(*fc), GFP_KERNEL);
if (!fc) {
ti->error = "dm-flakey: Cannot allocate linear context";
return -ENOMEM;
}
fc->start_time = jiffies;
if (sscanf(argv[1], "%llu", &tmp) != 1) {
ti->error = "dm-flakey: Invalid device sector";
goto bad;
}
fc->start = tmp;
if (sscanf(argv[2], "%u", &fc->up_interval) != 1) {
ti->error = "dm-flakey: Invalid up interval";
goto bad;
}
if (sscanf(argv[3], "%u", &fc->down_interval) != 1) {
ti->error = "dm-flakey: Invalid down interval";
goto bad;
}
if (!(fc->up_interval + fc->down_interval)) {
ti->error = "dm-flakey: Total (up + down) interval is zero";
goto bad;
}
if (fc->up_interval + fc->down_interval < fc->up_interval) {
ti->error = "dm-flakey: Interval overflow";
goto bad;
}
if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &fc->dev)) {
ti->error = "dm-flakey: Device lookup failed";
goto bad;
}
ti->num_flush_requests = 1;
ti->private = fc;
return 0;
bad:
kfree(fc);
return -EINVAL;
}
static void flakey_dtr(struct dm_target *ti)
{
struct flakey_c *fc = ti->private;
dm_put_device(ti, fc->dev);
kfree(fc);
}
static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
struct flakey_c *fc = ti->private;
return fc->start + (bi_sector - ti->begin);
}
static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
{
struct flakey_c *fc = ti->private;
bio->bi_bdev = fc->dev->bdev;
if (bio_sectors(bio))
bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
}
static int flakey_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
struct flakey_c *fc = ti->private;
unsigned elapsed;
/* Are we alive ? */
elapsed = (jiffies - fc->start_time) / HZ;
if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval)
return -EIO;
flakey_map_bio(ti, bio);
return DM_MAPIO_REMAPPED;
}
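/*
* Worked example (assumed values): with up_interval = 30 and down_interval = 5,
* elapsed % 35 lies in [0, 29] while the device is "up" and in [30, 34] while
* it is "down", so every 35 seconds the target returns -EIO for a 5 second
* window.
*/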
static int flakey_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
struct flakey_c *fc = ti->private;
switch (type) {
case STATUSTYPE_INFO:
result[0] = '\0';
break;
case STATUSTYPE_TABLE:
snprintf(result, maxlen, "%s %llu %u %u", fc->dev->name,
(unsigned long long)fc->start, fc->up_interval,
fc->down_interval);
break;
}
return 0;
}
static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
{
struct flakey_c *fc = ti->private;
struct dm_dev *dev = fc->dev;
int r = 0;
/*
* Only pass ioctls through if the device sizes match exactly.
*/
if (fc->start ||
ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
r = scsi_verify_blk_ioctl(NULL, cmd);
return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
}
static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
struct bio_vec *biovec, int max_size)
{
struct flakey_c *fc = ti->private;
struct request_queue *q = bdev_get_queue(fc->dev->bdev);
if (!q->merge_bvec_fn)
return max_size;
bvm->bi_bdev = fc->dev->bdev;
bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector);
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
struct flakey_c *fc = ti->private;
return fn(ti, fc->dev, fc->start, ti->len, data);
}
static struct target_type flakey_target = {
.name = "flakey",
.version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
.map = flakey_map,
.status = flakey_status,
.ioctl = flakey_ioctl,
.merge = flakey_merge,
.iterate_devices = flakey_iterate_devices,
};
static int __init dm_flakey_init(void)
{
int r = dm_register_target(&flakey_target);
if (r < 0)
DMERR("register failed %d", r);
return r;
}
static void __exit dm_flakey_exit(void)
{
dm_unregister_target(&flakey_target);
}
/* Module hooks */
module_init(dm_flakey_init);
module_exit(dm_flakey_exit);
MODULE_DESCRIPTION(DM_NAME " flakey target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
2fast4u88/Htc-Kingdom-FastKernel | arch/powerpc/platforms/cell/cpufreq_spudemand.c | 1638 | 4449 | /*
* spu aware cpufreq governor for the cell processor
*
* © Copyright IBM Corporation 2006-2008
*
* Author: Christian Krafft <krafft@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/cpufreq.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/machdep.h>
#include <asm/spu.h>
#define POLL_TIME 100000 /* in µs */
#define EXP 753 /* exp(-1) in fixed-point */
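/*
* Sketch of the smoothing used below, assuming the kernel's usual
* FIXED_1 = 2048 fixed-point scale: CALC_LOAD keeps an exponentially
* decaying average,
*
*	busy_spus = busy_spus * EXP/FIXED_1 + sample * (1 - EXP/FIXED_1)
*
* and EXP = 753 is roughly exp(-1) * 2048, so one poll interval of history
* decays by a factor of about 1/e.
*/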
struct spu_gov_info_struct {
unsigned long busy_spus; /* fixed-point */
struct cpufreq_policy *policy;
struct delayed_work work;
unsigned int poll_int; /* µs */
};
static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);
static struct workqueue_struct *kspugov_wq;
static int calc_freq(struct spu_gov_info_struct *info)
{
int cpu;
int busy_spus;
cpu = info->policy->cpu;
busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);
CALC_LOAD(info->busy_spus, EXP, busy_spus * FIXED_1);
pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n",
cpu, busy_spus, info->busy_spus);
return info->policy->max * info->busy_spus / FIXED_1;
}
static void spu_gov_work(struct work_struct *work)
{
struct spu_gov_info_struct *info;
int delay;
unsigned long target_freq;
info = container_of(work, struct spu_gov_info_struct, work.work);
/* after cancel_delayed_work_sync we unset info->policy */
BUG_ON(info->policy == NULL);
target_freq = calc_freq(info);
__cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);
delay = usecs_to_jiffies(info->poll_int);
queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
}
static void spu_gov_init_work(struct spu_gov_info_struct *info)
{
int delay = usecs_to_jiffies(info->poll_int);
INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
}
static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
{
cancel_delayed_work_sync(&info->work);
}
static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event)
{
unsigned int cpu = policy->cpu;
struct spu_gov_info_struct *info, *affected_info;
int i;
int ret = 0;
info = &per_cpu(spu_gov_info, cpu);
switch (event) {
case CPUFREQ_GOV_START:
if (!cpu_online(cpu)) {
printk(KERN_ERR "cpu %d is not online\n", cpu);
ret = -EINVAL;
break;
}
if (!policy->cur) {
printk(KERN_ERR "no cpu specified in policy\n");
ret = -EINVAL;
break;
}
/* initialize spu_gov_info for all affected cpus */
for_each_cpu(i, policy->cpus) {
affected_info = &per_cpu(spu_gov_info, i);
affected_info->policy = policy;
}
info->poll_int = POLL_TIME;
/* setup timer */
spu_gov_init_work(info);
break;
case CPUFREQ_GOV_STOP:
/* cancel timer */
spu_gov_cancel_work(info);
/* clean spu_gov_info for all affected cpus */
for_each_cpu (i, policy->cpus) {
info = &per_cpu(spu_gov_info, i);
info->policy = NULL;
}
break;
}
return ret;
}
static struct cpufreq_governor spu_governor = {
.name = "spudemand",
.governor = spu_gov_govern,
.owner = THIS_MODULE,
};
/*
* module init and destroy
*/
static int __init spu_gov_init(void)
{
int ret;
kspugov_wq = create_workqueue("kspugov");
if (!kspugov_wq) {
printk(KERN_ERR "creation of kspugov failed\n");
ret = -EFAULT;
goto out;
}
ret = cpufreq_register_governor(&spu_governor);
if (ret) {
printk(KERN_ERR "registration of governor failed\n");
destroy_workqueue(kspugov_wq);
goto out;
}
out:
return ret;
}
static void __exit spu_gov_exit(void)
{
cpufreq_unregister_governor(&spu_governor);
destroy_workqueue(kspugov_wq);
}
module_init(spu_gov_init);
module_exit(spu_gov_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
| gpl-2.0 |
bigzz/big-LITTLE-MP | drivers/hid/hid-roccat-kone.c | 2150 | 24461 | /*
* Roccat Kone driver for Linux
*
* Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
/*
* Roccat Kone is a gamer mouse which consists of a mouse part and a keyboard
* part. The keyboard part enables the mouse to execute stored macros with mixed
* key- and button-events.
*
* TODO implement on-the-fly polling-rate change
* The Windows driver has the ability to change the polling rate of the
* device on the press of a mouse button.
* Is it possible to remove and reinstall the urb in raw-event- or any
* other handler, or to defer this action to be executed somewhere else?
*
* TODO is it possible to overwrite group for sysfs attributes via udev?
*/
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hid-roccat.h>
#include "hid-ids.h"
#include "hid-roccat-common.h"
#include "hid-roccat-kone.h"
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
static void kone_profile_activated(struct kone_device *kone, uint new_profile)
{
kone->actual_profile = new_profile;
kone->actual_dpi = kone->profiles[new_profile - 1].startup_dpi;
}
static void kone_profile_report(struct kone_device *kone, uint new_profile)
{
struct kone_roccat_report roccat_report;
roccat_report.event = kone_mouse_event_switch_profile;
roccat_report.value = new_profile;
roccat_report.key = 0;
roccat_report_event(kone->chrdev_minor, (uint8_t *)&roccat_report);
}
static int kone_receive(struct usb_device *usb_dev, uint usb_command,
void *data, uint size)
{
char *buf;
int len;
buf = kmalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
HID_REQ_GET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
memcpy(data, buf, size);
kfree(buf);
return ((len < 0) ? len : ((len != size) ? -EIO : 0));
}
static int kone_send(struct usb_device *usb_dev, uint usb_command,
void const *data, uint size)
{
char *buf;
int len;
buf = kmemdup(data, size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
HID_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
kfree(buf);
return ((len < 0) ? len : ((len != size) ? -EIO : 0));
}
/* kone_class is used for creating sysfs attributes via roccat char device */
static struct class *kone_class;
static void kone_set_settings_checksum(struct kone_settings *settings)
{
uint16_t checksum = 0;
unsigned char *address = (unsigned char *)settings;
int i;
for (i = 0; i < sizeof(struct kone_settings) - 2; ++i, ++address)
checksum += *address;
settings->checksum = cpu_to_le16(checksum);
}
/*
* Checks success after writing data to mouse
* On success returns 0
* On failure returns errno
*/
static int kone_check_write(struct usb_device *usb_dev)
{
int retval;
uint8_t data;
do {
/*
* Mouse needs 50 msecs until it says ok, but there are
* 30 more msecs needed for next write to work.
*/
msleep(80);
retval = kone_receive(usb_dev,
kone_command_confirm_write, &data, 1);
if (retval)
return retval;
/*
* A value of 3 seems to mean something like
* "not finished yet, but it looks good",
* so check again after a moment.
*/
} while (data == 3);
if (data == 1) /* everything alright */
return 0;
/* unknown answer */
dev_err(&usb_dev->dev, "got retval %d when checking write\n", data);
return -EIO;
}
/*
* Reads settings from mouse and stores it in @buf
* On success returns 0
* On failure returns errno
*/
static int kone_get_settings(struct usb_device *usb_dev,
struct kone_settings *buf)
{
return kone_receive(usb_dev, kone_command_settings, buf,
sizeof(struct kone_settings));
}
/*
* Writes settings from @buf to mouse
* On success returns 0
* On failure returns errno
*/
static int kone_set_settings(struct usb_device *usb_dev,
struct kone_settings const *settings)
{
int retval;
retval = kone_send(usb_dev, kone_command_settings,
settings, sizeof(struct kone_settings));
if (retval)
return retval;
return kone_check_write(usb_dev);
}
/*
* Reads profile data from mouse and stores it in @buf
* @number: profile number to read
* On success returns 0
* On failure returns errno
*/
static int kone_get_profile(struct usb_device *usb_dev,
struct kone_profile *buf, int number)
{
int len;
if (number < 1 || number > 5)
return -EINVAL;
len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
USB_REQ_CLEAR_FEATURE,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
kone_command_profile, number, buf,
sizeof(struct kone_profile), USB_CTRL_SET_TIMEOUT);
if (len != sizeof(struct kone_profile))
return -EIO;
return 0;
}
/*
* Writes profile data to mouse.
* @number: profile number to write
* On success returns 0
* On failure returns errno
*/
static int kone_set_profile(struct usb_device *usb_dev,
struct kone_profile const *profile, int number)
{
int len;
if (number < 1 || number > 5)
return -EINVAL;
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_CONFIGURATION,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
kone_command_profile, number, (void *)profile,
sizeof(struct kone_profile),
USB_CTRL_SET_TIMEOUT);
if (len != sizeof(struct kone_profile))
return len;
if (kone_check_write(usb_dev))
return -EIO;
return 0;
}
/*
* Reads value of "fast-clip-weight" and stores it in @result
* On success returns 0
* On failure returns errno
*/
static int kone_get_weight(struct usb_device *usb_dev, int *result)
{
int retval;
uint8_t data;
retval = kone_receive(usb_dev, kone_command_weight, &data, 1);
if (retval)
return retval;
*result = (int)data;
return 0;
}
/*
* Reads firmware_version of mouse and stores it in @result
* On success returns 0
* On failure returns errno
*/
static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
{
int retval;
uint16_t data;
retval = kone_receive(usb_dev, kone_command_firmware_version,
&data, 2);
if (retval)
return retval;
*result = le16_to_cpu(data);
return 0;
}
static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
if (off >= sizeof(struct kone_settings))
return 0;
if (off + count > sizeof(struct kone_settings))
count = sizeof(struct kone_settings) - off;
mutex_lock(&kone->kone_lock);
memcpy(buf, ((char const *)&kone->settings) + off, count);
mutex_unlock(&kone->kone_lock);
return count;
}
/*
* Writing settings automatically activates startup_profile.
* This function keeps values in kone_device up to date and assumes that in
* case of error the old data is still valid
*/
static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0, difference, old_profile;
/* I need to get my data in one piece */
if (off != 0 || count != sizeof(struct kone_settings))
return -EINVAL;
mutex_lock(&kone->kone_lock);
difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
if (difference) {
retval = kone_set_settings(usb_dev,
(struct kone_settings const *)buf);
if (retval) {
mutex_unlock(&kone->kone_lock);
return retval;
}
old_profile = kone->settings.startup_profile;
memcpy(&kone->settings, buf, sizeof(struct kone_settings));
kone_profile_activated(kone, kone->settings.startup_profile);
if (kone->settings.startup_profile != old_profile)
kone_profile_report(kone, kone->settings.startup_profile);
}
mutex_unlock(&kone->kone_lock);
return sizeof(struct kone_settings);
}
static ssize_t kone_sysfs_read_profilex(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr,
char *buf, loff_t off, size_t count) {
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
if (off >= sizeof(struct kone_profile))
return 0;
if (off + count > sizeof(struct kone_profile))
count = sizeof(struct kone_profile) - off;
mutex_lock(&kone->kone_lock);
memcpy(buf, ((char const *)&kone->profiles[*(uint *)(attr->private)]) + off, count);
mutex_unlock(&kone->kone_lock);
return count;
}
/* Writes data only if different to stored data */
static ssize_t kone_sysfs_write_profilex(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr,
char *buf, loff_t off, size_t count) {
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
struct kone_profile *profile;
int retval = 0, difference;
/* I need to get my data in one piece */
if (off != 0 || count != sizeof(struct kone_profile))
return -EINVAL;
profile = &kone->profiles[*(uint *)(attr->private)];
mutex_lock(&kone->kone_lock);
difference = memcmp(buf, profile, sizeof(struct kone_profile));
if (difference) {
retval = kone_set_profile(usb_dev,
(struct kone_profile const *)buf,
*(uint *)(attr->private) + 1);
if (!retval)
memcpy(profile, buf, sizeof(struct kone_profile));
}
mutex_unlock(&kone->kone_lock);
if (retval)
return retval;
return sizeof(struct kone_profile);
}
static ssize_t kone_sysfs_show_actual_profile(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kone_device *kone =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_profile);
}
static ssize_t kone_sysfs_show_actual_dpi(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kone_device *kone =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_dpi);
}
/* weight is read each time, since we don't get informed when it's changed */
static ssize_t kone_sysfs_show_weight(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kone_device *kone;
struct usb_device *usb_dev;
int weight = 0;
int retval;
dev = dev->parent->parent;
kone = hid_get_drvdata(dev_get_drvdata(dev));
usb_dev = interface_to_usbdev(to_usb_interface(dev));
mutex_lock(&kone->kone_lock);
retval = kone_get_weight(usb_dev, &weight);
mutex_unlock(&kone->kone_lock);
if (retval)
return retval;
return snprintf(buf, PAGE_SIZE, "%d\n", weight);
}
static ssize_t kone_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kone_device *kone =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kone->firmware_version);
}
static ssize_t kone_sysfs_show_tcu(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kone_device *kone =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.tcu);
}
static int kone_tcu_command(struct usb_device *usb_dev, int number)
{
unsigned char value;
value = number;
return kone_send(usb_dev, kone_command_calibrate, &value, 1);
}
/*
* Calibrating the tcu is the only action that changes settings data inside the
* mouse, so this data needs to be reread
*/
static ssize_t kone_sysfs_set_tcu(struct device *dev,
struct device_attribute *attr, char const *buf, size_t size)
{
struct kone_device *kone;
struct usb_device *usb_dev;
int retval;
unsigned long state;
dev = dev->parent->parent;
kone = hid_get_drvdata(dev_get_drvdata(dev));
usb_dev = interface_to_usbdev(to_usb_interface(dev));
retval = strict_strtoul(buf, 10, &state);
if (retval)
return retval;
if (state != 0 && state != 1)
return -EINVAL;
mutex_lock(&kone->kone_lock);
if (state == 1) { /* state activate */
retval = kone_tcu_command(usb_dev, 1);
if (retval)
goto exit_unlock;
retval = kone_tcu_command(usb_dev, 2);
if (retval)
goto exit_unlock;
ssleep(5); /* tcu needs this time for calibration */
retval = kone_tcu_command(usb_dev, 3);
if (retval)
goto exit_unlock;
retval = kone_tcu_command(usb_dev, 0);
if (retval)
goto exit_unlock;
retval = kone_tcu_command(usb_dev, 4);
if (retval)
goto exit_unlock;
/*
* Kone needs this time to settle things.
* Reading settings too early will result in invalid data.
* Roccat's driver waits 1 sec, maybe this time could be
* shortened.
*/
ssleep(1);
}
/* calibration changes values in settings, so reread */
retval = kone_get_settings(usb_dev, &kone->settings);
if (retval)
goto exit_no_settings;
/* only write settings back if activation state is different */
if (kone->settings.tcu != state) {
kone->settings.tcu = state;
kone_set_settings_checksum(&kone->settings);
retval = kone_set_settings(usb_dev, &kone->settings);
if (retval) {
dev_err(&usb_dev->dev, "couldn't set tcu state\n");
/*
* try to reread valid settings into buffer overwriting
* first error code
*/
retval = kone_get_settings(usb_dev, &kone->settings);
if (retval)
goto exit_no_settings;
goto exit_unlock;
}
/* calibration resets profile */
kone_profile_activated(kone, kone->settings.startup_profile);
}
retval = size;
goto exit_unlock;
exit_no_settings:
dev_err(&usb_dev->dev, "couldn't read settings\n");
exit_unlock:
mutex_unlock(&kone->kone_lock);
return retval;
}
static ssize_t kone_sysfs_show_startup_profile(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kone_device *kone =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.startup_profile);
}
static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
struct device_attribute *attr, char const *buf, size_t size)
{
struct kone_device *kone;
struct usb_device *usb_dev;
int retval;
unsigned long new_startup_profile;
dev = dev->parent->parent;
kone = hid_get_drvdata(dev_get_drvdata(dev));
usb_dev = interface_to_usbdev(to_usb_interface(dev));
retval = strict_strtoul(buf, 10, &new_startup_profile);
if (retval)
return retval;
if (new_startup_profile < 1 || new_startup_profile > 5)
return -EINVAL;
mutex_lock(&kone->kone_lock);
kone->settings.startup_profile = new_startup_profile;
kone_set_settings_checksum(&kone->settings);
retval = kone_set_settings(usb_dev, &kone->settings);
if (retval) {
mutex_unlock(&kone->kone_lock);
return retval;
}
/* changing the startup profile immediately activates this profile */
kone_profile_activated(kone, new_startup_profile);
kone_profile_report(kone, new_startup_profile);
mutex_unlock(&kone->kone_lock);
return size;
}
static struct device_attribute kone_attributes[] = {
/*
* Read actual dpi settings.
* Returns raw value for further processing. Refer to enum
* kone_polling_rates to get real value.
*/
__ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL),
__ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL),
/*
* The mouse can be equipped with one of four supplied weights from 5
* to 20 grams which are recognized and its value can be read out.
* This returns the raw value reported by the mouse for easy evaluation
* by software. Refer to enum kone_weights to get corresponding real
* weight.
*/
__ATTR(weight, 0440, kone_sysfs_show_weight, NULL),
/*
* Prints firmware version stored in mouse as integer.
* The raw value reported by the mouse is returned for easy evaluation,
* to get the real version number the decimal point has to be shifted 2
* positions to the left. E.g. a value of 138 means 1.38.
*/
__ATTR(firmware_version, 0440,
kone_sysfs_show_firmware_version, NULL),
/*
* Prints state of Tracking Control Unit as number where 0 = off and
* 1 = on. Writing 0 deactivates tcu and writing 1 calibrates and
* activates the tcu
*/
__ATTR(tcu, 0660, kone_sysfs_show_tcu, kone_sysfs_set_tcu),
/* Prints and takes the number of the profile the mouse starts with */
__ATTR(startup_profile, 0660,
kone_sysfs_show_startup_profile,
kone_sysfs_set_startup_profile),
__ATTR_NULL
};
static struct bin_attribute kone_bin_attributes[] = {
{
.attr = { .name = "settings", .mode = 0660 },
.size = sizeof(struct kone_settings),
.read = kone_sysfs_read_settings,
.write = kone_sysfs_write_settings
},
{
.attr = { .name = "profile1", .mode = 0660 },
.size = sizeof(struct kone_profile),
.read = kone_sysfs_read_profilex,
.write = kone_sysfs_write_profilex,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2", .mode = 0660 },
.size = sizeof(struct kone_profile),
.read = kone_sysfs_read_profilex,
.write = kone_sysfs_write_profilex,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3", .mode = 0660 },
.size = sizeof(struct kone_profile),
.read = kone_sysfs_read_profilex,
.write = kone_sysfs_write_profilex,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4", .mode = 0660 },
.size = sizeof(struct kone_profile),
.read = kone_sysfs_read_profilex,
.write = kone_sysfs_write_profilex,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5", .mode = 0660 },
.size = sizeof(struct kone_profile),
.read = kone_sysfs_read_profilex,
.write = kone_sysfs_write_profilex,
.private = &profile_numbers[4]
},
__ATTR_NULL
};
static int kone_init_kone_device_struct(struct usb_device *usb_dev,
struct kone_device *kone)
{
uint i;
int retval;
mutex_init(&kone->kone_lock);
for (i = 0; i < 5; ++i) {
retval = kone_get_profile(usb_dev, &kone->profiles[i], i + 1);
if (retval)
return retval;
}
retval = kone_get_settings(usb_dev, &kone->settings);
if (retval)
return retval;
retval = kone_get_firmware_version(usb_dev, &kone->firmware_version);
if (retval)
return retval;
kone_profile_activated(kone, kone->settings.startup_profile);
return 0;
}
/*
* Since the IGNORE_MOUSE quirk moved to hid-apple, there is no way to bind only
* to the mouse part if usb_hid is compiled into the kernel and kone is compiled
* as a module.
* Special behaviour is bound only to the mouse part since only mouse events
* carry the additional notifications.
*/
static int kone_init_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct kone_device *kone;
int retval;
if (intf->cur_altsetting->desc.bInterfaceProtocol
== USB_INTERFACE_PROTOCOL_MOUSE) {
kone = kzalloc(sizeof(*kone), GFP_KERNEL);
if (!kone) {
hid_err(hdev, "can't alloc device descriptor\n");
return -ENOMEM;
}
hid_set_drvdata(hdev, kone);
retval = kone_init_kone_device_struct(usb_dev, kone);
if (retval) {
hid_err(hdev, "couldn't init struct kone_device\n");
goto exit_free;
}
retval = roccat_connect(kone_class, hdev,
sizeof(struct kone_roccat_report));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
/* be tolerant about not getting chrdev */
} else {
kone->roccat_claimed = 1;
kone->chrdev_minor = retval;
}
} else {
hid_set_drvdata(hdev, NULL);
}
return 0;
exit_free:
kfree(kone);
return retval;
}
static void kone_remove_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
struct kone_device *kone;
if (intf->cur_altsetting->desc.bInterfaceProtocol
== USB_INTERFACE_PROTOCOL_MOUSE) {
kone = hid_get_drvdata(hdev);
if (kone->roccat_claimed)
roccat_disconnect(kone->chrdev_minor);
kfree(hid_get_drvdata(hdev));
}
}
static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int retval;
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
goto exit;
}
retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (retval) {
hid_err(hdev, "hw start failed\n");
goto exit;
}
retval = kone_init_specials(hdev);
if (retval) {
hid_err(hdev, "couldn't install mouse\n");
goto exit_stop;
}
return 0;
exit_stop:
hid_hw_stop(hdev);
exit:
return retval;
}
static void kone_remove(struct hid_device *hdev)
{
kone_remove_specials(hdev);
hid_hw_stop(hdev);
}
/* handle special events and keep actual profile and dpi values up to date */
static void kone_keep_values_up_to_date(struct kone_device *kone,
struct kone_mouse_event const *event)
{
switch (event->event) {
case kone_mouse_event_switch_profile:
kone->actual_dpi = kone->profiles[event->value - 1].
startup_dpi;
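/* fall through: a profile switch also updates actual_profile below */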
case kone_mouse_event_osd_profile:
kone->actual_profile = event->value;
break;
case kone_mouse_event_switch_dpi:
case kone_mouse_event_osd_dpi:
kone->actual_dpi = event->value;
break;
}
}
static void kone_report_to_chrdev(struct kone_device const *kone,
struct kone_mouse_event const *event)
{
struct kone_roccat_report roccat_report;
switch (event->event) {
case kone_mouse_event_switch_profile:
case kone_mouse_event_switch_dpi:
case kone_mouse_event_osd_profile:
case kone_mouse_event_osd_dpi:
roccat_report.event = event->event;
roccat_report.value = event->value;
roccat_report.key = 0;
roccat_report_event(kone->chrdev_minor,
(uint8_t *)&roccat_report);
break;
case kone_mouse_event_call_overlong_macro:
case kone_mouse_event_multimedia:
if (event->value == kone_keystroke_action_press) {
roccat_report.event = event->event;
roccat_report.value = kone->actual_profile;
roccat_report.key = event->macro_key;
roccat_report_event(kone->chrdev_minor,
(uint8_t *)&roccat_report);
}
break;
}
}
/*
* Called for both the keyboard part and the mouse part.
* Only the mouse part gets information about special events in its extended
* event structure.
*/
static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *data, int size)
{
struct kone_device *kone = hid_get_drvdata(hdev);
struct kone_mouse_event *event = (struct kone_mouse_event *)data;
/* keyboard events are always processed by default handler */
if (size != sizeof(struct kone_mouse_event))
return 0;
if (kone == NULL)
return 0;
/*
* Firmware 1.38 introduced new behaviour for tilt and special buttons.
* Pressed button is reported in each movement event.
* Workaround sends only one event per press.
*/
if (memcmp(&kone->last_mouse_event.tilt, &event->tilt, 5))
memcpy(&kone->last_mouse_event, event,
sizeof(struct kone_mouse_event));
else
memset(&event->tilt, 0, 5);
kone_keep_values_up_to_date(kone, event);
if (kone->roccat_claimed)
kone_report_to_chrdev(kone, event);
return 0; /* always do further processing */
}
static const struct hid_device_id kone_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
{ }
};
MODULE_DEVICE_TABLE(hid, kone_devices);
static struct hid_driver kone_driver = {
.name = "kone",
.id_table = kone_devices,
.probe = kone_probe,
.remove = kone_remove,
.raw_event = kone_raw_event
};
static int __init kone_init(void)
{
int retval;
/* class name has to be same as driver name */
kone_class = class_create(THIS_MODULE, "kone");
if (IS_ERR(kone_class))
return PTR_ERR(kone_class);
kone_class->dev_attrs = kone_attributes;
kone_class->dev_bin_attrs = kone_bin_attributes;
retval = hid_register_driver(&kone_driver);
if (retval)
class_destroy(kone_class);
return retval;
}
static void __exit kone_exit(void)
{
hid_unregister_driver(&kone_driver);
class_destroy(kone_class);
}
module_init(kone_init);
module_exit(kone_exit);
MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat Kone driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
sohkis/android_kernel_motorola_shamu | drivers/pcmcia/electra_cf.c | 2406 | 8746 | /*
* Copyright (C) 2007 PA Semi, Inc
*
* Maintained by: Olof Johansson <olof@lixom.net>
*
* Based on drivers/pcmcia/omap_cf.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <pcmcia/ss.h>
static const char driver_name[] = "electra-cf";
struct electra_cf_socket {
struct pcmcia_socket socket;
struct timer_list timer;
unsigned present:1;
unsigned active:1;
struct platform_device *ofdev;
unsigned long mem_phys;
void __iomem * mem_base;
unsigned long mem_size;
void __iomem * io_virt;
unsigned int io_base;
unsigned int io_size;
u_int irq;
struct resource iomem;
void __iomem * gpio_base;
int gpio_detect;
int gpio_vsense;
int gpio_3v;
int gpio_5v;
};
#define POLL_INTERVAL (2 * HZ)
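/*
* Card presence is read from the GPIO data-in register; the detect bit is
* active low, so a cleared bit means a card is present.
*/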
static int electra_cf_present(struct electra_cf_socket *cf)
{
unsigned int gpio;
gpio = in_le32(cf->gpio_base+0x40);
return !(gpio & (1 << cf->gpio_detect));
}
static int electra_cf_ss_init(struct pcmcia_socket *s)
{
return 0;
}
/* the timer is primarily to kick this socket's pccardd */
static void electra_cf_timer(unsigned long _cf)
{
struct electra_cf_socket *cf = (void *) _cf;
int present = electra_cf_present(cf);
if (present != cf->present) {
cf->present = present;
pcmcia_parse_events(&cf->socket, SS_DETECT);
}
if (cf->active)
mod_timer(&cf->timer, jiffies + POLL_INTERVAL);
}
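/*
* The card-detect interrupt funnels into the same routine as the poll
* timer, so hotplug interrupts and the POLL_INTERVAL poll share one
* detection path.
*/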
static irqreturn_t electra_cf_irq(int irq, void *_cf)
{
electra_cf_timer((unsigned long)_cf);
return IRQ_HANDLED;
}
static int electra_cf_get_status(struct pcmcia_socket *s, u_int *sp)
{
struct electra_cf_socket *cf;
if (!sp)
return -EINVAL;
cf = container_of(s, struct electra_cf_socket, socket);
/* NOTE CF is always 3VCARD */
if (electra_cf_present(cf)) {
*sp = SS_READY | SS_DETECT | SS_POWERON | SS_3VCARD;
s->pci_irq = cf->irq;
} else
*sp = 0;
return 0;
}
static int electra_cf_set_socket(struct pcmcia_socket *sock,
struct socket_state_t *s)
{
unsigned int gpio;
unsigned int vcc;
struct electra_cf_socket *cf;
cf = container_of(sock, struct electra_cf_socket, socket);
/* "reset" means no power in our case */
vcc = (s->flags & SS_RESET) ? 0 : s->Vcc;
switch (vcc) {
case 0:
gpio = 0;
break;
case 33:
gpio = (1 << cf->gpio_3v);
break;
case 5:
gpio = (1 << cf->gpio_5v);
break;
default:
return -EINVAL;
}
gpio |= 1 << (cf->gpio_3v + 16); /* enwr */
gpio |= 1 << (cf->gpio_5v + 16); /* enwr */
out_le32(cf->gpio_base+0x90, gpio);
pr_debug("%s: Vcc %d, io_irq %d, flags %04x csc %04x\n",
driver_name, s->Vcc, s->io_irq, s->flags, s->csc_mask);
return 0;
}
static int electra_cf_set_io_map(struct pcmcia_socket *s,
struct pccard_io_map *io)
{
return 0;
}
static int electra_cf_set_mem_map(struct pcmcia_socket *s,
struct pccard_mem_map *map)
{
struct electra_cf_socket *cf;
if (map->card_start)
return -EINVAL;
cf = container_of(s, struct electra_cf_socket, socket);
map->static_start = cf->mem_phys;
map->flags &= MAP_ACTIVE|MAP_ATTRIB;
if (!(map->flags & MAP_ATTRIB))
map->static_start += 0x800;
return 0;
}
static struct pccard_operations electra_cf_ops = {
.init = electra_cf_ss_init,
.get_status = electra_cf_get_status,
.set_socket = electra_cf_set_socket,
.set_io_map = electra_cf_set_io_map,
.set_mem_map = electra_cf_set_mem_map,
};
static int electra_cf_probe(struct platform_device *ofdev)
{
struct device *device = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct electra_cf_socket *cf;
struct resource mem, io;
int status;
const unsigned int *prop;
int err;
struct vm_struct *area;
err = of_address_to_resource(np, 0, &mem);
if (err)
return -EINVAL;
err = of_address_to_resource(np, 1, &io);
if (err)
return -EINVAL;
cf = kzalloc(sizeof *cf, GFP_KERNEL);
if (!cf)
return -ENOMEM;
setup_timer(&cf->timer, electra_cf_timer, (unsigned long)cf);
cf->irq = NO_IRQ;
cf->ofdev = ofdev;
cf->mem_phys = mem.start;
cf->mem_size = PAGE_ALIGN(resource_size(&mem));
cf->mem_base = ioremap(cf->mem_phys, cf->mem_size);
cf->io_size = PAGE_ALIGN(resource_size(&io));
area = __get_vm_area(cf->io_size, 0, PHB_IO_BASE, PHB_IO_END);
if (area == NULL)
return -ENOMEM;
cf->io_virt = (void __iomem *)(area->addr);
cf->gpio_base = ioremap(0xfc103000, 0x1000);
dev_set_drvdata(device, cf);
if (!cf->mem_base || !cf->io_virt || !cf->gpio_base ||
(__ioremap_at(io.start, cf->io_virt, cf->io_size,
_PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)) {
dev_err(device, "can't ioremap ranges\n");
status = -ENOMEM;
goto fail1;
}
cf->io_base = (unsigned long)cf->io_virt - VMALLOC_END;
cf->iomem.start = (unsigned long)cf->mem_base;
cf->iomem.end = (unsigned long)cf->mem_base + (mem.end - mem.start);
cf->iomem.flags = IORESOURCE_MEM;
cf->irq = irq_of_parse_and_map(np, 0);
status = request_irq(cf->irq, electra_cf_irq, IRQF_SHARED,
driver_name, cf);
if (status < 0) {
dev_err(device, "request_irq failed\n");
goto fail1;
}
cf->socket.pci_irq = cf->irq;
prop = of_get_property(np, "card-detect-gpio", NULL);
if (!prop)
goto fail1;
cf->gpio_detect = *prop;
prop = of_get_property(np, "card-vsense-gpio", NULL);
if (!prop)
goto fail1;
cf->gpio_vsense = *prop;
prop = of_get_property(np, "card-3v-gpio", NULL);
if (!prop)
goto fail1;
cf->gpio_3v = *prop;
prop = of_get_property(np, "card-5v-gpio", NULL);
if (!prop)
goto fail1;
cf->gpio_5v = *prop;
cf->socket.io_offset = cf->io_base;
/* reserve chip-select regions */
if (!request_mem_region(cf->mem_phys, cf->mem_size, driver_name)) {
status = -ENXIO;
dev_err(device, "Can't claim memory region\n");
goto fail1;
}
if (!request_region(cf->io_base, cf->io_size, driver_name)) {
status = -ENXIO;
dev_err(device, "Can't claim I/O region\n");
goto fail2;
}
cf->socket.owner = THIS_MODULE;
cf->socket.dev.parent = &ofdev->dev;
cf->socket.ops = &electra_cf_ops;
cf->socket.resource_ops = &pccard_static_ops;
cf->socket.features = SS_CAP_PCCARD | SS_CAP_STATIC_MAP |
SS_CAP_MEM_ALIGN;
cf->socket.map_size = 0x800;
status = pcmcia_register_socket(&cf->socket);
if (status < 0) {
dev_err(device, "pcmcia_register_socket failed\n");
goto fail3;
}
dev_info(device, "at mem 0x%lx io 0x%llx irq %d\n",
cf->mem_phys, io.start, cf->irq);
cf->active = 1;
electra_cf_timer((unsigned long)cf);
return 0;
fail3:
release_region(cf->io_base, cf->io_size);
fail2:
release_mem_region(cf->mem_phys, cf->mem_size);
fail1:
if (cf->irq != NO_IRQ)
free_irq(cf->irq, cf);
if (cf->io_virt)
__iounmap_at(cf->io_virt, cf->io_size);
if (cf->mem_base)
iounmap(cf->mem_base);
if (cf->gpio_base)
iounmap(cf->gpio_base);
device_init_wakeup(&ofdev->dev, 0);
kfree(cf);
return status;
}
static int electra_cf_remove(struct platform_device *ofdev)
{
struct device *device = &ofdev->dev;
struct electra_cf_socket *cf;
cf = dev_get_drvdata(device);
cf->active = 0;
pcmcia_unregister_socket(&cf->socket);
free_irq(cf->irq, cf);
del_timer_sync(&cf->timer);
__iounmap_at(cf->io_virt, cf->io_size);
iounmap(cf->mem_base);
iounmap(cf->gpio_base);
release_mem_region(cf->mem_phys, cf->mem_size);
release_region(cf->io_base, cf->io_size);
kfree(cf);
return 0;
}
static const struct of_device_id electra_cf_match[] = {
{
.compatible = "electra-cf",
},
{},
};
MODULE_DEVICE_TABLE(of, electra_cf_match);
static struct platform_driver electra_cf_driver = {
.driver = {
.name = (char *)driver_name,
.owner = THIS_MODULE,
.of_match_table = electra_cf_match,
},
.probe = electra_cf_probe,
.remove = electra_cf_remove,
};
module_platform_driver(electra_cf_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi Electra CF driver");
| gpl-2.0 |
bheu/odroid_linux | drivers/gpu/drm/radeon/radeon_atpx_handler.c | 2662 | 7275 | /*
* Copyright (c) 2010 Red Hat Inc.
* Author : Dave Airlie <airlied@redhat.com>
*
* Licensed under GPLv2
*
* ATPX support for both Intel/ATI
*/
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/pci.h>
#define ATPX_VERSION 0
#define ATPX_GPU_PWR 2
#define ATPX_MUX_SELECT 3
#define ATPX_I2C_MUX_SELECT 4
#define ATPX_SWITCH_START 5
#define ATPX_SWITCH_END 6
#define ATPX_INTEGRATED 0
#define ATPX_DISCRETE 1
#define ATPX_MUX_IGD 0
#define ATPX_MUX_DISCRETE 1
static struct radeon_atpx_priv {
bool atpx_detected;
/* handle for device - and atpx */
acpi_handle dhandle;
acpi_handle atpx_handle;
acpi_handle atrm_handle;
} radeon_atpx_priv;
/* retrieve the ROM in 4k blocks */
static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
int offset, int len)
{
acpi_status status;
union acpi_object atrm_arg_elements[2], *obj;
struct acpi_object_list atrm_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
atrm_arg.count = 2;
atrm_arg.pointer = &atrm_arg_elements[0];
atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
atrm_arg_elements[0].integer.value = offset;
atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
atrm_arg_elements[1].integer.value = len;
status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
if (ACPI_FAILURE(status)) {
printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
return -ENODEV;
}
obj = (union acpi_object *)buffer.pointer;
memcpy(bios+offset, obj->buffer.pointer, len);
kfree(buffer.pointer);
return len;
}
bool radeon_atrm_supported(struct pci_dev *pdev)
{
/* get the discrete ROM only via ATRM */
if (!radeon_atpx_priv.atpx_detected)
return false;
if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
return false;
return true;
}
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
{
return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
}
static int radeon_atpx_get_version(acpi_handle handle)
{
acpi_status status;
union acpi_object atpx_arg_elements[2], *obj;
struct acpi_object_list atpx_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
atpx_arg.count = 2;
atpx_arg.pointer = &atpx_arg_elements[0];
atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
atpx_arg_elements[0].integer.value = ATPX_VERSION;
atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
atpx_arg_elements[1].integer.value = ATPX_VERSION;
status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
if (ACPI_FAILURE(status)) {
printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
return -ENOSYS;
}
obj = (union acpi_object *)buffer.pointer;
if (obj && (obj->type == ACPI_TYPE_BUFFER))
printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2));
kfree(buffer.pointer);
return 0;
}
static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value)
{
acpi_status status;
union acpi_object atpx_arg_elements[2];
struct acpi_object_list atpx_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
uint8_t buf[4] = {0};
if (!handle)
return -EINVAL;
atpx_arg.count = 2;
atpx_arg.pointer = &atpx_arg_elements[0];
atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
atpx_arg_elements[0].integer.value = cmd_id;
buf[2] = value & 0xff;
buf[3] = (value >> 8) & 0xff;
atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
atpx_arg_elements[1].buffer.length = 4;
atpx_arg_elements[1].buffer.pointer = buf;
status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
if (ACPI_FAILURE(status)) {
printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
return -ENOSYS;
}
kfree(buffer.pointer);
return 0;
}
static int radeon_atpx_set_discrete_state(acpi_handle handle, int state)
{
return radeon_atpx_execute(handle, ATPX_GPU_PWR, state);
}
static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
{
return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
}
static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id)
{
return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id);
}
static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id)
{
return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id);
}
static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id)
{
return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id);
}
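/*
* A full GPU switch is bracketed by SWITCH_START/SWITCH_END and routes
* both the display mux and the DDC/I2C mux to the requested GPU.
*/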
static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
{
int gpu_id;
if (id == VGA_SWITCHEROO_IGD)
gpu_id = ATPX_INTEGRATED;
else
gpu_id = ATPX_DISCRETE;
radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id);
radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id);
radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id);
radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id);
return 0;
}
static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state)
{
/* on w500 ACPI can't change intel gpu state */
if (id == VGA_SWITCHEROO_IGD)
return 0;
radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state);
return 0;
}
static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
{
acpi_handle dhandle, atpx_handle, atrm_handle;
acpi_status status;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
if (ACPI_FAILURE(status))
return false;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
if (ACPI_FAILURE(status))
return false;
radeon_atpx_priv.dhandle = dhandle;
radeon_atpx_priv.atpx_handle = atpx_handle;
radeon_atpx_priv.atrm_handle = atrm_handle;
return true;
}
static int radeon_atpx_init(void)
{
/* set up the ATPX handle */
radeon_atpx_get_version(radeon_atpx_priv.atpx_handle);
return 0;
}
static int radeon_atpx_get_client_id(struct pci_dev *pdev)
{
if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
return VGA_SWITCHEROO_IGD;
else
return VGA_SWITCHEROO_DIS;
}
static struct vga_switcheroo_handler radeon_atpx_handler = {
.switchto = radeon_atpx_switchto,
.power_state = radeon_atpx_power_state,
.init = radeon_atpx_init,
.get_client_id = radeon_atpx_get_client_id,
};
static bool radeon_atpx_detect(void)
{
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
struct pci_dev *pdev = NULL;
bool has_atpx = false;
int vga_count = 0;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
}
if (has_atpx && vga_count == 2) {
acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer);
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
return true;
}
return false;
}
void radeon_register_atpx_handler(void)
{
bool r;
/* detect if we have any ATPX + 2 VGA in the system */
r = radeon_atpx_detect();
if (!r)
return;
vga_switcheroo_register_handler(&radeon_atpx_handler);
}
void radeon_unregister_atpx_handler(void)
{
vga_switcheroo_unregister_handler();
}
| gpl-2.0 |
theophile/SM-N920R7_MM_Kernel | net/rds/tcp_listen.c | 2662 | 5198 | /*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <net/tcp.h>
#include "rds.h"
#include "tcp.h"
/*
* cheesy, but simple..
*/
static void rds_tcp_accept_worker(struct work_struct *work);
static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
static struct socket *rds_tcp_listen_sock;
static int rds_tcp_accept_one(struct socket *sock)
{
struct socket *new_sock = NULL;
struct rds_connection *conn;
int ret;
struct inet_sock *inet;
ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
sock->sk->sk_protocol, &new_sock);
if (ret)
goto out;
new_sock->type = sock->type;
new_sock->ops = sock->ops;
ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
if (ret < 0)
goto out;
rds_tcp_tune(new_sock);
inet = inet_sk(new_sock->sk);
rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n",
&inet->inet_saddr, ntohs(inet->inet_sport),
&inet->inet_daddr, ntohs(inet->inet_dport));
conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr,
&rds_tcp_transport, GFP_KERNEL);
if (IS_ERR(conn)) {
ret = PTR_ERR(conn);
goto out;
}
/*
* see the comment above rds_queue_delayed_reconnect()
*/
if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
if (rds_conn_state(conn) == RDS_CONN_UP)
rds_tcp_stats_inc(s_tcp_listen_closed_stale);
else
rds_tcp_stats_inc(s_tcp_connect_raced);
rds_conn_drop(conn);
ret = 0;
goto out;
}
rds_tcp_set_callbacks(new_sock, conn);
rds_connect_complete(conn);
new_sock = NULL;
ret = 0;
out:
if (new_sock)
sock_release(new_sock);
return ret;
}
static void rds_tcp_accept_worker(struct work_struct *work)
{
while (rds_tcp_accept_one(rds_tcp_listen_sock) == 0)
cond_resched();
}
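/*
* rds_tcp_listen_init() stashes the socket's original ->sk_data_ready in
* sk_user_data; it is chained to from here and restored on teardown in
* rds_tcp_listen_stop().
*/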
void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
{
void (*ready)(struct sock *sk, int bytes);
rdsdebug("listen data ready sk %p\n", sk);
read_lock(&sk->sk_callback_lock);
ready = sk->sk_user_data;
if (!ready) { /* check for teardown race */
ready = sk->sk_data_ready;
goto out;
}
/*
* ->sk_data_ready is also called for a newly established child socket
* before it has been accepted and before the accepting side has set up
* its own data_ready callback; only queue listen work for our listening
* socket.
*/
if (sk->sk_state == TCP_LISTEN)
queue_work(rds_wq, &rds_tcp_listen_work);
out:
read_unlock(&sk->sk_callback_lock);
ready(sk, bytes);
}
int rds_tcp_listen_init(void)
{
struct sockaddr_in sin;
struct socket *sock = NULL;
int ret;
ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0)
goto out;
sock->sk->sk_reuse = SK_CAN_REUSE;
rds_tcp_nonagle(sock);
write_lock_bh(&sock->sk->sk_callback_lock);
sock->sk->sk_user_data = sock->sk->sk_data_ready;
sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
write_unlock_bh(&sock->sk->sk_callback_lock);
sin.sin_family = PF_INET;
sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
sin.sin_port = (__force u16)htons(RDS_TCP_PORT);
ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
if (ret < 0)
goto out;
ret = sock->ops->listen(sock, 64);
if (ret < 0)
goto out;
rds_tcp_listen_sock = sock;
sock = NULL;
out:
if (sock)
sock_release(sock);
return ret;
}
void rds_tcp_listen_stop(void)
{
struct socket *sock = rds_tcp_listen_sock;
struct sock *sk;
if (!sock)
return;
sk = sock->sk;
/* serialize with and prevent further callbacks */
lock_sock(sk);
write_lock_bh(&sk->sk_callback_lock);
if (sk->sk_user_data) {
sk->sk_data_ready = sk->sk_user_data;
sk->sk_user_data = NULL;
}
write_unlock_bh(&sk->sk_callback_lock);
release_sock(sk);
/* wait for accepts to stop and close the socket */
flush_workqueue(rds_wq);
sock_release(sock);
rds_tcp_listen_sock = NULL;
}
| gpl-2.0 |
vakkov/android-n900-nitdroid_kernel | drivers/usb/serial/funsoft.c | 4198 | 1526 | /*
* Funsoft Serial USB driver
*
* Copyright (C) 2006 Greg Kroah-Hartman <gregkh@suse.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
static int debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1404, 0xcddc) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_driver funsoft_driver = {
.name = "funsoft",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
.no_dynamic_id = 1,
};
static struct usb_serial_driver funsoft_device = {
.driver = {
.owner = THIS_MODULE,
.name = "funsoft",
},
.id_table = id_table,
.usb_driver = &funsoft_driver,
.num_ports = 1,
};
static int __init funsoft_init(void)
{
int retval;
retval = usb_serial_register(&funsoft_device);
if (retval)
return retval;
retval = usb_register(&funsoft_driver);
if (retval)
usb_serial_deregister(&funsoft_device);
return retval;
}
static void __exit funsoft_exit(void)
{
usb_deregister(&funsoft_driver);
usb_serial_deregister(&funsoft_device);
}
module_init(funsoft_init);
module_exit(funsoft_exit);
MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
| gpl-2.0 |
avisconti/prova | fs/bfs/dir.c | 4966 | 9070 | /*
* fs/bfs/dir.c
* BFS directory operations.
* Copyright (C) 1999,2000 Tigran Aivazian <tigran@veritas.com>
* Made endianness-clean by Andrew Stribblehill <ads@wompom.org> 2005
*/
#include <linux/time.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/sched.h>
#include "bfs.h"
#undef DEBUG
#ifdef DEBUG
#define dprintf(x...) printf(x)
#else
#define dprintf(x...)
#endif
static int bfs_add_entry(struct inode *dir, const unsigned char *name,
int namelen, int ino);
static struct buffer_head *bfs_find_entry(struct inode *dir,
const unsigned char *name, int namelen,
struct bfs_dirent **res_dir);
static int bfs_readdir(struct file *f, void *dirent, filldir_t filldir)
{
struct inode *dir = f->f_path.dentry->d_inode;
struct buffer_head *bh;
struct bfs_dirent *de;
struct bfs_sb_info *info = BFS_SB(dir->i_sb);
unsigned int offset;
int block;
mutex_lock(&info->bfs_lock);
if (f->f_pos & (BFS_DIRENT_SIZE - 1)) {
printf("Bad f_pos=%08lx for %s:%08lx\n",
(unsigned long)f->f_pos,
dir->i_sb->s_id, dir->i_ino);
mutex_unlock(&info->bfs_lock);
return -EBADF;
}
while (f->f_pos < dir->i_size) {
offset = f->f_pos & (BFS_BSIZE - 1);
block = BFS_I(dir)->i_sblock + (f->f_pos >> BFS_BSIZE_BITS);
bh = sb_bread(dir->i_sb, block);
if (!bh) {
f->f_pos += BFS_BSIZE - offset;
continue;
}
do {
de = (struct bfs_dirent *)(bh->b_data + offset);
if (de->ino) {
int size = strnlen(de->name, BFS_NAMELEN);
if (filldir(dirent, de->name, size, f->f_pos,
le16_to_cpu(de->ino),
DT_UNKNOWN) < 0) {
brelse(bh);
mutex_unlock(&info->bfs_lock);
return 0;
}
}
offset += BFS_DIRENT_SIZE;
f->f_pos += BFS_DIRENT_SIZE;
} while ((offset < BFS_BSIZE) && (f->f_pos < dir->i_size));
brelse(bh);
}
mutex_unlock(&info->bfs_lock);
return 0;
}
const struct file_operations bfs_dir_operations = {
.read = generic_read_dir,
.readdir = bfs_readdir,
.fsync = generic_file_fsync,
.llseek = generic_file_llseek,
};
extern void dump_imap(const char *, struct super_block *);
static int bfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
int err;
struct inode *inode;
struct super_block *s = dir->i_sb;
struct bfs_sb_info *info = BFS_SB(s);
unsigned long ino;
inode = new_inode(s);
if (!inode)
return -ENOSPC;
mutex_lock(&info->bfs_lock);
ino = find_first_zero_bit(info->si_imap, info->si_lasti + 1);
if (ino > info->si_lasti) {
mutex_unlock(&info->bfs_lock);
iput(inode);
return -ENOSPC;
}
set_bit(ino, info->si_imap);
info->si_freei--;
inode_init_owner(inode, dir, mode);
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
inode->i_blocks = 0;
inode->i_op = &bfs_file_inops;
inode->i_fop = &bfs_file_operations;
inode->i_mapping->a_ops = &bfs_aops;
inode->i_ino = ino;
BFS_I(inode)->i_dsk_ino = ino;
BFS_I(inode)->i_sblock = 0;
BFS_I(inode)->i_eblock = 0;
insert_inode_hash(inode);
mark_inode_dirty(inode);
dump_imap("create", s);
err = bfs_add_entry(dir, dentry->d_name.name, dentry->d_name.len,
inode->i_ino);
if (err) {
inode_dec_link_count(inode);
mutex_unlock(&info->bfs_lock);
iput(inode);
return err;
}
mutex_unlock(&info->bfs_lock);
d_instantiate(dentry, inode);
return 0;
}
static struct dentry *bfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
struct inode *inode = NULL;
struct buffer_head *bh;
struct bfs_dirent *de;
struct bfs_sb_info *info = BFS_SB(dir->i_sb);
if (dentry->d_name.len > BFS_NAMELEN)
return ERR_PTR(-ENAMETOOLONG);
mutex_lock(&info->bfs_lock);
bh = bfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len, &de);
if (bh) {
unsigned long ino = (unsigned long)le16_to_cpu(de->ino);
brelse(bh);
inode = bfs_iget(dir->i_sb, ino);
if (IS_ERR(inode)) {
mutex_unlock(&info->bfs_lock);
return ERR_CAST(inode);
}
}
mutex_unlock(&info->bfs_lock);
d_add(dentry, inode);
return NULL;
}
static int bfs_link(struct dentry *old, struct inode *dir,
struct dentry *new)
{
struct inode *inode = old->d_inode;
struct bfs_sb_info *info = BFS_SB(inode->i_sb);
int err;
mutex_lock(&info->bfs_lock);
err = bfs_add_entry(dir, new->d_name.name, new->d_name.len,
inode->i_ino);
if (err) {
mutex_unlock(&info->bfs_lock);
return err;
}
inc_nlink(inode);
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
ihold(inode);
d_instantiate(new, inode);
mutex_unlock(&info->bfs_lock);
return 0;
}
static int bfs_unlink(struct inode *dir, struct dentry *dentry)
{
int error = -ENOENT;
struct inode *inode = dentry->d_inode;
struct buffer_head *bh;
struct bfs_dirent *de;
struct bfs_sb_info *info = BFS_SB(inode->i_sb);
mutex_lock(&info->bfs_lock);
bh = bfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len, &de);
if (!bh || (le16_to_cpu(de->ino) != inode->i_ino))
goto out_brelse;
if (!inode->i_nlink) {
printf("unlinking non-existent file %s:%lu (nlink=%d)\n",
inode->i_sb->s_id, inode->i_ino,
inode->i_nlink);
set_nlink(inode, 1);
}
de->ino = 0;
mark_buffer_dirty_inode(bh, dir);
dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
mark_inode_dirty(dir);
inode->i_ctime = dir->i_ctime;
inode_dec_link_count(inode);
error = 0;
out_brelse:
brelse(bh);
mutex_unlock(&info->bfs_lock);
return error;
}
static int bfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct inode *old_inode, *new_inode;
struct buffer_head *old_bh, *new_bh;
struct bfs_dirent *old_de, *new_de;
struct bfs_sb_info *info;
int error = -ENOENT;
old_bh = new_bh = NULL;
old_inode = old_dentry->d_inode;
if (S_ISDIR(old_inode->i_mode))
return -EINVAL;
info = BFS_SB(old_inode->i_sb);
mutex_lock(&info->bfs_lock);
old_bh = bfs_find_entry(old_dir,
old_dentry->d_name.name,
old_dentry->d_name.len, &old_de);
if (!old_bh || (le16_to_cpu(old_de->ino) != old_inode->i_ino))
goto end_rename;
error = -EPERM;
new_inode = new_dentry->d_inode;
new_bh = bfs_find_entry(new_dir,
new_dentry->d_name.name,
new_dentry->d_name.len, &new_de);
if (new_bh && !new_inode) {
brelse(new_bh);
new_bh = NULL;
}
if (!new_bh) {
error = bfs_add_entry(new_dir,
new_dentry->d_name.name,
new_dentry->d_name.len,
old_inode->i_ino);
if (error)
goto end_rename;
}
old_de->ino = 0;
old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
mark_inode_dirty(old_dir);
if (new_inode) {
new_inode->i_ctime = CURRENT_TIME_SEC;
inode_dec_link_count(new_inode);
}
mark_buffer_dirty_inode(old_bh, old_dir);
error = 0;
end_rename:
mutex_unlock(&info->bfs_lock);
brelse(old_bh);
brelse(new_bh);
return error;
}
const struct inode_operations bfs_dir_inops = {
.create = bfs_create,
.lookup = bfs_lookup,
.link = bfs_link,
.unlink = bfs_unlink,
.rename = bfs_rename,
};
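/*
* Scan the directory's blocks for a free dirent (ino == 0) and fill it
* in, growing i_size when the new entry lands past the current end.
*/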
static int bfs_add_entry(struct inode *dir, const unsigned char *name,
int namelen, int ino)
{
struct buffer_head *bh;
struct bfs_dirent *de;
int block, sblock, eblock, off, pos;
int i;
dprintf("name=%s, namelen=%d\n", name, namelen);
if (!namelen)
return -ENOENT;
if (namelen > BFS_NAMELEN)
return -ENAMETOOLONG;
sblock = BFS_I(dir)->i_sblock;
eblock = BFS_I(dir)->i_eblock;
for (block = sblock; block <= eblock; block++) {
bh = sb_bread(dir->i_sb, block);
if (!bh)
return -ENOSPC;
for (off = 0; off < BFS_BSIZE; off += BFS_DIRENT_SIZE) {
de = (struct bfs_dirent *)(bh->b_data + off);
if (!de->ino) {
pos = (block - sblock) * BFS_BSIZE + off;
if (pos >= dir->i_size) {
dir->i_size += BFS_DIRENT_SIZE;
dir->i_ctime = CURRENT_TIME_SEC;
}
dir->i_mtime = CURRENT_TIME_SEC;
mark_inode_dirty(dir);
de->ino = cpu_to_le16((u16)ino);
for (i = 0; i < BFS_NAMELEN; i++)
de->name[i] =
(i < namelen) ? name[i] : 0;
mark_buffer_dirty_inode(bh, dir);
brelse(bh);
return 0;
}
}
brelse(bh);
}
return -ENOSPC;
}
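/*
* Names shorter than BFS_NAMELEN are NUL-terminated in the dirent, so a
* stored name that continues past 'len' cannot match.
*/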
static inline int bfs_namecmp(int len, const unsigned char *name,
const char *buffer)
{
if ((len < BFS_NAMELEN) && buffer[len])
return 0;
return !memcmp(name, buffer, len);
}
static struct buffer_head *bfs_find_entry(struct inode *dir,
const unsigned char *name, int namelen,
struct bfs_dirent **res_dir)
{
unsigned long block = 0, offset = 0;
struct buffer_head *bh = NULL;
struct bfs_dirent *de;
*res_dir = NULL;
if (namelen > BFS_NAMELEN)
return NULL;
while (block * BFS_BSIZE + offset < dir->i_size) {
if (!bh) {
bh = sb_bread(dir->i_sb, BFS_I(dir)->i_sblock + block);
if (!bh) {
block++;
continue;
}
}
de = (struct bfs_dirent *)(bh->b_data + offset);
offset += BFS_DIRENT_SIZE;
if (le16_to_cpu(de->ino) &&
bfs_namecmp(namelen, name, de->name)) {
*res_dir = de;
return bh;
}
if (offset < bh->b_size)
continue;
brelse(bh);
bh = NULL;
offset = 0;
block++;
}
brelse(bh);
return NULL;
}
| gpl-2.0 |
RenderBroken/ghost_render_kernel | fs/coda/pioctl.c | 5734 | 2201 | /*
* Pioctl operations for Coda.
* Original version: (C) 1996 Peter Braam
* Rewritten for Linux 2.1: (C) 1997 Carnegie Mellon University
*
* Carnegie Mellon encourages users of this code to contribute improvements
* to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/namei.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include "coda_linux.h"
/* pioctl ops */
static int coda_ioctl_permission(struct inode *inode, int mask);
static long coda_pioctl(struct file *filp, unsigned int cmd,
unsigned long user_data);
/* exported from this file */
const struct inode_operations coda_ioctl_inode_operations = {
.permission = coda_ioctl_permission,
.setattr = coda_setattr,
};
const struct file_operations coda_ioctl_operations = {
.owner = THIS_MODULE,
.unlocked_ioctl = coda_pioctl,
.llseek = noop_llseek,
};
/* the coda pioctl inode ops */
static int coda_ioctl_permission(struct inode *inode, int mask)
{
return (mask & MAY_EXEC) ? -EACCES : 0;
}
static long coda_pioctl(struct file *filp, unsigned int cmd,
unsigned long user_data)
{
struct path path;
int error;
struct PioctlData data;
struct inode *inode = filp->f_dentry->d_inode;
struct inode *target_inode = NULL;
struct coda_inode_info *cnp;
/* get the Pioctl data arguments from user space */
if (copy_from_user(&data, (void __user *)user_data, sizeof(data)))
return -EINVAL;
/*
* Look up the pathname. Note that the pathname is in
* user memory, and namei takes care of this
*/
if (data.follow)
error = user_path(data.path, &path);
else
error = user_lpath(data.path, &path);
if (error)
return error;
target_inode = path.dentry->d_inode;
/* return if it is not a Coda inode */
if (target_inode->i_sb != inode->i_sb) {
error = -EINVAL;
goto out;
}
/* now proceed to make the upcall */
cnp = ITOC(target_inode);
error = venus_pioctl(inode->i_sb, &(cnp->c_fid), cmd, &data);
out:
path_put(&path);
return error;
}
| gpl-2.0 |
jcsullins/kernel-tenderloin-3.0 | drivers/message/i2o/i2o_config.c | 8038 | 26831 | /*
* I2O Configuration Interface Driver
*
* (C) Copyright 1999-2002 Red Hat
*
* Written by Alan Cox, Building Number Three Ltd
*
* Fixes/additions:
* Deepak Saxena (04/20/1999):
* Added basic ioctl() support
* Deepak Saxena (06/07/1999):
* Added software download ioctl (still testing)
* Auvo Häkkinen (09/10/1999):
* Changes to i2o_cfg_reply(), ioctl_parms()
* Added ioctl_validate()
* Taneli Vähäkangas (09/30/1999):
* Fixed ioctl_swdl()
* Taneli Vähäkangas (10/04/1999):
* Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel()
* Deepak Saxena (11/18/1999):
* Added event management support
* Alan Cox <alan@lxorguk.ukuu.org.uk>:
* 2.4 rewrite ported to 2.5
* Markus Lidel <Markus.Lidel@shadowconnect.com>:
* Added pass-thru support for Adaptec's raidutils
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "core.h"
#define SG_TABLESIZE 30
static DEFINE_MUTEX(i2o_cfg_mutex);
static long i2o_cfg_ioctl(struct file *, unsigned int, unsigned long);
static spinlock_t i2o_config_lock;
#define MODINC(x,y) ((x) = ((x) + 1) % (y))
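/* MODINC: circular-buffer index increment for the per-file event queue */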
struct sg_simple_element {
u32 flag_count;
u32 addr_bus;
};
struct i2o_cfg_info {
struct file *fp;
struct fasync_struct *fasync;
struct i2o_evt_info event_q[I2O_EVT_Q_LEN];
u16 q_in; // Queue head index
u16 q_out; // Queue tail index
u16 q_len; // Queue length
u16 q_lost; // Number of lost events
ulong q_id; // Event queue ID...used as tx_context
struct i2o_cfg_info *next;
};
static struct i2o_cfg_info *open_files = NULL;
static ulong i2o_cfg_info_id = 0;
static int i2o_cfg_getiops(unsigned long arg)
{
struct i2o_controller *c;
u8 __user *user_iop_table = (void __user *)arg;
u8 tmp[MAX_I2O_CONTROLLERS];
int ret = 0;
memset(tmp, 0, MAX_I2O_CONTROLLERS);
list_for_each_entry(c, &i2o_controllers, list)
tmp[c->unit] = 1;
if (copy_to_user(user_iop_table, tmp, MAX_I2O_CONTROLLERS))
ret = -EFAULT;
return ret;
};
static int i2o_cfg_gethrt(unsigned long arg)
{
struct i2o_controller *c;
struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
struct i2o_cmd_hrtlct kcmd;
i2o_hrt *hrt;
int len;
u32 reslen;
int ret = 0;
if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
return -EFAULT;
if (get_user(reslen, kcmd.reslen) < 0)
return -EFAULT;
if (kcmd.resbuf == NULL)
return -EFAULT;
c = i2o_find_iop(kcmd.iop);
if (!c)
return -ENXIO;
hrt = (i2o_hrt *) c->hrt.virt;
len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);
if (put_user(len, kcmd.reslen))
ret = -EFAULT;
else if (len > reslen)
ret = -ENOBUFS;
else if (copy_to_user(kcmd.resbuf, (void *)hrt, len))
ret = -EFAULT;
return ret;
};
static int i2o_cfg_getlct(unsigned long arg)
{
struct i2o_controller *c;
struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
struct i2o_cmd_hrtlct kcmd;
i2o_lct *lct;
int len;
int ret = 0;
u32 reslen;
if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
return -EFAULT;
if (get_user(reslen, kcmd.reslen) < 0)
return -EFAULT;
if (kcmd.resbuf == NULL)
return -EFAULT;
c = i2o_find_iop(kcmd.iop);
if (!c)
return -ENXIO;
lct = (i2o_lct *) c->lct;
len = (unsigned int)lct->table_size << 2;
if (put_user(len, kcmd.reslen))
ret = -EFAULT;
else if (len > reslen)
ret = -ENOBUFS;
else if (copy_to_user(kcmd.resbuf, lct, len))
ret = -EFAULT;
return ret;
};
static int i2o_cfg_parms(unsigned long arg, unsigned int type)
{
int ret = 0;
struct i2o_controller *c;
struct i2o_device *dev;
struct i2o_cmd_psetget __user *cmd =
(struct i2o_cmd_psetget __user *)arg;
struct i2o_cmd_psetget kcmd;
u32 reslen;
u8 *ops;
u8 *res;
int len = 0;
u32 i2o_cmd = (type == I2OPARMGET ?
I2O_CMD_UTIL_PARAMS_GET : I2O_CMD_UTIL_PARAMS_SET);
if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget)))
return -EFAULT;
if (get_user(reslen, kcmd.reslen))
return -EFAULT;
c = i2o_find_iop(kcmd.iop);
if (!c)
return -ENXIO;
dev = i2o_iop_find_device(c, kcmd.tid);
if (!dev)
return -ENXIO;
ops = memdup_user(kcmd.opbuf, kcmd.oplen);
if (IS_ERR(ops))
return PTR_ERR(ops);
/*
* It's possible to have a _very_ large table
* and that the user asks for all of it at once...
*/
res = kmalloc(65536, GFP_KERNEL);
if (!res) {
kfree(ops);
return -ENOMEM;
}
len = i2o_parm_issue(dev, i2o_cmd, ops, kcmd.oplen, res, 65536);
kfree(ops);
if (len < 0) {
kfree(res);
return -EAGAIN;
}
if (put_user(len, kcmd.reslen))
ret = -EFAULT;
else if (len > reslen)
ret = -ENOBUFS;
else if (copy_to_user(kcmd.resbuf, res, len))
ret = -EFAULT;
kfree(res);
return ret;
};
static int i2o_cfg_swdl(unsigned long arg)
{
struct i2o_sw_xfer kxfer;
struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
unsigned char maxfrag = 0, curfrag = 1;
struct i2o_dma buffer;
struct i2o_message *msg;
unsigned int status = 0, swlen = 0, fragsize = 8192;
struct i2o_controller *c;
if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
return -EFAULT;
if (get_user(swlen, kxfer.swlen) < 0)
return -EFAULT;
if (get_user(maxfrag, kxfer.maxfrag) < 0)
return -EFAULT;
if (get_user(curfrag, kxfer.curfrag) < 0)
return -EFAULT;
if (curfrag == maxfrag)
fragsize = swlen - (maxfrag - 1) * 8192;
if (!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize))
return -EFAULT;
c = i2o_find_iop(kxfer.iop);
if (!c)
return -ENXIO;
msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
if (IS_ERR(msg))
return PTR_ERR(msg);
if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
i2o_msg_nop(c, msg);
return -ENOMEM;
}
if (__copy_from_user(buffer.virt, kxfer.buf, fragsize)) {
i2o_msg_nop(c, msg);
i2o_dma_free(&c->pdev->dev, &buffer);
return -EFAULT;
}
msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
msg->u.head[1] =
cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 |
ADAPTER_TID);
msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
msg->u.head[3] = cpu_to_le32(0);
msg->body[0] =
cpu_to_le32((((u32) kxfer.flags) << 24) | (((u32) kxfer.
sw_type) << 16) |
(((u32) maxfrag) << 8) | (((u32) curfrag)));
msg->body[1] = cpu_to_le32(swlen);
msg->body[2] = cpu_to_le32(kxfer.sw_id);
msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
msg->body[4] = cpu_to_le32(buffer.phys);
osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
if (status != -ETIMEDOUT)
i2o_dma_free(&c->pdev->dev, &buffer);
if (status != I2O_POST_WAIT_OK) {
// it fails if you try and send frags out of order
// and for some yet unknown reasons too
osm_info("swdl failed, DetailedStatus = %d\n", status);
return status;
}
return 0;
};
static int i2o_cfg_swul(unsigned long arg)
{
struct i2o_sw_xfer kxfer;
struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
unsigned char maxfrag = 0, curfrag = 1;
struct i2o_dma buffer;
struct i2o_message *msg;
unsigned int status = 0, swlen = 0, fragsize = 8192;
struct i2o_controller *c;
int ret = 0;
if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
return -EFAULT;
if (get_user(swlen, kxfer.swlen) < 0)
return -EFAULT;
if (get_user(maxfrag, kxfer.maxfrag) < 0)
return -EFAULT;
if (get_user(curfrag, kxfer.curfrag) < 0)
return -EFAULT;
if (curfrag == maxfrag)
fragsize = swlen - (maxfrag - 1) * 8192;
if (!kxfer.buf)
return -EFAULT;
c = i2o_find_iop(kxfer.iop);
if (!c)
return -ENXIO;
msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
if (IS_ERR(msg))
return PTR_ERR(msg);
if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
i2o_msg_nop(c, msg);
return -ENOMEM;
}
msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
msg->u.head[1] =
cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID);
msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
msg->u.head[3] = cpu_to_le32(0);
msg->body[0] =
cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.
sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag);
msg->body[1] = cpu_to_le32(swlen);
msg->body[2] = cpu_to_le32(kxfer.sw_id);
msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
msg->body[4] = cpu_to_le32(buffer.phys);
osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
if (status != I2O_POST_WAIT_OK) {
if (status != -ETIMEDOUT)
i2o_dma_free(&c->pdev->dev, &buffer);
osm_info("swul failed, DetailedStatus = %d\n", status);
return status;
}
if (copy_to_user(kxfer.buf, buffer.virt, fragsize))
ret = -EFAULT;
i2o_dma_free(&c->pdev->dev, &buffer);
return ret;
}
static int i2o_cfg_swdel(unsigned long arg)
{
struct i2o_controller *c;
struct i2o_sw_xfer kxfer;
struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
struct i2o_message *msg;
unsigned int swlen;
int token;
if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
return -EFAULT;
if (get_user(swlen, kxfer.swlen) < 0)
return -EFAULT;
c = i2o_find_iop(kxfer.iop);
if (!c)
return -ENXIO;
msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
if (IS_ERR(msg))
return PTR_ERR(msg);
msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
msg->u.head[1] =
cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
msg->u.head[3] = cpu_to_le32(0);
msg->body[0] =
cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16);
msg->body[1] = cpu_to_le32(swlen);
msg->body[2] = cpu_to_le32(kxfer.sw_id);
token = i2o_msg_post_wait(c, msg, 10);
if (token != I2O_POST_WAIT_OK) {
osm_info("swdel failed, DetailedStatus = %d\n", token);
return -ETIMEDOUT;
}
return 0;
};
static int i2o_cfg_validate(unsigned long arg)
{
int token;
int iop = (int)arg;
struct i2o_message *msg;
struct i2o_controller *c;
c = i2o_find_iop(iop);
if (!c)
return -ENXIO;
msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
if (IS_ERR(msg))
return PTR_ERR(msg);
msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
msg->u.head[1] =
cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop);
msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
msg->u.head[3] = cpu_to_le32(0);
token = i2o_msg_post_wait(c, msg, 10);
if (token != I2O_POST_WAIT_OK) {
osm_info("Can't validate configuration, ErrorStatus = %d\n",
token);
return -ETIMEDOUT;
}
return 0;
};
static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
{
struct i2o_message *msg;
struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
struct i2o_evt_id kdesc;
struct i2o_controller *c;
struct i2o_device *d;
if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id)))
return -EFAULT;
/* IOP exists? */
c = i2o_find_iop(kdesc.iop);
if (!c)
return -ENXIO;
/* Device exists? */
d = i2o_iop_find_device(c, kdesc.tid);
if (!d)
return -ENODEV;
msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
if (IS_ERR(msg))
return PTR_ERR(msg);
msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
msg->u.head[1] =
cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
kdesc.tid);
msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data));
msg->body[0] = cpu_to_le32(kdesc.evt_mask);
i2o_msg_post(c, msg);
return 0;
}
static int i2o_cfg_evt_get(unsigned long arg, struct file *fp)
{
struct i2o_cfg_info *p = NULL;
struct i2o_evt_get __user *uget = (struct i2o_evt_get __user *)arg;
struct i2o_evt_get kget;
unsigned long flags;
for (p = open_files; p; p = p->next)
if (p->q_id == (ulong) fp->private_data)
break;
if (!p || !p->q_len)
return -ENOENT;
memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info));
MODINC(p->q_out, I2O_EVT_Q_LEN);
spin_lock_irqsave(&i2o_config_lock, flags);
p->q_len--;
kget.pending = p->q_len;
kget.lost = p->q_lost;
spin_unlock_irqrestore(&i2o_config_lock, flags);
if (copy_to_user(uget, &kget, sizeof(struct i2o_evt_get)))
return -EFAULT;
return 0;
}
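/*
* The pass-thru ioctls copy a raw I2O message in from userspace, remap
* any simple scatter-gather elements into kernel DMA buffers, post the
* message, and then copy the buffers and the reply frame back out.
*/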
#ifdef CONFIG_COMPAT
static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
unsigned long arg)
{
struct i2o_cmd_passthru32 __user *cmd;
struct i2o_controller *c;
u32 __user *user_msg;
u32 *reply = NULL;
u32 __user *user_reply = NULL;
u32 size = 0;
u32 reply_size = 0;
u32 rcode = 0;
struct i2o_dma sg_list[SG_TABLESIZE];
u32 sg_offset = 0;
u32 sg_count = 0;
u32 i = 0;
u32 sg_index = 0;
i2o_status_block *sb;
struct i2o_message *msg;
unsigned int iop;
cmd = (struct i2o_cmd_passthru32 __user *)arg;
if (get_user(iop, &cmd->iop) || get_user(i, &cmd->msg))
return -EFAULT;
user_msg = compat_ptr(i);
c = i2o_find_iop(iop);
if (!c) {
osm_debug("controller %d not found\n", iop);
return -ENXIO;
}
sb = c->status_block.virt;
if (get_user(size, &user_msg[0])) {
osm_warn("unable to get size!\n");
return -EFAULT;
}
size = size >> 16;
if (size > sb->inbound_frame_size) {
osm_warn("size of message > inbound_frame_size");
return -EFAULT;
}
user_reply = &user_msg[size];
size <<= 2; // Convert to bytes
msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
if (IS_ERR(msg))
return PTR_ERR(msg);
rcode = -EFAULT;
/* Copy in the user's I2O command */
if (copy_from_user(msg, user_msg, size)) {
osm_warn("unable to copy user message\n");
goto out;
}
i2o_dump_message(msg);
if (get_user(reply_size, &user_reply[0]) < 0)
goto out;
reply_size >>= 16;
reply_size <<= 2;
rcode = -ENOMEM;
reply = kzalloc(reply_size, GFP_KERNEL);
if (!reply) {
printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
c->name);
goto out;
}
sg_offset = (msg->u.head[0] >> 4) & 0x0f;
memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
if (sg_offset) {
struct sg_simple_element *sg;
if (sg_offset * 4 >= size) {
rcode = -EFAULT;
goto cleanup;
}
// TODO 64bit fix
sg = (struct sg_simple_element *)((&msg->u.head[0]) +
sg_offset);
sg_count =
(size - sg_offset * 4) / sizeof(struct sg_simple_element);
if (sg_count > SG_TABLESIZE) {
printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
c->name, sg_count);
rcode = -EINVAL;
goto cleanup;
}
for (i = 0; i < sg_count; i++) {
int sg_size;
struct i2o_dma *p;
if (!(sg[i].flag_count & 0x10000000
/*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) {
printk(KERN_DEBUG
"%s:Bad SG element %d - not simple (%x)\n",
c->name, i, sg[i].flag_count);
rcode = -EINVAL;
goto cleanup;
}
sg_size = sg[i].flag_count & 0xffffff;
p = &(sg_list[sg_index]);
/* Allocate memory for the transfer */
if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
printk(KERN_DEBUG
"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
c->name, sg_size, i, sg_count);
rcode = -ENOMEM;
goto sg_list_cleanup;
}
sg_index++;
/* Copy in the user's SG buffer if necessary */
if (sg[i].
flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
// TODO 64bit fix
if (copy_from_user
(p->virt,
(void __user *)(unsigned long)sg[i].
addr_bus, sg_size)) {
printk(KERN_DEBUG
"%s: Could not copy SG buf %d FROM user\n",
c->name, i);
rcode = -EFAULT;
goto sg_list_cleanup;
}
}
//TODO 64bit fix
sg[i].addr_bus = (u32) p->phys;
}
}
rcode = i2o_msg_post_wait(c, msg, 60);
msg = NULL;
if (rcode) {
reply[4] = ((u32) rcode) << 24;
goto sg_list_cleanup;
}
if (sg_offset) {
u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
/* Copy back the Scatter Gather buffers back to user space */
u32 j;
// TODO 64bit fix
struct sg_simple_element *sg;
int sg_size;
// re-acquire the original message to handle correctly the sg copy operation
memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
// get user msg size in u32s
if (get_user(size, &user_msg[0])) {
rcode = -EFAULT;
goto sg_list_cleanup;
}
size = size >> 16;
size *= 4;
/* Copy in the user's I2O command */
if (copy_from_user(rmsg, user_msg, size)) {
rcode = -EFAULT;
goto sg_list_cleanup;
}
sg_count =
(size - sg_offset * 4) / sizeof(struct sg_simple_element);
// TODO 64bit fix
sg = (struct sg_simple_element *)(rmsg + sg_offset);
for (j = 0; j < sg_count; j++) {
/* Copy out the SG list to user's buffer if necessary */
if (!
(sg[j].
flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) {
sg_size = sg[j].flag_count & 0xffffff;
// TODO 64bit fix
if (copy_to_user
((void __user *)(u64) sg[j].addr_bus,
sg_list[j].virt, sg_size)) {
printk(KERN_WARNING
"%s: Could not copy %p TO user %x\n",
c->name, sg_list[j].virt,
sg[j].addr_bus);
rcode = -EFAULT;
goto sg_list_cleanup;
}
}
}
}
sg_list_cleanup:
/* Copy back the reply to user space */
if (reply_size) {
// we wrote our own values for context - now restore the user supplied ones
if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
printk(KERN_WARNING
"%s: Could not copy message context FROM user\n",
c->name);
rcode = -EFAULT;
}
if (copy_to_user(user_reply, reply, reply_size)) {
printk(KERN_WARNING
"%s: Could not copy reply TO user\n", c->name);
rcode = -EFAULT;
}
}
for (i = 0; i < sg_index; i++)
i2o_dma_free(&c->pdev->dev, &sg_list[i]);
cleanup:
kfree(reply);
out:
if (msg)
i2o_msg_nop(c, msg);
return rcode;
}
static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
unsigned long arg)
{
int ret;
mutex_lock(&i2o_cfg_mutex);
switch (cmd) {
case I2OGETIOPS:
ret = i2o_cfg_ioctl(file, cmd, arg);
break;
case I2OPASSTHRU32:
ret = i2o_cfg_passthru32(file, cmd, arg);
break;
default:
ret = -ENOIOCTLCMD;
break;
}
mutex_unlock(&i2o_cfg_mutex);
return ret;
}
#endif
#ifdef CONFIG_I2O_EXT_ADAPTEC
static int i2o_cfg_passthru(unsigned long arg)
{
struct i2o_cmd_passthru __user *cmd =
(struct i2o_cmd_passthru __user *)arg;
struct i2o_controller *c;
u32 __user *user_msg;
u32 *reply = NULL;
u32 __user *user_reply = NULL;
u32 size = 0;
u32 reply_size = 0;
u32 rcode = 0;
struct i2o_dma sg_list[SG_TABLESIZE];
u32 sg_offset = 0;
u32 sg_count = 0;
int sg_index = 0;
u32 i = 0;
i2o_status_block *sb;
struct i2o_message *msg;
unsigned int iop;
if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
return -EFAULT;
c = i2o_find_iop(iop);
if (!c) {
osm_warn("controller %d not found\n", iop);
return -ENXIO;
}
sb = c->status_block.virt;
if (get_user(size, &user_msg[0]))
return -EFAULT;
size = size >> 16;
if (size > sb->inbound_frame_size) {
osm_warn("size of message > inbound_frame_size");
return -EFAULT;
}
user_reply = &user_msg[size];
size <<= 2; // Convert to bytes
msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
if (IS_ERR(msg))
return PTR_ERR(msg);
rcode = -EFAULT;
/* Copy in the user's I2O command */
if (copy_from_user(msg, user_msg, size))
goto out;
if (get_user(reply_size, &user_reply[0]) < 0)
goto out;
reply_size >>= 16;
reply_size <<= 2;
reply = kzalloc(reply_size, GFP_KERNEL);
if (!reply) {
printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
c->name);
rcode = -ENOMEM;
goto out;
}
sg_offset = (msg->u.head[0] >> 4) & 0x0f;
memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
if (sg_offset) {
struct sg_simple_element *sg;
struct i2o_dma *p;
if (sg_offset * 4 >= size) {
rcode = -EFAULT;
goto cleanup;
}
// TODO 64bit fix
sg = (struct sg_simple_element *)((&msg->u.head[0]) +
sg_offset);
sg_count =
(size - sg_offset * 4) / sizeof(struct sg_simple_element);
if (sg_count > SG_TABLESIZE) {
printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
c->name, sg_count);
rcode = -EINVAL;
goto cleanup;
}
for (i = 0; i < sg_count; i++) {
int sg_size;
if (!(sg[i].flag_count & 0x10000000
/*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) {
printk(KERN_DEBUG
"%s:Bad SG element %d - not simple (%x)\n",
c->name, i, sg[i].flag_count);
rcode = -EINVAL;
goto sg_list_cleanup;
}
sg_size = sg[i].flag_count & 0xffffff;
p = &(sg_list[sg_index]);
if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
/* Allocate memory for the transfer */
printk(KERN_DEBUG
"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
c->name, sg_size, i, sg_count);
rcode = -ENOMEM;
goto sg_list_cleanup;
}
sg_index++;
/* Copy in the user's SG buffer if necessary */
if (sg[i].
flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
// TODO 64bit fix
if (copy_from_user
(p->virt, (void __user *)sg[i].addr_bus,
sg_size)) {
printk(KERN_DEBUG
"%s: Could not copy SG buf %d FROM user\n",
c->name, i);
rcode = -EFAULT;
goto sg_list_cleanup;
}
}
sg[i].addr_bus = p->phys;
}
}
rcode = i2o_msg_post_wait(c, msg, 60);
msg = NULL;
if (rcode) {
reply[4] = ((u32) rcode) << 24;
goto sg_list_cleanup;
}
if (sg_offset) {
u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
/* Copy back the Scatter Gather buffers back to user space */
u32 j;
// TODO 64bit fix
struct sg_simple_element *sg;
int sg_size;
// re-acquire the original message to handle correctly the sg copy operation
memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
// get user msg size in u32s
if (get_user(size, &user_msg[0])) {
rcode = -EFAULT;
goto sg_list_cleanup;
}
size = size >> 16;
size *= 4;
/* Copy in the user's I2O command */
if (copy_from_user(rmsg, user_msg, size)) {
rcode = -EFAULT;
goto sg_list_cleanup;
}
sg_count =
(size - sg_offset * 4) / sizeof(struct sg_simple_element);
// TODO 64bit fix
sg = (struct sg_simple_element *)(rmsg + sg_offset);
for (j = 0; j < sg_count; j++) {
/* Copy out the SG list to user's buffer if necessary */
if (!
(sg[j].
flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) {
sg_size = sg[j].flag_count & 0xffffff;
// TODO 64bit fix
if (copy_to_user
((void __user *)sg[j].addr_bus, sg_list[j].virt,
sg_size)) {
printk(KERN_WARNING
"%s: Could not copy %p TO user %x\n",
c->name, sg_list[j].virt,
sg[j].addr_bus);
rcode = -EFAULT;
goto sg_list_cleanup;
}
}
}
}
sg_list_cleanup:
/* Copy back the reply to user space */
if (reply_size) {
// we wrote our own values for context - now restore the user supplied ones
if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
printk(KERN_WARNING
"%s: Could not copy message context FROM user\n",
c->name);
rcode = -EFAULT;
}
if (copy_to_user(user_reply, reply, reply_size)) {
printk(KERN_WARNING
"%s: Could not copy reply TO user\n", c->name);
rcode = -EFAULT;
}
}
for (i = 0; i < sg_index; i++)
i2o_dma_free(&c->pdev->dev, &sg_list[i]);
cleanup:
kfree(reply);
out:
if (msg)
i2o_msg_nop(c, msg);
return rcode;
}
#endif
/*
* IOCTL Handler
*/
static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
int ret;
mutex_lock(&i2o_cfg_mutex);
switch (cmd) {
case I2OGETIOPS:
ret = i2o_cfg_getiops(arg);
break;
case I2OHRTGET:
ret = i2o_cfg_gethrt(arg);
break;
case I2OLCTGET:
ret = i2o_cfg_getlct(arg);
break;
case I2OPARMSET:
ret = i2o_cfg_parms(arg, I2OPARMSET);
break;
case I2OPARMGET:
ret = i2o_cfg_parms(arg, I2OPARMGET);
break;
case I2OSWDL:
ret = i2o_cfg_swdl(arg);
break;
case I2OSWUL:
ret = i2o_cfg_swul(arg);
break;
case I2OSWDEL:
ret = i2o_cfg_swdel(arg);
break;
case I2OVALIDATE:
ret = i2o_cfg_validate(arg);
break;
case I2OEVTREG:
ret = i2o_cfg_evt_reg(arg, fp);
break;
case I2OEVTGET:
ret = i2o_cfg_evt_get(arg, fp);
break;
#ifdef CONFIG_I2O_EXT_ADAPTEC
case I2OPASSTHRU:
ret = i2o_cfg_passthru(arg);
break;
#endif
default:
osm_debug("unknown ioctl called!\n");
ret = -EINVAL;
}
mutex_unlock(&i2o_cfg_mutex);
return ret;
}
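/*
* Each open of the config device allocates an i2o_cfg_info keyed by a
* monotonically increasing queue id kept in file->private_data;
* cfg_release() unlinks and frees it again.
*/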
static int cfg_open(struct inode *inode, struct file *file)
{
struct i2o_cfg_info *tmp = kmalloc(sizeof(struct i2o_cfg_info),
GFP_KERNEL);
unsigned long flags;
if (!tmp)
return -ENOMEM;
mutex_lock(&i2o_cfg_mutex);
file->private_data = (void *)(i2o_cfg_info_id++);
tmp->fp = file;
tmp->fasync = NULL;
tmp->q_id = (ulong) file->private_data;
tmp->q_len = 0;
tmp->q_in = 0;
tmp->q_out = 0;
tmp->q_lost = 0;
tmp->next = open_files;
spin_lock_irqsave(&i2o_config_lock, flags);
open_files = tmp;
spin_unlock_irqrestore(&i2o_config_lock, flags);
mutex_unlock(&i2o_cfg_mutex);
return 0;
}
static int cfg_fasync(int fd, struct file *fp, int on)
{
ulong id = (ulong) fp->private_data;
struct i2o_cfg_info *p;
int ret = -EBADF;
mutex_lock(&i2o_cfg_mutex);
for (p = open_files; p; p = p->next)
if (p->q_id == id)
break;
if (p)
ret = fasync_helper(fd, fp, on, &p->fasync);
mutex_unlock(&i2o_cfg_mutex);
return ret;
}
static int cfg_release(struct inode *inode, struct file *file)
{
ulong id = (ulong) file->private_data;
struct i2o_cfg_info *p, **q;
unsigned long flags;
mutex_lock(&i2o_cfg_mutex);
spin_lock_irqsave(&i2o_config_lock, flags);
for (q = &open_files; (p = *q) != NULL; q = &p->next) {
if (p->q_id == id) {
*q = p->next;
kfree(p);
break;
}
}
spin_unlock_irqrestore(&i2o_config_lock, flags);
mutex_unlock(&i2o_cfg_mutex);
return 0;
}
static const struct file_operations config_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = i2o_cfg_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = i2o_cfg_compat_ioctl,
#endif
.open = cfg_open,
.release = cfg_release,
.fasync = cfg_fasync,
};
static struct miscdevice i2o_miscdev = {
I2O_MINOR,
"i2octl",
&config_fops
};
static int __init i2o_config_old_init(void)
{
spin_lock_init(&i2o_config_lock);
if (misc_register(&i2o_miscdev) < 0) {
osm_err("can't register device.\n");
return -EBUSY;
}
return 0;
}
static void i2o_config_old_exit(void)
{
misc_deregister(&i2o_miscdev);
}
MODULE_AUTHOR("Red Hat Software");
| gpl-2.0 |
coreentin/android_kernel_nvidia_s8515 | Documentation/connector/cn_test.c | 9062 | 4694 | /*
* cn_test.c
*
* 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) "cn_test: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/connector.h>
static struct cb_id cn_test_id = { CN_NETLINK_USERS + 3, 0x456 };
static char cn_test_name[] = "cn_test";
static struct sock *nls;
static struct timer_list cn_test_timer;
static void cn_test_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
pr_info("%s: %lu: idx=%x, val=%x, seq=%u, ack=%u, len=%d: %s.\n",
__func__, jiffies, msg->id.idx, msg->id.val,
msg->seq, msg->ack, msg->len,
msg->len ? (char *)msg->data : "");
}
/*
* Do not remove this function even if no one is using it, as
* it is an example of how to get notifications about new
* connector user registrations.
*/
#if 0
static int cn_test_want_notify(void)
{
struct cn_ctl_msg *ctl;
struct cn_notify_req *req;
struct cn_msg *msg = NULL;
int size, size0;
struct sk_buff *skb;
struct nlmsghdr *nlh;
u32 group = 1;
size0 = sizeof(*msg) + sizeof(*ctl) + 3 * sizeof(*req);
size = NLMSG_SPACE(size0);
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb) {
pr_err("failed to allocate new skb with size=%u\n", size);
return -ENOMEM;
}
nlh = NLMSG_PUT(skb, 0, 0x123, NLMSG_DONE, size - sizeof(*nlh));
msg = (struct cn_msg *)NLMSG_DATA(nlh);
memset(msg, 0, size0);
msg->id.idx = -1;
msg->id.val = -1;
msg->seq = 0x123;
msg->ack = 0x345;
msg->len = size0 - sizeof(*msg);
ctl = (struct cn_ctl_msg *)(msg + 1);
ctl->idx_notify_num = 1;
ctl->val_notify_num = 2;
ctl->group = group;
ctl->len = msg->len - sizeof(*ctl);
req = (struct cn_notify_req *)(ctl + 1);
/*
* Idx.
*/
req->first = cn_test_id.idx;
req->range = 10;
/*
* Val 0.
*/
req++;
req->first = cn_test_id.val;
req->range = 10;
/*
* Val 1.
*/
req++;
req->first = cn_test_id.val + 20;
req->range = 10;
NETLINK_CB(skb).dst_group = ctl->group;
//netlink_broadcast(nls, skb, 0, ctl->group, GFP_ATOMIC);
netlink_unicast(nls, skb, 0, 0);
pr_info("request was sent: group=0x%x\n", ctl->group);
return 0;
nlmsg_failure:
pr_err("failed to send %u.%u\n", msg->seq, msg->ack);
kfree_skb(skb);
return -EINVAL;
}
#endif
static u32 cn_test_timer_counter;
static void cn_test_timer_func(unsigned long __data)
{
struct cn_msg *m;
char data[32];
pr_debug("%s: timer fired with data %lu\n", __func__, __data);
m = kzalloc(sizeof(*m) + sizeof(data), GFP_ATOMIC);
if (m) {
memcpy(&m->id, &cn_test_id, sizeof(m->id));
m->seq = cn_test_timer_counter;
m->len = sizeof(data);
m->len =
scnprintf(data, sizeof(data), "counter = %u",
cn_test_timer_counter) + 1;
memcpy(m + 1, data, m->len);
cn_netlink_send(m, 0, GFP_ATOMIC);
kfree(m);
}
cn_test_timer_counter++;
mod_timer(&cn_test_timer, jiffies + msecs_to_jiffies(1000));
}
static int cn_test_init(void)
{
int err;
err = cn_add_callback(&cn_test_id, cn_test_name, cn_test_callback);
if (err)
goto err_out;
cn_test_id.val++;
err = cn_add_callback(&cn_test_id, cn_test_name, cn_test_callback);
if (err) {
cn_del_callback(&cn_test_id);
goto err_out;
}
setup_timer(&cn_test_timer, cn_test_timer_func, 0);
mod_timer(&cn_test_timer, jiffies + msecs_to_jiffies(1000));
pr_info("initialized with id={%u.%u}\n",
cn_test_id.idx, cn_test_id.val);
return 0;
err_out:
if (nls && nls->sk_socket)
sock_release(nls->sk_socket);
return err;
}
static void cn_test_fini(void)
{
del_timer_sync(&cn_test_timer);
cn_del_callback(&cn_test_id);
cn_test_id.val--;
cn_del_callback(&cn_test_id);
if (nls && nls->sk_socket)
sock_release(nls->sk_socket);
}
module_init(cn_test_init);
module_exit(cn_test_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Connector's test module");
| gpl-2.0 |
MoKee/android_kernel_htc_dlx | arch/x86/kernel/audit_64.c | 13158 | 1870 | #include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
static unsigned read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
static unsigned signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int audit_classify_arch(int arch)
{
#ifdef CONFIG_IA32_EMULATION
if (arch == AUDIT_ARCH_I386)
return 1;
#endif
return 0;
}
int audit_classify_syscall(int abi, unsigned syscall)
{
#ifdef CONFIG_IA32_EMULATION
extern int ia32_classify_syscall(unsigned);
if (abi == AUDIT_ARCH_I386)
return ia32_classify_syscall(syscall);
#endif
switch(syscall) {
case __NR_open:
return 2;
case __NR_openat:
return 3;
case __NR_execve:
return 5;
default:
return 0;
}
}
static int __init audit_classes_init(void)
{
#ifdef CONFIG_IA32_EMULATION
extern __u32 ia32_dir_class[];
extern __u32 ia32_write_class[];
extern __u32 ia32_read_class[];
extern __u32 ia32_chattr_class[];
extern __u32 ia32_signal_class[];
audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class);
audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class);
audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL_32, ia32_signal_class);
#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
return 0;
}
__initcall(audit_classes_init);
| gpl-2.0 |
LeonardKoenig/android_kernel_coolpad_8860U | arch/x86/kernel/audit_64.c | 13158 | 1870 | #include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
static unsigned read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
static unsigned signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int audit_classify_arch(int arch)
{
#ifdef CONFIG_IA32_EMULATION
if (arch == AUDIT_ARCH_I386)
return 1;
#endif
return 0;
}
int audit_classify_syscall(int abi, unsigned syscall)
{
#ifdef CONFIG_IA32_EMULATION
extern int ia32_classify_syscall(unsigned);
if (abi == AUDIT_ARCH_I386)
return ia32_classify_syscall(syscall);
#endif
switch(syscall) {
case __NR_open:
return 2;
case __NR_openat:
return 3;
case __NR_execve:
return 5;
default:
return 0;
}
}
static int __init audit_classes_init(void)
{
#ifdef CONFIG_IA32_EMULATION
extern __u32 ia32_dir_class[];
extern __u32 ia32_write_class[];
extern __u32 ia32_read_class[];
extern __u32 ia32_chattr_class[];
extern __u32 ia32_signal_class[];
audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class);
audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class);
audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL_32, ia32_signal_class);
#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
return 0;
}
__initcall(audit_classes_init);
| gpl-2.0 |
evaautomation/linux | drivers/media/v4l2-core/videobuf2-memops.c | 103 | 3862 | /*
* videobuf2-memops.c - generic memory handling routines for videobuf2
*
* Copyright (C) 2010 Samsung Electronics
*
* Author: Pawel Osciak <pawel@osciak.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
/**
* vb2_create_framevec() - map virtual addresses to pfns
* @start: Virtual user address where we start mapping
* @length: Length of a range to map
* @write: Should we map for writing into the area
*
* This function allocates and fills in a vector with pfns corresponding to
* virtual address range passed in arguments. If pfns have corresponding pages,
* page references are also grabbed to pin pages in memory. The function
* returns pointer to the vector on success and error pointer in case of
* failure. Returned vector needs to be freed via vb2_destroy_pfnvec().
*/
struct frame_vector *vb2_create_framevec(unsigned long start,
unsigned long length,
bool write)
{
int ret;
unsigned long first, last;
unsigned long nr;
struct frame_vector *vec;
unsigned int flags = FOLL_FORCE;
if (write)
flags |= FOLL_WRITE;
first = start >> PAGE_SHIFT;
last = (start + length - 1) >> PAGE_SHIFT;
nr = last - first + 1;
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
if (ret < 0)
goto out_destroy;
/* We accept only a complete set of PFNs */
if (ret != nr) {
ret = -EFAULT;
goto out_release;
}
return vec;
out_release:
put_vaddr_frames(vec);
out_destroy:
frame_vector_destroy(vec);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(vb2_create_framevec);
/**
* vb2_destroy_framevec() - release vector of mapped pfns
* @vec: vector of pfns / pages to release
*
* This releases references to all pages in the vector @vec (if corresponding
* pfns are backed by pages) and frees the passed vector.
*/
void vb2_destroy_framevec(struct frame_vector *vec)
{
put_vaddr_frames(vec);
frame_vector_destroy(vec);
}
EXPORT_SYMBOL(vb2_destroy_framevec);
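#if 0
/*
 * Illustrative sketch only, not part of the original file: a minimal
 * example of pinning a userspace buffer with vb2_create_framevec() and
 * releasing it again with vb2_destroy_framevec().  The function name,
 * address and size here are assumptions made up for the example.
 */
static int example_pin_user_buffer(unsigned long vaddr, unsigned long size)
{
struct frame_vector *vec;
/* map and pin the user range for writing (e.g. a CAPTURE buffer) */
vec = vb2_create_framevec(vaddr, size, true);
if (IS_ERR(vec))
return PTR_ERR(vec);
pr_info("pinned %u frames for %lu bytes at 0x%lx\n",
frame_vector_count(vec), size, vaddr);
/* ... program DMA using frame_vector_pages()/frame_vector_pfns() ... */
/* drop the page references and free the vector again */
vb2_destroy_framevec(vec);
return 0;
}
#endif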
/**
* vb2_common_vm_open() - increase refcount of the vma
* @vma: virtual memory region for the mapping
*
* This function adds another user to the provided vma. It expects
* struct vb2_vmarea_handler pointer in vma->vm_private_data.
*/
static void vb2_common_vm_open(struct vm_area_struct *vma)
{
struct vb2_vmarea_handler *h = vma->vm_private_data;
pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
__func__, h, refcount_read(h->refcount), vma->vm_start,
vma->vm_end);
refcount_inc(h->refcount);
}
/**
* vb2_common_vm_close() - decrease refcount of the vma
* @vma: virtual memory region for the mapping
*
* This function releases the user from the provided vma. It expects
* struct vb2_vmarea_handler pointer in vma->vm_private_data.
*/
static void vb2_common_vm_close(struct vm_area_struct *vma)
{
struct vb2_vmarea_handler *h = vma->vm_private_data;
pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
__func__, h, refcount_read(h->refcount), vma->vm_start,
vma->vm_end);
h->put(h->arg);
}
/**
* vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
* video buffers
*/
const struct vm_operations_struct vb2_common_vm_ops = {
.open = vb2_common_vm_open,
.close = vb2_common_vm_close,
};
EXPORT_SYMBOL_GPL(vb2_common_vm_ops);
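#if 0
/*
 * Illustrative sketch only, not part of the original file: roughly how a
 * vb2 memory backend wires vb2_common_vm_ops into its mmap handler so
 * that vb2_common_vm_open()/close() track users of the mapping.  The
 * my_vb2_buf type and its fields are assumptions for this example; real
 * backends such as videobuf2-vmalloc follow the same pattern.
 */
struct my_vb2_buf {
void *vaddr;
struct vb2_vmarea_handler handler;
refcount_t refcount;
};
static int example_mmap(void *buf_priv, struct vm_area_struct *vma)
{
struct my_vb2_buf *buf = buf_priv;
int ret;
ret = remap_vmalloc_range(vma, buf->vaddr, 0);
if (ret)
return ret;
/* let vb2_common_vm_open/close refcount this mapping */
vma->vm_flags |= VM_DONTEXPAND;
vma->vm_private_data = &buf->handler;
vma->vm_ops = &vb2_common_vm_ops;
vma->vm_ops->open(vma);
return 0;
}
#endif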
MODULE_DESCRIPTION("common memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
fanyukui/linux3.12.10 | arch/arm/mach-hisi/platsmp.c | 359 | 2185 | /*
* Copyright (c) 2013 Linaro Ltd.
* Copyright (c) 2013 Hisilicon Limited.
* Based on arch/arm/mach-vexpress/platsmp.c, Copyright (C) 2002 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include "core.h"
static void __iomem *ctrl_base;
void hi3xxx_set_cpu_jump(int cpu, void *jump_addr)
{
cpu = cpu_logical_map(cpu);
if (!cpu || !ctrl_base)
return;
writel_relaxed(virt_to_phys(jump_addr), ctrl_base + ((cpu - 1) << 2));
}
int hi3xxx_get_cpu_jump(int cpu)
{
cpu = cpu_logical_map(cpu);
if (!cpu || !ctrl_base)
return 0;
return readl_relaxed(ctrl_base + ((cpu - 1) << 2));
}
static void __init hi3xxx_smp_prepare_cpus(unsigned int max_cpus)
{
struct device_node *np = NULL;
unsigned long base = 0;
u32 offset = 0;
void __iomem *scu_base = NULL;
if (scu_a9_has_base()) {
base = scu_a9_get_base();
scu_base = ioremap(base, SZ_4K);
if (!scu_base) {
pr_err("ioremap(scu_base) failed\n");
return;
}
scu_enable(scu_base);
iounmap(scu_base);
}
if (!ctrl_base) {
np = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
if (!np) {
pr_err("failed to find hisilicon,sysctrl node\n");
return;
}
ctrl_base = of_iomap(np, 0);
if (!ctrl_base) {
pr_err("failed to map address\n");
return;
}
if (of_property_read_u32(np, "smp-offset", &offset) < 0) {
pr_err("failed to find smp-offset property\n");
return;
}
ctrl_base += offset;
}
}
static int hi3xxx_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
hi3xxx_set_cpu(cpu, true);
hi3xxx_set_cpu_jump(cpu, secondary_startup);
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
return 0;
}
struct smp_operations hi3xxx_smp_ops __initdata = {
.smp_prepare_cpus = hi3xxx_smp_prepare_cpus,
.smp_boot_secondary = hi3xxx_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_die = hi3xxx_cpu_die,
.cpu_kill = hi3xxx_cpu_kill,
#endif
};
| gpl-2.0 |
kbukin1/pnotify-linux-4.1.6 | lib/sort.c | 359 | 2604 | /*
* A fast, small, non-recursive O(nlog n) sort for the Linux kernel
*
* Jan 23 2005 Matt Mackall <mpm@selenic.com>
*/
#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>
static void u32_swap(void *a, void *b, int size)
{
u32 t = *(u32 *)a;
*(u32 *)a = *(u32 *)b;
*(u32 *)b = t;
}
static void generic_swap(void *a, void *b, int size)
{
char t;
do {
t = *(char *)a;
*(char *)a++ = *(char *)b;
*(char *)b++ = t;
} while (--size > 0);
}
/**
* sort - sort an array of elements
* @base: pointer to data to sort
* @num: number of elements
* @size: size of each element
* @cmp_func: pointer to comparison function
* @swap_func: pointer to swap function or NULL
*
* This function does a heapsort on the given array. You may provide a
* swap_func function optimized to your element type.
*
* Sorting time is O(n log n) both on average and worst-case. While
* qsort is about 20% faster on average, it suffers from exploitable
* O(n*n) worst-case behavior and extra memory requirements that make
* it less suitable for kernel use.
*/
void sort(void *base, size_t num, size_t size,
int (*cmp_func)(const void *, const void *),
void (*swap_func)(void *, void *, int size))
{
/* pre-scale counters for performance */
int i = (num/2 - 1) * size, n = num * size, c, r;
if (!swap_func)
swap_func = (size == 4 ? u32_swap : generic_swap);
/* heapify */
for ( ; i >= 0; i -= size) {
for (r = i; r * 2 + size < n; r = c) {
c = r * 2 + size;
if (c < n - size &&
cmp_func(base + c, base + c + size) < 0)
c += size;
if (cmp_func(base + r, base + c) >= 0)
break;
swap_func(base + r, base + c, size);
}
}
/* sort */
for (i = n - size; i > 0; i -= size) {
swap_func(base, base + i, size);
for (r = 0; r * 2 + size < i; r = c) {
c = r * 2 + size;
if (c < i - size &&
cmp_func(base + c, base + c + size) < 0)
c += size;
if (cmp_func(base + r, base + c) >= 0)
break;
swap_func(base + r, base + c, size);
}
}
}
EXPORT_SYMBOL(sort);
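#if 0
/*
 * Illustrative sketch only, not part of the original file: sorting an
 * array of structures by key with sort().  Passing NULL as swap_func
 * falls back to the generic byte-wise swap above; callers may instead
 * supply a swap optimized for their element size.  The example_item
 * type and helper names are assumptions for this example.
 */
struct example_item {
int key;
int payload;
};
static int example_item_cmp(const void *a, const void *b)
{
const struct example_item *ia = a, *ib = b;
return ia->key - ib->key;
}
static void example_sort_items(struct example_item *items, size_t num)
{
sort(items, num, sizeof(*items), example_item_cmp, NULL);
}
#endif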
#if 0
#include <linux/slab.h>
/* a simple boot-time regression test */
int cmpint(const void *a, const void *b)
{
return *(int *)a - *(int *)b;
}
static int sort_test(void)
{
int *a, i, r = 1;
a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
BUG_ON(!a);
printk("testing sort()\n");
for (i = 0; i < 1000; i++) {
r = (r * 725861) % 6599;
a[i] = r;
}
sort(a, 1000, sizeof(int), cmpint, NULL);
for (i = 0; i < 999; i++)
if (a[i] > a[i+1]) {
printk("sort() failed!\n");
break;
}
kfree(a);
return 0;
}
module_init(sort_test);
#endif
| gpl-2.0 |
lmajewski/linux-samsung-thermal | arch/mips/loongson1/common/time.c | 615 | 6029 | /*
* Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <asm/time.h>
#include <loongson1.h>
#include <platform.h>
#ifdef CONFIG_CEVT_CSRC_LS1X
#if defined(CONFIG_TIMER_USE_PWM1)
#define LS1X_TIMER_BASE LS1X_PWM1_BASE
#define LS1X_TIMER_IRQ LS1X_PWM1_IRQ
#elif defined(CONFIG_TIMER_USE_PWM2)
#define LS1X_TIMER_BASE LS1X_PWM2_BASE
#define LS1X_TIMER_IRQ LS1X_PWM2_IRQ
#elif defined(CONFIG_TIMER_USE_PWM3)
#define LS1X_TIMER_BASE LS1X_PWM3_BASE
#define LS1X_TIMER_IRQ LS1X_PWM3_IRQ
#else
#define LS1X_TIMER_BASE LS1X_PWM0_BASE
#define LS1X_TIMER_IRQ LS1X_PWM0_IRQ
#endif
DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
static void __iomem *timer_base;
static uint32_t ls1x_jiffies_per_tick;
static inline void ls1x_pwmtimer_set_period(uint32_t period)
{
__raw_writel(period, timer_base + PWM_HRC);
__raw_writel(period, timer_base + PWM_LRC);
}
static inline void ls1x_pwmtimer_restart(void)
{
__raw_writel(0x0, timer_base + PWM_CNT);
__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
}
void __init ls1x_pwmtimer_init(void)
{
timer_base = ioremap(LS1X_TIMER_BASE, 0xf);
if (!timer_base)
panic("Failed to remap timer registers");
ls1x_jiffies_per_tick = DIV_ROUND_CLOSEST(mips_hpt_frequency, HZ);
ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
ls1x_pwmtimer_restart();
}
static cycle_t ls1x_clocksource_read(struct clocksource *cs)
{
unsigned long flags;
int count;
u32 jifs;
static int old_count;
static u32 old_jifs;
raw_spin_lock_irqsave(&ls1x_timer_lock, flags);
/*
* Although our caller may have the read side of xtime_lock,
* this is now a seqlock, and we are cheating in this routine
* by having side effects on state that we cannot undo if
* there is a collision on the seqlock and our caller has to
* retry. (Namely, old_jifs and old_count.) So we must treat
* jiffies as volatile despite the lock. We read jiffies
* before latching the timer count to guarantee that although
* the jiffies value might be older than the count (that is,
* the counter may underflow between the last point where
* jiffies was incremented and the point where we latch the
* count), it cannot be newer.
*/
jifs = jiffies;
/* read the count */
count = __raw_readl(timer_base + PWM_CNT);
/*
* It's possible for count to appear to go the wrong way for this
* reason:
*
* The timer counter underflows, but we haven't handled the resulting
* interrupt and incremented jiffies yet.
*
* Previous attempts to handle these cases intelligently were buggy, so
* we just do the simple thing now.
*/
if (count < old_count && jifs == old_jifs)
count = old_count;
old_count = count;
old_jifs = jifs;
raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags);
return (cycle_t) (jifs * ls1x_jiffies_per_tick) + count;
}
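/*
 * Worked example of the clamp above, assuming ls1x_jiffies_per_tick is
 * 1000: if the counter has restarted at 0 after reaching the period but
 * the timer interrupt has not yet incremented jiffies, a raw read could
 * see jifs unchanged while count drops from e.g. 990 to 5, so
 * jifs * 1000 + count would move backwards.  Re-using old_count until
 * jiffies catches up keeps the returned cycle value monotonic across
 * that window.
 */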
static struct clocksource ls1x_clocksource = {
.name = "ls1x-pwmtimer",
.read = ls1x_clocksource_read,
.mask = CLOCKSOURCE_MASK(24),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static irqreturn_t ls1x_clockevent_isr(int irq, void *devid)
{
struct clock_event_device *cd = devid;
ls1x_pwmtimer_restart();
cd->event_handler(cd);
return IRQ_HANDLED;
}
static void ls1x_clockevent_set_mode(enum clock_event_mode mode,
struct clock_event_device *cd)
{
raw_spin_lock(&ls1x_timer_lock);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
ls1x_pwmtimer_restart();
case CLOCK_EVT_MODE_RESUME:
__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
break;
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_SHUTDOWN:
__raw_writel(__raw_readl(timer_base + PWM_CTRL) & ~CNT_EN,
timer_base + PWM_CTRL);
break;
default:
break;
}
raw_spin_unlock(&ls1x_timer_lock);
}
static int ls1x_clockevent_set_next(unsigned long evt,
struct clock_event_device *cd)
{
raw_spin_lock(&ls1x_timer_lock);
ls1x_pwmtimer_set_period(evt);
ls1x_pwmtimer_restart();
raw_spin_unlock(&ls1x_timer_lock);
return 0;
}
static struct clock_event_device ls1x_clockevent = {
.name = "ls1x-pwmtimer",
.features = CLOCK_EVT_FEAT_PERIODIC,
.rating = 300,
.irq = LS1X_TIMER_IRQ,
.set_next_event = ls1x_clockevent_set_next,
.set_mode = ls1x_clockevent_set_mode,
};
static struct irqaction ls1x_pwmtimer_irqaction = {
.name = "ls1x-pwmtimer",
.handler = ls1x_clockevent_isr,
.dev_id = &ls1x_clockevent,
.flags = IRQF_PERCPU | IRQF_TIMER,
};
static void __init ls1x_time_init(void)
{
struct clock_event_device *cd = &ls1x_clockevent;
int ret;
if (!mips_hpt_frequency)
panic("Invalid timer clock rate");
ls1x_pwmtimer_init();
clockevent_set_clock(cd, mips_hpt_frequency);
cd->max_delta_ns = clockevent_delta2ns(0xffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x000300, cd);
cd->cpumask = cpumask_of(smp_processor_id());
clockevents_register_device(cd);
ls1x_clocksource.rating = 200 + mips_hpt_frequency / 10000000;
ret = clocksource_register_hz(&ls1x_clocksource, mips_hpt_frequency);
if (ret)
panic("Failed to register clocksource: %d\n", ret);
setup_irq(LS1X_TIMER_IRQ, &ls1x_pwmtimer_irqaction);
}
#endif /* CONFIG_CEVT_CSRC_LS1X */
void __init plat_time_init(void)
{
struct clk *clk = NULL;
/* initialize LS1X clocks */
ls1x_clk_init();
#ifdef CONFIG_CEVT_CSRC_LS1X
/* setup LS1X PWM timer */
clk = clk_get(NULL, "ls1x_pwmtimer");
if (IS_ERR(clk))
panic("unable to get timer clock, err=%ld", PTR_ERR(clk));
mips_hpt_frequency = clk_get_rate(clk);
ls1x_time_init();
#else
/* setup mips r4k timer */
clk = clk_get(NULL, "cpu_clk");
if (IS_ERR(clk))
panic("unable to get cpu clock, err=%ld", PTR_ERR(clk));
mips_hpt_frequency = clk_get_rate(clk) / 2;
#endif /* CONFIG_CEVT_CSRC_LS1X */
}
| gpl-2.0 |
jianpingye/linux | sound/soc/codecs/wm8958-dsp2.c | 871 | 27850 | /*
* wm8958-dsp2.c -- WM8958 DSP2 support
*
* Copyright 2011 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include <trace/events/asoc.h>
#include <linux/mfd/wm8994/core.h>
#include <linux/mfd/wm8994/registers.h>
#include <linux/mfd/wm8994/pdata.h>
#include <linux/mfd/wm8994/gpio.h>
#include "wm8994.h"
#define WM_FW_BLOCK_INFO 0xff
#define WM_FW_BLOCK_PM 0x00
#define WM_FW_BLOCK_X 0x01
#define WM_FW_BLOCK_Y 0x02
#define WM_FW_BLOCK_Z 0x03
#define WM_FW_BLOCK_I 0x06
#define WM_FW_BLOCK_A 0x08
#define WM_FW_BLOCK_C 0x0c
static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
const struct firmware *fw, bool check)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
u64 data64;
u32 data32;
const u8 *data;
char *str;
size_t block_len, len;
int ret = 0;
/* Suppress unneeded downloads */
if (wm8994->cur_fw == fw)
return 0;
if (fw->size < 32) {
dev_err(codec->dev, "%s: firmware too short (%zd bytes)\n",
name, fw->size);
goto err;
}
if (memcmp(fw->data, "WMFW", 4) != 0) {
memcpy(&data32, fw->data, sizeof(data32));
data32 = be32_to_cpu(data32);
dev_err(codec->dev, "%s: firmware has bad file magic %08x\n",
name, data32);
goto err;
}
memcpy(&data32, fw->data + 4, sizeof(data32));
len = be32_to_cpu(data32);
memcpy(&data32, fw->data + 8, sizeof(data32));
data32 = be32_to_cpu(data32);
if ((data32 >> 24) & 0xff) {
dev_err(codec->dev, "%s: unsupported firmware version %d\n",
name, (data32 >> 24) & 0xff);
goto err;
}
if ((data32 & 0xffff) != 8958) {
dev_err(codec->dev, "%s: unsupported target device %d\n",
name, data32 & 0xffff);
goto err;
}
if (((data32 >> 16) & 0xff) != 0xc) {
dev_err(codec->dev, "%s: unsupported target core %d\n",
name, (data32 >> 16) & 0xff);
goto err;
}
if (check) {
memcpy(&data64, fw->data + 24, sizeof(u64));
dev_info(codec->dev, "%s timestamp %llx\n",
name, be64_to_cpu(data64));
} else {
snd_soc_write(codec, 0x102, 0x2);
snd_soc_write(codec, 0x900, 0x2);
}
data = fw->data + len;
len = fw->size - len;
while (len) {
if (len < 12) {
dev_err(codec->dev, "%s short data block of %zd\n",
name, len);
goto err;
}
memcpy(&data32, data + 4, sizeof(data32));
block_len = be32_to_cpu(data32);
if (block_len + 8 > len) {
dev_err(codec->dev, "%zd byte block longer than file\n",
block_len);
goto err;
}
if (block_len == 0) {
dev_err(codec->dev, "Zero length block\n");
goto err;
}
memcpy(&data32, data, sizeof(data32));
data32 = be32_to_cpu(data32);
switch ((data32 >> 24) & 0xff) {
case WM_FW_BLOCK_INFO:
/* Informational text */
if (!check)
break;
str = kzalloc(block_len + 1, GFP_KERNEL);
if (str) {
memcpy(str, data + 8, block_len);
dev_info(codec->dev, "%s: %s\n", name, str);
kfree(str);
} else {
dev_err(codec->dev, "Out of memory\n");
}
break;
case WM_FW_BLOCK_PM:
case WM_FW_BLOCK_X:
case WM_FW_BLOCK_Y:
case WM_FW_BLOCK_Z:
case WM_FW_BLOCK_I:
case WM_FW_BLOCK_A:
case WM_FW_BLOCK_C:
dev_dbg(codec->dev, "%s: %zd bytes of %x@%x\n", name,
block_len, (data32 >> 24) & 0xff,
data32 & 0xffffff);
if (check)
break;
data32 &= 0xffffff;
wm8994_bulk_write(wm8994->wm8994,
data32 & 0xffffff,
block_len / 2,
(void *)(data + 8));
break;
default:
dev_warn(codec->dev, "%s: unknown block type %d\n",
name, (data32 >> 24) & 0xff);
break;
}
/* Round up to the next 32 bit word */
block_len += block_len % 4;
data += block_len + 8;
len -= block_len + 8;
}
if (!check) {
dev_dbg(codec->dev, "%s: download done\n", name);
wm8994->cur_fw = fw;
} else {
dev_info(codec->dev, "%s: got firmware\n", name);
}
goto ok;
err:
ret = -EINVAL;
ok:
if (!check) {
snd_soc_write(codec, 0x900, 0x0);
snd_soc_write(codec, 0x102, 0x0);
}
return ret;
}
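#if 0
/*
 * Illustrative sketch, inferred only from the checks in wm8958_dsp2_fw()
 * above: the ".wfw" header and block layout the parser expects.  The
 * struct and field names are assumptions for documentation purposes;
 * all multi-byte fields are big-endian.
 */
struct wmfw_header_sketch {
u8 magic[4]; /* "WMFW" */
__be32 header_len; /* file offset of the first data block */
__be32 version_core_dev; /* [31:24] file version, must be 0
* [23:16] target core, must be 0xc
* [15:0]  target device, must be 8958 */
u8 unknown[12]; /* not interpreted by this driver */
__be64 timestamp; /* logged when the firmware is checked */
}; /* 32 bytes: the minimum accepted file size */
struct wmfw_block_sketch {
__be32 type_addr; /* [31:24] WM_FW_BLOCK_*, [23:0] DSP address */
__be32 len; /* payload length in bytes */
u8 data[]; /* payload, written via wm8994_bulk_write() */
};
#endif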
static void wm8958_dsp_start_mbc(struct snd_soc_codec *codec, int path)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
int i;
/* If the DSP is already running then noop */
if (snd_soc_read(codec, WM8958_DSP2_PROGRAM) & WM8958_DSP2_ENA)
return;
/* If we have MBC firmware download it */
if (wm8994->mbc)
wm8958_dsp2_fw(codec, "MBC", wm8994->mbc, false);
snd_soc_update_bits(codec, WM8958_DSP2_PROGRAM,
WM8958_DSP2_ENA, WM8958_DSP2_ENA);
/* If we've got user supplied MBC settings use them */
if (control->pdata.num_mbc_cfgs) {
struct wm8958_mbc_cfg *cfg
= &control->pdata.mbc_cfgs[wm8994->mbc_cfg];
for (i = 0; i < ARRAY_SIZE(cfg->coeff_regs); i++)
snd_soc_write(codec, i + WM8958_MBC_BAND_1_K_1,
cfg->coeff_regs[i]);
for (i = 0; i < ARRAY_SIZE(cfg->cutoff_regs); i++)
snd_soc_write(codec,
i + WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_1,
cfg->cutoff_regs[i]);
}
/* Run the DSP */
snd_soc_write(codec, WM8958_DSP2_EXECCONTROL,
WM8958_DSP2_RUNR);
/* And we're off! */
snd_soc_update_bits(codec, WM8958_DSP2_CONFIG,
WM8958_MBC_ENA |
WM8958_MBC_SEL_MASK,
path << WM8958_MBC_SEL_SHIFT |
WM8958_MBC_ENA);
}
static void wm8958_dsp_start_vss(struct snd_soc_codec *codec, int path)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
int i, ena;
if (wm8994->mbc_vss)
wm8958_dsp2_fw(codec, "MBC+VSS", wm8994->mbc_vss, false);
snd_soc_update_bits(codec, WM8958_DSP2_PROGRAM,
WM8958_DSP2_ENA, WM8958_DSP2_ENA);
/* If we've got user supplied settings use them */
if (control->pdata.num_mbc_cfgs) {
struct wm8958_mbc_cfg *cfg
= &control->pdata.mbc_cfgs[wm8994->mbc_cfg];
for (i = 0; i < ARRAY_SIZE(cfg->combined_regs); i++)
snd_soc_write(codec, i + 0x2800,
cfg->combined_regs[i]);
}
if (control->pdata.num_vss_cfgs) {
struct wm8958_vss_cfg *cfg
= &control->pdata.vss_cfgs[wm8994->vss_cfg];
for (i = 0; i < ARRAY_SIZE(cfg->regs); i++)
snd_soc_write(codec, i + 0x2600, cfg->regs[i]);
}
if (control->pdata.num_vss_hpf_cfgs) {
struct wm8958_vss_hpf_cfg *cfg
= &control->pdata.vss_hpf_cfgs[wm8994->vss_hpf_cfg];
for (i = 0; i < ARRAY_SIZE(cfg->regs); i++)
snd_soc_write(codec, i + 0x2400, cfg->regs[i]);
}
/* Run the DSP */
snd_soc_write(codec, WM8958_DSP2_EXECCONTROL,
WM8958_DSP2_RUNR);
/* Enable the algorithms we've selected */
ena = 0;
if (wm8994->mbc_ena[path])
ena |= 0x8;
if (wm8994->hpf2_ena[path])
ena |= 0x4;
if (wm8994->hpf1_ena[path])
ena |= 0x2;
if (wm8994->vss_ena[path])
ena |= 0x1;
snd_soc_write(codec, 0x2201, ena);
/* Switch the DSP into the data path */
snd_soc_update_bits(codec, WM8958_DSP2_CONFIG,
WM8958_MBC_SEL_MASK | WM8958_MBC_ENA,
path << WM8958_MBC_SEL_SHIFT | WM8958_MBC_ENA);
}
static void wm8958_dsp_start_enh_eq(struct snd_soc_codec *codec, int path)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
int i;
wm8958_dsp2_fw(codec, "ENH_EQ", wm8994->enh_eq, false);
snd_soc_update_bits(codec, WM8958_DSP2_PROGRAM,
WM8958_DSP2_ENA, WM8958_DSP2_ENA);
/* If we've got user supplied settings use them */
if (control->pdata.num_enh_eq_cfgs) {
struct wm8958_enh_eq_cfg *cfg
= &control->pdata.enh_eq_cfgs[wm8994->enh_eq_cfg];
for (i = 0; i < ARRAY_SIZE(cfg->regs); i++)
snd_soc_write(codec, i + 0x2200,
cfg->regs[i]);
}
/* Run the DSP */
snd_soc_write(codec, WM8958_DSP2_EXECCONTROL,
WM8958_DSP2_RUNR);
/* Switch the DSP into the data path */
snd_soc_update_bits(codec, WM8958_DSP2_CONFIG,
WM8958_MBC_SEL_MASK | WM8958_MBC_ENA,
path << WM8958_MBC_SEL_SHIFT | WM8958_MBC_ENA);
}
static void wm8958_dsp_apply(struct snd_soc_codec *codec, int path, int start)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
int pwr_reg = snd_soc_read(codec, WM8994_POWER_MANAGEMENT_5);
int ena, reg, aif;
switch (path) {
case 0:
pwr_reg &= (WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA);
aif = 0;
break;
case 1:
pwr_reg &= (WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA);
aif = 0;
break;
case 2:
pwr_reg &= (WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA);
aif = 1;
break;
default:
WARN(1, "Invalid path %d\n", path);
return;
}
/* Do we have both an active AIF and an active algorithm? */
ena = wm8994->mbc_ena[path] || wm8994->vss_ena[path] ||
wm8994->hpf1_ena[path] || wm8994->hpf2_ena[path] ||
wm8994->enh_eq_ena[path];
if (!pwr_reg)
ena = 0;
reg = snd_soc_read(codec, WM8958_DSP2_PROGRAM);
dev_dbg(codec->dev, "DSP path %d %d startup: %d, power: %x, DSP: %x\n",
path, wm8994->dsp_active, start, pwr_reg, reg);
if (start && ena) {
/* If the DSP is already running then noop */
if (reg & WM8958_DSP2_ENA)
return;
/* If either AIFnCLK is not yet enabled postpone */
if (!(snd_soc_read(codec, WM8994_AIF1_CLOCKING_1)
& WM8994_AIF1CLK_ENA_MASK) &&
!(snd_soc_read(codec, WM8994_AIF2_CLOCKING_1)
& WM8994_AIF2CLK_ENA_MASK))
return;
/* Switch the clock over to the appropriate AIF */
snd_soc_update_bits(codec, WM8994_CLOCKING_1,
WM8958_DSP2CLK_SRC | WM8958_DSP2CLK_ENA,
aif << WM8958_DSP2CLK_SRC_SHIFT |
WM8958_DSP2CLK_ENA);
if (wm8994->enh_eq_ena[path])
wm8958_dsp_start_enh_eq(codec, path);
else if (wm8994->vss_ena[path] || wm8994->hpf1_ena[path] ||
wm8994->hpf2_ena[path])
wm8958_dsp_start_vss(codec, path);
else if (wm8994->mbc_ena[path])
wm8958_dsp_start_mbc(codec, path);
wm8994->dsp_active = path;
dev_dbg(codec->dev, "DSP running in path %d\n", path);
}
if (!start && wm8994->dsp_active == path) {
/* If the DSP is already stopped then noop */
if (!(reg & WM8958_DSP2_ENA))
return;
snd_soc_update_bits(codec, WM8958_DSP2_CONFIG,
WM8958_MBC_ENA, 0);
snd_soc_write(codec, WM8958_DSP2_EXECCONTROL,
WM8958_DSP2_STOP);
snd_soc_update_bits(codec, WM8958_DSP2_PROGRAM,
WM8958_DSP2_ENA, 0);
snd_soc_update_bits(codec, WM8994_CLOCKING_1,
WM8958_DSP2CLK_ENA, 0);
wm8994->dsp_active = -1;
dev_dbg(codec->dev, "DSP stopped\n");
}
}
int wm8958_aif_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
int i;
switch (event) {
case SND_SOC_DAPM_POST_PMU:
case SND_SOC_DAPM_PRE_PMU:
for (i = 0; i < 3; i++)
wm8958_dsp_apply(codec, i, 1);
break;
case SND_SOC_DAPM_POST_PMD:
case SND_SOC_DAPM_PRE_PMD:
for (i = 0; i < 3; i++)
wm8958_dsp_apply(codec, i, 0);
break;
}
return 0;
}
/* Check if DSP2 is in use on another AIF */
static int wm8958_dsp2_busy(struct wm8994_priv *wm8994, int aif)
{
int i;
for (i = 0; i < ARRAY_SIZE(wm8994->mbc_ena); i++) {
if (i == aif)
continue;
if (wm8994->mbc_ena[i] || wm8994->vss_ena[i] ||
wm8994->hpf1_ena[i] || wm8994->hpf2_ena[i])
return 1;
}
return 0;
}
static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
int value = ucontrol->value.integer.value[0];
int reg;
/* Don't allow on the fly reconfiguration */
reg = snd_soc_read(codec, WM8994_CLOCKING_1);
if (reg < 0 || reg & WM8958_DSP2CLK_ENA)
return -EBUSY;
if (value >= control->pdata.num_mbc_cfgs)
return -EINVAL;
wm8994->mbc_cfg = value;
return 0;
}
static int wm8958_get_mbc_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
ucontrol->value.enumerated.item[0] = wm8994->mbc_cfg;
return 0;
}
static int wm8958_mbc_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
static int wm8958_mbc_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int mbc = kcontrol->private_value;
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
ucontrol->value.integer.value[0] = wm8994->mbc_ena[mbc];
return 0;
}
static int wm8958_mbc_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int mbc = kcontrol->private_value;
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
if (wm8994->mbc_ena[mbc] == ucontrol->value.integer.value[0])
return 0;
if (ucontrol->value.integer.value[0] > 1)
return -EINVAL;
if (wm8958_dsp2_busy(wm8994, mbc)) {
dev_dbg(codec->dev, "DSP2 active on %d already\n", mbc);
return -EBUSY;
}
if (wm8994->enh_eq_ena[mbc])
return -EBUSY;
wm8994->mbc_ena[mbc] = ucontrol->value.integer.value[0];
wm8958_dsp_apply(codec, mbc, wm8994->mbc_ena[mbc]);
return 0;
}
#define WM8958_MBC_SWITCH(xname, xval) {\
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,\
.info = wm8958_mbc_info, \
.get = wm8958_mbc_get, .put = wm8958_mbc_put, \
.private_value = xval }
static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
int value = ucontrol->value.integer.value[0];
int reg;
/* Don't allow on the fly reconfiguration */
reg = snd_soc_read(codec, WM8994_CLOCKING_1);
if (reg < 0 || reg & WM8958_DSP2CLK_ENA)
return -EBUSY;
if (value >= control->pdata.num_vss_cfgs)
return -EINVAL;
wm8994->vss_cfg = value;
return 0;
}
static int wm8958_get_vss_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
ucontrol->value.enumerated.item[0] = wm8994->vss_cfg;
return 0;
}
static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
int value = ucontrol->value.integer.value[0];
int reg;
/* Don't allow on the fly reconfiguration */
reg = snd_soc_read(codec, WM8994_CLOCKING_1);
if (reg < 0 || reg & WM8958_DSP2CLK_ENA)
return -EBUSY;
if (value >= control->pdata.num_vss_hpf_cfgs)
return -EINVAL;
wm8994->vss_hpf_cfg = value;
return 0;
}
static int wm8958_get_vss_hpf_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
ucontrol->value.enumerated.item[0] = wm8994->vss_hpf_cfg;
return 0;
}
static int wm8958_vss_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
static int wm8958_vss_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int vss = kcontrol->private_value;
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
ucontrol->value.integer.value[0] = wm8994->vss_ena[vss];
return 0;
}
static int wm8958_vss_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int vss = kcontrol->private_value;
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
if (wm8994->vss_ena[vss] == ucontrol->value.integer.value[0])
return 0;
if (ucontrol->value.integer.value[0] > 1)
return -EINVAL;
if (!wm8994->mbc_vss)
return -ENODEV;
if (wm8958_dsp2_busy(wm8994, vss)) {
dev_dbg(codec->dev, "DSP2 active on %d already\n", vss);
return -EBUSY;
}
if (wm8994->enh_eq_ena[vss])
return -EBUSY;
wm8994->vss_ena[vss] = ucontrol->value.integer.value[0];
wm8958_dsp_apply(codec, vss, wm8994->vss_ena[vss]);
return 0;
}
#define WM8958_VSS_SWITCH(xname, xval) {\
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,\
.info = wm8958_vss_info, \
.get = wm8958_vss_get, .put = wm8958_vss_put, \
.private_value = xval }
static int wm8958_hpf_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
static int wm8958_hpf_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int hpf = kcontrol->private_value;
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
if (hpf < 3)
ucontrol->value.integer.value[0] = wm8994->hpf1_ena[hpf % 3];
else
ucontrol->value.integer.value[0] = wm8994->hpf2_ena[hpf % 3];
return 0;
}
static int wm8958_hpf_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int hpf = kcontrol->private_value;
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
if (hpf < 3) {
if (wm8994->hpf1_ena[hpf % 3] ==
ucontrol->value.integer.value[0])
return 0;
} else {
if (wm8994->hpf2_ena[hpf % 3] ==
ucontrol->value.integer.value[0])
return 0;
}
if (ucontrol->value.integer.value[0] > 1)
return -EINVAL;
if (!wm8994->mbc_vss)
return -ENODEV;
if (wm8958_dsp2_busy(wm8994, hpf % 3)) {
dev_dbg(codec->dev, "DSP2 active on %d already\n", hpf);
return -EBUSY;
}
if (wm8994->enh_eq_ena[hpf % 3])
return -EBUSY;
if (hpf < 3)
wm8994->hpf1_ena[hpf % 3] = ucontrol->value.integer.value[0];
else
wm8994->hpf2_ena[hpf % 3] = ucontrol->value.integer.value[0];
wm8958_dsp_apply(codec, hpf % 3, ucontrol->value.integer.value[0]);
return 0;
}
#define WM8958_HPF_SWITCH(xname, xval) {\
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,\
.info = wm8958_hpf_info, \
.get = wm8958_hpf_get, .put = wm8958_hpf_put, \
.private_value = xval }
static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
int value = ucontrol->value.integer.value[0];
int reg;
/* Don't allow on the fly reconfiguration */
reg = snd_soc_read(codec, WM8994_CLOCKING_1);
if (reg < 0 || reg & WM8958_DSP2CLK_ENA)
return -EBUSY;
if (value >= control->pdata.num_enh_eq_cfgs)
return -EINVAL;
wm8994->enh_eq_cfg = value;
return 0;
}
static int wm8958_get_enh_eq_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
ucontrol->value.enumerated.item[0] = wm8994->enh_eq_cfg;
return 0;
}
static int wm8958_enh_eq_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
static int wm8958_enh_eq_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int eq = kcontrol->private_value;
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
ucontrol->value.integer.value[0] = wm8994->enh_eq_ena[eq];
return 0;
}
static int wm8958_enh_eq_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int eq = kcontrol->private_value;
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
if (wm8994->enh_eq_ena[eq] == ucontrol->value.integer.value[0])
return 0;
if (ucontrol->value.integer.value[0] > 1)
return -EINVAL;
if (!wm8994->enh_eq)
return -ENODEV;
if (wm8958_dsp2_busy(wm8994, eq)) {
dev_dbg(codec->dev, "DSP2 active on %d already\n", eq);
return -EBUSY;
}
if (wm8994->mbc_ena[eq] || wm8994->vss_ena[eq] ||
wm8994->hpf1_ena[eq] || wm8994->hpf2_ena[eq])
return -EBUSY;
wm8994->enh_eq_ena[eq] = ucontrol->value.integer.value[0];
wm8958_dsp_apply(codec, eq, ucontrol->value.integer.value[0]);
return 0;
}
#define WM8958_ENH_EQ_SWITCH(xname, xval) {\
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,\
.info = wm8958_enh_eq_info, \
.get = wm8958_enh_eq_get, .put = wm8958_enh_eq_put, \
.private_value = xval }
static const struct snd_kcontrol_new wm8958_mbc_snd_controls[] = {
WM8958_MBC_SWITCH("AIF1DAC1 MBC Switch", 0),
WM8958_MBC_SWITCH("AIF1DAC2 MBC Switch", 1),
WM8958_MBC_SWITCH("AIF2DAC MBC Switch", 2),
};
static const struct snd_kcontrol_new wm8958_vss_snd_controls[] = {
WM8958_VSS_SWITCH("AIF1DAC1 VSS Switch", 0),
WM8958_VSS_SWITCH("AIF1DAC2 VSS Switch", 1),
WM8958_VSS_SWITCH("AIF2DAC VSS Switch", 2),
WM8958_HPF_SWITCH("AIF1DAC1 HPF1 Switch", 0),
WM8958_HPF_SWITCH("AIF1DAC2 HPF1 Switch", 1),
WM8958_HPF_SWITCH("AIF2DAC HPF1 Switch", 2),
WM8958_HPF_SWITCH("AIF1DAC1 HPF2 Switch", 3),
WM8958_HPF_SWITCH("AIF1DAC2 HPF2 Switch", 4),
WM8958_HPF_SWITCH("AIF2DAC HPF2 Switch", 5),
};
static const struct snd_kcontrol_new wm8958_enh_eq_snd_controls[] = {
WM8958_ENH_EQ_SWITCH("AIF1DAC1 Enhanced EQ Switch", 0),
WM8958_ENH_EQ_SWITCH("AIF1DAC2 Enhanced EQ Switch", 1),
WM8958_ENH_EQ_SWITCH("AIF2DAC Enhanced EQ Switch", 2),
};
static void wm8958_enh_eq_loaded(const struct firmware *fw, void *context)
{
struct snd_soc_codec *codec = context;
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
if (fw && (wm8958_dsp2_fw(codec, "ENH_EQ", fw, true) == 0)) {
mutex_lock(&wm8994->fw_lock);
wm8994->enh_eq = fw;
mutex_unlock(&wm8994->fw_lock);
}
}
static void wm8958_mbc_vss_loaded(const struct firmware *fw, void *context)
{
struct snd_soc_codec *codec = context;
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
if (fw && (wm8958_dsp2_fw(codec, "MBC+VSS", fw, true) == 0)) {
mutex_lock(&wm8994->fw_lock);
wm8994->mbc_vss = fw;
mutex_unlock(&wm8994->fw_lock);
}
}
static void wm8958_mbc_loaded(const struct firmware *fw, void *context)
{
struct snd_soc_codec *codec = context;
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
if (fw && (wm8958_dsp2_fw(codec, "MBC", fw, true) == 0)) {
mutex_lock(&wm8994->fw_lock);
wm8994->mbc = fw;
mutex_unlock(&wm8994->fw_lock);
}
}
void wm8958_dsp2_init(struct snd_soc_codec *codec)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
struct wm8994_pdata *pdata = &control->pdata;
int ret, i;
wm8994->dsp_active = -1;
snd_soc_add_codec_controls(codec, wm8958_mbc_snd_controls,
ARRAY_SIZE(wm8958_mbc_snd_controls));
snd_soc_add_codec_controls(codec, wm8958_vss_snd_controls,
ARRAY_SIZE(wm8958_vss_snd_controls));
snd_soc_add_codec_controls(codec, wm8958_enh_eq_snd_controls,
ARRAY_SIZE(wm8958_enh_eq_snd_controls));
/* We don't *require* firmware and don't want to delay boot */
request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
"wm8958_mbc.wfw", codec->dev, GFP_KERNEL,
codec, wm8958_mbc_loaded);
request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
"wm8958_mbc_vss.wfw", codec->dev, GFP_KERNEL,
codec, wm8958_mbc_vss_loaded);
request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
"wm8958_enh_eq.wfw", codec->dev, GFP_KERNEL,
codec, wm8958_enh_eq_loaded);
if (pdata->num_mbc_cfgs) {
struct snd_kcontrol_new control[] = {
SOC_ENUM_EXT("MBC Mode", wm8994->mbc_enum,
wm8958_get_mbc_enum, wm8958_put_mbc_enum),
};
/* We need an array of texts for the enum API */
wm8994->mbc_texts = kmalloc(sizeof(char *)
* pdata->num_mbc_cfgs, GFP_KERNEL);
if (!wm8994->mbc_texts)
return;
for (i = 0; i < pdata->num_mbc_cfgs; i++)
wm8994->mbc_texts[i] = pdata->mbc_cfgs[i].name;
wm8994->mbc_enum.items = pdata->num_mbc_cfgs;
wm8994->mbc_enum.texts = wm8994->mbc_texts;
ret = snd_soc_add_codec_controls(wm8994->hubs.codec,
control, 1);
if (ret != 0)
dev_err(wm8994->hubs.codec->dev,
"Failed to add MBC mode controls: %d\n", ret);
}
if (pdata->num_vss_cfgs) {
struct snd_kcontrol_new control[] = {
SOC_ENUM_EXT("VSS Mode", wm8994->vss_enum,
wm8958_get_vss_enum, wm8958_put_vss_enum),
};
/* We need an array of texts for the enum API */
wm8994->vss_texts = kmalloc(sizeof(char *)
* pdata->num_vss_cfgs, GFP_KERNEL);
if (!wm8994->vss_texts)
return;
for (i = 0; i < pdata->num_vss_cfgs; i++)
wm8994->vss_texts[i] = pdata->vss_cfgs[i].name;
wm8994->vss_enum.items = pdata->num_vss_cfgs;
wm8994->vss_enum.texts = wm8994->vss_texts;
ret = snd_soc_add_codec_controls(wm8994->hubs.codec,
control, 1);
if (ret != 0)
dev_err(wm8994->hubs.codec->dev,
"Failed to add VSS mode controls: %d\n", ret);
}
if (pdata->num_vss_hpf_cfgs) {
struct snd_kcontrol_new control[] = {
SOC_ENUM_EXT("VSS HPF Mode", wm8994->vss_hpf_enum,
wm8958_get_vss_hpf_enum,
wm8958_put_vss_hpf_enum),
};
/* We need an array of texts for the enum API */
wm8994->vss_hpf_texts = kmalloc(sizeof(char *)
* pdata->num_vss_hpf_cfgs, GFP_KERNEL);
if (!wm8994->vss_hpf_texts)
return;
for (i = 0; i < pdata->num_vss_hpf_cfgs; i++)
wm8994->vss_hpf_texts[i] = pdata->vss_hpf_cfgs[i].name;
wm8994->vss_hpf_enum.items = pdata->num_vss_hpf_cfgs;
wm8994->vss_hpf_enum.texts = wm8994->vss_hpf_texts;
ret = snd_soc_add_codec_controls(wm8994->hubs.codec,
control, 1);
if (ret != 0)
dev_err(wm8994->hubs.codec->dev,
"Failed to add VSS HPFmode controls: %d\n",
ret);
}
if (pdata->num_enh_eq_cfgs) {
struct snd_kcontrol_new control[] = {
SOC_ENUM_EXT("Enhanced EQ Mode", wm8994->enh_eq_enum,
wm8958_get_enh_eq_enum,
wm8958_put_enh_eq_enum),
};
/* We need an array of texts for the enum API */
wm8994->enh_eq_texts = kmalloc(sizeof(char *)
* pdata->num_enh_eq_cfgs, GFP_KERNEL);
if (!wm8994->enh_eq_texts)
return;
for (i = 0; i < pdata->num_enh_eq_cfgs; i++)
wm8994->enh_eq_texts[i] = pdata->enh_eq_cfgs[i].name;
wm8994->enh_eq_enum.items = pdata->num_enh_eq_cfgs;
wm8994->enh_eq_enum.texts = wm8994->enh_eq_texts;
ret = snd_soc_add_codec_controls(wm8994->hubs.codec,
control, 1);
if (ret != 0)
dev_err(wm8994->hubs.codec->dev,
"Failed to add enhanced EQ controls: %d\n",
ret);
}
}
| gpl-2.0 |
Compulsion/linux-stable | drivers/i2c/busses/i2c-designware-baytrail.c | 871 | 3862 | /*
* Intel BayTrail PMIC I2C bus semaphore implementation
* Copyright (c) 2014, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <asm/iosf_mbi.h>
#include "i2c-designware-core.h"
#define SEMAPHORE_TIMEOUT 100
#define PUNIT_SEMAPHORE 0x7
#define PUNIT_SEMAPHORE_BIT BIT(0)
#define PUNIT_SEMAPHORE_ACQUIRE BIT(1)
static unsigned long acquired;
static int get_sem(struct device *dev, u32 *sem)
{
u32 data;
int ret;
ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE,
&data);
if (ret) {
dev_err(dev, "iosf failed to read punit semaphore\n");
return ret;
}
*sem = data & PUNIT_SEMAPHORE_BIT;
return 0;
}
static void reset_semaphore(struct device *dev)
{
u32 data;
if (iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
PUNIT_SEMAPHORE, &data)) {
dev_err(dev, "iosf failed to reset punit semaphore during read\n");
return;
}
data &= ~PUNIT_SEMAPHORE_BIT;
if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
PUNIT_SEMAPHORE, data))
dev_err(dev, "iosf failed to reset punit semaphore during write\n");
}
static int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
{
u32 sem;
int ret;
unsigned long start, end;
might_sleep();
if (!dev || !dev->dev)
return -ENODEV;
if (!dev->release_lock)
return 0;
/* host driver writes to side band semaphore register */
ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
PUNIT_SEMAPHORE, PUNIT_SEMAPHORE_ACQUIRE);
if (ret) {
dev_err(dev->dev, "iosf punit semaphore request failed\n");
return ret;
}
/* host driver waits for bit 0 to be set in semaphore register */
start = jiffies;
end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
do {
ret = get_sem(dev->dev, &sem);
if (!ret && sem) {
acquired = jiffies;
dev_dbg(dev->dev, "punit semaphore acquired after %ums\n",
jiffies_to_msecs(jiffies - start));
return 0;
}
usleep_range(1000, 2000);
} while (time_before(jiffies, end));
dev_err(dev->dev, "punit semaphore timed out, resetting\n");
reset_semaphore(dev->dev);
ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
PUNIT_SEMAPHORE, &sem);
if (ret)
dev_err(dev->dev, "iosf failed to read punit semaphore\n");
else
dev_err(dev->dev, "PUNIT SEM: %d\n", sem);
WARN_ON(1);
return -ETIMEDOUT;
}
static void baytrail_i2c_release(struct dw_i2c_dev *dev)
{
if (!dev || !dev->dev)
return;
if (!dev->acquire_lock)
return;
reset_semaphore(dev->dev);
dev_dbg(dev->dev, "punit semaphore held for %ums\n",
jiffies_to_msecs(jiffies - acquired));
}
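#if 0
/*
 * Illustrative sketch, assuming the calling convention of the designware
 * core driver: the shared bus must be owned around every transfer, so the
 * core's transfer path is expected to bracket each transfer roughly like
 * this.  The function name is an assumption for this example.
 */
static int example_locked_xfer(struct dw_i2c_dev *dev)
{
int ret;
if (dev->acquire_lock) {
ret = dev->acquire_lock(dev);
if (ret)
return ret; /* PUNIT still owns the bus */
}
/* ... perform the actual I2C transfer here ... */
ret = 0;
if (dev->release_lock)
dev->release_lock(dev);
return ret;
}
#endif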
int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
{
acpi_status status;
unsigned long long shared_host = 0;
acpi_handle handle;
if (!dev || !dev->dev)
return 0;
handle = ACPI_HANDLE(dev->dev);
if (!handle)
return 0;
status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
if (ACPI_FAILURE(status))
return 0;
if (shared_host) {
dev_info(dev->dev, "I2C bus managed by PUNIT\n");
dev->acquire_lock = baytrail_i2c_acquire;
dev->release_lock = baytrail_i2c_release;
dev->pm_runtime_disabled = true;
}
if (!iosf_mbi_available())
return -EPROBE_DEFER;
return 0;
}
MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
MODULE_DESCRIPTION("Baytrail I2C Semaphore driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
0x20c24/linux-psec | drivers/pci/slot.c | 1127 | 10406 | /*
* drivers/pci/slot.c
* Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx>
* Copyright (C) 2006-2009 Hewlett-Packard Development Company, L.P.
* Alex Chiang <achiang@hp.com>
*/
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/err.h>
#include "pci.h"
struct kset *pci_slots_kset;
EXPORT_SYMBOL_GPL(pci_slots_kset);
static ssize_t pci_slot_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct pci_slot *slot = to_pci_slot(kobj);
struct pci_slot_attribute *attribute = to_pci_slot_attr(attr);
return attribute->show ? attribute->show(slot, buf) : -EIO;
}
static ssize_t pci_slot_attr_store(struct kobject *kobj,
struct attribute *attr, const char *buf, size_t len)
{
struct pci_slot *slot = to_pci_slot(kobj);
struct pci_slot_attribute *attribute = to_pci_slot_attr(attr);
return attribute->store ? attribute->store(slot, buf, len) : -EIO;
}
static const struct sysfs_ops pci_slot_sysfs_ops = {
.show = pci_slot_attr_show,
.store = pci_slot_attr_store,
};
static ssize_t address_read_file(struct pci_slot *slot, char *buf)
{
if (slot->number == 0xff)
return sprintf(buf, "%04x:%02x\n",
pci_domain_nr(slot->bus),
slot->bus->number);
else
return sprintf(buf, "%04x:%02x:%02x\n",
pci_domain_nr(slot->bus),
slot->bus->number,
slot->number);
}
/* these strings match up with the values in pci_bus_speed */
static const char *pci_bus_speed_strings[] = {
"33 MHz PCI", /* 0x00 */
"66 MHz PCI", /* 0x01 */
"66 MHz PCI-X", /* 0x02 */
"100 MHz PCI-X", /* 0x03 */
"133 MHz PCI-X", /* 0x04 */
NULL, /* 0x05 */
NULL, /* 0x06 */
NULL, /* 0x07 */
NULL, /* 0x08 */
"66 MHz PCI-X 266", /* 0x09 */
"100 MHz PCI-X 266", /* 0x0a */
"133 MHz PCI-X 266", /* 0x0b */
"Unknown AGP", /* 0x0c */
"1x AGP", /* 0x0d */
"2x AGP", /* 0x0e */
"4x AGP", /* 0x0f */
"8x AGP", /* 0x10 */
"66 MHz PCI-X 533", /* 0x11 */
"100 MHz PCI-X 533", /* 0x12 */
"133 MHz PCI-X 533", /* 0x13 */
"2.5 GT/s PCIe", /* 0x14 */
"5.0 GT/s PCIe", /* 0x15 */
"8.0 GT/s PCIe", /* 0x16 */
};
static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf)
{
const char *speed_string;
if (speed < ARRAY_SIZE(pci_bus_speed_strings))
speed_string = pci_bus_speed_strings[speed];
else
speed_string = "Unknown";
return sprintf(buf, "%s\n", speed_string);
}
static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf)
{
return bus_speed_read(slot->bus->max_bus_speed, buf);
}
static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf)
{
return bus_speed_read(slot->bus->cur_bus_speed, buf);
}
static void pci_slot_release(struct kobject *kobj)
{
struct pci_dev *dev;
struct pci_slot *slot = to_pci_slot(kobj);
dev_dbg(&slot->bus->dev, "dev %02x, released physical slot %s\n",
slot->number, pci_slot_name(slot));
list_for_each_entry(dev, &slot->bus->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot->number)
dev->slot = NULL;
list_del(&slot->list);
kfree(slot);
}
static struct pci_slot_attribute pci_slot_attr_address =
__ATTR(address, S_IRUGO, address_read_file, NULL);
static struct pci_slot_attribute pci_slot_attr_max_speed =
__ATTR(max_bus_speed, S_IRUGO, max_speed_read_file, NULL);
static struct pci_slot_attribute pci_slot_attr_cur_speed =
__ATTR(cur_bus_speed, S_IRUGO, cur_speed_read_file, NULL);
static struct attribute *pci_slot_default_attrs[] = {
&pci_slot_attr_address.attr,
&pci_slot_attr_max_speed.attr,
&pci_slot_attr_cur_speed.attr,
NULL,
};
static struct kobj_type pci_slot_ktype = {
.sysfs_ops = &pci_slot_sysfs_ops,
.release = &pci_slot_release,
.default_attrs = pci_slot_default_attrs,
};
static char *make_slot_name(const char *name)
{
char *new_name;
int len, max, dup;
new_name = kstrdup(name, GFP_KERNEL);
if (!new_name)
return NULL;
/*
* Make sure we hit the realloc case the first time through the
* loop. 'len' will be strlen(name) + 3 at that point which is
* enough space for "name-X" and the trailing NUL.
*/
len = strlen(name) + 2;
max = 1;
dup = 1;
for (;;) {
struct kobject *dup_slot;
dup_slot = kset_find_obj(pci_slots_kset, new_name);
if (!dup_slot)
break;
kobject_put(dup_slot);
if (dup == max) {
len++;
max *= 10;
kfree(new_name);
new_name = kmalloc(len, GFP_KERNEL);
if (!new_name)
break;
}
sprintf(new_name, "%s-%d", name, dup++);
}
return new_name;
}
static int rename_slot(struct pci_slot *slot, const char *name)
{
int result = 0;
char *slot_name;
if (strcmp(pci_slot_name(slot), name) == 0)
return result;
slot_name = make_slot_name(name);
if (!slot_name)
return -ENOMEM;
result = kobject_rename(&slot->kobj, slot_name);
kfree(slot_name);
return result;
}
static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr)
{
struct pci_slot *slot;
/*
* We already hold pci_bus_sem so don't worry
*/
list_for_each_entry(slot, &parent->slots, list)
if (slot->number == slot_nr) {
kobject_get(&slot->kobj);
return slot;
}
return NULL;
}
/**
* pci_create_slot - create or increment refcount for physical PCI slot
* @parent: struct pci_bus of parent bridge
* @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder
* @name: user visible string presented in /sys/bus/pci/slots/<name>
* @hotplug: set if caller is hotplug driver, NULL otherwise
*
* PCI slots have first class attributes such as address, speed, width,
* and a &struct pci_slot is used to manage them. This interface will
* either return a new &struct pci_slot to the caller, or if the pci_slot
* already exists, its refcount will be incremented.
*
* Slots are uniquely identified by a @pci_bus, @slot_nr tuple.
*
* There are known platforms with broken firmware that assign the same
* name to multiple slots. Workaround these broken platforms by renaming
* the slots on behalf of the caller. If firmware assigns name N to
* multiple slots:
*
* The first slot is assigned N
* The second slot is assigned N-1
* The third slot is assigned N-2
* etc.
*
* Placeholder slots:
* In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify
* a slot. There is one notable exception - pSeries (rpaphp), where the
* @slot_nr cannot be determined until a device is actually inserted into
* the slot. In this scenario, the caller may pass -1 for @slot_nr.
*
* The following semantics are imposed when the caller passes @slot_nr ==
* -1. First, we no longer check for an existing %struct pci_slot, as there
* may be many slots with @slot_nr of -1. The other change in semantics is
* user-visible: the 'address' parameter presented in sysfs will
* consist solely of a dddd:bb tuple, where dddd is the PCI domain of the
* %struct pci_bus and bb is the bus number. In other words, the devfn of
* the 'placeholder' slot will not be displayed.
*/
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
const char *name,
struct hotplug_slot *hotplug)
{
struct pci_dev *dev;
struct pci_slot *slot;
int err = 0;
char *slot_name = NULL;
down_write(&pci_bus_sem);
if (slot_nr == -1)
goto placeholder;
/*
* Hotplug drivers are allowed to rename an existing slot,
* but only if not already claimed.
*/
slot = get_slot(parent, slot_nr);
if (slot) {
if (hotplug) {
if ((err = slot->hotplug ? -EBUSY : 0)
|| (err = rename_slot(slot, name))) {
kobject_put(&slot->kobj);
slot = NULL;
goto err;
}
}
goto out;
}
placeholder:
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot) {
err = -ENOMEM;
goto err;
}
slot->bus = parent;
slot->number = slot_nr;
slot->kobj.kset = pci_slots_kset;
slot_name = make_slot_name(name);
if (!slot_name) {
err = -ENOMEM;
goto err;
}
err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
"%s", slot_name);
if (err)
goto err;
INIT_LIST_HEAD(&slot->list);
list_add(&slot->list, &parent->slots);
list_for_each_entry(dev, &parent->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot_nr)
dev->slot = slot;
dev_dbg(&parent->dev, "dev %02x, created physical slot %s\n",
slot_nr, pci_slot_name(slot));
out:
kfree(slot_name);
up_write(&pci_bus_sem);
return slot;
err:
kfree(slot);
slot = ERR_PTR(err);
goto out;
}
EXPORT_SYMBOL_GPL(pci_create_slot);
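/*
* Illustrative sketch, not part of the original file: how a hypothetical
* hotplug driver might use the interface above. The function name, bus
* pointer and slot number are made up. Per the duplicate-name workaround
* described in the kerneldoc, three slots all reported as "Slot 1" by
* firmware would show up in sysfs as "Slot 1", "Slot 1-1" and "Slot 1-2".
*/
#if 0
static int example_register_slot(struct pci_bus *bus)
{
struct pci_slot *slot;
/* devfn slot 3 on this bus, user-visible name "Slot 1" */
slot = pci_create_slot(bus, 3, "Slot 1", NULL);
if (IS_ERR(slot))
return PTR_ERR(slot);
/* ... later, drop the reference taken by pci_create_slot() ... */
pci_destroy_slot(slot);
return 0;
}
#endif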
/**
* pci_destroy_slot - decrement refcount for physical PCI slot
* @slot: struct pci_slot to decrement
*
* %struct pci_slot is refcounted, so destroying them is really easy; we
* just call kobject_put on its kobj and let our release methods do the
* rest.
*/
void pci_destroy_slot(struct pci_slot *slot)
{
dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n",
slot->number, atomic_read(&slot->kobj.kref.refcount) - 1);
down_write(&pci_bus_sem);
kobject_put(&slot->kobj);
up_write(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_destroy_slot);
#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
#include <linux/pci_hotplug.h>
/**
* pci_hp_create_link - create symbolic link to the hotplug driver module.
* @pci_slot: struct pci_slot
*
* Helper function for pci_hotplug_core.c to create symbolic link to
* the hotplug driver module.
*/
void pci_hp_create_module_link(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
struct kobject *kobj = NULL;
int ret;
if (!slot || !slot->ops)
return;
kobj = kset_find_obj(module_kset, slot->ops->mod_name);
if (!kobj)
return;
ret = sysfs_create_link(&pci_slot->kobj, kobj, "module");
if (ret)
dev_err(&pci_slot->bus->dev, "Error creating sysfs link (%d)\n",
ret);
kobject_put(kobj);
}
EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
/**
* pci_hp_remove_link - remove symbolic link to the hotplug driver module.
* @pci_slot: struct pci_slot
*
* Helper function for pci_hotplug_core.c to remove symbolic link to
* the hotplug driver module.
*/
void pci_hp_remove_module_link(struct pci_slot *pci_slot)
{
sysfs_remove_link(&pci_slot->kobj, "module");
}
EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
#endif
static int pci_slot_init(void)
{
struct kset *pci_bus_kset;
pci_bus_kset = bus_get_kset(&pci_bus_type);
pci_slots_kset = kset_create_and_add("slots", NULL,
&pci_bus_kset->kobj);
if (!pci_slots_kset) {
printk(KERN_ERR "PCI: Slot initialization failure\n");
return -ENOMEM;
}
return 0;
}
subsys_initcall(pci_slot_init);
| gpl-2.0 |
Myself5/android_kernel_sony_msm | drivers/leds/leds-blinkm.c | 2407 | 21023 | /*
* leds-blinkm.c
* (c) Jan-Simon Möller (dl9pf@gmx.de)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/printk.h>
#include <linux/pm_runtime.h>
#include <linux/leds.h>
#include <linux/delay.h>
/* Addresses to scan - BlinkM is on 0x09 by default*/
static const unsigned short normal_i2c[] = { 0x09, I2C_CLIENT_END };
static int blinkm_transfer_hw(struct i2c_client *client, int cmd);
static int blinkm_test_run(struct i2c_client *client);
struct blinkm_led {
struct i2c_client *i2c_client;
struct led_classdev led_cdev;
int id;
atomic_t active;
};
struct blinkm_work {
struct blinkm_led *blinkm_led;
struct work_struct work;
};
#define cdev_to_blmled(c) container_of(c, struct blinkm_led, led_cdev)
#define work_to_blmwork(c) container_of(c, struct blinkm_work, work)
struct blinkm_data {
struct i2c_client *i2c_client;
struct mutex update_lock;
/* used for led class interface */
struct blinkm_led blinkm_leds[3];
/* used for "blinkm" sysfs interface */
u8 red; /* color red */
u8 green; /* color green */
u8 blue; /* color blue */
/* next values to use for transfer */
u8 next_red; /* color red */
u8 next_green; /* color green */
u8 next_blue; /* color blue */
/* internal use */
u8 args[7]; /* set of args for transmission */
u8 i2c_addr; /* i2c addr */
u8 fw_ver; /* firmware version */
/* used, but not from userspace */
u8 hue; /* HSB hue */
u8 saturation; /* HSB saturation */
u8 brightness; /* HSB brightness */
u8 next_hue; /* HSB hue */
u8 next_saturation; /* HSB saturation */
u8 next_brightness; /* HSB brightness */
/* currently unused / todo */
u8 fade_speed; /* fade speed 1 - 255 */
s8 time_adjust; /* time adjust -128 - 127 */
u8 fade:1; /* fade on = 1, off = 0 */
u8 rand:1; /* rand fade mode on = 1 */
u8 script_id; /* script ID */
u8 script_repeats; /* repeats of script */
u8 script_startline; /* line to start */
};
/* Colors */
#define RED 0
#define GREEN 1
#define BLUE 2
/* mapping command names to cmd chars - see datasheet */
#define BLM_GO_RGB 0
#define BLM_FADE_RGB 1
#define BLM_FADE_HSB 2
#define BLM_FADE_RAND_RGB 3
#define BLM_FADE_RAND_HSB 4
#define BLM_PLAY_SCRIPT 5
#define BLM_STOP_SCRIPT 6
#define BLM_SET_FADE_SPEED 7
#define BLM_SET_TIME_ADJ 8
#define BLM_GET_CUR_RGB 9
#define BLM_WRITE_SCRIPT_LINE 10
#define BLM_READ_SCRIPT_LINE 11
#define BLM_SET_SCRIPT_LR 12 /* Length & Repeats */
#define BLM_SET_ADDR 13
#define BLM_GET_ADDR 14
#define BLM_GET_FW_VER 15
#define BLM_SET_STARTUP_PARAM 16
/* BlinkM Commands
* as extracted out of the datasheet:
*
* cmdchar = command (ascii)
* cmdbyte = command in hex
* nr_args = number of arguments (to send)
* nr_ret = number of return values (to read)
* dir = direction (0 = read, 1 = write, 2 = both)
*
*/
static const struct {
char cmdchar;
u8 cmdbyte;
u8 nr_args;
u8 nr_ret;
u8 dir:2;
} blinkm_cmds[17] = {
/* cmdchar, cmdbyte, nr_args, nr_ret, dir */
{ 'n', 0x6e, 3, 0, 1},
{ 'c', 0x63, 3, 0, 1},
{ 'h', 0x68, 3, 0, 1},
{ 'C', 0x43, 3, 0, 1},
{ 'H', 0x48, 3, 0, 1},
{ 'p', 0x70, 3, 0, 1},
{ 'o', 0x6f, 0, 0, 1},
{ 'f', 0x66, 1, 0, 1},
{ 't', 0x74, 1, 0, 1},
{ 'g', 0x67, 0, 3, 0},
{ 'W', 0x57, 7, 0, 1},
{ 'R', 0x52, 2, 5, 2},
{ 'L', 0x4c, 3, 0, 1},
{ 'A', 0x41, 4, 0, 1},
{ 'a', 0x61, 0, 1, 0},
{ 'Z', 0x5a, 0, 1, 0},
{ 'B', 0x42, 5, 0, 1},
};
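/*
* Example reading of one row above: { 'g', 0x67, 0, 3, 0 } is
* BLM_GET_CUR_RGB - command byte 0x67, no argument bytes to send,
* three return bytes (red, green, blue) to read, direction 0 (read).
*/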
static ssize_t show_color_common(struct device *dev, char *buf, int color)
{
struct i2c_client *client;
struct blinkm_data *data;
int ret;
client = to_i2c_client(dev);
data = i2c_get_clientdata(client);
ret = blinkm_transfer_hw(client, BLM_GET_CUR_RGB);
if (ret < 0)
return ret;
switch (color) {
case RED:
return scnprintf(buf, PAGE_SIZE, "%02X\n", data->red);
break;
case GREEN:
return scnprintf(buf, PAGE_SIZE, "%02X\n", data->green);
break;
case BLUE:
return scnprintf(buf, PAGE_SIZE, "%02X\n", data->blue);
break;
default:
return -EINVAL;
}
return -EINVAL;
}
static int store_color_common(struct device *dev, const char *buf, int color)
{
struct i2c_client *client;
struct blinkm_data *data;
int ret;
u8 value;
client = to_i2c_client(dev);
data = i2c_get_clientdata(client);
ret = kstrtou8(buf, 10, &value);
if (ret < 0) {
dev_err(dev, "BlinkM: value too large!\n");
return ret;
}
switch (color) {
case RED:
data->next_red = value;
break;
case GREEN:
data->next_green = value;
break;
case BLUE:
data->next_blue = value;
break;
default:
return -EINVAL;
}
dev_dbg(dev, "next_red = %d, next_green = %d, next_blue = %d\n",
data->next_red, data->next_green, data->next_blue);
/* if mode ... */
ret = blinkm_transfer_hw(client, BLM_GO_RGB);
if (ret < 0) {
dev_err(dev, "BlinkM: can't set RGB\n");
return ret;
}
return 0;
}
static ssize_t show_red(struct device *dev, struct device_attribute *attr,
char *buf)
{
return show_color_common(dev, buf, RED);
}
static ssize_t store_red(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
ret = store_color_common(dev, buf, RED);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR(red, S_IRUGO | S_IWUSR, show_red, store_red);
static ssize_t show_green(struct device *dev, struct device_attribute *attr,
char *buf)
{
return show_color_common(dev, buf, GREEN);
}
static ssize_t store_green(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
ret = store_color_common(dev, buf, GREEN);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR(green, S_IRUGO | S_IWUSR, show_green, store_green);
static ssize_t show_blue(struct device *dev, struct device_attribute *attr,
char *buf)
{
return show_color_common(dev, buf, BLUE);
}
static ssize_t store_blue(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
ret = store_color_common(dev, buf, BLUE);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR(blue, S_IRUGO | S_IWUSR, show_blue, store_blue);
static ssize_t show_test(struct device *dev, struct device_attribute *attr,
char *buf)
{
return scnprintf(buf, PAGE_SIZE,
"#Write into test to start test sequence!#\n");
}
static ssize_t store_test(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client;
int ret;
client = to_i2c_client(dev);
/*test */
ret = blinkm_test_run(client);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR(test, S_IRUGO | S_IWUSR, show_test, store_test);
/* TODO: HSB, fade, timeadj, script ... */
static struct attribute *blinkm_attrs[] = {
&dev_attr_red.attr,
&dev_attr_green.attr,
&dev_attr_blue.attr,
&dev_attr_test.attr,
NULL,
};
static struct attribute_group blinkm_group = {
.name = "blinkm",
.attrs = blinkm_attrs,
};
static int blinkm_write(struct i2c_client *client, int cmd, u8 *arg)
{
int result;
int i;
int arglen = blinkm_cmds[cmd].nr_args;
/* write out cmd to blinkm - always / default step */
result = i2c_smbus_write_byte(client, blinkm_cmds[cmd].cmdbyte);
if (result < 0)
return result;
/* no args to write out */
if (arglen == 0)
return 0;
for (i = 0; i < arglen; i++) {
/* repeat for arglen */
result = i2c_smbus_write_byte(client, arg[i]);
if (result < 0)
return result;
}
return 0;
}
static int blinkm_read(struct i2c_client *client, int cmd, u8 *arg)
{
int result;
int i;
int retlen = blinkm_cmds[cmd].nr_ret;
for (i = 0; i < retlen; i++) {
/* repeat for retlen */
result = i2c_smbus_read_byte(client);
if (result < 0)
return result;
arg[i] = result;
}
return 0;
}
static int blinkm_transfer_hw(struct i2c_client *client, int cmd)
{
/* the protocol is simple but non-standard:
* e.g. cmd 'a' (= 0x61) for "get device address"
* - which defaults to 0x09 - would be the sequence:
* a) write 0x61 to the device (byte write)
* b) read the value (0x09) back right after (byte read)
*
* Watch out for "unfinished" sequences (i.e. not enough reads
* or writes after a command); it will make the BlinkM misbehave.
* Sequence is key here.
*/
/* args / return are in private data struct */
struct blinkm_data *data = i2c_get_clientdata(client);
/* We start hardware transfers which are not to be
* mixed with other commands. Acquire a lock now. */
if (mutex_lock_interruptible(&data->update_lock) < 0)
return -EAGAIN;
/* switch cmd - usually write before reads */
switch (cmd) {
case BLM_FADE_RAND_RGB:
case BLM_GO_RGB:
case BLM_FADE_RGB:
data->args[0] = data->next_red;
data->args[1] = data->next_green;
data->args[2] = data->next_blue;
blinkm_write(client, cmd, data->args);
data->red = data->args[0];
data->green = data->args[1];
data->blue = data->args[2];
break;
case BLM_FADE_HSB:
case BLM_FADE_RAND_HSB:
data->args[0] = data->next_hue;
data->args[1] = data->next_saturation;
data->args[2] = data->next_brightness;
blinkm_write(client, cmd, data->args);
data->hue = data->next_hue;
data->saturation = data->next_saturation;
data->brightness = data->next_brightness;
break;
case BLM_PLAY_SCRIPT:
data->args[0] = data->script_id;
data->args[1] = data->script_repeats;
data->args[2] = data->script_startline;
blinkm_write(client, cmd, data->args);
break;
case BLM_STOP_SCRIPT:
blinkm_write(client, cmd, NULL);
break;
case BLM_GET_CUR_RGB:
data->args[0] = data->red;
data->args[1] = data->green;
data->args[2] = data->blue;
blinkm_write(client, cmd, NULL);
blinkm_read(client, cmd, data->args);
data->red = data->args[0];
data->green = data->args[1];
data->blue = data->args[2];
break;
case BLM_GET_ADDR:
data->args[0] = data->i2c_addr;
blinkm_write(client, cmd, NULL);
blinkm_read(client, cmd, data->args);
data->i2c_addr = data->args[0];
break;
case BLM_SET_TIME_ADJ:
case BLM_SET_FADE_SPEED:
case BLM_READ_SCRIPT_LINE:
case BLM_WRITE_SCRIPT_LINE:
case BLM_SET_SCRIPT_LR:
case BLM_SET_ADDR:
case BLM_GET_FW_VER:
case BLM_SET_STARTUP_PARAM:
dev_err(&client->dev,
"BlinkM: cmd %d not implemented yet.\n", cmd);
break;
default:
dev_err(&client->dev, "BlinkM: unknown command %d\n", cmd);
mutex_unlock(&data->update_lock);
return -EINVAL;
} /* end switch(cmd) */
/* transfers done, unlock */
mutex_unlock(&data->update_lock);
return 0;
}
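/*
* Illustrative sketch, not part of the original driver: the raw SMBus
* sequence behind the 'a' / BLM_GET_ADDR case above, without the locking
* and bookkeeping done by blinkm_transfer_hw(). The function name is
* made up for the example.
*/
#if 0
static int example_blinkm_get_addr(struct i2c_client *client)
{
int ret;
ret = i2c_smbus_write_byte(client, 0x61); /* 'a' - get address */
if (ret < 0)
return ret;
return i2c_smbus_read_byte(client); /* expected reply: 0x09 */
}
#endif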
static void led_work(struct work_struct *work)
{
int ret;
struct blinkm_led *led;
struct blinkm_data *data;
struct blinkm_work *blm_work = work_to_blmwork(work);
led = blm_work->blinkm_led;
data = i2c_get_clientdata(led->i2c_client);
ret = blinkm_transfer_hw(led->i2c_client, BLM_GO_RGB);
atomic_dec(&led->active);
dev_dbg(&led->i2c_client->dev,
"# DONE # next_red = %d, next_green = %d,"
" next_blue = %d, active = %d\n",
data->next_red, data->next_green,
data->next_blue, atomic_read(&led->active));
kfree(blm_work);
}
static int blinkm_led_common_set(struct led_classdev *led_cdev,
enum led_brightness value, int color)
{
/* led_brightness is 0, 127 or 255 - we just use it here as-is */
struct blinkm_led *led = cdev_to_blmled(led_cdev);
struct blinkm_data *data = i2c_get_clientdata(led->i2c_client);
struct blinkm_work *bl_work;
switch (color) {
case RED:
/* bail out if there's no change */
if (data->next_red == (u8) value)
return 0;
/* we assume a quite fast sequence here ([off]->on->off)
* think of network led trigger - we cannot blink that fast, so
* in case we already have a off->on->off transition queued up,
* we refuse to queue up more.
* Revisit: fast-changing brightness. */
if (atomic_read(&led->active) > 1)
return 0;
data->next_red = (u8) value;
break;
case GREEN:
/* bail out if there's no change */
if (data->next_green == (u8) value)
return 0;
/* we assume a quite fast sequence here ([off]->on->off)
* Revisit: fast-changing brightness. */
if (atomic_read(&led->active) > 1)
return 0;
data->next_green = (u8) value;
break;
case BLUE:
/* bail out if there's no change */
if (data->next_blue == (u8) value)
return 0;
/* we assume a quite fast sequence here ([off]->on->off)
* Revisit: fast-changing brightness. */
if (atomic_read(&led->active) > 1)
return 0;
data->next_blue = (u8) value;
break;
default:
dev_err(&led->i2c_client->dev, "BlinkM: unknown color.\n");
return -EINVAL;
}
bl_work = kzalloc(sizeof(*bl_work), GFP_ATOMIC);
if (!bl_work)
return -ENOMEM;
atomic_inc(&led->active);
dev_dbg(&led->i2c_client->dev,
"#TO_SCHED# next_red = %d, next_green = %d,"
" next_blue = %d, active = %d\n",
data->next_red, data->next_green,
data->next_blue, atomic_read(&led->active));
/* a fresh work _item_ for each change */
bl_work->blinkm_led = led;
INIT_WORK(&bl_work->work, led_work);
/* queue work in own queue for easy sync on exit*/
schedule_work(&bl_work->work);
return 0;
}
static void blinkm_led_red_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
blinkm_led_common_set(led_cdev, value, RED);
}
static void blinkm_led_green_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
blinkm_led_common_set(led_cdev, value, GREEN);
}
static void blinkm_led_blue_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
blinkm_led_common_set(led_cdev, value, BLUE);
}
static void blinkm_init_hw(struct i2c_client *client)
{
int ret;
ret = blinkm_transfer_hw(client, BLM_STOP_SCRIPT);
ret = blinkm_transfer_hw(client, BLM_GO_RGB);
}
static int blinkm_test_run(struct i2c_client *client)
{
int ret;
struct blinkm_data *data = i2c_get_clientdata(client);
data->next_red = 0x01;
data->next_green = 0x05;
data->next_blue = 0x10;
ret = blinkm_transfer_hw(client, BLM_GO_RGB);
if (ret < 0)
return ret;
msleep(2000);
data->next_red = 0x25;
data->next_green = 0x10;
data->next_blue = 0x31;
ret = blinkm_transfer_hw(client, BLM_FADE_RGB);
if (ret < 0)
return ret;
msleep(2000);
data->next_hue = 0x50;
data->next_saturation = 0x10;
data->next_brightness = 0x20;
ret = blinkm_transfer_hw(client, BLM_FADE_HSB);
if (ret < 0)
return ret;
msleep(2000);
return 0;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
static int blinkm_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int ret;
int count = 99;
u8 tmpargs[7];
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_WORD_DATA
| I2C_FUNC_SMBUS_WRITE_BYTE))
return -ENODEV;
/* Now, we do the remaining detection. Simple for now. */
/* We might need more guards to protect other i2c slaves */
/* make sure the blinkM is balanced (read/writes) */
while (count > 0) {
ret = blinkm_write(client, BLM_GET_ADDR, NULL);
usleep_range(5000, 10000);
ret = blinkm_read(client, BLM_GET_ADDR, tmpargs);
usleep_range(5000, 10000);
if (tmpargs[0] == 0x09)
count = 0;
count--;
}
/* Step 1: Read BlinkM address back - cmd_char 'a' */
ret = blinkm_write(client, BLM_GET_ADDR, NULL);
if (ret < 0)
return ret;
usleep_range(20000, 30000); /* allow a small delay */
ret = blinkm_read(client, BLM_GET_ADDR, tmpargs);
if (ret < 0)
return ret;
if (tmpargs[0] != 0x09) {
dev_err(&client->dev, "enodev DEV ADDR = 0x%02X\n", tmpargs[0]);
return -ENODEV;
}
strlcpy(info->type, "blinkm", I2C_NAME_SIZE);
return 0;
}
static int blinkm_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct blinkm_data *data;
struct blinkm_led *led[3];
int err, i;
char blinkm_led_name[28];
data = devm_kzalloc(&client->dev,
sizeof(struct blinkm_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
data->i2c_addr = 0x08;
/* i2c addr - use fake addr of 0x08 initially (real is 0x09) */
data->fw_ver = 0xfe;
/* firmware version - use fake until we read real value
* (currently broken - BlinkM confused!) */
data->script_id = 0x01;
data->i2c_client = client;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &blinkm_group);
if (err < 0) {
dev_err(&client->dev, "couldn't register sysfs group\n");
goto exit;
}
for (i = 0; i < 3; i++) {
/* RED = 0, GREEN = 1, BLUE = 2 */
led[i] = &data->blinkm_leds[i];
led[i]->i2c_client = client;
led[i]->id = i;
led[i]->led_cdev.max_brightness = 255;
led[i]->led_cdev.flags = LED_CORE_SUSPENDRESUME;
atomic_set(&led[i]->active, 0);
switch (i) {
case RED:
snprintf(blinkm_led_name, sizeof(blinkm_led_name),
"blinkm-%d-%d-red",
client->adapter->nr,
client->addr);
led[i]->led_cdev.name = blinkm_led_name;
led[i]->led_cdev.brightness_set = blinkm_led_red_set;
err = led_classdev_register(&client->dev,
&led[i]->led_cdev);
if (err < 0) {
dev_err(&client->dev,
"couldn't register LED %s\n",
led[i]->led_cdev.name);
goto failred;
}
break;
case GREEN:
snprintf(blinkm_led_name, sizeof(blinkm_led_name),
"blinkm-%d-%d-green",
client->adapter->nr,
client->addr);
led[i]->led_cdev.name = blinkm_led_name;
led[i]->led_cdev.brightness_set = blinkm_led_green_set;
err = led_classdev_register(&client->dev,
&led[i]->led_cdev);
if (err < 0) {
dev_err(&client->dev,
"couldn't register LED %s\n",
led[i]->led_cdev.name);
goto failgreen;
}
break;
case BLUE:
snprintf(blinkm_led_name, sizeof(blinkm_led_name),
"blinkm-%d-%d-blue",
client->adapter->nr,
client->addr);
led[i]->led_cdev.name = blinkm_led_name;
led[i]->led_cdev.brightness_set = blinkm_led_blue_set;
err = led_classdev_register(&client->dev,
&led[i]->led_cdev);
if (err < 0) {
dev_err(&client->dev,
"couldn't register LED %s\n",
led[i]->led_cdev.name);
goto failblue;
}
break;
} /* end switch */
} /* end for */
/* Initialize the blinkm */
blinkm_init_hw(client);
return 0;
failblue:
led_classdev_unregister(&led[GREEN]->led_cdev);
failgreen:
led_classdev_unregister(&led[RED]->led_cdev);
failred:
sysfs_remove_group(&client->dev.kobj, &blinkm_group);
exit:
return err;
}
static int blinkm_remove(struct i2c_client *client)
{
struct blinkm_data *data = i2c_get_clientdata(client);
int ret = 0;
int i;
/* make sure no workqueue entries are pending */
for (i = 0; i < 3; i++) {
flush_scheduled_work();
led_classdev_unregister(&data->blinkm_leds[i].led_cdev);
}
/* reset rgb */
data->next_red = 0x00;
data->next_green = 0x00;
data->next_blue = 0x00;
ret = blinkm_transfer_hw(client, BLM_FADE_RGB);
if (ret < 0)
dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
/* reset hsb */
data->next_hue = 0x00;
data->next_saturation = 0x00;
data->next_brightness = 0x00;
ret = blinkm_transfer_hw(client, BLM_FADE_HSB);
if (ret < 0)
dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
/* red fade to off */
data->next_red = 0xff;
ret = blinkm_transfer_hw(client, BLM_GO_RGB);
if (ret < 0)
dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
/* off */
data->next_red = 0x00;
ret = blinkm_transfer_hw(client, BLM_FADE_RGB);
if (ret < 0)
dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
sysfs_remove_group(&client->dev.kobj, &blinkm_group);
return 0;
}
static const struct i2c_device_id blinkm_id[] = {
{"blinkm", 0},
{}
};
MODULE_DEVICE_TABLE(i2c, blinkm_id);
/* This is the driver that will be inserted */
static struct i2c_driver blinkm_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "blinkm",
},
.probe = blinkm_probe,
.remove = blinkm_remove,
.id_table = blinkm_id,
.detect = blinkm_detect,
.address_list = normal_i2c,
};
module_i2c_driver(blinkm_driver);
MODULE_AUTHOR("Jan-Simon Moeller <dl9pf@gmx.de>");
MODULE_DESCRIPTION("BlinkM RGB LED driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
bigbiff/android_kernel_samsung_sm900p | net/l2tp/l2tp_ip.c | 2663 | 16910 | /*
* L2TPv3 IP encapsulation support
*
* Copyright (c) 2008,2009,2010 Katalix Systems Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include "l2tp_core.h"
struct l2tp_ip_sock {
/* inet_sock has to be the first member of l2tp_ip_sock */
struct inet_sock inet;
__u32 conn_id;
__u32 peer_conn_id;
__u64 tx_packets;
__u64 tx_bytes;
__u64 tx_errors;
__u64 rx_packets;
__u64 rx_bytes;
__u64 rx_errors;
};
static DEFINE_RWLOCK(l2tp_ip_lock);
static struct hlist_head l2tp_ip_table;
static struct hlist_head l2tp_ip_bind_table;
static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
return (struct l2tp_ip_sock *)sk;
}
static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
{
struct hlist_node *node;
struct sock *sk;
sk_for_each_bound(sk, node, &l2tp_ip_bind_table) {
struct inet_sock *inet = inet_sk(sk);
struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
if (l2tp == NULL)
continue;
if ((l2tp->conn_id == tunnel_id) &&
net_eq(sock_net(sk), net) &&
!(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
!(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
goto found;
}
sk = NULL;
found:
return sk;
}
static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
{
struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
if (sk)
sock_hold(sk);
return sk;
}
/* When processing receive frames, there are two cases to
* consider. Data frames consist of a non-zero session-id and an
* optional cookie. Control frames consist of a regular L2TP header
* preceded by 32-bits of zeros.
*
* L2TPv3 Session Header Over IP
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Session ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Cookie (optional, maximum 64 bits)...
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* L2TPv3 Control Message Header Over IP
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | (32 bits of zeros) |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Control Connection ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Ns | Nr |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* All control frames are passed to userspace.
*/
static int l2tp_ip_recv(struct sk_buff *skb)
{
struct sock *sk;
u32 session_id;
u32 tunnel_id;
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
int length;
int offset;
/* Point to L2TP header */
optr = ptr = skb->data;
if (!pskb_may_pull(skb, 4))
goto discard;
session_id = ntohl(*((__be32 *) ptr));
ptr += 4;
/* RFC3931: L2TP/IP packets have the first 4 bytes containing
* the session_id. If it is 0, the packet is a L2TP control
* frame and the session_id value can be discarded.
*/
if (session_id == 0) {
__skb_pull(skb, 4);
goto pass_up;
}
/* Ok, this is a data packet. Lookup the session. */
session = l2tp_session_find(&init_net, NULL, session_id);
if (session == NULL)
goto discard;
tunnel = session->tunnel;
if (tunnel == NULL)
goto discard;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
goto discard;
printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);
offset = 0;
do {
printk(" %02X", ptr[offset]);
} while (++offset < length);
printk("\n");
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
return 0;
pass_up:
/* Get the tunnel_id from the L2TP header */
if (!pskb_may_pull(skb, 12))
goto discard;
if ((skb->data[0] & 0xc0) != 0xc0)
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
if (tunnel != NULL)
sk = tunnel->sock;
else {
struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
read_lock_bh(&l2tp_ip_lock);
sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id);
read_unlock_bh(&l2tp_ip_lock);
}
if (sk == NULL)
goto discard;
sock_hold(sk);
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
nf_reset(skb);
return sk_receive_skb(sk, skb, 1);
discard_put:
sock_put(sk);
discard:
kfree_skb(skb);
return 0;
}
static int l2tp_ip_open(struct sock *sk)
{
/* Prevent autobind. We don't have ports. */
inet_sk(sk)->inet_num = IPPROTO_L2TP;
write_lock_bh(&l2tp_ip_lock);
sk_add_node(sk, &l2tp_ip_table);
write_unlock_bh(&l2tp_ip_lock);
return 0;
}
static void l2tp_ip_close(struct sock *sk, long timeout)
{
write_lock_bh(&l2tp_ip_lock);
hlist_del_init(&sk->sk_bind_node);
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip_lock);
sk_common_release(sk);
}
static void l2tp_ip_destroy_sock(struct sock *sk)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
kfree_skb(skb);
sk_refcnt_debug_dec(sk);
}
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
int ret = -EINVAL;
int chk_addr_ret;
ret = -EADDRINUSE;
read_lock_bh(&l2tp_ip_lock);
if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
goto out_in_use;
read_unlock_bh(&l2tp_ip_lock);
lock_sock(sk);
if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
goto out;
chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr);
ret = -EADDRNOTAVAIL;
if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
goto out;
if (addr->l2tp_addr.s_addr)
inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
sk_dst_reset(sk);
l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
write_lock_bh(&l2tp_ip_lock);
sk_add_bind_node(sk, &l2tp_ip_bind_table);
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip_lock);
ret = 0;
out:
release_sock(sk);
return ret;
out_in_use:
read_unlock_bh(&l2tp_ip_lock);
return ret;
}
static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
struct inet_sock *inet = inet_sk(sk);
struct flowi4 *fl4;
struct rtable *rt;
__be32 saddr;
int oif, rc;
rc = -EINVAL;
if (addr_len < sizeof(*lsa))
goto out;
rc = -EAFNOSUPPORT;
if (lsa->l2tp_family != AF_INET)
goto out;
lock_sock(sk);
sk_dst_reset(sk);
oif = sk->sk_bound_dev_if;
saddr = inet->inet_saddr;
rc = -EINVAL;
if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
goto out;
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_connect(fl4, lsa->l2tp_addr.s_addr, saddr,
RT_CONN_FLAGS(sk), oif,
IPPROTO_L2TP,
0, 0, sk, true);
if (IS_ERR(rt)) {
rc = PTR_ERR(rt);
if (rc == -ENETUNREACH)
IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
goto out;
}
rc = -ENETUNREACH;
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
ip_rt_put(rt);
goto out;
}
l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
if (!inet->inet_saddr)
inet->inet_saddr = fl4->saddr;
if (!inet->inet_rcv_saddr)
inet->inet_rcv_saddr = fl4->saddr;
inet->inet_daddr = fl4->daddr;
sk->sk_state = TCP_ESTABLISHED;
inet->inet_id = jiffies;
sk_dst_set(sk, &rt->dst);
write_lock_bh(&l2tp_ip_lock);
hlist_del_init(&sk->sk_bind_node);
sk_add_bind_node(sk, &l2tp_ip_bind_table);
write_unlock_bh(&l2tp_ip_lock);
rc = 0;
out:
release_sock(sk);
return rc;
}
static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
memset(lsa, 0, sizeof(*lsa));
lsa->l2tp_family = AF_INET;
if (peer) {
if (!inet->inet_dport)
return -ENOTCONN;
lsa->l2tp_conn_id = lsk->peer_conn_id;
lsa->l2tp_addr.s_addr = inet->inet_daddr;
} else {
__be32 addr = inet->inet_rcv_saddr;
if (!addr)
addr = inet->inet_saddr;
lsa->l2tp_conn_id = lsk->conn_id;
lsa->l2tp_addr.s_addr = addr;
}
*uaddr_len = sizeof(*lsa);
return 0;
}
static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
int rc;
/* Charge it to the socket, dropping if the queue is full. */
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0)
goto drop;
return 0;
drop:
IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
kfree_skb(skb);
return -1;
}
/* Userspace will call sendmsg() on the tunnel socket to send L2TP
* control frames.
*/
static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len)
{
struct sk_buff *skb;
int rc;
struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = NULL;
struct flowi4 *fl4;
int connected = 0;
__be32 daddr;
lock_sock(sk);
rc = -ENOTCONN;
if (sock_flag(sk, SOCK_DEAD))
goto out;
/* Get and verify the address. */
if (msg->msg_name) {
struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name;
rc = -EINVAL;
if (msg->msg_namelen < sizeof(*lip))
goto out;
if (lip->l2tp_family != AF_INET) {
rc = -EAFNOSUPPORT;
if (lip->l2tp_family != AF_UNSPEC)
goto out;
}
daddr = lip->l2tp_addr.s_addr;
} else {
rc = -EDESTADDRREQ;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
daddr = inet->inet_daddr;
connected = 1;
}
/* Allocate a socket buffer */
rc = -ENOMEM;
skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
4 + len, 0, GFP_KERNEL);
if (!skb)
goto error;
/* Reserve space for headers, putting IP header on 4-byte boundary. */
skb_reserve(skb, 2 + NET_SKB_PAD);
skb_reset_network_header(skb);
skb_reserve(skb, sizeof(struct iphdr));
skb_reset_transport_header(skb);
/* Insert 0 session_id */
*((__be32 *) skb_put(skb, 4)) = 0;
/* Copy user data into skb */
rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (rc < 0) {
kfree_skb(skb);
goto error;
}
fl4 = &inet->cork.fl.u.ip4;
if (connected)
rt = (struct rtable *) __sk_dst_check(sk, 0);
rcu_read_lock();
if (rt == NULL) {
const struct ip_options_rcu *inet_opt;
inet_opt = rcu_dereference(inet->inet_opt);
/* Use correct destination address if we have options. */
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
/* If this fails, retransmit mechanism of transport layer will
* keep trying until route appears or the connection times
* itself out.
*/
rt = ip_route_output_ports(sock_net(sk), fl4, sk,
daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
sk->sk_protocol, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
if (connected)
sk_setup_caps(sk, &rt->dst);
else
dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
}
/* We don't need to clone dst here; it is guaranteed not to disappear.
* __dev_xmit_skb() might force a refcount if needed.
*/
skb_dst_set_noref(skb, &rt->dst);
/* Queue the packet to IP for output */
rc = ip_queue_xmit(skb, &inet->cork.fl);
rcu_read_unlock();
error:
/* Update stats */
if (rc >= 0) {
lsa->tx_packets++;
lsa->tx_bytes += len;
rc = len;
} else {
lsa->tx_errors++;
}
out:
release_sock(sk);
return rc;
no_route:
rcu_read_unlock();
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
rc = -EHOSTUNREACH;
goto out;
}
static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
if (flags & MSG_OOB)
goto out;
if (addr_len)
*addr_len = sizeof(*sin);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_timestamp(msg, sk, skb);
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
if (err) {
lsk->rx_errors++;
return err;
}
lsk->rx_packets++;
lsk->rx_bytes += copied;
return copied;
}
static struct proto l2tp_ip_prot = {
.name = "L2TP/IP",
.owner = THIS_MODULE,
.init = l2tp_ip_open,
.close = l2tp_ip_close,
.bind = l2tp_ip_bind,
.connect = l2tp_ip_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = l2tp_ip_destroy_sock,
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
.sendmsg = l2tp_ip_sendmsg,
.recvmsg = l2tp_ip_recvmsg,
.backlog_rcv = l2tp_ip_backlog_recv,
.hash = inet_hash,
.unhash = inet_unhash,
.obj_size = sizeof(struct l2tp_ip_sock),
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
.compat_getsockopt = compat_ip_getsockopt,
#endif
};
static const struct proto_ops l2tp_ip_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = l2tp_ip_getname,
.poll = datagram_poll,
.ioctl = inet_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw l2tp_ip_protosw = {
.type = SOCK_DGRAM,
.protocol = IPPROTO_L2TP,
.prot = &l2tp_ip_prot,
.ops = &l2tp_ip_ops,
.no_check = 0,
};
static struct net_protocol l2tp_ip_protocol __read_mostly = {
.handler = l2tp_ip_recv,
};
static int __init l2tp_ip_init(void)
{
int err;
printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n");
err = proto_register(&l2tp_ip_prot, 1);
if (err != 0)
goto out;
err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
if (err)
goto out1;
inet_register_protosw(&l2tp_ip_protosw);
return 0;
out1:
proto_unregister(&l2tp_ip_prot);
out:
return err;
}
static void __exit l2tp_ip_exit(void)
{
inet_unregister_protosw(&l2tp_ip_protosw);
inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
proto_unregister(&l2tp_ip_prot);
}
module_init(l2tp_ip_init);
module_exit(l2tp_ip_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");
/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
* enums
*/
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
| gpl-2.0 |
NookieDevs/android_kernel_bn_encore | drivers/net/lp486e.c | 2919 | 33023 | /* Intel Professional Workstation/panther ethernet driver */
/* lp486e.c: A panther 82596 ethernet driver for linux. */
/*
History and copyrights:
Driver skeleton
Written 1993 by Donald Becker.
Copyright 1993 United States Government as represented by the Director,
National Security Agency. This software may only be used and
distributed according to the terms of the GNU General Public License
as modified by SRC, incorporated herein by reference.
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
Annapolis MD 21403
Apricot
Written 1994 by Mark Evans.
This driver is for the Apricot 82596 bus-master interface
Modularised 12/94 Mark Evans
Professional Workstation
Derived from apricot.c by Ard van Breemen
<ard@murphy.nl>|<ard@cstmel.hobby.nl>|<ard@cstmel.nl.eu.org>
Credits:
Thanks to Murphy Software BV for letting me write this in their time.
Well, actually, I get paid doing this...
(Also: see http://www.murphy.nl for murphy, and my homepage ~ard for
more information on the Professional Workstation)
Present version
aeb@cwi.nl
*/
/*
There are currently two motherboards that I know of in the
professional workstation. The only one that I know is the
intel panther motherboard. -- ard
*/
/*
The pws is equipped with an intel 82596. This is a very intelligent controller
which runs its own micro-code. Communication with the host processor is done
through linked lists of commands and buffers in the host processor's memory.
A complete description of the 82596 is available from intel. Search for
a file called "29021806.pdf". It is a complete description of the chip itself.
To use it for the pws some additions are needed regarding generation of
the PORT and CA signal, and the interrupt glue needed for a pc.
I/O map:
PORT SIZE ACTION MEANING
0xCB0 2 WRITE Lower 16 bits for PORT command
0xCB2 2 WRITE Upper 16 bits for PORT command, and issue of PORT command
0xCB4 1 WRITE Generation of CA signal
0xCB8 1 WRITE Clear interrupt glue
All other communication is through memory!
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#define DRV_NAME "lp486e"
/* debug print flags */
#define LOG_SRCDST 0x80000000
#define LOG_STATINT 0x40000000
#define LOG_STARTINT 0x20000000
#define i596_debug debug
static int i596_debug = 0;
static const char * const medianame[] = {
"10baseT", "AUI",
"10baseT-FD", "AUI-FD",
};
#define LP486E_TOTAL_SIZE 16
#define I596_NULL (0xffffffff)
#define CMD_EOL 0x8000 /* The last command of the list, stop. */
#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
#define CMD_FLEX 0x0008 /* Enable flexible memory model */
enum commands {
CmdNOP = 0,
CmdIASetup = 1,
CmdConfigure = 2,
CmdMulticastList = 3,
CmdTx = 4,
CmdTDR = 5,
CmdDump = 6,
CmdDiagnose = 7
};
#if 0
static const char *CUcmdnames[8] = { "NOP", "IASetup", "Configure", "MulticastList",
"Tx", "TDR", "Dump", "Diagnose" };
#endif
/* Status word bits */
#define STAT_CX 0x8000 /* The CU finished executing a command
with the Interrupt bit set */
#define STAT_FR 0x4000 /* The RU finished receiving a frame */
#define STAT_CNA 0x2000 /* The CU left the active state */
#define STAT_RNR 0x1000 /* The RU left the active state */
#define STAT_ACK (STAT_CX | STAT_FR | STAT_CNA | STAT_RNR)
#define STAT_CUS 0x0700 /* Status of CU: 0: idle, 1: suspended,
2: active, 3-7: unused */
#define STAT_RUS 0x00f0 /* Status of RU: 0: idle, 1: suspended,
2: no resources, 4: ready,
10: no resources due to no more RBDs,
12: no more RBDs, other: unused */
#define STAT_T 0x0008 /* Bus throttle timers loaded */
#define STAT_ZERO 0x0807 /* Always zero */
#if 0
static char *CUstates[8] = {
"idle", "suspended", "active", 0, 0, 0, 0, 0
};
static char *RUstates[16] = {
"idle", "suspended", "no resources", 0, "ready", 0, 0, 0,
0, 0, "no RBDs", 0, "out of RBDs", 0, 0, 0
};
static void
i596_out_status(int status) {
int bad = 0;
char *s;
printk("status %4.4x:", status);
if (status == 0xffff)
printk(" strange..\n");
else {
if (status & STAT_CX)
printk(" CU done");
if (status & STAT_CNA)
printk(" CU stopped");
if (status & STAT_FR)
printk(" got a frame");
if (status & STAT_RNR)
printk(" RU stopped");
if (status & STAT_T)
printk(" throttled");
if (status & STAT_ZERO)
bad = 1;
s = CUstates[(status & STAT_CUS) >> 8];
if (!s)
bad = 1;
else
printk(" CU(%s)", s);
s = RUstates[(status & STAT_RUS) >> 4];
if (!s)
bad = 1;
else
printk(" RU(%s)", s);
if (bad)
printk(" bad status");
printk("\n");
}
}
#endif
/* Command word bits */
#define ACK_CX 0x8000
#define ACK_FR 0x4000
#define ACK_CNA 0x2000
#define ACK_RNR 0x1000
#define CUC_START 0x0100
#define CUC_RESUME 0x0200
#define CUC_SUSPEND 0x0300
#define CUC_ABORT 0x0400
#define RX_START 0x0010
#define RX_RESUME 0x0020
#define RX_SUSPEND 0x0030
#define RX_ABORT 0x0040
typedef u32 phys_addr;
static inline phys_addr
va_to_pa(void *x) {
return x ? virt_to_bus(x) : I596_NULL;
}
static inline void *
pa_to_va(phys_addr x) {
return (x == I596_NULL) ? NULL : bus_to_virt(x);
}
/* status bits for cmd */
#define CMD_STAT_C 0x8000 /* CU command complete */
#define CMD_STAT_B 0x4000 /* CU command in progress */
#define CMD_STAT_OK 0x2000 /* CU command completed without errors */
#define CMD_STAT_A 0x1000 /* CU command abnormally terminated */
struct i596_cmd { /* 8 bytes */
unsigned short status;
unsigned short command;
phys_addr pa_next; /* va_to_pa(struct i596_cmd *next) */
};
#define EOF 0x8000
#define SIZE_MASK 0x3fff
struct i596_tbd {
unsigned short size;
unsigned short pad;
phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */
phys_addr pa_data; /* va_to_pa(char *data) */
struct sk_buff *skb;
};
struct tx_cmd {
struct i596_cmd cmd;
phys_addr pa_tbd; /* va_to_pa(struct i596_tbd *tbd) */
unsigned short size;
unsigned short pad;
};
/* status bits for rfd */
#define RFD_STAT_C 0x8000 /* Frame reception complete */
#define RFD_STAT_B 0x4000 /* Frame reception in progress */
#define RFD_STAT_OK 0x2000 /* Frame received without errors */
#define RFD_STATUS 0x1fff
#define RFD_LENGTH_ERR 0x1000
#define RFD_CRC_ERR 0x0800
#define RFD_ALIGN_ERR 0x0400
#define RFD_NOBUFS_ERR 0x0200
#define RFD_DMA_ERR 0x0100 /* DMA overrun failure to acquire system bus */
#define RFD_SHORT_FRAME_ERR 0x0080
#define RFD_NOEOP_ERR 0x0040
#define RFD_TRUNC_ERR 0x0020
#define RFD_MULTICAST 0x0002 /* 0: destination had our address
1: destination was broadcast/multicast */
#define RFD_COLLISION 0x0001
/* receive frame descriptor */
struct i596_rfd {
unsigned short stat;
unsigned short cmd;
phys_addr pa_next; /* va_to_pa(struct i596_rfd *next) */
phys_addr pa_rbd; /* va_to_pa(struct i596_rbd *rbd) */
unsigned short count;
unsigned short size;
char data[1532];
};
#define RBD_EL 0x8000
#define RBD_P 0x4000
#define RBD_SIZEMASK 0x3fff
#define RBD_EOF 0x8000
#define RBD_F 0x4000
/* receive buffer descriptor */
struct i596_rbd {
unsigned short size;
unsigned short pad;
phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */
phys_addr pa_data; /* va_to_pa(char *data) */
phys_addr pa_prev; /* va_to_pa(struct i596_tbd *prev) */
/* Driver private part */
struct sk_buff *skb;
};
#define RX_RING_SIZE 64
#define RX_SKBSIZE (ETH_FRAME_LEN+10)
#define RX_RBD_SIZE 32
/* System Control Block - 40 bytes */
struct i596_scb {
u16 status; /* 0 */
u16 command; /* 2 */
phys_addr pa_cmd; /* 4 - va_to_pa(struct i596_cmd *cmd) */
phys_addr pa_rfd; /* 8 - va_to_pa(struct i596_rfd *rfd) */
u32 crc_err; /* 12 */
u32 align_err; /* 16 */
u32 resource_err; /* 20 */
u32 over_err; /* 24 */
u32 rcvdt_err; /* 28 */
u32 short_err; /* 32 */
u16 t_on; /* 36 */
u16 t_off; /* 38 */
};
/* Intermediate System Configuration Pointer - 8 bytes */
struct i596_iscp {
u32 busy; /* 0 */
phys_addr pa_scb; /* 4 - va_to_pa(struct i596_scb *scb) */
};
/* System Configuration Pointer - 12 bytes */
struct i596_scp {
u32 sysbus; /* 0 */
u32 pad; /* 4 */
phys_addr pa_iscp; /* 8 - va_to_pa(struct i596_iscp *iscp) */
};
/* Selftest and dump results - needs 16-byte alignment */
/*
* The size of the dump area is 304 bytes. When the dump is executed
* by the Port command an extra word will be appended to the dump area.
* The extra word is a copy of the Dump status word (containing the
* C, B, OK bits). [I find 0xa006, with a0 for C+OK and 6 for dump]
*/
struct i596_dump {
u16 dump[153]; /* (304 = 130h) + 2 bytes */
};
struct i596_private { /* aligned to a 16-byte boundary */
struct i596_scp scp; /* 0 - needs 16-byte alignment */
struct i596_iscp iscp; /* 12 */
struct i596_scb scb; /* 20 */
u32 dummy; /* 60 */
struct i596_dump dump; /* 64 - needs 16-byte alignment */
struct i596_cmd set_add;
char eth_addr[8]; /* directly follows set_add */
struct i596_cmd set_conf;
char i596_config[16]; /* directly follows set_conf */
struct i596_cmd tdr;
unsigned long tdr_stat; /* directly follows tdr */
int last_restart;
struct i596_rbd *rbd_list;
struct i596_rbd *rbd_tail;
struct i596_rfd *rx_tail;
struct i596_cmd *cmd_tail;
struct i596_cmd *cmd_head;
int cmd_backlog;
unsigned long last_cmd;
spinlock_t cmd_lock;
};
static char init_setup[14] = {
0x8E, /* length 14 bytes, prefetch on */
0xC8, /* default: fifo to 8, monitor off */
0x40, /* default: don't save bad frames (apricot.c had 0x80) */
0x2E, /* (default is 0x26)
No source address insertion, 8 byte preamble */
0x00, /* default priority and backoff */
0x60, /* default interframe spacing */
0x00, /* default slot time LSB */
0xf2, /* default slot time and nr of retries */
0x00, /* default various bits
(0: promiscuous mode, 1: broadcast disable,
2: encoding mode, 3: transmit on no CRS,
4: no CRC insertion, 5: CRC type,
6: bit stuffing, 7: padding) */
0x00, /* default carrier sense and collision detect */
0x40, /* default minimum frame length */
0xff, /* (default is 0xff, and that is what apricot.c has;
elp486.c has 0xfb: Enable crc append in memory.) */
0x00, /* default: not full duplex */
0x7f /* (default is 0x3f) multi IA */
};
static int i596_open(struct net_device *dev);
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void print_eth(char *);
static void set_multicast_list(struct net_device *dev);
static void i596_tx_timeout(struct net_device *dev);
static int
i596_timeout(struct net_device *dev, char *msg, int ct) {
struct i596_private *lp;
int boguscnt = ct;
lp = netdev_priv(dev);
while (lp->scb.command) {
if (--boguscnt == 0) {
printk("%s: %s timed out - stat %4.4x, cmd %4.4x\n",
dev->name, msg,
lp->scb.status, lp->scb.command);
return 1;
}
udelay(5);
barrier();
}
return 0;
}
static inline int
init_rx_bufs(struct net_device *dev, int num) {
struct i596_private *lp;
struct i596_rfd *rfd;
int i;
// struct i596_rbd *rbd;
lp = netdev_priv(dev);
lp->scb.pa_rfd = I596_NULL;
for (i = 0; i < num; i++) {
rfd = kmalloc(sizeof(struct i596_rfd), GFP_KERNEL);
if (rfd == NULL)
break;
rfd->stat = 0;
rfd->pa_rbd = I596_NULL;
rfd->count = 0;
rfd->size = 1532;
if (i == 0) {
rfd->cmd = CMD_EOL;
lp->rx_tail = rfd;
} else {
rfd->cmd = 0;
}
rfd->pa_next = lp->scb.pa_rfd;
lp->scb.pa_rfd = va_to_pa(rfd);
lp->rx_tail->pa_next = lp->scb.pa_rfd;
}
#if 0
for (i = 0; i<RX_RBD_SIZE; i++) {
rbd = kmalloc(sizeof(struct i596_rbd), GFP_KERNEL);
if (rbd) {
rbd->pad = 0;
rbd->count = 0;
rbd->skb = dev_alloc_skb(RX_SKBSIZE);
if (!rbd->skb) {
printk("dev_alloc_skb failed");
}
rbd->next = rfd->rbd;
if (i) {
rfd->rbd->prev = rbd;
rbd->size = RX_SKBSIZE;
} else {
rbd->size = (RX_SKBSIZE | RBD_EL);
lp->rbd_tail = rbd;
}
rfd->rbd = rbd;
} else {
printk("Could not kmalloc rbd\n");
}
}
lp->rbd_tail->next = rfd->rbd;
#endif
return i;
}
static inline void
remove_rx_bufs(struct net_device *dev) {
struct i596_private *lp;
struct i596_rfd *rfd;
lp = netdev_priv(dev);
lp->rx_tail->pa_next = I596_NULL;
do {
rfd = pa_to_va(lp->scb.pa_rfd);
lp->scb.pa_rfd = rfd->pa_next;
kfree(rfd);
} while (rfd != lp->rx_tail);
lp->rx_tail = NULL;
#if 0
for (lp->rbd_list) {
}
#endif
}
#define PORT_RESET 0x00 /* reset 82596 */
#define PORT_SELFTEST 0x01 /* selftest */
#define PORT_ALTSCP 0x02 /* alternate SCB address */
#define PORT_DUMP 0x03 /* dump */
#define IOADDR 0xcb0 /* real constant */
#define IRQ 10 /* default IRQ - can be changed by ECU */
/* The 82596 requires two 16-bit write cycles for a port command */
static inline void
PORT(phys_addr a, unsigned int cmd) {
if (a & 0xf)
printk("lp486e.c: PORT: address not aligned\n");
outw(((a & 0xffff) | cmd), IOADDR);
outw(((a>>16) & 0xffff), IOADDR+2);
}
static inline void
CA(void) {
outb(0, IOADDR+4);
udelay(8);
}
static inline void
CLEAR_INT(void) {
outb(0, IOADDR+8);
}
#if 0
/* selftest or dump */
static void
i596_port_do(struct net_device *dev, int portcmd, char *cmdname) {
struct i596_private *lp = netdev_priv(dev);
u16 *outp;
int i, m;
memset((void *)&(lp->dump), 0, sizeof(struct i596_dump));
outp = &(lp->dump.dump[0]);
PORT(va_to_pa(outp), portcmd);
mdelay(30); /* random, unmotivated */
printk("lp486e i82596 %s result:\n", cmdname);
for (m = ARRAY_SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--)
;
for (i = 0; i < m; i++) {
printk(" %04x", lp->dump.dump[i]);
if (i%8 == 7)
printk("\n");
}
printk("\n");
}
#endif
static int
i596_scp_setup(struct net_device *dev) {
struct i596_private *lp = netdev_priv(dev);
int boguscnt;
/* Setup SCP, ISCP, SCB */
/*
* sysbus bits:
* only a single byte is significant - here 0x44
* 0x80: big endian mode (details depend on stepping)
* 0x40: 1
* 0x20: interrupt pin is active low
* 0x10: lock function disabled
* 0x08: external triggering of bus throttle timers
* 0x06: 00: 82586 compat mode, 01: segmented mode, 10: linear mode
* 0x01: unused
*/
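/* here 0x44 = 0x40 (the always-one bit) | 0x04 (mode field 10b = linear mode) */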
lp->scp.sysbus = 0x00440000; /* linear mode */
lp->scp.pad = 0; /* must be zero */
lp->scp.pa_iscp = va_to_pa(&(lp->iscp));
/*
* The CPU sets the ISCP to 1 before it gives the first CA()
*/
lp->iscp.busy = 0x0001;
lp->iscp.pa_scb = va_to_pa(&(lp->scb));
lp->scb.command = 0;
lp->scb.status = 0;
lp->scb.pa_cmd = I596_NULL;
/* lp->scb.pa_rfd has been initialised already */
lp->last_cmd = jiffies;
lp->cmd_backlog = 0;
lp->cmd_head = NULL;
/*
* Reset the 82596.
* We need to wait 10 systemclock cycles, and
* 5 serial clock cycles.
*/
PORT(0, PORT_RESET); /* address part ignored */
udelay(100);
/*
* Before the CA signal is asserted, the default SCP address
* (0x00fffff4) can be changed to a 16-byte aligned value
*/
PORT(va_to_pa(&lp->scp), PORT_ALTSCP); /* change the scp address */
/*
* The initialization procedure begins when a
* Channel Attention signal is asserted after a reset.
*/
CA();
/*
* The ISCP busy is cleared by the 82596 after the SCB address is read.
*/
boguscnt = 100;
while (lp->iscp.busy) {
if (--boguscnt == 0) {
/* No i82596 present? */
printk("%s: i82596 initialization timed out\n",
dev->name);
return 1;
}
udelay(5);
barrier();
}
/* I find here boguscnt==100, so no delay was required. */
return 0;
}
static int
init_i596(struct net_device *dev) {
struct i596_private *lp;
if (i596_scp_setup(dev))
return 1;
lp = netdev_priv(dev);
lp->scb.command = 0;
memcpy ((void *)lp->i596_config, init_setup, 14);
lp->set_conf.command = CmdConfigure;
i596_add_cmd(dev, (void *)&lp->set_conf);
memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
lp->set_add.command = CmdIASetup;
i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add);
lp->tdr.command = CmdTDR;
i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr);
if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
return 1;
lp->scb.command = RX_START;
CA();
barrier();
if (lp->scb.command && i596_timeout(dev, "Receive Unit start", 100))
return 1;
return 0;
}
/* Receive a single frame */
static inline int
i596_rx_one(struct net_device *dev, struct i596_private *lp,
struct i596_rfd *rfd, int *frames) {
if (rfd->stat & RFD_STAT_OK) {
/* a good frame */
int pkt_len = (rfd->count & 0x3fff);
struct sk_buff *skb = dev_alloc_skb(pkt_len);
(*frames)++;
if (rfd->cmd & CMD_EOL)
printk("Received on EOL\n");
if (skb == NULL) {
printk ("%s: i596_rx Memory squeeze, "
"dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
return 1;
}
memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len);
skb->protocol = eth_type_trans(skb,dev);
netif_rx(skb);
dev->stats.rx_packets++;
} else {
#if 0
printk("Frame reception error status %04x\n",
rfd->stat);
#endif
dev->stats.rx_errors++;
if (rfd->stat & RFD_COLLISION)
dev->stats.collisions++;
if (rfd->stat & RFD_SHORT_FRAME_ERR)
dev->stats.rx_length_errors++;
if (rfd->stat & RFD_DMA_ERR)
dev->stats.rx_over_errors++;
if (rfd->stat & RFD_NOBUFS_ERR)
dev->stats.rx_fifo_errors++;
if (rfd->stat & RFD_ALIGN_ERR)
dev->stats.rx_frame_errors++;
if (rfd->stat & RFD_CRC_ERR)
dev->stats.rx_crc_errors++;
if (rfd->stat & RFD_LENGTH_ERR)
dev->stats.rx_length_errors++;
}
rfd->stat = rfd->count = 0;
return 0;
}
static int
i596_rx(struct net_device *dev) {
struct i596_private *lp = netdev_priv(dev);
struct i596_rfd *rfd;
int frames = 0;
while (1) {
rfd = pa_to_va(lp->scb.pa_rfd);
if (!rfd) {
printk(KERN_ERR "i596_rx: NULL rfd?\n");
return 0;
}
#if 1
if (rfd->stat && !(rfd->stat & (RFD_STAT_C | RFD_STAT_B)))
printk("SF:%p-%04x\n", rfd, rfd->stat);
#endif
if (!(rfd->stat & RFD_STAT_C))
break; /* next one not ready */
if (i596_rx_one(dev, lp, rfd, &frames))
break; /* out of memory */
rfd->cmd = CMD_EOL;
lp->rx_tail->cmd = 0;
lp->rx_tail = rfd;
lp->scb.pa_rfd = rfd->pa_next;
barrier();
}
return frames;
}
static void
i596_cleanup_cmd(struct net_device *dev) {
struct i596_private *lp;
struct i596_cmd *cmd;
lp = netdev_priv(dev);
while (lp->cmd_head) {
cmd = (struct i596_cmd *)lp->cmd_head;
lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
lp->cmd_backlog--;
switch ((cmd->command) & 0x7) {
case CmdTx: {
struct tx_cmd *tx_cmd = (struct tx_cmd *) cmd;
struct i596_tbd * tx_cmd_tbd;
tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
dev_kfree_skb_any(tx_cmd_tbd->skb);
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
cmd->pa_next = I596_NULL;
kfree((unsigned char *)tx_cmd);
netif_wake_queue(dev);
break;
}
case CmdMulticastList: {
// unsigned short count = *((unsigned short *) (ptr + 1));
cmd->pa_next = I596_NULL;
kfree((unsigned char *)cmd);
break;
}
default: {
cmd->pa_next = I596_NULL;
break;
}
}
barrier();
}
if (lp->scb.command && i596_timeout(dev, "i596_cleanup_cmd", 100))
;
lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
}
static void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) {
if (lp->scb.command && i596_timeout(dev, "i596_reset", 100))
;
netif_stop_queue(dev);
lp->scb.command = CUC_ABORT | RX_ABORT;
CA();
barrier();
/* wait for shutdown */
if (lp->scb.command && i596_timeout(dev, "i596_reset(2)", 400))
;
i596_cleanup_cmd(dev);
i596_rx(dev);
netif_start_queue(dev);
/*dev_kfree_skb(skb, FREE_WRITE);*/
init_i596(dev);
}
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) {
struct i596_private *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
unsigned long flags;
cmd->status = 0;
cmd->command |= (CMD_EOL | CMD_INTR);
cmd->pa_next = I596_NULL;
spin_lock_irqsave(&lp->cmd_lock, flags);
if (lp->cmd_head) {
lp->cmd_tail->pa_next = va_to_pa(cmd);
} else {
lp->cmd_head = cmd;
if (lp->scb.command && i596_timeout(dev, "i596_add_cmd", 100))
;
lp->scb.pa_cmd = va_to_pa(cmd);
lp->scb.command = CUC_START;
CA();
}
lp->cmd_tail = cmd;
lp->cmd_backlog++;
lp->cmd_head = pa_to_va(lp->scb.pa_cmd);
spin_unlock_irqrestore(&lp->cmd_lock, flags);
if (lp->cmd_backlog > 16) {
int tickssofar = jiffies - lp->last_cmd;
if (tickssofar < HZ/4)
return;
printk(KERN_WARNING "%s: command unit timed out, status resetting.\n", dev->name);
i596_reset(dev, lp, ioaddr);
}
}
static int i596_open(struct net_device *dev)
{
int i;
i = request_irq(dev->irq, i596_interrupt, IRQF_SHARED, dev->name, dev);
if (i) {
printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
return i;
}
if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE)
printk(KERN_ERR "%s: only able to allocate %d receive buffers\n", dev->name, i);
if (i < 4) {
free_irq(dev->irq, dev);
return -EAGAIN;
}
netif_start_queue(dev);
init_i596(dev);
return 0; /* Always succeed */
}
static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
struct tx_cmd *tx_cmd;
short length;
length = skb->len;
if (length < ETH_ZLEN) {
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
length = ETH_ZLEN;
}
tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
if (tx_cmd == NULL) {
printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
dev->stats.tx_dropped++;
dev_kfree_skb (skb);
} else {
struct i596_tbd *tx_cmd_tbd;
tx_cmd_tbd = (struct i596_tbd *) (tx_cmd + 1);
tx_cmd->pa_tbd = va_to_pa (tx_cmd_tbd);
tx_cmd_tbd->pa_next = I596_NULL;
tx_cmd->cmd.command = (CMD_FLEX | CmdTx);
tx_cmd->pad = 0;
tx_cmd->size = 0;
tx_cmd_tbd->pad = 0;
tx_cmd_tbd->size = (EOF | length);
tx_cmd_tbd->pa_data = va_to_pa (skb->data);
tx_cmd_tbd->skb = skb;
if (i596_debug & LOG_SRCDST)
print_eth (skb->data);
i596_add_cmd (dev, (struct i596_cmd *) tx_cmd);
dev->stats.tx_packets++;
}
return NETDEV_TX_OK;
}
static void
i596_tx_timeout (struct net_device *dev) {
struct i596_private *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
/* Transmitter timeout, serious problems. */
printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name);
dev->stats.tx_errors++;
/* Try to restart the adaptor */
if (lp->last_restart == dev->stats.tx_packets) {
printk ("Resetting board.\n");
/* Shutdown and restart */
i596_reset (dev, lp, ioaddr);
} else {
/* Issue a channel attention signal */
printk ("Kicking board.\n");
lp->scb.command = (CUC_START | RX_START);
CA();
lp->last_restart = dev->stats.tx_packets;
}
netif_wake_queue(dev);
}
static void print_eth(char *add)
{
int i;
printk ("Dest ");
for (i = 0; i < 6; i++)
printk(" %2.2X", (unsigned char) add[i]);
printk ("\n");
printk ("Source");
for (i = 0; i < 6; i++)
printk(" %2.2X", (unsigned char) add[i+6]);
printk ("\n");
printk ("type %2.2X%2.2X\n",
(unsigned char) add[12], (unsigned char) add[13]);
}
static const struct net_device_ops i596_netdev_ops = {
.ndo_open = i596_open,
.ndo_stop = i596_close,
.ndo_start_xmit = i596_start_xmit,
.ndo_set_multicast_list = set_multicast_list,
.ndo_tx_timeout = i596_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int __init lp486e_probe(struct net_device *dev) {
struct i596_private *lp;
unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 };
unsigned char *bios;
int i, j;
int ret = -ENOMEM;
static int probed;
if (probed)
return -ENODEV;
probed++;
if (!request_region(IOADDR, LP486E_TOTAL_SIZE, DRV_NAME)) {
printk(KERN_ERR "lp486e: IO address 0x%x in use\n", IOADDR);
return -EBUSY;
}
lp = netdev_priv(dev);
spin_lock_init(&lp->cmd_lock);
/*
* Do we really have this thing?
*/
if (i596_scp_setup(dev)) {
ret = -ENODEV;
goto err_out_kfree;
}
dev->base_addr = IOADDR;
dev->irq = IRQ;
/*
* How do we find the ethernet address? I don't know.
* One possibility is to look at the EISA configuration area
* [0xe8000-0xe9fff]. This contains the ethernet address
* but not at a fixed address - things depend on setup options.
*
* If we find no address, or the wrong address, use
* ifconfig eth0 hw ether a1:a2:a3:a4:a5:a6
* with the value found in the BIOS setup.
*/
bios = bus_to_virt(0xe8000);
for (j = 0; j < 0x2000; j++) {
if (bios[j] == 0 && bios[j+1] == 0xaa && bios[j+2] == 0) {
printk("%s: maybe address at BIOS 0x%x:",
dev->name, 0xe8000+j);
for (i = 0; i < 6; i++) {
eth_addr[i] = bios[i+j];
printk(" %2.2X", eth_addr[i]);
}
printk("\n");
}
}
printk("%s: lp486e 82596 at %#3lx, IRQ %d,",
dev->name, dev->base_addr, dev->irq);
for (i = 0; i < 6; i++)
printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]);
printk("\n");
/* The LP486E-specific entries in the device structure. */
dev->netdev_ops = &i596_netdev_ops;
dev->watchdog_timeo = 5*HZ;
#if 0
/* selftest reports 0x320925ae - don't know what that means */
i596_port_do(dev, PORT_SELFTEST, "selftest");
i596_port_do(dev, PORT_DUMP, "dump");
#endif
return 0;
err_out_kfree:
release_region(IOADDR, LP486E_TOTAL_SIZE);
return ret;
}
static inline void
i596_handle_CU_completion(struct net_device *dev,
struct i596_private *lp,
unsigned short status,
unsigned short *ack_cmdp) {
struct i596_cmd *cmd;
int frames_out = 0;
int commands_done = 0;
int cmd_val;
unsigned long flags;
spin_lock_irqsave(&lp->cmd_lock, flags);
cmd = lp->cmd_head;
while (lp->cmd_head && (lp->cmd_head->status & CMD_STAT_C)) {
cmd = lp->cmd_head;
lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
lp->cmd_backlog--;
commands_done++;
cmd_val = cmd->command & 0x7;
#if 0
printk("finished CU %s command (%d)\n",
CUcmdnames[cmd_val], cmd_val);
#endif
switch (cmd_val) {
case CmdTx:
{
struct tx_cmd *tx_cmd;
struct i596_tbd *tx_cmd_tbd;
tx_cmd = (struct tx_cmd *) cmd;
tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
frames_out++;
if (cmd->status & CMD_STAT_OK) {
if (i596_debug)
print_eth(pa_to_va(tx_cmd_tbd->pa_data));
} else {
dev->stats.tx_errors++;
if (i596_debug)
printk("transmission failure:%04x\n",
cmd->status);
if (cmd->status & 0x0020)
dev->stats.collisions++;
if (!(cmd->status & 0x0040))
dev->stats.tx_heartbeat_errors++;
if (cmd->status & 0x0400)
dev->stats.tx_carrier_errors++;
if (cmd->status & 0x0800)
dev->stats.collisions++;
if (cmd->status & 0x1000)
dev->stats.tx_aborted_errors++;
}
dev_kfree_skb_irq(tx_cmd_tbd->skb);
cmd->pa_next = I596_NULL;
kfree((unsigned char *)tx_cmd);
netif_wake_queue(dev);
break;
}
case CmdMulticastList:
cmd->pa_next = I596_NULL;
kfree((unsigned char *)cmd);
break;
case CmdTDR:
{
unsigned long status = *((unsigned long *) (cmd + 1));
if (status & 0x8000) {
if (i596_debug)
printk("%s: link ok.\n", dev->name);
} else {
if (status & 0x4000)
printk("%s: Transceiver problem.\n",
dev->name);
if (status & 0x2000)
printk("%s: Termination problem.\n",
dev->name);
if (status & 0x1000)
printk("%s: Short circuit.\n",
dev->name);
printk("%s: Time %ld.\n",
dev->name, status & 0x07ff);
}
}
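/* fall through - let the default case clear pa_next and update last_cmd */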
default:
cmd->pa_next = I596_NULL;
lp->last_cmd = jiffies;
}
barrier();
}
cmd = lp->cmd_head;
while (cmd && (cmd != lp->cmd_tail)) {
cmd->command &= 0x1fff;
cmd = pa_to_va(cmd->pa_next);
barrier();
}
if (lp->cmd_head)
*ack_cmdp |= CUC_START;
lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
spin_unlock_irqrestore(&lp->cmd_lock, flags);
}
static irqreturn_t
i596_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct i596_private *lp = netdev_priv(dev);
unsigned short status, ack_cmd = 0;
int frames_in = 0;
/*
* The 82596 examines the command, performs the required action,
* and then clears the SCB command word.
*/
if (lp->scb.command && i596_timeout(dev, "interrupt", 40))
;
/*
* The status word indicates the status of the 82596.
* It is modified only by the 82596.
*
* [So, we must not clear it. I find often status 0xffff,
* which is not one of the values allowed by the docs.]
*/
status = lp->scb.status;
#if 0
if (i596_debug) {
printk("%s: i596 interrupt, ", dev->name);
i596_out_status(status);
}
#endif
/* Impossible, but it happens - perhaps when we get
a receive interrupt but scb.pa_rfd is I596_NULL. */
if (status == 0xffff) {
printk("%s: i596_interrupt: got status 0xffff\n", dev->name);
goto out;
}
ack_cmd = (status & STAT_ACK);
if (status & (STAT_CX | STAT_CNA))
i596_handle_CU_completion(dev, lp, status, &ack_cmd);
if (status & (STAT_FR | STAT_RNR)) {
/* Restart the receive unit when it got inactive somehow */
if ((status & STAT_RNR) && netif_running(dev))
ack_cmd |= RX_START;
if (status & STAT_FR) {
frames_in = i596_rx(dev);
if (!frames_in)
printk("receive frame reported, but no frames\n");
}
}
/* acknowledge the interrupt */
/*
if ((lp->scb.pa_cmd != I596_NULL) && netif_running(dev))
ack_cmd |= CUC_START;
*/
if (lp->scb.command && i596_timeout(dev, "i596 interrupt", 100))
;
lp->scb.command = ack_cmd;
CLEAR_INT();
CA();
out:
return IRQ_HANDLED;
}
static int i596_close(struct net_device *dev) {
struct i596_private *lp = netdev_priv(dev);
netif_stop_queue(dev);
if (i596_debug)
printk("%s: Shutting down ethercard, status was %4.4x.\n",
dev->name, lp->scb.status);
lp->scb.command = (CUC_ABORT | RX_ABORT);
CA();
i596_cleanup_cmd(dev);
if (lp->scb.command && i596_timeout(dev, "i596_close", 200))
;
free_irq(dev->irq, dev);
remove_rx_bufs(dev);
return 0;
}
/*
* Set or clear the multicast filter for this adaptor.
*/
static void set_multicast_list(struct net_device *dev) {
struct i596_private *lp = netdev_priv(dev);
struct i596_cmd *cmd;
if (i596_debug > 1)
printk ("%s: set multicast list %d\n",
dev->name, netdev_mc_count(dev));
if (!netdev_mc_empty(dev)) {
struct netdev_hw_addr *ha;
char *cp;
cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
netdev_mc_count(dev) * 6, GFP_ATOMIC);
if (cmd == NULL) {
printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
return;
}
cmd->command = CmdMulticastList;
*((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
cp = ((char *)(cmd + 1))+2;
netdev_for_each_mc_addr(ha, dev) {
memcpy(cp, ha->addr, 6);
cp += 6;
}
if (i596_debug & LOG_SRCDST)
print_eth (((char *)(cmd + 1)) + 2);
i596_add_cmd(dev, cmd);
} else {
if (lp->set_conf.pa_next != I596_NULL) {
return;
}
if (netdev_mc_empty(dev) &&
!(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
lp->i596_config[8] &= ~0x01;
} else {
lp->i596_config[8] |= 0x01;
}
i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf);
}
}
MODULE_AUTHOR("Ard van Breemen <ard@cstmel.nl.eu.org>");
MODULE_DESCRIPTION("Intel Panther onboard i82596 driver");
MODULE_LICENSE("GPL");
static struct net_device *dev_lp486e;
static int full_duplex;
static int options;
static int io = IOADDR;
static int irq = IRQ;
module_param(debug, int, 0);
//module_param(max_interrupt_work, int, 0);
//module_param(reverse_probe, int, 0);
//module_param(rx_copybreak, int, 0);
module_param(options, int, 0);
module_param(full_duplex, int, 0);
static int __init lp486e_init_module(void) {
int err;
struct net_device *dev = alloc_etherdev(sizeof(struct i596_private));
if (!dev)
return -ENOMEM;
dev->irq = irq;
dev->base_addr = io;
err = lp486e_probe(dev);
if (err) {
free_netdev(dev);
return err;
}
err = register_netdev(dev);
if (err) {
release_region(dev->base_addr, LP486E_TOTAL_SIZE);
free_netdev(dev);
return err;
}
dev_lp486e = dev;
full_duplex = 0;
options = 0;
return 0;
}
static void __exit lp486e_cleanup_module(void) {
unregister_netdev(dev_lp486e);
release_region(dev_lp486e->base_addr, LP486E_TOTAL_SIZE);
free_netdev(dev_lp486e);
}
module_init(lp486e_init_module);
module_exit(lp486e_cleanup_module);
| gpl-2.0 |
GameTheory-/android_kernel_lge_l1m | arch/sparc/math-emu/math_32.c | 3431 | 17713 | /*
* arch/sparc/math-emu/math.c
*
* Copyright (C) 1998 Peter Maydell (pmaydell@chiark.greenend.org.uk)
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*
* This is a good place to start if you're trying to understand the
* emulation code, because it's pretty simple. What we do is
* essentially analyse the instruction to work out what the operation
* is and which registers are involved. We then execute the appropriate
* FXXXX function. [The floating point queue introduces a minor wrinkle;
* see below...]
* The fxxxxx.c files each emulate a single insn. They look relatively
* simple because the complexity is hidden away in an unholy tangle
* of preprocessor macros.
*
* The first layer of macros is single.h, double.h, quad.h. Generally
* these files define macros for working with floating point numbers
* of the three IEEE formats. FP_ADD_D(R,A,B) is for adding doubles,
* for instance. These macros are usually defined as calls to more
* generic macros (in this case _FP_ADD(D,2,R,X,Y) where the number
* of machine words required to store the given IEEE format is passed
* as a parameter. [double.h and co check the number of bits in a word
* and define FP_ADD_D & co appropriately].
* The generic macros are defined in op-common.h. This is where all
* the grotty stuff like handling NaNs is coded. To handle the possible
* word sizes macros in op-common.h use macros like _FP_FRAC_SLL_##wc()
* where wc is the 'number of machine words' parameter (here 2).
* These are defined in the third layer of macros: op-1.h, op-2.h
* and op-4.h. These handle operations on floating point numbers composed
* of 1,2 and 4 machine words respectively. [For example, on sparc64
* doubles are one machine word so macros in double.h eventually use
* constructs in op-1.h, but on sparc32 they use op-2.h definitions.]
* soft-fp.h is on the same level as op-common.h, and defines some
* macros which are independent of both word size and FP format.
* Finally, sfp-machine.h is the machine dependent part of the
* code: it defines the word size and what type a word is. It also
* defines how _FP_MUL_MEAT_t() maps to _FP_MUL_MEAT_n_* : op-n.h
* provide several possible flavours of multiply algorithm, most
* of which require that you supply some form of asm or C primitive to
* do the actual multiply. (such asm primitives should be defined
* in sfp-machine.h too). udivmodti4.c is the same sort of thing.
*
* There may be some errors here because I'm working from a
* SPARC architecture manual V9, and what I really want is V8...
* Also, the insns which can generate exceptions seem to be a
* greater subset of the FPops than for V9 (for example, FCMPED
* has to be emulated on V8). So I think I'm going to have
* to emulate them all just to be on the safe side...
*
* Emulation routines originate from soft-fp package, which is
* part of glibc and has appropriate copyrights in it (allegedly).
*
* NB: on sparc int == long == 4 bytes, long long == 8 bytes.
* Most bits of the kernel seem to go for long rather than int,
* so we follow that practice...
*/
/* TODO:
* fpsave() saves the FP queue but fpload() doesn't reload it.
* Therefore when we context switch or change FPU ownership
* we have to check to see if the queue had anything in it and
* emulate it if it did. This is going to be a pain.
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include "sfp-util_32.h"
#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
#include <math-emu/double.h>
#include <math-emu/quad.h>
#define FLOATFUNC(x) extern int x(void *,void *,void *)
/* The Vn labels indicate what version of the SPARC architecture gas thinks
* each insn is. This is from the binutils source :->
*/
/* quadword instructions */
#define FSQRTQ 0x02b /* v8 */
#define FADDQ 0x043 /* v8 */
#define FSUBQ 0x047 /* v8 */
#define FMULQ 0x04b /* v8 */
#define FDIVQ 0x04f /* v8 */
#define FDMULQ 0x06e /* v8 */
#define FQTOS 0x0c7 /* v8 */
#define FQTOD 0x0cb /* v8 */
#define FITOQ 0x0cc /* v8 */
#define FSTOQ 0x0cd /* v8 */
#define FDTOQ 0x0ce /* v8 */
#define FQTOI 0x0d3 /* v8 */
#define FCMPQ 0x053 /* v8 */
#define FCMPEQ 0x057 /* v8 */
/* single/double instructions (subnormal): should all work */
#define FSQRTS 0x029 /* v7 */
#define FSQRTD 0x02a /* v7 */
#define FADDS 0x041 /* v6 */
#define FADDD 0x042 /* v6 */
#define FSUBS 0x045 /* v6 */
#define FSUBD 0x046 /* v6 */
#define FMULS 0x049 /* v6 */
#define FMULD 0x04a /* v6 */
#define FDIVS 0x04d /* v6 */
#define FDIVD 0x04e /* v6 */
#define FSMULD 0x069 /* v6 */
#define FDTOS 0x0c6 /* v6 */
#define FSTOD 0x0c9 /* v6 */
#define FSTOI 0x0d1 /* v6 */
#define FDTOI 0x0d2 /* v6 */
#define FABSS 0x009 /* v6 */
#define FCMPS 0x051 /* v6 */
#define FCMPES 0x055 /* v6 */
#define FCMPD 0x052 /* v6 */
#define FCMPED 0x056 /* v6 */
#define FMOVS 0x001 /* v6 */
#define FNEGS 0x005 /* v6 */
#define FITOS 0x0c4 /* v6 */
#define FITOD 0x0c8 /* v6 */
#define FSR_TEM_SHIFT 23UL
#define FSR_TEM_MASK (0x1fUL << FSR_TEM_SHIFT)
#define FSR_AEXC_SHIFT 5UL
#define FSR_AEXC_MASK (0x1fUL << FSR_AEXC_SHIFT)
#define FSR_CEXC_SHIFT 0UL
#define FSR_CEXC_MASK (0x1fUL << FSR_CEXC_SHIFT)
static int do_one_mathemu(u32 insn, unsigned long *fsr, unsigned long *fregs);
/* Unlike the Sparc64 version (which has a struct fpustate), we
* pass the taskstruct corresponding to the task which currently owns the
* FPU. This is partly because we don't have the fpustate struct and
* partly because the task owning the FPU isn't always current (as is
* the case for the Sparc64 port). This is probably SMP-related...
* This function returns 1 if all queued insns were emulated successfully.
* The test for unimplemented FPop in kernel mode has been moved into
* kernel/traps.c for simplicity.
*/
int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
{
/* regs->pc isn't necessarily the PC at which the offending insn is sitting.
* The FPU maintains a queue of FPops which cause traps.
* When it hits an instruction that requires that the trapped op succeeded
* (usually because it reads a reg. that the trapped op wrote) then it
* causes this exception. We need to emulate all the insns on the queue
* and then allow the op to proceed.
* This code should also handle the case where the trap was precise,
* in which case the queue length is zero and regs->pc points at the
* single FPop to be emulated. (this case is untested, though :->)
* You'll need this case if you want to be able to emulate all FPops
* because the FPU either doesn't exist or has been software-disabled.
* [The UltraSPARC makes FP a precise trap; this isn't as stupid as it
* might sound because the Ultra does funky things with a superscalar
* architecture.]
*/
/* You wouldn't believe how often I typed 'ftp' when I meant 'fpt' :-> */
int i;
int retcode = 0; /* assume all succeed */
unsigned long insn;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
#ifdef DEBUG_MATHEMU
printk("In do_mathemu()... pc is %08lx\n", regs->pc);
printk("fpqdepth is %ld\n", fpt->thread.fpqdepth);
for (i = 0; i < fpt->thread.fpqdepth; i++)
printk("%d: %08lx at %08lx\n", i, fpt->thread.fpqueue[i].insn,
(unsigned long)fpt->thread.fpqueue[i].insn_addr);
#endif
if (fpt->thread.fpqdepth == 0) { /* no queue, guilty insn is at regs->pc */
#ifdef DEBUG_MATHEMU
printk("precise trap at %08lx\n", regs->pc);
#endif
if (!get_user(insn, (u32 __user *) regs->pc)) {
retcode = do_one_mathemu(insn, &fpt->thread.fsr, fpt->thread.float_regs);
if (retcode) {
/* in this case we need to fix up PC & nPC */
regs->pc = regs->npc;
regs->npc += 4;
}
}
return retcode;
}
/* Normal case: need to empty the queue... */
for (i = 0; i < fpt->thread.fpqdepth; i++) {
retcode = do_one_mathemu(fpt->thread.fpqueue[i].insn, &(fpt->thread.fsr), fpt->thread.float_regs);
if (!retcode) /* insn failed, no point doing any more */
break;
}
/* Now empty the queue and clear the queue_not_empty flag */
if (retcode)
fpt->thread.fsr &= ~(0x3000 | FSR_CEXC_MASK);
else
fpt->thread.fsr &= ~0x3000;
fpt->thread.fpqdepth = 0;
return retcode;
}
/* All routines returning an exception to raise should detect
* such exceptions _before_ rounding to be consistent with
* the behavior of the hardware in the implemented cases
* (and thus with the recommendations in the V9 architecture
* manual).
*
* We return 0 if a SIGFPE should be sent, 1 otherwise.
*/
static inline int record_exception(unsigned long *pfsr, int eflag)
{
unsigned long fsr = *pfsr;
int would_trap;
/* Determine if this exception would have generated a trap. */
would_trap = (fsr & ((long)eflag << FSR_TEM_SHIFT)) != 0UL;
/* If trapping, we only want to signal one bit. */
if (would_trap != 0) {
eflag &= ((fsr & FSR_TEM_MASK) >> FSR_TEM_SHIFT);
if ((eflag & (eflag - 1)) != 0) {
if (eflag & FP_EX_INVALID)
eflag = FP_EX_INVALID;
else if (eflag & FP_EX_OVERFLOW)
eflag = FP_EX_OVERFLOW;
else if (eflag & FP_EX_UNDERFLOW)
eflag = FP_EX_UNDERFLOW;
else if (eflag & FP_EX_DIVZERO)
eflag = FP_EX_DIVZERO;
else if (eflag & FP_EX_INEXACT)
eflag = FP_EX_INEXACT;
}
}
/* Set CEXC, here is the rule:
*
* In general all FPU ops will set one and only one
* bit in the CEXC field, this is always the case
* when the IEEE exception trap is enabled in TEM.
*/
fsr &= ~(FSR_CEXC_MASK);
fsr |= ((long)eflag << FSR_CEXC_SHIFT);
/* Set the AEXC field, rule is:
*
* If a trap would not be generated, the
* CEXC just generated is OR'd into the
* existing value of AEXC.
*/
if (would_trap == 0)
fsr |= ((long)eflag << FSR_AEXC_SHIFT);
/* If trapping, indicate fault trap type IEEE. */
if (would_trap != 0)
fsr |= (1UL << 14);
*pfsr = fsr;
return (would_trap ? 0 : 1);
}
typedef union {
u32 s;
u64 d;
u64 q[2];
} *argp;
static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
{
/* Emulate the given insn, updating fsr and fregs appropriately. */
int type = 0;
/* r is rd, b is rs2 and a is rs1. The *u args tell
whether the argument should be packed/unpacked (0 - do not unpack/pack, 1 - unpack/pack);
non-u args tell the size of the argument (0 - no argument, 1 - single, 2 - double, 3 - quad) */
#define TYPE(dummy, r, ru, b, bu, a, au) type = (au << 2) | (a << 0) | (bu << 5) | (b << 3) | (ru << 8) | (r << 6)
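/* Worked example: FADDQ uses TYPE(3,3,1,3,1,3,1), giving
 * type = (1<<2)|3 | (1<<5)|(3<<3) | (1<<8)|(3<<6) = 0x1ff, so
 * (type & 7) == 7 and ((type >> 3) & 7) == 7 select quad unpacking of
 * rs1/rs2 below, and ((type >> 6) & 7) == 7 packs the quad result into rd. */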
int freg;
argp rs1 = NULL, rs2 = NULL, rd = NULL;
FP_DECL_EX;
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
int IR;
long fsr;
#ifdef DEBUG_MATHEMU
printk("In do_mathemu(), emulating %08lx\n", insn);
#endif
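/* Per the SPARC V8 encoding, FPops have op (bits 31:30) == 2 and op3
 * (bits 24:19) == 0x34 (FPop1) or 0x35 (FPop2); rd is bits 29:25, rs1 is
 * bits 18:14, opf is bits 13:5 and rs2 is bits 4:0 - hence the masks and
 * shifts used below. */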
if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
switch ((insn >> 5) & 0x1ff) {
case FSQRTQ: TYPE(3,3,1,3,1,0,0); break;
case FADDQ:
case FSUBQ:
case FMULQ:
case FDIVQ: TYPE(3,3,1,3,1,3,1); break;
case FDMULQ: TYPE(3,3,1,2,1,2,1); break;
case FQTOS: TYPE(3,1,1,3,1,0,0); break;
case FQTOD: TYPE(3,2,1,3,1,0,0); break;
case FITOQ: TYPE(3,3,1,1,0,0,0); break;
case FSTOQ: TYPE(3,3,1,1,1,0,0); break;
case FDTOQ: TYPE(3,3,1,2,1,0,0); break;
case FQTOI: TYPE(3,1,0,3,1,0,0); break;
case FSQRTS: TYPE(2,1,1,1,1,0,0); break;
case FSQRTD: TYPE(2,2,1,2,1,0,0); break;
case FADDD:
case FSUBD:
case FMULD:
case FDIVD: TYPE(2,2,1,2,1,2,1); break;
case FADDS:
case FSUBS:
case FMULS:
case FDIVS: TYPE(2,1,1,1,1,1,1); break;
case FSMULD: TYPE(2,2,1,1,1,1,1); break;
case FDTOS: TYPE(2,1,1,2,1,0,0); break;
case FSTOD: TYPE(2,2,1,1,1,0,0); break;
case FSTOI: TYPE(2,1,0,1,1,0,0); break;
case FDTOI: TYPE(2,1,0,2,1,0,0); break;
case FITOS: TYPE(2,1,1,1,0,0,0); break;
case FITOD: TYPE(2,2,1,1,0,0,0); break;
case FMOVS:
case FABSS:
case FNEGS: TYPE(2,1,0,1,0,0,0); break;
}
} else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ {
switch ((insn >> 5) & 0x1ff) {
case FCMPS: TYPE(3,0,0,1,1,1,1); break;
case FCMPES: TYPE(3,0,0,1,1,1,1); break;
case FCMPD: TYPE(3,0,0,2,1,2,1); break;
case FCMPED: TYPE(3,0,0,2,1,2,1); break;
case FCMPQ: TYPE(3,0,0,3,1,3,1); break;
case FCMPEQ: TYPE(3,0,0,3,1,3,1); break;
}
}
if (!type) { /* oops, didn't recognise that FPop */
#ifdef DEBUG_MATHEMU
printk("attempt to emulate unrecognised FPop!\n");
#endif
return 0;
}
/* Decode the registers to be used */
freg = (*pfsr >> 14) & 0xf;
*pfsr &= ~0x1c000; /* clear the traptype bits */
freg = ((insn >> 14) & 0x1f);
switch (type & 0x3) { /* is rs1 single, double or quad? */
case 3:
if (freg & 3) { /* quadwords must have bits 4&5 of the */
/* encoded reg. number set to zero. */
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
/* fall through */
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
return 0;
}
}
rs1 = (argp)&fregs[freg];
switch (type & 0x7) {
case 7: FP_UNPACK_QP (QA, rs1); break;
case 6: FP_UNPACK_DP (DA, rs1); break;
case 5: FP_UNPACK_SP (SA, rs1); break;
}
freg = (insn & 0x1f);
switch ((type >> 3) & 0x3) { /* same again for rs2 */
case 3:
if (freg & 3) { /* quadwords must have bits 4&5 of the */
/* encoded reg. number set to zero. */
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
/* fall through */
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
return 0;
}
}
rs2 = (argp)&fregs[freg];
switch ((type >> 3) & 0x7) {
case 7: FP_UNPACK_QP (QB, rs2); break;
case 6: FP_UNPACK_DP (DB, rs2); break;
case 5: FP_UNPACK_SP (SB, rs2); break;
}
freg = ((insn >> 25) & 0x1f);
switch ((type >> 6) & 0x3) { /* and finally rd. This one's a bit different */
case 0: /* dest is fcc. (this must be FCMPQ or FCMPEQ) */
if (freg) { /* V8 has only one set of condition codes, so */
/* anything but 0 in the rd field is an error */
*pfsr |= (6 << 14); /* (should probably flag as invalid opcode */
return 0; /* but SIGFPE will do :-> ) */
}
break;
case 3:
if (freg & 3) { /* quadwords must have bits 4&5 of the */
/* encoded reg. number set to zero. */
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
/* fall through */
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
return 0;
}
/* fall through */
case 1:
rd = (void *)&fregs[freg];
break;
}
#ifdef DEBUG_MATHEMU
printk("executing insn...\n");
#endif
/* do the Right Thing */
switch ((insn >> 5) & 0x1ff) {
/* + */
case FADDS: FP_ADD_S (SR, SA, SB); break;
case FADDD: FP_ADD_D (DR, DA, DB); break;
case FADDQ: FP_ADD_Q (QR, QA, QB); break;
/* - */
case FSUBS: FP_SUB_S (SR, SA, SB); break;
case FSUBD: FP_SUB_D (DR, DA, DB); break;
case FSUBQ: FP_SUB_Q (QR, QA, QB); break;
/* * */
case FMULS: FP_MUL_S (SR, SA, SB); break;
case FSMULD: FP_CONV (D, S, 2, 1, DA, SA);
FP_CONV (D, S, 2, 1, DB, SB);
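/* fall through - the converted operands are multiplied as doubles */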
case FMULD: FP_MUL_D (DR, DA, DB); break;
case FDMULQ: FP_CONV (Q, D, 4, 2, QA, DA);
FP_CONV (Q, D, 4, 2, QB, DB);
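/* fall through - the converted operands are multiplied as quads */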
case FMULQ: FP_MUL_Q (QR, QA, QB); break;
/* / */
case FDIVS: FP_DIV_S (SR, SA, SB); break;
case FDIVD: FP_DIV_D (DR, DA, DB); break;
case FDIVQ: FP_DIV_Q (QR, QA, QB); break;
/* sqrt */
case FSQRTS: FP_SQRT_S (SR, SB); break;
case FSQRTD: FP_SQRT_D (DR, DB); break;
case FSQRTQ: FP_SQRT_Q (QR, QB); break;
/* mov */
case FMOVS: rd->s = rs2->s; break;
case FABSS: rd->s = rs2->s & 0x7fffffff; break;
case FNEGS: rd->s = rs2->s ^ 0x80000000; break;
/* float to int */
case FSTOI: FP_TO_INT_S (IR, SB, 32, 1); break;
case FDTOI: FP_TO_INT_D (IR, DB, 32, 1); break;
case FQTOI: FP_TO_INT_Q (IR, QB, 32, 1); break;
/* int to float */
case FITOS: IR = rs2->s; FP_FROM_INT_S (SR, IR, 32, int); break;
case FITOD: IR = rs2->s; FP_FROM_INT_D (DR, IR, 32, int); break;
case FITOQ: IR = rs2->s; FP_FROM_INT_Q (QR, IR, 32, int); break;
/* float to float */
case FSTOD: FP_CONV (D, S, 2, 1, DR, SB); break;
case FSTOQ: FP_CONV (Q, S, 4, 1, QR, SB); break;
case FDTOQ: FP_CONV (Q, D, 4, 2, QR, DB); break;
case FDTOS: FP_CONV (S, D, 1, 2, SR, DB); break;
case FQTOS: FP_CONV (S, Q, 1, 4, SR, QB); break;
case FQTOD: FP_CONV (D, Q, 2, 4, DR, QB); break;
/* comparison */
case FCMPS:
case FCMPES:
FP_CMP_S(IR, SB, SA, 3);
if (IR == 3 &&
(((insn >> 5) & 0x1ff) == FCMPES ||
FP_ISSIGNAN_S(SA) ||
FP_ISSIGNAN_S(SB)))
FP_SET_EXCEPTION (FP_EX_INVALID);
break;
case FCMPD:
case FCMPED:
FP_CMP_D(IR, DB, DA, 3);
if (IR == 3 &&
(((insn >> 5) & 0x1ff) == FCMPED ||
FP_ISSIGNAN_D(DA) ||
FP_ISSIGNAN_D(DB)))
FP_SET_EXCEPTION (FP_EX_INVALID);
break;
case FCMPQ:
case FCMPEQ:
FP_CMP_Q(IR, QB, QA, 3);
if (IR == 3 &&
(((insn >> 5) & 0x1ff) == FCMPEQ ||
FP_ISSIGNAN_Q(QA) ||
FP_ISSIGNAN_Q(QB)))
FP_SET_EXCEPTION (FP_EX_INVALID);
}
if (!FP_INHIBIT_RESULTS) {
switch ((type >> 6) & 0x7) {
case 0: fsr = *pfsr;
if (IR == -1) IR = 2;
/* fcc is always fcc0 */
fsr &= ~0xc00; fsr |= (IR << 10);
*pfsr = fsr;
break;
case 1: rd->s = IR; break;
case 5: FP_PACK_SP (rd, SR); break;
case 6: FP_PACK_DP (rd, DR); break;
case 7: FP_PACK_QP (rd, QR); break;
}
}
if (_fex == 0)
return 1; /* success! */
return record_exception(pfsr, _fex);
}
| gpl-2.0 |
hominlinx/linux-bananapi | drivers/oprofile/buffer_sync.c | 8039 | 13822 | /**
* @file buffer_sync.c
*
* @remark Copyright 2002-2009 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
* @author Barry Kasindorf
* @author Robert Richter <robert.richter@amd.com>
*
* This is the core of the buffer management. Each
* CPU buffer is processed and entered into the
* global event buffer. Such processing is necessary
* in several circumstances, mentioned below.
*
* The processing does the job of converting the
* transitory EIP value into a persistent dentry/offset
* value that the profiler can record at its leisure.
*
* See fs/dcookies.c for a description of the dentry/offset
* objects.
*/
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_var_t marked_cpus;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);
/* Take ownership of the task struct and place it on the
* list for processing. Only after two full buffer syncs
* does the task eventually get freed, because by then
* we are sure we will not reference it again.
* Can be invoked from softirq via RCU callback due to
* call_rcu() of the task struct, hence the _irqsave.
*/
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
unsigned long flags;
struct task_struct *task = data;
spin_lock_irqsave(&task_mortuary, flags);
list_add(&task->tasks, &dying_tasks);
spin_unlock_irqrestore(&task_mortuary, flags);
return NOTIFY_OK;
}
/* The task is on its way out. A sync of the buffer means we can catch
* any remaining samples for this task.
*/
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
/* To avoid latency problems, we only process the current CPU,
* hoping that most samples for the task are on this CPU
*/
sync_buffer(raw_smp_processor_id());
return 0;
}
/* The task is about to try a do_munmap(). We peek at what it's going to
* do, and if it's an executable region, process the samples first, so
* we don't lose any. This does not have to be exact, it's a QoI issue
* only.
*/
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
unsigned long addr = (unsigned long)data;
struct mm_struct *mm = current->mm;
struct vm_area_struct *mpnt;
down_read(&mm->mmap_sem);
mpnt = find_vma(mm, addr);
if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
up_read(&mm->mmap_sem);
/* To avoid latency problems, we only process the current CPU,
* hoping that most samples for the task are on this CPU
*/
sync_buffer(raw_smp_processor_id());
return 0;
}
up_read(&mm->mmap_sem);
return 0;
}
/* We need to be told about new modules so we don't attribute to a previously
* loaded module, or drop the samples on the floor.
*/
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
if (val != MODULE_STATE_COMING)
return 0;
/* FIXME: should we process all CPU buffers ? */
mutex_lock(&buffer_mutex);
add_event_entry(ESCAPE_CODE);
add_event_entry(MODULE_LOADED_CODE);
mutex_unlock(&buffer_mutex);
#endif
return 0;
}
static struct notifier_block task_free_nb = {
.notifier_call = task_free_notify,
};
static struct notifier_block task_exit_nb = {
.notifier_call = task_exit_notify,
};
static struct notifier_block munmap_nb = {
.notifier_call = munmap_notify,
};
static struct notifier_block module_load_nb = {
.notifier_call = module_load_notify,
};
static void free_all_tasks(void)
{
/* make sure we don't leak task structs */
process_task_mortuary();
process_task_mortuary();
}
int sync_start(void)
{
int err;
if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
return -ENOMEM;
err = task_handoff_register(&task_free_nb);
if (err)
goto out1;
err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
if (err)
goto out2;
err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
if (err)
goto out3;
err = register_module_notifier(&module_load_nb);
if (err)
goto out4;
start_cpu_work();
out:
return err;
out4:
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
task_handoff_unregister(&task_free_nb);
free_all_tasks();
out1:
free_cpumask_var(marked_cpus);
goto out;
}
void sync_stop(void)
{
end_cpu_work();
unregister_module_notifier(&module_load_nb);
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
task_handoff_unregister(&task_free_nb);
barrier(); /* do all of the above first */
flush_cpu_work();
free_all_tasks();
free_cpumask_var(marked_cpus);
}
/* Optimisation. We can manage without taking the dcookie sem
* because we cannot reach this code without at least one
* dcookie user still being registered (namely, the reader
* of the event buffer). */
static inline unsigned long fast_get_dcookie(struct path *path)
{
unsigned long cookie;
if (path->dentry->d_flags & DCACHE_COOKIE)
return (unsigned long)path->dentry;
get_dcookie(path, &cookie);
return cookie;
}
/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
* which corresponds loosely to "application name". This is
* not strictly necessary but allows oprofile to associate
* shared-library samples with particular applications
*/
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
unsigned long cookie = NO_COOKIE;
struct vm_area_struct *vma;
if (!mm)
goto out;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!vma->vm_file)
continue;
if (!(vma->vm_flags & VM_EXECUTABLE))
continue;
cookie = fast_get_dcookie(&vma->vm_file->f_path);
break;
}
out:
return cookie;
}
/* Convert the EIP value of a sample into a persistent dentry/offset
* pair that can then be added to the global event buffer. We make
* sure to do this lookup before a mm->mmap modification happens so
* we don't lose track.
*/
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
unsigned long cookie = NO_COOKIE;
struct vm_area_struct *vma;
for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
if (addr < vma->vm_start || addr >= vma->vm_end)
continue;
if (vma->vm_file) {
cookie = fast_get_dcookie(&vma->vm_file->f_path);
*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
vma->vm_start;
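/* e.g. (hypothetical numbers) addr 0xb7701234 in a mapping with
 * vm_start 0xb7700000 and vm_pgoff 5 gives *offset == (5 << 12) +
 * 0x1234 == 0x6234 with 4K pages - the file offset the EIP maps to. */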
} else {
/* must be an anonymous map */
*offset = addr;
}
break;
}
if (!vma)
cookie = INVALID_COOKIE;
return cookie;
}
static unsigned long last_cookie = INVALID_COOKIE;
static void add_cpu_switch(int i)
{
add_event_entry(ESCAPE_CODE);
add_event_entry(CPU_SWITCH_CODE);
add_event_entry(i);
last_cookie = INVALID_COOKIE;
}
static void add_kernel_ctx_switch(unsigned int in_kernel)
{
add_event_entry(ESCAPE_CODE);
if (in_kernel)
add_event_entry(KERNEL_ENTER_SWITCH_CODE);
else
add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}
static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
add_event_entry(ESCAPE_CODE);
add_event_entry(CTX_SWITCH_CODE);
add_event_entry(task->pid);
add_event_entry(cookie);
/* Another code for daemon back-compat */
add_event_entry(ESCAPE_CODE);
add_event_entry(CTX_TGID_CODE);
add_event_entry(task->tgid);
}
static void add_cookie_switch(unsigned long cookie)
{
add_event_entry(ESCAPE_CODE);
add_event_entry(COOKIE_SWITCH_CODE);
add_event_entry(cookie);
}
static void add_trace_begin(void)
{
add_event_entry(ESCAPE_CODE);
add_event_entry(TRACE_BEGIN_CODE);
}
static void add_data(struct op_entry *entry, struct mm_struct *mm)
{
unsigned long code, pc, val;
unsigned long cookie;
off_t offset;
if (!op_cpu_buffer_get_data(entry, &code))
return;
if (!op_cpu_buffer_get_data(entry, &pc))
return;
if (!op_cpu_buffer_get_size(entry))
return;
if (mm) {
cookie = lookup_dcookie(mm, pc, &offset);
if (cookie == NO_COOKIE)
offset = pc;
if (cookie == INVALID_COOKIE) {
atomic_inc(&oprofile_stats.sample_lost_no_mapping);
offset = pc;
}
if (cookie != last_cookie) {
add_cookie_switch(cookie);
last_cookie = cookie;
}
} else
offset = pc;
add_event_entry(ESCAPE_CODE);
add_event_entry(code);
add_event_entry(offset); /* Offset from Dcookie */
while (op_cpu_buffer_get_data(entry, &val))
add_event_entry(val);
}
static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
add_event_entry(offset);
add_event_entry(event);
}
/*
* Add a sample to the global event buffer. If possible the
* sample is converted into a persistent dentry/offset pair
* for later lookup from userspace. Return 0 on failure.
*/
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
unsigned long cookie;
off_t offset;
if (in_kernel) {
add_sample_entry(s->eip, s->event);
return 1;
}
/* add userspace sample */
if (!mm) {
atomic_inc(&oprofile_stats.sample_lost_no_mm);
return 0;
}
cookie = lookup_dcookie(mm, s->eip, &offset);
if (cookie == INVALID_COOKIE) {
atomic_inc(&oprofile_stats.sample_lost_no_mapping);
return 0;
}
if (cookie != last_cookie) {
add_cookie_switch(cookie);
last_cookie = cookie;
}
add_sample_entry(offset, s->event);
return 1;
}
static void release_mm(struct mm_struct *mm)
{
if (!mm)
return;
up_read(&mm->mmap_sem);
mmput(mm);
}
static struct mm_struct *take_tasks_mm(struct task_struct *task)
{
struct mm_struct *mm = get_task_mm(task);
if (mm)
down_read(&mm->mmap_sem);
return mm;
}
static inline int is_code(unsigned long val)
{
return val == ESCAPE_CODE;
}
/* Move tasks along towards death. Any tasks on dead_tasks
* will definitely have no remaining references in any
* CPU buffers at this point, because we use two lists,
* and to have reached the list, it must have gone through
* one full sync already.
*/
static void process_task_mortuary(void)
{
unsigned long flags;
LIST_HEAD(local_dead_tasks);
struct task_struct *task;
struct task_struct *ttask;
spin_lock_irqsave(&task_mortuary, flags);
list_splice_init(&dead_tasks, &local_dead_tasks);
list_splice_init(&dying_tasks, &dead_tasks);
spin_unlock_irqrestore(&task_mortuary, flags);
list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
list_del(&task->tasks);
free_task(task);
}
}
static void mark_done(int cpu)
{
int i;
cpumask_set_cpu(cpu, marked_cpus);
for_each_online_cpu(i) {
if (!cpumask_test_cpu(i, marked_cpus))
return;
}
/* All CPUs have been processed at least once,
* we can process the mortuary once
*/
process_task_mortuary();
cpumask_clear(marked_cpus);
}
/* FIXME: this is not sufficient if we implement syscall barrier backtrace
* traversal: the code switches to sb_sample_start at the first kernel enter/exit
* switch, so we need a fifth state and some special handling in sync_buffer()
*/
typedef enum {
sb_bt_ignore = -2,
sb_buffer_start,
sb_bt_start,
sb_sample_start,
} sync_buffer_state;
/* Sync one of the CPU's buffers into the global event buffer.
* Here we need to go through each batch of samples punctuated
* by context switch notes, taking the task's mmap_sem and doing
* lookup in task->mm->mmap to convert EIP into dcookie/offset
* value.
*/
void sync_buffer(int cpu)
{
struct mm_struct *mm = NULL;
struct mm_struct *oldmm;
unsigned long val;
struct task_struct *new;
unsigned long cookie = 0;
int in_kernel = 1;
sync_buffer_state state = sb_buffer_start;
unsigned int i;
unsigned long available;
unsigned long flags;
struct op_entry entry;
struct op_sample *sample;
mutex_lock(&buffer_mutex);
add_cpu_switch(cpu);
op_cpu_buffer_reset(cpu);
available = op_cpu_buffer_entries(cpu);
for (i = 0; i < available; ++i) {
sample = op_cpu_buffer_read_entry(&entry, cpu);
if (!sample)
break;
if (is_code(sample->eip)) {
flags = sample->event;
if (flags & TRACE_BEGIN) {
state = sb_bt_start;
add_trace_begin();
}
if (flags & KERNEL_CTX_SWITCH) {
/* kernel/userspace switch */
in_kernel = flags & IS_KERNEL;
if (state == sb_buffer_start)
state = sb_sample_start;
add_kernel_ctx_switch(flags & IS_KERNEL);
}
if (flags & USER_CTX_SWITCH
&& op_cpu_buffer_get_data(&entry, &val)) {
/* userspace context switch */
new = (struct task_struct *)val;
oldmm = mm;
release_mm(oldmm);
mm = take_tasks_mm(new);
if (mm != oldmm)
cookie = get_exec_dcookie(mm);
add_user_ctx_switch(new, cookie);
}
if (op_cpu_buffer_get_size(&entry))
add_data(&entry, mm);
continue;
}
if (state < sb_bt_start)
/* ignore sample */
continue;
if (add_sample(mm, sample, in_kernel))
continue;
/* ignore backtraces if failed to add a sample */
if (state == sb_bt_start) {
state = sb_bt_ignore;
atomic_inc(&oprofile_stats.bt_lost_no_mapping);
}
}
release_mm(mm);
mark_done(cpu);
mutex_unlock(&buffer_mutex);
}
/* The function can be used to add a buffer worth of data directly to
* the kernel buffer. The buffer is assumed to be a circular buffer.
* Take the entries from index start and end at index end, wrapping
* at max_entries.
*/
void oprofile_put_buff(unsigned long *buf, unsigned int start,
unsigned int stop, unsigned int max)
{
int i;
i = start;
mutex_lock(&buffer_mutex);
while (i != stop) {
add_event_entry(buf[i++]);
if (i >= max)
i = 0;
}
mutex_unlock(&buffer_mutex);
}
| gpl-2.0 |
AOKP/kernel_htc_m7 | fs/logfs/dev_mtd.c | 9319 | 6792 | /*
* fs/logfs/dev_mtd.c - Device access methods for MTD
*
* As should be obvious for Linux kernel code, license is GPLv2
*
* Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
*/
#include "logfs.h"
#include <linux/completion.h>
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
static int logfs_mtd_read(struct super_block *sb, loff_t ofs, size_t len,
void *buf)
{
struct mtd_info *mtd = logfs_super(sb)->s_mtd;
size_t retlen;
int ret;
ret = mtd_read(mtd, ofs, len, &retlen, buf);
BUG_ON(ret == -EINVAL);
if (ret)
return ret;
/* Not sure if we should loop instead. */
if (retlen != len)
return -EIO;
return 0;
}
static int loffs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
void *buf)
{
struct logfs_super *super = logfs_super(sb);
struct mtd_info *mtd = super->s_mtd;
size_t retlen;
loff_t page_start, page_end;
int ret;
if (super->s_flags & LOGFS_SB_FLAG_RO)
return -EROFS;
BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
BUG_ON(len > PAGE_CACHE_SIZE);
page_start = ofs & PAGE_CACHE_MASK;
page_end = PAGE_CACHE_ALIGN(ofs + len) - 1;
ret = mtd_write(mtd, ofs, len, &retlen, buf);
if (ret || (retlen != len))
return -EIO;
return 0;
}
/*
* For as long as I can remember (since about 2001) mtd->erase has been an
* asynchronous interface lacking the first driver to actually use the
* asynchronous properties. So just to prevent the first implementor of such
* a thing from breaking logfs in 2350, we do the usual pointless dance to
* declare a completion variable and wait for completion before returning
* from logfs_mtd_erase(). What an exercise in futility!
*/
static void logfs_erase_callback(struct erase_info *ei)
{
complete((struct completion *)ei->priv);
}
static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
size_t len)
{
struct logfs_super *super = logfs_super(sb);
struct address_space *mapping = super->s_mapping_inode->i_mapping;
struct page *page;
pgoff_t index = ofs >> PAGE_SHIFT;
for (index = ofs >> PAGE_SHIFT; index < (ofs + len) >> PAGE_SHIFT; index++) {
page = find_get_page(mapping, index);
if (!page)
continue;
memset(page_address(page), 0xFF, PAGE_SIZE);
page_cache_release(page);
}
return 0;
}
static int logfs_mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
int ensure_write)
{
struct mtd_info *mtd = logfs_super(sb)->s_mtd;
struct erase_info ei;
DECLARE_COMPLETION_ONSTACK(complete);
int ret;
BUG_ON(len % mtd->erasesize);
if (logfs_super(sb)->s_flags & LOGFS_SB_FLAG_RO)
return -EROFS;
memset(&ei, 0, sizeof(ei));
ei.mtd = mtd;
ei.addr = ofs;
ei.len = len;
ei.callback = logfs_erase_callback;
ei.priv = (long)&complete;
ret = mtd_erase(mtd, &ei);
if (ret)
return -EIO;
wait_for_completion(&complete);
if (ei.state != MTD_ERASE_DONE)
return -EIO;
return logfs_mtd_erase_mapping(sb, ofs, len);
}
static void logfs_mtd_sync(struct super_block *sb)
{
struct mtd_info *mtd = logfs_super(sb)->s_mtd;
mtd_sync(mtd);
}
static int logfs_mtd_readpage(void *_sb, struct page *page)
{
struct super_block *sb = _sb;
int err;
err = logfs_mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
page_address(page));
if (err == -EUCLEAN || err == -EBADMSG) {
/* -EBADMSG happens regularly on power failures */
err = 0;
/* FIXME: force GC this segment */
}
if (err) {
ClearPageUptodate(page);
SetPageError(page);
} else {
SetPageUptodate(page);
ClearPageError(page);
}
unlock_page(page);
return err;
}
static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs)
{
struct logfs_super *super = logfs_super(sb);
struct address_space *mapping = super->s_mapping_inode->i_mapping;
filler_t *filler = logfs_mtd_readpage;
struct mtd_info *mtd = super->s_mtd;
*ofs = 0;
while (mtd_block_isbad(mtd, *ofs)) {
*ofs += mtd->erasesize;
if (*ofs >= mtd->size)
return NULL;
}
BUG_ON(*ofs & ~PAGE_MASK);
return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}
static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs)
{
struct logfs_super *super = logfs_super(sb);
struct address_space *mapping = super->s_mapping_inode->i_mapping;
filler_t *filler = logfs_mtd_readpage;
struct mtd_info *mtd = super->s_mtd;
*ofs = mtd->size - mtd->erasesize;
while (mtd_block_isbad(mtd, *ofs)) {
*ofs -= mtd->erasesize;
if (*ofs <= 0)
return NULL;
}
*ofs = *ofs + mtd->erasesize - 0x1000;
BUG_ON(*ofs & ~PAGE_MASK);
return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}
static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
size_t nr_pages)
{
struct logfs_super *super = logfs_super(sb);
struct address_space *mapping = super->s_mapping_inode->i_mapping;
struct page *page;
int i, err;
for (i = 0; i < nr_pages; i++) {
page = find_lock_page(mapping, index + i);
BUG_ON(!page);
err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
page_address(page));
unlock_page(page);
page_cache_release(page);
if (err)
return err;
}
return 0;
}
static void logfs_mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
struct logfs_super *super = logfs_super(sb);
int head;
if (super->s_flags & LOGFS_SB_FLAG_RO)
return;
if (len == 0) {
/* This can happen when the object fit perfectly into a
* segment, the segment gets written per sync and subsequently
* closed.
*/
return;
}
head = ofs & (PAGE_SIZE - 1);
if (head) {
ofs -= head;
len += head;
}
len = PAGE_ALIGN(len);
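/* e.g. (hypothetical numbers) ofs == 0x1234 and len == 0x100: head is
 * 0x234, so ofs becomes 0x1000 and len becomes PAGE_ALIGN(0x334) ==
 * 0x1000 - exactly the one 4K page containing the data is written below. */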
__logfs_mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}
static void logfs_mtd_put_device(struct logfs_super *s)
{
put_mtd_device(s->s_mtd);
}
static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs)
{
struct logfs_super *super = logfs_super(sb);
void *buf;
int err;
buf = kmalloc(super->s_writesize, GFP_KERNEL);
if (!buf)
return -ENOMEM;
err = logfs_mtd_read(sb, ofs, super->s_writesize, buf);
if (err)
goto out;
if (memchr_inv(buf, 0xff, super->s_writesize))
err = -EIO;
kfree(buf);
out:
return err;
}
static const struct logfs_device_ops mtd_devops = {
.find_first_sb = logfs_mtd_find_first_sb,
.find_last_sb = logfs_mtd_find_last_sb,
.readpage = logfs_mtd_readpage,
.writeseg = logfs_mtd_writeseg,
.erase = logfs_mtd_erase,
.can_write_buf = logfs_mtd_can_write_buf,
.sync = logfs_mtd_sync,
.put_device = logfs_mtd_put_device,
};
int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
{
struct mtd_info *mtd = get_mtd_device(NULL, mtdnr);
if (IS_ERR(mtd))
return PTR_ERR(mtd);
s->s_bdev = NULL;
s->s_mtd = mtd;
s->s_devops = &mtd_devops;
return 0;
}
| gpl-2.0 |
pacificIT/edison-kernel | drivers/pcmcia/pxa2xx_cm_x270.c | 9831 | 2472 | /*
* linux/drivers/pcmcia/pxa/pxa_cm_x270.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Compulab Ltd., 2003, 2007, 2008
* Mike Rapoport <mike@compulab.co.il>
*
*/
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/export.h>
#include "soc_common.h"
#define GPIO_PCMCIA_S0_CD_VALID (84)
#define GPIO_PCMCIA_S0_RDYINT (82)
#define GPIO_PCMCIA_RESET (53)
static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
int ret = gpio_request(GPIO_PCMCIA_RESET, "PCCard reset");
if (ret)
return ret;
gpio_direction_output(GPIO_PCMCIA_RESET, 0);
skt->stat[SOC_STAT_CD].gpio = GPIO_PCMCIA_S0_CD_VALID;
skt->stat[SOC_STAT_CD].name = "PCMCIA0 CD";
skt->stat[SOC_STAT_RDY].gpio = GPIO_PCMCIA_S0_RDYINT;
skt->stat[SOC_STAT_RDY].name = "PCMCIA0 RDY";
return ret;
}
static void cmx270_pcmcia_shutdown(struct soc_pcmcia_socket *skt)
{
gpio_free(GPIO_PCMCIA_RESET);
}
static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
struct pcmcia_state *state)
{
state->vs_3v = 0;
state->vs_Xv = 0;
}
static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
const socket_state_t *state)
{
switch (skt->nr) {
case 0:
if (state->flags & SS_RESET) {
gpio_set_value(GPIO_PCMCIA_RESET, 1);
udelay(10);
gpio_set_value(GPIO_PCMCIA_RESET, 0);
}
break;
}
return 0;
}
static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = {
.owner = THIS_MODULE,
.hw_init = cmx270_pcmcia_hw_init,
.hw_shutdown = cmx270_pcmcia_shutdown,
.socket_state = cmx270_pcmcia_socket_state,
.configure_socket = cmx270_pcmcia_configure_socket,
.nr = 1,
};
static struct platform_device *cmx270_pcmcia_device;
int __init cmx270_pcmcia_init(void)
{
int ret;
cmx270_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
if (!cmx270_pcmcia_device)
return -ENOMEM;
ret = platform_device_add_data(cmx270_pcmcia_device, &cmx270_pcmcia_ops,
sizeof(cmx270_pcmcia_ops));
if (ret == 0) {
printk(KERN_INFO "Registering cm-x270 PCMCIA interface.\n");
ret = platform_device_add(cmx270_pcmcia_device);
}
if (ret)
platform_device_put(cmx270_pcmcia_device);
return ret;
}
void __exit cmx270_pcmcia_exit(void)
{
platform_device_unregister(cmx270_pcmcia_device);
}
| gpl-2.0 |
ohporter/linux-am33x | drivers/mtd/maps/vmax301.c | 10343 | 5370 | /* ######################################################################
Tempustech VMAX SBC301 MTD Driver.
The VMAX 301 is an SBC based on . It
comes with three builtin AMD 29F016B flash chips and a socket for SRAM or
more flash. Each unit has its own 8k mapping into a settable region
(0xD8000). There are two 8k mappings for each MTD; the first is always set
to the lower 8k of the device the second is paged. Writing a 16 bit page
value to anywhere in the first 8k will cause the second 8k to page around.
To boot the device a bios extension must be installed into the first 8k
of flash that is smart enough to copy itself down, page in the rest of
itself and begin executing.
##################################################################### */
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#define WINDOW_START 0xd8000
#define WINDOW_LENGTH 0x2000
#define WINDOW_SHIFT 25
#define WINDOW_MASK 0x1FFF
/* Actually we could use two spinlocks, but we'd have to have
more private space in the struct map_info. We lose a little
performance like this, but we'd probably lose more by having
the extra indirection from having one of the map->map_priv
fields pointing to yet another private struct.
*/
static DEFINE_SPINLOCK(vmax301_spin);
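/* Select which page appears in the paged window: write the page number into
* the fixed lower window (WINDOW_LENGTH below map_priv_2) and cache it in
* map_priv_1 so redundant switches can be skipped. */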
static void __vmax301_page(struct map_info *map, unsigned long page)
{
writew(page, map->map_priv_2 - WINDOW_LENGTH);
map->map_priv_1 = page;
}
static inline void vmax301_page(struct map_info *map,
unsigned long ofs)
{
unsigned long page = (ofs >> WINDOW_SHIFT);
if (map->map_priv_1 != page)
__vmax301_page(map, page);
}
static map_word vmax301_read8(struct map_info *map, unsigned long ofs)
{
map_word ret;
spin_lock(&vmax301_spin);
vmax301_page(map, ofs);
ret.x[0] = readb(map->map_priv_2 + (ofs & WINDOW_MASK));
spin_unlock(&vmax301_spin);
return ret;
}
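/* Bulk read: copy in chunks no larger than what remains in the current 8k
* window, re-selecting the page under the lock for each chunk. */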
static void vmax301_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
while(len) {
unsigned long thislen = len;
if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
spin_lock(&vmax301_spin);
vmax301_page(map, from);
memcpy_fromio(to, map->map_priv_2 + from, thislen);
spin_unlock(&vmax301_spin);
to += thislen;
from += thislen;
len -= thislen;
}
}
static void vmax301_write8(struct map_info *map, map_word d, unsigned long adr)
{
spin_lock(&vmax301_spin);
vmax301_page(map, adr);
writeb(d.x[0], map->map_priv_2 + (adr & WINDOW_MASK));
spin_unlock(&vmax301_spin);
}
static void vmax301_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
while(len) {
unsigned long thislen = len;
if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
spin_lock(&vmax301_spin);
vmax301_page(map, to);
memcpy_toio(map->map_priv_2 + to, from, thislen);
spin_unlock(&vmax301_spin);
to += thislen;
from += thislen;
len -= thislen;
}
}
static struct map_info vmax_map[2] = {
{
.name = "VMAX301 Internal Flash",
.phys = NO_XIP,
.size = 3*2*1024*1024,
.bankwidth = 1,
.read = vmax301_read8,
.copy_from = vmax301_copy_from,
.write = vmax301_write8,
.copy_to = vmax301_copy_to,
.map_priv_1 = WINDOW_START + WINDOW_LENGTH,
.map_priv_2 = 0xFFFFFFFF
},
{
.name = "VMAX301 Socket",
.phys = NO_XIP,
.size = 0,
.bankwidth = 1,
.read = vmax301_read8,
.copy_from = vmax301_copy_from,
.write = vmax301_write8,
.copy_to = vmax301_copy_to,
.map_priv_1 = WINDOW_START + (3*WINDOW_LENGTH),
.map_priv_2 = 0xFFFFFFFF
}
};
static struct mtd_info *vmax_mtd[2] = {NULL, NULL};
static void __exit cleanup_vmax301(void)
{
int i;
for (i=0; i<2; i++) {
if (vmax_mtd[i]) {
mtd_device_unregister(vmax_mtd[i]);
map_destroy(vmax_mtd[i]);
}
}
iounmap((void *)vmax_map[0].map_priv_1 - WINDOW_START);
}
static int __init init_vmax301(void)
{
int i;
unsigned long iomapadr;
/* Print out our little header.. */
printk("Tempustech VMAX 301 MEM:0x%x-0x%x\n",WINDOW_START,
WINDOW_START+4*WINDOW_LENGTH);
iomapadr = (unsigned long)ioremap(WINDOW_START, WINDOW_LENGTH*4);
if (!iomapadr) {
printk("Failed to ioremap memory region\n");
return -EIO;
}
/* Put the address in the map's private data area.
We store the actual MTD IO address rather than the
address of the first half, because it's used more
often.
*/
vmax_map[0].map_priv_2 = iomapadr + WINDOW_START;
vmax_map[1].map_priv_2 = iomapadr + (3*WINDOW_START);
for (i=0; i<2; i++) {
vmax_mtd[i] = do_map_probe("cfi_probe", &vmax_map[i]);
if (!vmax_mtd[i])
vmax_mtd[i] = do_map_probe("jedec", &vmax_map[i]);
if (!vmax_mtd[i])
vmax_mtd[i] = do_map_probe("map_ram", &vmax_map[i]);
if (!vmax_mtd[i])
vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]);
if (vmax_mtd[i]) {
vmax_mtd[i]->owner = THIS_MODULE;
mtd_device_register(vmax_mtd[i], NULL, 0);
}
}
if (!vmax_mtd[0] && !vmax_mtd[1]) {
iounmap((void *)iomapadr);
return -ENXIO;
}
return 0;
}
module_init(init_vmax301);
module_exit(cleanup_vmax301);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("MTD map driver for Tempustech VMAX SBC301 board");
| gpl-2.0 |
TeamTwisted/hells-Core-N5 | drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c | 10855 | 4327 | /*
* Copyright (C) 2006-2008 PA Semi, Inc
*
* Ethtool hooks for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/inet_lro.h>
#include <asm/pasemi_dma.h>
#include "pasemi_mac.h"
static struct {
const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
{ "rx-drops" },
{ "rx-bytes" },
{ "rx-packets" },
{ "rx-broadcast-packets" },
{ "rx-multicast-packets" },
{ "rx-crc-errors" },
{ "rx-undersize-errors" },
{ "rx-oversize-errors" },
{ "rx-short-fragment-errors" },
{ "rx-jabber-errors" },
{ "rx-64-byte-packets" },
{ "rx-65-127-byte-packets" },
{ "rx-128-255-byte-packets" },
{ "rx-256-511-byte-packets" },
{ "rx-512-1023-byte-packets" },
{ "rx-1024-1518-byte-packets" },
{ "rx-pause-frames" },
{ "tx-bytes" },
{ "tx-packets" },
{ "tx-broadcast-packets" },
{ "tx-multicast-packets" },
{ "tx-collisions" },
{ "tx-late-collisions" },
{ "tx-excessive-collisions" },
{ "tx-crc-errors" },
{ "tx-undersize-errors" },
{ "tx-oversize-errors" },
{ "tx-64-byte-packets" },
{ "tx-65-127-byte-packets" },
{ "tx-128-255-byte-packets" },
{ "tx-256-511-byte-packets" },
{ "tx-512-1023-byte-packets" },
{ "tx-1024-1518-byte-packets" },
};
static int
pasemi_mac_ethtool_get_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct pasemi_mac *mac = netdev_priv(netdev);
struct phy_device *phydev = mac->phydev;
if (!phydev)
return -EOPNOTSUPP;
return phy_ethtool_gset(phydev, cmd);
}
static int
pasemi_mac_ethtool_set_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct pasemi_mac *mac = netdev_priv(netdev);
struct phy_device *phydev = mac->phydev;
if (!phydev)
return -EOPNOTSUPP;
return phy_ethtool_sset(phydev, cmd);
}
static u32
pasemi_mac_ethtool_get_msglevel(struct net_device *netdev)
{
struct pasemi_mac *mac = netdev_priv(netdev);
return mac->msg_enable;
}
static void
pasemi_mac_ethtool_set_msglevel(struct net_device *netdev,
u32 level)
{
struct pasemi_mac *mac = netdev_priv(netdev);
mac->msg_enable = level;
}
static void
pasemi_mac_ethtool_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ering)
{
struct pasemi_mac *mac = netdev_priv(netdev);
ering->tx_max_pending = TX_RING_SIZE/2;
ering->tx_pending = RING_USED(mac->tx)/2;
ering->rx_max_pending = RX_RING_SIZE/4;
ering->rx_pending = RING_USED(mac->rx)/4;
}
static int pasemi_mac_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(ethtool_stats_keys);
default:
return -EOPNOTSUPP;
}
}
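/* data[0] is the RX drop count read from the DMA engine; the next 32 entries
* are the MAC RMON counters, in the same order as ethtool_stats_keys[1..]. */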
static void pasemi_mac_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct pasemi_mac *mac = netdev_priv(netdev);
int i;
data[0] = pasemi_read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if))
>> PAS_DMA_RXINT_RCMDSTA_DROPS_S;
for (i = 0; i < 32; i++)
data[1+i] = pasemi_read_mac_reg(mac->dma_if, PAS_MAC_RMON(i));
}
static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
}
const struct ethtool_ops pasemi_mac_ethtool_ops = {
.get_settings = pasemi_mac_ethtool_get_settings,
.set_settings = pasemi_mac_ethtool_set_settings,
.get_msglevel = pasemi_mac_ethtool_get_msglevel,
.set_msglevel = pasemi_mac_ethtool_set_msglevel,
.get_link = ethtool_op_get_link,
.get_ringparam = pasemi_mac_ethtool_get_ringparam,
.get_strings = pasemi_mac_get_strings,
.get_sset_count = pasemi_mac_get_sset_count,
.get_ethtool_stats = pasemi_mac_get_ethtool_stats,
};
| gpl-2.0 |
htc-mirror/ruby-ics-crc-3.0.16-fd362fb | arch/sh/lib/delay.c | 12135 | 1067 | /*
* Precise Delay Loops for SuperH
*
* Copyright (C) 1999 Niibe Yutaka & Kaz Kojima
*/
#include <linux/sched.h>
#include <linux/delay.h>
void __delay(unsigned long loops)
{
__asm__ __volatile__(
/*
* ST40-300 appears to have an issue with this code,
* normally taking two cycles each loop, as with all
* other SH variants. If however the branch and the
* delay slot straddle an 8 byte boundary, this increases
* to 3 cycles.
* This align directive ensures this doesn't occur.
*/
".balign 8\n\t"
"tst %0, %0\n\t"
"1:\t"
"bf/s 1b\n\t"
" dt %0"
: "=r" (loops)
: "0" (loops)
: "t");
}
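/*
* xloops is a 2^-32 fraction of a second; scale it by loops_per_jiffy * HZ
* using a 32x32->64 multiply and keep the high word (MACH) as the loop
* count handed to __delay().
*/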
inline void __const_udelay(unsigned long xloops)
{
xloops *= 4;
__asm__("dmulu.l %0, %2\n\t"
"sts mach, %0"
: "=r" (xloops)
: "0" (xloops),
"r" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4))
: "macl", "mach");
__delay(++xloops);
}
void __udelay(unsigned long usecs)
{
__const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
}
void __ndelay(unsigned long nsecs)
{
__const_udelay(nsecs * 0x00000005);
}
| gpl-2.0 |
matthiasdiener/kmaf | drivers/s390/cio/qdio_main.c | 104 | 45284 | /*
* Linux for s390 qdio support, buffer handling, qdio API and module support.
*
* Copyright IBM Corp. 2000, 2008
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
* 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>
#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
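/*
* do_siga_sync - issue the SIGA-s instruction to synchronize the SLSB state
* of the output/input queues selected by @out_mask/@in_mask on subchannel
* @schid. Returns the condition code.
*/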
static inline int do_siga_sync(unsigned long schid,
unsigned int out_mask, unsigned int in_mask,
unsigned int fc)
{
register unsigned long __fc asm ("0") = fc;
register unsigned long __schid asm ("1") = schid;
register unsigned long out asm ("2") = out_mask;
register unsigned long in asm ("3") = in_mask;
int cc;
asm volatile(
" siga 0\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
return cc;
}
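/*
* do_siga_input - issue SIGA-r to initiate data transfer on the input queues
* selected by @mask. Returns the condition code.
*/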
static inline int do_siga_input(unsigned long schid, unsigned int mask,
unsigned int fc)
{
register unsigned long __fc asm ("0") = fc;
register unsigned long __schid asm ("1") = schid;
register unsigned long __mask asm ("2") = mask;
int cc;
asm volatile(
" siga 0\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
return cc;
}
/**
* do_siga_output - perform SIGA-w/wt function
* @schid: subchannel id or in case of QEBSM the subchannel token
* @mask: which output queues to process
* @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
* @fc: function code to perform
*
* Returns condition code.
* Note: For IQDC unicast queues only the highest priority queue is processed.
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
unsigned int *bb, unsigned int fc,
unsigned long aob)
{
register unsigned long __fc asm("0") = fc;
register unsigned long __schid asm("1") = schid;
register unsigned long __mask asm("2") = mask;
register unsigned long __aob asm("3") = aob;
int cc;
asm volatile(
" siga 0\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc), "+d" (__fc), "+d" (__aob)
: "d" (__schid), "d" (__mask)
: "cc");
*bb = __fc >> 31;
return cc;
}
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
/* all done or next buffer state different */
if (ccq == 0 || ccq == 32)
return 0;
/* no buffer processed */
if (ccq == 97)
return 1;
/* not all buffers processed */
if (ccq == 96)
return 2;
/* notify devices immediately */
DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
return -EIO;
}
/**
* qdio_do_eqbs - extract buffer states for QEBSM
* @q: queue to manipulate
* @state: state of the extracted buffers
* @start: buffer number to start at
* @count: count of buffers to examine
* @auto_ack: automatically acknowledge buffers
*
* Returns the number of successfully extracted equal buffer states.
* Stops processing if a state is different from the last buffers state.
*/
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int start, int count, int auto_ack)
{
int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
unsigned int ccq = 0;
qperf_inc(q, eqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
again:
ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
auto_ack);
rc = qdio_check_ccq(q, ccq);
if (!rc)
return count - tmp_count;
if (rc == 1) {
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
goto again;
}
if (rc == 2) {
qperf_inc(q, eqbs_partial);
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
tmp_count);
/*
* Retry once, if that fails bail out and process the
* extracted buffers before trying again.
*/
if (!retried++)
goto again;
else
return count - tmp_count;
}
DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
return 0;
}
/**
* qdio_do_sqbs - set buffer states for QEBSM
* @q: queue to manipulate
* @state: new state of the buffers
* @start: first buffer number to change
* @count: how many buffers to change
*
* Returns the number of successfully changed buffers.
* Does retrying until the specified count of buffer states is set or an
* error occurs.
*/
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
int count)
{
unsigned int ccq = 0;
int tmp_count = count, tmp_start = start;
int nr = q->nr;
int rc;
if (!count)
return 0;
qperf_inc(q, sqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
again:
ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
rc = qdio_check_ccq(q, ccq);
if (!rc) {
WARN_ON_ONCE(tmp_count);
return count - tmp_count;
}
if (rc == 1 || rc == 2) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
qperf_inc(q, sqbs_partial);
goto again;
}
DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
return 0;
}
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, unsigned int count,
int auto_ack, int merge_pending)
{
unsigned char __state = 0;
int i;
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
for (i = 0; i < count; i++) {
if (!__state) {
__state = q->slsb.val[bufnr];
if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
__state = SLSB_P_OUTPUT_EMPTY;
} else if (merge_pending) {
if ((q->slsb.val[bufnr] & __state) != __state)
break;
} else if (q->slsb.val[bufnr] != __state)
break;
bufnr = next_buf(bufnr);
}
*state = __state;
return i;
}
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, int auto_ack)
{
return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
unsigned char state, int count)
{
int i;
if (is_qebsm(q))
return qdio_do_sqbs(q, state, bufnr, count);
for (i = 0; i < count; i++) {
xchg(&q->slsb.val[bufnr], state);
bufnr = next_buf(bufnr);
}
return count;
}
static inline int set_buf_state(struct qdio_q *q, int bufnr,
unsigned char state)
{
return set_buf_states(q, bufnr, state, 1);
}
/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
for_each_input_queue(irq_ptr, q, i)
set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
QDIO_MAX_BUFFERS_PER_Q);
for_each_output_queue(irq_ptr, q, i)
set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
QDIO_MAX_BUFFERS_PER_Q);
}
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
unsigned int input)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_SYNC;
int cc;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
qperf_inc(q, siga_sync);
if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
fc |= QDIO_SIGA_QEBSM_FLAG;
}
cc = do_siga_sync(schid, output, input, fc);
if (unlikely(cc))
DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
return (cc) ? -EIO : 0;
}
static inline int qdio_siga_sync_q(struct qdio_q *q)
{
if (q->is_input_q)
return qdio_siga_sync(q, 0, q->mask);
else
return qdio_siga_sync(q, q->mask, 0);
}
static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
unsigned long aob)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_WRITE;
u64 start_time = 0;
int retries = 0, cc;
unsigned long laob = 0;
if (q->u.out.use_cq && aob != 0) {
fc = QDIO_SIGA_WRITEQ;
laob = aob;
}
if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
fc |= QDIO_SIGA_QEBSM_FLAG;
}
again:
WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
(aob && fc != QDIO_SIGA_WRITEQ));
cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
retries++;
if (!start_time) {
start_time = get_clock();
goto again;
}
if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
if (retries) {
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
"%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
}
return cc;
}
static inline int qdio_siga_input(struct qdio_q *q)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_READ;
int cc;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
qperf_inc(q, siga_read);
if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
fc |= QDIO_SIGA_QEBSM_FLAG;
}
cc = do_siga_input(schid, q->mask, fc);
if (unlikely(cc))
DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
return (cc) ? -EIO : 0;
}
#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
static inline void qdio_sync_queues(struct qdio_q *q)
{
/* PCI capable outbound queues will also be scanned so sync them too */
if (pci_out_supported(q))
qdio_siga_sync_all(q);
else
qdio_siga_sync_q(q);
}
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state)
{
if (need_siga_sync(q))
qdio_siga_sync_q(q);
return get_buf_states(q, bufnr, state, 1, 0, 0);
}
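/* Leave polling mode: reset the ACKed buffer state(s) so the adapter raises
* interrupts for this input queue again. */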
static inline void qdio_stop_polling(struct qdio_q *q)
{
if (!q->u.in.polling)
return;
q->u.in.polling = 0;
qperf_inc(q, stop_polling);
/* show the card that we are not polling anymore */
if (is_qebsm(q)) {
set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
q->u.in.ack_count);
q->u.in.ack_count = 0;
} else
set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
static inline void account_sbals(struct qdio_q *q, int count)
{
int pos = 0;
q->q_stats.nr_sbal_total += count;
if (count == QDIO_MAX_BUFFERS_MASK) {
q->q_stats.nr_sbals[7]++;
return;
}
while (count >>= 1)
pos++;
q->q_stats.nr_sbals[pos]++;
}
static void process_buffer_error(struct qdio_q *q, int count)
{
unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
SLSB_P_OUTPUT_NOT_INIT;
q->qdio_error = QDIO_ERROR_SLSB_STATE;
/* special handling for no target buffer empty */
if ((!q->is_input_q &&
(q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
qperf_inc(q, target_full);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
q->first_to_check);
goto set;
}
DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
DBF_ERROR("F14:%2x F15:%2x",
q->sbal[q->first_to_check]->element[14].sflags,
q->sbal[q->first_to_check]->element[15].sflags);
set:
/*
* Interrupts may be avoided as long as the error is present
* so change the buffer state immediately to avoid starvation.
*/
set_buf_states(q, q->first_to_check, state, count);
}
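/* Acknowledge newly primed input buffers: with QEBSM the ACK was already set
* by EQBS, otherwise ACK only the newest buffer and put the remaining ones
* back to NOT_INIT so further state changes keep generating interrupts. */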
static inline void inbound_primed(struct qdio_q *q, int count)
{
int new;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);
/* for QEBSM the ACK was already set by EQBS */
if (is_qebsm(q)) {
if (!q->u.in.polling) {
q->u.in.polling = 1;
q->u.in.ack_count = count;
q->u.in.ack_start = q->first_to_check;
return;
}
/* delete the previous ACK's */
set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
q->u.in.ack_count);
q->u.in.ack_count = count;
q->u.in.ack_start = q->first_to_check;
return;
}
/*
* ACK the newest buffer. The ACK will be removed in qdio_stop_polling
* or by the next inbound run.
*/
new = add_buf(q->first_to_check, count - 1);
if (q->u.in.polling) {
/* reset the previous ACK but first set the new one */
set_buf_state(q, new, SLSB_P_INPUT_ACK);
set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
} else {
q->u.in.polling = 1;
set_buf_state(q, new, SLSB_P_INPUT_ACK);
}
q->u.in.ack_start = new;
count--;
if (!count)
return;
/* need to change ALL buffers to get more interrupts */
set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
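/* Scan the input queue from first_to_check and advance it across all buffers
* found in one common SLSB state; returns the new first_to_check. */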
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
int count, stop;
unsigned char state = 0;
q->timestamp = get_clock();
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
* would return 0.
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
if (q->first_to_check == stop)
goto out;
/*
* No siga-sync needed here; either a PCI interrupt or the thinint
* handler has already synced the queues.
*/
count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
if (!count)
goto out;
switch (state) {
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
if (atomic_sub(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
break;
case SLSB_P_INPUT_ERROR:
process_buffer_error(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
break;
case SLSB_CU_INPUT_EMPTY:
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_INPUT_ACK:
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
break;
default:
WARN_ON_ONCE(1);
}
out:
return q->first_to_check;
}
static int qdio_inbound_q_moved(struct qdio_q *q)
{
int bufnr;
bufnr = get_inbound_buffer_frontier(q);
if (bufnr != q->last_move) {
q->last_move = bufnr;
if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
q->u.in.timestamp = get_clock();
return 1;
} else
return 0;
}
static inline int qdio_inbound_q_done(struct qdio_q *q)
{
unsigned char state = 0;
if (!atomic_read(&q->nr_buf_used))
return 1;
if (need_siga_sync(q))
qdio_siga_sync_q(q);
get_buf_state(q, q->first_to_check, &state, 0);
if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
/* more work coming */
return 0;
if (is_thinint_irq(q->irq_ptr))
return 1;
/* don't poll under z/VM */
if (MACHINE_IS_VM)
return 1;
/*
* At this point we know, that inbound first_to_check
* has (probably) not moved (see qdio_inbound_processing).
*/
if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
q->first_to_check);
return 1;
} else
return 0;
}
static inline int contains_aobs(struct qdio_q *q)
{
return !q->is_input_q && q->u.out.use_cq;
}
static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
int i, struct qaob *aob)
{
int tmp;
DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
(unsigned long) virt_to_phys(aob));
DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
(unsigned long) aob->res0[0]);
DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
(unsigned long) aob->res0[1]);
DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
(unsigned long) aob->res0[2]);
DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
(unsigned long) aob->res0[3]);
DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
(unsigned long) aob->res0[4]);
DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
(unsigned long) aob->res0[5]);
DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
(unsigned long) aob->sba[tmp]);
DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
(unsigned long) q->sbal[i]->element[tmp].addr);
DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
q->sbal[i]->element[tmp].length);
}
DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
for (tmp = 0; tmp < 2; ++tmp) {
DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
(unsigned long) aob->res4[tmp]);
}
DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
}
static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
unsigned char state = 0;
int j, b = start;
if (!contains_aobs(q))
return;
for (j = 0; j < count; ++j) {
get_buf_state(q, b, &state, 0);
if (state == SLSB_P_OUTPUT_PENDING) {
struct qaob *aob = q->u.out.aobs[b];
if (aob == NULL)
continue;
q->u.out.sbal_state[b].flags |=
QDIO_OUTBUF_STATE_FLAG_PENDING;
q->u.out.aobs[b] = NULL;
} else if (state == SLSB_P_OUTPUT_EMPTY) {
q->u.out.sbal_state[b].aob = NULL;
}
b = next_buf(b);
}
}
static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
int bufnr)
{
unsigned long phys_aob = 0;
if (!q->use_cq)
goto out;
if (!q->aobs[bufnr]) {
struct qaob *aob = qdio_allocate_aob();
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
WARN_ON_ONCE(phys_aob & 0xFF);
}
out:
return phys_aob;
}
static void qdio_kick_handler(struct qdio_q *q)
{
int start = q->first_to_kick;
int end = q->first_to_check;
int count;
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
count = sub_buf(end, start);
if (q->is_input_q) {
qperf_inc(q, inbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
} else {
qperf_inc(q, outbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
start, count);
}
qdio_handle_aobs(q, start, count);
q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
q->irq_ptr->int_parm);
/* for the next time */
q->first_to_kick = end;
q->qdio_error = 0;
}
static void __qdio_inbound_processing(struct qdio_q *q)
{
qperf_inc(q, tasklet_inbound);
if (!qdio_inbound_q_moved(q))
return;
qdio_kick_handler(q);
if (!qdio_inbound_q_done(q)) {
/* means poll time is not yet over */
qperf_inc(q, tasklet_inbound_resched);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
tasklet_schedule(&q->tasklet);
return;
}
}
qdio_stop_polling(q);
/*
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
if (!qdio_inbound_q_done(q)) {
qperf_inc(q, tasklet_inbound_resched2);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
tasklet_schedule(&q->tasklet);
}
}
void qdio_inbound_processing(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
__qdio_inbound_processing(q);
}
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
int count, stop;
unsigned char state = 0;
q->timestamp = get_clock();
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
!pci_out_supported(q)) ||
(queue_type(q) == QDIO_IQDIO_QFMT &&
multicast_outbound(q)))
qdio_siga_sync_q(q);
/*
* Don't check 128 buffers, as otherwise qdio_outbound_q_moved
* would return 0.
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
if (q->first_to_check == stop)
goto out;
count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
if (!count)
goto out;
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
"out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
break;
case SLSB_P_OUTPUT_ERROR:
process_buffer_error(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
break;
case SLSB_CU_OUTPUT_PRIMED:
/* the adapter has not fetched the output yet */
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
q->nr);
break;
case SLSB_P_OUTPUT_NOT_INIT:
case SLSB_P_OUTPUT_HALTED:
break;
default:
WARN_ON_ONCE(1);
}
out:
return q->first_to_check;
}
/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
return atomic_read(&q->nr_buf_used) == 0;
}
static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
int bufnr;
bufnr = get_outbound_buffer_frontier(q);
if (bufnr != q->last_move) {
q->last_move = bufnr;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
return 1;
} else
return 0;
}
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
int retries = 0, cc;
unsigned int busy_bit;
if (!need_siga_out(q))
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
qperf_inc(q, siga_write);
cc = qdio_siga_output(q, &busy_bit, aob);
switch (cc) {
case 0:
break;
case 2:
if (busy_bit) {
while (++retries < QDIO_BUSY_BIT_RETRIES) {
mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
goto retry;
}
DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
cc = -EBUSY;
} else {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
cc = -ENOBUFS;
}
break;
case 1:
case 3:
DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
cc = -EIO;
break;
}
if (retries) {
DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
DBF_ERROR("count:%u", retries);
}
return cc;
}
static void __qdio_outbound_processing(struct qdio_q *q)
{
qperf_inc(q, tasklet_outbound);
WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
if (qdio_outbound_q_moved(q))
qdio_kick_handler(q);
if (queue_type(q) == QDIO_ZFCP_QFMT)
if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
goto sched;
if (q->u.out.pci_out_enabled)
return;
/*
* Now we know that queue type is either qeth without pci enabled
* or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
* is noticed and outbound_handler is called after some time.
*/
if (qdio_outbound_q_done(q))
del_timer(&q->u.out.timer);
else
if (!timer_pending(&q->u.out.timer))
mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
return;
sched:
if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
return;
tasklet_schedule(&q->tasklet);
}
/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
__qdio_outbound_processing(q);
}
void qdio_outbound_timer(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
return;
tasklet_schedule(&q->tasklet);
}
static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
struct qdio_q *out;
int i;
if (!pci_out_supported(q))
return;
for_each_output_queue(q->irq_ptr, out, i)
if (!qdio_outbound_q_done(out))
tasklet_schedule(&out->tasklet);
}
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
qperf_inc(q, tasklet_inbound);
if (need_siga_sync(q) && need_siga_sync_after_ai(q))
qdio_sync_queues(q);
/*
* The interrupt could be caused by a PCI request. Check the
* PCI capable outbound queues.
*/
qdio_check_outbound_after_thinint(q);
if (!qdio_inbound_q_moved(q))
return;
qdio_kick_handler(q);
if (!qdio_inbound_q_done(q)) {
qperf_inc(q, tasklet_inbound_resched);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
tasklet_schedule(&q->tasklet);
return;
}
}
qdio_stop_polling(q);
/*
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
if (!qdio_inbound_q_done(q)) {
qperf_inc(q, tasklet_inbound_resched2);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
tasklet_schedule(&q->tasklet);
}
}
void tiqdio_inbound_processing(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
__tiqdio_inbound_processing(q);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
enum qdio_irq_states state)
{
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
irq_ptr->state = state;
mb();
}
static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
if (irb->esw.esw0.erw.cons) {
DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
DBF_ERROR_HEX(irb, 64);
DBF_ERROR_HEX(irb->ecw, 64);
}
}
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
int i;
struct qdio_q *q;
if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
return;
for_each_input_queue(irq_ptr, q, i) {
if (q->u.in.queue_start_poll) {
/* skip if polling is enabled or already in work */
if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
&q->u.in.queue_irq_state)) {
qperf_inc(q, int_discarded);
continue;
}
q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
q->irq_ptr->int_parm);
} else {
tasklet_schedule(&q->tasklet);
}
}
if (!pci_out_supported(q))
return;
for_each_output_queue(irq_ptr, q, i) {
if (qdio_outbound_q_done(q))
continue;
if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
qdio_siga_sync_q(q);
tasklet_schedule(&q->tasklet);
}
}
static void qdio_handle_activate_check(struct ccw_device *cdev,
unsigned long intparm, int cstat, int dstat)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct qdio_q *q;
int count;
DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
DBF_ERROR("intp :%lx", intparm);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
if (irq_ptr->nr_input_qs) {
q = irq_ptr->input_qs[0];
} else if (irq_ptr->nr_output_qs) {
q = irq_ptr->output_qs[0];
} else {
dump_stack();
goto no_handler;
}
count = sub_buf(q->first_to_check, q->first_to_kick);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
/*
* In case of z/VM LGR (Live Guest Relocation) QDIO recovery will happen.
* Therefore we call the LGR detection function here.
*/
lgr_info_log();
}
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
int dstat)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
if (cstat)
goto error;
if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
goto error;
if (!(dstat & DEV_STAT_DEV_END))
goto error;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
return;
error:
DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
int cstat, dstat;
if (!intparm || !irq_ptr) {
DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
return;
}
if (irq_ptr->perf_stat_enabled)
irq_ptr->perf_stat.qdio_int++;
if (IS_ERR(irb)) {
DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
wake_up(&cdev->private->wait_q);
return;
}
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
qdio_establish_handle_irq(cdev, cstat, dstat);
break;
case QDIO_IRQ_STATE_CLEANUP:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
break;
case QDIO_IRQ_STATE_ESTABLISHED:
case QDIO_IRQ_STATE_ACTIVE:
if (cstat & SCHN_STAT_PCI) {
qdio_int_handler_pci(irq_ptr);
return;
}
if (cstat || dstat)
qdio_handle_activate_check(cdev, intparm, cstat,
dstat);
break;
case QDIO_IRQ_STATE_STOPPED:
break;
default:
WARN_ON_ONCE(1);
}
wake_up(&cdev->private->wait_q);
}
/**
* qdio_get_ssqd_desc - get qdio subchannel description
* @cdev: ccw device to get description for
* @data: where to store the ssqd
*
* Returns 0 or an error code. The results of the chsc are stored in the
* specified structure.
*/
int qdio_get_ssqd_desc(struct ccw_device *cdev,
struct qdio_ssqd_desc *data)
{
if (!cdev || !cdev->private)
return -EINVAL;
DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct qdio_q *q;
int i;
for_each_input_queue(irq_ptr, q, i)
tasklet_kill(&q->tasklet);
for_each_output_queue(irq_ptr, q, i) {
del_timer(&q->u.out.timer);
tasklet_kill(&q->tasklet);
}
}
/**
* qdio_shutdown - shut down a qdio subchannel
* @cdev: associated ccw device
* @how: use halt or clear to shutdown
*/
int qdio_shutdown(struct ccw_device *cdev, int how)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
int rc;
unsigned long flags;
if (!irq_ptr)
return -ENODEV;
WARN_ON_ONCE(irqs_disabled());
DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
mutex_lock(&irq_ptr->setup_mutex);
/*
* Subchannel was already shot down. We cannot prevent being called
* twice since cio may trigger a shutdown asynchronously.
*/
if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
mutex_unlock(&irq_ptr->setup_mutex);
return 0;
}
/*
* Indicate that the device is going down. Scheduling the queue
* tasklets is forbidden from here on.
*/
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
tiqdio_remove_input_queues(irq_ptr);
qdio_shutdown_queues(cdev);
qdio_shutdown_debug_entries(irq_ptr, cdev);
/* cleanup subchannel */
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
else
/* default behaviour is halt */
rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
if (rc) {
DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4d", rc);
goto no_cleanup;
}
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
wait_event_interruptible_timeout(cdev->private->wait_q,
irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
irq_ptr->state == QDIO_IRQ_STATE_ERR,
10 * HZ);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
no_cleanup:
qdio_shutdown_thinint(irq_ptr);
/* restore interrupt handler */
if ((void *)cdev->handler == (void *)qdio_int_handler)
cdev->handler = irq_ptr->orig_handler;
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
mutex_unlock(&irq_ptr->setup_mutex);
if (rc)
return rc;
return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
/**
* qdio_free - free data structures for a qdio subchannel
* @cdev: associated ccw device
*/
int qdio_free(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
mutex_lock(&irq_ptr->setup_mutex);
if (irq_ptr->debug_area != NULL) {
debug_unregister(irq_ptr->debug_area);
irq_ptr->debug_area = NULL;
}
cdev->private->qdio_data = NULL;
mutex_unlock(&irq_ptr->setup_mutex);
qdio_release_memory(irq_ptr);
return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
/**
* qdio_allocate - allocate qdio queues and associated data
* @init_data: initialization data
*/
int qdio_allocate(struct qdio_initialize *init_data)
{
struct qdio_irq *irq_ptr;
DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);
if ((init_data->no_input_qs && !init_data->input_handler) ||
(init_data->no_output_qs && !init_data->output_handler))
return -EINVAL;
if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
(init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
return -EINVAL;
if ((!init_data->input_sbal_addr_array) ||
(!init_data->output_sbal_addr_array))
return -EINVAL;
/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr)
goto out_err;
mutex_init(&irq_ptr->setup_mutex);
qdio_allocate_dbf(init_data, irq_ptr);
/*
* Allocate a page for the chsc calls in qdio_establish.
* Must be pre-allocated since a zfcp recovery will call
* qdio_establish. In case of low memory and swap on a zfcp disk
* we may not be able to allocate memory otherwise.
*/
irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
if (!irq_ptr->chsc_page)
goto out_rel;
/* qdr is used in ccw1.cda which is u32 */
irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr->qdr)
goto out_rel;
if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
init_data->no_output_qs))
goto out_rel;
init_data->cdev->private->qdio_data = irq_ptr;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
return 0;
out_rel:
qdio_release_memory(irq_ptr);
out_err:
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
struct qdio_q *q = irq_ptr->input_qs[0];
int i, use_cq = 0;
if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
use_cq = 1;
for_each_output_queue(irq_ptr, q, i) {
if (use_cq) {
if (qdio_enable_async_operation(&q->u.out) < 0) {
use_cq = 0;
continue;
}
} else
qdio_disable_async_operation(&q->u.out);
}
DBF_EVENT("use_cq:%d", use_cq);
}
/**
* qdio_establish - establish queues on a qdio subchannel
* @init_data: initialization data
*/
int qdio_establish(struct qdio_initialize *init_data)
{
struct qdio_irq *irq_ptr;
struct ccw_device *cdev = init_data->cdev;
unsigned long saveflags;
int rc;
DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE)
return -EINVAL;
mutex_lock(&irq_ptr->setup_mutex);
qdio_setup_irq(init_data);
rc = qdio_establish_thinint(irq_ptr);
if (rc) {
mutex_unlock(&irq_ptr->setup_mutex);
qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
return rc;
}
/* establish q */
irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
irq_ptr->ccw.flags = CCW_FLAG_SLI;
irq_ptr->ccw.count = irq_ptr->equeue.count;
irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
ccw_device_set_options_mask(cdev, 0);
rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
if (rc) {
DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4x", rc);
}
spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
if (rc) {
mutex_unlock(&irq_ptr->setup_mutex);
qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
return rc;
}
wait_event_interruptible_timeout(cdev->private->wait_q,
irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
mutex_unlock(&irq_ptr->setup_mutex);
qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
return -EIO;
}
qdio_setup_ssqd_info(irq_ptr);
qdio_detect_hsicq(irq_ptr);
/* qebsm is now setup if available, initialize buffer states */
qdio_init_buf_states(irq_ptr);
mutex_unlock(&irq_ptr->setup_mutex);
qdio_print_subchannel_info(irq_ptr, cdev);
qdio_setup_debug_entries(irq_ptr, cdev);
return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
/**
* qdio_activate - activate queues on a qdio subchannel
* @cdev: associated cdev
*/
int qdio_activate(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr;
int rc;
unsigned long saveflags;
DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE)
return -EINVAL;
mutex_lock(&irq_ptr->setup_mutex);
if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
rc = -EBUSY;
goto out;
}
irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
irq_ptr->ccw.flags = CCW_FLAG_SLI;
irq_ptr->ccw.count = irq_ptr->aqueue.count;
irq_ptr->ccw.cda = 0;
spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
0, DOIO_DENY_PREFETCH);
if (rc) {
DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4x", rc);
}
spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
if (rc)
goto out;
if (is_thinint_irq(irq_ptr))
tiqdio_add_input_queues(irq_ptr);
/* wait for subchannel to become active */
msleep(5);
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_STOPPED:
case QDIO_IRQ_STATE_ERR:
rc = -EIO;
break;
default:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
rc = 0;
}
out:
mutex_unlock(&irq_ptr->setup_mutex);
return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
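/* Return 1 if @bufnr lies within the circular range [start, start + count),
* taking wrap-around of the buffer ring into account. */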
static inline int buf_in_between(int bufnr, int start, int count)
{
int end = add_buf(start, count);
if (end > start) {
if (bufnr >= start && bufnr < end)
return 1;
else
return 0;
}
/* wrap-around case */
if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
(bufnr < end))
return 1;
else
return 0;
}
/**
* handle_inbound - reset processed input buffers
* @q: queue containing the buffers
* @callflags: flags
* @bufnr: first buffer to process
* @count: how many buffers are emptied
*/
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
int bufnr, int count)
{
int used, diff;
qperf_inc(q, inbound_call);
if (!q->u.in.polling)
goto set;
/* protect against stop polling setting an ACK for an emptied slsb */
if (count == QDIO_MAX_BUFFERS_PER_Q) {
/* overwriting everything, just delete polling status */
q->u.in.polling = 0;
q->u.in.ack_count = 0;
goto set;
} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
if (is_qebsm(q)) {
/* partial overwrite, just update ack_start */
diff = add_buf(bufnr, count);
diff = sub_buf(diff, q->u.in.ack_start);
q->u.in.ack_count -= diff;
if (q->u.in.ack_count <= 0) {
q->u.in.polling = 0;
q->u.in.ack_count = 0;
goto set;
}
q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
}
else
/* the only ACK will be deleted, so stop polling */
q->u.in.polling = 0;
}
set:
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
used = atomic_add_return(count, &q->nr_buf_used) - count;
if (need_siga_in(q))
return qdio_siga_input(q);
return 0;
}
/**
* handle_outbound - process filled outbound buffers
* @q: queue containing the buffers
* @callflags: flags
* @bufnr: first buffer to process
* @count: how many buffers are filled
*/
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
int bufnr, int count)
{
unsigned char state = 0;
int used, rc = 0;
qperf_inc(q, outbound_call);
count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
used = atomic_add_return(count, &q->nr_buf_used);
if (used == QDIO_MAX_BUFFERS_PER_Q)
qperf_inc(q, outbound_queue_full);
if (callflags & QDIO_FLAG_PCI_OUT) {
q->u.out.pci_out_enabled = 1;
qperf_inc(q, pci_request_int);
} else
q->u.out.pci_out_enabled = 0;
if (queue_type(q) == QDIO_IQDIO_QFMT) {
unsigned long phys_aob = 0;
/* One SIGA-W per buffer required for unicast HSI */
WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
rc = qdio_kick_outbound_q(q, phys_aob);
} else if (need_siga_sync(q)) {
rc = qdio_siga_sync_q(q);
} else {
/* try to fast requeue buffers */
get_buf_state(q, prev_buf(bufnr), &state, 0);
if (state != SLSB_CU_OUTPUT_PRIMED)
rc = qdio_kick_outbound_q(q, 0);
else
qperf_inc(q, fast_requeue);
}
/* in case of SIGA errors we must process the error immediately */
if (used >= q->u.out.scan_threshold || rc)
tasklet_schedule(&q->tasklet);
else
/* free the SBALs in case of no further traffic */
if (!timer_pending(&q->u.out.timer))
mod_timer(&q->u.out.timer, jiffies + HZ);
return rc;
}
/**
* do_QDIO - process input or output buffers
* @cdev: associated ccw_device for the qdio subchannel
* @callflags: input or output and special flags from the program
* @q_nr: queue number
* @bufnr: buffer number
* @count: how many buffers to process
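*
* Example (illustrative only): a device driver hands back processed input
* buffers with do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count).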
*/
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
int q_nr, unsigned int bufnr, unsigned int count)
{
struct qdio_irq *irq_ptr;
if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
DBF_DEV_EVENT(DBF_INFO, irq_ptr,
"do%02x b:%02x c:%02x", callflags, bufnr, count);
if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
return -EIO;
if (!count)
return 0;
if (callflags & QDIO_FLAG_SYNC_INPUT)
return handle_inbound(irq_ptr->input_qs[q_nr],
callflags, bufnr, count);
else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
return handle_outbound(irq_ptr->output_qs[q_nr],
callflags, bufnr, count);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
/**
* qdio_start_irq - process input buffers
* @cdev: associated ccw_device for the qdio subchannel
* @nr: input queue number
*
* Return codes
* 0 - success
* 1 - irqs not started since new data is available
*/
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
struct qdio_q *q;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
clear_nonshared_ind(irq_ptr);
qdio_stop_polling(q);
clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
/*
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
if (test_nonshared_ind(irq_ptr))
goto rescan;
if (!qdio_inbound_q_done(q))
goto rescan;
return 0;
rescan:
if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
&q->u.in.queue_irq_state))
return 0;
else
return 1;
}
EXPORT_SYMBOL(qdio_start_irq);
/**
* qdio_get_next_buffers - process input buffers
* @cdev: associated ccw_device for the qdio subchannel
* @nr: input queue number
* @bufnr: first filled buffer number
* @error: buffers are in error state
*
* Return codes
* < 0 - error
* = 0 - no new buffers found
* > 0 - number of processed buffers
*/
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
int *error)
{
struct qdio_q *q;
int start, end;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
/*
* Cannot rely on automatic sync after interrupt since queues may
* also be examined without interrupt.
*/
if (need_siga_sync(q))
qdio_sync_queues(q);
/* check the PCI capable outbound queues. */
qdio_check_outbound_after_thinint(q);
if (!qdio_inbound_q_moved(q))
return 0;
/* Note: upper-layer MUST stop processing immediately here ... */
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return -EIO;
start = q->first_to_kick;
end = q->first_to_check;
*bufnr = start;
*error = q->qdio_error;
/* for the next time */
q->first_to_kick = end;
q->qdio_error = 0;
return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);
/**
* qdio_stop_irq - disable interrupt processing for the device
* @cdev: associated ccw_device for the qdio subchannel
* @nr: input queue number
*
* Return codes
* 0 - interrupts were already disabled
* 1 - interrupts successfully disabled
*/
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
struct qdio_q *q;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
&q->u.in.queue_irq_state))
return 0;
else
return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);
static int __init init_QDIO(void)
{
int rc;
rc = qdio_debug_init();
if (rc)
return rc;
rc = qdio_setup_init();
if (rc)
goto out_debug;
rc = tiqdio_allocate_memory();
if (rc)
goto out_cache;
rc = tiqdio_register_thinints();
if (rc)
goto out_ti;
return 0;
out_ti:
tiqdio_free_memory();
out_cache:
qdio_setup_exit();
out_debug:
qdio_debug_exit();
return rc;
}
static void __exit exit_QDIO(void)
{
tiqdio_unregister_thinints();
tiqdio_free_memory();
qdio_setup_exit();
qdio_debug_exit();
}
module_init(init_QDIO);
module_exit(exit_QDIO);
| gpl-2.0 |
zuoyanyouwu/Telegram | TMessagesProj/jni/boringssl/crypto/x509/x509spki.c | 104 | 4710 | /* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
* project 1999. */
/* ====================================================================
* Copyright (c) 1999 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* licensing@OpenSSL.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com). */
#include <string.h>
#include <openssl/base64.h>
#include <openssl/err.h>
#include <openssl/mem.h>
#include <openssl/x509.h>
int NETSCAPE_SPKI_set_pubkey(NETSCAPE_SPKI *x, EVP_PKEY *pkey)
{
if ((x == NULL) || (x->spkac == NULL)) return(0);
return(X509_PUBKEY_set(&(x->spkac->pubkey),pkey));
}
EVP_PKEY *NETSCAPE_SPKI_get_pubkey(NETSCAPE_SPKI *x)
{
if ((x == NULL) || (x->spkac == NULL))
return(NULL);
return(X509_PUBKEY_get(x->spkac->pubkey));
}
/* Load a Netscape SPKI from a base64 encoded string */
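/* Usage sketch (illustrative): pass len <= 0 to have the string length
* measured with strlen(), e.g. NETSCAPE_SPKI_b64_decode(b64_str, -1). */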
NETSCAPE_SPKI * NETSCAPE_SPKI_b64_decode(const char *str, int len)
{
unsigned char *spki_der;
const unsigned char *p;
size_t spki_len;
NETSCAPE_SPKI *spki;
if (len <= 0)
len = strlen(str);
if (!EVP_DecodedLength(&spki_len, len)) {
OPENSSL_PUT_ERROR(X509, X509_R_BASE64_DECODE_ERROR);
return NULL;
}
if (!(spki_der = OPENSSL_malloc(spki_len))) {
OPENSSL_PUT_ERROR(X509, ERR_R_MALLOC_FAILURE);
return NULL;
}
if (!EVP_DecodeBase64(spki_der, &spki_len, spki_len, (const uint8_t *)str, len)) {
OPENSSL_PUT_ERROR(X509, X509_R_BASE64_DECODE_ERROR);
OPENSSL_free(spki_der);
return NULL;
}
p = spki_der;
spki = d2i_NETSCAPE_SPKI(NULL, &p, spki_len);
OPENSSL_free(spki_der);
return spki;
}
/* Generate a base64 encoded string from an SPKI */
char * NETSCAPE_SPKI_b64_encode(NETSCAPE_SPKI *spki)
{
unsigned char *der_spki, *p;
char *b64_str;
size_t b64_len;
int der_len;
der_len = i2d_NETSCAPE_SPKI(spki, NULL);
if (!EVP_EncodedLength(&b64_len, der_len))
{
OPENSSL_PUT_ERROR(X509, ERR_R_OVERFLOW);
return NULL;
}
der_spki = OPENSSL_malloc(der_len);
if (der_spki == NULL) {
OPENSSL_PUT_ERROR(X509, ERR_R_MALLOC_FAILURE);
return NULL;
}
b64_str = OPENSSL_malloc(b64_len);
if (b64_str == NULL) {
OPENSSL_free(der_spki);
OPENSSL_PUT_ERROR(X509, ERR_R_MALLOC_FAILURE);
return NULL;
}
p = der_spki;
i2d_NETSCAPE_SPKI(spki, &p);
EVP_EncodeBlock((unsigned char *)b64_str, der_spki, der_len);
OPENSSL_free(der_spki);
return b64_str;
}
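/*
 * Illustrative usage sketch (editor addition, not part of the original
 * file): how the two base64 helpers above are typically combined. The
 * function name, the SPKAC input string and the printf reporting are
 * assumptions made only for this example; NETSCAPE_SPKI_b64_decode,
 * NETSCAPE_SPKI_get_pubkey, NETSCAPE_SPKI_b64_encode, NETSCAPE_SPKI_free,
 * EVP_PKEY_free and OPENSSL_free are the real API. Guarded with #if 0 so
 * the sketch does not affect compilation of the surrounding file.
 */
#if 0
#include <stdio.h>
static void example_spkac_roundtrip(const char *spkac_b64)
{
	/* len <= 0 makes the decoder fall back to strlen() on the input. */
	NETSCAPE_SPKI *spki = NETSCAPE_SPKI_b64_decode(spkac_b64, -1);
	if (spki == NULL)
		return;
	EVP_PKEY *pkey = NETSCAPE_SPKI_get_pubkey(spki);
	if (pkey != NULL) {
		char *b64 = NETSCAPE_SPKI_b64_encode(spki);
		if (b64 != NULL) {
			printf("re-encoded SPKAC: %s\n", b64);
			OPENSSL_free(b64);
		}
		EVP_PKEY_free(pkey);
	}
	NETSCAPE_SPKI_free(spki);
}
#endif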
| gpl-2.0 |
oliliango/linux-cedarview_gfx | drivers/staging/xgifb/vb_init.c | 104 | 40553 | #include <linux/delay.h>
#include <linux/vmalloc.h>
#include "XGIfb.h"
#include "vb_def.h"
#include "vb_util.h"
#include "vb_setmode.h"
#include "vb_init.h"
static const unsigned short XGINew_DDRDRAM_TYPE340[4][2] = {
{ 16, 0x45},
{ 8, 0x35},
{ 4, 0x31},
{ 2, 0x21} };
static const unsigned short XGINew_DDRDRAM_TYPE20[12][2] = {
{ 128, 0x5D},
{ 64, 0x59},
{ 64, 0x4D},
{ 32, 0x55},
{ 32, 0x49},
{ 32, 0x3D},
{ 16, 0x51},
{ 16, 0x45},
{ 16, 0x39},
{ 8, 0x41},
{ 8, 0x35},
{ 4, 0x31} };
#define XGIFB_ROM_SIZE 65536
static unsigned char
XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned char data, temp;
if (HwDeviceExtension->jChipType < XG20) {
data = xgifb_reg_get(pVBInfo->P3c4, 0x39) & 0x02;
if (data == 0)
data = (xgifb_reg_get(pVBInfo->P3c4, 0x3A) &
0x02) >> 1;
return data;
} else if (HwDeviceExtension->jChipType == XG27) {
temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
/* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */
if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
data = 0; /* DDR */
else
data = 1; /* DDRII */
return data;
} else if (HwDeviceExtension->jChipType == XG21) {
/* Independent GPIO control */
xgifb_reg_and(pVBInfo->P3d4, 0xB4, ~0x02);
udelay(800);
xgifb_reg_or(pVBInfo->P3d4, 0x4A, 0x80); /* Enable GPIOH read */
/* GPIOF 0:DVI 1:DVO */
temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
/* HOTPLUG_SUPPORT */
/* For the current XG20 & XG21, GPIOH is floating, so the
 * driver temporarily forces the DDR type */
if (temp & 0x01) /* DVI read GPIOH */
data = 1; /* DDRII */
else
data = 0; /* DDR */
/* ~HOTPLUG_SUPPORT */
xgifb_reg_or(pVBInfo->P3d4, 0xB4, 0x02);
return data;
} else {
data = xgifb_reg_get(pVBInfo->P3d4, 0x97) & 0x01;
if (data == 1)
data++;
return data;
}
}
static void XGINew_DDR1x_MRS_340(unsigned long P3c4,
struct vb_device_info *pVBInfo)
{
xgifb_reg_set(P3c4, 0x18, 0x01);
xgifb_reg_set(P3c4, 0x19, 0x20);
xgifb_reg_set(P3c4, 0x16, 0x00);
xgifb_reg_set(P3c4, 0x16, 0x80);
mdelay(3);
xgifb_reg_set(P3c4, 0x18, 0x00);
xgifb_reg_set(P3c4, 0x19, 0x20);
xgifb_reg_set(P3c4, 0x16, 0x00);
xgifb_reg_set(P3c4, 0x16, 0x80);
udelay(60);
xgifb_reg_set(P3c4,
0x18,
pVBInfo->SR15[2][pVBInfo->ram_type]); /* SR18 */
xgifb_reg_set(P3c4, 0x19, 0x01);
xgifb_reg_set(P3c4, 0x16, 0x03);
xgifb_reg_set(P3c4, 0x16, 0x83);
mdelay(1);
xgifb_reg_set(P3c4, 0x1B, 0x03);
udelay(500);
xgifb_reg_set(P3c4,
0x18,
pVBInfo->SR15[2][pVBInfo->ram_type]); /* SR18 */
xgifb_reg_set(P3c4, 0x19, 0x00);
xgifb_reg_set(P3c4, 0x16, 0x03);
xgifb_reg_set(P3c4, 0x16, 0x83);
xgifb_reg_set(P3c4, 0x1B, 0x00);
}
static void XGINew_SetMemoryClock(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
xgifb_reg_set(pVBInfo->P3c4,
0x28,
pVBInfo->MCLKData[pVBInfo->ram_type].SR28);
xgifb_reg_set(pVBInfo->P3c4,
0x29,
pVBInfo->MCLKData[pVBInfo->ram_type].SR29);
xgifb_reg_set(pVBInfo->P3c4,
0x2A,
pVBInfo->MCLKData[pVBInfo->ram_type].SR2A);
xgifb_reg_set(pVBInfo->P3c4,
0x2E,
XGI340_ECLKData[pVBInfo->ram_type].SR2E);
xgifb_reg_set(pVBInfo->P3c4,
0x2F,
XGI340_ECLKData[pVBInfo->ram_type].SR2F);
xgifb_reg_set(pVBInfo->P3c4,
0x30,
XGI340_ECLKData[pVBInfo->ram_type].SR30);
/* When XG42 ECLK = MCLK = 207MHz, Set SR32 D[1:0] = 10b */
/* Modify SR32 value, when MCLK=207MHz, ECLK=250MHz,
* Set SR32 D[1:0] = 10b */
if (HwDeviceExtension->jChipType == XG42) {
if ((pVBInfo->MCLKData[pVBInfo->ram_type].SR28 == 0x1C) &&
(pVBInfo->MCLKData[pVBInfo->ram_type].SR29 == 0x01) &&
(((XGI340_ECLKData[pVBInfo->ram_type].SR2E == 0x1C) &&
(XGI340_ECLKData[pVBInfo->ram_type].SR2F == 0x01)) ||
((XGI340_ECLKData[pVBInfo->ram_type].SR2E == 0x22) &&
(XGI340_ECLKData[pVBInfo->ram_type].SR2F == 0x01))))
xgifb_reg_set(pVBInfo->P3c4,
0x32,
((unsigned char) xgifb_reg_get(
pVBInfo->P3c4, 0x32) & 0xFC) | 0x02);
}
}
static void XGINew_DDRII_Bootup_XG27(
struct xgi_hw_device_info *HwDeviceExtension,
unsigned long P3c4, struct vb_device_info *pVBInfo)
{
unsigned long P3d4 = P3c4 + 0x10;
pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
/* Set Double Frequency */
xgifb_reg_set(P3d4, 0x97, pVBInfo->XGINew_CR97); /* CR97 */
udelay(200);
xgifb_reg_set(P3c4, 0x18, 0x00); /* Set SR18 */ /* EMRS2 */
xgifb_reg_set(P3c4, 0x19, 0x80); /* Set SR19 */
xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */
udelay(15);
xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */
udelay(15);
xgifb_reg_set(P3c4, 0x18, 0x00); /* Set SR18 */ /* EMRS3 */
xgifb_reg_set(P3c4, 0x19, 0xC0); /* Set SR19 */
xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */
udelay(15);
xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */
udelay(15);
xgifb_reg_set(P3c4, 0x18, 0x00); /* Set SR18 */ /* EMRS1 */
xgifb_reg_set(P3c4, 0x19, 0x40); /* Set SR19 */
xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */
udelay(30);
xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */
udelay(15);
xgifb_reg_set(P3c4, 0x18, 0x42); /* Set SR18 */ /* MRS, DLL Enable */
xgifb_reg_set(P3c4, 0x19, 0x0A); /* Set SR19 */
xgifb_reg_set(P3c4, 0x16, 0x00); /* Set SR16 */
udelay(30);
xgifb_reg_set(P3c4, 0x16, 0x00); /* Set SR16 */
xgifb_reg_set(P3c4, 0x16, 0x80); /* Set SR16 */
xgifb_reg_set(P3c4, 0x1B, 0x04); /* Set SR1B */
udelay(60);
xgifb_reg_set(P3c4, 0x1B, 0x00); /* Set SR1B */
xgifb_reg_set(P3c4, 0x18, 0x42); /* Set SR18 */ /* MRS, DLL Reset */
xgifb_reg_set(P3c4, 0x19, 0x08); /* Set SR19 */
xgifb_reg_set(P3c4, 0x16, 0x00); /* Set SR16 */
udelay(30);
xgifb_reg_set(P3c4, 0x16, 0x83); /* Set SR16 */
udelay(15);
xgifb_reg_set(P3c4, 0x18, 0x80); /* Set SR18 */ /* MRS, ODT */
xgifb_reg_set(P3c4, 0x19, 0x46); /* Set SR19 */
xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */
udelay(30);
xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */
udelay(15);
xgifb_reg_set(P3c4, 0x18, 0x00); /* Set SR18 */ /* EMRS */
xgifb_reg_set(P3c4, 0x19, 0x40); /* Set SR19 */
xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */
udelay(30);
xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */
udelay(15);
/* Set SR1B refresh control 000:close; 010:open */
xgifb_reg_set(P3c4, 0x1B, 0x04);
udelay(200);
}
static void XGINew_DDR2_MRS_XG20(struct xgi_hw_device_info *HwDeviceExtension,
unsigned long P3c4, struct vb_device_info *pVBInfo)
{
unsigned long P3d4 = P3c4 + 0x10;
pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
xgifb_reg_set(P3d4, 0x97, 0x11); /* CR97 */
udelay(200);
xgifb_reg_set(P3c4, 0x18, 0x00); /* EMRS2 */
xgifb_reg_set(P3c4, 0x19, 0x80);
xgifb_reg_set(P3c4, 0x16, 0x05);
xgifb_reg_set(P3c4, 0x16, 0x85);
xgifb_reg_set(P3c4, 0x18, 0x00); /* EMRS3 */
xgifb_reg_set(P3c4, 0x19, 0xC0);
xgifb_reg_set(P3c4, 0x16, 0x05);
xgifb_reg_set(P3c4, 0x16, 0x85);
xgifb_reg_set(P3c4, 0x18, 0x00); /* EMRS1 */
xgifb_reg_set(P3c4, 0x19, 0x40);
xgifb_reg_set(P3c4, 0x16, 0x05);
xgifb_reg_set(P3c4, 0x16, 0x85);
xgifb_reg_set(P3c4, 0x18, 0x42); /* MRS1 */
xgifb_reg_set(P3c4, 0x19, 0x02);
xgifb_reg_set(P3c4, 0x16, 0x05);
xgifb_reg_set(P3c4, 0x16, 0x85);
udelay(15);
xgifb_reg_set(P3c4, 0x1B, 0x04); /* SR1B */
udelay(30);
xgifb_reg_set(P3c4, 0x1B, 0x00); /* SR1B */
udelay(100);
xgifb_reg_set(P3c4, 0x18, 0x42); /* MRS1 */
xgifb_reg_set(P3c4, 0x19, 0x00);
xgifb_reg_set(P3c4, 0x16, 0x05);
xgifb_reg_set(P3c4, 0x16, 0x85);
udelay(200);
}
static void XGINew_DDR1x_MRS_XG20(unsigned long P3c4,
struct vb_device_info *pVBInfo)
{
xgifb_reg_set(P3c4, 0x18, 0x01);
xgifb_reg_set(P3c4, 0x19, 0x40);
xgifb_reg_set(P3c4, 0x16, 0x00);
xgifb_reg_set(P3c4, 0x16, 0x80);
udelay(60);
xgifb_reg_set(P3c4, 0x18, 0x00);
xgifb_reg_set(P3c4, 0x19, 0x40);
xgifb_reg_set(P3c4, 0x16, 0x00);
xgifb_reg_set(P3c4, 0x16, 0x80);
udelay(60);
xgifb_reg_set(P3c4,
0x18,
pVBInfo->SR15[2][pVBInfo->ram_type]); /* SR18 */
xgifb_reg_set(P3c4, 0x19, 0x01);
xgifb_reg_set(P3c4, 0x16, 0x03);
xgifb_reg_set(P3c4, 0x16, 0x83);
mdelay(1);
xgifb_reg_set(P3c4, 0x1B, 0x03);
udelay(500);
xgifb_reg_set(P3c4,
0x18,
pVBInfo->SR15[2][pVBInfo->ram_type]); /* SR18 */
xgifb_reg_set(P3c4, 0x19, 0x00);
xgifb_reg_set(P3c4, 0x16, 0x03);
xgifb_reg_set(P3c4, 0x16, 0x83);
xgifb_reg_set(P3c4, 0x1B, 0x00);
}
static void XGINew_DDR1x_DefaultRegister(
struct xgi_hw_device_info *HwDeviceExtension,
unsigned long Port, struct vb_device_info *pVBInfo)
{
unsigned long P3d4 = Port, P3c4 = Port - 0x10;
if (HwDeviceExtension->jChipType >= XG20) {
XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
xgifb_reg_set(P3d4,
0x82,
pVBInfo->CR40[11][pVBInfo->ram_type]); /* CR82 */
xgifb_reg_set(P3d4,
0x85,
pVBInfo->CR40[12][pVBInfo->ram_type]); /* CR85 */
xgifb_reg_set(P3d4,
0x86,
pVBInfo->CR40[13][pVBInfo->ram_type]); /* CR86 */
xgifb_reg_set(P3d4, 0x98, 0x01);
xgifb_reg_set(P3d4, 0x9A, 0x02);
XGINew_DDR1x_MRS_XG20(P3c4, pVBInfo);
} else {
XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
switch (HwDeviceExtension->jChipType) {
case XG42:
/* CR82 */
xgifb_reg_set(P3d4,
0x82,
pVBInfo->CR40[11][pVBInfo->ram_type]);
/* CR85 */
xgifb_reg_set(P3d4,
0x85,
pVBInfo->CR40[12][pVBInfo->ram_type]);
/* CR86 */
xgifb_reg_set(P3d4,
0x86,
pVBInfo->CR40[13][pVBInfo->ram_type]);
break;
default:
xgifb_reg_set(P3d4, 0x82, 0x88);
xgifb_reg_set(P3d4, 0x86, 0x00);
/* Insert read command for delay */
xgifb_reg_get(P3d4, 0x86);
xgifb_reg_set(P3d4, 0x86, 0x88);
xgifb_reg_get(P3d4, 0x86);
xgifb_reg_set(P3d4,
0x86,
pVBInfo->CR40[13][pVBInfo->ram_type]);
xgifb_reg_set(P3d4, 0x82, 0x77);
xgifb_reg_set(P3d4, 0x85, 0x00);
/* Insert read command for delay */
xgifb_reg_get(P3d4, 0x85);
xgifb_reg_set(P3d4, 0x85, 0x88);
/* Insert read command for delay */
xgifb_reg_get(P3d4, 0x85);
/* CR85 */
xgifb_reg_set(P3d4,
0x85,
pVBInfo->CR40[12][pVBInfo->ram_type]);
/* CR82 */
xgifb_reg_set(P3d4,
0x82,
pVBInfo->CR40[11][pVBInfo->ram_type]);
break;
}
xgifb_reg_set(P3d4, 0x97, 0x00);
xgifb_reg_set(P3d4, 0x98, 0x01);
xgifb_reg_set(P3d4, 0x9A, 0x02);
XGINew_DDR1x_MRS_340(P3c4, pVBInfo);
}
}
static void XGINew_DDR2_DefaultRegister(
struct xgi_hw_device_info *HwDeviceExtension,
unsigned long Port, struct vb_device_info *pVBInfo)
{
unsigned long P3d4 = Port, P3c4 = Port - 0x10;
/* Keep the following setting sequence; between settings of
 * the same register, insert an idle (dummy read) */
xgifb_reg_set(P3d4, 0x82, 0x77);
xgifb_reg_set(P3d4, 0x86, 0x00);
xgifb_reg_get(P3d4, 0x86); /* Insert read command for delay */
xgifb_reg_set(P3d4, 0x86, 0x88);
xgifb_reg_get(P3d4, 0x86); /* Insert read command for delay */
/* CR86 */
xgifb_reg_set(P3d4, 0x86, pVBInfo->CR40[13][pVBInfo->ram_type]);
xgifb_reg_set(P3d4, 0x82, 0x77);
xgifb_reg_set(P3d4, 0x85, 0x00);
xgifb_reg_get(P3d4, 0x85); /* Insert read command for delay */
xgifb_reg_set(P3d4, 0x85, 0x88);
xgifb_reg_get(P3d4, 0x85); /* Insert read command for delay */
xgifb_reg_set(P3d4,
0x85,
pVBInfo->CR40[12][pVBInfo->ram_type]); /* CR85 */
if (HwDeviceExtension->jChipType == XG27)
/* CR82 */
xgifb_reg_set(P3d4, 0x82, pVBInfo->CR40[11][pVBInfo->ram_type]);
else
xgifb_reg_set(P3d4, 0x82, 0xA8); /* CR82 */
xgifb_reg_set(P3d4, 0x98, 0x01);
xgifb_reg_set(P3d4, 0x9A, 0x02);
if (HwDeviceExtension->jChipType == XG27)
XGINew_DDRII_Bootup_XG27(HwDeviceExtension, P3c4, pVBInfo);
else
XGINew_DDR2_MRS_XG20(HwDeviceExtension, P3c4, pVBInfo);
}
static void XGINew_SetDRAMDefaultRegister340(
struct xgi_hw_device_info *HwDeviceExtension,
unsigned long Port, struct vb_device_info *pVBInfo)
{
unsigned char temp, temp1, temp2, temp3, i, j, k;
unsigned long P3d4 = Port, P3c4 = Port - 0x10;
xgifb_reg_set(P3d4, 0x6D, pVBInfo->CR40[8][pVBInfo->ram_type]);
xgifb_reg_set(P3d4, 0x68, pVBInfo->CR40[5][pVBInfo->ram_type]);
xgifb_reg_set(P3d4, 0x69, pVBInfo->CR40[6][pVBInfo->ram_type]);
xgifb_reg_set(P3d4, 0x6A, pVBInfo->CR40[7][pVBInfo->ram_type]);
temp2 = 0;
for (i = 0; i < 4; i++) {
/* CR6B DQS fine tune delay */
temp = XGI340_CR6B[pVBInfo->ram_type][i];
for (j = 0; j < 4; j++) {
temp1 = ((temp >> (2 * j)) & 0x03) << 2;
temp2 |= temp1;
xgifb_reg_set(P3d4, 0x6B, temp2);
/* Insert read command for delay */
xgifb_reg_get(P3d4, 0x6B);
temp2 &= 0xF0;
temp2 += 0x10;
}
}
temp2 = 0;
for (i = 0; i < 4; i++) {
/* CR6E DQM fine tune delay */
temp = 0;
for (j = 0; j < 4; j++) {
temp1 = ((temp >> (2 * j)) & 0x03) << 2;
temp2 |= temp1;
xgifb_reg_set(P3d4, 0x6E, temp2);
/* Insert read command for delay */
xgifb_reg_get(P3d4, 0x6E);
temp2 &= 0xF0;
temp2 += 0x10;
}
}
temp3 = 0;
for (k = 0; k < 4; k++) {
/* CR6E_D[1:0] select channel */
xgifb_reg_and_or(P3d4, 0x6E, 0xFC, temp3);
temp2 = 0;
for (i = 0; i < 8; i++) {
/* CR6F DQ fine tune delay */
temp = 0;
for (j = 0; j < 4; j++) {
temp1 = (temp >> (2 * j)) & 0x03;
temp2 |= temp1;
xgifb_reg_set(P3d4, 0x6F, temp2);
/* Insert read command for delay */
xgifb_reg_get(P3d4, 0x6F);
temp2 &= 0xF8;
temp2 += 0x08;
}
}
temp3 += 0x01;
}
xgifb_reg_set(P3d4,
0x80,
pVBInfo->CR40[9][pVBInfo->ram_type]); /* CR80 */
xgifb_reg_set(P3d4,
0x81,
pVBInfo->CR40[10][pVBInfo->ram_type]); /* CR81 */
temp2 = 0x80;
/* CR89 terminator type select */
temp = 0;
for (j = 0; j < 4; j++) {
temp1 = (temp >> (2 * j)) & 0x03;
temp2 |= temp1;
xgifb_reg_set(P3d4, 0x89, temp2);
xgifb_reg_get(P3d4, 0x89); /* Insert read command for delay */
temp2 &= 0xF0;
temp2 += 0x10;
}
temp = 0;
temp1 = temp & 0x03;
temp2 |= temp1;
xgifb_reg_set(P3d4, 0x89, temp2);
temp = pVBInfo->CR40[3][pVBInfo->ram_type];
temp1 = temp & 0x0F;
temp2 = (temp >> 4) & 0x07;
temp3 = temp & 0x80;
xgifb_reg_set(P3d4, 0x45, temp1); /* CR45 */
xgifb_reg_set(P3d4, 0x99, temp2); /* CR99 */
xgifb_reg_or(P3d4, 0x40, temp3); /* CR40_D[7] */
xgifb_reg_set(P3d4,
0x41,
pVBInfo->CR40[0][pVBInfo->ram_type]); /* CR41 */
if (HwDeviceExtension->jChipType == XG27)
xgifb_reg_set(P3d4, 0x8F, XG27_CR8F); /* CR8F */
for (j = 0; j <= 6; j++) /* CR90 - CR96 */
xgifb_reg_set(P3d4, (0x90 + j),
pVBInfo->CR40[14 + j][pVBInfo->ram_type]);
for (j = 0; j <= 2; j++) /* CRC3 - CRC5 */
xgifb_reg_set(P3d4, (0xC3 + j),
pVBInfo->CR40[21 + j][pVBInfo->ram_type]);
for (j = 0; j < 2; j++) /* CR8A - CR8B */
xgifb_reg_set(P3d4, (0x8A + j),
pVBInfo->CR40[1 + j][pVBInfo->ram_type]);
if (HwDeviceExtension->jChipType == XG42)
xgifb_reg_set(P3d4, 0x8C, 0x87);
xgifb_reg_set(P3d4,
0x59,
pVBInfo->CR40[4][pVBInfo->ram_type]); /* CR59 */
xgifb_reg_set(P3d4, 0x83, 0x09); /* CR83 */
xgifb_reg_set(P3d4, 0x87, 0x00); /* CR87 */
xgifb_reg_set(P3d4, 0xCF, XG40_CRCF); /* CRCF */
if (pVBInfo->ram_type) {
xgifb_reg_set(P3c4, 0x17, 0x80); /* SR17 DDRII */
if (HwDeviceExtension->jChipType == XG27)
xgifb_reg_set(P3c4, 0x17, 0x02); /* SR17 DDRII */
} else {
xgifb_reg_set(P3c4, 0x17, 0x00); /* SR17 DDR */
}
xgifb_reg_set(P3c4, 0x1A, 0x87); /* SR1A */
temp = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
if (temp == 0) {
XGINew_DDR1x_DefaultRegister(HwDeviceExtension, P3d4, pVBInfo);
} else {
xgifb_reg_set(P3d4, 0xB0, 0x80); /* DDRII Dual frequency mode */
XGINew_DDR2_DefaultRegister(HwDeviceExtension, P3d4, pVBInfo);
}
xgifb_reg_set(P3c4,
0x1B,
pVBInfo->SR15[3][pVBInfo->ram_type]); /* SR1B */
}
static unsigned short XGINew_SetDRAMSize20Reg(
unsigned short dram_size,
struct vb_device_info *pVBInfo)
{
unsigned short data = 0, memsize = 0;
int RankSize;
unsigned char ChannelNo;
RankSize = dram_size * pVBInfo->ram_bus / 8;
data = xgifb_reg_get(pVBInfo->P3c4, 0x13);
data &= 0x80;
if (data == 0x80)
RankSize *= 2;
data = 0;
if (pVBInfo->ram_channel == 3)
ChannelNo = 4;
else
ChannelNo = pVBInfo->ram_channel;
if (ChannelNo * RankSize <= 256) {
while ((RankSize >>= 1) > 0)
data += 0x10;
memsize = data >> 4;
/* Fix DRAM Sizing Error */
xgifb_reg_set(pVBInfo->P3c4,
0x14,
(xgifb_reg_get(pVBInfo->P3c4, 0x14) & 0x0F) |
(data & 0xF0));
udelay(15);
}
return memsize;
}
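/*
 * Editor's note (not in the original): XGINew_ReadWriteRest() below probes
 * whether the DRAM configuration just programmed really decodes the assumed
 * amount of memory. It writes a distinct pattern at offset 0 and at each
 * power-of-two offset (1 << StartAddr .. 1 << StopAddr), then reads them
 * back; if the aperture is smaller than assumed, the high offsets alias onto
 * lower ones, the read-back mismatches and the probe returns 0, so the
 * caller falls back to the next, smaller configuration.
 */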
static int XGINew_ReadWriteRest(unsigned short StopAddr,
unsigned short StartAddr, struct vb_device_info *pVBInfo)
{
int i;
unsigned long Position = 0;
void __iomem *fbaddr = pVBInfo->FBAddr;
writel(Position, fbaddr + Position);
for (i = StartAddr; i <= StopAddr; i++) {
Position = 1 << i;
writel(Position, fbaddr + Position);
}
udelay(500); /* Fix #1759 Memory Size error in Multi-Adapter. */
Position = 0;
if (readl(fbaddr + Position) != Position)
return 0;
for (i = StartAddr; i <= StopAddr; i++) {
Position = 1 << i;
if (readl(fbaddr + Position) != Position)
return 0;
}
return 1;
}
static unsigned char XGINew_CheckFrequence(struct vb_device_info *pVBInfo)
{
unsigned char data;
data = xgifb_reg_get(pVBInfo->P3d4, 0x97);
if ((data & 0x10) == 0) {
data = xgifb_reg_get(pVBInfo->P3c4, 0x39);
data = (data & 0x02) >> 1;
return data;
} else {
return data & 0x01;
}
}
static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned char data;
switch (HwDeviceExtension->jChipType) {
case XG20:
case XG21:
data = xgifb_reg_get(pVBInfo->P3d4, 0x97);
data = data & 0x01;
pVBInfo->ram_channel = 1; /* XG20 "JUST" one channel */
if (data == 0) { /* Single_32_16 */
if ((HwDeviceExtension->ulVideoMemorySize - 1)
> 0x1000000) {
pVBInfo->ram_bus = 32; /* 32 bits */
/* 22bit + 2 rank + 32bit */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x52);
udelay(15);
if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
return;
if ((HwDeviceExtension->ulVideoMemorySize - 1) >
0x800000) {
/* 22bit + 1 rank + 32bit */
xgifb_reg_set(pVBInfo->P3c4,
0x13,
0x31);
xgifb_reg_set(pVBInfo->P3c4,
0x14,
0x42);
udelay(15);
if (XGINew_ReadWriteRest(23,
23,
pVBInfo) == 1)
return;
}
}
if ((HwDeviceExtension->ulVideoMemorySize - 1) >
0x800000) {
pVBInfo->ram_bus = 16; /* 16 bits */
/* 22bit + 2 rank + 16bit */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x41);
udelay(15);
if (XGINew_ReadWriteRest(23, 22, pVBInfo) == 1)
return;
else
xgifb_reg_set(pVBInfo->P3c4,
0x13,
0x31);
udelay(15);
}
} else { /* Dual_16_8 */
if ((HwDeviceExtension->ulVideoMemorySize - 1) >
0x800000) {
pVBInfo->ram_bus = 16; /* 16 bits */
/* (0x31:12x8x2) 22bit + 2 rank */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
/* 0x41:16Mx16 bit*/
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x41);
udelay(15);
if (XGINew_ReadWriteRest(23, 22, pVBInfo) == 1)
return;
if ((HwDeviceExtension->ulVideoMemorySize - 1) >
0x400000) {
/* (0x31:12x8x2) 22bit + 1 rank */
xgifb_reg_set(pVBInfo->P3c4,
0x13,
0x31);
/* 0x31:8Mx16 bit*/
xgifb_reg_set(pVBInfo->P3c4,
0x14,
0x31);
udelay(15);
if (XGINew_ReadWriteRest(22,
22,
pVBInfo) == 1)
return;
}
}
if ((HwDeviceExtension->ulVideoMemorySize - 1) >
0x400000) {
pVBInfo->ram_bus = 8; /* 8 bits */
/* (0x31:12x8x2) 22bit + 2 rank */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
/* 0x30:8Mx8 bit*/
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x30);
udelay(15);
if (XGINew_ReadWriteRest(22, 21, pVBInfo) == 1)
return;
else /* (0x31:12x8x2) 22bit + 1 rank */
xgifb_reg_set(pVBInfo->P3c4,
0x13,
0x31);
udelay(15);
}
}
break;
case XG27:
pVBInfo->ram_bus = 16; /* 16 bits */
pVBInfo->ram_channel = 1; /* Single channel */
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit*/
break;
case XG42:
/*
 * XG42 SR14 D[3]: Reserved
 *           D[2] = 1: Dual Channel
 *                = 0: Single Channel
 * It's different from the other XG40 series.
 */
if (XGINew_CheckFrequence(pVBInfo) == 1) { /* DDRII, DDR2x */
pVBInfo->ram_bus = 32; /* 32 bits */
pVBInfo->ram_channel = 2; /* 2 Channel */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x44);
if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
return;
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x34);
if (XGINew_ReadWriteRest(23, 22, pVBInfo) == 1)
return;
pVBInfo->ram_channel = 1; /* Single Channel */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x40);
if (XGINew_ReadWriteRest(23, 22, pVBInfo) == 1)
return;
else {
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x30);
}
} else { /* DDR */
pVBInfo->ram_bus = 64; /* 64 bits */
pVBInfo->ram_channel = 1; /* 1 channels */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x52);
if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
return;
else {
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x42);
}
}
break;
default: /* XG40 */
if (XGINew_CheckFrequence(pVBInfo) == 1) { /* DDRII */
pVBInfo->ram_bus = 32; /* 32 bits */
pVBInfo->ram_channel = 3;
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x4C);
if (XGINew_ReadWriteRest(25, 23, pVBInfo) == 1)
return;
pVBInfo->ram_channel = 2; /* 2 channels */
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x48);
if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
return;
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x3C);
if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1) {
pVBInfo->ram_channel = 3; /* 4 channels */
} else {
pVBInfo->ram_channel = 2; /* 2 channels */
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x38);
}
} else { /* DDR */
pVBInfo->ram_bus = 64; /* 64 bits */
pVBInfo->ram_channel = 2; /* 2 channels */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x5A);
if (XGINew_ReadWriteRest(25, 24, pVBInfo) == 1) {
return;
} else {
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x4A);
}
}
break;
}
}
static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
u8 i, size;
unsigned short memsize, start_addr;
const unsigned short (*dram_table)[2];
xgifb_reg_set(pVBInfo->P3c4, 0x15, 0x00); /* noninterleaving */
xgifb_reg_set(pVBInfo->P3c4, 0x1C, 0x00); /* nontiling */
XGINew_CheckChannel(HwDeviceExtension, pVBInfo);
if (HwDeviceExtension->jChipType >= XG20) {
dram_table = XGINew_DDRDRAM_TYPE20;
size = ARRAY_SIZE(XGINew_DDRDRAM_TYPE20);
start_addr = 5;
} else {
dram_table = XGINew_DDRDRAM_TYPE340;
size = ARRAY_SIZE(XGINew_DDRDRAM_TYPE340);
start_addr = 9;
}
for (i = 0; i < size; i++) {
/* SetDRAMSizingType */
xgifb_reg_and_or(pVBInfo->P3c4, 0x13, 0x80, dram_table[i][1]);
udelay(15); /* should delay 50 ns */
memsize = XGINew_SetDRAMSize20Reg(dram_table[i][0], pVBInfo);
if (memsize == 0)
continue;
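/*
 * Editor's note (not in the original): after the adjustment below,
 * (1 << memsize) appears to serve as a rough total-size estimate in
 * bytes: the per-rank log2 size in MB (hence the +20) scaled for the
 * channel count, which the next check compares against the reported
 * video memory size.
 */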
memsize += (pVBInfo->ram_channel - 2) + 20;
if ((HwDeviceExtension->ulVideoMemorySize - 1) <
(unsigned long) (1 << memsize))
continue;
if (XGINew_ReadWriteRest(memsize, start_addr, pVBInfo) == 1)
return 1;
}
return 0;
}
static void XGINew_SetDRAMSize_340(struct xgifb_video_info *xgifb_info,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short data;
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress;
XGISetModeNew(xgifb_info, HwDeviceExtension, 0x2e);
data = xgifb_reg_get(pVBInfo->P3c4, 0x21);
/* disable read cache */
xgifb_reg_set(pVBInfo->P3c4, 0x21, (unsigned short) (data & 0xDF));
XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
XGINew_DDRSizing340(HwDeviceExtension, pVBInfo);
data = xgifb_reg_get(pVBInfo->P3c4, 0x21);
/* enable read cache */
xgifb_reg_set(pVBInfo->P3c4, 0x21, (unsigned short) (data | 0x20));
}
static u8 *xgifb_copy_rom(struct pci_dev *dev, size_t *rom_size)
{
void __iomem *rom_address;
u8 *rom_copy;
rom_address = pci_map_rom(dev, rom_size);
if (rom_address == NULL)
return NULL;
rom_copy = vzalloc(XGIFB_ROM_SIZE);
if (rom_copy == NULL)
goto done;
*rom_size = min_t(size_t, *rom_size, XGIFB_ROM_SIZE);
memcpy_fromio(rom_copy, rom_address, *rom_size);
done:
pci_unmap_rom(dev, rom_address);
return rom_copy;
}
static void xgifb_read_vbios(struct pci_dev *pdev,
struct vb_device_info *pVBInfo)
{
struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
u8 *vbios;
unsigned long i;
unsigned char j;
struct XGI21_LVDSCapStruct *lvds;
size_t vbios_size;
int entry;
if (xgifb_info->chip != XG21)
return;
pVBInfo->IF_DEF_LVDS = 0;
vbios = xgifb_copy_rom(pdev, &vbios_size);
if (vbios == NULL) {
dev_err(&pdev->dev, "Video BIOS not available\n");
return;
}
if (vbios_size <= 0x65)
goto error;
/*
* The user can ignore the LVDS bit in the BIOS and force the display
* type.
*/
if (!(vbios[0x65] & 0x1) &&
(!xgifb_info->display2_force ||
xgifb_info->display2 != XGIFB_DISP_LCD)) {
vfree(vbios);
return;
}
if (vbios_size <= 0x317)
goto error;
i = vbios[0x316] | (vbios[0x317] << 8);
if (vbios_size <= i - 1)
goto error;
j = vbios[i - 1];
if (j == 0)
goto error;
if (j == 0xff)
j = 1;
/*
* Read the LVDS table index scratch register set by the BIOS.
*/
entry = xgifb_reg_get(xgifb_info->dev_info.P3d4, 0x36);
if (entry >= j)
entry = 0;
i += entry * 25;
lvds = &xgifb_info->lvds_data;
if (vbios_size <= i + 24)
goto error;
lvds->LVDS_Capability = vbios[i] | (vbios[i + 1] << 8);
lvds->LVDSHT = vbios[i + 2] | (vbios[i + 3] << 8);
lvds->LVDSVT = vbios[i + 4] | (vbios[i + 5] << 8);
lvds->LVDSHDE = vbios[i + 6] | (vbios[i + 7] << 8);
lvds->LVDSVDE = vbios[i + 8] | (vbios[i + 9] << 8);
lvds->LVDSHFP = vbios[i + 10] | (vbios[i + 11] << 8);
lvds->LVDSVFP = vbios[i + 12] | (vbios[i + 13] << 8);
lvds->LVDSHSYNC = vbios[i + 14] | (vbios[i + 15] << 8);
lvds->LVDSVSYNC = vbios[i + 16] | (vbios[i + 17] << 8);
lvds->VCLKData1 = vbios[i + 18];
lvds->VCLKData2 = vbios[i + 19];
lvds->PSC_S1 = vbios[i + 20];
lvds->PSC_S2 = vbios[i + 21];
lvds->PSC_S3 = vbios[i + 22];
lvds->PSC_S4 = vbios[i + 23];
lvds->PSC_S5 = vbios[i + 24];
vfree(vbios);
pVBInfo->IF_DEF_LVDS = 1;
return;
error:
dev_err(&pdev->dev, "Video BIOS corrupted\n");
vfree(vbios);
}
static void XGINew_ChkSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short tempbx = 0, temp, tempcx, CR3CData;
temp = xgifb_reg_get(pVBInfo->P3d4, 0x32);
if (temp & Monitor1Sense)
tempbx |= ActiveCRT1;
if (temp & LCDSense)
tempbx |= ActiveLCD;
if (temp & Monitor2Sense)
tempbx |= ActiveCRT2;
if (temp & TVSense) {
tempbx |= ActiveTV;
if (temp & AVIDEOSense)
tempbx |= (ActiveAVideo << 8);
if (temp & SVIDEOSense)
tempbx |= (ActiveSVideo << 8);
if (temp & SCARTSense)
tempbx |= (ActiveSCART << 8);
if (temp & HiTVSense)
tempbx |= (ActiveHiTV << 8);
if (temp & YPbPrSense)
tempbx |= (ActiveYPbPr << 8);
}
tempcx = xgifb_reg_get(pVBInfo->P3d4, 0x3d);
tempcx |= (xgifb_reg_get(pVBInfo->P3d4, 0x3e) << 8);
if (tempbx & tempcx) {
CR3CData = xgifb_reg_get(pVBInfo->P3d4, 0x3c);
if (!(CR3CData & DisplayDeviceFromCMOS))
tempcx = 0x1FF0;
} else {
tempcx = 0x1FF0;
}
tempbx &= tempcx;
xgifb_reg_set(pVBInfo->P3d4, 0x3d, (tempbx & 0x00FF));
xgifb_reg_set(pVBInfo->P3d4, 0x3e, ((tempbx & 0xFF00) >> 8));
}
static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short temp, tempcl = 0, tempch = 0, CR31Data, CR38Data;
temp = xgifb_reg_get(pVBInfo->P3d4, 0x3d);
temp |= xgifb_reg_get(pVBInfo->P3d4, 0x3e) << 8;
temp |= (xgifb_reg_get(pVBInfo->P3d4, 0x31) & (DriverMode >> 8)) << 8;
if (pVBInfo->IF_DEF_CRT2Monitor == 1) {
if (temp & ActiveCRT2)
tempcl = SetCRT2ToRAMDAC;
}
if (temp & ActiveLCD) {
tempcl |= SetCRT2ToLCD;
if (temp & DriverMode) {
if (temp & ActiveTV) {
tempch = SetToLCDA | EnableDualEdge;
temp ^= SetCRT2ToLCD;
if ((temp >> 8) & ActiveAVideo)
tempcl |= SetCRT2ToAVIDEO;
if ((temp >> 8) & ActiveSVideo)
tempcl |= SetCRT2ToSVIDEO;
if ((temp >> 8) & ActiveSCART)
tempcl |= SetCRT2ToSCART;
if (pVBInfo->IF_DEF_HiVision == 1) {
if ((temp >> 8) & ActiveHiTV)
tempcl |= SetCRT2ToHiVision;
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
if ((temp >> 8) & ActiveYPbPr)
tempch |= SetYPbPr;
}
}
}
} else {
if ((temp >> 8) & ActiveAVideo)
tempcl |= SetCRT2ToAVIDEO;
if ((temp >> 8) & ActiveSVideo)
tempcl |= SetCRT2ToSVIDEO;
if ((temp >> 8) & ActiveSCART)
tempcl |= SetCRT2ToSCART;
if (pVBInfo->IF_DEF_HiVision == 1) {
if ((temp >> 8) & ActiveHiTV)
tempcl |= SetCRT2ToHiVision;
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
if ((temp >> 8) & ActiveYPbPr)
tempch |= SetYPbPr;
}
}
tempcl |= SetSimuScanMode;
if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) || (temp & ActiveTV)
|| (temp & ActiveCRT2)))
tempcl ^= (SetSimuScanMode | SwitchCRT2);
if ((temp & ActiveLCD) && (temp & ActiveTV))
tempcl ^= (SetSimuScanMode | SwitchCRT2);
xgifb_reg_set(pVBInfo->P3d4, 0x30, tempcl);
CR31Data = xgifb_reg_get(pVBInfo->P3d4, 0x31);
CR31Data &= ~(SetNotSimuMode >> 8);
if (!(temp & ActiveCRT1))
CR31Data |= (SetNotSimuMode >> 8);
CR31Data &= ~(DisableCRT2Display >> 8);
if (!((temp & ActiveLCD) || (temp & ActiveTV) || (temp & ActiveCRT2)))
CR31Data |= (DisableCRT2Display >> 8);
xgifb_reg_set(pVBInfo->P3d4, 0x31, CR31Data);
CR38Data = xgifb_reg_get(pVBInfo->P3d4, 0x38);
CR38Data &= ~SetYPbPr;
CR38Data |= tempch;
xgifb_reg_set(pVBInfo->P3d4, 0x38, CR38Data);
}
static unsigned short XGINew_SenseLCD(struct xgi_hw_device_info
*HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short temp;
/* add lcd sense */
if (HwDeviceExtension->ulCRT2LCDType == LCD_UNKNOWN) {
return 0;
} else {
temp = (unsigned short) HwDeviceExtension->ulCRT2LCDType;
switch (HwDeviceExtension->ulCRT2LCDType) {
case LCD_INVALID:
case LCD_800x600:
case LCD_1024x768:
case LCD_1280x1024:
break;
case LCD_640x480:
case LCD_1024x600:
case LCD_1152x864:
case LCD_1280x960:
case LCD_1152x768:
temp = 0;
break;
case LCD_1400x1050:
case LCD_1280x768:
case LCD_1600x1200:
break;
case LCD_1920x1440:
case LCD_2048x1536:
temp = 0;
break;
default:
break;
}
xgifb_reg_and_or(pVBInfo->P3d4, 0x36, 0xF0, temp);
return 1;
}
}
static void XGINew_GetXG21Sense(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned char Temp;
if (pVBInfo->IF_DEF_LVDS) { /* For XG21 LVDS */
xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense);
/* LVDS on chip */
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
} else {
/* Enable GPIOA/B read */
xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x03, 0x03);
Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0xC0;
if (Temp == 0xC0) { /* DVI & DVO GPIOA/B pull high */
XGINew_SenseLCD(HwDeviceExtension, pVBInfo);
xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense);
/* Enable read GPIOF */
xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x20, 0x20);
Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x04;
if (!Temp)
xgifb_reg_and_or(pVBInfo->P3d4,
0x38,
~0xE0,
0x80); /* TMDS on chip */
else
xgifb_reg_and_or(pVBInfo->P3d4,
0x38,
~0xE0,
0xA0); /* Only DVO on chip */
/* Disable read GPIOF */
xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~0x20);
}
}
}
static void XGINew_GetXG27Sense(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned char Temp, bCR4A;
pVBInfo->IF_DEF_LVDS = 0;
bCR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
/* Enable GPIOA/B/C read */
xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x07, 0x07);
Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x07;
xgifb_reg_set(pVBInfo->P3d4, 0x4A, bCR4A);
if (Temp <= 0x02) {
/* LVDS setting */
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
xgifb_reg_set(pVBInfo->P3d4, 0x30, 0x21);
} else {
/* TMDS/DVO setting */
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xA0);
}
xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense);
}
static unsigned char GetXG21FPBits(struct vb_device_info *pVBInfo)
{
unsigned char CR38, CR4A, temp;
CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
/* enable GPIOE read */
xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x10, 0x10);
CR38 = xgifb_reg_get(pVBInfo->P3d4, 0x38);
temp = 0;
if ((CR38 & 0xE0) > 0x80) {
temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
temp &= 0x08;
temp >>= 3;
}
xgifb_reg_set(pVBInfo->P3d4, 0x4A, CR4A);
return temp;
}
static unsigned char GetXG27FPBits(struct vb_device_info *pVBInfo)
{
unsigned char CR4A, temp;
CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
/* enable GPIOA/B/C read */
xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x03, 0x03);
temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
if (temp <= 2)
temp &= 0x03;
else
temp = ((temp & 0x04) >> 1) | ((~temp) & 0x01);
xgifb_reg_set(pVBInfo->P3d4, 0x4A, CR4A);
return temp;
}
unsigned char XGIInitNew(struct pci_dev *pdev)
{
struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
struct xgi_hw_device_info *HwDeviceExtension = &xgifb_info->hw_info;
struct vb_device_info VBINF;
struct vb_device_info *pVBInfo = &VBINF;
unsigned char i, temp = 0, temp1;
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress;
pVBInfo->BaseAddr = xgifb_info->vga_base;
if (pVBInfo->FBAddr == NULL) {
dev_dbg(&pdev->dev, "pVBInfo->FBAddr == 0\n");
return 0;
}
if (pVBInfo->BaseAddr == 0) {
dev_dbg(&pdev->dev, "pVBInfo->BaseAddr == 0\n");
return 0;
}
outb(0x67, (pVBInfo->BaseAddr + 0x12)); /* 3c2 <- 67 ,ynlai */
pVBInfo->P3c4 = pVBInfo->BaseAddr + 0x14;
pVBInfo->P3d4 = pVBInfo->BaseAddr + 0x24;
pVBInfo->P3c0 = pVBInfo->BaseAddr + 0x10;
pVBInfo->P3ce = pVBInfo->BaseAddr + 0x1e;
pVBInfo->P3c2 = pVBInfo->BaseAddr + 0x12;
pVBInfo->P3ca = pVBInfo->BaseAddr + 0x1a;
pVBInfo->P3c6 = pVBInfo->BaseAddr + 0x16;
pVBInfo->P3c7 = pVBInfo->BaseAddr + 0x17;
pVBInfo->P3c8 = pVBInfo->BaseAddr + 0x18;
pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19;
pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A;
pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00;
pVBInfo->Part1Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_04;
pVBInfo->Part2Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_10;
pVBInfo->Part3Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_12;
pVBInfo->Part4Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14;
pVBInfo->Part5Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14 + 2;
if (HwDeviceExtension->jChipType < XG20)
/* Run XGI_GetVBType before InitTo330Pointer */
XGI_GetVBType(pVBInfo);
InitTo330Pointer(HwDeviceExtension->jChipType, pVBInfo);
xgifb_read_vbios(pdev, pVBInfo);
/* Openkey */
xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);
/* GetXG21Sense (GPIO) */
if (HwDeviceExtension->jChipType == XG21)
XGINew_GetXG21Sense(HwDeviceExtension, pVBInfo);
if (HwDeviceExtension->jChipType == XG27)
XGINew_GetXG27Sense(HwDeviceExtension, pVBInfo);
/* Reset Extended register */
for (i = 0x06; i < 0x20; i++)
xgifb_reg_set(pVBInfo->P3c4, i, 0);
for (i = 0x21; i <= 0x27; i++)
xgifb_reg_set(pVBInfo->P3c4, i, 0);
for (i = 0x31; i <= 0x3B; i++)
xgifb_reg_set(pVBInfo->P3c4, i, 0);
/* Auto overdrive for XG42 */
if (HwDeviceExtension->jChipType == XG42)
xgifb_reg_set(pVBInfo->P3c4, 0x3B, 0xC0);
for (i = 0x79; i <= 0x7C; i++)
xgifb_reg_set(pVBInfo->P3d4, i, 0);
if (HwDeviceExtension->jChipType >= XG20)
xgifb_reg_set(pVBInfo->P3d4, 0x97, pVBInfo->XGINew_CR97);
/* SetDefExt1Regs begin */
xgifb_reg_set(pVBInfo->P3c4, 0x07, XGI330_SR07);
if (HwDeviceExtension->jChipType == XG27) {
xgifb_reg_set(pVBInfo->P3c4, 0x40, XG27_SR40);
xgifb_reg_set(pVBInfo->P3c4, 0x41, XG27_SR41);
}
xgifb_reg_set(pVBInfo->P3c4, 0x11, 0x0F);
xgifb_reg_set(pVBInfo->P3c4, 0x1F, XGI330_SR1F);
/* Frame buffer can read/write SR20 */
xgifb_reg_set(pVBInfo->P3c4, 0x20, 0xA0);
/* H/W request for slow corner chip */
xgifb_reg_set(pVBInfo->P3c4, 0x36, 0x70);
if (HwDeviceExtension->jChipType == XG27)
xgifb_reg_set(pVBInfo->P3c4, 0x36, XG27_SR36);
if (HwDeviceExtension->jChipType < XG20) {
u32 Temp;
/* Set AGP customize registers (in SetDefAGPRegs) Start */
for (i = 0x47; i <= 0x4C; i++)
xgifb_reg_set(pVBInfo->P3d4,
i,
XGI340_AGPReg[i - 0x47]);
for (i = 0x70; i <= 0x71; i++)
xgifb_reg_set(pVBInfo->P3d4,
i,
XGI340_AGPReg[6 + i - 0x70]);
for (i = 0x74; i <= 0x77; i++)
xgifb_reg_set(pVBInfo->P3d4,
i,
XGI340_AGPReg[8 + i - 0x74]);
pci_read_config_dword(pdev, 0x50, &Temp);
Temp >>= 20;
Temp &= 0xF;
if (Temp == 1)
xgifb_reg_set(pVBInfo->P3d4, 0x48, 0x20); /* CR48 */
} /* != XG20 */
/* Set PCI */
xgifb_reg_set(pVBInfo->P3c4, 0x23, XGI330_SR23);
xgifb_reg_set(pVBInfo->P3c4, 0x24, XGI330_SR24);
xgifb_reg_set(pVBInfo->P3c4, 0x25, 0);
if (HwDeviceExtension->jChipType < XG20) {
/* Set VB */
XGI_UnLockCRT2(HwDeviceExtension, pVBInfo);
/* disable VideoCapture */
xgifb_reg_and_or(pVBInfo->Part0Port, 0x3F, 0xEF, 0x00);
xgifb_reg_set(pVBInfo->Part1Port, 0x00, 0x00);
/* chk if BCLK>=100MHz */
temp1 = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x7B);
temp = (unsigned char) ((temp1 >> 4) & 0x0F);
xgifb_reg_set(pVBInfo->Part1Port,
0x02, XGI330_CRT2Data_1_2);
xgifb_reg_set(pVBInfo->Part1Port, 0x2E, 0x08); /* use VB */
} /* != XG20 */
xgifb_reg_set(pVBInfo->P3c4, 0x27, 0x1F);
if ((HwDeviceExtension->jChipType == XG42) &&
XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo) != 0) {
/* Not DDR */
xgifb_reg_set(pVBInfo->P3c4,
0x31,
(XGI330_SR31 & 0x3F) | 0x40);
xgifb_reg_set(pVBInfo->P3c4,
0x32,
(XGI330_SR32 & 0xFC) | 0x01);
} else {
xgifb_reg_set(pVBInfo->P3c4, 0x31, XGI330_SR31);
xgifb_reg_set(pVBInfo->P3c4, 0x32, XGI330_SR32);
}
xgifb_reg_set(pVBInfo->P3c4, 0x33, XGI330_SR33);
if (HwDeviceExtension->jChipType < XG20) {
if (XGI_BridgeIsOn(pVBInfo) == 1) {
if (pVBInfo->IF_DEF_LVDS == 0) {
xgifb_reg_set(pVBInfo->Part2Port, 0x00, 0x1C);
xgifb_reg_set(pVBInfo->Part4Port,
0x0D, XGI330_CRT2Data_4_D);
xgifb_reg_set(pVBInfo->Part4Port,
0x0E, XGI330_CRT2Data_4_E);
xgifb_reg_set(pVBInfo->Part4Port,
0x10, XGI330_CRT2Data_4_10);
xgifb_reg_set(pVBInfo->Part4Port, 0x0F, 0x3F);
}
XGI_LockCRT2(HwDeviceExtension, pVBInfo);
}
} /* != XG20 */
XGI_SenseCRT1(pVBInfo);
if (HwDeviceExtension->jChipType == XG21) {
xgifb_reg_and_or(pVBInfo->P3d4,
0x32,
~Monitor1Sense,
Monitor1Sense); /* Z9 default has CRT */
temp = GetXG21FPBits(pVBInfo);
xgifb_reg_and_or(pVBInfo->P3d4, 0x37, ~0x01, temp);
}
if (HwDeviceExtension->jChipType == XG27) {
xgifb_reg_and_or(pVBInfo->P3d4,
0x32,
~Monitor1Sense,
Monitor1Sense); /* Z9 default has CRT */
temp = GetXG27FPBits(pVBInfo);
xgifb_reg_and_or(pVBInfo->P3d4, 0x37, ~0x03, temp);
}
pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
XGINew_SetDRAMDefaultRegister340(HwDeviceExtension,
pVBInfo->P3d4,
pVBInfo);
XGINew_SetDRAMSize_340(xgifb_info, HwDeviceExtension, pVBInfo);
xgifb_reg_set(pVBInfo->P3c4, 0x22, 0xfa);
xgifb_reg_set(pVBInfo->P3c4, 0x21, 0xa3);
XGINew_ChkSenseStatus(HwDeviceExtension, pVBInfo);
XGINew_SetModeScratch(HwDeviceExtension, pVBInfo);
xgifb_reg_set(pVBInfo->P3d4, 0x8c, 0x87);
return 1;
} /* end of init */
| gpl-2.0 |
Shimejing/linux | drivers/acpi/acpica/tbinstal.c | 360 | 15869 | /******************************************************************************
*
* Module Name: tbinstal - ACPI table installation and removal
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbinstal")
/* Local prototypes */
static u8
acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index);
/*******************************************************************************
*
* FUNCTION: acpi_tb_compare_tables
*
* PARAMETERS: table_desc - Table 1 descriptor to be compared
* table_index - Index of table 2 to be compared
*
* RETURN: TRUE if both tables are identical.
*
* DESCRIPTION: This function compares a table with another table that has
* already been installed in the root table list.
*
******************************************************************************/
static u8
acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
{
acpi_status status = AE_OK;
u8 is_identical;
struct acpi_table_header *table;
u32 table_length;
u8 table_flags;
status =
acpi_tb_acquire_table(&acpi_gbl_root_table_list.tables[table_index],
&table, &table_length, &table_flags);
if (ACPI_FAILURE(status)) {
return (FALSE);
}
/*
* Check for a table match on the entire table length,
* not just the header.
*/
is_identical = (u8)((table_desc->length != table_length ||
memcmp(table_desc->pointer, table, table_length)) ?
FALSE : TRUE);
/* Release the acquired table */
acpi_tb_release_table(table, table_length, table_flags);
return (is_identical);
}
/*******************************************************************************
*
* FUNCTION: acpi_tb_install_table_with_override
*
* PARAMETERS: new_table_desc - New table descriptor to install
* override - Whether override should be performed
* table_index - Where the table index is returned
*
* RETURN: None
*
* DESCRIPTION: Install an ACPI table into the global data structure. The
* table override mechanism is called to allow the host
* OS to replace any table before it is installed in the root
* table array.
*
******************************************************************************/
void
acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
u8 override, u32 *table_index)
{
u32 i;
acpi_status status;
status = acpi_tb_get_next_table_descriptor(&i, NULL);
if (ACPI_FAILURE(status)) {
return;
}
/*
* ACPI Table Override:
*
* Before we install the table, let the host OS override it with a new
* one if desired. Any table within the RSDT/XSDT can be replaced,
* including the DSDT which is pointed to by the FADT.
*/
if (override) {
acpi_tb_override_table(new_table_desc);
}
acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.tables[i],
new_table_desc->address,
new_table_desc->flags,
new_table_desc->pointer);
acpi_tb_print_table_header(new_table_desc->address,
new_table_desc->pointer);
/* This synchronizes acpi_gbl_dsdt_index */
*table_index = i;
/* Set the global integer width (based upon revision of the DSDT) */
if (i == acpi_gbl_dsdt_index) {
acpi_ut_set_integer_width(new_table_desc->pointer->revision);
}
}
/*******************************************************************************
*
* FUNCTION: acpi_tb_install_fixed_table
*
* PARAMETERS: address - Physical address of DSDT or FACS
* signature - Table signature, NULL if no need to
* match
* table_index - Where the table index is returned
*
* RETURN: Status
*
* DESCRIPTION: Install a fixed ACPI table (DSDT/FACS) into the global data
* structure.
*
******************************************************************************/
acpi_status
acpi_tb_install_fixed_table(acpi_physical_address address,
char *signature, u32 *table_index)
{
struct acpi_table_desc new_table_desc;
acpi_status status;
ACPI_FUNCTION_TRACE(tb_install_fixed_table);
if (!address) {
ACPI_ERROR((AE_INFO,
"Null physical address for ACPI table [%s]",
signature));
return (AE_NO_MEMORY);
}
/* Fill a table descriptor for validation */
status = acpi_tb_acquire_temp_table(&new_table_desc, address,
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
"Could not acquire table length at %8.8X%8.8X",
ACPI_FORMAT_UINT64(address)));
return_ACPI_STATUS(status);
}
/* Validate and verify a table before installation */
status = acpi_tb_verify_temp_table(&new_table_desc, signature);
if (ACPI_FAILURE(status)) {
goto release_and_exit;
}
/* Add the table to the global root table list */
acpi_tb_install_table_with_override(&new_table_desc, TRUE, table_index);
release_and_exit:
/* Release the temporary table descriptor */
acpi_tb_release_temp_table(&new_table_desc);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_tb_install_standard_table
*
* PARAMETERS: address - Address of the table (might be a virtual
* address depending on the table_flags)
* flags - Flags for the table
* reload - Whether reload should be performed
* override - Whether override should be performed
* table_index - Where the table index is returned
*
* RETURN: Status
*
* DESCRIPTION: This function is called to install an ACPI table that is
* neither DSDT nor FACS (a "standard" table.)
* When this function is called by "Load" or "LoadTable" opcodes,
* or by acpi_load_table() API, the "Reload" parameter is set.
 * After successfully returning from this function, the table is
* "INSTALLED" but not "VALIDATED".
*
******************************************************************************/
acpi_status
acpi_tb_install_standard_table(acpi_physical_address address,
u8 flags,
u8 reload, u8 override, u32 *table_index)
{
u32 i;
acpi_status status = AE_OK;
struct acpi_table_desc new_table_desc;
ACPI_FUNCTION_TRACE(tb_install_standard_table);
/* Acquire a temporary table descriptor for validation */
status = acpi_tb_acquire_temp_table(&new_table_desc, address, flags);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
"Could not acquire table length at %8.8X%8.8X",
ACPI_FORMAT_UINT64(address)));
return_ACPI_STATUS(status);
}
/*
* Optionally do not load any SSDTs from the RSDT/XSDT. This can
* be useful for debugging ACPI problems on some machines.
*/
if (!reload &&
acpi_gbl_disable_ssdt_table_install &&
ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) {
ACPI_INFO((AE_INFO,
"Ignoring installation of %4.4s at %8.8X%8.8X",
new_table_desc.signature.ascii,
ACPI_FORMAT_UINT64(address)));
goto release_and_exit;
}
/* Validate and verify a table before installation */
status = acpi_tb_verify_temp_table(&new_table_desc, NULL);
if (ACPI_FAILURE(status)) {
goto release_and_exit;
}
if (reload) {
/*
* Validate the incoming table signature.
*
* 1) Originally, we checked the table signature for "SSDT" or "PSDT".
* 2) We added support for OEMx tables, signature "OEM".
* 3) Valid tables were encountered with a null signature, so we just
* gave up on validating the signature, (05/2008).
* 4) We encountered non-AML tables such as the MADT, which caused
* interpreter errors and kernel faults. So now, we once again allow
* only "SSDT", "OEMx", and now, also a null signature. (05/2011).
*/
if ((new_table_desc.signature.ascii[0] != 0x00) &&
(!ACPI_COMPARE_NAME
(&new_table_desc.signature, ACPI_SIG_SSDT))
&& (strncmp(new_table_desc.signature.ascii, "OEM", 3))) {
ACPI_BIOS_ERROR((AE_INFO,
"Table has invalid signature [%4.4s] (0x%8.8X), "
"must be SSDT or OEMx",
acpi_ut_valid_acpi_name(new_table_desc.
signature.
ascii) ?
new_table_desc.signature.
ascii : "????",
new_table_desc.signature.integer));
status = AE_BAD_SIGNATURE;
goto release_and_exit;
}
/* Check if table is already registered */
for (i = 0; i < acpi_gbl_root_table_list.current_table_count;
++i) {
/*
* Check for a table match on the entire table length,
* not just the header.
*/
if (!acpi_tb_compare_tables(&new_table_desc, i)) {
continue;
}
/*
* Note: the current mechanism does not unregister a table if it is
* dynamically unloaded. The related namespace entries are deleted,
* but the table remains in the root table list.
*
* The assumption here is that the number of different tables that
* will be loaded is actually small, and there is minimal overhead
* in just keeping the table in case it is needed again.
*
* If this assumption changes in the future (perhaps on large
* machines with many table load/unload operations), tables will
* need to be unregistered when they are unloaded, and slots in the
* root table list should be reused when empty.
*/
if (acpi_gbl_root_table_list.tables[i].
flags & ACPI_TABLE_IS_LOADED) {
/* Table is still loaded, this is an error */
status = AE_ALREADY_EXISTS;
goto release_and_exit;
} else {
/*
* Table was unloaded, allow it to be reloaded.
* As we are going to return AE_OK to the caller, we should
* take the responsibility of freeing the input descriptor.
* Refill the input descriptor to ensure
* acpi_tb_install_table_with_override() can be called again to
* indicate the re-installation.
*/
acpi_tb_uninstall_table(&new_table_desc);
*table_index = i;
return_ACPI_STATUS(AE_OK);
}
}
}
/* Add the table to the global root table list */
acpi_tb_install_table_with_override(&new_table_desc, override,
table_index);
release_and_exit:
/* Release the temporary table descriptor */
acpi_tb_release_temp_table(&new_table_desc);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_tb_override_table
*
* PARAMETERS: old_table_desc - Validated table descriptor to be
* overridden
*
* RETURN: None
*
* DESCRIPTION: Attempt table override by calling the OSL override functions.
* Note: If the table is overridden, then the entire new table
* is acquired and returned by this function.
* Before/after invocation, the table descriptor is in a state
* that is "VALIDATED".
*
******************************************************************************/
void acpi_tb_override_table(struct acpi_table_desc *old_table_desc)
{
acpi_status status;
char *override_type;
struct acpi_table_desc new_table_desc;
struct acpi_table_header *table;
acpi_physical_address address;
u32 length;
/* (1) Attempt logical override (returns a logical address) */
status = acpi_os_table_override(old_table_desc->pointer, &table);
if (ACPI_SUCCESS(status) && table) {
acpi_tb_acquire_temp_table(&new_table_desc,
ACPI_PTR_TO_PHYSADDR(table),
ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL);
override_type = "Logical";
goto finish_override;
}
/* (2) Attempt physical override (returns a physical address) */
status = acpi_os_physical_table_override(old_table_desc->pointer,
&address, &length);
if (ACPI_SUCCESS(status) && address && length) {
acpi_tb_acquire_temp_table(&new_table_desc, address,
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL);
override_type = "Physical";
goto finish_override;
}
return; /* There was no override */
finish_override:
/* Validate and verify a table before overriding */
status = acpi_tb_verify_temp_table(&new_table_desc, NULL);
if (ACPI_FAILURE(status)) {
return;
}
ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X"
" %s table override, new table: 0x%8.8X%8.8X",
old_table_desc->signature.ascii,
ACPI_FORMAT_UINT64(old_table_desc->address),
override_type, ACPI_FORMAT_UINT64(new_table_desc.address)));
/* We can now uninstall the original table */
acpi_tb_uninstall_table(old_table_desc);
/*
* Replace the original table descriptor and keep its state as
* "VALIDATED".
*/
acpi_tb_init_table_descriptor(old_table_desc, new_table_desc.address,
new_table_desc.flags,
new_table_desc.pointer);
acpi_tb_validate_temp_table(old_table_desc);
/* Release the temporary table descriptor */
acpi_tb_release_temp_table(&new_table_desc);
}
/*******************************************************************************
*
* FUNCTION: acpi_tb_uninstall_table
*
* PARAMETERS: table_desc - Table descriptor
*
* RETURN: None
*
* DESCRIPTION: Delete one internal ACPI table
*
******************************************************************************/
void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc)
{
ACPI_FUNCTION_TRACE(tb_uninstall_table);
/* Table must be installed */
if (!table_desc->address) {
return_VOID;
}
acpi_tb_invalidate_table(table_desc);
if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL) {
ACPI_FREE(ACPI_PHYSADDR_TO_PTR(table_desc->address));
}
table_desc->address = ACPI_PTR_TO_PHYSADDR(NULL);
return_VOID;
}
| gpl-2.0 |
froggy666uk/Froggy_SensMod_CM10.1 | mm/memory-failure.c | 616 | 40340 | /*
* Copyright (C) 2008, 2009 Intel Corporation
* Authors: Andi Kleen, Fengguang Wu
*
* This software may be redistributed and/or modified under the terms of
* the GNU General Public License ("GPL") version 2 only as published by the
* Free Software Foundation.
*
* High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or cache
* failure.
*
 * In addition there is a "soft offline" entry point that allows taking
 * suspicious but not-yet-corrupted pages out of use without killing anything.
*
* Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
* other VM users, because memory failures could happen anytime and
* anywhere. This could violate some of their assumptions. This is why
* this code has to be extremely careful. Generally it tries to use
* normal locking rules, as in get the standard locks, even if that means
* the error handling takes potentially a long time.
*
* There are several operations here with exponential complexity because
* of unsuitable VM data structures. For example the operation to map back
* from RMAP chains to processes has to walk the complete process list and
 * has nonlinear complexity in the number of processes. But since memory corruptions
* are rare we hope to get away with this. This avoids impacting the core
* VM.
*/
/*
* Notebook:
* - hugetlb needs more code
* - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
* - pass bad pages to kdump next kernel
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include "internal.h"
int sysctl_memory_failure_early_kill __read_mostly = 0;
int sysctl_memory_failure_recovery __read_mostly = 1;
atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
static int hwpoison_filter_dev(struct page *p)
{
struct address_space *mapping;
dev_t dev;
if (hwpoison_filter_dev_major == ~0U &&
hwpoison_filter_dev_minor == ~0U)
return 0;
/*
* page_mapping() does not accept slab pages.
*/
if (PageSlab(p))
return -EINVAL;
mapping = page_mapping(p);
if (mapping == NULL || mapping->host == NULL)
return -EINVAL;
dev = mapping->host->i_sb->s_dev;
if (hwpoison_filter_dev_major != ~0U &&
hwpoison_filter_dev_major != MAJOR(dev))
return -EINVAL;
if (hwpoison_filter_dev_minor != ~0U &&
hwpoison_filter_dev_minor != MINOR(dev))
return -EINVAL;
return 0;
}
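/*
 * Editor's note (not in the original): hwpoison_filter_flags() below lets a
 * page through only when (stable_page_flags(p) & hwpoison_filter_flags_mask)
 * equals hwpoison_filter_flags_value. For example, setting both the mask and
 * the value to (1 << KPF_LRU) restricts injection to pages on the LRU.
 */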
static int hwpoison_filter_flags(struct page *p)
{
if (!hwpoison_filter_flags_mask)
return 0;
if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
hwpoison_filter_flags_value)
return 0;
else
return -EINVAL;
}
/*
* This allows stress tests to limit test scope to a collection of tasks
* by putting them under some memcg. This prevents killing unrelated/important
* processes such as /sbin/init. Note that the target task may share clean
* pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Finally, due to race conditions this filter
* can only guarantee that the page either belongs to the memcg tasks, or is
* a freed page.
*/
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
struct mem_cgroup *mem;
struct cgroup_subsys_state *css;
unsigned long ino;
if (!hwpoison_filter_memcg)
return 0;
mem = try_get_mem_cgroup_from_page(p);
if (!mem)
return -EINVAL;
css = mem_cgroup_css(mem);
/* root_mem_cgroup has NULL dentries */
if (!css->cgroup->dentry)
return -EINVAL;
ino = css->cgroup->dentry->d_inode->i_ino;
css_put(css);
if (ino != hwpoison_filter_memcg)
return -EINVAL;
return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif
int hwpoison_filter(struct page *p)
{
if (!hwpoison_filter_enable)
return 0;
if (hwpoison_filter_dev(p))
return -EINVAL;
if (hwpoison_filter_flags(p))
return -EINVAL;
if (hwpoison_filter_task(p))
return -EINVAL;
return 0;
}
#else
int hwpoison_filter(struct page *p)
{
return 0;
}
#endif
EXPORT_SYMBOL_GPL(hwpoison_filter);
/*
* Send all the processes that have the page mapped an ``action optional''
* signal.
*/
static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
unsigned long pfn, struct page *page)
{
struct siginfo si;
int ret;
printk(KERN_ERR
"MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
pfn, t->comm, t->pid);
si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_MCEERR_AO;
si.si_addr = (void *)addr;
#ifdef __ARCH_SI_TRAPNO
si.si_trapno = trapno;
#endif
si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
/*
* Don't use force here, it's convenient if the signal
* can be temporarily blocked.
* This could cause a loop when the user sets SIGBUS
* to SIG_IGN, but hopefully no one will do that?
*/
ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */
if (ret < 0)
printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
t->comm, t->pid, ret);
return ret;
}
/*
* When an unknown page type is encountered, drain as many buffers as possible
* in the hope of turning the page into an LRU or free page, which we can handle.
*/
void shake_page(struct page *p, int access)
{
if (!PageSlab(p)) {
lru_add_drain_all();
if (PageLRU(p))
return;
drain_all_pages();
if (PageLRU(p) || is_free_buddy_page(p))
return;
}
/*
* Only call shrink_slab here (which would also shrink other caches) if
* access is not potentially fatal.
*/
if (access) {
int nr;
do {
struct shrink_control shrink = {
.gfp_mask = GFP_KERNEL,
};
nr = shrink_slab(&shrink, 1000, 1000);
if (page_count(p) == 1)
break;
} while (nr > 10);
}
}
EXPORT_SYMBOL_GPL(shake_page);
/*
* Kill all processes that have a poisoned page mapped and then isolate
* the page.
*
* General strategy:
* Find all processes having the page mapped and kill them.
* But we keep a page reference around so that the page is not
* actually freed yet.
* Then stash the page away
*
* There's no convenient way to get back to mapped processes
* from the VMAs. So do a brute-force search over all
* running processes.
*
* Remember that machine checks are not common (or rather
* if they are common you have other problems), so this shouldn't
* be a performance issue.
*
* Also there are some races possible while we get from the
* error detection to actually handle it.
*/
struct to_kill {
struct list_head nd;
struct task_struct *tsk;
unsigned long addr;
char addr_valid;
};
/*
* Failure handling: if we can't find or can't kill a process there's
* not much we can do. We just print a message and ignore otherwise.
*/
/*
* Schedule a process for later kill.
* Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
* TBD would GFP_NOIO be enough?
*/
static void add_to_kill(struct task_struct *tsk, struct page *p,
struct vm_area_struct *vma,
struct list_head *to_kill,
struct to_kill **tkc)
{
struct to_kill *tk;
if (*tkc) {
tk = *tkc;
*tkc = NULL;
} else {
tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
if (!tk) {
printk(KERN_ERR
"MCE: Out of memory while machine check handling\n");
return;
}
}
tk->addr = page_address_in_vma(p, vma);
tk->addr_valid = 1;
/*
* In theory we don't have to kill when the page was
* munmapped. But it could also be a mremap. Since that's
* likely very rare, kill anyway just out of paranoia, but use
* a SIGKILL because the error is not contained anymore.
*/
if (tk->addr == -EFAULT) {
pr_info("MCE: Unable to find user space address %lx in %s\n",
page_to_pfn(p), tsk->comm);
tk->addr_valid = 0;
}
get_task_struct(tsk);
tk->tsk = tsk;
list_add_tail(&tk->nd, to_kill);
}
/*
* Kill the processes that have been collected earlier.
*
* Only do anything when DOIT is set; otherwise just free the list
* (this is used for clean pages which do not need killing).
* Also, when FAIL is set, do a force kill because something went
* wrong earlier.
*/
static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
int fail, struct page *page, unsigned long pfn)
{
struct to_kill *tk, *next;
list_for_each_entry_safe (tk, next, to_kill, nd) {
if (doit) {
/*
* In case something went wrong with munmapping
* make sure the process doesn't catch the
* signal and then access the memory. Just kill it.
*/
if (fail || tk->addr_valid == 0) {
printk(KERN_ERR
"MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
pfn, tk->tsk->comm, tk->tsk->pid);
force_sig(SIGKILL, tk->tsk);
}
/*
* In theory the process could have mapped
* something else on the address in-between. We could
* check for that, but we need to tell the
* process anyway.
*/
else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
pfn, page) < 0)
printk(KERN_ERR
"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
pfn, tk->tsk->comm, tk->tsk->pid);
}
put_task_struct(tk->tsk);
kfree(tk);
}
}
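/*
 * Decide whether a task wants to be killed early: the per-process
 * PF_MCE_EARLY setting overrides the global sysctl, and kernel threads
 * (no mm) are skipped.
 */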
static int task_early_kill(struct task_struct *tsk)
{
if (!tsk->mm)
return 0;
if (tsk->flags & PF_MCE_PROCESS)
return !!(tsk->flags & PF_MCE_EARLY);
return sysctl_memory_failure_early_kill;
}
/*
* Collect processes when the error hit an anonymous page.
*/
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
struct to_kill **tkc)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
struct anon_vma *av;
av = page_lock_anon_vma(page);
if (av == NULL) /* Not actually mapped anymore */
return;
read_lock(&tasklist_lock);
for_each_process (tsk) {
struct anon_vma_chain *vmac;
if (!task_early_kill(tsk))
continue;
list_for_each_entry(vmac, &av->head, same_anon_vma) {
vma = vmac->vma;
if (!page_mapped_in_vma(page, vma))
continue;
if (vma->vm_mm == tsk->mm)
add_to_kill(tsk, page, vma, to_kill, tkc);
}
}
read_unlock(&tasklist_lock);
page_unlock_anon_vma(av);
}
/*
* Collect processes when the error hit a file mapped page.
*/
static void collect_procs_file(struct page *page, struct list_head *to_kill,
struct to_kill **tkc)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
struct prio_tree_iter iter;
struct address_space *mapping = page->mapping;
mutex_lock(&mapping->i_mmap_mutex);
read_lock(&tasklist_lock);
for_each_process(tsk) {
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
if (!task_early_kill(tsk))
continue;
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
pgoff) {
/*
* Send early kill signal to tasks where a vma covers
* the page but the corrupted page is not necessarily
* mapped in its pte.
* Assume applications who requested early kill want
* to be informed of all such data corruptions.
*/
if (vma->vm_mm == tsk->mm)
add_to_kill(tsk, page, vma, to_kill, tkc);
}
}
read_unlock(&tasklist_lock);
mutex_unlock(&mapping->i_mmap_mutex);
}
/*
* Collect the processes who have the corrupted page mapped to kill.
* This is done in two steps for locking reasons.
* First preallocate one tokill structure outside the spin locks,
* so that we can kill at least one process reasonably reliably.
*/
static void collect_procs(struct page *page, struct list_head *tokill)
{
struct to_kill *tk;
if (!page->mapping)
return;
tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
if (!tk)
return;
if (PageAnon(page))
collect_procs_anon(page, tokill, &tk);
else
collect_procs_file(page, tokill, &tk);
kfree(tk);
}
/*
* Error handlers for various types of pages.
*/
enum outcome {
IGNORED, /* Error: cannot be handled */
FAILED, /* Error: handling failed */
DELAYED, /* Will be handled later */
RECOVERED, /* Successfully recovered */
};
static const char *action_name[] = {
[IGNORED] = "Ignored",
[FAILED] = "Failed",
[DELAYED] = "Delayed",
[RECOVERED] = "Recovered",
};
/*
* XXX: It is possible that a page is isolated from LRU cache,
* and then kept in the swap cache, or fails to be removed from the page cache.
* The page count will stop it from being freed by unpoison.
* Stress tests should be aware of this memory leak problem.
*/
static int delete_from_lru_cache(struct page *p)
{
if (!isolate_lru_page(p)) {
/*
* Clear sensible page flags, so that the buddy system won't
* complain when the page is unpoison-and-freed.
*/
ClearPageActive(p);
ClearPageUnevictable(p);
/*
* drop the page count elevated by isolate_lru_page()
*/
page_cache_release(p);
return 0;
}
return -EIO;
}
/*
* Error hit kernel page.
* Do nothing; try to be lucky and not touch the page. For a few cases we
* could be more sophisticated.
*/
static int me_kernel(struct page *p, unsigned long pfn)
{
return IGNORED;
}
/*
* Page in unknown state. Do nothing.
*/
static int me_unknown(struct page *p, unsigned long pfn)
{
printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
return FAILED;
}
/*
* Clean (or cleaned) page cache page.
*/
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
int err;
int ret = FAILED;
struct address_space *mapping;
delete_from_lru_cache(p);
/*
* For anonymous pages we're done; the only reference left
* should be the one m_f() holds.
*/
if (PageAnon(p))
return RECOVERED;
/*
* Now truncate the page in the page cache. This is really
* more like a "temporary hole punch".
* Don't do this for block devices when someone else
* has a reference, because it could be file system metadata
* and that's not safe to truncate.
*/
mapping = page_mapping(p);
if (!mapping) {
/*
* Page has been torn down in the meantime.
*/
return FAILED;
}
/*
* Truncation is a bit tricky. Enable it per file system for now.
*
* Open: to take i_mutex or not for this? Right now we don't.
*/
if (mapping->a_ops->error_remove_page) {
err = mapping->a_ops->error_remove_page(mapping, p);
if (err != 0) {
printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
pfn, err);
} else if (page_has_private(p) &&
!try_to_release_page(p, GFP_NOIO)) {
pr_info("MCE %#lx: failed to release buffers\n", pfn);
} else {
ret = RECOVERED;
}
} else {
/*
* If the file system doesn't support it, just invalidate.
* This fails on dirty pages or anything with private data attached.
*/
if (invalidate_inode_page(p))
ret = RECOVERED;
else
printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
pfn);
}
return ret;
}
/*
* Dirty pagecache page.
* Issues: when the error hit a hole page the error is not properly
* propagated.
*/
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
struct address_space *mapping = page_mapping(p);
SetPageError(p);
/* TBD: print more information about the file. */
if (mapping) {
/*
* IO error will be reported by write(), fsync(), etc.
* who check the mapping.
* This way the application knows that something went
* wrong with its dirty file data.
*
* There's one open issue:
*
* The EIO will be only reported on the next IO
* operation and then cleared through the IO map.
* Normally Linux has two mechanisms to pass IO errors:
* first through the AS_EIO flag in the address space
* and then through the PageError flag in the page.
* Since we drop pages on memory failure handling the
* only mechanism open to use is through AS_EIO.
*
* This has the disadvantage that it gets cleared on
* the first operation that returns an error, while
* the PageError bit is more sticky and only cleared
* when the page is reread or dropped. If an
* application assumes it will always get error on
* fsync, but does other operations on the fd before
* and the page is dropped between then the error
* will not be properly reported.
*
* This can already happen even without hwpoisoned
* pages: first on metadata IO errors (which only
* report through AS_EIO) or when the page is dropped
* at the wrong time.
*
* So right now we assume that the application DTRT on
* the first EIO, but we're not worse than other parts
* of the kernel.
*/
mapping_set_error(mapping, EIO);
}
return me_pagecache_clean(p, pfn);
}
/*
* Clean and dirty swap cache.
*
* Dirty swap cache page is tricky to handle. The page could live both in page
* cache and swap cache (i.e. the page is freshly swapped in). So it could be
* referenced concurrently by 2 types of PTEs:
* normal PTEs and swap PTEs. We try to handle them consistently by calling
* try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
* and then
* - clear dirty bit to prevent IO
* - remove from LRU
* - but keep in the swap cache, so that when we return to it on
* a later page fault, we know the application is accessing
* corrupted data and shall be killed (we installed simple
* interception code in do_swap_page to catch it).
*
* Clean swap cache pages can be directly isolated. A later page fault will
* bring in the known good data from disk.
*/
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
ClearPageDirty(p);
/* Trigger EIO in shmem: */
ClearPageUptodate(p);
if (!delete_from_lru_cache(p))
return DELAYED;
else
return FAILED;
}
static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
delete_from_swap_cache(p);
if (!delete_from_lru_cache(p))
return RECOVERED;
else
return FAILED;
}
/*
* Huge pages. Needs work.
* Issues:
* - Error on hugepage is contained in hugepage unit (not in raw page unit.)
* To narrow down kill region to one page, we need to break up pmd.
*/
static int me_huge_page(struct page *p, unsigned long pfn)
{
int res = 0;
struct page *hpage = compound_head(p);
/*
* We can safely recover from error on free or reserved (i.e.
* not in-use) hugepage by dequeuing it from freelist.
* To check whether a hugepage is in-use or not, we can't use
* page->lru because it can be used in other hugepage operations,
* such as __unmap_hugepage_range() and gather_surplus_pages().
* So instead we use page_mapping() and PageAnon().
* We assume that this function is called with page lock held,
* so there is no race between isolation and mapping/unmapping.
*/
if (!(page_mapping(hpage) || PageAnon(hpage))) {
res = dequeue_hwpoisoned_huge_page(hpage);
if (!res)
return RECOVERED;
}
return DELAYED;
}
/*
* Various page states we can handle.
*
* A page state is defined by its current page->flags bits.
* The table matches them in order and calls the right handler.
*
* This is quite tricky because we can access page at any time
* in its live cycle, so all accesses have to be extremely careful.
*
* This is not complete. More states could be added.
* For any missing state don't attempt recovery.
*/
#define dirty (1UL << PG_dirty)
#define sc (1UL << PG_swapcache)
#define unevict (1UL << PG_unevictable)
#define mlock (1UL << PG_mlocked)
#define writeback (1UL << PG_writeback)
#define lru (1UL << PG_lru)
#define swapbacked (1UL << PG_swapbacked)
#define head (1UL << PG_head)
#define tail (1UL << PG_tail)
#define compound (1UL << PG_compound)
#define slab (1UL << PG_slab)
#define reserved (1UL << PG_reserved)
static struct page_state {
unsigned long mask;
unsigned long res;
char *msg;
int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
{ reserved, reserved, "reserved kernel", me_kernel },
/*
* free pages are specially detected outside this table:
* PG_buddy pages only make up a small fraction of all free pages.
*/
/*
* Could in theory check if slab page is free or if we can drop
* currently unused objects without touching them. But just
* treat it as standard kernel for now.
*/
{ slab, slab, "kernel slab", me_kernel },
#ifdef CONFIG_PAGEFLAGS_EXTENDED
{ head, head, "huge", me_huge_page },
{ tail, tail, "huge", me_huge_page },
#else
{ compound, compound, "huge", me_huge_page },
#endif
{ sc|dirty, sc|dirty, "swapcache", me_swapcache_dirty },
{ sc|dirty, sc, "swapcache", me_swapcache_clean },
{ unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty},
{ unevict, unevict, "unevictable LRU", me_pagecache_clean},
{ mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty },
{ mlock, mlock, "mlocked LRU", me_pagecache_clean },
{ lru|dirty, lru|dirty, "LRU", me_pagecache_dirty },
{ lru|dirty, lru, "clean LRU", me_pagecache_clean },
/*
* Catchall entry: must be at end.
*/
{ 0, 0, "unknown page state", me_unknown },
};
#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef writeback
#undef lru
#undef swapbacked
#undef head
#undef tail
#undef compound
#undef slab
#undef reserved
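/* Log the outcome of handling the error page at @pfn, noting if it is dirty. */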
static void action_result(unsigned long pfn, char *msg, int result)
{
struct page *page = pfn_to_page(pfn);
printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
pfn,
PageDirty(page) ? "dirty " : "",
msg, action_name[result]);
}
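/*
 * Run the handler for the matched page state, then sanity check the
 * remaining reference count: one reference is held by the caller, and a
 * dirty swapcache page left in the DELAYED state legitimately keeps one
 * more.
 */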
static int page_action(struct page_state *ps, struct page *p,
unsigned long pfn)
{
int result;
int count;
result = ps->action(p, pfn);
action_result(pfn, ps->msg, result);
count = page_count(p) - 1;
if (ps->action == me_swapcache_dirty && result == DELAYED)
count--;
if (count != 0) {
printk(KERN_ERR
"MCE %#lx: %s page still referenced by %d users\n",
pfn, ps->msg, count);
result = FAILED;
}
/* Could do more checks here if page looks ok */
/*
* Could adjust zone counters here to correct for the missing page.
*/
return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
}
/*
* Do all that is necessary to remove user space mappings. Unmap
* the pages and send SIGBUS to the processes if the data was dirty.
*/
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
int trapno)
{
enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
struct address_space *mapping;
LIST_HEAD(tokill);
int ret;
int kill = 1;
struct page *hpage = compound_head(p);
struct page *ppage;
if (PageReserved(p) || PageSlab(p))
return SWAP_SUCCESS;
/*
* This check implies we don't kill processes if their pages
* are in the swap cache early. Those are always late kills.
*/
if (!page_mapped(hpage))
return SWAP_SUCCESS;
if (PageKsm(p))
return SWAP_FAIL;
if (PageSwapCache(p)) {
printk(KERN_ERR
"MCE %#lx: keeping poisoned page in swap cache\n", pfn);
ttu |= TTU_IGNORE_HWPOISON;
}
/*
* Propagate the dirty bit from PTEs to struct page first, because we
* need this to decide if we should kill or just drop the page.
* XXX: the dirty test could be racy: set_page_dirty() may not always
* be called inside page lock (it's recommended but not enforced).
*/
mapping = page_mapping(hpage);
if (!PageDirty(hpage) && mapping &&
mapping_cap_writeback_dirty(mapping)) {
if (page_mkclean(hpage)) {
SetPageDirty(hpage);
} else {
kill = 0;
ttu |= TTU_IGNORE_HWPOISON;
printk(KERN_INFO
"MCE %#lx: corrupted page was clean: dropped without side effects\n",
pfn);
}
}
/*
* ppage: poisoned page
* if p is regular page(4k page)
* ppage == real poisoned page;
* else p is hugetlb or THP, ppage == head page.
*/
ppage = hpage;
if (PageTransHuge(hpage)) {
/*
* Verify that this isn't a hugetlbfs head page; the check for
* PageAnon is just to avoid tripping a split_huge_page
* internal debug check, as split_huge_page refuses to deal with
* anything that isn't an anon page. PageAnon can't go away from
* under us because we hold a refcount on the hpage; without a
* refcount on the hpage, split_huge_page can't be safely called
* in the first place, and having a refcount only on the tail isn't
* enough to be safe.
*/
if (!PageHuge(hpage) && PageAnon(hpage)) {
if (unlikely(split_huge_page(hpage))) {
/*
* FIXME: if splitting the THP fails, it is
* better to stop the following operation rather
* than cause a panic by unmapping. The system might
* survive if the page is freed later.
*/
printk(KERN_INFO
"MCE %#lx: failed to split THP\n", pfn);
BUG_ON(!PageHWPoison(p));
return SWAP_FAIL;
}
/* THP is split, so ppage should be the real poisoned page. */
ppage = p;
}
}
/*
* First collect all the processes that have the page
* mapped in dirty form. This has to be done before try_to_unmap,
* because ttu takes the rmap data structures down.
*
* Error handling: We ignore errors here because
* there's nothing that can be done.
*/
if (kill)
collect_procs(ppage, &tokill);
if (hpage != ppage)
lock_page(ppage);
ret = try_to_unmap(ppage, ttu);
if (ret != SWAP_SUCCESS)
printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
pfn, page_mapcount(ppage));
if (hpage != ppage)
unlock_page(ppage);
/*
* Now that the dirty bit has been propagated to the
* struct page and all unmaps done we can decide if
* killing is needed or not. Only kill when the page
* was dirty, otherwise the tokill list is merely
* freed. When there was a problem unmapping earlier
* use a more forceful, uncatchable kill to prevent
* any accesses to the poisoned memory.
*/
kill_procs_ao(&tokill, !!PageDirty(ppage), trapno,
ret != SWAP_SUCCESS, p, pfn);
return ret;
}
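/* Set or clear PG_hwpoison on every subpage of a huge page. */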
static void set_page_hwpoison_huge_page(struct page *hpage)
{
int i;
int nr_pages = 1 << compound_trans_order(hpage);
for (i = 0; i < nr_pages; i++)
SetPageHWPoison(hpage + i);
}
static void clear_page_hwpoison_huge_page(struct page *hpage)
{
int i;
int nr_pages = 1 << compound_trans_order(hpage);
for (i = 0; i < nr_pages; i++)
ClearPageHWPoison(hpage + i);
}
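/*
 * Core of memory_failure(): @flags may contain MF_COUNT_INCREASED when the
 * caller already holds an extra reference on the page.
 */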
int __memory_failure(unsigned long pfn, int trapno, int flags)
{
struct page_state *ps;
struct page *p;
struct page *hpage;
int res;
unsigned int nr_pages;
if (!sysctl_memory_failure_recovery)
panic("Memory failure from trap %d on page %lx", trapno, pfn);
if (!pfn_valid(pfn)) {
printk(KERN_ERR
"MCE %#lx: memory outside kernel control\n",
pfn);
return -ENXIO;
}
p = pfn_to_page(pfn);
hpage = compound_head(p);
if (TestSetPageHWPoison(p)) {
printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
return 0;
}
nr_pages = 1 << compound_trans_order(hpage);
atomic_long_add(nr_pages, &mce_bad_pages);
/*
* We need/can do nothing about count=0 pages.
* 1) it's a free page, and therefore in safe hand:
* prep_new_page() will be the gate keeper.
* 2) it's a free hugepage, which is also safe:
* an affected hugepage will be dequeued from hugepage freelist,
* so there's no concern about reusing it ever after.
* 3) it's part of a non-compound high order page.
* Implies some kernel user: cannot stop them from
* R/W the page; let's pray that the page has been
* used and will be freed some time later.
* In fact it's dangerous to directly bump up page count from 0,
* that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
*/
if (!(flags & MF_COUNT_INCREASED) &&
!get_page_unless_zero(hpage)) {
if (is_free_buddy_page(p)) {
action_result(pfn, "free buddy", DELAYED);
return 0;
} else if (PageHuge(hpage)) {
/*
* Check "just unpoisoned", "filter hit", and
* "race with other subpage."
*/
lock_page(hpage);
if (!PageHWPoison(hpage)
|| (hwpoison_filter(p) && TestClearPageHWPoison(p))
|| (p != hpage && TestSetPageHWPoison(hpage))) {
atomic_long_sub(nr_pages, &mce_bad_pages);
return 0;
}
set_page_hwpoison_huge_page(hpage);
res = dequeue_hwpoisoned_huge_page(hpage);
action_result(pfn, "free huge",
res ? IGNORED : DELAYED);
unlock_page(hpage);
return res;
} else {
action_result(pfn, "high order kernel", IGNORED);
return -EBUSY;
}
}
/*
* We ignore non-LRU pages for good reasons.
* - PG_locked is only well defined for LRU pages and a few others
* - to avoid races with __set_page_locked()
* - to avoid races with __SetPageSlab*() (and more non-atomic ops)
* The check (unnecessarily) ignores LRU pages being isolated and
* walked by the page reclaim code, however that's not a big loss.
*/
if (!PageHuge(p) && !PageTransCompound(p)) {
if (!PageLRU(p))
shake_page(p, 0);
if (!PageLRU(p)) {
/*
* shake_page could have turned it free.
*/
if (is_free_buddy_page(p)) {
action_result(pfn, "free buddy, 2nd try",
DELAYED);
return 0;
}
action_result(pfn, "non LRU", IGNORED);
put_page(p);
return -EBUSY;
}
}
/*
* Lock the page and wait for writeback to finish.
* It's very difficult to mess with pages currently under IO
* and in many cases impossible, so we just avoid it here.
*/
lock_page(hpage);
/*
* unpoison always clears PG_hwpoison inside the page lock
*/
if (!PageHWPoison(p)) {
printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
res = 0;
goto out;
}
if (hwpoison_filter(p)) {
if (TestClearPageHWPoison(p))
atomic_long_sub(nr_pages, &mce_bad_pages);
unlock_page(hpage);
put_page(hpage);
return 0;
}
/*
* For error on the tail page, we should set PG_hwpoison
* on the head page to show that the hugepage is hwpoisoned
*/
if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
action_result(pfn, "hugepage already hardware poisoned",
IGNORED);
unlock_page(hpage);
put_page(hpage);
return 0;
}
/*
* Set PG_hwpoison on all pages in an error hugepage,
* because containment is done in hugepage unit for now.
* Since we have done TestSetPageHWPoison() for the head page with
* page lock held, we can safely set PG_hwpoison bits on tail pages.
*/
if (PageHuge(p))
set_page_hwpoison_huge_page(hpage);
wait_on_page_writeback(p);
/*
* Now take care of user space mappings.
* Abort on fail: __delete_from_page_cache() assumes unmapped page.
*/
if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
res = -EBUSY;
goto out;
}
/*
* Torn down by someone else?
*/
if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
action_result(pfn, "already truncated LRU", IGNORED);
res = -EBUSY;
goto out;
}
res = -EBUSY;
for (ps = error_states;; ps++) {
if ((p->flags & ps->mask) == ps->res) {
res = page_action(ps, p, pfn);
break;
}
}
out:
unlock_page(hpage);
return res;
}
EXPORT_SYMBOL_GPL(__memory_failure);
/**
* memory_failure - Handle memory failure of a page.
* @pfn: Page Number of the corrupted page
* @trapno: Trap number reported in the signal to user space.
*
* This function is called by the low level machine check code
* of an architecture when it detects hardware memory corruption
* of a page. It tries its best to recover, which includes
* dropping pages, killing processes etc.
*
* The function is primarily of use for corruptions that
* happen outside the current execution context (e.g. when
* detected by a background scrubber)
*
* Must run in process context (e.g. a work queue) with interrupts
* enabled and no spinlocks held.
*/
void memory_failure(unsigned long pfn, int trapno)
{
__memory_failure(pfn, trapno, 0);
}
/**
* unpoison_memory - Unpoison a previously poisoned page
* @pfn: Page number of the to be unpoisoned page
*
* Software-unpoison a page that has been poisoned by
* memory_failure() earlier.
*
* This is only done on the software level, so it only works
* for Linux-injected failures, not real hardware failures.
*
* Returns 0 for success, otherwise -errno.
*/
int unpoison_memory(unsigned long pfn)
{
struct page *page;
struct page *p;
int freeit = 0;
unsigned int nr_pages;
if (!pfn_valid(pfn))
return -ENXIO;
p = pfn_to_page(pfn);
page = compound_head(p);
if (!PageHWPoison(p)) {
pr_info("MCE: Page was already unpoisoned %#lx\n", pfn);
return 0;
}
nr_pages = 1 << compound_trans_order(page);
if (!get_page_unless_zero(page)) {
/*
* Since a HWPoisoned hugepage should have a non-zero refcount,
* a race between memory failure and unpoison seems to have happened.
* In such a case unpoison fails and memory failure runs
* to the end.
*/
if (PageHuge(page)) {
pr_debug("MCE: Memory failure is now running on free hugepage %#lx\n", pfn);
return 0;
}
if (TestClearPageHWPoison(p))
atomic_long_sub(nr_pages, &mce_bad_pages);
pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
return 0;
}
lock_page(page);
/*
* This test is racy because PG_hwpoison is set outside of page lock.
* That's acceptable because that won't trigger kernel panic. Instead,
* the PG_hwpoison page will be caught and isolated on the entrance to
* the free buddy page pool.
*/
if (TestClearPageHWPoison(page)) {
pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
atomic_long_sub(nr_pages, &mce_bad_pages);
freeit = 1;
if (PageHuge(page))
clear_page_hwpoison_huge_page(page);
}
unlock_page(page);
put_page(page);
if (freeit)
put_page(page);
return 0;
}
EXPORT_SYMBOL(unpoison_memory);
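/*
 * Migration allocation callback: allocate the replacement page on the same
 * node as the source, as a hugepage when the source is a hugepage.
 */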
static struct page *new_page(struct page *p, unsigned long private, int **x)
{
int nid = page_to_nid(p);
if (PageHuge(p))
return alloc_huge_page_node(page_hstate(compound_head(p)),
nid);
else
return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}
/*
* Safely get reference count of an arbitrary page.
* Returns 0 for a free page, -EIO for a zero refcount page
* that is not free, and 1 for any other page type.
* For 1 the page is returned with increased page count, otherwise not.
*/
static int get_any_page(struct page *p, unsigned long pfn, int flags)
{
int ret;
if (flags & MF_COUNT_INCREASED)
return 1;
/*
* The lock_memory_hotplug prevents a race with memory hotplug.
* This is a big hammer; a better approach would be nicer.
*/
lock_memory_hotplug();
/*
* Isolate the page, so that it doesn't get reallocated if it
* was free.
*/
set_migratetype_isolate(p);
/*
* When the target page is a free hugepage, just remove it
* from free hugepage list.
*/
if (!get_page_unless_zero(compound_head(p))) {
if (PageHuge(p)) {
pr_info("get_any_page: %#lx free huge page\n", pfn);
ret = dequeue_hwpoisoned_huge_page(compound_head(p));
} else if (is_free_buddy_page(p)) {
pr_info("get_any_page: %#lx free buddy page\n", pfn);
/* Set hwpoison bit while page is still isolated */
SetPageHWPoison(p);
ret = 0;
} else {
pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n",
pfn, p->flags);
ret = -EIO;
}
} else {
/* Not a free page */
ret = 1;
}
unset_migratetype_isolate(p);
unlock_memory_hotplug();
return ret;
}
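/*
 * Soft offline a huge page: migrate its contents to a newly allocated
 * hugepage, then mark all subpages of the old one hwpoisoned and pull it
 * off the free list so it is never reused.
 */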
static int soft_offline_huge_page(struct page *page, int flags)
{
int ret;
unsigned long pfn = page_to_pfn(page);
struct page *hpage = compound_head(page);
LIST_HEAD(pagelist);
ret = get_any_page(page, pfn, flags);
if (ret < 0)
return ret;
if (ret == 0)
goto done;
if (PageHWPoison(hpage)) {
put_page(hpage);
pr_debug("soft offline: %#lx hugepage already poisoned\n", pfn);
return -EBUSY;
}
/* Keep page count to indicate a given hugepage is isolated. */
list_add(&hpage->lru, &pagelist);
ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, false,
MIGRATE_SYNC);
if (ret) {
struct page *page1, *page2;
list_for_each_entry_safe(page1, page2, &pagelist, lru)
put_page(page1);
pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
if (ret > 0)
ret = -EIO;
return ret;
}
done:
if (!PageHWPoison(hpage))
atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
set_page_hwpoison_huge_page(hpage);
dequeue_hwpoisoned_huge_page(hpage);
/* keep elevated page count for bad page */
return ret;
}
/**
* soft_offline_page - Soft offline a page.
* @page: page to offline
* @flags: flags. Same as memory_failure().
*
* Returns 0 on success, otherwise negated errno.
*
* Soft offline a page, by migration or invalidation,
* without killing anything. This is for the case when
* a page is not corrupted yet (so it's still valid to access),
* but has had a number of corrected errors and is better taken
* out.
*
* The actual policy on when to do that is maintained by
* user space.
*
* This should never impact any application or cause data loss,
* however it might take some time.
*
* This is not a 100% solution for all memory, but tries to be
* ``good enough'' for the majority of memory.
*/
int soft_offline_page(struct page *page, int flags)
{
int ret;
unsigned long pfn = page_to_pfn(page);
struct page *hpage = compound_trans_head(page);
if (PageHuge(page))
return soft_offline_huge_page(page, flags);
if (PageTransHuge(hpage)) {
if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
pr_info("soft offline: %#lx: failed to split THP\n",
pfn);
return -EBUSY;
}
}
ret = get_any_page(page, pfn, flags);
if (ret < 0)
return ret;
if (ret == 0)
goto done;
/*
* Page cache page we can handle?
*/
if (!PageLRU(page)) {
/*
* Try to free it.
*/
put_page(page);
shake_page(page, 1);
/*
* Did it turn free?
*/
ret = get_any_page(page, pfn, 0);
if (ret < 0)
return ret;
if (ret == 0)
goto done;
}
if (!PageLRU(page)) {
pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
pfn, page->flags);
return -EIO;
}
lock_page(page);
wait_on_page_writeback(page);
/*
* Synchronized using the page lock with memory_failure()
*/
if (PageHWPoison(page)) {
unlock_page(page);
put_page(page);
pr_info("soft offline: %#lx page already poisoned\n", pfn);
return -EBUSY;
}
/*
* Try to invalidate first. This should work for
* non dirty unmapped page cache pages.
*/
ret = invalidate_inode_page(page);
unlock_page(page);
/*
* RED-PEN: it would be better to keep the page isolated here, but we
* would need to fix the isolation locking first.
*/
if (ret == 1) {
put_page(page);
ret = 0;
pr_info("soft_offline: %#lx: invalidated\n", pfn);
goto done;
}
/*
* Simple invalidation didn't work.
* Try to migrate to a new page instead. migrate.c
* handles a large number of cases for us.
*/
ret = isolate_lru_page(page);
/*
* Drop the page reference which came from get_any_page();
* a successful isolate_lru_page() already took another one.
*/
put_page(page);
if (!ret) {
LIST_HEAD(pagelist);
inc_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
list_add(&page->lru, &pagelist);
ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
false, MIGRATE_SYNC);
if (ret) {
putback_lru_pages(&pagelist);
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
if (ret > 0)
ret = -EIO;
}
} else {
pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
pfn, ret, page_count(page), page->flags);
}
if (ret)
return ret;
done:
atomic_long_add(1, &mce_bad_pages);
SetPageHWPoison(page);
/* keep elevated page count for bad page */
return ret;
}
| gpl-2.0 |
nanata1115/linux | fs/reiserfs/xattr_trusted.c | 616 | 1411 | #include "reiserfs.h"
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "xattr.h"
#include <linux/uaccess.h>
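/*
 * Handlers for the "trusted." xattr namespace: all operations require
 * CAP_SYS_ADMIN and are refused (or hidden, for list) on private
 * xattr-internal inodes.
 */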
static int
trusted_get(struct dentry *dentry, const char *name, void *buffer, size_t size,
int handler_flags)
{
if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry)))
return -EPERM;
return reiserfs_xattr_get(d_inode(dentry), name, buffer, size);
}
static int
trusted_set(struct dentry *dentry, const char *name, const void *buffer,
size_t size, int flags, int handler_flags)
{
if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry)))
return -EPERM;
return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags);
}
static size_t trusted_list(struct dentry *dentry, char *list, size_t list_size,
const char *name, size_t name_len, int handler_flags)
{
const size_t len = name_len + 1;
if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry)))
return 0;
if (list && len <= list_size) {
memcpy(list, name, name_len);
list[name_len] = '\0';
}
return len;
}
const struct xattr_handler reiserfs_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.get = trusted_get,
.set = trusted_set,
.list = trusted_list,
};
| gpl-2.0 |
Elite-Kernels/elite_bullhead | fs/ceph/ioctl.c | 1896 | 7606 | #include <linux/in.h>
#include "super.h"
#include "mds_client.h"
#include <linux/ceph/ceph_debug.h>
#include "ioctl.h"
/*
* ioctls
*/
/*
* get and set the file layout
*/
static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
{
struct ceph_inode_info *ci = ceph_inode(file_inode(file));
struct ceph_ioctl_layout l;
int err;
err = ceph_do_getattr(file_inode(file), CEPH_STAT_CAP_LAYOUT);
if (!err) {
l.stripe_unit = ceph_file_layout_su(ci->i_layout);
l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
l.object_size = ceph_file_layout_object_size(ci->i_layout);
l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
l.preferred_osd = (s32)-1;
if (copy_to_user(arg, &l, sizeof(l)))
return -EFAULT;
}
return err;
}
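/*
 * Validate the requested layout: object size and stripe unit must be page
 * aligned, the object size must be a multiple of a non-zero stripe unit,
 * and the data pool must exist in the current MDS map.
 */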
static long __validate_layout(struct ceph_mds_client *mdsc,
struct ceph_ioctl_layout *l)
{
int i, err;
/* validate striping parameters */
if ((l->object_size & ~PAGE_MASK) ||
(l->stripe_unit & ~PAGE_MASK) ||
(l->stripe_unit != 0 &&
((unsigned)l->object_size % (unsigned)l->stripe_unit)))
return -EINVAL;
/* make sure it's a valid data pool */
mutex_lock(&mdsc->mutex);
err = -EINVAL;
for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
if (mdsc->mdsmap->m_data_pg_pools[i] == l->data_pool) {
err = 0;
break;
}
mutex_unlock(&mdsc->mutex);
if (err)
return err;
return 0;
}
static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
struct inode *parent_inode;
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_mds_request *req;
struct ceph_ioctl_layout l;
struct ceph_inode_info *ci = ceph_inode(file_inode(file));
struct ceph_ioctl_layout nl;
int err;
if (copy_from_user(&l, arg, sizeof(l)))
return -EFAULT;
/* validate changed params against current layout */
err = ceph_do_getattr(file_inode(file), CEPH_STAT_CAP_LAYOUT);
if (err)
return err;
memset(&nl, 0, sizeof(nl));
if (l.stripe_count)
nl.stripe_count = l.stripe_count;
else
nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
if (l.stripe_unit)
nl.stripe_unit = l.stripe_unit;
else
nl.stripe_unit = ceph_file_layout_su(ci->i_layout);
if (l.object_size)
nl.object_size = l.object_size;
else
nl.object_size = ceph_file_layout_object_size(ci->i_layout);
if (l.data_pool)
nl.data_pool = l.data_pool;
else
nl.data_pool = ceph_file_layout_pg_pool(ci->i_layout);
/* this is obsolete, and always -1 */
nl.preferred_osd = le64_to_cpu(-1);
err = __validate_layout(mdsc, &nl);
if (err)
return err;
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT,
USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
req->r_inode = inode;
ihold(inode);
req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
req->r_args.setlayout.layout.fl_stripe_unit =
cpu_to_le32(l.stripe_unit);
req->r_args.setlayout.layout.fl_stripe_count =
cpu_to_le32(l.stripe_count);
req->r_args.setlayout.layout.fl_object_size =
cpu_to_le32(l.object_size);
req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool);
parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
err = ceph_mdsc_do_request(mdsc, parent_inode, req);
iput(parent_inode);
ceph_mdsc_put_request(req);
return err;
}
/*
* Set a layout policy on a directory inode. All items in the tree
* rooted at this inode will inherit this layout on creation
* (it doesn't apply retroactively),
* unless a subdirectory has its own layout policy.
*/
static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
struct ceph_mds_request *req;
struct ceph_ioctl_layout l;
int err;
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
/* copy and validate */
if (copy_from_user(&l, arg, sizeof(l)))
return -EFAULT;
err = __validate_layout(mdsc, &l);
if (err)
return err;
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT,
USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
req->r_inode = inode;
ihold(inode);
req->r_args.setlayout.layout.fl_stripe_unit =
cpu_to_le32(l.stripe_unit);
req->r_args.setlayout.layout.fl_stripe_count =
cpu_to_le32(l.stripe_count);
req->r_args.setlayout.layout.fl_object_size =
cpu_to_le32(l.object_size);
req->r_args.setlayout.layout.fl_pg_pool =
cpu_to_le32(l.data_pool);
err = ceph_mdsc_do_request(mdsc, inode, req);
ceph_mdsc_put_request(req);
return err;
}
/*
* Return object name, size/offset information, and location (OSD
* number, network address) for a given file offset.
*/
static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
{
struct ceph_ioctl_dataloc dl;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_osd_client *osdc =
&ceph_sb_to_client(inode->i_sb)->client->osdc;
u64 len = 1, olen;
u64 tmp;
struct ceph_pg pgid;
int r;
/* copy and validate */
if (copy_from_user(&dl, arg, sizeof(dl)))
return -EFAULT;
down_read(&osdc->map_sem);
r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, len,
&dl.object_no, &dl.object_offset,
&olen);
if (r < 0) {
up_read(&osdc->map_sem);
return -EIO;
}
dl.file_offset -= dl.object_offset;
dl.object_size = ceph_file_layout_object_size(ci->i_layout);
dl.block_size = ceph_file_layout_su(ci->i_layout);
/* block_offset = object_offset % block_size */
tmp = dl.object_offset;
dl.block_offset = do_div(tmp, dl.block_size);
snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx",
ceph_ino(inode), dl.object_no);
r = ceph_calc_ceph_pg(&pgid, dl.object_name, osdc->osdmap,
ceph_file_layout_pg_pool(ci->i_layout));
if (r < 0) {
up_read(&osdc->map_sem);
return r;
}
dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid);
if (dl.osd >= 0) {
struct ceph_entity_addr *a =
ceph_osd_addr(osdc->osdmap, dl.osd);
if (a)
memcpy(&dl.osd_addr, &a->in_addr, sizeof(dl.osd_addr));
} else {
memset(&dl.osd_addr, 0, sizeof(dl.osd_addr));
}
up_read(&osdc->map_sem);
/* send result back to user */
if (copy_to_user(arg, &dl, sizeof(dl)))
return -EFAULT;
return 0;
}
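/*
 * Switch this open file to lazy I/O: move its count to the LAZY open-mode
 * bucket and re-check caps with the MDS.
 */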
static long ceph_ioctl_lazyio(struct file *file)
{
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
spin_lock(&ci->i_ceph_lock);
ci->i_nr_by_mode[fi->fmode]--;
fi->fmode |= CEPH_FILE_MODE_LAZY;
ci->i_nr_by_mode[fi->fmode]++;
spin_unlock(&ci->i_ceph_lock);
dout("ioctl_layzio: file %p marked lazy\n", file);
ceph_check_caps(ci, 0, NULL);
} else {
dout("ioctl_layzio: file %p already lazy\n", file);
}
return 0;
}
static long ceph_ioctl_syncio(struct file *file)
{
struct ceph_file_info *fi = file->private_data;
fi->flags |= CEPH_F_SYNC;
return 0;
}
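/* Dispatch the ceph file ioctls defined above. */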
long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
dout("ioctl file %p cmd %u arg %lu\n", file, cmd, arg);
switch (cmd) {
case CEPH_IOC_GET_LAYOUT:
return ceph_ioctl_get_layout(file, (void __user *)arg);
case CEPH_IOC_SET_LAYOUT:
return ceph_ioctl_set_layout(file, (void __user *)arg);
case CEPH_IOC_SET_LAYOUT_POLICY:
return ceph_ioctl_set_layout_policy(file, (void __user *)arg);
case CEPH_IOC_GET_DATALOC:
return ceph_ioctl_get_dataloc(file, (void __user *)arg);
case CEPH_IOC_LAZYIO:
return ceph_ioctl_lazyio(file);
case CEPH_IOC_SYNCIO:
return ceph_ioctl_syncio(file);
}
return -ENOTTY;
}
| gpl-2.0 |
HarveyHunt/CI20_linux | drivers/media/dvb-frontends/stv090x.c | 2664 | 139732 | /*
STV0900/0903 Multistandard Broadcast Frontend driver
Copyright (C) Manu Abraham <abraham.manu@gmail.com>
Copyright (C) ST Microelectronics
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
#include "stv6110x.h" /* for demodulator internal modes */
#include "stv090x_reg.h"
#include "stv090x.h"
#include "stv090x_priv.h"
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
static unsigned int verbose;
module_param(verbose, int, 0644);
/* internal params node */
struct stv090x_dev {
/* pointer for internal params, one for each pair of demods */
struct stv090x_internal *internal;
struct stv090x_dev *next_dev;
};
/* first internal params */
static struct stv090x_dev *stv090x_first_dev;
/* find chip by i2c adapter and i2c address */
static struct stv090x_dev *find_dev(struct i2c_adapter *i2c_adap,
u8 i2c_addr)
{
struct stv090x_dev *temp_dev = stv090x_first_dev;
/*
Search to the end of the list, or stop early at the chip
matching this i2c adapter and i2c address */
while ((temp_dev != NULL) &&
((temp_dev->internal->i2c_adap != i2c_adap) ||
(temp_dev->internal->i2c_addr != i2c_addr))) {
temp_dev = temp_dev->next_dev;
}
return temp_dev;
}
/* deallocating chip */
static void remove_dev(struct stv090x_internal *internal)
{
struct stv090x_dev *prev_dev = stv090x_first_dev;
struct stv090x_dev *del_dev = find_dev(internal->i2c_adap,
internal->i2c_addr);
if (del_dev != NULL) {
if (del_dev == stv090x_first_dev) {
stv090x_first_dev = del_dev->next_dev;
} else {
while (prev_dev->next_dev != del_dev)
prev_dev = prev_dev->next_dev;
prev_dev->next_dev = del_dev->next_dev;
}
kfree(del_dev);
}
}
/* allocating new chip */
static struct stv090x_dev *append_internal(struct stv090x_internal *internal)
{
struct stv090x_dev *new_dev;
struct stv090x_dev *temp_dev;
new_dev = kmalloc(sizeof(struct stv090x_dev), GFP_KERNEL);
if (new_dev != NULL) {
new_dev->internal = internal;
new_dev->next_dev = NULL;
/* append to list */
if (stv090x_first_dev == NULL) {
stv090x_first_dev = new_dev;
} else {
temp_dev = stv090x_first_dev;
while (temp_dev->next_dev != NULL)
temp_dev = temp_dev->next_dev;
temp_dev->next_dev = new_dev;
}
}
return new_dev;
}
/* DVBS1 and DSS C/N Lookup table */
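/* Each entry pairs a C/N value in 0.1 dB units (see the per-row comments)
 * with the corresponding raw lookup value in the second column. */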
static const struct stv090x_tab stv090x_s1cn_tab[] = {
{ 0, 8917 }, /* 0.0dB */
{ 5, 8801 }, /* 0.5dB */
{ 10, 8667 }, /* 1.0dB */
{ 15, 8522 }, /* 1.5dB */
{ 20, 8355 }, /* 2.0dB */
{ 25, 8175 }, /* 2.5dB */
{ 30, 7979 }, /* 3.0dB */
{ 35, 7763 }, /* 3.5dB */
{ 40, 7530 }, /* 4.0dB */
{ 45, 7282 }, /* 4.5dB */
{ 50, 7026 }, /* 5.0dB */
{ 55, 6781 }, /* 5.5dB */
{ 60, 6514 }, /* 6.0dB */
{ 65, 6241 }, /* 6.5dB */
{ 70, 5965 }, /* 7.0dB */
{ 75, 5690 }, /* 7.5dB */
{ 80, 5424 }, /* 8.0dB */
{ 85, 5161 }, /* 8.5dB */
{ 90, 4902 }, /* 9.0dB */
{ 95, 4654 }, /* 9.5dB */
{ 100, 4417 }, /* 10.0dB */
{ 105, 4186 }, /* 10.5dB */
{ 110, 3968 }, /* 11.0dB */
{ 115, 3757 }, /* 11.5dB */
{ 120, 3558 }, /* 12.0dB */
{ 125, 3366 }, /* 12.5dB */
{ 130, 3185 }, /* 13.0dB */
{ 135, 3012 }, /* 13.5dB */
{ 140, 2850 }, /* 14.0dB */
{ 145, 2698 }, /* 14.5dB */
{ 150, 2550 }, /* 15.0dB */
{ 160, 2283 }, /* 16.0dB */
{ 170, 2042 }, /* 17.0dB */
{ 180, 1827 }, /* 18.0dB */
{ 190, 1636 }, /* 19.0dB */
{ 200, 1466 }, /* 20.0dB */
{ 210, 1315 }, /* 21.0dB */
{ 220, 1181 }, /* 22.0dB */
{ 230, 1064 }, /* 23.0dB */
{ 240, 960 }, /* 24.0dB */
{ 250, 869 }, /* 25.0dB */
{ 260, 792 }, /* 26.0dB */
{ 270, 724 }, /* 27.0dB */
{ 280, 665 }, /* 28.0dB */
{ 290, 616 }, /* 29.0dB */
{ 300, 573 }, /* 30.0dB */
{ 310, 537 }, /* 31.0dB */
{ 320, 507 }, /* 32.0dB */
{ 330, 483 }, /* 33.0dB */
{ 400, 398 }, /* 40.0dB */
{ 450, 381 }, /* 45.0dB */
{ 500, 377 } /* 50.0dB */
};
/* DVBS2 C/N Lookup table */
static const struct stv090x_tab stv090x_s2cn_tab[] = {
{ -30, 13348 }, /* -3.0dB */
{ -20, 12640 }, /* -2.0dB */
{ -10, 11883 }, /* -1.0dB */
{ 0, 11101 }, /* -0.0dB */
{ 5, 10718 }, /* 0.5dB */
{ 10, 10339 }, /* 1.0dB */
{ 15, 9947 }, /* 1.5dB */
{ 20, 9552 }, /* 2.0dB */
{ 25, 9183 }, /* 2.5dB */
{ 30, 8799 }, /* 3.0dB */
{ 35, 8422 }, /* 3.5dB */
{ 40, 8062 }, /* 4.0dB */
{ 45, 7707 }, /* 4.5dB */
{ 50, 7353 }, /* 5.0dB */
{ 55, 7025 }, /* 5.5dB */
{ 60, 6684 }, /* 6.0dB */
{ 65, 6331 }, /* 6.5dB */
{ 70, 6036 }, /* 7.0dB */
{ 75, 5727 }, /* 7.5dB */
{ 80, 5437 }, /* 8.0dB */
{ 85, 5164 }, /* 8.5dB */
{ 90, 4902 }, /* 9.0dB */
{ 95, 4653 }, /* 9.5dB */
{ 100, 4408 }, /* 10.0dB */
{ 105, 4187 }, /* 10.5dB */
{ 110, 3961 }, /* 11.0dB */
{ 115, 3751 }, /* 11.5dB */
{ 120, 3558 }, /* 12.0dB */
{ 125, 3368 }, /* 12.5dB */
{ 130, 3191 }, /* 13.0dB */
{ 135, 3017 }, /* 13.5dB */
{ 140, 2862 }, /* 14.0dB */
{ 145, 2710 }, /* 14.5dB */
{ 150, 2565 }, /* 15.0dB */
{ 160, 2300 }, /* 16.0dB */
{ 170, 2058 }, /* 17.0dB */
{ 180, 1849 }, /* 18.0dB */
{ 190, 1663 }, /* 19.0dB */
{ 200, 1495 }, /* 20.0dB */
{ 210, 1349 }, /* 21.0dB */
{ 220, 1222 }, /* 22.0dB */
{ 230, 1110 }, /* 23.0dB */
{ 240, 1011 }, /* 24.0dB */
{ 250, 925 }, /* 25.0dB */
{ 260, 853 }, /* 26.0dB */
{ 270, 789 }, /* 27.0dB */
{ 280, 734 }, /* 28.0dB */
{ 290, 690 }, /* 29.0dB */
{ 300, 650 }, /* 30.0dB */
{ 310, 619 }, /* 31.0dB */
{ 320, 593 }, /* 32.0dB */
{ 330, 571 }, /* 33.0dB */
{ 400, 498 }, /* 40.0dB */
{ 450, 484 }, /* 45.0dB */
{ 500, 481 } /* 50.0dB */
};
/* RF level C/N lookup table */
static const struct stv090x_tab stv090x_rf_tab[] = {
{ -5, 0xcaa1 }, /* -5dBm */
{ -10, 0xc229 }, /* -10dBm */
{ -15, 0xbb08 }, /* -15dBm */
{ -20, 0xb4bc }, /* -20dBm */
{ -25, 0xad5a }, /* -25dBm */
{ -30, 0xa298 }, /* -30dBm */
{ -35, 0x98a8 }, /* -35dBm */
{ -40, 0x8389 }, /* -40dBm */
{ -45, 0x59be }, /* -45dBm */
{ -50, 0x3a14 }, /* -50dBm */
{ -55, 0x2d11 }, /* -55dBm */
{ -60, 0x210d }, /* -60dBm */
{ -65, 0xa14f }, /* -65dBm */
{ -70, 0x07aa } /* -70dBm */
};
static struct stv090x_reg stv0900_initval[] = {
{ STV090x_OUTCFG, 0x00 },
{ STV090x_MODECFG, 0xff },
{ STV090x_AGCRF1CFG, 0x11 },
{ STV090x_AGCRF2CFG, 0x13 },
{ STV090x_TSGENERAL1X, 0x14 },
{ STV090x_TSTTNR2, 0x21 },
{ STV090x_TSTTNR4, 0x21 },
{ STV090x_P2_DISTXCTL, 0x22 },
{ STV090x_P2_F22TX, 0xc0 },
{ STV090x_P2_F22RX, 0xc0 },
{ STV090x_P2_DISRXCTL, 0x00 },
{ STV090x_P2_DMDCFGMD, 0xF9 },
{ STV090x_P2_DEMOD, 0x08 },
{ STV090x_P2_DMDCFG3, 0xc4 },
{ STV090x_P2_CARFREQ, 0xed },
{ STV090x_P2_LDT, 0xd0 },
{ STV090x_P2_LDT2, 0xb8 },
{ STV090x_P2_TMGCFG, 0xd2 },
{ STV090x_P2_TMGTHRISE, 0x20 },
{ STV090x_P1_TMGCFG, 0xd2 },
{ STV090x_P2_TMGTHFALL, 0x00 },
{ STV090x_P2_FECSPY, 0x88 },
{ STV090x_P2_FSPYDATA, 0x3a },
{ STV090x_P2_FBERCPT4, 0x00 },
{ STV090x_P2_FSPYBER, 0x10 },
{ STV090x_P2_ERRCTRL1, 0x35 },
{ STV090x_P2_ERRCTRL2, 0xc1 },
{ STV090x_P2_CFRICFG, 0xf8 },
{ STV090x_P2_NOSCFG, 0x1c },
{ STV090x_P2_DMDTOM, 0x20 },
{ STV090x_P2_CORRELMANT, 0x70 },
{ STV090x_P2_CORRELABS, 0x88 },
{ STV090x_P2_AGC2O, 0x5b },
{ STV090x_P2_AGC2REF, 0x38 },
{ STV090x_P2_CARCFG, 0xe4 },
{ STV090x_P2_ACLC, 0x1A },
{ STV090x_P2_BCLC, 0x09 },
{ STV090x_P2_CARHDR, 0x08 },
{ STV090x_P2_KREFTMG, 0xc1 },
{ STV090x_P2_SFRUPRATIO, 0xf0 },
{ STV090x_P2_SFRLOWRATIO, 0x70 },
{ STV090x_P2_SFRSTEP, 0x58 },
{ STV090x_P2_TMGCFG2, 0x01 },
{ STV090x_P2_CAR2CFG, 0x26 },
{ STV090x_P2_BCLC2S2Q, 0x86 },
{ STV090x_P2_BCLC2S28, 0x86 },
{ STV090x_P2_SMAPCOEF7, 0x77 },
{ STV090x_P2_SMAPCOEF6, 0x85 },
{ STV090x_P2_SMAPCOEF5, 0x77 },
{ STV090x_P2_TSCFGL, 0x20 },
{ STV090x_P2_DMDCFG2, 0x3b },
{ STV090x_P2_MODCODLST0, 0xff },
{ STV090x_P2_MODCODLST1, 0xff },
{ STV090x_P2_MODCODLST2, 0xff },
{ STV090x_P2_MODCODLST3, 0xff },
{ STV090x_P2_MODCODLST4, 0xff },
{ STV090x_P2_MODCODLST5, 0xff },
{ STV090x_P2_MODCODLST6, 0xff },
{ STV090x_P2_MODCODLST7, 0xcc },
{ STV090x_P2_MODCODLST8, 0xcc },
{ STV090x_P2_MODCODLST9, 0xcc },
{ STV090x_P2_MODCODLSTA, 0xcc },
{ STV090x_P2_MODCODLSTB, 0xcc },
{ STV090x_P2_MODCODLSTC, 0xcc },
{ STV090x_P2_MODCODLSTD, 0xcc },
{ STV090x_P2_MODCODLSTE, 0xcc },
{ STV090x_P2_MODCODLSTF, 0xcf },
{ STV090x_P1_DISTXCTL, 0x22 },
{ STV090x_P1_F22TX, 0xc0 },
{ STV090x_P1_F22RX, 0xc0 },
{ STV090x_P1_DISRXCTL, 0x00 },
{ STV090x_P1_DMDCFGMD, 0xf9 },
{ STV090x_P1_DEMOD, 0x08 },
{ STV090x_P1_DMDCFG3, 0xc4 },
{ STV090x_P1_DMDTOM, 0x20 },
{ STV090x_P1_CARFREQ, 0xed },
{ STV090x_P1_LDT, 0xd0 },
{ STV090x_P1_LDT2, 0xb8 },
{ STV090x_P1_TMGCFG, 0xd2 },
{ STV090x_P1_TMGTHRISE, 0x20 },
{ STV090x_P1_TMGTHFALL, 0x00 },
{ STV090x_P1_SFRUPRATIO, 0xf0 },
{ STV090x_P1_SFRLOWRATIO, 0x70 },
{ STV090x_P1_TSCFGL, 0x20 },
{ STV090x_P1_FECSPY, 0x88 },
{ STV090x_P1_FSPYDATA, 0x3a },
{ STV090x_P1_FBERCPT4, 0x00 },
{ STV090x_P1_FSPYBER, 0x10 },
{ STV090x_P1_ERRCTRL1, 0x35 },
{ STV090x_P1_ERRCTRL2, 0xc1 },
{ STV090x_P1_CFRICFG, 0xf8 },
{ STV090x_P1_NOSCFG, 0x1c },
{ STV090x_P1_CORRELMANT, 0x70 },
{ STV090x_P1_CORRELABS, 0x88 },
{ STV090x_P1_AGC2O, 0x5b },
{ STV090x_P1_AGC2REF, 0x38 },
{ STV090x_P1_CARCFG, 0xe4 },
{ STV090x_P1_ACLC, 0x1A },
{ STV090x_P1_BCLC, 0x09 },
{ STV090x_P1_CARHDR, 0x08 },
{ STV090x_P1_KREFTMG, 0xc1 },
{ STV090x_P1_SFRSTEP, 0x58 },
{ STV090x_P1_TMGCFG2, 0x01 },
{ STV090x_P1_CAR2CFG, 0x26 },
{ STV090x_P1_BCLC2S2Q, 0x86 },
{ STV090x_P1_BCLC2S28, 0x86 },
{ STV090x_P1_SMAPCOEF7, 0x77 },
{ STV090x_P1_SMAPCOEF6, 0x85 },
{ STV090x_P1_SMAPCOEF5, 0x77 },
{ STV090x_P1_DMDCFG2, 0x3b },
{ STV090x_P1_MODCODLST0, 0xff },
{ STV090x_P1_MODCODLST1, 0xff },
{ STV090x_P1_MODCODLST2, 0xff },
{ STV090x_P1_MODCODLST3, 0xff },
{ STV090x_P1_MODCODLST4, 0xff },
{ STV090x_P1_MODCODLST5, 0xff },
{ STV090x_P1_MODCODLST6, 0xff },
{ STV090x_P1_MODCODLST7, 0xcc },
{ STV090x_P1_MODCODLST8, 0xcc },
{ STV090x_P1_MODCODLST9, 0xcc },
{ STV090x_P1_MODCODLSTA, 0xcc },
{ STV090x_P1_MODCODLSTB, 0xcc },
{ STV090x_P1_MODCODLSTC, 0xcc },
{ STV090x_P1_MODCODLSTD, 0xcc },
{ STV090x_P1_MODCODLSTE, 0xcc },
{ STV090x_P1_MODCODLSTF, 0xcf },
{ STV090x_GENCFG, 0x1d },
{ STV090x_NBITER_NF4, 0x37 },
{ STV090x_NBITER_NF5, 0x29 },
{ STV090x_NBITER_NF6, 0x37 },
{ STV090x_NBITER_NF7, 0x33 },
{ STV090x_NBITER_NF8, 0x31 },
{ STV090x_NBITER_NF9, 0x2f },
{ STV090x_NBITER_NF10, 0x39 },
{ STV090x_NBITER_NF11, 0x3a },
{ STV090x_NBITER_NF12, 0x29 },
{ STV090x_NBITER_NF13, 0x37 },
{ STV090x_NBITER_NF14, 0x33 },
{ STV090x_NBITER_NF15, 0x2f },
{ STV090x_NBITER_NF16, 0x39 },
{ STV090x_NBITER_NF17, 0x3a },
{ STV090x_NBITERNOERR, 0x04 },
{ STV090x_GAINLLR_NF4, 0x0C },
{ STV090x_GAINLLR_NF5, 0x0F },
{ STV090x_GAINLLR_NF6, 0x11 },
{ STV090x_GAINLLR_NF7, 0x14 },
{ STV090x_GAINLLR_NF8, 0x17 },
{ STV090x_GAINLLR_NF9, 0x19 },
{ STV090x_GAINLLR_NF10, 0x20 },
{ STV090x_GAINLLR_NF11, 0x21 },
{ STV090x_GAINLLR_NF12, 0x0D },
{ STV090x_GAINLLR_NF13, 0x0F },
{ STV090x_GAINLLR_NF14, 0x13 },
{ STV090x_GAINLLR_NF15, 0x1A },
{ STV090x_GAINLLR_NF16, 0x1F },
{ STV090x_GAINLLR_NF17, 0x21 },
{ STV090x_RCCFGH, 0x20 },
{ STV090x_P1_FECM, 0x01 }, /* disable DSS modes */
{ STV090x_P2_FECM, 0x01 }, /* disable DSS modes */
{ STV090x_P1_PRVIT, 0x2F }, /* disable PR 6/7 */
{ STV090x_P2_PRVIT, 0x2F }, /* disable PR 6/7 */
};
static struct stv090x_reg stv0903_initval[] = {
{ STV090x_OUTCFG, 0x00 },
{ STV090x_AGCRF1CFG, 0x11 },
{ STV090x_STOPCLK1, 0x48 },
{ STV090x_STOPCLK2, 0x14 },
{ STV090x_TSTTNR1, 0x27 },
{ STV090x_TSTTNR2, 0x21 },
{ STV090x_P1_DISTXCTL, 0x22 },
{ STV090x_P1_F22TX, 0xc0 },
{ STV090x_P1_F22RX, 0xc0 },
{ STV090x_P1_DISRXCTL, 0x00 },
{ STV090x_P1_DMDCFGMD, 0xF9 },
{ STV090x_P1_DEMOD, 0x08 },
{ STV090x_P1_DMDCFG3, 0xc4 },
{ STV090x_P1_CARFREQ, 0xed },
{ STV090x_P1_TNRCFG2, 0x82 },
{ STV090x_P1_LDT, 0xd0 },
{ STV090x_P1_LDT2, 0xb8 },
{ STV090x_P1_TMGCFG, 0xd2 },
{ STV090x_P1_TMGTHRISE, 0x20 },
{ STV090x_P1_TMGTHFALL, 0x00 },
{ STV090x_P1_SFRUPRATIO, 0xf0 },
{ STV090x_P1_SFRLOWRATIO, 0x70 },
{ STV090x_P1_TSCFGL, 0x20 },
{ STV090x_P1_FECSPY, 0x88 },
{ STV090x_P1_FSPYDATA, 0x3a },
{ STV090x_P1_FBERCPT4, 0x00 },
{ STV090x_P1_FSPYBER, 0x10 },
{ STV090x_P1_ERRCTRL1, 0x35 },
{ STV090x_P1_ERRCTRL2, 0xc1 },
{ STV090x_P1_CFRICFG, 0xf8 },
{ STV090x_P1_NOSCFG, 0x1c },
{ STV090x_P1_DMDTOM, 0x20 },
{ STV090x_P1_CORRELMANT, 0x70 },
{ STV090x_P1_CORRELABS, 0x88 },
{ STV090x_P1_AGC2O, 0x5b },
{ STV090x_P1_AGC2REF, 0x38 },
{ STV090x_P1_CARCFG, 0xe4 },
{ STV090x_P1_ACLC, 0x1A },
{ STV090x_P1_BCLC, 0x09 },
{ STV090x_P1_CARHDR, 0x08 },
{ STV090x_P1_KREFTMG, 0xc1 },
{ STV090x_P1_SFRSTEP, 0x58 },
{ STV090x_P1_TMGCFG2, 0x01 },
{ STV090x_P1_CAR2CFG, 0x26 },
{ STV090x_P1_BCLC2S2Q, 0x86 },
{ STV090x_P1_BCLC2S28, 0x86 },
{ STV090x_P1_SMAPCOEF7, 0x77 },
{ STV090x_P1_SMAPCOEF6, 0x85 },
{ STV090x_P1_SMAPCOEF5, 0x77 },
{ STV090x_P1_DMDCFG2, 0x3b },
{ STV090x_P1_MODCODLST0, 0xff },
{ STV090x_P1_MODCODLST1, 0xff },
{ STV090x_P1_MODCODLST2, 0xff },
{ STV090x_P1_MODCODLST3, 0xff },
{ STV090x_P1_MODCODLST4, 0xff },
{ STV090x_P1_MODCODLST5, 0xff },
{ STV090x_P1_MODCODLST6, 0xff },
{ STV090x_P1_MODCODLST7, 0xcc },
{ STV090x_P1_MODCODLST8, 0xcc },
{ STV090x_P1_MODCODLST9, 0xcc },
{ STV090x_P1_MODCODLSTA, 0xcc },
{ STV090x_P1_MODCODLSTB, 0xcc },
{ STV090x_P1_MODCODLSTC, 0xcc },
{ STV090x_P1_MODCODLSTD, 0xcc },
{ STV090x_P1_MODCODLSTE, 0xcc },
{ STV090x_P1_MODCODLSTF, 0xcf },
{ STV090x_GENCFG, 0x1c },
{ STV090x_NBITER_NF4, 0x37 },
{ STV090x_NBITER_NF5, 0x29 },
{ STV090x_NBITER_NF6, 0x37 },
{ STV090x_NBITER_NF7, 0x33 },
{ STV090x_NBITER_NF8, 0x31 },
{ STV090x_NBITER_NF9, 0x2f },
{ STV090x_NBITER_NF10, 0x39 },
{ STV090x_NBITER_NF11, 0x3a },
{ STV090x_NBITER_NF12, 0x29 },
{ STV090x_NBITER_NF13, 0x37 },
{ STV090x_NBITER_NF14, 0x33 },
{ STV090x_NBITER_NF15, 0x2f },
{ STV090x_NBITER_NF16, 0x39 },
{ STV090x_NBITER_NF17, 0x3a },
{ STV090x_NBITERNOERR, 0x04 },
{ STV090x_GAINLLR_NF4, 0x0C },
{ STV090x_GAINLLR_NF5, 0x0F },
{ STV090x_GAINLLR_NF6, 0x11 },
{ STV090x_GAINLLR_NF7, 0x14 },
{ STV090x_GAINLLR_NF8, 0x17 },
{ STV090x_GAINLLR_NF9, 0x19 },
{ STV090x_GAINLLR_NF10, 0x20 },
{ STV090x_GAINLLR_NF11, 0x21 },
{ STV090x_GAINLLR_NF12, 0x0D },
{ STV090x_GAINLLR_NF13, 0x0F },
{ STV090x_GAINLLR_NF14, 0x13 },
{ STV090x_GAINLLR_NF15, 0x1A },
{ STV090x_GAINLLR_NF16, 0x1F },
{ STV090x_GAINLLR_NF17, 0x21 },
{ STV090x_RCCFGH, 0x20 },
{ STV090x_P1_FECM, 0x01 }, /*disable the DSS mode */
{ STV090x_P1_PRVIT, 0x2f } /*disable puncture rate 6/7*/
};
static struct stv090x_reg stv0900_cut20_val[] = {
{ STV090x_P2_DMDCFG3, 0xe8 },
{ STV090x_P2_DMDCFG4, 0x10 },
{ STV090x_P2_CARFREQ, 0x38 },
{ STV090x_P2_CARHDR, 0x20 },
{ STV090x_P2_KREFTMG, 0x5a },
{ STV090x_P2_SMAPCOEF7, 0x06 },
{ STV090x_P2_SMAPCOEF6, 0x00 },
{ STV090x_P2_SMAPCOEF5, 0x04 },
{ STV090x_P2_NOSCFG, 0x0c },
{ STV090x_P1_DMDCFG3, 0xe8 },
{ STV090x_P1_DMDCFG4, 0x10 },
{ STV090x_P1_CARFREQ, 0x38 },
{ STV090x_P1_CARHDR, 0x20 },
{ STV090x_P1_KREFTMG, 0x5a },
{ STV090x_P1_SMAPCOEF7, 0x06 },
{ STV090x_P1_SMAPCOEF6, 0x00 },
{ STV090x_P1_SMAPCOEF5, 0x04 },
{ STV090x_P1_NOSCFG, 0x0c },
{ STV090x_GAINLLR_NF4, 0x21 },
{ STV090x_GAINLLR_NF5, 0x21 },
{ STV090x_GAINLLR_NF6, 0x20 },
{ STV090x_GAINLLR_NF7, 0x1F },
{ STV090x_GAINLLR_NF8, 0x1E },
{ STV090x_GAINLLR_NF9, 0x1E },
{ STV090x_GAINLLR_NF10, 0x1D },
{ STV090x_GAINLLR_NF11, 0x1B },
{ STV090x_GAINLLR_NF12, 0x20 },
{ STV090x_GAINLLR_NF13, 0x20 },
{ STV090x_GAINLLR_NF14, 0x20 },
{ STV090x_GAINLLR_NF15, 0x20 },
{ STV090x_GAINLLR_NF16, 0x20 },
{ STV090x_GAINLLR_NF17, 0x21 },
};
static struct stv090x_reg stv0903_cut20_val[] = {
{ STV090x_P1_DMDCFG3, 0xe8 },
{ STV090x_P1_DMDCFG4, 0x10 },
{ STV090x_P1_CARFREQ, 0x38 },
{ STV090x_P1_CARHDR, 0x20 },
{ STV090x_P1_KREFTMG, 0x5a },
{ STV090x_P1_SMAPCOEF7, 0x06 },
{ STV090x_P1_SMAPCOEF6, 0x00 },
{ STV090x_P1_SMAPCOEF5, 0x04 },
{ STV090x_P1_NOSCFG, 0x0c },
{ STV090x_GAINLLR_NF4, 0x21 },
{ STV090x_GAINLLR_NF5, 0x21 },
{ STV090x_GAINLLR_NF6, 0x20 },
{ STV090x_GAINLLR_NF7, 0x1F },
{ STV090x_GAINLLR_NF8, 0x1E },
{ STV090x_GAINLLR_NF9, 0x1E },
{ STV090x_GAINLLR_NF10, 0x1D },
{ STV090x_GAINLLR_NF11, 0x1B },
{ STV090x_GAINLLR_NF12, 0x20 },
{ STV090x_GAINLLR_NF13, 0x20 },
{ STV090x_GAINLLR_NF14, 0x20 },
{ STV090x_GAINLLR_NF15, 0x20 },
{ STV090x_GAINLLR_NF16, 0x20 },
{ STV090x_GAINLLR_NF17, 0x21 }
};
/* Cut 2.0 Long Frame Tracking CR loop */
static struct stv090x_long_frame_crloop stv090x_s2_crl_cut20[] = {
/* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
{ STV090x_QPSK_12, 0x1f, 0x3f, 0x1e, 0x3f, 0x3d, 0x1f, 0x3d, 0x3e, 0x3d, 0x1e },
{ STV090x_QPSK_35, 0x2f, 0x3f, 0x2e, 0x2f, 0x3d, 0x0f, 0x0e, 0x2e, 0x3d, 0x0e },
{ STV090x_QPSK_23, 0x2f, 0x3f, 0x2e, 0x2f, 0x0e, 0x0f, 0x0e, 0x1e, 0x3d, 0x3d },
{ STV090x_QPSK_34, 0x3f, 0x3f, 0x3e, 0x1f, 0x0e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
{ STV090x_QPSK_45, 0x3f, 0x3f, 0x3e, 0x1f, 0x0e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
{ STV090x_QPSK_56, 0x3f, 0x3f, 0x3e, 0x1f, 0x0e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
{ STV090x_QPSK_89, 0x3f, 0x3f, 0x3e, 0x1f, 0x1e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
{ STV090x_QPSK_910, 0x3f, 0x3f, 0x3e, 0x1f, 0x1e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
{ STV090x_8PSK_35, 0x3c, 0x3e, 0x1c, 0x2e, 0x0c, 0x1e, 0x2b, 0x2d, 0x1b, 0x1d },
{ STV090x_8PSK_23, 0x1d, 0x3e, 0x3c, 0x2e, 0x2c, 0x1e, 0x0c, 0x2d, 0x2b, 0x1d },
{ STV090x_8PSK_34, 0x0e, 0x3e, 0x3d, 0x2e, 0x0d, 0x1e, 0x2c, 0x2d, 0x0c, 0x1d },
{ STV090x_8PSK_56, 0x2e, 0x3e, 0x1e, 0x2e, 0x2d, 0x1e, 0x3c, 0x2d, 0x2c, 0x1d },
{ STV090x_8PSK_89, 0x3e, 0x3e, 0x1e, 0x2e, 0x3d, 0x1e, 0x0d, 0x2d, 0x3c, 0x1d },
{ STV090x_8PSK_910, 0x3e, 0x3e, 0x1e, 0x2e, 0x3d, 0x1e, 0x1d, 0x2d, 0x0d, 0x1d }
};
/* Cut 3.0 Long Frame Tracking CR loop */
static struct stv090x_long_frame_crloop stv090x_s2_crl_cut30[] = {
/* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
{ STV090x_QPSK_12, 0x3c, 0x2c, 0x0c, 0x2c, 0x1b, 0x2c, 0x1b, 0x1c, 0x0b, 0x3b },
{ STV090x_QPSK_35, 0x0d, 0x0d, 0x0c, 0x0d, 0x1b, 0x3c, 0x1b, 0x1c, 0x0b, 0x3b },
{ STV090x_QPSK_23, 0x1d, 0x0d, 0x0c, 0x1d, 0x2b, 0x3c, 0x1b, 0x1c, 0x0b, 0x3b },
{ STV090x_QPSK_34, 0x1d, 0x1d, 0x0c, 0x1d, 0x2b, 0x3c, 0x1b, 0x1c, 0x0b, 0x3b },
{ STV090x_QPSK_45, 0x2d, 0x1d, 0x1c, 0x1d, 0x2b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
{ STV090x_QPSK_56, 0x2d, 0x1d, 0x1c, 0x1d, 0x2b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
{ STV090x_QPSK_89, 0x3d, 0x2d, 0x1c, 0x1d, 0x3b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
{ STV090x_QPSK_910, 0x3d, 0x2d, 0x1c, 0x1d, 0x3b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
{ STV090x_8PSK_35, 0x39, 0x29, 0x39, 0x19, 0x19, 0x19, 0x19, 0x19, 0x09, 0x19 },
{ STV090x_8PSK_23, 0x2a, 0x39, 0x1a, 0x0a, 0x39, 0x0a, 0x29, 0x39, 0x29, 0x0a },
{ STV090x_8PSK_34, 0x2b, 0x3a, 0x1b, 0x1b, 0x3a, 0x1b, 0x1a, 0x0b, 0x1a, 0x3a },
{ STV090x_8PSK_56, 0x0c, 0x1b, 0x3b, 0x3b, 0x1b, 0x3b, 0x3a, 0x3b, 0x3a, 0x1b },
{ STV090x_8PSK_89, 0x0d, 0x3c, 0x2c, 0x2c, 0x2b, 0x0c, 0x0b, 0x3b, 0x0b, 0x1b },
{ STV090x_8PSK_910, 0x0d, 0x0d, 0x2c, 0x3c, 0x3b, 0x1c, 0x0b, 0x3b, 0x0b, 0x1b }
};
/* Cut 2.0 Long Frame Tracking CR Loop */
static struct stv090x_long_frame_crloop stv090x_s2_apsk_crl_cut20[] = {
/* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
{ STV090x_16APSK_23, 0x0c, 0x0c, 0x0c, 0x0c, 0x1d, 0x0c, 0x3c, 0x0c, 0x2c, 0x0c },
{ STV090x_16APSK_34, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0c, 0x2d, 0x0c, 0x1d, 0x0c },
{ STV090x_16APSK_45, 0x0c, 0x0c, 0x0c, 0x0c, 0x1e, 0x0c, 0x3d, 0x0c, 0x2d, 0x0c },
{ STV090x_16APSK_56, 0x0c, 0x0c, 0x0c, 0x0c, 0x1e, 0x0c, 0x3d, 0x0c, 0x2d, 0x0c },
{ STV090x_16APSK_89, 0x0c, 0x0c, 0x0c, 0x0c, 0x2e, 0x0c, 0x0e, 0x0c, 0x3d, 0x0c },
{ STV090x_16APSK_910, 0x0c, 0x0c, 0x0c, 0x0c, 0x2e, 0x0c, 0x0e, 0x0c, 0x3d, 0x0c },
{ STV090x_32APSK_34, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
{ STV090x_32APSK_45, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
{ STV090x_32APSK_56, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
{ STV090x_32APSK_89, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
{ STV090x_32APSK_910, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c }
};
/* Cut 3.0 Long Frame Tracking CR Loop */
static struct stv090x_long_frame_crloop stv090x_s2_apsk_crl_cut30[] = {
/* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
{ STV090x_16APSK_23, 0x0a, 0x0a, 0x0a, 0x0a, 0x1a, 0x0a, 0x3a, 0x0a, 0x2a, 0x0a },
{ STV090x_16APSK_34, 0x0a, 0x0a, 0x0a, 0x0a, 0x0b, 0x0a, 0x3b, 0x0a, 0x1b, 0x0a },
{ STV090x_16APSK_45, 0x0a, 0x0a, 0x0a, 0x0a, 0x1b, 0x0a, 0x3b, 0x0a, 0x2b, 0x0a },
{ STV090x_16APSK_56, 0x0a, 0x0a, 0x0a, 0x0a, 0x1b, 0x0a, 0x3b, 0x0a, 0x2b, 0x0a },
{ STV090x_16APSK_89, 0x0a, 0x0a, 0x0a, 0x0a, 0x2b, 0x0a, 0x0c, 0x0a, 0x3b, 0x0a },
{ STV090x_16APSK_910, 0x0a, 0x0a, 0x0a, 0x0a, 0x2b, 0x0a, 0x0c, 0x0a, 0x3b, 0x0a },
{ STV090x_32APSK_34, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
{ STV090x_32APSK_45, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
{ STV090x_32APSK_56, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
{ STV090x_32APSK_89, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
{ STV090x_32APSK_910, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a }
};
static struct stv090x_long_frame_crloop stv090x_s2_lowqpsk_crl_cut20[] = {
/* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
{ STV090x_QPSK_14, 0x0f, 0x3f, 0x0e, 0x3f, 0x2d, 0x2f, 0x2d, 0x1f, 0x3d, 0x3e },
{ STV090x_QPSK_13, 0x0f, 0x3f, 0x0e, 0x3f, 0x2d, 0x2f, 0x3d, 0x0f, 0x3d, 0x2e },
{ STV090x_QPSK_25, 0x1f, 0x3f, 0x1e, 0x3f, 0x3d, 0x1f, 0x3d, 0x3e, 0x3d, 0x2e }
};
static struct stv090x_long_frame_crloop stv090x_s2_lowqpsk_crl_cut30[] = {
/* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
{ STV090x_QPSK_14, 0x0c, 0x3c, 0x0b, 0x3c, 0x2a, 0x2c, 0x2a, 0x1c, 0x3a, 0x3b },
{ STV090x_QPSK_13, 0x0c, 0x3c, 0x0b, 0x3c, 0x2a, 0x2c, 0x3a, 0x0c, 0x3a, 0x2b },
{ STV090x_QPSK_25, 0x1c, 0x3c, 0x1b, 0x3c, 0x3a, 0x1c, 0x3a, 0x3b, 0x3a, 0x2b }
};
/* Cut 2.0 Short Frame Tracking CR Loop */
static struct stv090x_short_frame_crloop stv090x_s2_short_crl_cut20[] = {
/* MODCOD 2M 5M 10M 20M 30M */
{ STV090x_QPSK, 0x2f, 0x2e, 0x0e, 0x0e, 0x3d },
{ STV090x_8PSK, 0x3e, 0x0e, 0x2d, 0x0d, 0x3c },
{ STV090x_16APSK, 0x1e, 0x1e, 0x1e, 0x3d, 0x2d },
{ STV090x_32APSK, 0x1e, 0x1e, 0x1e, 0x3d, 0x2d }
};
/* Cut 3.0 Short Frame Tracking CR Loop */
static struct stv090x_short_frame_crloop stv090x_s2_short_crl_cut30[] = {
/* MODCOD 2M 5M 10M 20M 30M */
{ STV090x_QPSK, 0x2C, 0x2B, 0x0B, 0x0B, 0x3A },
{ STV090x_8PSK, 0x3B, 0x0B, 0x2A, 0x0A, 0x39 },
{ STV090x_16APSK, 0x1B, 0x1B, 0x1B, 0x3A, 0x2A },
{ STV090x_32APSK, 0x1B, 0x1B, 0x1B, 0x3A, 0x2A }
};
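/*
 * comp2() sign-extends a __width bit two's complement value into a
 * signed 32-bit integer, e.g. comp2(0xFFFF, 16) == -1 while
 * comp2(0x0010, 16) == 16. It is used below to interpret the 16/24 bit
 * carrier and timing offset registers as signed quantities.
 */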
static inline s32 comp2(s32 __x, s32 __width)
{
if (__width == 32)
return __x;
else
return (__x >= (1 << (__width - 1))) ? (__x - (1 << __width)) : __x;
}
static int stv090x_read_reg(struct stv090x_state *state, unsigned int reg)
{
const struct stv090x_config *config = state->config;
int ret;
u8 b0[] = { reg >> 8, reg & 0xff };
u8 buf;
struct i2c_msg msg[] = {
{ .addr = config->address, .flags = 0, .buf = b0, .len = 2 },
{ .addr = config->address, .flags = I2C_M_RD, .buf = &buf, .len = 1 }
};
ret = i2c_transfer(state->i2c, msg, 2);
if (ret != 2) {
if (ret != -ERESTARTSYS)
dprintk(FE_ERROR, 1,
"Read error, Reg=[0x%02x], Status=%d",
reg, ret);
return ret < 0 ? ret : -EREMOTEIO;
}
if (unlikely(*state->verbose >= FE_DEBUGREG))
dprintk(FE_ERROR, 1, "Reg=[0x%02x], data=%02x",
reg, buf);
return (unsigned int) buf;
}
static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8 *data, u32 count)
{
const struct stv090x_config *config = state->config;
int ret;
u8 buf[MAX_XFER_SIZE];
struct i2c_msg i2c_msg = { .addr = config->address, .flags = 0, .buf = buf, .len = 2 + count };
if (2 + count > sizeof(buf)) {
printk(KERN_WARNING
"%s: i2c wr reg=%04x: len=%d is too big!\n",
KBUILD_MODNAME, reg, count);
return -EINVAL;
}
buf[0] = reg >> 8;
buf[1] = reg & 0xff;
memcpy(&buf[2], data, count);
if (unlikely(*state->verbose >= FE_DEBUGREG)) {
int i;
printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
for (i = 0; i < count; i++)
printk(" %02x", data[i]);
printk("\n");
}
ret = i2c_transfer(state->i2c, &i2c_msg, 1);
if (ret != 1) {
if (ret != -ERESTARTSYS)
dprintk(FE_ERROR, 1, "Reg=[0x%04x], Data=[0x%02x ...], Count=%u, Status=%d",
reg, data[0], count, ret);
return ret < 0 ? ret : -EREMOTEIO;
}
return 0;
}
static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 data)
{
return stv090x_write_regs(state, reg, &data, 1);
}
static int stv090x_i2c_gate_ctrl(struct stv090x_state *state, int enable)
{
u32 reg;
/*
* NOTE! A lock is used here as an FSM to serialize gate access between
* the two tuners on the same demod. This is not a lock protecting a
* critical section, which could otherwise be confused with protecting
* I/O access to the demodulator gate.
* On any error, the lock is released from within the relevant
* operations themselves before returning.
*/
if (enable) {
if (state->config->tuner_i2c_lock)
state->config->tuner_i2c_lock(&state->frontend, 1);
else
mutex_lock(&state->internal->tuner_lock);
}
reg = STV090x_READ_DEMOD(state, I2CRPT);
if (enable) {
dprintk(FE_DEBUG, 1, "Enable Gate");
STV090x_SETFIELD_Px(reg, I2CT_ON_FIELD, 1);
if (STV090x_WRITE_DEMOD(state, I2CRPT, reg) < 0)
goto err;
} else {
dprintk(FE_DEBUG, 1, "Disable Gate");
STV090x_SETFIELD_Px(reg, I2CT_ON_FIELD, 0);
if ((STV090x_WRITE_DEMOD(state, I2CRPT, reg)) < 0)
goto err;
}
if (!enable) {
if (state->config->tuner_i2c_lock)
state->config->tuner_i2c_lock(&state->frontend, 0);
else
mutex_unlock(&state->internal->tuner_lock);
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
if (state->config->tuner_i2c_lock)
state->config->tuner_i2c_lock(&state->frontend, 0);
else
mutex_unlock(&state->internal->tuner_lock);
return -1;
}
static void stv090x_get_lock_tmg(struct stv090x_state *state)
{
switch (state->algo) {
case STV090x_BLIND_SEARCH:
dprintk(FE_DEBUG, 1, "Blind Search");
if (state->srate <= 1500000) { /* SR <= 1.5 Msps */
state->DemodTimeout = 1500;
state->FecTimeout = 400;
} else if (state->srate <= 5000000) { /* 1.5 Msps < SR <= 5 Msps */
state->DemodTimeout = 1000;
state->FecTimeout = 300;
} else { /* SR > 5 Msps */
state->DemodTimeout = 700;
state->FecTimeout = 100;
}
break;
case STV090x_COLD_SEARCH:
case STV090x_WARM_SEARCH:
default:
dprintk(FE_DEBUG, 1, "Normal Search");
if (state->srate <= 1000000) { /*SR <=1Msps*/
state->DemodTimeout = 4500;
state->FecTimeout = 1700;
} else if (state->srate <= 2000000) { /*1Msps < SR <= 2Msps */
state->DemodTimeout = 2500;
state->FecTimeout = 1100;
} else if (state->srate <= 5000000) { /*2Msps < SR <= 5Msps */
state->DemodTimeout = 1000;
state->FecTimeout = 550;
} else if (state->srate <= 10000000) { /*5Msps < SR <= 10Msps */
state->DemodTimeout = 700;
state->FecTimeout = 250;
} else if (state->srate <= 20000000) { /*10Msps < SR <= 20Msps */
state->DemodTimeout = 400;
state->FecTimeout = 130;
} else { /*SR >20Msps*/
state->DemodTimeout = 300;
state->FecTimeout = 100;
}
break;
}
if (state->algo == STV090x_WARM_SEARCH)
state->DemodTimeout /= 2;
}
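/*
 * stv090x_set_srate() programs SFRINIT with the symbol rate expressed
 * in units of mclk / 2^16, i.e. sym = srate * 65536 / mclk. The three
 * branches balance the shifts (4+12, 6+10 and 9+7 all sum to 16) so the
 * intermediate product stays within 32 bits for high, medium and low
 * symbol rates respectively. stv090x_set_max_srate() and
 * stv090x_set_min_srate() use the same conversion for SFRUP/SFRLOW but
 * add a +5% / -5% margin first.
 */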
static int stv090x_set_srate(struct stv090x_state *state, u32 srate)
{
u32 sym;
if (srate > 60000000) {
sym = (srate << 4); /* SR * 2^16 / master_clk */
sym /= (state->internal->mclk >> 12);
} else if (srate > 6000000) {
sym = (srate << 6);
sym /= (state->internal->mclk >> 10);
} else {
sym = (srate << 9);
sym /= (state->internal->mclk >> 7);
}
if (STV090x_WRITE_DEMOD(state, SFRINIT1, (sym >> 8) & 0x7f) < 0) /* MSB */
goto err;
if (STV090x_WRITE_DEMOD(state, SFRINIT0, (sym & 0xff)) < 0) /* LSB */
goto err;
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_set_max_srate(struct stv090x_state *state, u32 clk, u32 srate)
{
u32 sym;
srate = 105 * (srate / 100);
if (srate > 60000000) {
sym = (srate << 4); /* SR * 2^16 / master_clk */
sym /= (state->internal->mclk >> 12);
} else if (srate > 6000000) {
sym = (srate << 6);
sym /= (state->internal->mclk >> 10);
} else {
sym = (srate << 9);
sym /= (state->internal->mclk >> 7);
}
if (sym < 0x7fff) {
if (STV090x_WRITE_DEMOD(state, SFRUP1, (sym >> 8) & 0x7f) < 0) /* MSB */
goto err;
if (STV090x_WRITE_DEMOD(state, SFRUP0, sym & 0xff) < 0) /* LSB */
goto err;
} else {
if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x7f) < 0) /* MSB */
goto err;
if (STV090x_WRITE_DEMOD(state, SFRUP0, 0xff) < 0) /* LSB */
goto err;
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_set_min_srate(struct stv090x_state *state, u32 clk, u32 srate)
{
u32 sym;
srate = 95 * (srate / 100);
if (srate > 60000000) {
sym = (srate << 4); /* SR * 2^16 / master_clk */
sym /= (state->internal->mclk >> 12);
} else if (srate > 6000000) {
sym = (srate << 6);
sym /= (state->internal->mclk >> 10);
} else {
sym = (srate << 9);
sym /= (state->internal->mclk >> 7);
}
if (STV090x_WRITE_DEMOD(state, SFRLOW1, ((sym >> 8) & 0x7f)) < 0) /* MSB */
goto err;
if (STV090x_WRITE_DEMOD(state, SFRLOW0, (sym & 0xff)) < 0) /* LSB */
goto err;
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static u32 stv090x_car_width(u32 srate, enum stv090x_rolloff rolloff)
{
u32 ro;
switch (rolloff) {
case STV090x_RO_20:
ro = 20;
break;
case STV090x_RO_25:
ro = 25;
break;
case STV090x_RO_35:
default:
ro = 35;
break;
}
return srate + (srate * ro) / 100;
}
static int stv090x_set_vit_thacq(struct stv090x_state *state)
{
if (STV090x_WRITE_DEMOD(state, VTH12, 0x96) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VTH23, 0x64) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VTH34, 0x36) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VTH56, 0x23) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VTH67, 0x1e) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VTH78, 0x19) < 0)
goto err;
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_set_vit_thtracq(struct stv090x_state *state)
{
if (STV090x_WRITE_DEMOD(state, VTH12, 0xd0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VTH23, 0x7d) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VTH34, 0x53) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VTH56, 0x2f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VTH67, 0x24) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VTH78, 0x1f) < 0)
goto err;
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_set_viterbi(struct stv090x_state *state)
{
switch (state->search_mode) {
case STV090x_SEARCH_AUTO:
if (STV090x_WRITE_DEMOD(state, FECM, 0x10) < 0) /* DVB-S and DVB-S2 */
goto err;
if (STV090x_WRITE_DEMOD(state, PRVIT, 0x3f) < 0) /* all puncture rates */
goto err;
break;
case STV090x_SEARCH_DVBS1:
if (STV090x_WRITE_DEMOD(state, FECM, 0x00) < 0) /* disable DSS */
goto err;
switch (state->fec) {
case STV090x_PR12:
if (STV090x_WRITE_DEMOD(state, PRVIT, 0x01) < 0)
goto err;
break;
case STV090x_PR23:
if (STV090x_WRITE_DEMOD(state, PRVIT, 0x02) < 0)
goto err;
break;
case STV090x_PR34:
if (STV090x_WRITE_DEMOD(state, PRVIT, 0x04) < 0)
goto err;
break;
case STV090x_PR56:
if (STV090x_WRITE_DEMOD(state, PRVIT, 0x08) < 0)
goto err;
break;
case STV090x_PR78:
if (STV090x_WRITE_DEMOD(state, PRVIT, 0x20) < 0)
goto err;
break;
default:
if (STV090x_WRITE_DEMOD(state, PRVIT, 0x2f) < 0) /* all */
goto err;
break;
}
break;
case STV090x_SEARCH_DSS:
if (STV090x_WRITE_DEMOD(state, FECM, 0x80) < 0)
goto err;
switch (state->fec) {
case STV090x_PR12:
if (STV090x_WRITE_DEMOD(state, PRVIT, 0x01) < 0)
goto err;
break;
case STV090x_PR23:
if (STV090x_WRITE_DEMOD(state, PRVIT, 0x02) < 0)
goto err;
break;
case STV090x_PR67:
if (STV090x_WRITE_DEMOD(state, PRVIT, 0x10) < 0)
goto err;
break;
default:
if (STV090x_WRITE_DEMOD(state, PRVIT, 0x13) < 0) /* 1/2, 2/3, 6/7 */
goto err;
break;
}
break;
default:
break;
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_stop_modcod(struct stv090x_state *state)
{
if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0xff) < 0)
goto err;
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_activate_modcod(struct stv090x_state *state)
{
if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xfc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0xcf) < 0)
goto err;
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_activate_modcod_single(struct stv090x_state *state)
{
if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xf0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0x0f) < 0)
goto err;
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_vitclk_ctl(struct stv090x_state *state, int enable)
{
u32 reg;
switch (state->demod) {
case STV090x_DEMODULATOR_0:
mutex_lock(&state->internal->demod_lock);
reg = stv090x_read_reg(state, STV090x_STOPCLK2);
STV090x_SETFIELD(reg, STOP_CLKVIT1_FIELD, enable);
if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
goto err;
mutex_unlock(&state->internal->demod_lock);
break;
case STV090x_DEMODULATOR_1:
mutex_lock(&state->internal->demod_lock);
reg = stv090x_read_reg(state, STV090x_STOPCLK2);
STV090x_SETFIELD(reg, STOP_CLKVIT2_FIELD, enable);
if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
goto err;
mutex_unlock(&state->internal->demod_lock);
break;
default:
dprintk(FE_ERROR, 1, "Wrong demodulator!");
break;
}
return 0;
err:
mutex_unlock(&state->internal->demod_lock);
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_dvbs_track_crl(struct stv090x_state *state)
{
if (state->internal->dev_ver >= 0x30) {
/* Set ACLC BCLC optimised value vs SR */
if (state->srate >= 15000000) {
if (STV090x_WRITE_DEMOD(state, ACLC, 0x2b) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, BCLC, 0x1a) < 0)
goto err;
} else if ((state->srate >= 7000000) && (15000000 > state->srate)) {
if (STV090x_WRITE_DEMOD(state, ACLC, 0x0c) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, BCLC, 0x1b) < 0)
goto err;
} else if (state->srate < 7000000) {
if (STV090x_WRITE_DEMOD(state, ACLC, 0x2c) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, BCLC, 0x1c) < 0)
goto err;
}
} else {
/* Cut 2.0 */
if (STV090x_WRITE_DEMOD(state, ACLC, 0x1a) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, BCLC, 0x09) < 0)
goto err;
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_delivery_search(struct stv090x_state *state)
{
u32 reg;
switch (state->search_mode) {
case STV090x_SEARCH_DVBS1:
case STV090x_SEARCH_DSS:
reg = STV090x_READ_DEMOD(state, DMDCFGMD);
STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
/* Activate Viterbi decoder in legacy search,
* do not use FRESVIT1, might impact VITERBI2
*/
if (stv090x_vitclk_ctl(state, 0) < 0)
goto err;
if (stv090x_dvbs_track_crl(state) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x22) < 0) /* disable DVB-S2 */
goto err;
if (stv090x_set_vit_thacq(state) < 0)
goto err;
if (stv090x_set_viterbi(state) < 0)
goto err;
break;
case STV090x_SEARCH_DVBS2:
reg = STV090x_READ_DEMOD(state, DMDCFGMD);
STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 0);
STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
if (stv090x_vitclk_ctl(state, 1) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, ACLC, 0x1a) < 0) /* stop DVB-S CR loop */
goto err;
if (STV090x_WRITE_DEMOD(state, BCLC, 0x09) < 0)
goto err;
if (state->internal->dev_ver <= 0x20) {
/* enable S2 carrier loop */
if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x26) < 0)
goto err;
} else {
/* > Cut 3: Stop carrier 3 */
if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x66) < 0)
goto err;
}
if (state->demod_mode != STV090x_SINGLE) {
/* Cut 2: enable link during search */
if (stv090x_activate_modcod(state) < 0)
goto err;
} else {
/* Single demodulator
* Authorize SHORT and LONG frames,
* QPSK, 8PSK, 16APSK and 32APSK
*/
if (stv090x_activate_modcod_single(state) < 0)
goto err;
}
if (stv090x_set_vit_thtracq(state) < 0)
goto err;
break;
case STV090x_SEARCH_AUTO:
default:
/* enable DVB-S1 and DVB-S2 in auto mode */
reg = STV090x_READ_DEMOD(state, DMDCFGMD);
STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 0);
STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
if (stv090x_vitclk_ctl(state, 0) < 0)
goto err;
if (stv090x_dvbs_track_crl(state) < 0)
goto err;
if (state->internal->dev_ver <= 0x20) {
/* enable S2 carrier loop */
if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x26) < 0)
goto err;
} else {
/* > Cut 3: Stop carrier 3 */
if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x66) < 0)
goto err;
}
if (state->demod_mode != STV090x_SINGLE) {
/* Cut 2: enable link during search */
if (stv090x_activate_modcod(state) < 0)
goto err;
} else {
/* Single demodulator
* Authorize SHORT and LONG frames,
* QPSK, 8PSK, 16APSK and 32APSK
*/
if (stv090x_activate_modcod_single(state) < 0)
goto err;
}
if (stv090x_set_vit_thacq(state) < 0)
goto err;
if (stv090x_set_viterbi(state) < 0)
goto err;
break;
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_start_search(struct stv090x_state *state)
{
u32 reg, freq_abs;
s16 freq;
/* Reset demodulator */
reg = STV090x_READ_DEMOD(state, DMDISTATE);
STV090x_SETFIELD_Px(reg, I2C_DEMOD_MODE_FIELD, 0x1f);
if (STV090x_WRITE_DEMOD(state, DMDISTATE, reg) < 0)
goto err;
if (state->internal->dev_ver <= 0x20) {
if (state->srate <= 5000000) {
if (STV090x_WRITE_DEMOD(state, CARCFG, 0x44) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRUP1, 0x0f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRUP0, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRLOW1, 0xf0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRLOW0, 0x00) < 0)
goto err;
/*enlarge the timing bandwidth for Low SR*/
if (STV090x_WRITE_DEMOD(state, RTCS2, 0x68) < 0)
goto err;
} else {
/* If the symbol rate is > 5 Msps,
set the carrier search up and low limits to auto mode */
if (STV090x_WRITE_DEMOD(state, CARCFG, 0xc4) < 0)
goto err;
/*reduce the timing bandwidth for high SR*/
if (STV090x_WRITE_DEMOD(state, RTCS2, 0x44) < 0)
goto err;
}
} else {
/* >= Cut 3 */
if (state->srate <= 5000000) {
/* enlarge the timing bandwidth for Low SR */
STV090x_WRITE_DEMOD(state, RTCS2, 0x68);
} else {
/* reduce timing bandwidth for high SR */
STV090x_WRITE_DEMOD(state, RTCS2, 0x44);
}
/* Set CFR min and max to manual mode */
STV090x_WRITE_DEMOD(state, CARCFG, 0x46);
if (state->algo == STV090x_WARM_SEARCH) {
/* WARM Start
* CFR min = -1MHz,
* CFR max = +1MHz
*/
freq_abs = 1000 << 16;
freq_abs /= (state->internal->mclk / 1000);
freq = (s16) freq_abs;
} else {
/* COLD Start
* CFR min =- (SearchRange / 2 + 600KHz)
* CFR max = +(SearchRange / 2 + 600KHz)
* (600KHz for the tuner step size)
*/
freq_abs = (state->search_range / 2000) + 600;
freq_abs = freq_abs << 16;
freq_abs /= (state->internal->mclk / 1000);
freq = (s16) freq_abs;
}
if (STV090x_WRITE_DEMOD(state, CFRUP1, MSB(freq)) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRUP0, LSB(freq)) < 0)
goto err;
freq *= -1;
if (STV090x_WRITE_DEMOD(state, CFRLOW1, MSB(freq)) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRLOW0, LSB(freq)) < 0)
goto err;
}
if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0) < 0)
goto err;
if (state->internal->dev_ver >= 0x20) {
if (STV090x_WRITE_DEMOD(state, EQUALCFG, 0x41) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, FFECFG, 0x41) < 0)
goto err;
if ((state->search_mode == STV090x_SEARCH_DVBS1) ||
(state->search_mode == STV090x_SEARCH_DSS) ||
(state->search_mode == STV090x_SEARCH_AUTO)) {
if (STV090x_WRITE_DEMOD(state, VITSCALE, 0x82) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VAVSRVIT, 0x00) < 0)
goto err;
}
}
if (STV090x_WRITE_DEMOD(state, SFRSTEP, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0xe0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0xc0) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, DMDCFGMD);
STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 0);
STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, DMDCFG2);
STV090x_SETFIELD_Px(reg, S1S2_SEQUENTIAL_FIELD, 0x0);
if (STV090x_WRITE_DEMOD(state, DMDCFG2, reg) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, RTC, 0x88) < 0)
goto err;
if (state->internal->dev_ver >= 0x20) {
/*Frequency offset detector setting*/
if (state->srate < 2000000) {
if (state->internal->dev_ver <= 0x20) {
/* Cut 2 */
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x39) < 0)
goto err;
} else {
/* Cut 3 */
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x89) < 0)
goto err;
}
if (STV090x_WRITE_DEMOD(state, CARHDR, 0x40) < 0)
goto err;
} else if (state->srate < 10000000) {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x4c) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CARHDR, 0x20) < 0)
goto err;
} else {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x4b) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CARHDR, 0x20) < 0)
goto err;
}
} else {
if (state->srate < 10000000) {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0xef) < 0)
goto err;
} else {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0xed) < 0)
goto err;
}
}
switch (state->algo) {
case STV090x_WARM_SEARCH:
/* The symbol rate and the exact
* carrier frequency are known
*/
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
goto err;
break;
case STV090x_COLD_SEARCH:
/* The symbol rate is known */
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
goto err;
break;
default:
break;
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
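/*
 * Sweep the carrier offset (CFRINIT) across the search range in 1 MHz
 * steps, alternating above and below the centre frequency, and return
 * the smallest AGC2 value seen (each point is the average of ten
 * AGC2I1/AGC2I0 reads). Blind search uses this minimum as a rough
 * signal-presence gate before attempting a symbol rate search.
 */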
static int stv090x_get_agc2_min_level(struct stv090x_state *state)
{
u32 agc2_min = 0xffff, agc2 = 0, freq_init, freq_step, reg;
s32 i, j, steps, dir;
if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, DMDCFGMD);
STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 0);
STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x83) < 0) /* SR = 65 Msps Max */
goto err;
if (STV090x_WRITE_DEMOD(state, SFRUP0, 0xc0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRLOW1, 0x82) < 0) /* SR= 400 ksps Min */
goto err;
if (STV090x_WRITE_DEMOD(state, SFRLOW0, 0xa0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x00) < 0) /* stop acq @ coarse carrier state */
goto err;
if (stv090x_set_srate(state, 1000000) < 0)
goto err;
steps = state->search_range / 1000000;
if (steps <= 0)
steps = 1;
dir = 1;
freq_step = (1000000 * 256) / (state->internal->mclk / 256);
freq_init = 0;
for (i = 0; i < steps; i++) {
if (dir > 0)
freq_init = freq_init + (freq_step * i);
else
freq_init = freq_init - (freq_step * i);
dir *= -1;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5c) < 0) /* Demod RESET */
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT1, (freq_init >> 8) & 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT0, freq_init & 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x58) < 0) /* Demod RESET */
goto err;
msleep(10);
agc2 = 0;
for (j = 0; j < 10; j++) {
agc2 += (STV090x_READ_DEMOD(state, AGC2I1) << 8) |
STV090x_READ_DEMOD(state, AGC2I0);
}
agc2 /= 10;
if (agc2 < agc2_min)
agc2_min = agc2;
}
return agc2_min;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
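/*
 * stv090x_get_srate() reads the 32-bit SFR register and returns the
 * symbol rate in Sps, roughly SFR * mclk / 2^32. The multiply is split
 * into 16-bit halves (int_1/int_2 and tmp_1/tmp_2) so the result can be
 * computed without 64-bit arithmetic; the tmp_1 * tmp_2 / 2^32 cross
 * term is small enough to be dropped.
 */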
static u32 stv090x_get_srate(struct stv090x_state *state, u32 clk)
{
u8 r3, r2, r1, r0;
s32 srate, int_1, int_2, tmp_1, tmp_2;
r3 = STV090x_READ_DEMOD(state, SFR3);
r2 = STV090x_READ_DEMOD(state, SFR2);
r1 = STV090x_READ_DEMOD(state, SFR1);
r0 = STV090x_READ_DEMOD(state, SFR0);
srate = ((r3 << 24) | (r2 << 16) | (r1 << 8) | r0);
int_1 = clk >> 16;
int_2 = srate >> 16;
tmp_1 = clk % 0x10000;
tmp_2 = srate % 0x10000;
srate = (int_1 * int_2) +
((int_1 * tmp_2) >> 16) +
((int_2 * tmp_1) >> 16);
return srate;
}
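/*
 * Coarse symbol rate search for blind acquisition: with SCAN_ENABLE set
 * and wide SFRUP/SFRLOW limits, the tuner is stepped in a zig-zag
 * pattern around the requested frequency (car_step depends on the
 * expected symbol rate). A step is accepted when the timing lock
 * quality is good in at least 5 of 10 samples, AGC2 stays below a
 * cut-dependent threshold and the measured coarse symbol rate falls
 * between 850 ksps and 50 Msps.
 */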
static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
{
struct dvb_frontend *fe = &state->frontend;
int tmg_lock = 0, i;
s32 tmg_cpt = 0, dir = 1, steps, cur_step = 0, freq;
u32 srate_coarse = 0, agc2 = 0, car_step = 1200, reg;
u32 agc2th;
if (state->internal->dev_ver >= 0x30)
agc2th = 0x2e00;
else
agc2th = 0x1f00;
reg = STV090x_READ_DEMOD(state, DMDISTATE);
STV090x_SETFIELD_Px(reg, I2C_DEMOD_MODE_FIELD, 0x1f); /* Demod RESET */
if (STV090x_WRITE_DEMOD(state, DMDISTATE, reg) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGCFG, 0x12) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0xf0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0xe0) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, DMDCFGMD);
STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 1);
STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x83) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRUP0, 0xc0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRLOW1, 0x82) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRLOW0, 0xa0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x50) < 0)
goto err;
if (state->internal->dev_ver >= 0x30) {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x99) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRSTEP, 0x98) < 0)
goto err;
} else if (state->internal->dev_ver >= 0x20) {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x6a) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRSTEP, 0x95) < 0)
goto err;
}
if (state->srate <= 2000000)
car_step = 1000;
else if (state->srate <= 5000000)
car_step = 2000;
else if (state->srate <= 12000000)
car_step = 3000;
else
car_step = 5000;
steps = -1 + ((state->search_range / 1000) / car_step);
steps /= 2;
steps = (2 * steps) + 1;
if (steps < 0)
steps = 1;
else if (steps > 10) {
steps = 11;
car_step = (state->search_range / 1000) / 10;
}
cur_step = 0;
dir = 1;
freq = state->frequency;
while ((!tmg_lock) && (cur_step < steps)) {
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5f) < 0) /* Demod RESET */
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRINIT1, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRINIT0, 0x00) < 0)
goto err;
/* trigger acquisition */
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x40) < 0)
goto err;
msleep(50);
for (i = 0; i < 10; i++) {
reg = STV090x_READ_DEMOD(state, DSTATUS);
if (STV090x_GETFIELD_Px(reg, TMGLOCK_QUALITY_FIELD) >= 2)
tmg_cpt++;
agc2 += (STV090x_READ_DEMOD(state, AGC2I1) << 8) |
STV090x_READ_DEMOD(state, AGC2I0);
}
agc2 /= 10;
srate_coarse = stv090x_get_srate(state, state->internal->mclk);
cur_step++;
dir *= -1;
if ((tmg_cpt >= 5) && (agc2 < agc2th) &&
(srate_coarse < 50000000) && (srate_coarse > 850000))
tmg_lock = 1;
else if (cur_step < steps) {
if (dir > 0)
freq += cur_step * car_step;
else
freq -= cur_step * car_step;
/* Setup tuner */
if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_frequency) {
if (state->config->tuner_set_frequency(fe, freq) < 0)
goto err_gateoff;
}
if (state->config->tuner_set_bandwidth) {
if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
goto err_gateoff;
}
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
msleep(50);
if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_status) {
if (state->config->tuner_get_status(fe, &reg) < 0)
goto err_gateoff;
}
if (reg)
dprintk(FE_DEBUG, 1, "Tuner phase locked");
else
dprintk(FE_DEBUG, 1, "Tuner unlocked");
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
}
}
if (!tmg_lock)
srate_coarse = 0;
else
srate_coarse = stv090x_get_srate(state, state->internal->mclk);
return srate_coarse;
err_gateoff:
stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
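/*
 * Fine symbol rate search: keep the carrier frequency found by the
 * coarse pass (CFR2/CFR1) and narrow the symbol rate window around the
 * coarse estimate (SFRUP at about +30%, SFRLOW at roughly -25/-30%,
 * SFRINIT at the estimate itself, all in mclk / 2^16 units), then
 * retrigger the acquisition. Returns 0 if the coarse estimate plus 30%
 * is still below the requested symbol rate.
 */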
static u32 stv090x_srate_srch_fine(struct stv090x_state *state)
{
u32 srate_coarse, freq_coarse, sym, reg;
srate_coarse = stv090x_get_srate(state, state->internal->mclk);
freq_coarse = STV090x_READ_DEMOD(state, CFR2) << 8;
freq_coarse |= STV090x_READ_DEMOD(state, CFR1);
sym = 13 * (srate_coarse / 10); /* SFRUP = SFR + 30% */
if (sym < state->srate)
srate_coarse = 0;
else {
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0) /* Demod RESET */
goto err;
if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc1) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0x20) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGCFG, 0xd2) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, DMDCFGMD);
STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0x00);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
goto err;
if (state->internal->dev_ver >= 0x30) {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x79) < 0)
goto err;
} else if (state->internal->dev_ver >= 0x20) {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x49) < 0)
goto err;
}
if (srate_coarse > 3000000) {
sym = 13 * (srate_coarse / 10); /* SFRUP = SFR + 30% */
sym = (sym / 1000) * 65536;
sym /= (state->internal->mclk / 1000);
if (STV090x_WRITE_DEMOD(state, SFRUP1, (sym >> 8) & 0x7f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRUP0, sym & 0xff) < 0)
goto err;
sym = 10 * (srate_coarse / 13); /* SFRLOW = SFR - 30% */
sym = (sym / 1000) * 65536;
sym /= (state->internal->mclk / 1000);
if (STV090x_WRITE_DEMOD(state, SFRLOW1, (sym >> 8) & 0x7f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRLOW0, sym & 0xff) < 0)
goto err;
sym = (srate_coarse / 1000) * 65536;
sym /= (state->internal->mclk / 1000);
if (STV090x_WRITE_DEMOD(state, SFRINIT1, (sym >> 8) & 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRINIT0, sym & 0xff) < 0)
goto err;
} else {
sym = 13 * (srate_coarse / 10); /* SFRUP = SFR + 30% */
sym = (sym / 100) * 65536;
sym /= (state->internal->mclk / 100);
if (STV090x_WRITE_DEMOD(state, SFRUP1, (sym >> 8) & 0x7f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRUP0, sym & 0xff) < 0)
goto err;
sym = 10 * (srate_coarse / 14); /* SFRLOW = SFR - 30% */
sym = (sym / 100) * 65536;
sym /= (state->internal->mclk / 100);
if (STV090x_WRITE_DEMOD(state, SFRLOW1, (sym >> 8) & 0x7f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRLOW0, sym & 0xff) < 0)
goto err;
sym = (srate_coarse / 100) * 65536;
sym /= (state->internal->mclk / 100);
if (STV090x_WRITE_DEMOD(state, SFRINIT1, (sym >> 8) & 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, SFRINIT0, sym & 0xff) < 0)
goto err;
}
if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x20) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT1, (freq_coarse >> 8) & 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT0, freq_coarse & 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0) /* trigger acquisition */
goto err;
}
return srate_coarse;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_get_dmdlock(struct stv090x_state *state, s32 timeout)
{
s32 timer = 0, lock = 0;
u32 reg;
u8 stat;
while ((timer < timeout) && (!lock)) {
reg = STV090x_READ_DEMOD(state, DMDSTATE);
stat = STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD);
switch (stat) {
case 0: /* searching */
case 1: /* first PLH detected */
default:
dprintk(FE_DEBUG, 1, "Demodulator searching ..");
lock = 0;
break;
case 2: /* DVB-S2 mode */
case 3: /* DVB-S1/legacy mode */
reg = STV090x_READ_DEMOD(state, DSTATUS);
lock = STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD);
break;
}
if (!lock)
msleep(10);
else
dprintk(FE_DEBUG, 1, "Demodulator acquired LOCK");
timer += 10;
}
return lock;
}
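/*
 * Blind search: first gate on the minimum AGC2 level, then sweep the
 * timing reference (KREFTMG) from k_max (110) down to k_min (10) in
 * steps of 20, running a coarse plus fine symbol rate search at each
 * value. The sweep is aborted early if more than 7 of 10 samples show a
 * carrier overflow with delock, or an AGC2 overflow (coarse_fail).
 */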
static int stv090x_blind_search(struct stv090x_state *state)
{
u32 agc2, reg, srate_coarse;
s32 cpt_fail, agc2_ovflw, i;
u8 k_ref, k_max, k_min;
int coarse_fail = 0;
int lock;
k_max = 110;
k_min = 10;
agc2 = stv090x_get_agc2_min_level(state);
if (agc2 > STV090x_SEARCH_AGC2_TH(state->internal->dev_ver)) {
lock = 0;
} else {
if (state->internal->dev_ver <= 0x20) {
if (STV090x_WRITE_DEMOD(state, CARCFG, 0xc4) < 0)
goto err;
} else {
/* > Cut 3 */
if (STV090x_WRITE_DEMOD(state, CARCFG, 0x06) < 0)
goto err;
}
if (STV090x_WRITE_DEMOD(state, RTCS2, 0x44) < 0)
goto err;
if (state->internal->dev_ver >= 0x20) {
if (STV090x_WRITE_DEMOD(state, EQUALCFG, 0x41) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, FFECFG, 0x41) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VITSCALE, 0x82) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VAVSRVIT, 0x00) < 0) /* set viterbi hysteresis */
goto err;
}
k_ref = k_max;
do {
if (STV090x_WRITE_DEMOD(state, KREFTMG, k_ref) < 0)
goto err;
if (stv090x_srate_srch_coarse(state) != 0) {
srate_coarse = stv090x_srate_srch_fine(state);
if (srate_coarse != 0) {
stv090x_get_lock_tmg(state);
lock = stv090x_get_dmdlock(state,
state->DemodTimeout);
} else {
lock = 0;
}
} else {
cpt_fail = 0;
agc2_ovflw = 0;
for (i = 0; i < 10; i++) {
agc2 += (STV090x_READ_DEMOD(state, AGC2I1) << 8) |
STV090x_READ_DEMOD(state, AGC2I0);
if (agc2 >= 0xff00)
agc2_ovflw++;
reg = STV090x_READ_DEMOD(state, DSTATUS2);
if ((STV090x_GETFIELD_Px(reg, CFR_OVERFLOW_FIELD) == 0x01) &&
(STV090x_GETFIELD_Px(reg, DEMOD_DELOCK_FIELD) == 0x01))
cpt_fail++;
}
if ((cpt_fail > 7) || (agc2_ovflw > 7))
coarse_fail = 1;
lock = 0;
}
k_ref -= 20;
} while ((k_ref >= k_min) && (!lock) && (!coarse_fail));
}
return lock;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_chk_tmg(struct stv090x_state *state)
{
u32 reg;
s32 tmg_cpt = 0, i;
u8 freq, tmg_thh, tmg_thl;
int tmg_lock = 0;
freq = STV090x_READ_DEMOD(state, CARFREQ);
tmg_thh = STV090x_READ_DEMOD(state, TMGTHRISE);
tmg_thl = STV090x_READ_DEMOD(state, TMGTHFALL);
if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0x20) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0x00) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, DMDCFGMD);
STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0x00); /* stop carrier offset search */
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, RTC, 0x80) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, RTCS2, 0x40) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0x00) < 0) /* set carrier offset to 0 */
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x65) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0) /* trigger acquisition */
goto err;
msleep(10);
for (i = 0; i < 10; i++) {
reg = STV090x_READ_DEMOD(state, DSTATUS);
if (STV090x_GETFIELD_Px(reg, TMGLOCK_QUALITY_FIELD) >= 2)
tmg_cpt++;
msleep(1);
}
if (tmg_cpt >= 3)
tmg_lock = 1;
if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, RTC, 0x88) < 0) /* DVB-S1 timing */
goto err;
if (STV090x_WRITE_DEMOD(state, RTCS2, 0x68) < 0) /* DVB-S2 timing */
goto err;
if (STV090x_WRITE_DEMOD(state, CARFREQ, freq) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGTHRISE, tmg_thh) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGTHFALL, tmg_thl) < 0)
goto err;
return tmg_lock;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
{
struct dvb_frontend *fe = &state->frontend;
u32 reg;
s32 car_step, steps, cur_step, dir, freq, timeout_lock;
int lock = 0;
if (state->srate >= 10000000)
timeout_lock = timeout_dmd / 3;
else
timeout_lock = timeout_dmd / 2;
lock = stv090x_get_dmdlock(state, timeout_lock); /* cold start wait */
if (!lock) {
if (state->srate >= 10000000) {
if (stv090x_chk_tmg(state)) {
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
goto err;
lock = stv090x_get_dmdlock(state, timeout_dmd);
} else {
lock = 0;
}
} else {
if (state->srate <= 4000000)
car_step = 1000;
else if (state->srate <= 7000000)
car_step = 2000;
else if (state->srate <= 10000000)
car_step = 3000;
else
car_step = 5000;
steps = (state->search_range / 1000) / car_step;
steps /= 2;
steps = 2 * (steps + 1);
if (steps < 0)
steps = 2;
else if (steps > 12)
steps = 12;
cur_step = 1;
dir = 1;
if (!lock) {
freq = state->frequency;
state->tuner_bw = stv090x_car_width(state->srate, state->rolloff) + state->srate;
while ((cur_step <= steps) && (!lock)) {
if (dir > 0)
freq += cur_step * car_step;
else
freq -= cur_step * car_step;
/* Setup tuner */
if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_frequency) {
if (state->config->tuner_set_frequency(fe, freq) < 0)
goto err_gateoff;
}
if (state->config->tuner_set_bandwidth) {
if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
goto err_gateoff;
}
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
msleep(50);
if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_status) {
if (state->config->tuner_get_status(fe, &reg) < 0)
goto err_gateoff;
}
if (reg)
dprintk(FE_DEBUG, 1, "Tuner phase locked");
else
dprintk(FE_DEBUG, 1, "Tuner unlocked");
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c);
if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
goto err;
lock = stv090x_get_dmdlock(state, (timeout_dmd / 3));
dir *= -1;
cur_step++;
}
}
}
}
return lock;
err_gateoff:
stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
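/*
 * Compute the parameters for the software carrier sweep: car_max is
 * half the search range (plus a 10% margin) converted to carrier
 * register units (freq_kHz * 2^16 / mclk_kHz) and clamped to 0x4000,
 * i.e. mclk / 4. The frequency increment is 3% (DVB-S1/DSS/auto) or 4%
 * (DVB-S2) of the symbol rate in the same units, and the per-step lock
 * timeout is scaled down from a 27.5 Msps reference.
 */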
static int stv090x_get_loop_params(struct stv090x_state *state, s32 *freq_inc, s32 *timeout_sw, s32 *steps)
{
s32 timeout, inc, steps_max, srate, car_max;
srate = state->srate;
car_max = state->search_range / 1000;
car_max += car_max / 10;
car_max = 65536 * (car_max / 2);
car_max /= (state->internal->mclk / 1000);
if (car_max > 0x4000)
car_max = 0x4000; /* max carrier should be <= +-1/4 Mclk */
inc = srate;
inc /= state->internal->mclk / 1000;
inc *= 256;
inc *= 256;
inc /= 1000;
switch (state->search_mode) {
case STV090x_SEARCH_DVBS1:
case STV090x_SEARCH_DSS:
inc *= 3; /* freq step = 3% of srate */
timeout = 20;
break;
case STV090x_SEARCH_DVBS2:
inc *= 4;
timeout = 25;
break;
case STV090x_SEARCH_AUTO:
default:
inc *= 3;
timeout = 25;
break;
}
inc /= 100;
if ((inc > car_max) || (inc < 0))
inc = car_max / 2; /* increment <= 1/8 Mclk */
timeout *= 27500; /* 27.5 Msps reference */
if (srate > 0)
timeout /= (srate / 1000);
if ((timeout > 100) || (timeout < 0))
timeout = 100;
steps_max = (car_max / inc) + 1; /* min steps = 3 */
if ((steps_max > 100) || (steps_max < 0)) {
steps_max = 100; /* max steps <= 100 */
inc = car_max / steps_max;
}
*freq_inc = inc;
*timeout_sw = timeout;
*steps = steps_max;
return 0;
}
static int stv090x_chk_signal(struct stv090x_state *state)
{
s32 offst_car, agc2, car_max;
int no_signal;
offst_car = STV090x_READ_DEMOD(state, CFR2) << 8;
offst_car |= STV090x_READ_DEMOD(state, CFR1);
offst_car = comp2(offst_car, 16);
agc2 = STV090x_READ_DEMOD(state, AGC2I1) << 8;
agc2 |= STV090x_READ_DEMOD(state, AGC2I0);
car_max = state->search_range / 1000;
car_max += (car_max / 10); /* 10% margin */
car_max = (65536 * car_max / 2);
car_max /= state->internal->mclk / 1000;
if (car_max > 0x4000)
car_max = 0x4000;
if ((agc2 > 0x2000) || (offst_car > 2 * car_max) || (offst_car < -2 * car_max)) {
no_signal = 1;
dprintk(FE_DEBUG, 1, "No Signal");
} else {
no_signal = 0;
dprintk(FE_DEBUG, 1, "Found Signal");
}
return no_signal;
}
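/*
 * Step CFRINIT through the carrier window and retrigger acquisition at
 * each point. In zigzag mode (DVB-S2) the offset alternates around 0
 * and grows by 2 * inc per step; otherwise it sweeps linearly upwards
 * from -car_max + inc. The DVB-S2 packet delineator is held in reset
 * (ALGOSWRST) while stepping and released at the end. The loop stops on
 * lock, on "no signal", when the window is exhausted or after steps_max
 * iterations.
 */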
static int stv090x_search_car_loop(struct stv090x_state *state, s32 inc, s32 timeout, int zigzag, s32 steps_max)
{
int no_signal, lock = 0;
s32 cpt_step = 0, offst_freq, car_max;
u32 reg;
car_max = state->search_range / 1000;
car_max += (car_max / 10);
car_max = (65536 * car_max / 2);
car_max /= (state->internal->mclk / 1000);
if (car_max > 0x4000)
car_max = 0x4000;
if (zigzag)
offst_freq = 0;
else
offst_freq = -car_max + inc;
do {
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT1, ((offst_freq / 256) & 0xff)) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT0, offst_freq & 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, PDELCTRL1);
STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0x1); /* stop DVB-S2 packet delin */
if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
goto err;
if (zigzag) {
if (offst_freq >= 0)
offst_freq = -offst_freq - 2 * inc;
else
offst_freq = -offst_freq;
} else {
offst_freq += 2 * inc;
}
cpt_step++;
lock = stv090x_get_dmdlock(state, timeout);
no_signal = stv090x_chk_signal(state);
} while ((!lock) &&
(!no_signal) &&
((offst_freq - inc) < car_max) &&
((offst_freq + inc) > -car_max) &&
(cpt_step < steps_max));
reg = STV090x_READ_DEMOD(state, PDELCTRL1);
STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
goto err;
return lock;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
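/*
 * Software zigzag search: run stv090x_search_car_loop() at most twice.
 * After a DVB-S2 lock, the fly-wheel counter (DMDFLYW) is sampled
 * twice; a value below 0xd means the demod is still losing frames, so
 * the lock is treated as false and the search registers are restored
 * for a second attempt.
 */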
static int stv090x_sw_algo(struct stv090x_state *state)
{
int no_signal, zigzag, lock = 0;
u32 reg;
s32 dvbs2_fly_wheel;
s32 inc, timeout_step, trials, steps_max;
/* get params */
stv090x_get_loop_params(state, &inc, &timeout_step, &steps_max);
switch (state->search_mode) {
case STV090x_SEARCH_DVBS1:
case STV090x_SEARCH_DSS:
/* accelerate the frequency detector */
if (state->internal->dev_ver >= 0x20) {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x3B) < 0)
goto err;
}
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0x49) < 0)
goto err;
zigzag = 0;
break;
case STV090x_SEARCH_DVBS2:
if (state->internal->dev_ver >= 0x20) {
if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x79) < 0)
goto err;
}
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0x89) < 0)
goto err;
zigzag = 1;
break;
case STV090x_SEARCH_AUTO:
default:
/* accelerate the frequency detector */
if (state->internal->dev_ver >= 0x20) {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x3b) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x79) < 0)
goto err;
}
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0xc9) < 0)
goto err;
zigzag = 0;
break;
}
trials = 0;
do {
lock = stv090x_search_car_loop(state, inc, timeout_step, zigzag, steps_max);
no_signal = stv090x_chk_signal(state);
trials++;
/*run the SW search 2 times maximum*/
if (lock || no_signal || (trials == 2)) {
/*Check if the demod is not losing lock in DVBS2*/
if (state->internal->dev_ver >= 0x20) {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x49) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x9e) < 0)
goto err;
}
reg = STV090x_READ_DEMOD(state, DMDSTATE);
if ((lock) && (STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD) == STV090x_DVBS2)) {
/*Check if the demod is not losing lock in DVBS2*/
msleep(timeout_step);
reg = STV090x_READ_DEMOD(state, DMDFLYW);
dvbs2_fly_wheel = STV090x_GETFIELD_Px(reg, FLYWHEEL_CPT_FIELD);
if (dvbs2_fly_wheel < 0xd) { /* if the number of correct frames is decreasing */
msleep(timeout_step);
reg = STV090x_READ_DEMOD(state, DMDFLYW);
dvbs2_fly_wheel = STV090x_GETFIELD_Px(reg, FLYWHEEL_CPT_FIELD);
}
if (dvbs2_fly_wheel < 0xd) {
/* false lock, the demod is losing lock */
lock = 0;
if (trials < 2) {
if (state->internal->dev_ver >= 0x20) {
if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x79) < 0)
goto err;
}
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0x89) < 0)
goto err;
}
}
}
}
} while ((!lock) && (trials < 2) && (!no_signal));
return lock;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static enum stv090x_delsys stv090x_get_std(struct stv090x_state *state)
{
u32 reg;
enum stv090x_delsys delsys;
reg = STV090x_READ_DEMOD(state, DMDSTATE);
if (STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD) == 2)
delsys = STV090x_DVBS2;
else if (STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD) == 3) {
reg = STV090x_READ_DEMOD(state, FECM);
if (STV090x_GETFIELD_Px(reg, DSS_DVB_FIELD) == 1)
delsys = STV090x_DSS;
else
delsys = STV090x_DVBS1;
} else {
delsys = STV090x_ERROR;
}
return delsys;
}
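/*
 * Derotator (carrier offset) read-back: CFR2..CFR0 form a signed 24-bit
 * value and the offset in Hz is mclk * derot / 2^24. As in
 * stv090x_get_srate(), the product is split into 12-bit halves so it
 * fits in 32-bit arithmetic.
 */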
/* in Hz */
static s32 stv090x_get_car_freq(struct stv090x_state *state, u32 mclk)
{
s32 derot, int_1, int_2, tmp_1, tmp_2;
derot = STV090x_READ_DEMOD(state, CFR2) << 16;
derot |= STV090x_READ_DEMOD(state, CFR1) << 8;
derot |= STV090x_READ_DEMOD(state, CFR0);
derot = comp2(derot, 24);
int_1 = mclk >> 12;
int_2 = derot >> 12;
/* carrier_frequency = MasterClock * Reg / 2^24 */
tmp_1 = mclk % 0x1000;
tmp_2 = derot % 0x1000;
derot = (int_1 * int_2) +
((int_1 * tmp_2) >> 12) +
((int_2 * tmp_1) >> 12);
return derot;
}
static int stv090x_get_viterbi(struct stv090x_state *state)
{
u32 reg, rate;
reg = STV090x_READ_DEMOD(state, VITCURPUN);
rate = STV090x_GETFIELD_Px(reg, VIT_CURPUN_FIELD);
switch (rate) {
case 13:
state->fec = STV090x_PR12;
break;
case 18:
state->fec = STV090x_PR23;
break;
case 21:
state->fec = STV090x_PR34;
break;
case 24:
state->fec = STV090x_PR56;
break;
case 25:
state->fec = STV090x_PR67;
break;
case 26:
state->fec = STV090x_PR78;
break;
default:
state->fec = STV090x_PRERR;
break;
}
return 0;
}
static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *state)
{
struct dvb_frontend *fe = &state->frontend;
u8 tmg;
u32 reg;
s32 i = 0, offst_freq;
msleep(5);
if (state->algo == STV090x_BLIND_SEARCH) {
tmg = STV090x_READ_DEMOD(state, TMGREG2);
STV090x_WRITE_DEMOD(state, SFRSTEP, 0x5c);
while ((i <= 50) && (tmg != 0) && (tmg != 0xff)) {
tmg = STV090x_READ_DEMOD(state, TMGREG2);
msleep(5);
i += 5;
}
}
state->delsys = stv090x_get_std(state);
if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_frequency) {
if (state->config->tuner_get_frequency(fe, &state->frequency) < 0)
goto err_gateoff;
}
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
offst_freq = stv090x_get_car_freq(state, state->internal->mclk) / 1000;
state->frequency += offst_freq;
if (stv090x_get_viterbi(state) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, DMDMODCOD);
state->modcod = STV090x_GETFIELD_Px(reg, DEMOD_MODCOD_FIELD);
state->pilots = STV090x_GETFIELD_Px(reg, DEMOD_TYPE_FIELD) & 0x01;
state->frame_len = STV090x_GETFIELD_Px(reg, DEMOD_TYPE_FIELD) >> 1;
reg = STV090x_READ_DEMOD(state, TMGOBS);
state->rolloff = STV090x_GETFIELD_Px(reg, ROLLOFF_STATUS_FIELD);
reg = STV090x_READ_DEMOD(state, FECM);
state->inversion = STV090x_GETFIELD_Px(reg, IQINV_FIELD);
if ((state->algo == STV090x_BLIND_SEARCH) || (state->srate < 10000000)) {
if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_frequency) {
if (state->config->tuner_get_frequency(fe, &state->frequency) < 0)
goto err_gateoff;
}
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
if (abs(offst_freq) <= ((state->search_range / 2000) + 500))
return STV090x_RANGEOK;
else if (abs(offst_freq) <= (stv090x_car_width(state->srate, state->rolloff) / 2000))
return STV090x_RANGEOK;
else
return STV090x_OUTOFRANGE; /* Out of Range */
} else {
if (abs(offst_freq) <= ((state->search_range / 2000) + 500))
return STV090x_RANGEOK;
else
return STV090x_OUTOFRANGE;
}
return STV090x_OUTOFRANGE;
err_gateoff:
stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static u32 stv090x_get_tmgoffst(struct stv090x_state *state, u32 srate)
{
s32 offst_tmg;
offst_tmg = STV090x_READ_DEMOD(state, TMGREG2) << 16;
offst_tmg |= STV090x_READ_DEMOD(state, TMGREG1) << 8;
offst_tmg |= STV090x_READ_DEMOD(state, TMGREG0);
offst_tmg = comp2(offst_tmg, 24); /* 2's complement */
if (!offst_tmg)
offst_tmg = 1;
offst_tmg = ((s32) srate * 10) / ((s32) 0x1000000 / offst_tmg);
offst_tmg /= 320;
return offst_tmg;
}
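/*
 * Pick the DVB-S2 carrier loop value (aclc) for the detected MODCOD:
 * low rate QPSK (1/4, 1/3, 2/5), normal QPSK/8PSK and 16/32APSK each
 * have their own cut-dependent table, indexed by pilots on/off
 * (16/32APSK always uses the pilots-on entries) and by symbol rate
 * bucket (<= 3, <= 7, <= 15, <= 25, > 25 Msps).
 */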
static u8 stv090x_optimize_carloop(struct stv090x_state *state, enum stv090x_modcod modcod, s32 pilots)
{
u8 aclc = 0x29;
s32 i;
struct stv090x_long_frame_crloop *car_loop, *car_loop_qpsk_low, *car_loop_apsk_low;
if (state->internal->dev_ver == 0x20) {
car_loop = stv090x_s2_crl_cut20;
car_loop_qpsk_low = stv090x_s2_lowqpsk_crl_cut20;
car_loop_apsk_low = stv090x_s2_apsk_crl_cut20;
} else {
/* >= Cut 3 */
car_loop = stv090x_s2_crl_cut30;
car_loop_qpsk_low = stv090x_s2_lowqpsk_crl_cut30;
car_loop_apsk_low = stv090x_s2_apsk_crl_cut30;
}
if (modcod < STV090x_QPSK_12) {
i = 0;
while ((i < 3) && (modcod != car_loop_qpsk_low[i].modcod))
i++;
if (i >= 3)
i = 2;
} else {
i = 0;
while ((i < 14) && (modcod != car_loop[i].modcod))
i++;
if (i >= 14) {
i = 0;
while ((i < 11) && (modcod != car_loop_apsk_low[i].modcod))
i++;
if (i >= 11)
i = 10;
}
}
if (modcod <= STV090x_QPSK_25) {
if (pilots) {
if (state->srate <= 3000000)
aclc = car_loop_qpsk_low[i].crl_pilots_on_2;
else if (state->srate <= 7000000)
aclc = car_loop_qpsk_low[i].crl_pilots_on_5;
else if (state->srate <= 15000000)
aclc = car_loop_qpsk_low[i].crl_pilots_on_10;
else if (state->srate <= 25000000)
aclc = car_loop_qpsk_low[i].crl_pilots_on_20;
else
aclc = car_loop_qpsk_low[i].crl_pilots_on_30;
} else {
if (state->srate <= 3000000)
aclc = car_loop_qpsk_low[i].crl_pilots_off_2;
else if (state->srate <= 7000000)
aclc = car_loop_qpsk_low[i].crl_pilots_off_5;
else if (state->srate <= 15000000)
aclc = car_loop_qpsk_low[i].crl_pilots_off_10;
else if (state->srate <= 25000000)
aclc = car_loop_qpsk_low[i].crl_pilots_off_20;
else
aclc = car_loop_qpsk_low[i].crl_pilots_off_30;
}
} else if (modcod <= STV090x_8PSK_910) {
if (pilots) {
if (state->srate <= 3000000)
aclc = car_loop[i].crl_pilots_on_2;
else if (state->srate <= 7000000)
aclc = car_loop[i].crl_pilots_on_5;
else if (state->srate <= 15000000)
aclc = car_loop[i].crl_pilots_on_10;
else if (state->srate <= 25000000)
aclc = car_loop[i].crl_pilots_on_20;
else
aclc = car_loop[i].crl_pilots_on_30;
} else {
if (state->srate <= 3000000)
aclc = car_loop[i].crl_pilots_off_2;
else if (state->srate <= 7000000)
aclc = car_loop[i].crl_pilots_off_5;
else if (state->srate <= 15000000)
aclc = car_loop[i].crl_pilots_off_10;
else if (state->srate <= 25000000)
aclc = car_loop[i].crl_pilots_off_20;
else
aclc = car_loop[i].crl_pilots_off_30;
}
} else { /* 16APSK and 32APSK */
if (state->srate <= 3000000)
aclc = car_loop_apsk_low[i].crl_pilots_on_2;
else if (state->srate <= 7000000)
aclc = car_loop_apsk_low[i].crl_pilots_on_5;
else if (state->srate <= 15000000)
aclc = car_loop_apsk_low[i].crl_pilots_on_10;
else if (state->srate <= 25000000)
aclc = car_loop_apsk_low[i].crl_pilots_on_20;
else
aclc = car_loop_apsk_low[i].crl_pilots_on_30;
}
return aclc;
}
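/* Select the DVB-S2 short-frame carrier loop coefficient, indexed by
 * modulation and symbol rate.
 */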
static u8 stv090x_optimize_carloop_short(struct stv090x_state *state)
{
struct stv090x_short_frame_crloop *short_crl = NULL;
s32 index = 0;
u8 aclc = 0x0b;
switch (state->modulation) {
case STV090x_QPSK:
default:
index = 0;
break;
case STV090x_8PSK:
index = 1;
break;
case STV090x_16APSK:
index = 2;
break;
case STV090x_32APSK:
index = 3;
break;
}
if (state->internal->dev_ver >= 0x30) {
/* Cut 3.0 and up */
short_crl = stv090x_s2_short_crl_cut30;
} else {
/* Cut 2.0 and up: we don't support cuts older than 2.0 */
short_crl = stv090x_s2_short_crl_cut20;
}
if (state->srate <= 3000000)
aclc = short_crl[index].crl_2;
else if (state->srate <= 7000000)
aclc = short_crl[index].crl_5;
else if (state->srate <= 15000000)
aclc = short_crl[index].crl_10;
else if (state->srate <= 25000000)
aclc = short_crl[index].crl_20;
else
aclc = short_crl[index].crl_30;
return aclc;
}
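/* Fine-tune the demodulator for tracking once a carrier has been found:
 * program the delivery-system specific registers, the carrier loop, the
 * tuner bandwidth and the carrier frequency init values, then re-check
 * demodulator lock.
 */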
static int stv090x_optimize_track(struct stv090x_state *state)
{
struct dvb_frontend *fe = &state->frontend;
enum stv090x_modcod modcod;
s32 srate, pilots, aclc, f_1, f_0, i = 0, blind_tune = 0;
u32 reg;
srate = stv090x_get_srate(state, state->internal->mclk);
srate += stv090x_get_tmgoffst(state, srate);
switch (state->delsys) {
case STV090x_DVBS1:
case STV090x_DSS:
if (state->search_mode == STV090x_SEARCH_AUTO) {
reg = STV090x_READ_DEMOD(state, DMDCFGMD);
STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
}
reg = STV090x_READ_DEMOD(state, DEMOD);
STV090x_SETFIELD_Px(reg, ROLLOFF_CONTROL_FIELD, state->rolloff);
STV090x_SETFIELD_Px(reg, MANUAL_SXROLLOFF_FIELD, 0x01);
if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
goto err;
if (state->internal->dev_ver >= 0x30) {
if (stv090x_get_viterbi(state) < 0)
goto err;
if (state->fec == STV090x_PR12) {
if (STV090x_WRITE_DEMOD(state, GAUSSR0, 0x98) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CCIR0, 0x18) < 0)
goto err;
} else {
if (STV090x_WRITE_DEMOD(state, GAUSSR0, 0x18) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CCIR0, 0x18) < 0)
goto err;
}
}
if (STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x75) < 0)
goto err;
break;
case STV090x_DVBS2:
reg = STV090x_READ_DEMOD(state, DMDCFGMD);
STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 0);
STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
if (state->internal->dev_ver >= 0x30) {
if (STV090x_WRITE_DEMOD(state, ACLC, 0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, BCLC, 0) < 0)
goto err;
}
if (state->frame_len == STV090x_LONG_FRAME) {
reg = STV090x_READ_DEMOD(state, DMDMODCOD);
modcod = STV090x_GETFIELD_Px(reg, DEMOD_MODCOD_FIELD);
pilots = STV090x_GETFIELD_Px(reg, DEMOD_TYPE_FIELD) & 0x01;
aclc = stv090x_optimize_carloop(state, modcod, pilots);
if (modcod <= STV090x_QPSK_910) {
STV090x_WRITE_DEMOD(state, ACLC2S2Q, aclc);
} else if (modcod <= STV090x_8PSK_910) {
if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, ACLC2S28, aclc) < 0)
goto err;
}
if ((state->demod_mode == STV090x_SINGLE) && (modcod > STV090x_8PSK_910)) {
if (modcod <= STV090x_16APSK_910) {
if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, ACLC2S216A, aclc) < 0)
goto err;
} else {
if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, ACLC2S232A, aclc) < 0)
goto err;
}
}
} else {
/*Carrier loop setting for short frame*/
aclc = stv090x_optimize_carloop_short(state);
if (state->modulation == STV090x_QPSK) {
if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, aclc) < 0)
goto err;
} else if (state->modulation == STV090x_8PSK) {
if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, ACLC2S28, aclc) < 0)
goto err;
} else if (state->modulation == STV090x_16APSK) {
if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, ACLC2S216A, aclc) < 0)
goto err;
} else if (state->modulation == STV090x_32APSK) {
if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, ACLC2S232A, aclc) < 0)
goto err;
}
}
STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x67); /* PER */
break;
case STV090x_ERROR:
default:
reg = STV090x_READ_DEMOD(state, DMDCFGMD);
STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
break;
}
f_1 = STV090x_READ_DEMOD(state, CFR2);
f_0 = STV090x_READ_DEMOD(state, CFR1);
reg = STV090x_READ_DEMOD(state, TMGOBS);
if (state->algo == STV090x_BLIND_SEARCH) {
STV090x_WRITE_DEMOD(state, SFRSTEP, 0x00);
reg = STV090x_READ_DEMOD(state, DMDCFGMD);
STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 0x00);
STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0x00);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc1) < 0)
goto err;
if (stv090x_set_srate(state, srate) < 0)
goto err;
blind_tune = 1;
if (stv090x_dvbs_track_crl(state) < 0)
goto err;
}
if (state->internal->dev_ver >= 0x20) {
if ((state->search_mode == STV090x_SEARCH_DVBS1) ||
(state->search_mode == STV090x_SEARCH_DSS) ||
(state->search_mode == STV090x_SEARCH_AUTO)) {
if (STV090x_WRITE_DEMOD(state, VAVSRVIT, 0x0a) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VITSCALE, 0x00) < 0)
goto err;
}
}
if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
goto err;
/* AUTO tracking MODE */
if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x80) < 0)
goto err;
/* AUTO tracking MODE */
if (STV090x_WRITE_DEMOD(state, SFRLOW1, 0x80) < 0)
goto err;
if ((state->internal->dev_ver >= 0x20) || (blind_tune == 1) ||
(state->srate < 10000000)) {
/* update initial carrier freq with the found freq offset */
if (STV090x_WRITE_DEMOD(state, CFRINIT1, f_1) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT0, f_0) < 0)
goto err;
state->tuner_bw = stv090x_car_width(srate, state->rolloff) + 10000000;
if ((state->internal->dev_ver >= 0x20) || (blind_tune == 1)) {
if (state->algo != STV090x_WARM_SEARCH) {
if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_bandwidth) {
if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
goto err_gateoff;
}
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
}
}
if ((state->algo == STV090x_BLIND_SEARCH) || (state->srate < 10000000))
msleep(50); /* blind search: wait 50ms for SR stabilization */
else
msleep(5);
stv090x_get_lock_tmg(state);
if (!(stv090x_get_dmdlock(state, (state->DemodTimeout / 2)))) {
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT1, f_1) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT0, f_0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
goto err;
i = 0;
while ((!(stv090x_get_dmdlock(state, (state->DemodTimeout / 2)))) && (i <= 2)) {
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT1, f_1) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT0, f_0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
goto err;
i++;
}
}
}
if (state->internal->dev_ver >= 0x20) {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x49) < 0)
goto err;
}
if ((state->delsys == STV090x_DVBS1) || (state->delsys == STV090x_DSS))
stv090x_set_vit_thtracq(state);
return 0;
err_gateoff:
stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
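/* Poll for FEC lock within 'timeout' ms: packet delineator lock in DVB-S2
 * mode, Viterbi lock in DVB-S/DSS mode.
 */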
static int stv090x_get_feclock(struct stv090x_state *state, s32 timeout)
{
s32 timer = 0, lock = 0, stat;
u32 reg;
while ((timer < timeout) && (!lock)) {
reg = STV090x_READ_DEMOD(state, DMDSTATE);
stat = STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD);
switch (stat) {
case 0: /* searching */
case 1: /* first PLH detected */
default:
lock = 0;
break;
case 2: /* DVB-S2 mode */
reg = STV090x_READ_DEMOD(state, PDELSTATUS1);
lock = STV090x_GETFIELD_Px(reg, PKTDELIN_LOCK_FIELD);
break;
case 3: /* DVB-S1/legacy mode */
reg = STV090x_READ_DEMOD(state, VSTATUSVIT);
lock = STV090x_GETFIELD_Px(reg, LOCKEDVIT_FIELD);
break;
}
if (!lock) {
msleep(10);
timer += 10;
}
}
return lock;
}
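/* Wait for demodulator lock, then FEC lock, then a valid transport stream
 * (TSFIFO_LINEOK). Returns non-zero when fully locked.
 */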
static int stv090x_get_lock(struct stv090x_state *state, s32 timeout_dmd, s32 timeout_fec)
{
u32 reg;
s32 timer = 0;
int lock;
lock = stv090x_get_dmdlock(state, timeout_dmd);
if (lock)
lock = stv090x_get_feclock(state, timeout_fec);
if (lock) {
lock = 0;
while ((timer < timeout_fec) && (!lock)) {
reg = STV090x_READ_DEMOD(state, TSSTATUS);
lock = STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD);
msleep(1);
timer++;
}
}
return lock;
}
static int stv090x_set_s2rolloff(struct stv090x_state *state)
{
u32 reg;
if (state->internal->dev_ver <= 0x20) {
/* rolloff to auto mode if DVBS2 */
reg = STV090x_READ_DEMOD(state, DEMOD);
STV090x_SETFIELD_Px(reg, MANUAL_SXROLLOFF_FIELD, 0x00);
if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
goto err;
} else {
/* DVB-S2 rolloff to auto mode if DVBS2 */
reg = STV090x_READ_DEMOD(state, DEMOD);
STV090x_SETFIELD_Px(reg, MANUAL_S2ROLLOFF_FIELD, 0x00);
if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
goto err;
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
{
struct dvb_frontend *fe = &state->frontend;
enum stv090x_signal_state signal_state = STV090x_NOCARRIER;
u32 reg;
s32 agc1_power, power_iq = 0, i;
int lock = 0, low_sr = 0;
reg = STV090x_READ_DEMOD(state, TSCFGH);
STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 1); /* Stop path 1 stream merger */
if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5c) < 0) /* Demod stop */
goto err;
if (state->internal->dev_ver >= 0x20) {
if (state->srate > 5000000) {
if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x9e) < 0)
goto err;
} else {
if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x82) < 0)
goto err;
}
}
stv090x_get_lock_tmg(state);
if (state->algo == STV090x_BLIND_SEARCH) {
state->tuner_bw = 2 * 36000000; /* wide bw for unknown srate */
if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc0) < 0) /* wider srate scan */
goto err;
if (STV090x_WRITE_DEMOD(state, CORRELMANT, 0x70) < 0)
goto err;
if (stv090x_set_srate(state, 1000000) < 0) /* initial srate = 1Msps */
goto err;
} else {
/* known srate */
if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x20) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGCFG, 0xd2) < 0)
goto err;
if (state->srate < 2000000) {
/* SR < 2MSPS */
if (STV090x_WRITE_DEMOD(state, CORRELMANT, 0x63) < 0)
goto err;
} else {
/* SR >= 2Msps */
if (STV090x_WRITE_DEMOD(state, CORRELMANT, 0x70) < 0)
goto err;
}
if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
goto err;
if (state->internal->dev_ver >= 0x20) {
if (STV090x_WRITE_DEMOD(state, KREFTMG, 0x5a) < 0)
goto err;
if (state->algo == STV090x_COLD_SEARCH)
state->tuner_bw = (15 * (stv090x_car_width(state->srate, state->rolloff) + 10000000)) / 10;
else if (state->algo == STV090x_WARM_SEARCH)
state->tuner_bw = stv090x_car_width(state->srate, state->rolloff) + 10000000;
}
/* Cold or warm start (the symbol rate is known):
* use a narrow symbol rate scan range
*/
if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc1) < 0) /* narrow srate scan */
goto err;
if (stv090x_set_srate(state, state->srate) < 0)
goto err;
if (stv090x_set_max_srate(state, state->internal->mclk,
state->srate) < 0)
goto err;
if (stv090x_set_min_srate(state, state->internal->mclk,
state->srate) < 0)
goto err;
if (state->srate >= 10000000)
low_sr = 0;
else
low_sr = 1;
}
/* Setup tuner */
if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_bbgain) {
reg = state->config->tuner_bbgain;
if (reg == 0)
reg = 10; /* default: 10dB */
if (state->config->tuner_set_bbgain(fe, reg) < 0)
goto err_gateoff;
}
if (state->config->tuner_set_frequency) {
if (state->config->tuner_set_frequency(fe, state->frequency) < 0)
goto err_gateoff;
}
if (state->config->tuner_set_bandwidth) {
if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
goto err_gateoff;
}
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
msleep(50);
if (state->config->tuner_get_status) {
if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_status(fe, &reg) < 0)
goto err_gateoff;
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
if (reg)
dprintk(FE_DEBUG, 1, "Tuner phase locked");
else {
dprintk(FE_DEBUG, 1, "Tuner unlocked");
return STV090x_NOCARRIER;
}
}
msleep(10);
agc1_power = MAKEWORD16(STV090x_READ_DEMOD(state, AGCIQIN1),
STV090x_READ_DEMOD(state, AGCIQIN0));
if (agc1_power == 0) {
/* If AGC1 integrator value is 0
* then read POWERI, POWERQ
*/
for (i = 0; i < 5; i++) {
power_iq += (STV090x_READ_DEMOD(state, POWERI) +
STV090x_READ_DEMOD(state, POWERQ)) >> 1;
}
power_iq /= 5;
}
if ((agc1_power == 0) && (power_iq < STV090x_IQPOWER_THRESHOLD)) {
dprintk(FE_ERROR, 1, "No Signal: POWER_IQ=0x%02x", power_iq);
lock = 0;
signal_state = STV090x_NOAGC1;
} else {
reg = STV090x_READ_DEMOD(state, DEMOD);
STV090x_SETFIELD_Px(reg, SPECINV_CONTROL_FIELD, state->inversion);
if (state->internal->dev_ver <= 0x20) {
/* rolloff to auto mode if DVBS2 */
STV090x_SETFIELD_Px(reg, MANUAL_SXROLLOFF_FIELD, 1);
} else {
/* DVB-S2 rolloff to auto mode if DVBS2 */
STV090x_SETFIELD_Px(reg, MANUAL_S2ROLLOFF_FIELD, 1);
}
if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
goto err;
if (stv090x_delivery_search(state) < 0)
goto err;
if (state->algo != STV090x_BLIND_SEARCH) {
if (stv090x_start_search(state) < 0)
goto err;
}
}
if (signal_state == STV090x_NOAGC1)
return signal_state;
if (state->algo == STV090x_BLIND_SEARCH)
lock = stv090x_blind_search(state);
else if (state->algo == STV090x_COLD_SEARCH)
lock = stv090x_get_coldlock(state, state->DemodTimeout);
else if (state->algo == STV090x_WARM_SEARCH)
lock = stv090x_get_dmdlock(state, state->DemodTimeout);
if ((!lock) && (state->algo == STV090x_COLD_SEARCH)) {
if (!low_sr) {
if (stv090x_chk_tmg(state))
lock = stv090x_sw_algo(state);
}
}
if (lock)
signal_state = stv090x_get_sig_params(state);
if ((lock) && (signal_state == STV090x_RANGEOK)) { /* signal within Range */
stv090x_optimize_track(state);
if (state->internal->dev_ver >= 0x20) {
/* >= Cut 2.0 :release TS reset after
* demod lock and optimized Tracking
*/
reg = STV090x_READ_DEMOD(state, TSCFGH);
STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0); /* release merger reset */
if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
goto err;
msleep(3);
STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 1); /* merger reset */
if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
goto err;
STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0); /* release merger reset */
if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
goto err;
}
lock = stv090x_get_lock(state, state->FecTimeout,
state->FecTimeout);
if (lock) {
if (state->delsys == STV090x_DVBS2) {
stv090x_set_s2rolloff(state);
reg = STV090x_READ_DEMOD(state, PDELCTRL2);
STV090x_SETFIELD_Px(reg, RESET_UPKO_COUNT, 1);
if (STV090x_WRITE_DEMOD(state, PDELCTRL2, reg) < 0)
goto err;
/* Reset the DVB-S2 packet delineator error counter */
reg = STV090x_READ_DEMOD(state, PDELCTRL2);
STV090x_SETFIELD_Px(reg, RESET_UPKO_COUNT, 0);
if (STV090x_WRITE_DEMOD(state, PDELCTRL2, reg) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x67) < 0) /* PER */
goto err;
} else {
if (STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x75) < 0)
goto err;
}
/* Reset the Total packet counter */
if (STV090x_WRITE_DEMOD(state, FBERCPT4, 0x00) < 0)
goto err;
/* Reset the packet Error counter2 */
if (STV090x_WRITE_DEMOD(state, ERRCTRL2, 0xc1) < 0)
goto err;
} else {
signal_state = STV090x_NODATA;
stv090x_chk_signal(state);
}
}
return signal_state;
err_gateoff:
stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
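/* Enable ISI (multiple input stream) filtering on the packet delineator for
 * the given stream id; any value outside 0..255 disables the filter.
 */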
static int stv090x_set_mis(struct stv090x_state *state, int mis)
{
u32 reg;
if (mis < 0 || mis > 255) {
dprintk(FE_DEBUG, 1, "Disable MIS filtering");
reg = STV090x_READ_DEMOD(state, PDELCTRL1);
STV090x_SETFIELD_Px(reg, FILTER_EN_FIELD, 0x00);
if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
goto err;
} else {
dprintk(FE_DEBUG, 1, "Enable MIS filtering - %d", mis);
reg = STV090x_READ_DEMOD(state, PDELCTRL1);
STV090x_SETFIELD_Px(reg, FILTER_EN_FIELD, 0x01);
if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, ISIENTRY, mis) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, ISIBITENA, 0xff) < 0)
goto err;
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static enum dvbfe_search stv090x_search(struct dvb_frontend *fe)
{
struct stv090x_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *props = &fe->dtv_property_cache;
if (props->frequency == 0)
return DVBFE_ALGO_SEARCH_INVALID;
state->delsys = props->delivery_system;
state->frequency = props->frequency;
state->srate = props->symbol_rate;
state->search_mode = STV090x_SEARCH_AUTO;
state->algo = STV090x_COLD_SEARCH;
state->fec = STV090x_PRERR;
if (state->srate > 10000000) {
dprintk(FE_DEBUG, 1, "Search range: 10 MHz");
state->search_range = 10000000;
} else {
dprintk(FE_DEBUG, 1, "Search range: 5 MHz");
state->search_range = 5000000;
}
stv090x_set_mis(state, props->stream_id);
if (stv090x_algo(state) == STV090x_RANGEOK) {
dprintk(FE_DEBUG, 1, "Search success!");
return DVBFE_ALGO_SEARCH_SUCCESS;
} else {
dprintk(FE_DEBUG, 1, "Search failed!");
return DVBFE_ALGO_SEARCH_FAILED;
}
return DVBFE_ALGO_SEARCH_ERROR;
}
static int stv090x_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
struct stv090x_state *state = fe->demodulator_priv;
u32 reg, dstatus;
u8 search_state;
*status = 0;
dstatus = STV090x_READ_DEMOD(state, DSTATUS);
if (STV090x_GETFIELD_Px(dstatus, CAR_LOCK_FIELD))
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER;
reg = STV090x_READ_DEMOD(state, DMDSTATE);
search_state = STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD);
switch (search_state) {
case 0: /* searching */
case 1: /* first PLH detected */
default:
dprintk(FE_DEBUG, 1, "Status: Unlocked (Searching ..)");
break;
case 2: /* DVB-S2 mode */
dprintk(FE_DEBUG, 1, "Delivery system: DVB-S2");
if (STV090x_GETFIELD_Px(dstatus, LOCK_DEFINITIF_FIELD)) {
reg = STV090x_READ_DEMOD(state, PDELSTATUS1);
if (STV090x_GETFIELD_Px(reg, PKTDELIN_LOCK_FIELD)) {
*status |= FE_HAS_VITERBI;
reg = STV090x_READ_DEMOD(state, TSSTATUS);
if (STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD))
*status |= FE_HAS_SYNC | FE_HAS_LOCK;
}
}
break;
case 3: /* DVB-S1/legacy mode */
dprintk(FE_DEBUG, 1, "Delivery system: DVB-S");
if (STV090x_GETFIELD_Px(dstatus, LOCK_DEFINITIF_FIELD)) {
reg = STV090x_READ_DEMOD(state, VSTATUSVIT);
if (STV090x_GETFIELD_Px(reg, LOCKEDVIT_FIELD)) {
*status |= FE_HAS_VITERBI;
reg = STV090x_READ_DEMOD(state, TSSTATUS);
if (STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD))
*status |= FE_HAS_SYNC | FE_HAS_LOCK;
}
}
break;
}
return 0;
}
static int stv090x_read_per(struct dvb_frontend *fe, u32 *per)
{
struct stv090x_state *state = fe->demodulator_priv;
s32 count_4, count_3, count_2, count_1, count_0, count;
u32 reg, h, m, l;
enum fe_status status;
stv090x_read_status(fe, &status);
if (!(status & FE_HAS_LOCK)) {
*per = 1 << 23; /* Max PER */
} else {
/* Counter 2 */
reg = STV090x_READ_DEMOD(state, ERRCNT22);
h = STV090x_GETFIELD_Px(reg, ERR_CNT2_FIELD);
reg = STV090x_READ_DEMOD(state, ERRCNT21);
m = STV090x_GETFIELD_Px(reg, ERR_CNT21_FIELD);
reg = STV090x_READ_DEMOD(state, ERRCNT20);
l = STV090x_GETFIELD_Px(reg, ERR_CNT20_FIELD);
*per = ((h << 16) | (m << 8) | l);
count_4 = STV090x_READ_DEMOD(state, FBERCPT4);
count_3 = STV090x_READ_DEMOD(state, FBERCPT3);
count_2 = STV090x_READ_DEMOD(state, FBERCPT2);
count_1 = STV090x_READ_DEMOD(state, FBERCPT1);
count_0 = STV090x_READ_DEMOD(state, FBERCPT0);
if ((!count_4) && (!count_3)) {
count = (count_2 & 0xff) << 16;
count |= (count_1 & 0xff) << 8;
count |= count_0 & 0xff;
} else {
count = 1 << 24;
}
if (count == 0)
*per = 1;
}
if (STV090x_WRITE_DEMOD(state, FBERCPT4, 0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, ERRCTRL2, 0xc1) < 0)
goto err;
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
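/* Binary search in a monotonic register/value table and linearly interpolate
 * between the two nearest entries; values outside the table are clamped.
 */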
static int stv090x_table_lookup(const struct stv090x_tab *tab, int max, int val)
{
int res = 0;
int min = 0, med;
if ((val >= tab[min].read && val < tab[max].read) ||
(val >= tab[max].read && val < tab[min].read)) {
while ((max - min) > 1) {
med = (max + min) / 2;
if ((val >= tab[min].read && val < tab[med].read) ||
(val >= tab[med].read && val < tab[min].read))
max = med;
else
min = med;
}
res = ((val - tab[min].read) *
(tab[max].real - tab[min].real) /
(tab[max].read - tab[min].read)) +
tab[min].real;
} else {
if (tab[min].read < tab[max].read) {
if (val < tab[min].read)
res = tab[min].real;
else if (val >= tab[max].read)
res = tab[max].real;
} else {
if (val >= tab[min].read)
res = tab[min].real;
else if (val < tab[max].read)
res = tab[max].real;
}
}
return res;
}
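/* Map the AGC integrator reading to a dBm estimate via stv090x_rf_tab and
 * scale it to the 0..0xffff range expected by the DVB API.
 */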
static int stv090x_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
struct stv090x_state *state = fe->demodulator_priv;
u32 reg;
s32 agc_0, agc_1, agc;
s32 str;
reg = STV090x_READ_DEMOD(state, AGCIQIN1);
agc_1 = STV090x_GETFIELD_Px(reg, AGCIQ_VALUE_FIELD);
reg = STV090x_READ_DEMOD(state, AGCIQIN0);
agc_0 = STV090x_GETFIELD_Px(reg, AGCIQ_VALUE_FIELD);
agc = MAKEWORD16(agc_1, agc_0);
str = stv090x_table_lookup(stv090x_rf_tab,
ARRAY_SIZE(stv090x_rf_tab) - 1, agc);
if (agc > stv090x_rf_tab[0].read)
str = 0;
else if (agc < stv090x_rf_tab[ARRAY_SIZE(stv090x_rf_tab) - 1].read)
str = -100;
*strength = (str + 100) * 0xFFFF / 100;
return 0;
}
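/* Estimate C/N: average 16 noise indicator readings and scale the result
 * linearly over the span of the per-standard C/N table to 0..0xffff.
 */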
static int stv090x_read_cnr(struct dvb_frontend *fe, u16 *cnr)
{
struct stv090x_state *state = fe->demodulator_priv;
u32 reg_0, reg_1, reg, i;
s32 val_0, val_1, val = 0;
u8 lock_f;
s32 div;
u32 last;
switch (state->delsys) {
case STV090x_DVBS2:
reg = STV090x_READ_DEMOD(state, DSTATUS);
lock_f = STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD);
if (lock_f) {
msleep(5);
for (i = 0; i < 16; i++) {
reg_1 = STV090x_READ_DEMOD(state, NNOSPLHT1);
val_1 = STV090x_GETFIELD_Px(reg_1, NOSPLHT_NORMED_FIELD);
reg_0 = STV090x_READ_DEMOD(state, NNOSPLHT0);
val_0 = STV090x_GETFIELD_Px(reg_0, NOSPLHT_NORMED_FIELD);
val += MAKEWORD16(val_1, val_0);
msleep(1);
}
val /= 16;
last = ARRAY_SIZE(stv090x_s2cn_tab) - 1;
div = stv090x_s2cn_tab[0].read -
stv090x_s2cn_tab[last].read;
*cnr = 0xFFFF - ((val * 0xFFFF) / div);
}
break;
case STV090x_DVBS1:
case STV090x_DSS:
reg = STV090x_READ_DEMOD(state, DSTATUS);
lock_f = STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD);
if (lock_f) {
msleep(5);
for (i = 0; i < 16; i++) {
reg_1 = STV090x_READ_DEMOD(state, NOSDATAT1);
val_1 = STV090x_GETFIELD_Px(reg_1, NOSDATAT_UNNORMED_FIELD);
reg_0 = STV090x_READ_DEMOD(state, NOSDATAT0);
val_0 = STV090x_GETFIELD_Px(reg_0, NOSDATAT_UNNORMED_FIELD);
val += MAKEWORD16(val_1, val_0);
msleep(1);
}
val /= 16;
last = ARRAY_SIZE(stv090x_s1cn_tab) - 1;
div = stv090x_s1cn_tab[0].read -
stv090x_s1cn_tab[last].read;
*cnr = 0xFFFF - ((val * 0xFFFF) / div);
}
break;
default:
break;
}
return 0;
}
static int stv090x_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
struct stv090x_state *state = fe->demodulator_priv;
u32 reg;
reg = STV090x_READ_DEMOD(state, DISTXCTL);
switch (tone) {
case SEC_TONE_ON:
STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, 0);
STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
goto err;
STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
goto err;
break;
case SEC_TONE_OFF:
STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, 0);
STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
goto err;
break;
default:
return -EINVAL;
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static enum dvbfe_algo stv090x_frontend_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_CUSTOM;
}
static int stv090x_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *cmd)
{
struct stv090x_state *state = fe->demodulator_priv;
u32 reg, idle = 0, fifo_full = 1;
int i;
reg = STV090x_READ_DEMOD(state, DISTXCTL);
STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD,
(state->config->diseqc_envelope_mode) ? 4 : 2);
STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
goto err;
STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
goto err;
STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 1);
if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
goto err;
for (i = 0; i < cmd->msg_len; i++) {
while (fifo_full) {
reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
fifo_full = STV090x_GETFIELD_Px(reg, FIFO_FULL_FIELD);
}
if (STV090x_WRITE_DEMOD(state, DISTXDATA, cmd->msg[i]) < 0)
goto err;
}
reg = STV090x_READ_DEMOD(state, DISTXCTL);
STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
goto err;
i = 0;
while ((!idle) && (i < 10)) {
reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
idle = STV090x_GETFIELD_Px(reg, TX_IDLE_FIELD);
msleep(10);
i++;
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst)
{
struct stv090x_state *state = fe->demodulator_priv;
u32 reg, idle = 0, fifo_full = 1;
u8 mode, value;
int i;
reg = STV090x_READ_DEMOD(state, DISTXCTL);
if (burst == SEC_MINI_A) {
mode = (state->config->diseqc_envelope_mode) ? 5 : 3;
value = 0x00;
} else {
mode = (state->config->diseqc_envelope_mode) ? 4 : 2;
value = 0xFF;
}
STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, mode);
STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
goto err;
STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
goto err;
STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 1);
if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
goto err;
while (fifo_full) {
reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
fifo_full = STV090x_GETFIELD_Px(reg, FIFO_FULL_FIELD);
}
if (STV090x_WRITE_DEMOD(state, DISTXDATA, value) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, DISTXCTL);
STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
goto err;
i = 0;
while ((!idle) && (i < 10)) {
reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
idle = STV090x_GETFIELD_Px(reg, TX_IDLE_FIELD);
msleep(10);
i++;
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_recv_slave_reply(struct dvb_frontend *fe, struct dvb_diseqc_slave_reply *reply)
{
struct stv090x_state *state = fe->demodulator_priv;
u32 reg = 0, i = 0, rx_end = 0;
while ((rx_end != 1) && (i < 10)) {
msleep(10);
i++;
reg = STV090x_READ_DEMOD(state, DISRX_ST0);
rx_end = STV090x_GETFIELD_Px(reg, RX_END_FIELD);
}
if (rx_end) {
reply->msg_len = STV090x_GETFIELD_Px(reg, FIFO_BYTENBR_FIELD);
for (i = 0; i < reply->msg_len; i++)
reply->msg[i] = STV090x_READ_DEMOD(state, DISRXDATA);
}
return 0;
}
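/* Put this demodulator path into standby: power down its ADC and DiSEqC
 * block and stop its clocks. The shared FEC/TS clocks and the global
 * standby bit are only touched when the other path is already asleep.
 */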
static int stv090x_sleep(struct dvb_frontend *fe)
{
struct stv090x_state *state = fe->demodulator_priv;
u32 reg;
u8 full_standby = 0;
if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_sleep) {
if (state->config->tuner_sleep(fe) < 0)
goto err_gateoff;
}
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
dprintk(FE_DEBUG, 1, "Set %s(%d) to sleep",
state->device == STV0900 ? "STV0900" : "STV0903",
state->demod);
mutex_lock(&state->internal->demod_lock);
switch (state->demod) {
case STV090x_DEMODULATOR_0:
/* power off ADC 1 */
reg = stv090x_read_reg(state, STV090x_TSTTNR1);
STV090x_SETFIELD(reg, ADC1_PON_FIELD, 0);
if (stv090x_write_reg(state, STV090x_TSTTNR1, reg) < 0)
goto err_unlock;
/* power off DiSEqC 1 */
reg = stv090x_read_reg(state, STV090x_TSTTNR2);
STV090x_SETFIELD(reg, DISEQC1_PON_FIELD, 0);
if (stv090x_write_reg(state, STV090x_TSTTNR2, reg) < 0)
goto err_unlock;
/* check whether path 2 is already sleeping, that is when
ADC2 is off */
reg = stv090x_read_reg(state, STV090x_TSTTNR3);
if (STV090x_GETFIELD(reg, ADC2_PON_FIELD) == 0)
full_standby = 1;
/* stop clocks */
reg = stv090x_read_reg(state, STV090x_STOPCLK1);
/* packet delineator 1 clock */
STV090x_SETFIELD(reg, STOP_CLKPKDT1_FIELD, 1);
/* ADC 1 clock */
STV090x_SETFIELD(reg, STOP_CLKADCI1_FIELD, 1);
/* FEC clock is shared between the two paths, only stop it
when full standby is possible */
if (full_standby)
STV090x_SETFIELD(reg, STOP_CLKFEC_FIELD, 1);
if (stv090x_write_reg(state, STV090x_STOPCLK1, reg) < 0)
goto err_unlock;
reg = stv090x_read_reg(state, STV090x_STOPCLK2);
/* sampling 1 clock */
STV090x_SETFIELD(reg, STOP_CLKSAMP1_FIELD, 1);
/* viterbi 1 clock */
STV090x_SETFIELD(reg, STOP_CLKVIT1_FIELD, 1);
/* TS clock is shared between the two paths, only stop it
when full standby is possible */
if (full_standby)
STV090x_SETFIELD(reg, STOP_CLKTS_FIELD, 1);
if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
goto err_unlock;
break;
case STV090x_DEMODULATOR_1:
/* power off ADC 2 */
reg = stv090x_read_reg(state, STV090x_TSTTNR3);
STV090x_SETFIELD(reg, ADC2_PON_FIELD, 0);
if (stv090x_write_reg(state, STV090x_TSTTNR3, reg) < 0)
goto err_unlock;
/* power off DiSEqC 2 */
reg = stv090x_read_reg(state, STV090x_TSTTNR4);
STV090x_SETFIELD(reg, DISEQC2_PON_FIELD, 0);
if (stv090x_write_reg(state, STV090x_TSTTNR4, reg) < 0)
goto err_unlock;
/* check whether path 1 is already sleeping, that is when
ADC1 is off */
reg = stv090x_read_reg(state, STV090x_TSTTNR1);
if (STV090x_GETFIELD(reg, ADC1_PON_FIELD) == 0)
full_standby = 1;
/* stop clocks */
reg = stv090x_read_reg(state, STV090x_STOPCLK1);
/* packet delineator 2 clock */
STV090x_SETFIELD(reg, STOP_CLKPKDT2_FIELD, 1);
/* ADC 2 clock */
STV090x_SETFIELD(reg, STOP_CLKADCI2_FIELD, 1);
/* FEC clock is shared between the two paths, only stop it
when full standby is possible */
if (full_standby)
STV090x_SETFIELD(reg, STOP_CLKFEC_FIELD, 1);
if (stv090x_write_reg(state, STV090x_STOPCLK1, reg) < 0)
goto err_unlock;
reg = stv090x_read_reg(state, STV090x_STOPCLK2);
/* sampling 2 clock */
STV090x_SETFIELD(reg, STOP_CLKSAMP2_FIELD, 1);
/* viterbi 2 clock */
STV090x_SETFIELD(reg, STOP_CLKVIT2_FIELD, 1);
/* TS clock is shared between the two paths, only stop it
when full standby is possible */
if (full_standby)
STV090x_SETFIELD(reg, STOP_CLKTS_FIELD, 1);
if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
goto err_unlock;
break;
default:
dprintk(FE_ERROR, 1, "Wrong demodulator!");
break;
}
if (full_standby) {
/* general power off */
reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
STV090x_SETFIELD(reg, STANDBY_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_SYNTCTRL, reg) < 0)
goto err_unlock;
}
mutex_unlock(&state->internal->demod_lock);
return 0;
err_gateoff:
stv090x_i2c_gate_ctrl(state, 0);
goto err;
err_unlock:
mutex_unlock(&state->internal->demod_lock);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_wakeup(struct dvb_frontend *fe)
{
struct stv090x_state *state = fe->demodulator_priv;
u32 reg;
dprintk(FE_DEBUG, 1, "Wake %s(%d) from standby",
state->device == STV0900 ? "STV0900" : "STV0903",
state->demod);
mutex_lock(&state->internal->demod_lock);
/* general power on */
reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
STV090x_SETFIELD(reg, STANDBY_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_SYNTCTRL, reg) < 0)
goto err;
switch (state->demod) {
case STV090x_DEMODULATOR_0:
/* power on ADC 1 */
reg = stv090x_read_reg(state, STV090x_TSTTNR1);
STV090x_SETFIELD(reg, ADC1_PON_FIELD, 1);
if (stv090x_write_reg(state, STV090x_TSTTNR1, reg) < 0)
goto err;
/* power on DiSEqC 1 */
reg = stv090x_read_reg(state, STV090x_TSTTNR2);
STV090x_SETFIELD(reg, DISEQC1_PON_FIELD, 1);
if (stv090x_write_reg(state, STV090x_TSTTNR2, reg) < 0)
goto err;
/* activate clocks */
reg = stv090x_read_reg(state, STV090x_STOPCLK1);
/* packet delineator 1 clock */
STV090x_SETFIELD(reg, STOP_CLKPKDT1_FIELD, 0);
/* ADC 1 clock */
STV090x_SETFIELD(reg, STOP_CLKADCI1_FIELD, 0);
/* FEC clock */
STV090x_SETFIELD(reg, STOP_CLKFEC_FIELD, 0);
if (stv090x_write_reg(state, STV090x_STOPCLK1, reg) < 0)
goto err;
reg = stv090x_read_reg(state, STV090x_STOPCLK2);
/* sampling 1 clock */
STV090x_SETFIELD(reg, STOP_CLKSAMP1_FIELD, 0);
/* viterbi 1 clock */
STV090x_SETFIELD(reg, STOP_CLKVIT1_FIELD, 0);
/* TS clock */
STV090x_SETFIELD(reg, STOP_CLKTS_FIELD, 0);
if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
goto err;
break;
case STV090x_DEMODULATOR_1:
/* power on ADC 2 */
reg = stv090x_read_reg(state, STV090x_TSTTNR3);
STV090x_SETFIELD(reg, ADC2_PON_FIELD, 1);
if (stv090x_write_reg(state, STV090x_TSTTNR3, reg) < 0)
goto err;
/* power on DiSEqC 2 */
reg = stv090x_read_reg(state, STV090x_TSTTNR4);
STV090x_SETFIELD(reg, DISEQC2_PON_FIELD, 1);
if (stv090x_write_reg(state, STV090x_TSTTNR4, reg) < 0)
goto err;
/* activate clocks */
reg = stv090x_read_reg(state, STV090x_STOPCLK1);
/* packet delineator 2 clock */
STV090x_SETFIELD(reg, STOP_CLKPKDT2_FIELD, 0);
/* ADC 2 clock */
STV090x_SETFIELD(reg, STOP_CLKADCI2_FIELD, 0);
/* FEC clock */
STV090x_SETFIELD(reg, STOP_CLKFEC_FIELD, 0);
if (stv090x_write_reg(state, STV090x_STOPCLK1, reg) < 0)
goto err;
reg = stv090x_read_reg(state, STV090x_STOPCLK2);
/* sampling 2 clock */
STV090x_SETFIELD(reg, STOP_CLKSAMP2_FIELD, 0);
/* viterbi 2 clock */
STV090x_SETFIELD(reg, STOP_CLKVIT2_FIELD, 0);
/* TS clock */
STV090x_SETFIELD(reg, STOP_CLKTS_FIELD, 0);
if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
goto err;
break;
default:
dprintk(FE_ERROR, 1, "Wrong demodulator!");
break;
}
mutex_unlock(&state->internal->demod_lock);
return 0;
err:
mutex_unlock(&state->internal->demod_lock);
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static void stv090x_release(struct dvb_frontend *fe)
{
struct stv090x_state *state = fe->demodulator_priv;
state->internal->num_used--;
if (state->internal->num_used <= 0) {
dprintk(FE_ERROR, 1, "Actually removing");
remove_dev(state->internal);
kfree(state->internal);
}
kfree(state);
}
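/* Configure the shared LDPC decoder for single or dual demodulator use and
 * reset the FEC (and, in single mode, the packet delineator) afterwards.
 */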
static int stv090x_ldpc_mode(struct stv090x_state *state, enum stv090x_mode ldpc_mode)
{
u32 reg = 0;
reg = stv090x_read_reg(state, STV090x_GENCFG);
switch (ldpc_mode) {
case STV090x_DUAL:
default:
if ((state->demod_mode != STV090x_DUAL) || (STV090x_GETFIELD(reg, DDEMOD_FIELD) != 1)) {
/* set LDPC to dual mode */
if (stv090x_write_reg(state, STV090x_GENCFG, 0x1d) < 0)
goto err;
state->demod_mode = STV090x_DUAL;
reg = stv090x_read_reg(state, STV090x_TSTRES0);
STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x1);
if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
goto err;
STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x0);
if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0xcc) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0xcf) < 0)
goto err;
}
break;
case STV090x_SINGLE:
if (stv090x_stop_modcod(state) < 0)
goto err;
if (stv090x_activate_modcod_single(state) < 0)
goto err;
if (state->demod == STV090x_DEMODULATOR_1) {
if (stv090x_write_reg(state, STV090x_GENCFG, 0x06) < 0) /* path 2 */
goto err;
} else {
if (stv090x_write_reg(state, STV090x_GENCFG, 0x04) < 0) /* path 1 */
goto err;
}
reg = stv090x_read_reg(state, STV090x_TSTRES0);
STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x1);
if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
goto err;
STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x0);
if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, PDELCTRL1);
STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0x01);
if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
goto err;
STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0x00);
if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
goto err;
break;
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
/* return (Hz), clk in Hz*/
static u32 stv090x_get_mclk(struct stv090x_state *state)
{
const struct stv090x_config *config = state->config;
u32 div, reg;
u8 ratio;
div = stv090x_read_reg(state, STV090x_NCOARSE);
reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
ratio = STV090x_GETFIELD(reg, SELX1RATIO_FIELD) ? 4 : 6;
return (div + 1) * config->xtal / ratio; /* Hz */
}
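/* Program the PLL coarse divider so that mclk = (M_DIV + 1) * xtal / ratio
 * (ratio 4 or 6 depending on SELX1RATIO), then derive the 22kHz DiSEqC
 * clock dividers from the resulting master clock.
 */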
static int stv090x_set_mclk(struct stv090x_state *state, u32 mclk, u32 clk)
{
const struct stv090x_config *config = state->config;
u32 reg, div, clk_sel;
reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
clk_sel = ((STV090x_GETFIELD(reg, SELX1RATIO_FIELD) == 1) ? 4 : 6);
div = ((clk_sel * mclk) / config->xtal) - 1;
reg = stv090x_read_reg(state, STV090x_NCOARSE);
STV090x_SETFIELD(reg, M_DIV_FIELD, div);
if (stv090x_write_reg(state, STV090x_NCOARSE, reg) < 0)
goto err;
state->internal->mclk = stv090x_get_mclk(state);
/* Set the DiSEqC frequency to 22kHz */
div = state->internal->mclk / 704000;
if (STV090x_WRITE_DEMOD(state, F22TX, div) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, F22RX, div) < 0)
goto err;
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
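/* Configure the two transport stream outputs of the STV0900 (parallel,
 * serial or DVB-CI mode), optionally apply a manual TS clock, and pulse the
 * stream merger resets.
 */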
static int stv0900_set_tspath(struct stv090x_state *state)
{
u32 reg;
if (state->internal->dev_ver >= 0x20) {
switch (state->config->ts1_mode) {
case STV090x_TSMODE_PARALLEL_PUNCTURED:
case STV090x_TSMODE_DVBCI:
switch (state->config->ts2_mode) {
case STV090x_TSMODE_SERIAL_PUNCTURED:
case STV090x_TSMODE_SERIAL_CONTINUOUS:
default:
stv090x_write_reg(state, STV090x_TSGENERAL, 0x00);
break;
case STV090x_TSMODE_PARALLEL_PUNCTURED:
case STV090x_TSMODE_DVBCI:
if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x06) < 0) /* Mux'd stream mode */
goto err;
reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
goto err;
reg = stv090x_read_reg(state, STV090x_P2_TSCFGM);
STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
if (stv090x_write_reg(state, STV090x_P2_TSCFGM, reg) < 0)
goto err;
if (stv090x_write_reg(state, STV090x_P1_TSSPEED, 0x14) < 0)
goto err;
if (stv090x_write_reg(state, STV090x_P2_TSSPEED, 0x28) < 0)
goto err;
break;
}
break;
case STV090x_TSMODE_SERIAL_PUNCTURED:
case STV090x_TSMODE_SERIAL_CONTINUOUS:
default:
switch (state->config->ts2_mode) {
case STV090x_TSMODE_SERIAL_PUNCTURED:
case STV090x_TSMODE_SERIAL_CONTINUOUS:
default:
if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x0c) < 0)
goto err;
break;
case STV090x_TSMODE_PARALLEL_PUNCTURED:
case STV090x_TSMODE_DVBCI:
if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x0a) < 0)
goto err;
break;
}
break;
}
} else {
switch (state->config->ts1_mode) {
case STV090x_TSMODE_PARALLEL_PUNCTURED:
case STV090x_TSMODE_DVBCI:
switch (state->config->ts2_mode) {
case STV090x_TSMODE_SERIAL_PUNCTURED:
case STV090x_TSMODE_SERIAL_CONTINUOUS:
default:
stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x10);
break;
case STV090x_TSMODE_PARALLEL_PUNCTURED:
case STV090x_TSMODE_DVBCI:
stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x16);
reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
goto err;
reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 0);
if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
goto err;
if (stv090x_write_reg(state, STV090x_P1_TSSPEED, 0x14) < 0)
goto err;
if (stv090x_write_reg(state, STV090x_P2_TSSPEED, 0x28) < 0)
goto err;
break;
}
break;
case STV090x_TSMODE_SERIAL_PUNCTURED:
case STV090x_TSMODE_SERIAL_CONTINUOUS:
default:
switch (state->config->ts2_mode) {
case STV090x_TSMODE_SERIAL_PUNCTURED:
case STV090x_TSMODE_SERIAL_CONTINUOUS:
default:
stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x14);
break;
case STV090x_TSMODE_PARALLEL_PUNCTURED:
case STV090x_TSMODE_DVBCI:
stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x12);
break;
}
break;
}
}
switch (state->config->ts1_mode) {
case STV090x_TSMODE_PARALLEL_PUNCTURED:
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts1_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
goto err;
break;
case STV090x_TSMODE_DVBCI:
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts1_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
goto err;
break;
case STV090x_TSMODE_SERIAL_PUNCTURED:
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts1_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
goto err;
break;
case STV090x_TSMODE_SERIAL_CONTINUOUS:
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts1_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
goto err;
break;
default:
break;
}
switch (state->config->ts2_mode) {
case STV090x_TSMODE_PARALLEL_PUNCTURED:
reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts2_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
goto err;
break;
case STV090x_TSMODE_DVBCI:
reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts2_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
goto err;
break;
case STV090x_TSMODE_SERIAL_PUNCTURED:
reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts2_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
goto err;
break;
case STV090x_TSMODE_SERIAL_CONTINUOUS:
reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
STV090x_SETFIELD_Px(reg, TSFIFO_TEIUPDATE_FIELD, state->config->ts2_tei);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
goto err;
break;
default:
break;
}
if (state->config->ts1_clk > 0) {
u32 speed;
switch (state->config->ts1_mode) {
case STV090x_TSMODE_PARALLEL_PUNCTURED:
case STV090x_TSMODE_DVBCI:
default:
speed = state->internal->mclk /
(state->config->ts1_clk / 4);
if (speed < 0x08)
speed = 0x08;
if (speed > 0xFF)
speed = 0xFF;
break;
case STV090x_TSMODE_SERIAL_PUNCTURED:
case STV090x_TSMODE_SERIAL_CONTINUOUS:
speed = state->internal->mclk /
(state->config->ts1_clk / 32);
if (speed < 0x20)
speed = 0x20;
if (speed > 0xFF)
speed = 0xFF;
break;
}
reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
goto err;
if (stv090x_write_reg(state, STV090x_P1_TSSPEED, speed) < 0)
goto err;
}
if (state->config->ts2_clk > 0) {
u32 speed;
switch (state->config->ts2_mode) {
case STV090x_TSMODE_PARALLEL_PUNCTURED:
case STV090x_TSMODE_DVBCI:
default:
speed = state->internal->mclk /
(state->config->ts2_clk / 4);
if (speed < 0x08)
speed = 0x08;
if (speed > 0xFF)
speed = 0xFF;
break;
case STV090x_TSMODE_SERIAL_PUNCTURED:
case STV090x_TSMODE_SERIAL_CONTINUOUS:
speed = state->internal->mclk /
(state->config->ts2_clk / 32);
if (speed < 0x20)
speed = 0x20;
if (speed > 0xFF)
speed = 0xFF;
break;
}
reg = stv090x_read_reg(state, STV090x_P2_TSCFGM);
STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
if (stv090x_write_reg(state, STV090x_P2_TSCFGM, reg) < 0)
goto err;
if (stv090x_write_reg(state, STV090x_P2_TSSPEED, speed) < 0)
goto err;
}
reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
goto err;
STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
goto err;
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
goto err;
STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
goto err;
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv0903_set_tspath(struct stv090x_state *state)
{
u32 reg;
if (state->internal->dev_ver >= 0x20) {
switch (state->config->ts1_mode) {
case STV090x_TSMODE_PARALLEL_PUNCTURED:
case STV090x_TSMODE_DVBCI:
stv090x_write_reg(state, STV090x_TSGENERAL, 0x00);
break;
case STV090x_TSMODE_SERIAL_PUNCTURED:
case STV090x_TSMODE_SERIAL_CONTINUOUS:
default:
stv090x_write_reg(state, STV090x_TSGENERAL, 0x0c);
break;
}
} else {
switch (state->config->ts1_mode) {
case STV090x_TSMODE_PARALLEL_PUNCTURED:
case STV090x_TSMODE_DVBCI:
stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x10);
break;
case STV090x_TSMODE_SERIAL_PUNCTURED:
case STV090x_TSMODE_SERIAL_CONTINUOUS:
default:
stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x14);
break;
}
}
switch (state->config->ts1_mode) {
case STV090x_TSMODE_PARALLEL_PUNCTURED:
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
goto err;
break;
case STV090x_TSMODE_DVBCI:
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
goto err;
break;
case STV090x_TSMODE_SERIAL_PUNCTURED:
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
goto err;
break;
case STV090x_TSMODE_SERIAL_CONTINUOUS:
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
goto err;
break;
default:
break;
}
if (state->config->ts1_clk > 0) {
u32 speed;
switch (state->config->ts1_mode) {
case STV090x_TSMODE_PARALLEL_PUNCTURED:
case STV090x_TSMODE_DVBCI:
default:
speed = state->internal->mclk /
(state->config->ts1_clk / 4);
if (speed < 0x08)
speed = 0x08;
if (speed > 0xFF)
speed = 0xFF;
break;
case STV090x_TSMODE_SERIAL_PUNCTURED:
case STV090x_TSMODE_SERIAL_CONTINUOUS:
speed = state->internal->mclk /
(state->config->ts1_clk / 32);
if (speed < 0x20)
speed = 0x20;
if (speed > 0xFF)
speed = 0xFF;
break;
}
reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
goto err;
if (stv090x_write_reg(state, STV090x_P1_TSSPEED, speed) < 0)
goto err;
}
reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x01);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
goto err;
STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x00);
if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
goto err;
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
static int stv090x_init(struct dvb_frontend *fe)
{
struct stv090x_state *state = fe->demodulator_priv;
const struct stv090x_config *config = state->config;
u32 reg;
if (state->internal->mclk == 0) {
/* call tuner init to configure the tuner's clock output
divider directly before setting up the master clock of
the stv090x. */
if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (config->tuner_init) {
if (config->tuner_init(fe) < 0)
goto err_gateoff;
}
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
stv090x_set_mclk(state, 135000000, config->xtal); /* 135 MHz */
msleep(5);
if (stv090x_write_reg(state, STV090x_SYNTCTRL,
0x20 | config->clk_mode) < 0)
goto err;
stv090x_get_mclk(state);
}
if (stv090x_wakeup(fe) < 0) {
dprintk(FE_ERROR, 1, "Error waking device");
goto err;
}
if (stv090x_ldpc_mode(state, state->demod_mode) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, TNRCFG2);
STV090x_SETFIELD_Px(reg, TUN_IQSWAP_FIELD, state->inversion);
if (STV090x_WRITE_DEMOD(state, TNRCFG2, reg) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, DEMOD);
STV090x_SETFIELD_Px(reg, ROLLOFF_CONTROL_FIELD, state->rolloff);
if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
goto err;
if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (config->tuner_set_mode) {
if (config->tuner_set_mode(fe, TUNER_WAKE) < 0)
goto err_gateoff;
}
if (config->tuner_init) {
if (config->tuner_init(fe) < 0)
goto err_gateoff;
}
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
if (state->device == STV0900) {
if (stv0900_set_tspath(state) < 0)
goto err;
} else {
if (stv0903_set_tspath(state) < 0)
goto err;
}
return 0;
err_gateoff:
stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
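/* One-time chip initialisation: load the device init table, read the cut
 * version from the MID register, apply the cut 2.0 overrides and set up the
 * ADC input ranges.
 */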
static int stv090x_setup(struct dvb_frontend *fe)
{
struct stv090x_state *state = fe->demodulator_priv;
const struct stv090x_config *config = state->config;
const struct stv090x_reg *stv090x_initval = NULL;
const struct stv090x_reg *stv090x_cut20_val = NULL;
unsigned long t1_size = 0, t2_size = 0;
u32 reg = 0;
int i;
if (state->device == STV0900) {
dprintk(FE_DEBUG, 1, "Initializing STV0900");
stv090x_initval = stv0900_initval;
t1_size = ARRAY_SIZE(stv0900_initval);
stv090x_cut20_val = stv0900_cut20_val;
t2_size = ARRAY_SIZE(stv0900_cut20_val);
} else if (state->device == STV0903) {
dprintk(FE_DEBUG, 1, "Initializing STV0903");
stv090x_initval = stv0903_initval;
t1_size = ARRAY_SIZE(stv0903_initval);
stv090x_cut20_val = stv0903_cut20_val;
t2_size = ARRAY_SIZE(stv0903_cut20_val);
}
/* STV090x init */
/* Stop Demod */
if (stv090x_write_reg(state, STV090x_P1_DMDISTATE, 0x5c) < 0)
goto err;
if (state->device == STV0900)
if (stv090x_write_reg(state, STV090x_P2_DMDISTATE, 0x5c) < 0)
goto err;
msleep(5);
/* Set No Tuner Mode */
if (stv090x_write_reg(state, STV090x_P1_TNRCFG, 0x6c) < 0)
goto err;
if (state->device == STV0900)
if (stv090x_write_reg(state, STV090x_P2_TNRCFG, 0x6c) < 0)
goto err;
/* I2C repeater OFF */
STV090x_SETFIELD_Px(reg, ENARPT_LEVEL_FIELD, config->repeater_level);
if (stv090x_write_reg(state, STV090x_P1_I2CRPT, reg) < 0)
goto err;
if (state->device == STV0900)
if (stv090x_write_reg(state, STV090x_P2_I2CRPT, reg) < 0)
goto err;
if (stv090x_write_reg(state, STV090x_NCOARSE, 0x13) < 0) /* set PLL divider */
goto err;
msleep(5);
if (stv090x_write_reg(state, STV090x_I2CCFG, 0x08) < 0) /* 1/41 oversampling */
goto err;
if (stv090x_write_reg(state, STV090x_SYNTCTRL, 0x20 | config->clk_mode) < 0) /* enable PLL */
goto err;
msleep(5);
/* write initval */
dprintk(FE_DEBUG, 1, "Setting up initial values");
for (i = 0; i < t1_size; i++) {
if (stv090x_write_reg(state, stv090x_initval[i].addr, stv090x_initval[i].data) < 0)
goto err;
}
state->internal->dev_ver = stv090x_read_reg(state, STV090x_MID);
if (state->internal->dev_ver >= 0x20) {
if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x0c) < 0)
goto err;
/* write cut20_val*/
dprintk(FE_DEBUG, 1, "Setting up Cut 2.0 initial values");
for (i = 0; i < t2_size; i++) {
if (stv090x_write_reg(state, stv090x_cut20_val[i].addr, stv090x_cut20_val[i].data) < 0)
goto err;
}
} else if (state->internal->dev_ver < 0x20) {
dprintk(FE_ERROR, 1, "ERROR: Unsupported Cut: 0x%02x!",
state->internal->dev_ver);
goto err;
} else if (state->internal->dev_ver > 0x30) {
/* we shouldn't bail out from here */
dprintk(FE_ERROR, 1, "INFO: Cut: 0x%02x probably incomplete support!",
state->internal->dev_ver);
}
/* ADC1 range */
reg = stv090x_read_reg(state, STV090x_TSTTNR1);
STV090x_SETFIELD(reg, ADC1_INMODE_FIELD,
(config->adc1_range == STV090x_ADC_1Vpp) ? 0 : 1);
if (stv090x_write_reg(state, STV090x_TSTTNR1, reg) < 0)
goto err;
/* ADC2 range */
reg = stv090x_read_reg(state, STV090x_TSTTNR3);
STV090x_SETFIELD(reg, ADC2_INMODE_FIELD,
(config->adc2_range == STV090x_ADC_1Vpp) ? 0 : 1);
if (stv090x_write_reg(state, STV090x_TSTTNR3, reg) < 0)
goto err;
if (stv090x_write_reg(state, STV090x_TSTRES0, 0x80) < 0)
goto err;
if (stv090x_write_reg(state, STV090x_TSTRES0, 0x00) < 0)
goto err;
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio, u8 dir, u8 value,
u8 xor_value)
{
struct stv090x_state *state = fe->demodulator_priv;
u8 reg = 0;
STV090x_SETFIELD(reg, GPIOx_OPD_FIELD, dir);
STV090x_SETFIELD(reg, GPIOx_CONFIG_FIELD, value);
STV090x_SETFIELD(reg, GPIOx_XOR_FIELD, xor_value);
return stv090x_write_reg(state, STV090x_GPIOxCFG(gpio), reg);
}
EXPORT_SYMBOL(stv090x_set_gpio);
static struct dvb_frontend_ops stv090x_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STV090x Multistandard",
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 0,
.frequency_tolerance = 0,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_AUTO |
FE_CAN_QPSK |
FE_CAN_2G_MODULATION
},
.release = stv090x_release,
.init = stv090x_init,
.sleep = stv090x_sleep,
.get_frontend_algo = stv090x_frontend_algo,
.diseqc_send_master_cmd = stv090x_send_diseqc_msg,
.diseqc_send_burst = stv090x_send_diseqc_burst,
.diseqc_recv_slave_reply = stv090x_recv_slave_reply,
.set_tone = stv090x_set_tone,
.search = stv090x_search,
.read_status = stv090x_read_status,
.read_ber = stv090x_read_per,
.read_signal_strength = stv090x_read_signal_strength,
.read_snr = stv090x_read_cnr,
};
struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
struct i2c_adapter *i2c,
enum stv090x_demodulator demod)
{
struct stv090x_state *state = NULL;
struct stv090x_dev *temp_int;
state = kzalloc(sizeof (struct stv090x_state), GFP_KERNEL);
if (state == NULL)
goto error;
state->verbose = &verbose;
state->config = config;
state->i2c = i2c;
state->frontend.ops = stv090x_ops;
state->frontend.demodulator_priv = state;
state->demod = demod;
state->demod_mode = config->demod_mode; /* Single or Dual mode */
state->device = config->device;
state->rolloff = STV090x_RO_35; /* default */
temp_int = find_dev(state->i2c,
state->config->address);
if ((temp_int != NULL) && (state->demod_mode == STV090x_DUAL)) {
state->internal = temp_int->internal;
state->internal->num_used++;
dprintk(FE_INFO, 1, "Found Internal Structure!");
} else {
state->internal = kmalloc(sizeof(struct stv090x_internal),
GFP_KERNEL);
if (!state->internal)
goto error;
temp_int = append_internal(state->internal);
if (!temp_int) {
kfree(state->internal);
goto error;
}
state->internal->num_used = 1;
state->internal->mclk = 0;
state->internal->dev_ver = 0;
state->internal->i2c_adap = state->i2c;
state->internal->i2c_addr = state->config->address;
dprintk(FE_INFO, 1, "Create New Internal Structure!");
mutex_init(&state->internal->demod_lock);
mutex_init(&state->internal->tuner_lock);
if (stv090x_setup(&state->frontend) < 0) {
dprintk(FE_ERROR, 1, "Error setting up device");
goto err_remove;
}
}
if (state->internal->dev_ver >= 0x30)
state->frontend.ops.info.caps |= FE_CAN_MULTISTREAM;
/* workaround for stuck DiSEqC output */
if (config->diseqc_envelope_mode)
stv090x_send_diseqc_burst(&state->frontend, SEC_MINI_A);
dprintk(FE_ERROR, 1, "Attaching %s demodulator(%d) Cut=0x%02x",
state->device == STV0900 ? "STV0900" : "STV0903",
demod,
state->internal->dev_ver);
return &state->frontend;
err_remove:
remove_dev(state->internal);
kfree(state->internal);
error:
kfree(state);
return NULL;
}
EXPORT_SYMBOL(stv090x_attach);
MODULE_PARM_DESC(verbose, "Set Verbosity level");
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("STV090x Multi-Std Broadcast frontend");
MODULE_LICENSE("GPL");
| gpl-2.0 |
wulsic/Hyper_CM11 | drivers/staging/rtl8187se/r8180_dm.c | 3176 | 47704 | //#include "r8180.h"
#include "r8180_dm.h"
#include "r8180_hw.h"
#include "r8180_93cx6.h"
//{by amy 080312
//
// Description:
// Return TRUE if we shall perform the High Power Mechanism, FALSE otherwise.
//
//+by amy 080312
#define RATE_ADAPTIVE_TIMER_PERIOD 300
bool CheckHighPower(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
if(!priv->bRegHighPowerMechanism)
{
return false;
}
if(ieee->state == IEEE80211_LINKED_SCANNING)
{
return false;
}
return true;
}
//
// Description:
// Update Tx power level if necessary.
// See also DoRxHighPower() and SetTxPowerLevel8185() for reference.
//
// Note:
// The reason why we update the Tx power level here instead of in DoRxHighPower()
// is that changing Tx power takes far more I/O than a channel TR switch,
// and the writes touch OFDM and MAC registers.
// So we don't want to update it on a per-Rx-packet basis.
//
void
DoTxHighPower(
struct net_device *dev
)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u16 HiPwrUpperTh = 0;
u16 HiPwrLowerTh = 0;
u8 RSSIHiPwrUpperTh;
u8 RSSIHiPwrLowerTh;
u8 u1bTmp;
char OfdmTxPwrIdx, CckTxPwrIdx;
//printk("----> DoTxHighPower()\n");
HiPwrUpperTh = priv->RegHiPwrUpperTh;
HiPwrLowerTh = priv->RegHiPwrLowerTh;
HiPwrUpperTh = HiPwrUpperTh * 10;
HiPwrLowerTh = HiPwrLowerTh * 10;
RSSIHiPwrUpperTh = priv->RegRSSIHiPwrUpperTh;
RSSIHiPwrLowerTh = priv->RegRSSIHiPwrLowerTh;
//lzm add 080826
OfdmTxPwrIdx = priv->chtxpwr_ofdm[priv->ieee80211->current_network.channel];
CckTxPwrIdx = priv->chtxpwr[priv->ieee80211->current_network.channel];
// printk("DoTxHighPower() - UndecoratedSmoothedSS:%d, CurCCKRSSI = %d , bCurCCKPkt= %d \n", priv->UndecoratedSmoothedSS, priv->CurCCKRSSI, priv->bCurCCKPkt );
if((priv->UndecoratedSmoothedSS > HiPwrUpperTh) ||
(priv->bCurCCKPkt && (priv->CurCCKRSSI > RSSIHiPwrUpperTh)))
{
// Stevenl suggested degrading 8dbm in the high power state. 2007-12-04 Isaiah
// printk("=====>DoTxHighPower() - High Power - UndecoratedSmoothedSS:%d, HiPwrUpperTh = %d \n", priv->UndecoratedSmoothedSS, HiPwrUpperTh );
priv->bToUpdateTxPwr = true;
u1bTmp= read_nic_byte(dev, CCK_TXAGC);
// If it never enter High Power.
if( CckTxPwrIdx == u1bTmp)
{
u1bTmp = (u1bTmp > 16) ? (u1bTmp -16): 0; // 8dbm
write_nic_byte(dev, CCK_TXAGC, u1bTmp);
u1bTmp= read_nic_byte(dev, OFDM_TXAGC);
u1bTmp = (u1bTmp > 16) ? (u1bTmp -16): 0; // 8dbm
write_nic_byte(dev, OFDM_TXAGC, u1bTmp);
}
}
else if((priv->UndecoratedSmoothedSS < HiPwrLowerTh) &&
(!priv->bCurCCKPkt || priv->CurCCKRSSI < RSSIHiPwrLowerTh))
{
// printk("DoTxHighPower() - lower Power - UndecoratedSmoothedSS:%d, HiPwrUpperTh = %d \n", priv->UndecoratedSmoothedSS, HiPwrLowerTh );
if(priv->bToUpdateTxPwr)
{
priv->bToUpdateTxPwr = false;
//SD3 required.
u1bTmp= read_nic_byte(dev, CCK_TXAGC);
if(u1bTmp < CckTxPwrIdx)
{
//u1bTmp = ((u1bTmp+16) > 35) ? 35: (u1bTmp+16); // 8dbm
//write_nic_byte(dev, CCK_TXAGC, u1bTmp);
write_nic_byte(dev, CCK_TXAGC, CckTxPwrIdx);
}
u1bTmp= read_nic_byte(dev, OFDM_TXAGC);
if(u1bTmp < OfdmTxPwrIdx)
{
//u1bTmp = ((u1bTmp+16) > 35) ? 35: (u1bTmp+16); // 8dbm
//write_nic_byte(dev, OFDM_TXAGC, u1bTmp);
write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
}
}
}
//printk("<---- DoTxHighPower()\n");
}
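/*
* Worked example (added note, not in the original driver): per the inline
* "8dbm" comments, one TXAGC index step is roughly 0.5 dBm, so the
* "u1bTmp - 16" adjustment above backs both CCK and OFDM Tx power off by
* about 8 dBm in the high power state, clamped at index 0; on leaving high
* power the indices are restored to the per-channel chtxpwr values.
*/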
//
// Description:
// Callback function of UpdateTxPowerWorkItem.
// Because of some event happened, e.g. CCX TPC, High Power Mechanism,
// We update Tx power of current channel again.
//
void rtl8180_tx_pw_wq (struct work_struct *work)
{
// struct r8180_priv *priv = container_of(work, struct r8180_priv, watch_dog_wq);
// struct ieee80211_device * ieee = (struct ieee80211_device*)
// container_of(work, struct ieee80211_device, watch_dog_wq);
struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,tx_pw_wq);
struct net_device *dev = ieee->dev;
// printk("----> UpdateTxPowerWorkItemCallback()\n");
DoTxHighPower(dev);
// printk("<---- UpdateTxPowerWorkItemCallback()\n");
}
//
// Description:
// Return TRUE if we shall perform the DIG Mechanism, FALSE otherwise.
//
bool
CheckDig(
struct net_device *dev
)
{
struct r8180_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
if(!priv->bDigMechanism)
return false;
if(ieee->state != IEEE80211_LINKED)
return false;
//if(priv->CurrentOperaRate < 36) // Schedule Dig under all OFDM rates. By Bruce, 2007-06-01.
if((priv->ieee80211->rate/5) < 36) // Schedule Dig under all OFDM rates. By Bruce, 2007-06-01.
return false;
return true;
}
//
// Description:
// Implementation of DIG for Zebra and Zebra2.
//
void
DIG_Zebra(
struct net_device *dev
)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u16 CCKFalseAlarm, OFDMFalseAlarm;
u16 OfdmFA1, OfdmFA2;
int InitialGainStep = 7; // The number of initial gain stages.
int LowestGainStage = 4; // The capable lowest stage of performing dig workitem.
u32 AwakePeriodIn2Sec=0;
//printk("---------> DIG_Zebra()\n");
CCKFalseAlarm = (u16)(priv->FalseAlarmRegValue & 0x0000ffff);
OFDMFalseAlarm = (u16)((priv->FalseAlarmRegValue >> 16) & 0x0000ffff);
OfdmFA1 = 0x15;
OfdmFA2 = ((u16)(priv->RegDigOfdmFaUpTh)) << 8;
// printk("DIG**********CCK False Alarm: %#X \n",CCKFalseAlarm);
// printk("DIG**********OFDM False Alarm: %#X \n",OFDMFalseAlarm);
// The number of initial gain steps is different, by Bruce, 2007-04-13.
if (priv->InitialGain == 0 ) //autoDIG
{ // Advised from SD3 DZ
priv->InitialGain = 4; // In 87B, m74dBm means State 4 (m82dBm)
}
{ // Advised from SD3 DZ
OfdmFA1 = 0x20;
}
#if 1 //lzm reserved 080826
AwakePeriodIn2Sec = (2000-priv ->DozePeriodInPast2Sec);
//printk("&&& DozePeriod=%d AwakePeriod=%d\n", priv->DozePeriodInPast2Sec, AwakePeriodIn2Sec);
priv ->DozePeriodInPast2Sec=0;
if(AwakePeriodIn2Sec)
{
//RT_TRACE(COMP_DIG, DBG_TRACE, ("DIG: AwakePeriodIn2Sec(%d) - FATh(0x%X , 0x%X) ->",AwakePeriodIn2Sec, OfdmFA1, OfdmFA2));
// adjust the DIG thresholds.
OfdmFA1 = (u16)((OfdmFA1*AwakePeriodIn2Sec) / 2000) ;
OfdmFA2 = (u16)((OfdmFA2*AwakePeriodIn2Sec) / 2000) ;
//RT_TRACE(COMP_DIG, DBG_TRACE, ("( 0x%X , 0x%X)\n", OfdmFA1, OfdmFA2));
}
else
{
;//RT_TRACE(COMP_DIG, DBG_WARNING, ("ERROR!! AwakePeriodIn2Sec should not be ZERO!!\n"));
}
#endif
InitialGainStep = 8;
LowestGainStage = priv->RegBModeGainStage; // Lowest gain stage.
if (OFDMFalseAlarm > OfdmFA1)
{
if (OFDMFalseAlarm > OfdmFA2)
{
priv->DIG_NumberFallbackVote++;
if (priv->DIG_NumberFallbackVote >1)
{
//serious OFDM False Alarm, need fallback
if (priv->InitialGain < InitialGainStep)
{
priv->InitialGainBackUp= priv->InitialGain;
priv->InitialGain = (priv->InitialGain + 1);
// printk("DIG**********OFDM False Alarm: %#X, OfdmFA1: %#X, OfdmFA2: %#X\n", OFDMFalseAlarm, OfdmFA1, OfdmFA2);
// printk("DIG+++++++ fallback OFDM:%d \n", priv->InitialGain);
UpdateInitialGain(dev);
}
priv->DIG_NumberFallbackVote = 0;
priv->DIG_NumberUpgradeVote=0;
}
}
else
{
if (priv->DIG_NumberFallbackVote)
priv->DIG_NumberFallbackVote--;
}
priv->DIG_NumberUpgradeVote=0;
}
else
{
if (priv->DIG_NumberFallbackVote)
priv->DIG_NumberFallbackVote--;
priv->DIG_NumberUpgradeVote++;
if (priv->DIG_NumberUpgradeVote>9)
{
if (priv->InitialGain > LowestGainStage) // In 87B, m78dBm means State 4 (m864dBm)
{
priv->InitialGainBackUp= priv->InitialGain;
priv->InitialGain = (priv->InitialGain - 1);
// printk("DIG**********OFDM False Alarm: %#X, OfdmFA1: %#X, OfdmFA2: %#X\n", OFDMFalseAlarm, OfdmFA1, OfdmFA2);
// printk("DIG--------- Upgrade OFDM:%d \n", priv->InitialGain);
UpdateInitialGain(dev);
}
priv->DIG_NumberFallbackVote = 0;
priv->DIG_NumberUpgradeVote=0;
}
}
// printk("DIG+++++++ OFDM:%d\n", priv->InitialGain);
//printk("<--------- DIG_Zebra()\n");
}
//
// Description:
// Dispatch DIG implementation according to RF.
//
void
DynamicInitGain(struct net_device *dev)
{
DIG_Zebra(dev);
}
void rtl8180_hw_dig_wq (struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_dig_wq);
struct net_device *dev = ieee->dev;
struct r8180_priv *priv = ieee80211_priv(dev);
// Read CCK and OFDM False Alarm.
priv->FalseAlarmRegValue = read_nic_dword(dev, CCK_FALSE_ALARM);
// Adjust Initial Gain dynamically.
DynamicInitGain(dev);
}
int
IncludedInSupportedRates(
struct r8180_priv *priv,
u8 TxRate )
{
u8 rate_len;
u8 rate_ex_len;
u8 RateMask = 0x7F;
u8 idx;
unsigned short Found = 0;
u8 NaiveTxRate = TxRate&RateMask;
rate_len = priv->ieee80211->current_network.rates_len;
rate_ex_len = priv->ieee80211->current_network.rates_ex_len;
for( idx=0; idx< rate_len; idx++ )
{
if( (priv->ieee80211->current_network.rates[idx] & RateMask) == NaiveTxRate )
{
Found = 1;
goto found_rate;
}
}
for( idx=0; idx< rate_ex_len; idx++ )
{
if( (priv->ieee80211->current_network.rates_ex[idx] & RateMask) == NaiveTxRate )
{
Found = 1;
goto found_rate;
}
}
return Found;
found_rate:
return Found;
}
//
// Description:
// Get the Tx rate one degree up from the input rate within the supported rates.
// Return the upgraded rate on success, otherwise return the input rate.
// By Bruce, 2007-06-05.
//
u8
GetUpgradeTxRate(
struct net_device *dev,
u8 rate
)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u8 UpRate;
// Upgrade 1 degree.
switch(rate)
{
case 108: // Up to 54Mbps.
UpRate = 108;
break;
case 96: // Up to 54Mbps.
UpRate = 108;
break;
case 72: // Up to 48Mbps.
UpRate = 96;
break;
case 48: // Up to 36Mbps.
UpRate = 72;
break;
case 36: // Up to 24Mbps.
UpRate = 48;
break;
case 22: // Up to 18Mbps.
UpRate = 36;
break;
case 11: // Up to 11Mbps.
UpRate = 22;
break;
case 4: // Up to 5.5Mbps.
UpRate = 11;
break;
case 2: // Up to 2Mbps.
UpRate = 4;
break;
default:
printk("GetUpgradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
return rate;
}
// Check if the rate is valid.
if(IncludedInSupportedRates(priv, UpRate))
{
// printk("GetUpgradeTxRate(): GetUpgrade Tx rate(%d) from %d !\n", UpRate, priv->CurrentOperaRate);
return UpRate;
}
else
{
//printk("GetUpgradeTxRate(): Tx rate (%d) is not in supported rates\n", UpRate);
return rate;
}
return rate;
}
//
// Description:
// Get the Tx rate one degree down from the input rate within the supported rates.
// Return the degraded rate on success, otherwise return the input rate.
// By Bruce, 2007-06-05.
//
u8
GetDegradeTxRate(
struct net_device *dev,
u8 rate
)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u8 DownRate;
// Upgrade 1 degree.
switch(rate)
{
case 108: // Down to 48Mbps.
DownRate = 96;
break;
case 96: // Down to 36Mbps.
DownRate = 72;
break;
case 72: // Down to 24Mbps.
DownRate = 48;
break;
case 48: // Down to 18Mbps.
DownRate = 36;
break;
case 36: // Down to 11Mbps.
DownRate = 22;
break;
case 22: // Down to 5.5Mbps.
DownRate = 11;
break;
case 11: // Down to 2Mbps.
DownRate = 4;
break;
case 4: // Down to 1Mbps.
DownRate = 2;
break;
case 2: // Down to 1Mbps.
DownRate = 2;
break;
default:
printk("GetDegradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
return rate;
}
// Check if the rate is valid.
if(IncludedInSupportedRates(priv, DownRate))
{
// printk("GetDegradeTxRate(): GetDegrade Tx rate(%d) from %d!\n", DownRate, priv->CurrentOperaRate);
return DownRate;
}
else
{
//printk("GetDegradeTxRate(): Tx rate (%d) is not in supported rates\n", DownRate);
return rate;
}
return rate;
}
//
// Helper function to determine if specified data rate is
// CCK rate.
// 2005.01.25, by rcnjko.
//
bool
MgntIsCckRate(
u16 rate
)
{
bool bReturn = false;
if((rate <= 22) && (rate != 12) && (rate != 18))
{
bReturn = true;
}
return bReturn;
}
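/*
* Worked example (added note, not in the original driver): rate values here
* are in 500 kbps units, so 2, 4, 11 and 22 are the CCK rates 1, 2, 5.5 and
* 11 Mbps, while 12 and 18 (6 and 9 Mbps) are OFDM rates and are therefore
* excluded above even though they are <= 22.
*/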
//
// Description:
// Tx Power tracking mechanism routine on 87SE.
// Created by Roger, 2007.12.11.
//
void
TxPwrTracking87SE(
struct net_device *dev
)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
u8 tmpu1Byte, CurrentThermal, Idx;
char CckTxPwrIdx, OfdmTxPwrIdx;
//u32 u4bRfReg;
tmpu1Byte = read_nic_byte(dev, EN_LPF_CAL);
CurrentThermal = (tmpu1Byte & 0xf0)>>4; //[ 7:4]: thermal meter indication.
CurrentThermal = (CurrentThermal>0x0c)? 0x0c:CurrentThermal;//lzm add 080826
//printk("TxPwrTracking87SE(): CurrentThermal(%d)\n", CurrentThermal);
if( CurrentThermal != priv->ThermalMeter)
{
// printk("TxPwrTracking87SE(): Thermal meter changed!!!\n");
// Update Tx Power level on each channel.
for(Idx = 1; Idx<15; Idx++)
{
CckTxPwrIdx = priv->chtxpwr[Idx];
OfdmTxPwrIdx = priv->chtxpwr_ofdm[Idx];
if( CurrentThermal > priv->ThermalMeter )
{ // higher thermal meter.
CckTxPwrIdx += (CurrentThermal - priv->ThermalMeter)*2;
OfdmTxPwrIdx += (CurrentThermal - priv->ThermalMeter)*2;
if(CckTxPwrIdx >35)
CckTxPwrIdx = 35; // Force TxPower to maximal index.
if(OfdmTxPwrIdx >35)
OfdmTxPwrIdx = 35;
}
else
{ // lower thermal meter.
CckTxPwrIdx -= (priv->ThermalMeter - CurrentThermal)*2;
OfdmTxPwrIdx -= (priv->ThermalMeter - CurrentThermal)*2;
if(CckTxPwrIdx <0)
CckTxPwrIdx = 0;
if(OfdmTxPwrIdx <0)
OfdmTxPwrIdx = 0;
}
// Update TxPower level on CCK and OFDM resp.
priv->chtxpwr[Idx] = CckTxPwrIdx;
priv->chtxpwr_ofdm[Idx] = OfdmTxPwrIdx;
}
// Update TxPower level immediately.
rtl8225z2_SetTXPowerLevel(dev, priv->ieee80211->current_network.channel);
}
priv->ThermalMeter = CurrentThermal;
}
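/*
* Worked example (added note, not in the original driver): with a stored
* ThermalMeter of 5 and a new reading of 7, every channel's CCK and OFDM
* power index is raised by (7 - 5) * 2 = 4 steps, clamped to the 0..35
* range, and the change is applied immediately for the current channel via
* rtl8225z2_SetTXPowerLevel().
*/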
void
StaRateAdaptive87SE(
struct net_device *dev
)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
unsigned long CurrTxokCnt;
u16 CurrRetryCnt;
u16 CurrRetryRate;
//u16 i,idx;
unsigned long CurrRxokCnt;
bool bTryUp = false;
bool bTryDown = false;
u8 TryUpTh = 1;
u8 TryDownTh = 2;
u32 TxThroughput;
long CurrSignalStrength;
bool bUpdateInitialGain = false;
u8 u1bOfdm=0, u1bCck = 0;
char OfdmTxPwrIdx, CckTxPwrIdx;
priv->RateAdaptivePeriod= RATE_ADAPTIVE_TIMER_PERIOD;
CurrRetryCnt = priv->CurrRetryCnt;
CurrTxokCnt = priv->NumTxOkTotal - priv->LastTxokCnt;
CurrRxokCnt = priv->ieee80211->NumRxOkTotal - priv->LastRxokCnt;
CurrSignalStrength = priv->Stats_RecvSignalPower;
TxThroughput = (u32)(priv->NumTxOkBytesTotal - priv->LastTxOKBytes);
priv->LastTxOKBytes = priv->NumTxOkBytesTotal;
priv->CurrentOperaRate = priv->ieee80211->rate/5;
//printk("priv->CurrentOperaRate is %d\n",priv->CurrentOperaRate);
//2 Compute retry ratio.
if (CurrTxokCnt>0)
{
CurrRetryRate = (u16)(CurrRetryCnt*100/CurrTxokCnt);
}
else
{ // It may be a serious retry condition; distinguish serious retry from the no-packets case. Modified by Bruce.
CurrRetryRate = (u16)(CurrRetryCnt*100/1);
}
//
// Added by Roger, 2007.01.02.
// For debug information.
//
//printk("\n(1) pHalData->LastRetryRate: %d \n",priv->LastRetryRate);
//printk("(2) RetryCnt = %d \n", CurrRetryCnt);
//printk("(3) TxokCnt = %d \n", CurrTxokCnt);
//printk("(4) CurrRetryRate = %d \n", CurrRetryRate);
//printk("(5) CurrSignalStrength = %d \n",CurrSignalStrength);
//printk("(6) TxThroughput is %d\n",TxThroughput);
//printk("priv->NumTxOkBytesTotal is %d\n",priv->NumTxOkBytesTotal);
priv->LastRetryCnt = priv->CurrRetryCnt;
priv->LastTxokCnt = priv->NumTxOkTotal;
priv->LastRxokCnt = priv->ieee80211->NumRxOkTotal;
priv->CurrRetryCnt = 0;
//2No Tx packets, return to init_rate or not?
if (CurrRetryRate==0 && CurrTxokCnt == 0)
{
//
//After 9 (30*300ms) seconds in this condition, we try to raise rate.
//
priv->TryupingCountNoData++;
// printk("No Tx packets, TryupingCountNoData(%d)\n", priv->TryupingCountNoData);
//[TRC Dell Lab] Extend raised period from 4.5sec to 9sec, Isaiah 2008-02-15 18:00
if (priv->TryupingCountNoData>30)
{
priv->TryupingCountNoData = 0;
priv->CurrentOperaRate = GetUpgradeTxRate(dev, priv->CurrentOperaRate);
// Reset Fail Record
priv->LastFailTxRate = 0;
priv->LastFailTxRateSS = -200;
priv->FailTxRateCount = 0;
}
goto SetInitialGain;
}
else
{
priv->TryupingCountNoData=0; //Reset trying up times.
}
//
// For the Netgear case, I comment out the following signal strength estimation,
// which can result in a lower transmit rate when there are NOT enough samples (e.g. PING request).
// 2007.04.09, by Roger.
//
//
// Restructure rate adaptive as the following main stages:
// (1) Add retry threshold in 54M upgrading condition with signal strength.
// (2) Add the mechanism to degrade to CCK rate according to signal strength
// and retry rate.
// (3) Remove all Initial Gain Updates over OFDM rate. To avoid the complicated
// situation, Initial Gain Update is upon on DIG mechanism except CCK rate.
// (4) Add the mechanism of trying to upgrade the tx rate.
// (5) Record the information of upping tx rate to avoid trying upping tx rate constantly.
// By Bruce, 2007-06-05.
//
//
// 11Mbps or 36Mbps
// Check more times in these rate(key rates).
//
if(priv->CurrentOperaRate == 22 || priv->CurrentOperaRate == 72)
{
TryUpTh += 9;
}
//
// Let these rates down more difficult.
//
if(MgntIsCckRate(priv->CurrentOperaRate) || priv->CurrentOperaRate == 36)
{
TryDownTh += 1;
}
//1 Adjust Rate.
if (priv->bTryuping == true)
{
//2 For Test Upgrading mechanism
// Note:
// Sometimes the throughput depends on the capability between the AP and NIC,
// so a low data rate does not improve the performance.
// We randomly upgrade the data rate and check if the retry rate is improved.
// If upgrading the rate did not improve the retry rate, fall back to the original rate.
if ( (CurrRetryRate > 25) && TxThroughput < priv->LastTxThroughput)
{
//Not necessary raising rate, fall back rate.
bTryDown = true;
//printk("case1-1: Not necessary raising rate, fall back rate....\n");
//printk("case1-1: pMgntInfo->CurrentOperaRate =%d, TxThroughput = %d, LastThroughput = %d\n",
// priv->CurrentOperaRate, TxThroughput, priv->LastTxThroughput);
}
else
{
priv->bTryuping = false;
}
}
else if (CurrSignalStrength > -47 && (CurrRetryRate < 50))
{
//2For High Power
//
// Added by Roger, 2007.04.09.
// Return to highest data rate, if signal strength is good enough.
// SignalStrength threshold(-50dbm) is for RTL8186.
// Revise SignalStrength threshold to -51dbm.
//
// Also need to check retry rate for safety, by Bruce, 2007-06-05.
if(priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate )
{
bTryUp = true;
// Upgrade Tx Rate directly.
priv->TryupingCount += TryUpTh;
}
// printk("case2: StaRateAdaptive87SE: Power(%d) is high enough!!. \n", CurrSignalStrength);
}
else if(CurrTxokCnt > 9 && CurrTxokCnt< 100 && CurrRetryRate >= 600)
{
//2 For Serious Retry
//
// Traffic is not busy but our Tx retry is serious.
//
bTryDown = true;
// Let Rate Mechanism to degrade tx rate directly.
priv->TryDownCountLowData += TryDownTh;
// printk("case3: RA: Tx Retry is serious. Degrade Tx Rate to %d directly...\n", priv->CurrentOperaRate);
}
else if ( priv->CurrentOperaRate == 108 )
{
//2For 54Mbps
// Air Link
if ( (CurrRetryRate>26)&&(priv->LastRetryRate>25))
// if ( (CurrRetryRate>40)&&(priv->LastRetryRate>39))
{
//Down to rate 48Mbps.
bTryDown = true;
}
// Cable Link
else if ( (CurrRetryRate>17)&&(priv->LastRetryRate>16) && (CurrSignalStrength > -72))
// else if ( (CurrRetryRate>17)&&(priv->LastRetryRate>16) && (CurrSignalStrength > -72))
{
//Down to rate 48Mbps.
bTryDown = true;
}
if(bTryDown && (CurrSignalStrength < -75)) //cable link
{
priv->TryDownCountLowData += TryDownTh;
}
//printk("case4---54M \n");
}
else if ( priv->CurrentOperaRate == 96 )
{
//2For 48Mbps
//Air Link
if ( ((CurrRetryRate>48) && (priv->LastRetryRate>47)))
// if ( ((CurrRetryRate>65) && (priv->LastRetryRate>64)))
{
//Down to rate 36Mbps.
bTryDown = true;
}
//Cable Link
else if ( ((CurrRetryRate>21) && (priv->LastRetryRate>20)) && (CurrSignalStrength > -74))
{
//Down to rate 36Mbps.
bTryDown = true;
}
else if((CurrRetryRate> (priv->LastRetryRate + 50 )) && (priv->FailTxRateCount >2 ))
// else if((CurrRetryRate> (priv->LastRetryRate + 70 )) && (priv->FailTxRateCount >2 ))
{
bTryDown = true;
priv->TryDownCountLowData += TryDownTh;
}
else if ( (CurrRetryRate<8) && (priv->LastRetryRate<8) ) //TO DO: need to consider (RSSI)
// else if ( (CurrRetryRate<28) && (priv->LastRetryRate<8) )
{
bTryUp = true;
}
if(bTryDown && (CurrSignalStrength < -75))
{
priv->TryDownCountLowData += TryDownTh;
}
//printk("case5---48M \n");
}
else if ( priv->CurrentOperaRate == 72 )
{
//2For 36Mbps
if ( (CurrRetryRate>43) && (priv->LastRetryRate>41))
// if ( (CurrRetryRate>60) && (priv->LastRetryRate>59))
{
//Down to rate 24Mbps.
bTryDown = true;
}
else if((CurrRetryRate> (priv->LastRetryRate + 50 )) && (priv->FailTxRateCount >2 ))
// else if((CurrRetryRate> (priv->LastRetryRate + 70 )) && (priv->FailTxRateCount >2 ))
{
bTryDown = true;
priv->TryDownCountLowData += TryDownTh;
}
else if ( (CurrRetryRate<15) && (priv->LastRetryRate<16)) //TO DO: need to consider (RSSI)
// else if ( (CurrRetryRate<35) && (priv->LastRetryRate<36))
{
bTryUp = true;
}
if(bTryDown && (CurrSignalStrength < -80))
{
priv->TryDownCountLowData += TryDownTh;
}
//printk("case6---36M \n");
}
else if ( priv->CurrentOperaRate == 48 )
{
//2For 24Mbps
// Air Link
if ( ((CurrRetryRate>63) && (priv->LastRetryRate>62)))
// if ( ((CurrRetryRate>83) && (priv->LastRetryRate>82)))
{
//Down to rate 18Mbps.
bTryDown = true;
}
//Cable Link
else if ( ((CurrRetryRate>33) && (priv->LastRetryRate>32)) && (CurrSignalStrength > -82) )
// else if ( ((CurrRetryRate>50) && (priv->LastRetryRate>49)) && (CurrSignalStrength > -82) )
{
//Down to rate 18Mbps.
bTryDown = true;
}
else if((CurrRetryRate> (priv->LastRetryRate + 50 )) && (priv->FailTxRateCount >2 ))
// else if((CurrRetryRate> (priv->LastRetryRate + 70 )) && (priv->FailTxRateCount >2 ))
{
bTryDown = true;
priv->TryDownCountLowData += TryDownTh;
}
else if ( (CurrRetryRate<20) && (priv->LastRetryRate<21)) //TO DO: need to consider (RSSI)
// else if ( (CurrRetryRate<40) && (priv->LastRetryRate<41))
{
bTryUp = true;
}
if(bTryDown && (CurrSignalStrength < -82))
{
priv->TryDownCountLowData += TryDownTh;
}
//printk("case7---24M \n");
}
else if ( priv->CurrentOperaRate == 36 )
{
//2For 18Mbps
// original (109, 109)
//[TRC Dell Lab] (90, 91), Isaiah 2008-02-18 23:24
// (85, 86), Isaiah 2008-02-18 24:00
if ( ((CurrRetryRate>85) && (priv->LastRetryRate>86)))
// if ( ((CurrRetryRate>115) && (priv->LastRetryRate>116)))
{
//Down to rate 11Mbps.
bTryDown = true;
}
//[TRC Dell Lab] Isaiah 2008-02-18 23:24
else if((CurrRetryRate> (priv->LastRetryRate + 50 )) && (priv->FailTxRateCount >2 ))
// else if((CurrRetryRate> (priv->LastRetryRate + 70 )) && (priv->FailTxRateCount >2 ))
{
bTryDown = true;
priv->TryDownCountLowData += TryDownTh;
}
else if ( (CurrRetryRate<22) && (priv->LastRetryRate<23)) //TO DO: need to consider (RSSI)
// else if ( (CurrRetryRate<42) && (priv->LastRetryRate<43))
{
bTryUp = true;
}
//printk("case8---18M \n");
}
else if ( priv->CurrentOperaRate == 22 )
{
//2For 11Mbps
if (CurrRetryRate>95)
// if (CurrRetryRate>155)
{
bTryDown = true;
}
else if ( (CurrRetryRate<29) && (priv->LastRetryRate <30) )//TO DO: need to consider (RSSI)
// else if ( (CurrRetryRate<49) && (priv->LastRetryRate <50) )
{
bTryUp = true;
}
//printk("case9---11M \n");
}
else if ( priv->CurrentOperaRate == 11 )
{
//2For 5.5Mbps
if (CurrRetryRate>149)
// if (CurrRetryRate>189)
{
bTryDown = true;
}
else if ( (CurrRetryRate<60) && (priv->LastRetryRate < 65))
// else if ( (CurrRetryRate<80) && (priv->LastRetryRate < 85))
{
bTryUp = true;
}
//printk("case10---5.5M \n");
}
else if ( priv->CurrentOperaRate == 4 )
{
//2For 2 Mbps
if((CurrRetryRate>99) && (priv->LastRetryRate>99))
// if((CurrRetryRate>199) && (priv->LastRetryRate>199))
{
bTryDown = true;
}
else if ( (CurrRetryRate < 65) && (priv->LastRetryRate < 70))
// else if ( (CurrRetryRate < 85) && (priv->LastRetryRate < 90))
{
bTryUp = true;
}
//printk("case11---2M \n");
}
else if ( priv->CurrentOperaRate == 2 )
{
//2For 1 Mbps
if( (CurrRetryRate<70) && (priv->LastRetryRate<75))
// if( (CurrRetryRate<90) && (priv->LastRetryRate<95))
{
bTryUp = true;
}
//printk("case12---1M \n");
}
if(bTryUp && bTryDown)
printk("StaRateAdaptive87B(): Tx Rate tried upping and downing simultaneously!\n");
//1 Test Upgrading Tx Rate
// Sometimes the cause of the low throughput (high retry rate) is the compatibility between the AP and NIC.
// To test whether a higher rate may give a lower retry rate, this mechanism occasionally tries upgrading the tx rate.
if(!bTryUp && !bTryDown && (priv->TryupingCount == 0) && (priv->TryDownCountLowData == 0)
&& priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate && priv->FailTxRateCount < 2)
{
if(jiffies% (CurrRetryRate + 101) == 0)
{
bTryUp = true;
priv->bTryuping = true;
//printk("StaRateAdaptive87SE(): Randomly try upgrading...\n");
}
}
//1 Rate Mechanism
if(bTryUp)
{
priv->TryupingCount++;
priv->TryDownCountLowData = 0;
{
// printk("UP: pHalData->TryupingCount = %d\n", priv->TryupingCount);
// printk("UP: TryUpTh(%d)+ (FailTxRateCount(%d))^2 =%d\n",
// TryUpTh, priv->FailTxRateCount, (TryUpTh + priv->FailTxRateCount * priv->FailTxRateCount) );
// printk("UP: pHalData->bTryuping=%d\n", priv->bTryuping);
}
//
// Check more times if we need to upgrade indeed.
// Because the largest value of pHalData->TryupingCount is 0xFFFF and
// the largest value of pHalData->FailTxRateCount is 0x14,
// this condition will be satisfied at most every 2 min.
//
if((priv->TryupingCount > (TryUpTh + priv->FailTxRateCount * priv->FailTxRateCount)) ||
(CurrSignalStrength > priv->LastFailTxRateSS) || priv->bTryuping)
{
priv->TryupingCount = 0;
//
// When transferring from CCK to OFDM, DIG is an important issue.
//
if(priv->CurrentOperaRate == 22)
bUpdateInitialGain = true;
// The difference in throughput between 48Mbps and 36Mbps is 8M.
// So, we must be careful in this rate range. Isaiah 2008-02-15.
//
if( ((priv->CurrentOperaRate == 72) || (priv->CurrentOperaRate == 48) || (priv->CurrentOperaRate == 36)) &&
(priv->FailTxRateCount > 2) )
priv->RateAdaptivePeriod= (RATE_ADAPTIVE_TIMER_PERIOD/2);
// (1)To avoid upgrade frequently to the fail tx rate, add the FailTxRateCount into the threshold.
// (2)If the signal strength is increased, it may be able to upgrade.
priv->CurrentOperaRate = GetUpgradeTxRate(dev, priv->CurrentOperaRate);
// printk("StaRateAdaptive87SE(): Upgrade Tx Rate to %d\n", priv->CurrentOperaRate);
//[TRC Dell Lab] Bypass 12/9/6, Isaiah 2008-02-18 20:00
if(priv->CurrentOperaRate ==36)
{
priv->bUpdateARFR=true;
write_nic_word(dev, ARFR, 0x0F8F); //bypass 12/9/6
// printk("UP: ARFR=0xF8F\n");
}
else if(priv->bUpdateARFR)
{
priv->bUpdateARFR=false;
write_nic_word(dev, ARFR, 0x0FFF); //set 1M ~ 54Mbps.
// printk("UP: ARFR=0xFFF\n");
}
// Update Fail Tx rate and count.
if(priv->LastFailTxRate != priv->CurrentOperaRate)
{
priv->LastFailTxRate = priv->CurrentOperaRate;
priv->FailTxRateCount = 0;
priv->LastFailTxRateSS = -200; // Set lowest power.
}
}
}
else
{
if(priv->TryupingCount > 0)
priv->TryupingCount --;
}
if(bTryDown)
{
priv->TryDownCountLowData++;
priv->TryupingCount = 0;
{
// printk("DN: pHalData->TryDownCountLowData = %d\n",priv->TryDownCountLowData);
// printk("DN: TryDownTh =%d\n", TryDownTh);
// printk("DN: pHalData->bTryuping=%d\n", priv->bTryuping);
}
//Check if the Tx rate can be degraded, or if a test upgrade should fall back.
if(priv->TryDownCountLowData > TryDownTh || priv->bTryuping)
{
priv->TryDownCountLowData = 0;
priv->bTryuping = false;
// Update fail information.
if(priv->LastFailTxRate == priv->CurrentOperaRate)
{
priv->FailTxRateCount ++;
// Record the Tx fail rate signal strength.
if(CurrSignalStrength > priv->LastFailTxRateSS)
{
priv->LastFailTxRateSS = CurrSignalStrength;
}
}
else
{
priv->LastFailTxRate = priv->CurrentOperaRate;
priv->FailTxRateCount = 1;
priv->LastFailTxRateSS = CurrSignalStrength;
}
priv->CurrentOperaRate = GetDegradeTxRate(dev, priv->CurrentOperaRate);
// Reduce chariot training time at weak signal strength situation. SD3 ED demand.
//[TRC Dell Lab] Revise Signal Threshold from -75 to -80 , Isaiah 2008-02-18 20:00
if( (CurrSignalStrength < -80) && (priv->CurrentOperaRate > 72 ))
{
priv->CurrentOperaRate = 72;
// printk("DN: weak signal strength (%d), degrade to 36Mbps\n", CurrSignalStrength);
}
//[TRC Dell Lab] Bypass 12/9/6, Isaiah 2008-02-18 20:00
if(priv->CurrentOperaRate ==36)
{
priv->bUpdateARFR=true;
write_nic_word(dev, ARFR, 0x0F8F); //bypass 12/9/6
// printk("DN: ARFR=0xF8F\n");
}
else if(priv->bUpdateARFR)
{
priv->bUpdateARFR=false;
write_nic_word(dev, ARFR, 0x0FFF); //set 1M ~ 54Mbps.
// printk("DN: ARFR=0xFFF\n");
}
//
// When it is CCK rate, it may need to update initial gain to receive lower power packets.
//
if(MgntIsCckRate(priv->CurrentOperaRate))
{
bUpdateInitialGain = true;
}
// printk("StaRateAdaptive87SE(): Degrade Tx Rate to %d\n", priv->CurrentOperaRate);
}
}
else
{
if(priv->TryDownCountLowData > 0)
priv->TryDownCountLowData --;
}
// Keep the Tx fail rate count to equal to 0x15 at most.
// Reduce the fail count, over at least 10 sec, if the tx rate is tending toward stable.
if(priv->FailTxRateCount >= 0x15 ||
(!bTryUp && !bTryDown && priv->TryDownCountLowData == 0 && priv->TryupingCount && priv->FailTxRateCount > 0x6))
{
priv->FailTxRateCount --;
}
OfdmTxPwrIdx = priv->chtxpwr_ofdm[priv->ieee80211->current_network.channel];
CckTxPwrIdx = priv->chtxpwr[priv->ieee80211->current_network.channel];
//[TRC Dell Lab] Mac0x9e increase 2 level in 36M~18M situation, Isaiah 2008-02-18 24:00
if((priv->CurrentOperaRate < 96) &&(priv->CurrentOperaRate > 22))
{
u1bCck = read_nic_byte(dev, CCK_TXAGC);
u1bOfdm = read_nic_byte(dev, OFDM_TXAGC);
// case 1: Never enter High power
if(u1bCck == CckTxPwrIdx )
{
if(u1bOfdm != (OfdmTxPwrIdx+2) )
{
priv->bEnhanceTxPwr= true;
u1bOfdm = ((u1bOfdm+2) > 35) ? 35: (u1bOfdm+2);
write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
// printk("Enhance OFDM_TXAGC : +++++ u1bOfdm= 0x%x\n", u1bOfdm);
}
}
// case 2: enter high power
else if(u1bCck < CckTxPwrIdx)
{
if(!priv->bEnhanceTxPwr)
{
priv->bEnhanceTxPwr= true;
u1bOfdm = ((u1bOfdm+2) > 35) ? 35: (u1bOfdm+2);
write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
//RT_TRACE(COMP_RATE, DBG_TRACE, ("Enhance OFDM_TXAGC(2) : +++++ u1bOfdm= 0x%x\n", u1bOfdm));
}
}
}
else if(priv->bEnhanceTxPwr) //54/48/11/5.5/2/1
{
u1bCck = read_nic_byte(dev, CCK_TXAGC);
u1bOfdm = read_nic_byte(dev, OFDM_TXAGC);
// case 1: Never enter High power
if(u1bCck == CckTxPwrIdx )
{
priv->bEnhanceTxPwr= false;
write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
//printk("Recover OFDM_TXAGC : ===== u1bOfdm= 0x%x\n", OfdmTxPwrIdx);
}
// case 2: enter high power
else if(u1bCck < CckTxPwrIdx)
{
priv->bEnhanceTxPwr= false;
u1bOfdm = ((u1bOfdm-2) > 0) ? (u1bOfdm-2): 0;
write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
//RT_TRACE(COMP_RATE, DBG_TRACE, ("Recover OFDM_TXAGC(2): ===== u1bOfdm= 0x%x\n", u1bOfdm));
}
}
//
// We need update initial gain when we set tx rate "from OFDM to CCK" or
// "from CCK to OFDM".
//
SetInitialGain:
if(bUpdateInitialGain)
{
if(MgntIsCckRate(priv->CurrentOperaRate)) // CCK
{
if(priv->InitialGain > priv->RegBModeGainStage)
{
priv->InitialGainBackUp= priv->InitialGain;
if(CurrSignalStrength < -85) // Low power, OFDM [0x17] = 26.
{
//SD3 SYs suggest that CurrSignalStrength < -65, ofdm 0x17=26.
priv->InitialGain = priv->RegBModeGainStage;
}
else if(priv->InitialGain > priv->RegBModeGainStage + 1)
{
priv->InitialGain -= 2;
}
else
{
priv->InitialGain --;
}
printk("StaRateAdaptive87SE(): update init_gain to index %d for date rate %d\n",priv->InitialGain, priv->CurrentOperaRate);
UpdateInitialGain(dev);
}
}
else // OFDM
{
if(priv->InitialGain < 4)
{
priv->InitialGainBackUp= priv->InitialGain;
priv->InitialGain ++;
printk("StaRateAdaptive87SE(): update init_gain to index %d for date rate %d\n",priv->InitialGain, priv->CurrentOperaRate);
UpdateInitialGain(dev);
}
}
}
//Record the related info
priv->LastRetryRate = CurrRetryRate;
priv->LastTxThroughput = TxThroughput;
priv->ieee80211->rate = priv->CurrentOperaRate * 5;
}
void rtl8180_rate_adapter(struct work_struct * work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,rate_adapter_wq);
struct net_device *dev = ieee->dev;
//struct r8180_priv *priv = ieee80211_priv(dev);
// DMESG("---->rtl8180_rate_adapter");
StaRateAdaptive87SE(dev);
// DMESG("<----rtl8180_rate_adapter");
}
void timer_rate_adaptive(unsigned long data)
{
struct r8180_priv* priv = ieee80211_priv((struct net_device *)data);
//DMESG("---->timer_rate_adaptive()\n");
if(!priv->up)
{
// DMESG("<----timer_rate_adaptive():driver is not up!\n");
return;
}
if((priv->ieee80211->iw_mode != IW_MODE_MASTER)
&& (priv->ieee80211->state == IEEE80211_LINKED) &&
(priv->ForcedDataRate == 0) )
{
// DMESG("timer_rate_adaptive():schedule rate_adapter_wq\n");
queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->rate_adapter_wq);
// StaRateAdaptive87SE((struct net_device *)data);
}
priv->rateadapter_timer.expires = jiffies + MSECS(priv->RateAdaptivePeriod);
add_timer(&priv->rateadapter_timer);
//DMESG("<----timer_rate_adaptive()\n");
}
//by amy 080312}
void
SwAntennaDiversityRxOk8185(
struct net_device *dev,
u8 SignalStrength
)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
// printk("+SwAntennaDiversityRxOk8185: RxSs: %d\n", SignalStrength);
priv->AdRxOkCnt++;
if( priv->AdRxSignalStrength != -1)
{
priv->AdRxSignalStrength = ((priv->AdRxSignalStrength*7) + (SignalStrength*3)) / 10;
}
else
{ // Initialization case.
priv->AdRxSignalStrength = SignalStrength;
}
//{+by amy 080312
if( priv->LastRxPktAntenna ) //Main antenna.
priv->AdMainAntennaRxOkCnt++;
else // Aux antenna.
priv->AdAuxAntennaRxOkCnt++;
//+by amy 080312
// printk("-SwAntennaDiversityRxOk8185: AdRxOkCnt: %d AdRxSignalStrength: %d\n", priv->AdRxOkCnt, priv->AdRxSignalStrength);
}
//
// Description:
// Change Antenna Switch.
//
bool
SetAntenna8185(
struct net_device *dev,
u8 u1bAntennaIndex
)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bAntennaSwitched = false;
// printk("+SetAntenna8185(): Antenna is switching to: %d \n", u1bAntennaIndex);
switch(u1bAntennaIndex)
{
case 0:
/* Mac register, main antenna */
write_nic_byte(dev, ANTSEL, 0x03);
/* base band */
write_phy_cck(dev, 0x11, 0x9b); /* Config CCK RX antenna. */
write_phy_ofdm(dev, 0x0d, 0x5c); /* Config OFDM RX antenna. */
bAntennaSwitched = true;
break;
case 1:
/* Mac register, aux antenna */
write_nic_byte(dev, ANTSEL, 0x00);
/* base band */
write_phy_cck(dev, 0x11, 0xbb); /* Config CCK RX antenna. */
write_phy_ofdm(dev, 0x0d, 0x54); /* Config OFDM RX antenna. */
bAntennaSwitched = true;
break;
default:
printk("SetAntenna8185: unknown u1bAntennaIndex(%d)\n", u1bAntennaIndex);
break;
}
if(bAntennaSwitched)
{
priv->CurrAntennaIndex = u1bAntennaIndex;
}
// printk("-SetAntenna8185(): return (%#X)\n", bAntennaSwitched);
return bAntennaSwitched;
}
//
// Description:
// Toggle Antenna switch.
//
bool
SwitchAntenna(
struct net_device *dev
)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bResult;
if(priv->CurrAntennaIndex == 0)
{
bResult = SetAntenna8185(dev, 1);
//by amy 080312
// printk("SwitchAntenna(): switching to antenna 1 ......\n");
// bResult = SetAntenna8185(dev, 1);//-by amy 080312
}
else
{
bResult = SetAntenna8185(dev, 0);
//by amy 080312
// printk("SwitchAntenna(): switching to antenna 0 ......\n");
// bResult = SetAntenna8185(dev, 0);//-by amy 080312
}
return bResult;
}
//
// Description:
// Engine of SW Antenna Diversity mechanism.
// Since 8187 has no Tx part information,
// this implementation depends only on Rx part information.
//
// 2006.04.17, by rcnjko.
//
void
SwAntennaDiversity(
struct net_device *dev
)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bSwCheckSS=false;
// printk("+SwAntennaDiversity(): CurrAntennaIndex: %d\n", priv->CurrAntennaIndex);
// printk("AdTickCount is %d\n",priv->AdTickCount);
//by amy 080312
if(bSwCheckSS)
{
priv->AdTickCount++;
printk("(1) AdTickCount: %d, AdCheckPeriod: %d\n",
priv->AdTickCount, priv->AdCheckPeriod);
printk("(2) AdRxSignalStrength: %ld, AdRxSsThreshold: %ld\n",
priv->AdRxSignalStrength, priv->AdRxSsThreshold);
}
// priv->AdTickCount++;//-by amy 080312
// Case 1. No Link.
if(priv->ieee80211->state != IEEE80211_LINKED)
{
// printk("SwAntennaDiversity(): Case 1. No Link.\n");
priv->bAdSwitchedChecking = false;
// Switch antenna here in case one of the antennas is broken before the link is established, 2006.04.18, by rcnjko.
SwitchAntenna(dev);
}
// Case 2. Linked but no packet received.
else if(priv->AdRxOkCnt == 0)
{
// printk("SwAntennaDiversity(): Case 2. Linked but no packet received.\n");
priv->bAdSwitchedChecking = false;
SwitchAntenna(dev);
}
// Case 3. Evaluate last antenna switch action and undo it if necessary.
else if(priv->bAdSwitchedChecking == true)
{
// printk("SwAntennaDiversity(): Case 3. Evaluate last antenna switch action.\n");
priv->bAdSwitchedChecking = false;
// Adjust Rx signal strength threshold.
priv->AdRxSsThreshold = (priv->AdRxSignalStrength + priv->AdRxSsBeforeSwitched) / 2;
priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
priv->AdMaxRxSsThreshold: priv->AdRxSsThreshold;
if(priv->AdRxSignalStrength < priv->AdRxSsBeforeSwitched)
{ // Rx signal strength is not improved after we switched antenna. => Switch back.
// printk("SwAntennaDiversity(): Rx Signal Strength is not improved, CurrRxSs: %d, LastRxSs: %d\n",
// priv->AdRxSignalStrength, priv->AdRxSsBeforeSwitched);
//by amy 080312
// Increase Antenna Diversity checking period due to bad decision.
priv->AdCheckPeriod *= 2;
//by amy 080312
// Increase Antenna Diversity checking period.
if(priv->AdCheckPeriod > priv->AdMaxCheckPeriod)
priv->AdCheckPeriod = priv->AdMaxCheckPeriod;
// Wrong decision => switch back.
SwitchAntenna(dev);
}
else
{ // Rx Signal Strength is improved.
// printk("SwAntennaDiversity(): Rx Signal Strength is improved, CurrRxSs: %d, LastRxSs: %d\n",
// priv->AdRxSignalStrength, priv->AdRxSsBeforeSwitched);
// Reset Antenna Diversity checking period to its min value.
priv->AdCheckPeriod = priv->AdMinCheckPeriod;
}
// printk("SwAntennaDiversity(): AdRxSsThreshold: %d, AdCheckPeriod: %d\n",
// priv->AdRxSsThreshold, priv->AdCheckPeriod);
}
// Case 4. Evaluate if we shall switch antenna now.
// Because table speed is very fast in the TRC Dell Lab, we check it every time.
else// if(priv->AdTickCount >= priv->AdCheckPeriod)//-by amy 080312
{
// printk("SwAntennaDiversity(): Case 4. Evaluate if we shall switch antenna now.\n");
priv->AdTickCount = 0;
//
// <Roger_Notes> We evaluate RxOk counts for each antenna first and then
// evaluate signal strength.
// The following operation can overcome the inability of CCA on both antennas
// when signal strength is extremely low or high.
// 2008.01.30.
//
//
// Evaluate RxOk count from each antenna if we shall switch default antenna now.
// Added by Roger, 2008.02.21.
//{by amy 080312
if((priv->AdMainAntennaRxOkCnt < priv->AdAuxAntennaRxOkCnt)
&& (priv->CurrAntennaIndex == 0))
{ // We set Main antenna as default but RxOk count was less than Aux ones.
// printk("SwAntennaDiversity(): Main antenna RxOK is poor, AdMainAntennaRxOkCnt: %d, AdAuxAntennaRxOkCnt: %d\n",
// priv->AdMainAntennaRxOkCnt, priv->AdAuxAntennaRxOkCnt);
// Switch to Aux antenna.
SwitchAntenna(dev);
priv->bHWAdSwitched = true;
}
else if((priv->AdAuxAntennaRxOkCnt < priv->AdMainAntennaRxOkCnt)
&& (priv->CurrAntennaIndex == 1))
{ // We set Aux antenna as default but RxOk count was less than Main ones.
// printk("SwAntennaDiversity(): Aux antenna RxOK is poor, AdMainAntennaRxOkCnt: %d, AdAuxAntennaRxOkCnt: %d\n",
// priv->AdMainAntennaRxOkCnt, priv->AdAuxAntennaRxOkCnt);
// Switch to Main antenna.
SwitchAntenna(dev);
priv->bHWAdSwitched = true;
}
else
{// Default antenna is better.
// printk("SwAntennaDiversity(): Default antenna is better., AdMainAntennaRxOkCnt: %d, AdAuxAntennaRxOkCnt: %d\n",
// priv->AdMainAntennaRxOkCnt, priv->AdAuxAntennaRxOkCnt);
// Still need to check current signal strength.
priv->bHWAdSwitched = false;
}
//
// <Roger_Notes> We evaluate Rx signal strength ONLY when default antenna
// didn't changed by HW evaluation.
// 2008.02.27.
//
// [TRC Dell Lab] SignalStrength is inaccurate. Isaiah 2008-03-05
// For example, the throughput of the aux antenna is better than the main antenna (about 10M vs. 2M),
// but its AdRxSignalStrength is less than main.
// Our guess is that the main antenna has lower throughput and gets many chances
// to receive more CCK packets (e.g. Beacon), which have stronger SignalStrength.
//
if( (!priv->bHWAdSwitched) && (bSwCheckSS))
{
//by amy 080312}
// Evaluate Rx signal strength if we shall switch antenna now.
if(priv->AdRxSignalStrength < priv->AdRxSsThreshold)
{ // Rx signal strength is weak => Switch Antenna.
// printk("SwAntennaDiversity(): Rx Signal Strength is weak, CurrRxSs: %d, RxSsThreshold: %d\n",
// priv->AdRxSignalStrength, priv->AdRxSsThreshold);
priv->AdRxSsBeforeSwitched = priv->AdRxSignalStrength;
priv->bAdSwitchedChecking = true;
SwitchAntenna(dev);
}
else
{ // Rx signal strength is OK.
// printk("SwAntennaDiversity(): Rx Signal Strength is OK, CurrRxSs: %d, RxSsThreshold: %d\n",
// priv->AdRxSignalStrength, priv->AdRxSsThreshold);
priv->bAdSwitchedChecking = false;
// Increase Rx signal strength threshold if necessary.
if( (priv->AdRxSignalStrength > (priv->AdRxSsThreshold + 10)) && // Signal is much stronger than current threshold
priv->AdRxSsThreshold <= priv->AdMaxRxSsThreshold) // Current threshold has not yet reached the upper limit.
{
priv->AdRxSsThreshold = (priv->AdRxSsThreshold + priv->AdRxSignalStrength) / 2;
priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
priv->AdMaxRxSsThreshold: priv->AdRxSsThreshold;//+by amy 080312
}
// Reduce Antenna Diversity checking period if possible.
if( priv->AdCheckPeriod > priv->AdMinCheckPeriod )
{
priv->AdCheckPeriod /= 2;
}
}
}
}
//by amy 080312
// Reset antenna diversity Rx related statistics.
priv->AdRxOkCnt = 0;
priv->AdMainAntennaRxOkCnt = 0;
priv->AdAuxAntennaRxOkCnt = 0;
//by amy 080312
// priv->AdRxOkCnt = 0;//-by amy 080312
// printk("-SwAntennaDiversity()\n");
}
//
// Description:
// Return TRUE if we shall perform the Tx Power Tracking Mechanism, FALSE otherwise.
//
bool
CheckTxPwrTracking( struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
if(!priv->bTxPowerTrack)
{
return false;
}
//lzm reserved 080826
//if(priv->bScanInProgress)
//{
// return false;
//}
//If 87SE is in High Power, don't do Tx Power Tracking. Asked by SD3 ED. 2008-08-08 Isaiah
if(priv->bToUpdateTxPwr)
{
return false;
}
return true;
}
//
// Description:
// Timer callback function of SW Antenna Diversity.
//
void
SwAntennaDiversityTimerCallback(
struct net_device *dev
)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
RT_RF_POWER_STATE rtState;
//printk("+SwAntennaDiversityTimerCallback()\n");
//
// We do NOT need to switch antenna while RF is off.
// 2007.05.09, added by Roger.
//
rtState = priv->eRFPowerState;
do{
if (rtState == eRfOff)
{
// printk("SwAntennaDiversityTimer - RF is OFF.\n");
break;
}
else if (rtState == eRfSleep)
{
// Don't access BB/RF under Disable PLL situation.
//RT_TRACE((COMP_RF|COMP_ANTENNA), DBG_LOUD, ("SwAntennaDiversityTimerCallback(): RF is Sleep => skip it\n"));
break;
}
SwAntennaDiversity(dev);
}while(false);
if(priv->up)
{
priv->SwAntennaDiversityTimer.expires = jiffies + MSECS(ANTENNA_DIVERSITY_TIMER_PERIOD);
add_timer(&priv->SwAntennaDiversityTimer);
}
//printk("-SwAntennaDiversityTimerCallback()\n");
}
| gpl-2.0 |
kamarush/sony_yuga_kernel | drivers/ata/pata_ninja32.c | 8040 | 5498 | /*
* pata_ninja32.c - Ninja32 PATA for new ATA layer
* (C) 2007 Red Hat Inc
*
* Note: The controller like many controllers has shared timings for
* PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
* in the dma_stop function. Thus we actually don't need a set_dmamode
* method as the PIO method is always called and will set the right PIO
* timing parameters.
*
* The Ninja32 Cardbus is not a generic SFF controller. Instead it is
* laid out as follows off BAR 0. This is based upon Mark Lord's delkin
* driver and the extensive analysis done by the BSD developers, notably
* ITOH Yasufumi.
*
* Base + 0x00 IRQ Status
* Base + 0x01 IRQ control
* Base + 0x02 Chipset control
* Base + 0x03 Unknown
* Base + 0x04 VDMA and reset control + wait bits
* Base + 0x08 BMIMBA
* Base + 0x0C DMA Length
* Base + 0x10 Taskfile
* Base + 0x18 BMDMA Status ?
* Base + 0x1C
* Base + 0x1D Bus master control
* bit 0 = enable
* bit 1 = 0 write/1 read
* bit 2 = 1 sgtable
* bit 3 = go
* bit 4-6 wait bits
* bit 7 = done
* Base + 0x1E AltStatus
* Base + 0x1F timing register
*/
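/*
* Illustrative sketch (added, not part of the original driver): bit masks
* for the bus master control byte at Base + 0x1D as described above. The
* names are hypothetical; only the bit positions come from the layout notes.
*/
#if 0 /* example only */
#define NINJA32_BM_ENABLE 0x01 /* bit 0 = enable */
#define NINJA32_BM_READ 0x02 /* bit 1 = 0 write / 1 read */
#define NINJA32_BM_SGTABLE 0x04 /* bit 2 = 1 sgtable */
#define NINJA32_BM_GO 0x08 /* bit 3 = go */
#define NINJA32_BM_WAIT 0x70 /* bits 4-6 = wait bits */
#define NINJA32_BM_DONE 0x80 /* bit 7 = done */
#endif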
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_ninja32"
#define DRV_VERSION "0.1.5"
/**
* ninja32_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. Our timing registers are shared
* but we want to set the PIO timing by default.
*/
static void ninja32_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static u16 pio_timing[5] = {
0xd6, 0x85, 0x44, 0x33, 0x13
};
iowrite8(pio_timing[adev->pio_mode - XFER_PIO_0],
ap->ioaddr.bmdma_addr + 0x1f);
ap->private_data = adev;
}
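/*
* Added note (not in the original driver): pio_timing[] is indexed by
* (pio_mode - XFER_PIO_0), so its five entries cover PIO modes 0-4 as
* advertised by ATA_PIO4 in ninja32_init_one(); the chosen value is written
* to the shared timing register at bmdma_addr + 0x1f.
*/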
static void ninja32_dev_select(struct ata_port *ap, unsigned int device)
{
struct ata_device *adev = &ap->link.device[device];
if (ap->private_data != adev) {
iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f);
ata_sff_dev_select(ap, device);
ninja32_set_piomode(ap, adev);
}
}
static struct scsi_host_template ninja32_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations ninja32_port_ops = {
.inherits = &ata_bmdma_port_ops,
.sff_dev_select = ninja32_dev_select,
.cable_detect = ata_cable_40wire,
.set_piomode = ninja32_set_piomode,
.sff_data_xfer = ata_sff_data_xfer32
};
static void ninja32_program(void __iomem *base)
{
iowrite8(0x05, base + 0x01); /* Enable interrupt lines */
iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */
iowrite8(0x01, base + 0x03); /* Unknown */
iowrite8(0x20, base + 0x04); /* WAIT0 */
iowrite8(0x8f, base + 0x05); /* Unknown */
iowrite8(0xa4, base + 0x1c); /* Unknown */
iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */
}
static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ata_host *host;
struct ata_port *ap;
void __iomem *base;
int rc;
host = ata_host_alloc(&dev->dev, 1);
if (!host)
return -ENOMEM;
ap = host->ports[0];
/* Set up the PCI device */
rc = pcim_enable_device(dev);
if (rc)
return rc;
rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(dev);
if (rc)
return rc;
host->iomap = pcim_iomap_table(dev);
rc = pci_set_dma_mask(dev, ATA_DMA_MASK);
if (rc)
return rc;
rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK);
if (rc)
return rc;
pci_set_master(dev);
/* Set up the register mappings. We use the I/O mapping; only the
older chips also have MMIO on BAR 1 */
base = host->iomap[0];
if (!base)
return -ENOMEM;
ap->ops = &ninja32_port_ops;
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->ioaddr.cmd_addr = base + 0x10;
ap->ioaddr.ctl_addr = base + 0x1E;
ap->ioaddr.altstatus_addr = base + 0x1E;
ap->ioaddr.bmdma_addr = base;
ata_sff_std_ports(&ap->ioaddr);
ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
ninja32_program(base);
/* FIXME: Should we disable them at remove ? */
return ata_host_activate(host, dev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &ninja32_sht);
}
#ifdef CONFIG_PM
static int ninja32_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
ninja32_program(host->iomap[0]);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id ninja32[] = {
{ 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ },
};
static struct pci_driver ninja32_pci_driver = {
.name = DRV_NAME,
.id_table = ninja32,
.probe = ninja32_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ninja32_reinit_one,
#endif
};
static int __init ninja32_init(void)
{
return pci_register_driver(&ninja32_pci_driver);
}
static void __exit ninja32_exit(void)
{
pci_unregister_driver(&ninja32_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Ninja32 ATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ninja32);
MODULE_VERSION(DRV_VERSION);
module_init(ninja32_init);
module_exit(ninja32_exit);
| gpl-2.0 |
bemolxd/android_kernel_x2xtreme-test | drivers/macintosh/nvram.c | 9320 | 2552 | /*
* /dev/nvram driver for Power Macintosh.
*/
#define NVRAM_VERSION "1.0"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/nvram.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#define NVRAM_SIZE 8192
static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
{
switch (origin) {
case 0:
break;
case 1:
offset += file->f_pos;
break;
case 2:
offset += NVRAM_SIZE;
break;
default:
offset = -1;
}
if (offset < 0)
return -EINVAL;
file->f_pos = offset;
return file->f_pos;
}
static ssize_t read_nvram(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
unsigned int i;
char __user *p = buf;
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
if (*ppos >= NVRAM_SIZE)
return 0;
for (i = *ppos; count > 0 && i < NVRAM_SIZE; ++i, ++p, --count)
if (__put_user(nvram_read_byte(i), p))
return -EFAULT;
*ppos = i;
return p - buf;
}
static ssize_t write_nvram(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned int i;
const char __user *p = buf;
char c;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
if (*ppos >= NVRAM_SIZE)
return 0;
for (i = *ppos; count > 0 && i < NVRAM_SIZE; ++i, ++p, --count) {
if (__get_user(c, p))
return -EFAULT;
nvram_write_byte(c, i);
}
*ppos = i;
return p - buf;
}
static long nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch(cmd) {
case PMAC_NVRAM_GET_OFFSET:
{
int part, offset;
if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
return -EFAULT;
if (part < pmac_nvram_OF || part > pmac_nvram_NR)
return -EINVAL;
offset = pmac_get_partition(part);
if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0)
return -EFAULT;
break;
}
default:
return -EINVAL;
}
return 0;
}
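/*
* Illustrative user-space sketch (added, not part of the original driver):
* PMAC_NVRAM_GET_OFFSET takes a partition number in and returns that
* partition's byte offset in the same int, which can then be used with
* lseek()/read() on /dev/nvram. The partition constant is symbolic here;
* its numeric value comes from the pmac nvram headers.
*
* int part = pmac_nvram_OF;
* if (ioctl(fd, PMAC_NVRAM_GET_OFFSET, &part) == 0) {
* lseek(fd, part, SEEK_SET); /- part now holds the offset -/
* read(fd, buf, len);
* }
*/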
const struct file_operations nvram_fops = {
.owner = THIS_MODULE,
.llseek = nvram_llseek,
.read = read_nvram,
.write = write_nvram,
.unlocked_ioctl = nvram_ioctl,
};
static struct miscdevice nvram_dev = {
NVRAM_MINOR,
"nvram",
&nvram_fops
};
int __init nvram_init(void)
{
printk(KERN_INFO "Macintosh non-volatile memory driver v%s\n",
NVRAM_VERSION);
return misc_register(&nvram_dev);
}
void __exit nvram_cleanup(void)
{
misc_deregister( &nvram_dev );
}
module_init(nvram_init);
module_exit(nvram_cleanup);
MODULE_LICENSE("GPL");
| gpl-2.0 |
nimengyu2/dm37x-kernel-2.6.37-psp04.02.00.07 | samples/kfifo/dma-example.c | 10856 | 3496 | /*
* Sample fifo dma implementation
*
* Copyright (C) 2010 Stefani Seibold <stefani@seibold.net>
*
* Released under the GPL version 2 only.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kfifo.h>
/*
* This module shows how to handle fifo dma operations.
*/
/* fifo size in elements (bytes) */
#define FIFO_SIZE 32
static struct kfifo fifo;
static int __init example_init(void)
{
int i;
unsigned int ret;
unsigned int nents;
struct scatterlist sg[10];
printk(KERN_INFO "DMA fifo test start\n");
if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) {
printk(KERN_WARNING "error kfifo_alloc\n");
return -ENOMEM;
}
printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo));
kfifo_in(&fifo, "test", 4);
for (i = 0; i != 9; i++)
kfifo_put(&fifo, &i);
/* kick away first byte */
kfifo_skip(&fifo);
printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));
/*
* Configure the kfifo buffer to receive data from DMA input.
*
* .--------------------------------------.
* | 0 | 1 | 2 | ... | 12 | 13 | ... | 31 |
* |---|------------------|---------------|
* \_/ \________________/ \_____________/
* \ \ \
* \ \_allocated data \
* \_*free space* \_*free space*
*
* We need two different SG entries: one for the free space area at the
* end of the kfifo buffer (19 bytes) and another for the first free
* byte at the beginning, after the kfifo_skip().
*/
sg_init_table(sg, ARRAY_SIZE(sg));
nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
printk(KERN_INFO "DMA sgl entries: %d\n", nents);
if (!nents) {
/* fifo is full and no sgl was created */
printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
return -EIO;
}
/* receive data */
printk(KERN_INFO "scatterlist for receive:\n");
for (i = 0; i < nents; i++) {
printk(KERN_INFO
"sg[%d] -> "
"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
i, sg[i].page_link, sg[i].offset, sg[i].length);
if (sg_is_last(&sg[i]))
break;
}
/* put here your code to set up and execute the dma operation */
/* ... */
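/*
* Hedged sketch (added example): with real hardware one would typically map
* the scatterlist and hand it to the DMA engine at this point via the
* generic DMA API; "dev" is a hypothetical struct device, which is why the
* sample leaves the step empty. The transmit path below is symmetric with
* DMA_TO_DEVICE.
*
* nents = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
* ...submit sg/nents to the controller and wait for completion...
* dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
*/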
/* example: zero bytes received */
ret = 0;
/* finish the dma operation and update the received data */
kfifo_dma_in_finish(&fifo, ret);
/* Prepare to transmit data, example: 8 bytes */
nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
printk(KERN_INFO "DMA sgl entries: %d\n", nents);
if (!nents) {
/* no data was available and no sgl was created */
printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
return -EIO;
}
printk(KERN_INFO "scatterlist for transmit:\n");
for (i = 0; i < nents; i++) {
printk(KERN_INFO
"sg[%d] -> "
"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
i, sg[i].page_link, sg[i].offset, sg[i].length);
if (sg_is_last(&sg[i]))
break;
}
/* put here your code to set up and execute the dma operation */
/* ... */
/* example: 5 bytes transmitted */
ret = 5;
/* finish the dma operation and update the transmitted data */
kfifo_dma_out_finish(&fifo, ret);
ret = kfifo_len(&fifo);
printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));
if (ret != 7) {
printk(KERN_WARNING "size mismatch: test failed");
return -EIO;
}
printk(KERN_INFO "test passed\n");
return 0;
}
static void __exit example_exit(void)
{
kfifo_free(&fifo);
}
module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefani Seibold <stefani@seibold.net>");
| gpl-2.0 |