repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
Mazout360/lge-kernel-star | drivers/char/mbcs.c | 3341 | 20363 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
*/
/*
* MOATB Core Services driver.
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/sn/addrs.h>
#include <asm/sn/intr.h>
#include <asm/sn/tiocx.h>
#include "mbcs.h"
#define MBCS_DEBUG 0
#if MBCS_DEBUG
#define DBG(fmt...) printk(KERN_ALERT fmt)
#else
#define DBG(fmt...)
#endif
/* Serialises walks of soft_list in mbcs_open(). */
static DEFINE_MUTEX(mbcs_mutex);
/* Char device major; 0 until assigned dynamically in mbcs_init(). */
static int mbcs_major;
/* List of all probed MBCS devices; entries matched by nasid in mbcs_open(). */
static LIST_HEAD(soft_list);
/*
 * file operations
 */
static const struct file_operations mbcs_ops = {
    .open = mbcs_open,
    .llseek = mbcs_sram_llseek,
    .read = mbcs_sram_read,
    .write = mbcs_sram_write,
    .mmap = mbcs_gscr_mmap,
};
/* Pairs a device minor with its cx_dev. Not referenced in this chunk;
 * presumably consumed by callback code elsewhere — verify before removing. */
struct mbcs_callback_arg {
    int minor;
    struct cx_dev *cx_dev;
};
/* Reset a DMA-read descriptor: all fields cleared, then ask for a
 * completion interrupt by default. */
static inline void mbcs_getdma_init(struct getdma *gdma)
{
    memset(gdma, 0, sizeof(*gdma));
    gdma->DoneIntEnable = 1;
}
/* Reset a DMA-write descriptor: all fields cleared, then ask for a
 * completion interrupt by default. */
static inline void mbcs_putdma_init(struct putdma *pdma)
{
    memset(pdma, 0, sizeof(*pdma));
    pdma->DoneIntEnable = 1;
}
/* Start from an all-zero algorithm descriptor. */
static inline void mbcs_algo_init(struct algoblock *algo_soft)
{
    memset(algo_soft, 0, sizeof(*algo_soft));
}
/*
 * mbcs_getdma_set - program the read-DMA engine's memory-mapped registers.
 * Each register image is assembled in a zeroed local union, then written:
 * host system address, local SRAM address + bank select, control word
 * (packet count, AMO/interrupt-on-done, PIO-vs-memory), AMO destination,
 * and finally the interrupt destination (xtalk address + vector).
 * Register write order follows the hardware programming sequence — do not
 * reorder the MBCS_MMR_SET calls.
 */
static inline void mbcs_getdma_set(void *mmr,
    uint64_t hostAddr,
    uint64_t localAddr,
    uint64_t localRamSel,
    uint64_t numPkts,
    uint64_t amoEnable,
    uint64_t intrEnable,
    uint64_t peerIO,
    uint64_t amoHostDest,
    uint64_t amoModType, uint64_t intrHostDest,
    uint64_t intrVector)
{
    union dma_control rdma_control;
    union dma_amo_dest amo_dest;
    union intr_dest intr_dest;
    union dma_localaddr local_addr;
    union dma_hostaddr host_addr;
    /* clear scratch register images before filling bitfields */
    rdma_control.dma_control_reg = 0;
    amo_dest.dma_amo_dest_reg = 0;
    intr_dest.intr_dest_reg = 0;
    local_addr.dma_localaddr_reg = 0;
    host_addr.dma_hostaddr_reg = 0;
    host_addr.dma_sys_addr = hostAddr;
    MBCS_MMR_SET(mmr, MBCS_RD_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
    local_addr.dma_ram_addr = localAddr;
    local_addr.dma_ram_sel = localRamSel;
    MBCS_MMR_SET(mmr, MBCS_RD_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
    rdma_control.dma_op_length = numPkts;
    rdma_control.done_amo_en = amoEnable;
    rdma_control.done_int_en = intrEnable;
    rdma_control.pio_mem_n = peerIO;
    MBCS_MMR_SET(mmr, MBCS_RD_DMA_CTRL, rdma_control.dma_control_reg);
    amo_dest.dma_amo_sys_addr = amoHostDest;
    amo_dest.dma_amo_mod_type = amoModType;
    MBCS_MMR_SET(mmr, MBCS_RD_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
    intr_dest.address = intrHostDest;
    intr_dest.int_vector = intrVector;
    MBCS_MMR_SET(mmr, MBCS_RD_DMA_INT_DEST, intr_dest.intr_dest_reg);
}
/*
 * mbcs_putdma_set - program the write-DMA engine's memory-mapped registers.
 * Mirror image of mbcs_getdma_set() using the MBCS_WR_DMA_* register set;
 * see that function for the per-register meaning. Keep the write order.
 */
static inline void mbcs_putdma_set(void *mmr,
    uint64_t hostAddr,
    uint64_t localAddr,
    uint64_t localRamSel,
    uint64_t numPkts,
    uint64_t amoEnable,
    uint64_t intrEnable,
    uint64_t peerIO,
    uint64_t amoHostDest,
    uint64_t amoModType,
    uint64_t intrHostDest, uint64_t intrVector)
{
    union dma_control wdma_control;
    union dma_amo_dest amo_dest;
    union intr_dest intr_dest;
    union dma_localaddr local_addr;
    union dma_hostaddr host_addr;
    /* clear scratch register images before filling bitfields */
    wdma_control.dma_control_reg = 0;
    amo_dest.dma_amo_dest_reg = 0;
    intr_dest.intr_dest_reg = 0;
    local_addr.dma_localaddr_reg = 0;
    host_addr.dma_hostaddr_reg = 0;
    host_addr.dma_sys_addr = hostAddr;
    MBCS_MMR_SET(mmr, MBCS_WR_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
    local_addr.dma_ram_addr = localAddr;
    local_addr.dma_ram_sel = localRamSel;
    MBCS_MMR_SET(mmr, MBCS_WR_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
    wdma_control.dma_op_length = numPkts;
    wdma_control.done_amo_en = amoEnable;
    wdma_control.done_int_en = intrEnable;
    wdma_control.pio_mem_n = peerIO;
    MBCS_MMR_SET(mmr, MBCS_WR_DMA_CTRL, wdma_control.dma_control_reg);
    amo_dest.dma_amo_sys_addr = amoHostDest;
    amo_dest.dma_amo_mod_type = amoModType;
    MBCS_MMR_SET(mmr, MBCS_WR_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
    intr_dest.address = intrHostDest;
    intr_dest.int_vector = intrVector;
    MBCS_MMR_SET(mmr, MBCS_WR_DMA_INT_DEST, intr_dest.intr_dest_reg);
}
/*
 * mbcs_algo_set - program the algorithm engine's AMO destination,
 * interrupt destination and step count registers.
 */
static inline void mbcs_algo_set(void *mmr,
    uint64_t amoHostDest,
    uint64_t amoModType,
    uint64_t intrHostDest,
    uint64_t intrVector, uint64_t algoStepCount)
{
    union dma_amo_dest amo_dest;
    union intr_dest intr_dest;
    union algo_step step;
    /* clear scratch register images before filling bitfields */
    step.algo_step_reg = 0;
    intr_dest.intr_dest_reg = 0;
    amo_dest.dma_amo_dest_reg = 0;
    amo_dest.dma_amo_sys_addr = amoHostDest;
    amo_dest.dma_amo_mod_type = amoModType;
    MBCS_MMR_SET(mmr, MBCS_ALG_AMO_DEST, amo_dest.dma_amo_dest_reg);
    intr_dest.address = intrHostDest;
    intr_dest.int_vector = intrVector;
    MBCS_MMR_SET(mmr, MBCS_ALG_INT_DEST, intr_dest.intr_dest_reg);
    step.alg_step_cnt = algoStepCount;
    MBCS_MMR_SET(mmr, MBCS_ALG_STEP, step.algo_step_reg);
}
/*
 * mbcs_getdma_start - program and kick the read-DMA engine from the
 * parameters staged in soft->getdma.
 * Returns 0 on success, -1 if no host buffer address was set up.
 */
static inline int mbcs_getdma_start(struct mbcs_soft *soft)
{
    void *mmr_base;
    struct getdma *gdma;
    uint64_t numPkts;
    union cm_control cm_control;
    mmr_base = soft->mmr_base;
    gdma = &soft->getdma;
    /* check that host address got setup */
    if (!gdma->hostAddr)
        return -1;
    /* round the byte count up to whole cachelines (packets) */
    numPkts =
        (gdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
    /* program engine; the second ternary chain maps the local address
     * to one of four 2MB SRAM bank selects */
    mbcs_getdma_set(mmr_base, tiocx_dma_addr(gdma->hostAddr),
        gdma->localAddr,
        (gdma->localAddr < MB2) ? 0 :
        (gdma->localAddr < MB4) ? 1 :
        (gdma->localAddr < MB6) ? 2 : 3,
        numPkts,
        gdma->DoneAmoEnable,
        gdma->DoneIntEnable,
        gdma->peerIO,
        gdma->amoHostDest,
        gdma->amoModType,
        gdma->intrHostDest, gdma->intrVector);
    /* start engine */
    cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
    cm_control.rd_dma_go = 1;
    MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
    return 0;
}
/*
 * mbcs_putdma_start - program and kick the write-DMA engine from the
 * parameters staged in soft->putdma.
 * Returns 0 on success, -1 if no host buffer address was set up.
 */
static inline int mbcs_putdma_start(struct mbcs_soft *soft)
{
    void *mmr_base;
    struct putdma *pdma;
    uint64_t numPkts;
    union cm_control cm_control;
    mmr_base = soft->mmr_base;
    pdma = &soft->putdma;
    /* check that host address got setup */
    if (!pdma->hostAddr)
        return -1;
    /* round the byte count up to whole cachelines (packets) */
    numPkts =
        (pdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
    /* program engine; ternary chain selects the 2MB SRAM bank */
    mbcs_putdma_set(mmr_base, tiocx_dma_addr(pdma->hostAddr),
        pdma->localAddr,
        (pdma->localAddr < MB2) ? 0 :
        (pdma->localAddr < MB4) ? 1 :
        (pdma->localAddr < MB6) ? 2 : 3,
        numPkts,
        pdma->DoneAmoEnable,
        pdma->DoneIntEnable,
        pdma->peerIO,
        pdma->amoHostDest,
        pdma->amoModType,
        pdma->intrHostDest, pdma->intrVector);
    /* start engine */
    cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
    cm_control.wr_dma_go = 1;
    MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
    return 0;
}
/*
 * mbcs_algo_start - program the algorithm engine from soft->algo and
 * set it running with its done-interrupt armed.
 * Returns 0 on success, -ERESTARTSYS if interrupted taking algolock.
 * Completion is signalled asynchronously via soft->algo_done/algo_queue
 * from the interrupt handler.
 */
static inline int mbcs_algo_start(struct mbcs_soft *soft)
{
    struct algoblock *algo_soft = &soft->algo;
    void *mmr_base = soft->mmr_base;
    union cm_control cm_control;
    if (mutex_lock_interruptible(&soft->algolock))
        return -ERESTARTSYS;
    /* clear completion flag before the engine can fire */
    atomic_set(&soft->algo_done, 0);
    mbcs_algo_set(mmr_base,
        algo_soft->amoHostDest,
        algo_soft->amoModType,
        algo_soft->intrHostDest,
        algo_soft->intrVector, algo_soft->algoStepCount);
    /* start algorithm */
    cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
    cm_control.alg_done_int_en = 1;
    cm_control.alg_go = 1;
    MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
    mutex_unlock(&soft->algolock);
    return 0;
}
/*
 * do_mbcs_sram_dmawrite - DMA from the host buffer at @hostAddr into
 * device SRAM at offset *@off ("put" direction), blocking until the
 * completion interrupt sets dmawrite_done.
 * Returns bytes transferred (len), -ERESTARTSYS if interrupted, or
 * -EAGAIN if the engine could not be started. Advances *@off on success.
 */
static inline ssize_t
do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
    size_t len, loff_t * off)
{
    int rv = 0;
    if (mutex_lock_interruptible(&soft->dmawritelock))
        return -ERESTARTSYS;
    /* reset completion flag before starting the engine */
    atomic_set(&soft->dmawrite_done, 0);
    soft->putdma.hostAddr = hostAddr;
    soft->putdma.localAddr = *off;
    soft->putdma.bytes = len;
    if (mbcs_putdma_start(soft) < 0) {
        DBG(KERN_ALERT "do_mbcs_sram_dmawrite: "
            "mbcs_putdma_start failed\n");
        rv = -EAGAIN;
        goto dmawrite_exit;
    }
    /* sleep until the interrupt handler flags completion */
    if (wait_event_interruptible(soft->dmawrite_queue,
            atomic_read(&soft->dmawrite_done))) {
        rv = -ERESTARTSYS;
        goto dmawrite_exit;
    }
    rv = len;
    *off += len;
dmawrite_exit:
    mutex_unlock(&soft->dmawritelock);
    return rv;
}
/*
 * do_mbcs_sram_dmaread - DMA from device SRAM at offset *@off into the
 * host buffer at @hostAddr ("get" direction), blocking until the
 * completion interrupt sets dmaread_done.
 * Returns bytes transferred (len), -ERESTARTSYS if interrupted, or
 * -EAGAIN if the engine could not be started. Advances *@off on success.
 */
static inline ssize_t
do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
    size_t len, loff_t * off)
{
    int rv = 0;
    if (mutex_lock_interruptible(&soft->dmareadlock))
        return -ERESTARTSYS;
    /*
     * BUG FIX: clear the *read* completion flag. The original cleared
     * dmawrite_done here, so a dmaread_done left over from a previous
     * transfer would let the wait below fall through before this DMA
     * actually finished.
     */
    atomic_set(&soft->dmaread_done, 0);
    soft->getdma.hostAddr = hostAddr;
    soft->getdma.localAddr = *off;
    soft->getdma.bytes = len;
    if (mbcs_getdma_start(soft) < 0) {
        DBG(KERN_ALERT "mbcs_strategy: mbcs_getdma_start failed\n");
        rv = -EAGAIN;
        goto dmaread_exit;
    }
    /* sleep until the interrupt handler flags completion */
    if (wait_event_interruptible(soft->dmaread_queue,
            atomic_read(&soft->dmaread_done))) {
        rv = -ERESTARTSYS;
        goto dmaread_exit;
    }
    rv = len;
    *off += len;
dmaread_exit:
    mutex_unlock(&soft->dmareadlock);
    return rv;
}
/*
 * mbcs_open - char device open; the minor number is the device nasid.
 * Finds the matching mbcs_soft on soft_list (under mbcs_mutex) and
 * stashes its cx_dev in file->private_data for the other fops.
 * Returns 0 on success, -ENODEV if no device matches the minor.
 */
static int mbcs_open(struct inode *ip, struct file *fp)
{
    struct mbcs_soft *soft;
    int minor;
    mutex_lock(&mbcs_mutex);
    minor = iminor(ip);
    /* mbcs_mutex protects the walk of soft_list here */
    list_for_each_entry(soft, &soft_list, list) {
        if (soft->nasid == minor) {
            fp->private_data = soft->cxdev;
            mutex_unlock(&mbcs_mutex);
            return 0;
        }
    }
    mutex_unlock(&mbcs_mutex);
    return -ENODEV;
}
/*
 * mbcs_sram_read - read() fop: pull @len bytes out of device SRAM at
 * *@off via a bounce buffer of DMA-able pages, then copy to userspace.
 * Note: the device->host direction uses the "dmawrite" engine.
 * Returns bytes read or a negative errno.
 */
static ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
{
    struct cx_dev *cx_dev = fp->private_data;
    struct mbcs_soft *soft = cx_dev->soft;
    uint64_t hostAddr;
    int rv = 0;
    /* kernel bounce buffer the DMA engine can reach */
    hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
    if (hostAddr == 0)
        return -ENOMEM;
    rv = do_mbcs_sram_dmawrite(soft, hostAddr, len, off);
    if (rv < 0)
        goto exit;
    if (copy_to_user(buf, (void *)hostAddr, len))
        rv = -EFAULT;
exit:
    free_pages(hostAddr, get_order(len));
    return rv;
}
/*
 * mbcs_sram_write - write() fop: copy @len bytes from userspace into a
 * DMA-able bounce buffer, then push them into device SRAM at *@off.
 * Note: the host->device direction uses the "dmaread" engine.
 * Returns bytes written or a negative errno.
 */
static ssize_t
mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off)
{
    struct cx_dev *cx_dev = fp->private_data;
    struct mbcs_soft *soft = cx_dev->soft;
    uint64_t hostAddr;
    int rv = 0;
    /* kernel bounce buffer the DMA engine can reach */
    hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
    if (hostAddr == 0)
        return -ENOMEM;
    if (copy_from_user((void *)hostAddr, buf, len)) {
        rv = -EFAULT;
        goto exit;
    }
    rv = do_mbcs_sram_dmaread(soft, hostAddr, len, off);
exit:
    free_pages(hostAddr, get_order(len));
    return rv;
}
/*
 * mbcs_sram_llseek - llseek() fop over the device SRAM window.
 * SEEK_END is relative to MBCS_SRAM_SIZE. Negative results are
 * rejected with -EINVAL; otherwise f_pos is updated and returned.
 */
static loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
{
    loff_t base;
    switch (whence) {
    case SEEK_SET:
        base = 0;
        break;
    case SEEK_CUR:
        base = filp->f_pos;
        break;
    case SEEK_END:
        base = MBCS_SRAM_SIZE;
        break;
    default: /* can't happen */
        return -EINVAL;
    }
    if (base + off < 0)
        return -EINVAL;
    filp->f_pos = base + off;
    return filp->f_pos;
}
/* Return the PIO address of @offset within the device's MMR window. */
static uint64_t mbcs_pioaddr(struct mbcs_soft *soft, uint64_t offset)
{
    return (uint64_t) (soft->mmr_base + offset);
}
/* Cache the PIO address of the debug register area in soft->debug_addr. */
static void mbcs_debug_pioaddr_set(struct mbcs_soft *soft)
{
    soft->debug_addr = mbcs_pioaddr(soft, MBCS_DEBUG_START);
}
/* Cache the PIO address of the GSCR register area in soft->gscr_addr. */
static void mbcs_gscr_pioaddr_set(struct mbcs_soft *soft)
{
    soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START);
}
/*
 * mbcs_gscr_mmap - mmap() fop: map one uncached page of the device's
 * GSCR register space into userspace. Only offset 0 is allowed.
 * Returns 0 on success, -EINVAL for a non-zero pgoff, -EAGAIN if the
 * remap fails.
 */
static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
{
    struct cx_dev *cx_dev = fp->private_data;
    struct mbcs_soft *soft = cx_dev->soft;
    if (vma->vm_pgoff != 0)
        return -EINVAL;
    /* device registers must not be cached */
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
    if (remap_pfn_range(vma,
            vma->vm_start,
            __pa(soft->gscr_addr) >> PAGE_SHIFT,
            PAGE_SIZE,
            vma->vm_page_prot))
        return -EAGAIN;
    return 0;
}
/**
 * mbcs_completion_intr_handler - Primary completion handler.
 * @irq: irq
 * @arg: soft struct for device
 *
 * Shared by the read-DMA, write-DMA and algorithm completion interrupts:
 * reads CM_STATUS once, and for each engine that reports done, clears the
 * engine via CM_CONTROL, sets the matching *_done flag and wakes sleepers
 * on the matching wait queue.
 */
static irqreturn_t
mbcs_completion_intr_handler(int irq, void *arg)
{
    struct mbcs_soft *soft = (struct mbcs_soft *)arg;
    void *mmr_base;
    union cm_status cm_status;
    union cm_control cm_control;
    mmr_base = soft->mmr_base;
    cm_status.cm_status_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_STATUS);
    if (cm_status.rd_dma_done) {
        /* stop dma-read engine, clear status */
        cm_control.cm_control_reg =
            MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
        cm_control.rd_dma_clr = 1;
        MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
            cm_control.cm_control_reg);
        atomic_set(&soft->dmaread_done, 1);
        wake_up(&soft->dmaread_queue);
    }
    if (cm_status.wr_dma_done) {
        /* stop dma-write engine, clear status */
        cm_control.cm_control_reg =
            MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
        cm_control.wr_dma_clr = 1;
        MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
            cm_control.cm_control_reg);
        atomic_set(&soft->dmawrite_done, 1);
        wake_up(&soft->dmawrite_queue);
    }
    if (cm_status.alg_done) {
        /* clear status */
        cm_control.cm_control_reg =
            MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
        cm_control.alg_done_clr = 1;
        MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
            cm_control.cm_control_reg);
        atomic_set(&soft->algo_done, 1);
        wake_up(&soft->algo_queue);
    }
    return IRQ_HANDLED;
}
/**
 * mbcs_intr_alloc - Allocate interrupts.
 * @dev: device pointer
 *
 * Allocates and requests the three completion interrupts (dma-read "get",
 * dma-write "put", algorithm), recording the xtalk address and vector of
 * each in the corresponding descriptor so the engines can target them.
 * All three share mbcs_completion_intr_handler().
 *
 * Returns 0 on success, -EAGAIN on any failure. On failure everything
 * acquired so far is released (goto-style unwind replaces the original
 * triplicated cleanup ladders; the unwind order is unchanged).
 */
static int mbcs_intr_alloc(struct cx_dev *dev)
{
    struct sn_irq_info *sn_irq;
    struct mbcs_soft *soft;
    struct getdma *getdma;
    struct putdma *putdma;
    struct algoblock *algo;
    soft = dev->soft;
    getdma = &soft->getdma;
    putdma = &soft->putdma;
    algo = &soft->algo;
    soft->get_sn_irq = NULL;
    soft->put_sn_irq = NULL;
    soft->algo_sn_irq = NULL;
    /* dma-read ("get") completion interrupt */
    sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
    if (sn_irq == NULL)
        return -EAGAIN;
    soft->get_sn_irq = sn_irq;
    getdma->intrHostDest = sn_irq->irq_xtalkaddr;
    getdma->intrVector = sn_irq->irq_irq;
    if (request_irq(sn_irq->irq_irq,
            (void *)mbcs_completion_intr_handler, IRQF_SHARED,
            "MBCS get intr", (void *)soft))
        goto err_free_get;
    /* dma-write ("put") completion interrupt */
    sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
    if (sn_irq == NULL)
        goto err_unreq_get;
    soft->put_sn_irq = sn_irq;
    putdma->intrHostDest = sn_irq->irq_xtalkaddr;
    putdma->intrVector = sn_irq->irq_irq;
    if (request_irq(sn_irq->irq_irq,
            (void *)mbcs_completion_intr_handler, IRQF_SHARED,
            "MBCS put intr", (void *)soft))
        goto err_free_put;
    /* algorithm completion interrupt */
    sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
    if (sn_irq == NULL)
        goto err_unreq_put;
    soft->algo_sn_irq = sn_irq;
    algo->intrHostDest = sn_irq->irq_xtalkaddr;
    algo->intrVector = sn_irq->irq_irq;
    if (request_irq(sn_irq->irq_irq,
            (void *)mbcs_completion_intr_handler, IRQF_SHARED,
            "MBCS algo intr", (void *)soft))
        goto err_free_algo;
    return 0;
err_free_algo:
    tiocx_irq_free(soft->algo_sn_irq);
err_unreq_put:
    free_irq(soft->put_sn_irq->irq_irq, soft);
err_free_put:
    tiocx_irq_free(soft->put_sn_irq);
err_unreq_get:
    free_irq(soft->get_sn_irq->irq_irq, soft);
err_free_get:
    tiocx_irq_free(soft->get_sn_irq);
    return -EAGAIN;
}
/**
 * mbcs_intr_dealloc - Remove interrupts.
 * @dev: device pointer
 *
 * Releases the three interrupts set up by mbcs_intr_alloc(), freeing
 * each irq before returning its sn_irq_info to tiocx.
 */
static void mbcs_intr_dealloc(struct cx_dev *dev)
{
    struct mbcs_soft *soft;
    soft = dev->soft;
    free_irq(soft->get_sn_irq->irq_irq, soft);
    tiocx_irq_free(soft->get_sn_irq);
    free_irq(soft->put_sn_irq->irq_irq, soft);
    tiocx_irq_free(soft->put_sn_irq);
    free_irq(soft->algo_sn_irq->irq_irq, soft);
    tiocx_irq_free(soft->algo_sn_irq);
}
/*
 * mbcs_hw_init - one-time hardware bring-up: set the request timeout,
 * cache PIO addresses, clear latched errors, enable error interrupts
 * (all but bit 23), and re-arm status while clearing all three engines.
 * Always returns 0.
 */
static inline int mbcs_hw_init(struct mbcs_soft *soft)
{
    void *mmr_base = soft->mmr_base;
    union cm_control cm_control;
    union cm_req_timeout cm_req_timeout;
    uint64_t err_stat;
    cm_req_timeout.cm_req_timeout_reg =
        MBCS_MMR_GET(mmr_base, MBCS_CM_REQ_TOUT);
    cm_req_timeout.time_out = MBCS_CM_CONTROL_REQ_TOUT_MASK;
    MBCS_MMR_SET(mmr_base, MBCS_CM_REQ_TOUT,
        cm_req_timeout.cm_req_timeout_reg);
    mbcs_gscr_pioaddr_set(soft);
    mbcs_debug_pioaddr_set(soft);
    /* clear errors */
    err_stat = MBCS_MMR_GET(mmr_base, MBCS_CM_ERR_STAT);
    MBCS_MMR_SET(mmr_base, MBCS_CM_CLR_ERR_STAT, err_stat);
    MBCS_MMR_ZERO(mmr_base, MBCS_CM_ERROR_DETAIL1);
    /* enable interrupts */
    /* turn off 2^23 (INT_EN_PIO_REQ_ADDR_INV) */
    MBCS_MMR_SET(mmr_base, MBCS_CM_ERR_INT_EN, 0x3ffffff7e00ffUL);
    /* arm status regs and clear engines */
    cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
    cm_control.rearm_stat_regs = 1;
    cm_control.alg_clr = 1;
    cm_control.wr_dma_clr = 1;
    cm_control.rd_dma_clr = 1;
    MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
    return 0;
}
/*
 * show_algo - sysfs "algo" read: report the first debug register, which
 * by convention holds the algorithm number and revision, as two hex words.
 */
static ssize_t show_algo(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct cx_dev *cx_dev = to_cx_dev(dev);
    struct mbcs_soft *soft = cx_dev->soft;
    uint64_t debug0;
    /*
     * By convention, the first debug register contains the
     * algorithm number and revision.
     */
    debug0 = *(uint64_t *) soft->debug_addr;
    return sprintf(buf, "0x%x 0x%x\n",
        upper_32_bits(debug0), lower_32_bits(debug0));
}
/*
 * store_algo - sysfs "algo" write: writing "1" starts the algorithm
 * engine and blocks until it completes (or the caller is interrupted).
 * Returns @count on success, 0 for an empty write, or a negative errno.
 */
static ssize_t store_algo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
    int n;
    struct cx_dev *cx_dev = to_cx_dev(dev);
    struct mbcs_soft *soft = cx_dev->soft;
    /* count is size_t: the original "count <= 0" could only ever catch 0 */
    if (count == 0)
        return 0;
    n = simple_strtoul(buf, NULL, 0);
    if (n == 1) {
        int rv;
        /*
         * BUG FIX: propagate mbcs_algo_start() failure. The original
         * ignored -ERESTARTSYS and went on to wait for a completion
         * that was never started.
         */
        rv = mbcs_algo_start(soft);
        if (rv < 0)
            return rv;
        if (wait_event_interruptible(soft->algo_queue,
                atomic_read(&soft->algo_done)))
            return -ERESTARTSYS;
    }
    return count;
}
DEVICE_ATTR(algo, 0644, show_algo, store_algo);
/**
* mbcs_probe - Initialize for device
* @dev: device pointer
* @device_id: id table pointer
*
*/
static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
{
struct mbcs_soft *soft;
dev->soft = NULL;
soft = kzalloc(sizeof(struct mbcs_soft), GFP_KERNEL);
if (soft == NULL)
return -ENOMEM;
soft->nasid = dev->cx_id.nasid;
list_add(&soft->list, &soft_list);
soft->mmr_base = (void *)tiocx_swin_base(dev->cx_id.nasid);
dev->soft = soft;
soft->cxdev = dev;
init_waitqueue_head(&soft->dmawrite_queue);
init_waitqueue_head(&soft->dmaread_queue);
init_waitqueue_head(&soft->algo_queue);
mutex_init(&soft->dmawritelock);
mutex_init(&soft->dmareadlock);
mutex_init(&soft->algolock);
mbcs_getdma_init(&soft->getdma);
mbcs_putdma_init(&soft->putdma);
mbcs_algo_init(&soft->algo);
mbcs_hw_init(soft);
/* Allocate interrupts */
mbcs_intr_alloc(dev);
device_create_file(&dev->dev, &dev_attr_algo);
return 0;
}
static int mbcs_remove(struct cx_dev *dev)
{
if (dev->soft) {
mbcs_intr_dealloc(dev);
kfree(dev->soft);
}
device_remove_file(&dev->dev, &dev_attr_algo);
return 0;
}
/* Part/manufacturer IDs this driver binds to: the base MBCS part and
 * the ALG0 variant. Zero-terminated for the cx bus matching code. */
static const struct cx_device_id __devinitdata mbcs_id_table[] = {
    {
        .part_num = MBCS_PART_NUM,
        .mfg_num = MBCS_MFG_NUM,
    },
    {
        .part_num = MBCS_PART_NUM_ALG0,
        .mfg_num = MBCS_MFG_NUM,
    },
    {0, 0}
};
MODULE_DEVICE_TABLE(cx, mbcs_id_table);
/* cx bus driver glue: probe/remove entry points and the id table above. */
static struct cx_drv mbcs_driver = {
    .name = DEVICE_NAME,
    .id_table = mbcs_id_table,
    .probe = mbcs_probe,
    .remove = mbcs_remove,
};
/* Module unload: drop the char device registration, then the cx driver. */
static void __exit mbcs_exit(void)
{
    unregister_chrdev(mbcs_major, DEVICE_NAME);
    cx_driver_unregister(&mbcs_driver);
}
/*
 * mbcs_init - module load: register the char device (dynamic major,
 * since mbcs_major starts at 0) and then the cx bus driver.
 * Returns 0 on success or a negative errno.
 */
static int __init mbcs_init(void)
{
    int rv;
    if (!ia64_platform_is("sn2"))
        return -ENODEV;
    // Put driver into chrdevs[]. Get major number.
    rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops);
    if (rv < 0) {
        DBG(KERN_ALERT "mbcs_init: can't get major number. %d\n", rv);
        return rv;
    }
    mbcs_major = rv;
    rv = cx_driver_register(&mbcs_driver);
    if (rv)
        /* BUG FIX: don't leave the char device registered when the
         * cx driver registration fails. */
        unregister_chrdev(mbcs_major, DEVICE_NAME);
    return rv;
}
module_init(mbcs_init);
module_exit(mbcs_exit);
MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
MODULE_DESCRIPTION("Driver for MOATB Core Services");
MODULE_LICENSE("GPL");
| gpl-2.0 |
lawnn/Dorimanx-LG-G2-D802-Kernel | arch/arm/mach-vexpress/v2m.c | 4621 | 16375 | /*
* Versatile Express V2M Motherboard Support
*/
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/smsc911x.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/usb/isp1760.h>
#include <linux/clkdev.h>
#include <linux/mtd/physmap.h>
#include <asm/mach-types.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/timer-sp.h>
#include <asm/hardware/sp810.h>
#include <asm/hardware/gic.h>
#include <mach/ct-ca9x4.h>
#include <mach/motherboard.h>
#include <plat/sched_clock.h>
#include "core.h"
/* Physical bases of the motherboard chip selects (legacy memory map). */
#define V2M_PA_CS0 0x40000000
#define V2M_PA_CS1 0x44000000
#define V2M_PA_CS2 0x48000000
#define V2M_PA_CS3 0x4c000000
#define V2M_PA_CS7 0x10000000
/* Static mapping of the CS7 motherboard peripheral window. */
static struct map_desc v2m_io_desc[] __initdata = {
    {
        .virtual = V2M_PERIPH,
        .pfn = __phys_to_pfn(V2M_PA_CS7),
        .length = SZ_128K,
        .type = MT_DEVICE,
    },
};
/* Mapped base of the motherboard system registers (set in map_io/init_early). */
static void __iomem *v2m_sysreg_base;
/*
 * v2m_sysctl_init - select the 1MHz TIMCLK reference for both SP804
 * timers via the SP810 system controller. No-op (with warning) if the
 * controller could not be mapped.
 */
static void __init v2m_sysctl_init(void __iomem *base)
{
    u32 scctrl;
    if (WARN_ON(!base))
        return;
    /* Select 1MHz TIMCLK as the reference clock for SP804 timers */
    scctrl = readl(base + SCCTRL);
    scctrl |= SCCTRL_TIMEREN0SEL_TIMCLK;
    scctrl |= SCCTRL_TIMEREN1SEL_TIMCLK;
    writel(scctrl, base + SCCTRL);
}
/*
 * v2m_sp804_init - disable both halves of the SP804, then use timer 2 as
 * the clocksource and timer 1 as the clockevent tied to @irq.
 */
static void __init v2m_sp804_init(void __iomem *base, unsigned int irq)
{
    if (WARN_ON(!base || irq == NO_IRQ))
        return;
    writel(0, base + TIMER_1_BASE + TIMER_CTRL);
    writel(0, base + TIMER_2_BASE + TIMER_CTRL);
    sp804_clocksource_init(base + TIMER_2_BASE, "v2m-timer1");
    sp804_clockevents_init(base + TIMER_1_BASE, irq, "v2m-timer0");
}
/* Non-DT timer bring-up: configure the SP810, then the SP804 pair. */
static void __init v2m_timer_init(void)
{
    v2m_sysctl_init(ioremap(V2M_SYSCTL, SZ_4K));
    v2m_sp804_init(ioremap(V2M_TIMER01, SZ_4K), IRQ_V2M_TIMER0);
}
static struct sys_timer v2m_timer = {
    .init = v2m_timer_init,
};
/* Serialises use of the SYS_CFG config interface registers. */
static DEFINE_SPINLOCK(v2m_cfg_lock);
/*
 * v2m_cfg_write - write @data to the motherboard configuration function
 * selected by @devfn, busy-polling CFGSTAT until the controller responds.
 * Returns non-zero if the controller flagged SYS_CFG_ERR.
 */
int v2m_cfg_write(u32 devfn, u32 data)
{
    /* Configuration interface broken? */
    u32 val;
    printk("%s: writing %08x to %08x\n", __func__, data, devfn);
    devfn |= SYS_CFG_START | SYS_CFG_WRITE;
    spin_lock(&v2m_cfg_lock);
    val = readl(v2m_sysreg_base + V2M_SYS_CFGSTAT);
    writel(val & ~SYS_CFG_COMPLETE, v2m_sysreg_base + V2M_SYS_CFGSTAT);
    writel(data, v2m_sysreg_base + V2M_SYS_CFGDATA);
    writel(devfn, v2m_sysreg_base + V2M_SYS_CFGCTRL);
    do {
        /* FIX: relax while busy-waiting, matching v2m_cfg_read();
         * the original spun on readl() with no cpu_relax(). */
        cpu_relax();
        val = readl(v2m_sysreg_base + V2M_SYS_CFGSTAT);
    } while (val == 0);
    spin_unlock(&v2m_cfg_lock);
    return !!(val & SYS_CFG_ERR);
}
/*
 * v2m_cfg_read - read a value from the motherboard configuration function
 * selected by @devfn into *@data, busy-polling CFGSTAT for completion.
 * Returns non-zero if the controller flagged SYS_CFG_ERR.
 */
int v2m_cfg_read(u32 devfn, u32 *data)
{
    u32 val;
    devfn |= SYS_CFG_START;
    spin_lock(&v2m_cfg_lock);
    writel(0, v2m_sysreg_base + V2M_SYS_CFGSTAT);
    writel(devfn, v2m_sysreg_base + V2M_SYS_CFGCTRL);
    /* order the CFGCTRL write before polling */
    mb();
    do {
        cpu_relax();
        val = readl(v2m_sysreg_base + V2M_SYS_CFGSTAT);
    } while (val == 0);
    *data = readl(v2m_sysreg_base + V2M_SYS_CFGDATA);
    spin_unlock(&v2m_cfg_lock);
    return !!(val & SYS_CFG_ERR);
}
/* Clear all boot flags, then set @data (e.g. secondary-CPU entry point). */
void __init v2m_flags_set(u32 data)
{
    writel(~0, v2m_sysreg_base + V2M_SYS_FLAGSCLR);
    writel(data, v2m_sysreg_base + V2M_SYS_FLAGSSET);
}
/* Motherboard I2C buses: id 0 drives the PCIe connector, id 1 the DVI DDC. */
static struct resource v2m_pcie_i2c_resource = {
    .start = V2M_SERIAL_BUS_PCI,
    .end = V2M_SERIAL_BUS_PCI + SZ_4K - 1,
    .flags = IORESOURCE_MEM,
};
static struct platform_device v2m_pcie_i2c_device = {
    .name = "versatile-i2c",
    .id = 0,
    .num_resources = 1,
    .resource = &v2m_pcie_i2c_resource,
};
static struct resource v2m_ddc_i2c_resource = {
    .start = V2M_SERIAL_BUS_DVI,
    .end = V2M_SERIAL_BUS_DVI + SZ_4K - 1,
    .flags = IORESOURCE_MEM,
};
static struct platform_device v2m_ddc_i2c_device = {
    .name = "versatile-i2c",
    .id = 1,
    .num_resources = 1,
    .resource = &v2m_ddc_i2c_resource,
};
/* SMSC LAN9118 ethernet: 32-bit bus, active-high push-pull IRQ, MII PHY. */
static struct resource v2m_eth_resources[] = {
    {
        .start = V2M_LAN9118,
        .end = V2M_LAN9118 + SZ_64K - 1,
        .flags = IORESOURCE_MEM,
    }, {
        .start = IRQ_V2M_LAN9118,
        .end = IRQ_V2M_LAN9118,
        .flags = IORESOURCE_IRQ,
    },
};
static struct smsc911x_platform_config v2m_eth_config = {
    .flags = SMSC911X_USE_32BIT,
    .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH,
    .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
    .phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device v2m_eth_device = {
    .name = "smsc911x",
    .id = -1,
    .resource = v2m_eth_resources,
    .num_resources = ARRAY_SIZE(v2m_eth_resources),
    .dev.platform_data = &v2m_eth_config,
};
/* NXP ISP1761 USB controller (ISP1761 variant, OTG on port 1). */
static struct resource v2m_usb_resources[] = {
    {
        .start = V2M_ISP1761,
        .end = V2M_ISP1761 + SZ_128K - 1,
        .flags = IORESOURCE_MEM,
    }, {
        .start = IRQ_V2M_ISP1761,
        .end = IRQ_V2M_ISP1761,
        .flags = IORESOURCE_IRQ,
    },
};
static struct isp1760_platform_data v2m_usb_config = {
    .is_isp1761 = true,
    .bus_width_16 = false,
    .port1_otg = true,
    .analog_oc = false,
    .dack_polarity_high = false,
    .dreq_polarity_high = false,
};
static struct platform_device v2m_usb_device = {
    .name = "isp1760",
    .id = -1,
    .resource = v2m_usb_resources,
    .num_resources = ARRAY_SIZE(v2m_usb_resources),
    .dev.platform_data = &v2m_usb_config,
};
/* Gate the NOR flash programming voltage via the SYS_FLASH register. */
static void v2m_flash_set_vpp(struct platform_device *pdev, int on)
{
    writel(on != 0, v2m_sysreg_base + V2M_SYS_FLASH);
}
static struct physmap_flash_data v2m_flash_data = {
    .width = 4,
    .set_vpp = v2m_flash_set_vpp,
};
/* Two 64MB NOR flash banks exposed as one physmap device. */
static struct resource v2m_flash_resources[] = {
    {
        .start = V2M_NOR0,
        .end = V2M_NOR0 + SZ_64M - 1,
        .flags = IORESOURCE_MEM,
    }, {
        .start = V2M_NOR1,
        .end = V2M_NOR1 + SZ_64M - 1,
        .flags = IORESOURCE_MEM,
    },
};
static struct platform_device v2m_flash_device = {
    .name = "physmap-flash",
    .id = -1,
    .resource = v2m_flash_resources,
    .num_resources = ARRAY_SIZE(v2m_flash_resources),
    .dev.platform_data = &v2m_flash_data,
};
/* CompactFlash via pata_platform: cmd regs in the first 0x100 bytes,
 * control regs above, with registers spaced 4 bytes apart (shift 2). */
static struct pata_platform_info v2m_pata_data = {
    .ioport_shift = 2,
};
static struct resource v2m_pata_resources[] = {
    {
        .start = V2M_CF,
        .end = V2M_CF + 0xff,
        .flags = IORESOURCE_MEM,
    }, {
        .start = V2M_CF + 0x100,
        .end = V2M_CF + SZ_4K - 1,
        .flags = IORESOURCE_MEM,
    },
};
static struct platform_device v2m_cf_device = {
    .name = "pata_platform",
    .id = -1,
    .resource = v2m_pata_resources,
    .num_resources = ARRAY_SIZE(v2m_pata_resources),
    .dev.platform_data = &v2m_pata_data,
};
/* Card-detect: bit 0 of the SYS_MCI register. */
static unsigned int v2m_mmci_status(struct device *dev)
{
    return readl(v2m_sysreg_base + V2M_SYS_MCI) & (1 << 0);
}
static struct mmci_platform_data v2m_mmci_data = {
    .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
    .status = v2m_mmci_status,
};
/* Motherboard AMBA APB primecells, registered in bulk by v2m_init(). */
static AMBA_APB_DEVICE(aaci, "mb:aaci", 0, V2M_AACI, IRQ_V2M_AACI, NULL);
static AMBA_APB_DEVICE(mmci, "mb:mmci", 0, V2M_MMCI, IRQ_V2M_MMCI, &v2m_mmci_data);
static AMBA_APB_DEVICE(kmi0, "mb:kmi0", 0, V2M_KMI0, IRQ_V2M_KMI0, NULL);
static AMBA_APB_DEVICE(kmi1, "mb:kmi1", 0, V2M_KMI1, IRQ_V2M_KMI1, NULL);
static AMBA_APB_DEVICE(uart0, "mb:uart0", 0, V2M_UART0, IRQ_V2M_UART0, NULL);
static AMBA_APB_DEVICE(uart1, "mb:uart1", 0, V2M_UART1, IRQ_V2M_UART1, NULL);
static AMBA_APB_DEVICE(uart2, "mb:uart2", 0, V2M_UART2, IRQ_V2M_UART2, NULL);
static AMBA_APB_DEVICE(uart3, "mb:uart3", 0, V2M_UART3, IRQ_V2M_UART3, NULL);
static AMBA_APB_DEVICE(wdt, "mb:wdt", 0, V2M_WDT, IRQ_V2M_WDT, NULL);
static AMBA_APB_DEVICE(rtc, "mb:rtc", 0, V2M_RTC, IRQ_V2M_RTC, NULL);
static struct amba_device *v2m_amba_devs[] __initdata = {
    &aaci_device,
    &mmci_device,
    &kmi0_device,
    &kmi1_device,
    &uart0_device,
    &uart1_device,
    &uart2_device,
    &uart3_device,
    &wdt_device,
    &rtc_device,
};
/* Oscillator rate-rounding is a no-op: any requested rate is accepted. */
static long v2m_osc_round(struct clk *clk, unsigned long rate)
{
    return rate;
}
/* OSC1 rate changes go through the motherboard config interface. */
static int v2m_osc1_set(struct clk *clk, unsigned long rate)
{
    return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_MB | 1, rate);
}
static const struct clk_ops osc1_clk_ops = {
    .round = v2m_osc_round,
    .set = v2m_osc1_set,
};
/* OSC1: programmable 24MHz-default clock (CLCD); OSC2: fixed 24MHz. */
static struct clk osc1_clk = {
    .ops = &osc1_clk_ops,
    .rate = 24000000,
};
static struct clk osc2_clk = {
    .rate = 24000000,
};
/* 1MHz TIMCLK for the SP804s; 32kHz reference for the SP805 WDT. */
static struct clk v2m_sp804_clk = {
    .rate = 1000000,
};
static struct clk v2m_ref_clk = {
    .rate = 32768,
};
/* Placeholder for the mandatory AMBA bus clock lookup. */
static struct clk dummy_apb_pclk;
/* clkdev table for the non-DT (ATAG) boot path, registered in init_early. */
static struct clk_lookup v2m_lookups[] = {
    { /* AMBA bus clock */
        .con_id = "apb_pclk",
        .clk = &dummy_apb_pclk,
    }, { /* UART0 */
        .dev_id = "mb:uart0",
        .clk = &osc2_clk,
    }, { /* UART1 */
        .dev_id = "mb:uart1",
        .clk = &osc2_clk,
    }, { /* UART2 */
        .dev_id = "mb:uart2",
        .clk = &osc2_clk,
    }, { /* UART3 */
        .dev_id = "mb:uart3",
        .clk = &osc2_clk,
    }, { /* KMI0 */
        .dev_id = "mb:kmi0",
        .clk = &osc2_clk,
    }, { /* KMI1 */
        .dev_id = "mb:kmi1",
        .clk = &osc2_clk,
    }, { /* MMC0 */
        .dev_id = "mb:mmci",
        .clk = &osc2_clk,
    }, { /* CLCD */
        .dev_id = "mb:clcd",
        .clk = &osc1_clk,
    }, { /* SP805 WDT */
        .dev_id = "mb:wdt",
        .clk = &v2m_ref_clk,
    }, { /* SP804 timers */
        .dev_id = "sp804",
        .con_id = "v2m-timer0",
        .clk = &v2m_sp804_clk,
    }, { /* SP804 timers */
        .dev_id = "sp804",
        .con_id = "v2m-timer1",
        .clk = &v2m_sp804_clk,
    },
};
/* Non-DT early init: tile-specific hook, clk lookups, sched_clock from
 * the free-running 24MHz counter. */
static void __init v2m_init_early(void)
{
    ct_desc->init_early();
    clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
    versatile_sched_clock_init(v2m_sysreg_base + V2M_SYS_24MHZ, 24000000);
}
/* pm_power_off hook: request shutdown via the motherboard config bus. */
static void v2m_power_off(void)
{
    if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE_MB, 0))
        printk(KERN_EMERG "Unable to shutdown\n");
}
/* Machine restart hook: request a reboot via the motherboard config bus.
 * (@str is the restart mode character; unused here.) */
static void v2m_restart(char str, const char *cmd)
{
    if (v2m_cfg_write(SYS_CFG_REBOOT | SYS_CFG_SITE_MB, 0))
        printk(KERN_EMERG "Unable to reboot\n");
}
/* Descriptor of the core tile actually fitted; set by v2m_populate_ct_desc(). */
struct ct_desc *ct_desc;
/* All core tiles this kernel was built with support for. */
static struct ct_desc *ct_descs[] __initdata = {
#ifdef CONFIG_ARCH_VEXPRESS_CA9X4
    &ct_ca9x4_desc,
#endif
};
/*
 * v2m_populate_ct_desc - read the fitted core tile's ID from PROCID0 and
 * pick the matching descriptor from ct_descs[]; panic if this kernel has
 * no support for it (ATAG boot only).
 */
static void __init v2m_populate_ct_desc(void)
{
    int i;
    u32 current_tile_id;
    ct_desc = NULL;
    current_tile_id = readl(v2m_sysreg_base + V2M_SYS_PROCID0)
        & V2M_CT_ID_MASK;
    for (i = 0; i < ARRAY_SIZE(ct_descs) && !ct_desc; ++i)
        if (ct_descs[i]->id == current_tile_id)
            ct_desc = ct_descs[i];
    if (!ct_desc)
        panic("vexpress: this kernel does not support core tile ID 0x%08x when booting via ATAGs.\n"
            "You may need a device tree blob or a different kernel to boot on this board.\n",
            current_tile_id);
}
/* Non-DT map_io: static peripheral mapping, sysregs, then identify and
 * map the core tile. */
static void __init v2m_map_io(void)
{
    iotable_init(v2m_io_desc, ARRAY_SIZE(v2m_io_desc));
    v2m_sysreg_base = ioremap(V2M_SYSREGS, SZ_4K);
    v2m_populate_ct_desc();
    ct_desc->map_io();
}
/* IRQ init is entirely tile-specific; delegate to the core tile. */
static void __init v2m_init_irq(void)
{
    ct_desc->init_irq();
}
/*
 * v2m_init - non-DT machine init: register platform and AMBA devices,
 * install the power-off hook, then let the core tile register its own.
 */
static void __init v2m_init(void)
{
    int i;
    platform_device_register(&v2m_pcie_i2c_device);
    platform_device_register(&v2m_ddc_i2c_device);
    platform_device_register(&v2m_flash_device);
    platform_device_register(&v2m_cf_device);
    platform_device_register(&v2m_eth_device);
    platform_device_register(&v2m_usb_device);
    for (i = 0; i < ARRAY_SIZE(v2m_amba_devs); i++)
        amba_device_register(v2m_amba_devs[i], &iomem_resource);
    pm_power_off = v2m_power_off;
    ct_desc->init_tile();
}
/* Machine record for ATAG (non-DT) boots. */
MACHINE_START(VEXPRESS, "ARM-Versatile Express")
    .atag_offset = 0x100,
    .map_io = v2m_map_io,
    .init_early = v2m_init_early,
    .init_irq = v2m_init_irq,
    .timer = &v2m_timer,
    .handle_irq = gic_handle_irq,
    .init_machine = v2m_init,
    .restart = v2m_restart,
MACHINE_END
#if defined(CONFIG_ARCH_VEXPRESS_DT)
/* Peripheral mapping for boards using the newer "RS1" memory map. */
static struct map_desc v2m_rs1_io_desc __initdata = {
    .virtual = V2M_PERIPH,
    .pfn = __phys_to_pfn(0x1c000000),
    .length = SZ_2M,
    .type = MT_DEVICE,
};
/*
 * Flat-DT scan callback: on the "motherboard" node, fetch the
 * "arm,v2m-memory-map" property into *(const char **)data.
 * Returns 1 (stop scanning) once that node has been seen, 0 otherwise.
 */
static int __init v2m_dt_scan_memory_map(unsigned long node, const char *uname,
    int depth, void *data)
{
    const char **map = data;
    /* only the motherboard node carries the memory-map property */
    if (strcmp(uname, "motherboard"))
        return 0;
    *map = of_get_flat_dt_prop(node, "arm,v2m-memory-map", NULL);
    return 1;
}
/*
 * v2m_dt_map_io - DT map_io: pick the RS1 or legacy peripheral mapping
 * based on the motherboard's "arm,v2m-memory-map" property, then let
 * SMP code add its mappings.
 */
void __init v2m_dt_map_io(void)
{
    const char *map = NULL;
    of_scan_flat_dt(v2m_dt_scan_memory_map, &map);
    if (map && strcmp(map, "rs1") == 0)
        iotable_init(&v2m_rs1_io_desc, 1);
    else
        iotable_init(v2m_io_desc, ARRAY_SIZE(v2m_io_desc));
#if defined(CONFIG_SMP)
    vexpress_dt_smp_map_io();
#endif
}
/* clkdev table for DT boots. Contains entries for BOTH the legacy
 * (0x10xxxxxx) and RS1 (0x1cxxxxxx) memory maps; only the set matching
 * the booted board's device names will ever be looked up. */
static struct clk_lookup v2m_dt_lookups[] = {
    { /* AMBA bus clock */
        .con_id = "apb_pclk",
        .clk = &dummy_apb_pclk,
    }, { /* SP804 timers */
        .dev_id = "sp804",
        .con_id = "v2m-timer0",
        .clk = &v2m_sp804_clk,
    }, { /* SP804 timers */
        .dev_id = "sp804",
        .con_id = "v2m-timer1",
        .clk = &v2m_sp804_clk,
    }, { /* PL180 MMCI */
        .dev_id = "mb:mmci", /* 10005000.mmci */
        .clk = &osc2_clk,
    }, { /* PL050 KMI0 */
        .dev_id = "10006000.kmi",
        .clk = &osc2_clk,
    }, { /* PL050 KMI1 */
        .dev_id = "10007000.kmi",
        .clk = &osc2_clk,
    }, { /* PL011 UART0 */
        .dev_id = "10009000.uart",
        .clk = &osc2_clk,
    }, { /* PL011 UART1 */
        .dev_id = "1000a000.uart",
        .clk = &osc2_clk,
    }, { /* PL011 UART2 */
        .dev_id = "1000b000.uart",
        .clk = &osc2_clk,
    }, { /* PL011 UART3 */
        .dev_id = "1000c000.uart",
        .clk = &osc2_clk,
    }, { /* SP805 WDT */
        .dev_id = "1000f000.wdt",
        .clk = &v2m_ref_clk,
    }, { /* PL111 CLCD */
        .dev_id = "1001f000.clcd",
        .clk = &osc1_clk,
    },
    /* RS1 memory map */
    { /* PL180 MMCI */
        .dev_id = "mb:mmci", /* 1c050000.mmci */
        .clk = &osc2_clk,
    }, { /* PL050 KMI0 */
        .dev_id = "1c060000.kmi",
        .clk = &osc2_clk,
    }, { /* PL050 KMI1 */
        .dev_id = "1c070000.kmi",
        .clk = &osc2_clk,
    }, { /* PL011 UART0 */
        .dev_id = "1c090000.uart",
        .clk = &osc2_clk,
    }, { /* PL011 UART1 */
        .dev_id = "1c0a0000.uart",
        .clk = &osc2_clk,
    }, { /* PL011 UART2 */
        .dev_id = "1c0b0000.uart",
        .clk = &osc2_clk,
    }, { /* PL011 UART3 */
        .dev_id = "1c0c0000.uart",
        .clk = &osc2_clk,
    }, { /* SP805 WDT */
        .dev_id = "1c0f0000.wdt",
        .clk = &v2m_ref_clk,
    }, { /* PL111 CLCD */
        .dev_id = "1c1f0000.clcd",
        .clk = &osc1_clk,
    },
};
/*
 * Early init: map the Versatile Express system registers, sanity-check
 * the board's HBI number against the device tree (if the DT carries
 * one), then register the clock table and the sched_clock source.
 */
void __init v2m_dt_init_early(void)
{
	struct device_node *node;
	u32 dt_hbi;

	node = of_find_compatible_node(NULL, NULL, "arm,vexpress-sysreg");
	v2m_sysreg_base = of_iomap(node, 0);
	if (WARN_ON(!v2m_sysreg_base))
		return;

	/* Confirm board type against DT property, if available */
	if (of_property_read_u32(allnodes, "arm,hbi", &dt_hbi) == 0) {
		u32 misc = readl(v2m_sysreg_base + V2M_SYS_MISC);
		/* which PROCID register is valid depends on the master site */
		u32 id = readl(v2m_sysreg_base + (misc & SYS_MISC_MASTERSITE ?
				V2M_SYS_PROCID1 : V2M_SYS_PROCID0));
		u32 hbi = id & SYS_PROCIDx_HBI_MASK;

		if (WARN_ON(dt_hbi != hbi))
			pr_warning("vexpress: DT HBI (%x) is not matching "
					"hardware (%x)!\n", dt_hbi, hbi);
	}

	clkdev_add_table(v2m_dt_lookups, ARRAY_SIZE(v2m_dt_lookups));
	versatile_sched_clock_init(v2m_sysreg_base + V2M_SYS_24MHZ, 24000000);
}
static struct of_device_id vexpress_irq_match[] __initdata = {
{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
{}
};
/* DT IRQ init: probe and initialise the GIC from the device tree. */
static void __init v2m_dt_init_irq(void)
{
	of_irq_init(vexpress_irq_match);
}
/*
 * DT timer init: program the SP810 system controller first, then set up
 * the SP804 named by the "arm,v2m_timer" alias as the system timer.
 */
static void __init v2m_dt_timer_init(void)
{
	struct device_node *node;
	const char *path;
	int err;

	node = of_find_compatible_node(NULL, NULL, "arm,sp810");
	v2m_sysctl_init(of_iomap(node, 0));

	/* without the alias we don't know which SP804 to use — bail out */
	err = of_property_read_string(of_aliases, "arm,v2m_timer", &path);
	if (WARN_ON(err))
		return;

	node = of_find_node_by_path(path);
	v2m_sp804_init(of_iomap(node, 0), irq_of_parse_and_map(node, 0));
}

static struct sys_timer v2m_dt_timer = {
	.init = v2m_dt_timer_init,
};
/*
 * Auxiliary data for of_platform_populate(): attaches platform data to
 * the NOR flash and MMCI nodes, with entries for both the legacy and
 * the RS1 motherboard memory maps.
 */
static struct of_dev_auxdata v2m_dt_auxdata_lookup[] __initdata = {
	OF_DEV_AUXDATA("arm,vexpress-flash", V2M_NOR0, "physmap-flash",
			&v2m_flash_data),
	OF_DEV_AUXDATA("arm,primecell", V2M_MMCI, "mb:mmci", &v2m_mmci_data),
	/* RS1 memory map */
	OF_DEV_AUXDATA("arm,vexpress-flash", 0x08000000, "physmap-flash",
			&v2m_flash_data),
	OF_DEV_AUXDATA("arm,primecell", 0x1c050000, "mb:mmci", &v2m_mmci_data),
	{}
};
/*
 * Machine init: set up the L2 cache controller from DT, populate the
 * platform devices from the device tree and hook up power-off.
 */
static void __init v2m_dt_init(void)
{
	l2x0_of_init(0x00400000, 0xfe0fffff);
	of_platform_populate(NULL, of_default_bus_match_table,
			v2m_dt_auxdata_lookup, NULL);
	pm_power_off = v2m_power_off;
}
/*
 * Boards handled by this DT machine description.
 *
 * Was "const static char *": the storage-class specifier must come
 * first (post-type "static" is deprecated C and warns), and
 * __initconst requires the object itself to be const — an array of
 * pointers-to-const is still a mutable array, so it must be declared
 * "const char * const" to legally live in the const init section.
 */
static const char * const v2m_dt_match[] __initconst = {
	"arm,vexpress",
	NULL,
};
/* DT-based machine description for all "arm,vexpress" compatible boards. */
DT_MACHINE_START(VEXPRESS_DT, "ARM-Versatile Express")
	.dt_compat	= v2m_dt_match,
	.map_io		= v2m_dt_map_io,
	.init_early	= v2m_dt_init_early,
	.init_irq	= v2m_dt_init_irq,
	.timer		= &v2m_dt_timer,
	.init_machine	= v2m_dt_init,
	.handle_irq	= gic_handle_irq,
	.restart	= v2m_restart,
MACHINE_END
#endif
| gpl-2.0 |
farchanrifai/kernel_cancro | drivers/usb/gadget/zero.c | 4877 | 9655 | /*
* zero.c -- Gadget Zero, for USB development
*
* Copyright (C) 2003-2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
/*
* Gadget Zero only needs two bulk endpoints, and is an example of how you
* can write a hardware-agnostic gadget driver running inside a USB device.
* Some hardware details are visible, but don't affect most of the driver.
*
* Use it with the Linux host/master side "usbtest" driver to get a basic
* functional test of your device-side usb stack, or with "usb-skeleton".
*
* It supports two similar configurations. One sinks whatever the usb host
* writes, and in return sources zeroes. The other loops whatever the host
* writes back, so the host can read it.
*
* Many drivers will only have one configuration, letting them be much
* simpler if they also don't support high speed operation (like this
* driver does).
*
* Why is *this* driver using two configurations, rather than setting up
* two interfaces with different functions? To help verify that multiple
* configuration infrastucture is working correctly; also, so that it can
* work with low capability USB controllers without four bulk endpoints.
*/
/*
* driver assumes self-powered hardware, and
* has no way for users to trigger remote wakeup.
*/
/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/device.h>
#include "g_zero.h"
#include "gadget_chips.h"
/*-------------------------------------------------------------------------*/
/*
* Kbuild is not very cooperative with respect to linking separately
* compiled library objects into one module. So for now we won't use
* separate compilation ... ensuring init/exit sections work to shrink
* the runtime footprint, and giving us at least some parts of what
* a "gcc --combine ... part1.c part2.c part3.c ... " build would.
*/
#include "composite.c"
#include "usbstring.c"
#include "config.c"
#include "epautoconf.c"
#include "f_sourcesink.c"
#include "f_loopback.c"
/*-------------------------------------------------------------------------*/
#define DRIVER_VERSION		"Cinco de Mayo 2008"

static const char longname[] = "Gadget Zero";

/* Size of the transfer buffers allocated by alloc_ep_req(); module tunable. */
unsigned buflen = 4096;
module_param(buflen, uint, 0);

/*
 * Normally the "loopback" configuration is second (index 1) so
 * it's not the default. Here's where to change that order, to
 * work better with hosts where config changes are problematic or
 * controllers (like original superh) that only support one config.
 */
static bool loopdefault = 0;
module_param(loopdefault, bool, S_IRUGO|S_IWUSR);

/*-------------------------------------------------------------------------*/

/* Thanks to NetChip Technologies for donating this product ID.
 *
 * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
 * Instead: allocate your own, using normal USB-IF procedures.
 */
#ifndef CONFIG_USB_ZERO_HNPTEST
#define DRIVER_VENDOR_NUM	0x0525		/* NetChip */
#define DRIVER_PRODUCT_NUM	0xa4a0		/* Linux-USB "Gadget Zero" */
#define DEFAULT_AUTORESUME	0
#else
#define DRIVER_VENDOR_NUM	0x1a0a		/* OTG test device IDs */
#define DRIVER_PRODUCT_NUM	0xbadd
#define DEFAULT_AUTORESUME	5
#endif

/* If the optional "autoresume" mode is enabled, it provides good
 * functional coverage for the "USBCV" test harness from USB-IF.
 * It's always set if OTG mode is enabled.
 */
unsigned autoresume = DEFAULT_AUTORESUME;
module_param(autoresume, uint, S_IRUGO);
MODULE_PARM_DESC(autoresume, "zero, or seconds before remote wakeup");
/*-------------------------------------------------------------------------*/
/*
 * Device descriptor: vendor-specific class with two configurations
 * (source/sink and loopback).  bcdDevice and the string descriptor
 * indexes are filled in at bind time.
 */
static struct usb_device_descriptor device_desc = {
	.bLength		= sizeof device_desc,
	.bDescriptorType	= USB_DT_DEVICE,

	.bcdUSB			= cpu_to_le16(0x0200),
	.bDeviceClass		= USB_CLASS_VENDOR_SPEC,

	.idVendor		= cpu_to_le16(DRIVER_VENDOR_NUM),
	.idProduct		= cpu_to_le16(DRIVER_PRODUCT_NUM),
	.bNumConfigurations	= 2,
};

#ifdef CONFIG_USB_OTG
static struct usb_otg_descriptor otg_descriptor = {
	.bLength		= sizeof otg_descriptor,
	.bDescriptorType	= USB_DT_OTG,

	/* REVISIT SRP-only hardware is possible, although
	 * it would not be called "OTG" ...
	 */
	.bmAttributes		= USB_OTG_SRP | USB_OTG_HNP,
};

/* appended to each configuration's descriptor list when OTG is on */
const struct usb_descriptor_header *otg_desc[] = {
	(struct usb_descriptor_header *) &otg_descriptor,
	NULL,
};
#endif
/* string IDs are assigned dynamically */
#define STRING_MANUFACTURER_IDX		0
#define STRING_PRODUCT_IDX		1
#define STRING_SERIAL_IDX		2

/* filled in at bind time: "<sysname> <release> with <gadget name>" */
static char manufacturer[50];

/* default serial number takes at least two packets */
static char serial[] = "0123456789.0123456789.0123456789";

static struct usb_string strings_dev[] = {
	[STRING_MANUFACTURER_IDX].s = manufacturer,
	[STRING_PRODUCT_IDX].s = longname,
	[STRING_SERIAL_IDX].s = serial,
	{  }			/* end of list */
};

static struct usb_gadget_strings stringtab_dev = {
	.language	= 0x0409,	/* en-us */
	.strings	= strings_dev,
};

static struct usb_gadget_strings *dev_strings[] = {
	&stringtab_dev,
	NULL,
};
/*-------------------------------------------------------------------------*/
/*
 * Allocate a usb_request for @ep together with a buflen-sized transfer
 * buffer.  Returns NULL if either allocation fails; on failure nothing
 * is leaked.  Free with free_ep_req().
 */
struct usb_request *alloc_ep_req(struct usb_ep *ep)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

	if (!req)
		return NULL;

	req->length = buflen;
	req->buf = kmalloc(buflen, GFP_ATOMIC);
	if (req->buf)
		return req;

	/* buffer allocation failed: give the request back too */
	usb_ep_free_request(ep, req);
	return NULL;
}
/* Release a request obtained from alloc_ep_req(): buffer first, then
 * the request itself (the buffer pointer lives inside the request).
 */
void free_ep_req(struct usb_ep *ep, struct usb_request *req)
{
	void *transfer_buf = req->buf;

	kfree(transfer_buf);
	usb_ep_free_request(ep, req);
}
/*
 * Disable one endpoint if it is currently claimed (driver_data set),
 * logging — but otherwise ignoring — any failure, and mark it free.
 */
static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
{
	int status;

	if (!ep->driver_data)
		return;

	status = usb_ep_disable(ep);
	if (status < 0)
		DBG(cdev, "disable %s --> %d\n",
				ep->name, status);
	ep->driver_data = NULL;
}
/* Disable both endpoints of a function (source/sink or loopback). */
void disable_endpoints(struct usb_composite_dev *cdev,
		struct usb_ep *in, struct usb_ep *out)
{
	disable_ep(cdev, in);
	disable_ep(cdev, out);
}
/*-------------------------------------------------------------------------*/
/* armed on suspend (when "autoresume" is set) to issue a remote wakeup */
static struct timer_list autoresume_timer;

/*
 * Timer callback: ask the UDC to perform a remote wakeup on behalf of
 * the configured device.  _c is the composite dev, smuggled through
 * the timer's unsigned long argument.
 */
static void zero_autoresume(unsigned long _c)
{
	struct usb_composite_dev *cdev = (void *)_c;
	struct usb_gadget *g = cdev->gadget;

	/* unconfigured devices can't issue wakeups */
	if (!cdev->config)
		return;

	/* Normally the host would be woken up for something
	 * more significant than just a timer firing; likely
	 * because of some direct user request.
	 */
	if (g->speed != USB_SPEED_UNKNOWN) {
		int status = usb_gadget_wakeup(g);
		INFO(cdev, "%s --> %d\n", __func__, status);
	}
}
/*
 * Suspend hook: when autoresume testing is enabled, arm the wakeup
 * timer to fire "autoresume" seconds from now; otherwise just log.
 * Ignored while the link speed is still unknown (not yet connected).
 */
static void zero_suspend(struct usb_composite_dev *cdev)
{
	if (cdev->gadget->speed == USB_SPEED_UNKNOWN)
		return;

	if (!autoresume) {
		DBG(cdev, "%s\n", __func__);
		return;
	}

	mod_timer(&autoresume_timer, jiffies + (HZ * autoresume));
	DBG(cdev, "suspend, wakeup in %d seconds\n", autoresume);
}
/* Resume hook: the host woke us, so cancel any pending autoresume. */
static void zero_resume(struct usb_composite_dev *cdev)
{
	DBG(cdev, "%s\n", __func__);
	del_timer(&autoresume_timer);
}
/*-------------------------------------------------------------------------*/
/*
 * Bind: allocate the dynamic string IDs, set up the autoresume timer,
 * register both configurations (order chosen by "loopdefault"), and
 * derive bcdDevice from the UDC controller number.  Runs once when the
 * composite driver attaches to a controller.
 */
static int __init zero_bind(struct usb_composite_dev *cdev)
{
	int gcnum;
	struct usb_gadget *gadget = cdev->gadget;
	int id;

	/* Allocate string descriptor numbers ... note that string
	 * contents can be overridden by the composite_dev glue.
	 */
	id = usb_string_id(cdev);
	if (id < 0)
		return id;
	strings_dev[STRING_MANUFACTURER_IDX].id = id;
	device_desc.iManufacturer = id;

	id = usb_string_id(cdev);
	if (id < 0)
		return id;
	strings_dev[STRING_PRODUCT_IDX].id = id;
	device_desc.iProduct = id;

	id = usb_string_id(cdev);
	if (id < 0)
		return id;
	strings_dev[STRING_SERIAL_IDX].id = id;
	device_desc.iSerialNumber = id;

	setup_timer(&autoresume_timer, zero_autoresume, (unsigned long) cdev);

	/* Register primary, then secondary configuration. Note that
	 * SH3 only allows one config...
	 */
	if (loopdefault) {
		loopback_add(cdev, autoresume != 0);
		sourcesink_add(cdev, autoresume != 0);
	} else {
		sourcesink_add(cdev, autoresume != 0);
		loopback_add(cdev, autoresume != 0);
	}

	gcnum = usb_gadget_controller_number(gadget);
	if (gcnum >= 0)
		device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
	else {
		/* gadget zero is so simple (for now, no altsettings) that
		 * it SHOULD NOT have problems with bulk-capable hardware.
		 * so just warn about unrcognized controllers -- don't panic.
		 *
		 * things like configuration and altsetting numbering
		 * can need hardware-specific attention though.
		 */
		pr_warning("%s: controller '%s' not recognized\n",
			longname, gadget->name);
		device_desc.bcdDevice = cpu_to_le16(0x9999);
	}

	INFO(cdev, "%s, version: " DRIVER_VERSION "\n", longname);

	/* e.g. "Linux 3.x with musb-hdrc" */
	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
		init_utsname()->sysname, init_utsname()->release,
		gadget->name);

	return 0;
}
/* Unbind: make sure the autoresume timer cannot fire after teardown. */
static int zero_unbind(struct usb_composite_dev *cdev)
{
	del_timer_sync(&autoresume_timer);
	return 0;
}

/* composite driver glue; .bind is passed separately to usb_composite_probe() */
static struct usb_composite_driver zero_driver = {
	.name		= "zero",
	.dev		= &device_desc,
	.strings	= dev_strings,
	.max_speed	= USB_SPEED_SUPER,
	.unbind		= zero_unbind,
	.suspend	= zero_suspend,
	.resume		= zero_resume,
};
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
/* Module entry: register the composite driver; zero_bind() runs on attach. */
static int __init init(void)
{
	return usb_composite_probe(&zero_driver, zero_bind);
}
module_init(init);

/* Module exit: unregister; the framework calls zero_unbind() for us. */
static void __exit cleanup(void)
{
	usb_composite_unregister(&zero_driver);
}
module_exit(cleanup);
| gpl-2.0 |
caio2k/kernel-n9 | drivers/isdn/divert/divert_init.c | 5133 | 2399 | /* $Id divert_init.c,v 1.5.6.2 2001/01/24 22:18:17 kai Exp $
*
* Module init for DSS1 diversion services for i4l.
*
* Copyright 1999 by Werner Cornelius (werner@isdn4linux.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include "isdn_divert.h"
MODULE_DESCRIPTION("ISDN4Linux: Call diversion support");
MODULE_AUTHOR("Werner Cornelius");
MODULE_LICENSE("GPL");
/****************************************/
/* structure containing interface to hl */
/****************************************/
/* Passed to the i4l lowlevel code via DIVERT_REG_NAME; the NULL slots
 * (cmd, drv_to_name, name_to_drv) are filled in by the registrar.
 */
isdn_divert_if divert_if =
{ DIVERT_IF_MAGIC,	/* magic value */
  DIVERT_CMD_REG,	/* register cmd */
  ll_callback,		/* callback routine from ll */
  NULL,			/* command still not specified */
  NULL,			/* drv_to_name */
  NULL,			/* name_to_drv */
};
/*************************/
/* Module interface code */
/* no cmd line parms */
/*************************/
/*
 * Module init: create the diversion device node, then register our
 * interface structure with the i4l lowlevel code.  Either failure
 * unwinds what was done and aborts the load with -EIO.
 */
static int __init divert_init(void)
{
	int ret;

	if (divert_dev_init()) {
		printk(KERN_WARNING "dss1_divert: cannot install device, not loaded\n");
		return -EIO;
	}

	ret = DIVERT_REG_NAME(&divert_if);
	if (ret != DIVERT_NO_ERR) {
		divert_dev_deinit();
		printk(KERN_WARNING "dss1_divert: error %d registering module, not loaded\n", ret);
		return -EIO;
	}

	printk(KERN_INFO "dss1_divert module successfully installed\n");
	return 0;
}
/**********************/
/* Module deinit code */
/**********************/
/*
 * Deregister from the lowlevel code under divert_lock; if either the
 * deregistration or the device removal fails, back out and keep the
 * module loaded.  Only on full success are the rules and /proc entries
 * freed.
 */
static void __exit divert_exit(void)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&divert_lock, flags);
	divert_if.cmd = DIVERT_CMD_REL; /* release */
	if ((i = DIVERT_REG_NAME(&divert_if)) != DIVERT_NO_ERR)
	{ printk(KERN_WARNING "dss1_divert: error %d releasing module\n",i);
	  spin_unlock_irqrestore(&divert_lock, flags);
	  return;
	}
	if (divert_dev_deinit())
	{ printk(KERN_WARNING "dss1_divert: device busy, remove cancelled\n");
	  spin_unlock_irqrestore(&divert_lock, flags);
	  return;
	}
	spin_unlock_irqrestore(&divert_lock, flags);
	deleterule(-1); /* delete all rules and free mem */
	deleteprocs();
	printk(KERN_INFO "dss1_divert module successfully removed \n");
}
module_init(divert_init);
module_exit(divert_exit);
| gpl-2.0 |
morristech/GT-I9300-JB-3.0.y | drivers/isdn/hardware/eicon/mntfunc.c | 5133 | 8521 | /* $Id: mntfunc.c,v 1.19.6.4 2005/01/31 12:22:20 armin Exp $
*
* Driver for Eicon DIVA Server ISDN cards.
* Maint module
*
* Copyright 2000-2003 by Armin Schindler (mac@melware.de)
* Copyright 2000-2003 Cytronics & Melware (info@melware.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*/
#include "platform.h"
#include "di_defs.h"
#include "divasync.h"
#include "debug_if.h"
extern char *DRIVERRELEASE_MNT;
#define DBG_MINIMUM (DL_LOG + DL_FTL + DL_ERR)
#define DBG_DEFAULT (DBG_MINIMUM + DL_XLOG + DL_REG)
extern void DIVA_DIDD_Read(void *, int);
/* handle returned by the DIDD adapter-change-notification registration */
static dword notify_handle;
/* descriptor of the main DIDD adapter (our entry point into DIDD) */
static DESCRIPTOR DAdapter;
/* descriptor of the DIMAINT debug-output adapter, when one is present */
static DESCRIPTOR MAdapter;
/* our own descriptor, registered with DIDD as the MAINT adapter */
static DESCRIPTOR MaintDescriptor =
{ IDI_DIMAINT, 0, 0, (IDI_CALL) diva_maint_prtComp };

extern int diva_os_copy_to_user(void *os_handle, void __user *dst,
				const void *src, int length);
extern int diva_os_copy_from_user(void *os_handle, void *dst,
				  const void __user *src, int length);

/* sink used as dprintf while no DIMAINT adapter is registered */
static void no_printf(unsigned char *x, ...)
{
	/* dummy debug function */
}
#include "debuglib.c"
/*
 * DIDD callback function
 *
 * Invoked by DIDD whenever an adapter is added or removed.  Tracks the
 * DIMAINT debug-output adapter (switching dprintf accordingly) and
 * adds/removes XDI adapters (types 1..15) from the maint machinery.
 */
static void *didd_callback(void *context, DESCRIPTOR * adapter,
			   int removal)
{
	if (adapter->type == IDI_DADAPTER) {
		DBG_ERR(("cb: Change in DAdapter ? Oops ?."));
	} else if (adapter->type == IDI_DIMAINT) {
		if (removal) {
			/* debug adapter gone: fall back to the dummy printf */
			DbgDeregister();
			memset(&MAdapter, 0, sizeof(MAdapter));
			dprintf = no_printf;
		} else {
			memcpy(&MAdapter, adapter, sizeof(MAdapter));
			dprintf = (DIVA_DI_PRINTF) MAdapter.request;
			DbgRegister("MAINT", DRIVERRELEASE_MNT, DBG_DEFAULT);
		}
	} else if ((adapter->type > 0) && (adapter->type < 16)) {
		if (removal) {
			diva_mnt_remove_xdi_adapter(adapter);
		} else {
			diva_mnt_add_xdi_adapter(adapter);
		}
	}
	return (NULL);
}
/*
 * connect to didd
 *
 * Read the DIDD adapter table, register for adapter-change
 * notifications, register ourselves as the MAINT adapter, and hook up
 * any XDI adapters already present.  Returns 1 when the main DIDD
 * adapter was found and registration succeeded, 0 otherwise.
 */
static int DIVA_INIT_FUNCTION connect_didd(void)
{
	int x = 0;
	int dadapter = 0;
	IDI_SYNC_REQ req;
	DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];

	DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));

	for (x = 0; x < MAX_DESCRIPTORS; x++) {
		if (DIDD_Table[x].type == IDI_DADAPTER) {	/* DADAPTER found */
			dadapter = 1;
			memcpy(&DAdapter, &DIDD_Table[x], sizeof(DAdapter));
			/* register for adapter add/remove notifications */
			req.didd_notify.e.Req = 0;
			req.didd_notify.e.Rc =
			    IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
			req.didd_notify.info.callback = (void *)didd_callback;
			req.didd_notify.info.context = NULL;
			DAdapter.request((ENTITY *) & req);
			if (req.didd_notify.e.Rc != 0xff)	/* 0xff == success */
				return (0);
			notify_handle = req.didd_notify.info.handle;
			/* Register MAINT (me) */
			req.didd_add_adapter.e.Req = 0;
			req.didd_add_adapter.e.Rc =
			    IDI_SYNC_REQ_DIDD_ADD_ADAPTER;
			req.didd_add_adapter.info.descriptor =
			    (void *) &MaintDescriptor;
			DAdapter.request((ENTITY *) & req);
			if (req.didd_add_adapter.e.Rc != 0xff)
				return (0);
		} else if ((DIDD_Table[x].type > 0)
			   && (DIDD_Table[x].type < 16)) {
			/* XDI adapter already present: add it for tracing */
			diva_mnt_add_xdi_adapter(&DIDD_Table[x]);
		}
	}
	return (dadapter);
}
/*
 * disconnect from didd
 *
 * Undo connect_didd(): drop the adapter-change notification and
 * deregister our MAINT adapter descriptor.
 */
static void DIVA_EXIT_FUNCTION disconnect_didd(void)
{
	IDI_SYNC_REQ req;

	req.didd_notify.e.Req = 0;
	req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
	req.didd_notify.info.handle = notify_handle;
	DAdapter.request((ENTITY *) & req);

	req.didd_remove_adapter.e.Req = 0;
	req.didd_remove_adapter.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER;
	req.didd_remove_adapter.info.p_request =
	    (IDI_CALL) MaintDescriptor.request;
	DAdapter.request((ENTITY *) & req);
}
/*
 * read/write maint
 *
 * Dispatcher for the maint device.  The user buffer starts with three
 * dwords: command, driver id, and a mask-or-size argument; anything
 * beyond offset 12 is command specific.  Returns the number of bytes
 * produced (command dependent) or a negative errno.
 */
int maint_read_write(void __user *buf, int count)
{
	byte data[128];
	dword cmd, id, mask;
	int ret = 0;

	if (count < (3 * sizeof(dword)))
		return (-EFAULT);

	if (diva_os_copy_from_user(NULL, (void *) &data[0],
				   buf, 3 * sizeof(dword))) {
		return (-EFAULT);
	}

	cmd = *(dword *) & data[0];	/* command */
	id = *(dword *) & data[4];	/* driver id */
	mask = *(dword *) & data[8];	/* mask or size */

	switch (cmd) {
	case DITRACE_CMD_GET_DRIVER_INFO:
		if ((ret = diva_get_driver_info(id, data, sizeof(data))) > 0) {
			if ((count < ret) || diva_os_copy_to_user
			    (NULL, buf, (void *) &data[0], ret))
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
		break;

	case DITRACE_READ_DRIVER_DBG_MASK:
		if ((ret = diva_get_driver_dbg_mask(id, (byte *) data)) > 0) {
			if ((count < ret) || diva_os_copy_to_user
			    (NULL, buf, (void *) &data[0], ret))
				ret = -EFAULT;
		} else {
			ret = -ENODEV;
		}
		break;

	case DITRACE_WRITE_DRIVER_DBG_MASK:
		if ((ret = diva_set_driver_dbg_mask(id, mask)) <= 0) {
			ret = -ENODEV;
		}
		break;

	/*
	   Filter commands will ignore the ID due to fact that filtering affects
	   the B- channel and Audio Tap trace levels only. Also MAINT driver will
	   select the right trace ID by itself
	 */
	case DITRACE_WRITE_SELECTIVE_TRACE_FILTER:
		/* mask == 0 means "match everything"; otherwise mask is the
		   filter string length, copied from offset 12 of the buffer */
		if (!mask) {
			ret = diva_set_trace_filter (1, "*");
		} else if (mask < sizeof(data)) {
			if (diva_os_copy_from_user(NULL, data, (char __user *)buf+12, mask)) {
				ret = -EFAULT;
			} else {
				ret = diva_set_trace_filter ((int)mask, data);
			}
		} else {
			ret = -EINVAL;
		}
		break;

	case DITRACE_READ_SELECTIVE_TRACE_FILTER:
		if ((ret = diva_get_trace_filter (sizeof(data), data)) > 0) {
			if (diva_os_copy_to_user (NULL, buf, data, ret))
				ret = -EFAULT;
		} else {
			ret = -ENODEV;
		}
		break;

	/* fetch a single trace record; mask is the caller's buffer size */
	case DITRACE_READ_TRACE_ENTRY:{
			diva_os_spin_lock_magic_t old_irql;
			word size;
			diva_dbg_entry_head_t *pmsg;
			byte *pbuf;

			if (!(pbuf = diva_os_malloc(0, mask))) {
				return (-ENOMEM);
			}

			/* one-shot loop: used only for the early 'break's */
			for(;;) {
				if (!(pmsg =
				      diva_maint_get_message(&size, &old_irql))) {
					break;
				}
				if (size > mask) {
					/* too big for caller: leave it queued (ack 0) */
					diva_maint_ack_message(0, &old_irql);
					ret = -EINVAL;
					break;
				}
				ret = size;
				memcpy(pbuf, pmsg, size);
				diva_maint_ack_message(1, &old_irql);
				if ((count < size) ||
				    diva_os_copy_to_user (NULL, buf, (void *) pbuf, size))
					ret = -EFAULT;
				break;
			}
			diva_os_free(0, pbuf);
		}
		break;

	/* fetch as many records as fit in a mask-sized buffer; each record
	   is prefixed by a 32-bit little-endian length, and the stream is
	   terminated by a zero length word */
	case DITRACE_READ_TRACE_ENTRYS:{
			diva_os_spin_lock_magic_t old_irql;
			word size;
			diva_dbg_entry_head_t *pmsg;
			byte *pbuf = NULL;
			int written = 0;

			if (mask < 4096) {
				ret = -EINVAL;
				break;
			}
			if (!(pbuf = diva_os_malloc(0, mask))) {
				return (-ENOMEM);
			}

			for (;;) {
				if (!(pmsg =
				      diva_maint_get_message(&size, &old_irql))) {
					break;
				}
				/* size+8 keeps room for this header and the
				   trailing zero-length terminator */
				if ((size + 8) > mask) {
					diva_maint_ack_message(0, &old_irql);
					break;
				}
				/*
				   Write entry length
				 */
				pbuf[written++] = (byte) size;
				pbuf[written++] = (byte) (size >> 8);
				pbuf[written++] = 0;
				pbuf[written++] = 0;
				/*
				   Write message
				 */
				memcpy(&pbuf[written], pmsg, size);
				diva_maint_ack_message(1, &old_irql);
				written += size;
				mask -= (size + 4);
			}
			/* zero-length terminator */
			pbuf[written++] = 0;
			pbuf[written++] = 0;
			pbuf[written++] = 0;
			pbuf[written++] = 0;
			if ((count < written) || diva_os_copy_to_user(NULL, buf, (void *) pbuf, written)) {
				ret = -EFAULT;
			} else {
				ret = written;
			}
			diva_os_free(0, pbuf);
		}
		break;

	default:
		ret = -EINVAL;
	}
	return (ret);
}
/*
 * init
 *
 * Clamp the requested trace buffer size to 64..512 KiB, obtain the
 * buffer (using the preallocated area in diva_dbg_mem when given),
 * initialise the maint core and connect to DIDD.  Returns 1 on
 * success, 0 on failure (with everything we allocated freed again).
 */
int DIVA_INIT_FUNCTION mntfunc_init(int *buffer_length, void **buffer,
				    unsigned long diva_dbg_mem)
{
	if (*buffer_length < 64) {
		*buffer_length = 64;
	}
	if (*buffer_length > 512) {
		*buffer_length = 512;
	}
	*buffer_length *= 1024;	/* caller passes KiB */

	if (diva_dbg_mem) {
		*buffer = (void *) diva_dbg_mem;
	} else {
		/* shrink in 1 KiB steps until the allocation fits,
		   down to a 64 KiB floor */
		while ((*buffer_length >= (64 * 1024))
		       &&
		       (!(*buffer = diva_os_malloc (0, *buffer_length)))) {
			*buffer_length -= 1024;
		}
		if (!*buffer) {
			DBG_ERR(("init: Can not alloc trace buffer"));
			return (0);
		}
	}

	if (diva_maint_init(*buffer, *buffer_length, (diva_dbg_mem == 0))) {
		if (!diva_dbg_mem) {
			diva_os_free (0, *buffer);
		}
		DBG_ERR(("init: maint init failed"));
		return (0);
	}

	if (!connect_didd()) {
		DBG_ERR(("init: failed to connect to DIDD."));
		diva_maint_finit();
		if (!diva_dbg_mem) {
			diva_os_free (0, *buffer);
		}
		return (0);
	}
	return (1);
}
/*
 * exit
 *
 * Tear down in reverse order: stop debug output, shut down the XDI
 * adapters (polling for up to ~1 second), disconnect from DIDD and
 * free the trace buffer if diva_maint_finit() hands one back.
 */
void DIVA_EXIT_FUNCTION mntfunc_finit(void)
{
	void *buffer;
	int i = 100;	/* up to 100 * 10ms waiting for adapter shutdown */

	DbgDeregister();

	while (diva_mnt_shutdown_xdi_adapters() && i--) {
		diva_os_sleep(10);
	}

	disconnect_didd();

	if ((buffer = diva_maint_finit())) {
		diva_os_free (0, buffer);
	}

	memset(&MAdapter, 0, sizeof(MAdapter));
	dprintf = no_printf;
}
| gpl-2.0 |
SinxOner/android_kernel_lge_p710 | arch/sparc/kernel/ebus.c | 7181 | 5983 | /* ebus.c: EBUS DMA library code.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/ebus_dma.h>
#include <asm/io.h>
/* EBUS DMA engine register offsets */
#define EBDMA_CSR	0x00UL	/* Control/Status */
#define EBDMA_ADDR	0x04UL	/* DMA Address */
#define EBDMA_COUNT	0x08UL	/* DMA Count */

/* EBDMA_CSR bit definitions */
#define EBDMA_CSR_INT_PEND	0x00000001
#define EBDMA_CSR_ERR_PEND	0x00000002
#define EBDMA_CSR_DRAIN		0x00000004
#define EBDMA_CSR_INT_EN	0x00000010
#define EBDMA_CSR_RESET		0x00000080
#define EBDMA_CSR_WRITE		0x00000100
#define EBDMA_CSR_EN_DMA	0x00000200
#define EBDMA_CSR_CYC_PEND	0x00000400
#define EBDMA_CSR_DIAG_RD_DONE	0x00000800
#define EBDMA_CSR_DIAG_WR_DONE	0x00001000
#define EBDMA_CSR_EN_CNT	0x00002000
#define EBDMA_CSR_TC		0x00004000	/* terminal count reached */
#define EBDMA_CSR_DIS_CSR_DRN	0x00010000
#define EBDMA_CSR_BURST_SZ_MASK	0x000c0000
#define EBDMA_CSR_BURST_SZ_1	0x00080000
#define EBDMA_CSR_BURST_SZ_4	0x00000000
#define EBDMA_CSR_BURST_SZ_8	0x00040000
#define EBDMA_CSR_BURST_SZ_16	0x000c0000
#define EBDMA_CSR_DIAG_EN	0x00100000
#define EBDMA_CSR_DIS_ERR_PEND	0x00400000
#define EBDMA_CSR_TCI_DIS	0x00800000
#define EBDMA_CSR_EN_NEXT	0x01000000
#define EBDMA_CSR_DMA_ON	0x02000000
#define EBDMA_CSR_A_LOADED	0x04000000
#define EBDMA_CSR_NA_LOADED	0x08000000	/* next address loaded */
#define EBDMA_CSR_DEV_ID_MASK	0xf0000000

/* max iterations of the 10us drain poll in __ebus_dma_reset() */
#define EBUS_DMA_RESET_TIMEOUT	10000
/*
 * Reset the DMA engine.  Unless @no_drain is set, poll (up to
 * EBUS_DMA_RESET_TIMEOUT iterations of 10us) for the drain and
 * cycle-pending bits to clear before returning.  Caller holds p->lock
 * or has exclusive access.
 */
static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
{
	int i;
	u32 val = 0;

	writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
	udelay(1);

	if (no_drain)
		return;

	for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) {
		val = readl(p->regs + EBDMA_CSR);

		if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND)))
			break;
		udelay(10);
	}
}
/*
 * Shared interrupt handler: read and write back the CSR (which
 * acknowledges the pending bits), then dispatch an error, DMA-complete
 * or device event to the client callback.
 */
static irqreturn_t ebus_dma_irq(int irq, void *dev_id)
{
	struct ebus_dma_info *p = dev_id;
	unsigned long flags;
	u32 csr = 0;

	spin_lock_irqsave(&p->lock, flags);
	csr = readl(p->regs + EBDMA_CSR);
	writel(csr, p->regs + EBDMA_CSR);	/* ack pending bits */
	spin_unlock_irqrestore(&p->lock, flags);

	if (csr & EBDMA_CSR_ERR_PEND) {
		printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
		p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
		return IRQ_HANDLED;
	} else if (csr & EBDMA_CSR_INT_PEND) {
		/* TC set means the transfer count expired; otherwise the
		 * device itself raised the interrupt */
		p->callback(p,
			    (csr & EBDMA_CSR_TC) ?
			    EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE,
			    p->client_cookie);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
/*
 * Validate a client-filled ebus_dma_info and program the engine with
 * its initial CSR value (16-byte bursts, count enabled, optional
 * terminal-count interrupt disable).  Returns 0 or -EINVAL.
 */
int ebus_dma_register(struct ebus_dma_info *p)
{
	const u32 valid_flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
				EBUS_DMA_FLAG_TCI_DISABLE;
	u32 csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT;

	/* reject unmapped registers, unknown flags, or an empty name */
	if (!p->regs || (p->flags & ~valid_flags) || !strlen(p->name))
		return -EINVAL;
	/* a client asking us to handle IRQs must supply a callback */
	if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback)
		return -EINVAL;

	__ebus_dma_reset(p, 1);

	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
		csr |= EBDMA_CSR_TCI_DIS;
	writel(csr, p->regs + EBDMA_CSR);

	return 0;
}
EXPORT_SYMBOL(ebus_dma_register);
/*
 * Enable or disable the DMA interrupt.  When the client asked the
 * library to own the handler (EBUS_DMA_FLAG_USE_EBDMA_HANDLER) the IRQ
 * line is requested/freed here too.  Returns 0, or -EBUSY if the IRQ
 * request fails.
 */
int ebus_dma_irq_enable(struct ebus_dma_info *p, int on)
{
	unsigned long flags;
	u32 csr;

	if (on) {
		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
			if (request_irq(p->irq, ebus_dma_irq, IRQF_SHARED, p->name, p))
				return -EBUSY;
		}

		spin_lock_irqsave(&p->lock, flags);
		csr = readl(p->regs + EBDMA_CSR);
		csr |= EBDMA_CSR_INT_EN;
		writel(csr, p->regs + EBDMA_CSR);
		spin_unlock_irqrestore(&p->lock, flags);
	} else {
		/* mask in hardware first, then release the line */
		spin_lock_irqsave(&p->lock, flags);
		csr = readl(p->regs + EBDMA_CSR);
		csr &= ~EBDMA_CSR_INT_EN;
		writel(csr, p->regs + EBDMA_CSR);
		spin_unlock_irqrestore(&p->lock, flags);

		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
			free_irq(p->irq, p);
		}
	}

	return 0;
}
EXPORT_SYMBOL(ebus_dma_irq_enable);
/*
 * Unregister: clear INT_EN in the CSR and, if it was set, free the IRQ
 * handler.  free_irq() happens outside the spinlock since it may sleep.
 *
 * NOTE(review): the IRQ is freed whenever INT_EN was set, without
 * checking EBUS_DMA_FLAG_USE_EBDMA_HANDLER — confirm all clients that
 * enable interrupts also let this library own the handler.
 */
void ebus_dma_unregister(struct ebus_dma_info *p)
{
	unsigned long flags;
	u32 csr;
	int irq_on = 0;

	spin_lock_irqsave(&p->lock, flags);
	csr = readl(p->regs + EBDMA_CSR);
	if (csr & EBDMA_CSR_INT_EN) {
		csr &= ~EBDMA_CSR_INT_EN;
		writel(csr, p->regs + EBDMA_CSR);
		irq_on = 1;
	}
	spin_unlock_irqrestore(&p->lock, flags);

	if (irq_on)
		free_irq(p->irq, p);
}
EXPORT_SYMBOL(ebus_dma_unregister);
/*
 * Queue one DMA transfer of @len bytes at bus address @bus_addr.
 * Returns -EINVAL if @len exceeds the 24-bit hardware count or DMA is
 * not enabled, -EBUSY if a next-address is already loaded, else 0.
 */
int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len)
{
	unsigned long flags;
	u32 csr;
	int err;

	if (len >= (1 << 24))
		return -EINVAL;

	spin_lock_irqsave(&p->lock, flags);
	csr = readl(p->regs + EBDMA_CSR);
	err = -EINVAL;
	if (!(csr & EBDMA_CSR_EN_DMA))
		goto out;
	err = -EBUSY;
	if (csr & EBDMA_CSR_NA_LOADED)
		goto out;

	/* count first, then address (loading ADDR arms the transfer) */
	writel(len,      p->regs + EBDMA_COUNT);
	writel(bus_addr, p->regs + EBDMA_ADDR);
	err = 0;

out:
	spin_unlock_irqrestore(&p->lock, flags);

	return err;
}
EXPORT_SYMBOL(ebus_dma_request);
/*
 * Reset the engine and program a fresh CSR for a new transfer:
 * interrupts, counting, 16-byte bursts and next-address chaining on;
 * the WRITE bit is set when @write is nonzero.
 */
void ebus_dma_prepare(struct ebus_dma_info *p, int write)
{
	unsigned long flags;
	u32 csr;

	spin_lock_irqsave(&p->lock, flags);
	__ebus_dma_reset(p, 0);

	csr = (EBDMA_CSR_INT_EN |
	       EBDMA_CSR_EN_CNT |
	       EBDMA_CSR_BURST_SZ_16 |
	       EBDMA_CSR_EN_NEXT);

	if (write)
		csr |= EBDMA_CSR_WRITE;
	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
		csr |= EBDMA_CSR_TCI_DIS;

	writel(csr, p->regs + EBDMA_CSR);

	spin_unlock_irqrestore(&p->lock, flags);
}
EXPORT_SYMBOL(ebus_dma_prepare);
/* Remaining byte count of the current transfer. */
unsigned int ebus_dma_residue(struct ebus_dma_info *p)
{
	return readl(p->regs + EBDMA_COUNT);
}
EXPORT_SYMBOL(ebus_dma_residue);

/* Current DMA bus address register value. */
unsigned int ebus_dma_addr(struct ebus_dma_info *p)
{
	return readl(p->regs + EBDMA_ADDR);
}
EXPORT_SYMBOL(ebus_dma_addr);
/*
 * Turn the EN_DMA bit on or off.  The CSR is only written back when
 * the bit actually changes, avoiding a spurious register write.
 */
void ebus_dma_enable(struct ebus_dma_info *p, int on)
{
	unsigned long flags;
	u32 orig_csr, csr;

	spin_lock_irqsave(&p->lock, flags);
	orig_csr = csr = readl(p->regs + EBDMA_CSR);
	if (on)
		csr |= EBDMA_CSR_EN_DMA;
	else
		csr &= ~EBDMA_CSR_EN_DMA;
	if ((orig_csr & EBDMA_CSR_EN_DMA) !=
	    (csr & EBDMA_CSR_EN_DMA))
		writel(csr, p->regs + EBDMA_CSR);
	spin_unlock_irqrestore(&p->lock, flags);
}
EXPORT_SYMBOL(ebus_dma_enable);
| gpl-2.0 |
xdabbeb/g2-kk-kernel | arch/sparc/kernel/of_device_common.c | 7181 | 3946 | #include <linux/string.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mod_devicetable.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include "of_device_common.h"
/*
 * Map interrupt @index of @node to a virq via the platform device's
 * archdata (filled in when the bus was probed).  Returns 0 when there
 * is no device for the node or the index is out of range.
 */
unsigned int irq_of_parse_and_map(struct device_node *node, int index)
{
	struct platform_device *op = of_find_device_by_node(node);

	if (!op || index >= op->archdata.num_irqs)
		return 0;

	return op->archdata.irqs[index];
}
EXPORT_SYMBOL(irq_of_parse_and_map);

/* Copy resource @index of the node's platform device into *r;
 * -EINVAL when no device exists or the index is out of range.
 */
int of_address_to_resource(struct device_node *node, int index,
			   struct resource *r)
{
	struct platform_device *op = of_find_device_by_node(node);

	if (!op || index >= op->num_resources)
		return -EINVAL;

	memcpy(r, &op->archdata.resource[index], sizeof(*r));
	return 0;
}
EXPORT_SYMBOL_GPL(of_address_to_resource);

/* ioremap resource @index of the node's platform device (NULL on error). */
void __iomem *of_iomap(struct device_node *node, int index)
{
	struct platform_device *op = of_find_device_by_node(node);
	struct resource *r;

	if (!op || index >= op->num_resources)
		return NULL;

	r = &op->archdata.resource[index];

	return of_ioremap(r, 0, resource_size(r), (char *) r->name);
}
EXPORT_SYMBOL(of_iomap);
/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
 * BUS and propagate to all child platform_device objects.
 */
void of_propagate_archdata(struct platform_device *bus)
{
	struct dev_archdata *bus_sd = &bus->dev.archdata;
	struct device_node *bus_dp = bus->dev.of_node;
	struct device_node *dp;

	for (dp = bus_dp->child; dp; dp = dp->sibling) {
		struct platform_device *op = of_find_device_by_node(dp);

		op->dev.archdata.iommu = bus_sd->iommu;
		op->dev.archdata.stc = bus_sd->stc;
		op->dev.archdata.host_controller = bus_sd->host_controller;
		op->dev.archdata.numa_node = bus_sd->numa_node;

		/* recurse so grandchildren inherit the same values */
		if (dp->child)
			of_propagate_archdata(op);
	}
}
/* Fetch #address-cells / #size-cells for @dp; either out-param may be NULL. */
static void get_cells(struct device_node *dp, int *addrc, int *sizec)
{
	if (addrc)
		*addrc = of_n_addr_cells(dp);
	if (sizec)
		*sizec = of_n_size_cells(dp);
}

/*
 * Default translator (generic bus)
 */
void of_bus_default_count_cells(struct device_node *dev, int *addrc, int *sizec)
{
	get_cells(dev, addrc, sizec);
}
/* Make sure the least significant 64-bits are in-range.  Even
 * for 3 or 4 cell values it is a good enough approximation.
 * Returns 1 when ADDR falls outside [BASE, BASE + SIZE), 0 otherwise.
 */
int of_out_of_range(const u32 *addr, const u32 *base,
		    const u32 *size, int na, int ns)
{
	u64 val = of_read_addr(addr, na);
	u64 lo = of_read_addr(base, na);
	u64 hi = lo + of_read_addr(size, ns);

	return val < lo || val >= hi;
}
/*
 * Translate a child-bus address through one "ranges" entry:
 * ADDR (na cells) is rewritten in place to pna parent cells as
 * parent_base plus the per-cell offset (child_addr - child_base);
 * the addition is done cell-by-cell with no carry propagation.
 * Returns 0, or -EINVAL when out of range or ns > 2.
 */
int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna)
{
	u32 result[OF_MAX_ADDR_CELLS];
	int i;

	if (ns > 2) {
		printk("of_device: Cannot handle size cells (%d) > 2.", ns);
		return -EINVAL;
	}

	/* range layout: child base (na), parent base (pna), size (ns) */
	if (of_out_of_range(addr, range, range + na + pna, na, ns))
		return -EINVAL;

	/* Start with the parent range base. */
	memcpy(result, range + na, pna * 4);

	/* Add in the child address offset. */
	for (i = 0; i < na; i++)
		result[pna - 1 - i] +=
			(addr[na - 1 - i] -
			 range[na - 1 - i]);

	memcpy(addr, result, pna * 4);

	return 0;
}
/* Default flag translator: keep non-zero flags, else assume MMIO memory. */
unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
{
	return flags ? flags : IORESOURCE_MEM;
}
/*
 * SBUS bus specific translator
 */
/*
 * Return 1 if @np is on (or under) an "sbus"/"sbi" bus with no
 * intervening bus that provides its own "ranges" translation.
 */
int of_bus_sbus_match(struct device_node *np)
{
	struct device_node *dp = np;

	while (dp) {
		if (!strcmp(dp->name, "sbus") ||
		    !strcmp(dp->name, "sbi"))
			return 1;

		/* Have a look at use_1to1_mapping().  We're trying
		 * to match SBUS if that's the top-level bus and we
		 * don't have some intervening real bus that provides
		 * ranges based translations.
		 */
		if (of_find_property(dp, "ranges", NULL) != NULL)
			break;

		dp = dp->parent;
	}

	return 0;
}

/* SBUS children always use 2 address cells and 1 size cell,
 * independent of what the node's properties say.
 */
void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 2;
	if (sizec)
		*sizec = 1;
}
| gpl-2.0 |
Marvell-Semi/EBU_mainline_public | arch/sparc/kernel/of_device_common.c | 7181 | 3946 | #include <linux/string.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mod_devicetable.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include "of_device_common.h"
/*
 * Map an interrupt for @node.  On sparc the platform_device created at
 * probe time already carries fully resolved IRQ numbers in archdata,
 * so simply index into that array.  Returns 0 (meaning "no IRQ") when
 * the node has no platform device or @index is out of range.
 */
unsigned int irq_of_parse_and_map(struct device_node *node, int index)
{
	struct platform_device *op = of_find_device_by_node(node);

	if (!op || index >= op->archdata.num_irqs)
		return 0;

	return op->archdata.irqs[index];
}
EXPORT_SYMBOL(irq_of_parse_and_map);
/*
 * Copy the @index'th archdata resource of @node's platform device
 * into @r.  Returns 0 on success, -EINVAL when the node has no
 * platform device or @index is out of range.
 */
int of_address_to_resource(struct device_node *node, int index,
			   struct resource *r)
{
	struct platform_device *op = of_find_device_by_node(node);

	if (!op || index >= op->num_resources)
		return -EINVAL;

	memcpy(r, &op->archdata.resource[index], sizeof(*r));
	return 0;
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
/*
 * Map the @index'th resource of @node's platform device into kernel
 * virtual space via of_ioremap().  Returns NULL when the node has no
 * platform device or @index is out of range.  The caller is
 * responsible for unmapping.
 */
void __iomem *of_iomap(struct device_node *node, int index)
{
	struct platform_device *op = of_find_device_by_node(node);
	struct resource *r;

	if (!op || index >= op->num_resources)
		return NULL;

	r = &op->archdata.resource[index];

	return of_ioremap(r, 0, resource_size(r), (char *) r->name);
}
EXPORT_SYMBOL(of_iomap);
/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
* BUS and propagate to all child platform_device objects.
*/
void of_propagate_archdata(struct platform_device *bus)
{
	struct dev_archdata *bus_sd = &bus->dev.archdata;
	struct device_node *bus_dp = bus->dev.of_node;
	struct device_node *dp;

	/* Walk direct children; recurse for grandchildren below. */
	for (dp = bus_dp->child; dp; dp = dp->sibling) {
		struct platform_device *op = of_find_device_by_node(dp);

		/* NOTE(review): op is dereferenced without a NULL check;
		 * presumably every child node is guaranteed a platform
		 * device by the time this runs -- confirm at call sites. */
		op->dev.archdata.iommu = bus_sd->iommu;
		op->dev.archdata.stc = bus_sd->stc;
		op->dev.archdata.host_controller = bus_sd->host_controller;
		op->dev.archdata.numa_node = bus_sd->numa_node;

		if (dp->child)
			of_propagate_archdata(op);
	}
}
static void get_cells(struct device_node *dp, int *addrc, int *sizec)
{
if (addrc)
*addrc = of_n_addr_cells(dp);
if (sizec)
*sizec = of_n_size_cells(dp);
}
/*
* Default translator (generic bus)
*/
/*
 * Default translator: take the cell counts straight from the device
 * node (falling back to architecture defaults inside of_n_*_cells).
 */
void of_bus_default_count_cells(struct device_node *dev, int *addrc, int *sizec)
{
	get_cells(dev, addrc, sizec);
}
/* Make sure the least significant 64-bits are in-range. Even
* for 3 or 4 cell values it is a good enough approximation.
*/
/*
 * Non-zero when @addr (na cells) falls outside [base, base + size).
 * Only the low 64 bits are compared, which is a good enough
 * approximation even for 3- or 4-cell values.
 */
int of_out_of_range(const u32 *addr, const u32 *base,
		    const u32 *size, int na, int ns)
{
	u64 a = of_read_addr(addr, na);
	u64 lo = of_read_addr(base, na);
	u64 hi = lo + of_read_addr(size, ns);

	if (a >= lo && a < hi)
		return 0;

	return 1;
}
/*
 * Default ranges-based translation: verify that @addr lies inside the
 * child side of one "ranges" entry, then rebase it into the parent
 * address space.
 *
 * @addr:  child address (na cells); rewritten in place to the parent
 *         address (pna cells) on success.
 * @range: one ranges entry laid out as: child base (na cells),
 *         parent base (pna cells), size (ns cells).
 *
 * Returns 0 on success, -EINVAL when @ns is unsupported or @addr is
 * outside the range.
 */
int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna)
{
	u32 result[OF_MAX_ADDR_CELLS];
	int i;

	if (ns > 2) {
		/* Fix: log with an explicit level and terminate the line. */
		printk(KERN_ERR "of_device: Cannot handle size cells (%d) > 2.\n",
		       ns);
		return -EINVAL;
	}

	if (of_out_of_range(addr, range, range + na + pna, na, ns))
		return -EINVAL;

	/* Start with the parent range base. */
	memcpy(result, range + na, pna * 4);

	/* Add in the child address offset, least significant cell first. */
	for (i = 0; i < na; i++)
		result[pna - 1 - i] +=
			(addr[na - 1 - i] -
			 range[na - 1 - i]);

	memcpy(addr, result, pna * 4);

	return 0;
}
/*
 * Default flags translator.  Preserve any flags already accumulated;
 * an all-zero value means "not yet classified", which defaults to
 * memory-mapped.
 */
unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
{
	if (!flags)
		flags = IORESOURCE_MEM;

	return flags;
}
/*
* SBUS bus specific translator
*/
/*
 * Match a node that is a direct descendant of an SBUS ("sbus"/"sbi")
 * bus.  Walks up the tree; the walk stops at the first ancestor that
 * carries a "ranges" property, since such an intervening bus provides
 * its own ranges-based translation.
 */
int of_bus_sbus_match(struct device_node *np)
{
	struct device_node *dp = np;

	while (dp) {
		if (!strcmp(dp->name, "sbus") ||
		    !strcmp(dp->name, "sbi"))
			return 1;

		/* Have a look at use_1to1_mapping().  We're trying
		 * to match SBUS if that's the top-level bus and we
		 * don't have some intervening real bus that provides
		 * ranges based translations.
		 */
		if (of_find_property(dp, "ranges", NULL) != NULL)
			break;

		dp = dp->parent;
	}

	return 0;
}
/* SBUS uses fixed cell counts, whatever the child node claims. */
void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec)
{
	static const int sbus_addr_cells = 2;
	static const int sbus_size_cells = 1;

	if (addrc)
		*addrc = sbus_addr_cells;
	if (sizec)
		*sizec = sbus_size_cells;
}
| gpl-2.0 |
sudosurootdev/kernel_lge_ls980 | kernel/test_kprobes.c | 8461 | 8671 | /*
* test_kprobes.c - simple sanity test for *probes
*
* Copyright IBM Corp. 2008
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#define div_factor 3
static u32 rand1, preh_val, posth_val, jph_val;
static int errors, handler_errors, num_tests;
static u32 (*target)(u32 value);
static u32 (*target2)(u32 value);
/* The probed function.  noinline and called through a function
 * pointer so the call site cannot be optimized away. */
static noinline u32 kprobe_target(u32 value)
{
	return (value / div_factor);
}

/* Pre-handler: store a value derived from rand1 so the post-handler
 * can verify that the pre-handler actually ran first. */
static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	preh_val = (rand1 / div_factor);
	return 0;
}

static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
		unsigned long flags)
{
	/* preh_val must hold what the pre-handler stored. */
	if (preh_val != (rand1 / div_factor)) {
		handler_errors++;
		printk(KERN_ERR "Kprobe smoke test failed: "
				"incorrect value in post_handler\n");
	}
	posth_val = preh_val + div_factor;
}

static struct kprobe kp = {
	.symbol_name = "kprobe_target",
	.pre_handler = kp_pre_handler,
	.post_handler = kp_post_handler
};

/*
 * Register a single kprobe on kprobe_target, trigger it once, then
 * unregister.  Returns a negative errno only when registration
 * fails; handler problems are accumulated in handler_errors.
 */
static int test_kprobe(void)
{
	int ret;

	ret = register_kprobe(&kp);
	if (ret < 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"register_kprobe returned %d\n", ret);
		return ret;
	}

	ret = target(rand1);
	unregister_kprobe(&kp);

	if (preh_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kprobe pre_handler not called\n");
		handler_errors++;
	}

	if (posth_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kprobe post_handler not called\n");
		handler_errors++;
	}

	return 0;
}
/* Second probed function; produces a value distinct from
 * kprobe_target so the two probes can be told apart. */
static noinline u32 kprobe_target2(u32 value)
{
	return (value / div_factor) + 1;
}

static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
{
	preh_val = (rand1 / div_factor) + 1;
	return 0;
}

static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
		unsigned long flags)
{
	if (preh_val != (rand1 / div_factor) + 1) {
		handler_errors++;
		printk(KERN_ERR "Kprobe smoke test failed: "
				"incorrect value in post_handler2\n");
	}
	posth_val = preh_val + div_factor;
}

static struct kprobe kp2 = {
	.symbol_name = "kprobe_target2",
	.pre_handler = kp_pre_handler2,
	.post_handler = kp_post_handler2
};

/*
 * Batch registration test: register both kprobes at once with
 * register_kprobes(), trigger each target, and check that each pair
 * of handlers fired.
 */
static int test_kprobes(void)
{
	int ret;
	struct kprobe *kps[2] = {&kp, &kp2};

	/* addr and flags should be cleared for reusing kprobe. */
	kp.addr = NULL;
	kp.flags = 0;

	ret = register_kprobes(kps, 2);
	if (ret < 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"register_kprobes returned %d\n", ret);
		return ret;
	}

	preh_val = 0;
	posth_val = 0;
	ret = target(rand1);

	if (preh_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kprobe pre_handler not called\n");
		handler_errors++;
	}

	if (posth_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kprobe post_handler not called\n");
		handler_errors++;
	}

	preh_val = 0;
	posth_val = 0;
	ret = target2(rand1);

	if (preh_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kprobe pre_handler2 not called\n");
		handler_errors++;
	}

	if (posth_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kprobe post_handler2 not called\n");
		handler_errors++;
	}

	unregister_kprobes(kps, 2);
	return 0;
}
/* jprobe entry: invoked with the same arguments as the probed
 * function and must finish with jprobe_return(). */
static u32 j_kprobe_target(u32 value)
{
	/* The jprobe machinery must pass through the original argument. */
	if (value != rand1) {
		handler_errors++;
		printk(KERN_ERR "Kprobe smoke test failed: "
				"incorrect value in jprobe handler\n");
	}

	jph_val = rand1;
	jprobe_return();
	return 0;	/* never reached; jprobe_return() does not return */
}

static struct jprobe jp = {
	.entry		= j_kprobe_target,
	.kp.symbol_name = "kprobe_target"
};

/* Register one jprobe, trigger it, verify the handler ran. */
static int test_jprobe(void)
{
	int ret;

	ret = register_jprobe(&jp);
	if (ret < 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"register_jprobe returned %d\n", ret);
		return ret;
	}

	ret = target(rand1);
	unregister_jprobe(&jp);
	if (jph_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"jprobe handler not called\n");
		handler_errors++;
	}

	return 0;
}

/* Second jprobe sharing the same entry but attached to the other
 * target symbol. */
static struct jprobe jp2 = {
	.entry          = j_kprobe_target,
	.kp.symbol_name = "kprobe_target2"
};

/* Batch registration test for jprobes (register_jprobes). */
static int test_jprobes(void)
{
	int ret;
	struct jprobe *jps[2] = {&jp, &jp2};

	/* addr and flags should be cleared for reusing kprobe. */
	jp.kp.addr = NULL;
	jp.kp.flags = 0;

	ret = register_jprobes(jps, 2);
	if (ret < 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"register_jprobes returned %d\n", ret);
		return ret;
	}

	jph_val = 0;
	ret = target(rand1);
	if (jph_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"jprobe handler not called\n");
		handler_errors++;
	}

	jph_val = 0;
	ret = target2(rand1);
	if (jph_val == 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"jprobe handler2 not called\n");
		handler_errors++;
	}
	unregister_jprobes(jps, 2);
	return 0;
}
#ifdef CONFIG_KRETPROBES
/* Set by the entry handler, checked by the return handler, then set
 * to rand1 so the caller can tell the return handler ran. */
static u32 krph_val;

static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	krph_val = (rand1 / div_factor);
	return 0;
}

static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* The probed function's return value as seen in the registers. */
	unsigned long ret = regs_return_value(regs);

	if (ret != (rand1 / div_factor)) {
		handler_errors++;
		printk(KERN_ERR "Kprobe smoke test failed: "
				"incorrect value in kretprobe handler\n");
	}
	/* Entry handler must have run before the return handler. */
	if (krph_val == 0) {
		handler_errors++;
		printk(KERN_ERR "Kprobe smoke test failed: "
				"call to kretprobe entry handler failed\n");
	}

	krph_val = rand1;
	return 0;
}

static struct kretprobe rp = {
	.handler	= return_handler,
	.entry_handler  = entry_handler,
	.kp.symbol_name = "kprobe_target"
};

/* Register one kretprobe, trigger it, verify both handlers ran. */
static int test_kretprobe(void)
{
	int ret;

	ret = register_kretprobe(&rp);
	if (ret < 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"register_kretprobe returned %d\n", ret);
		return ret;
	}

	ret = target(rand1);
	unregister_kretprobe(&rp);
	if (krph_val != rand1) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kretprobe handler not called\n");
		handler_errors++;
	}

	return 0;
}

static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long ret = regs_return_value(regs);

	/* kprobe_target2 returns (value / div_factor) + 1. */
	if (ret != (rand1 / div_factor) + 1) {
		handler_errors++;
		printk(KERN_ERR "Kprobe smoke test failed: "
				"incorrect value in kretprobe handler2\n");
	}
	if (krph_val == 0) {
		handler_errors++;
		printk(KERN_ERR "Kprobe smoke test failed: "
				"call to kretprobe entry handler failed\n");
	}

	krph_val = rand1;
	return 0;
}

static struct kretprobe rp2 = {
	.handler	= return_handler2,
	.entry_handler  = entry_handler,
	.kp.symbol_name = "kprobe_target2"
};

/* Batch registration test for kretprobes (register_kretprobes). */
static int test_kretprobes(void)
{
	int ret;
	struct kretprobe *rps[2] = {&rp, &rp2};

	/* addr and flags should be cleared for reusing kprobe. */
	rp.kp.addr = NULL;
	rp.kp.flags = 0;
	ret = register_kretprobes(rps, 2);
	if (ret < 0) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"register_kretprobe returned %d\n", ret);
		return ret;
	}

	krph_val = 0;
	ret = target(rand1);
	if (krph_val != rand1) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kretprobe handler not called\n");
		handler_errors++;
	}

	krph_val = 0;
	ret = target2(rand1);
	if (krph_val != rand1) {
		printk(KERN_ERR "Kprobe smoke test failed: "
				"kretprobe handler2 not called\n");
		handler_errors++;
	}
	unregister_kretprobes(rps, 2);
	return 0;
}
#endif /* CONFIG_KRETPROBES */
/*
 * Kprobes smoke-test driver.  Picks a random test value strictly
 * greater than div_factor (so the integer division in the probed
 * functions yields a non-zero, checkable result), runs every
 * registration/trigger/unregister test in turn, and prints a summary.
 * Always returns 0: failures are reported via printk only.
 */
int init_test_probes(void)
{
	int ret;

	/* Indirect calls keep the compiler from inlining the targets. */
	target = kprobe_target;
	target2 = kprobe_target2;

	do {
		rand1 = random32();
	} while (rand1 <= div_factor);

	printk(KERN_INFO "Kprobe smoke test started\n");

	num_tests++;
	ret = test_kprobe();
	if (ret < 0)
		errors++;

	num_tests++;
	ret = test_kprobes();
	if (ret < 0)
		errors++;

	num_tests++;
	ret = test_jprobe();
	if (ret < 0)
		errors++;

	num_tests++;
	ret = test_jprobes();
	if (ret < 0)
		errors++;

#ifdef CONFIG_KRETPROBES
	num_tests++;
	ret = test_kretprobe();
	if (ret < 0)
		errors++;

	num_tests++;
	ret = test_kretprobes();
	if (ret < 0)
		errors++;
#endif /* CONFIG_KRETPROBES */

	if (errors)
		printk(KERN_ERR "BUG: Kprobe smoke test: %d out of "
				"%d tests failed\n", errors, num_tests);
	else if (handler_errors)
		printk(KERN_ERR "BUG: Kprobe smoke test: %d error(s) "
				"running handlers\n", handler_errors);
	else
		printk(KERN_INFO "Kprobe smoke test passed successfully\n");

	return 0;
}
| gpl-2.0 |
mathkid95/linux_motorola_lollipop | drivers/isdn/hardware/eicon/diva_dma.c | 9741 | 2712 |
/*
*
Copyright (c) Eicon Networks, 2002.
*
This source file is supplied for the use with
Eicon Networks range of DIVA Server Adapters.
*
Eicon File Revision : 2.1
*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
*
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
*
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include "platform.h"
#include "diva_dma.h"
/*
Every entry has length of PAGE_SIZE
and represents one single physical page
*/
struct _diva_dma_map_entry {
	int busy;		/* non-zero while the entry is allocated */
	dword phys_bus_addr;	/* 32bit address as seen by the card */
	void *local_ram_addr;	/* local address as seen by the host */
	void *addr_handle;	/* handle used to free allocated memory */
};
/*
  Create local mapping structure and init it to default state
*/
struct _diva_dma_map_entry *diva_alloc_dma_map(void *os_context, int nentries) {
	/* One extra, zero-filled entry terminates the map: an entry with
	   local_ram_addr == NULL marks the end (see diva_alloc_dma_map_entry). */
	diva_dma_map_entry_t *pmap = diva_os_malloc(0, sizeof(*pmap) * (nentries + 1));
	if (pmap)
		memset(pmap, 0, sizeof(*pmap) * (nentries + 1));
	return pmap;
}
/*
Free local map (context should be freed before) if any
*/
/*
  Free local map (context should be freed before) if any
*/
void diva_free_dma_mapping(struct _diva_dma_map_entry *pmap) {
	if (!pmap)
		return;
	diva_os_free(0, pmap);
}
/*
Set information saved on the map entry
*/
/* Record one page's physical/virtual addresses and its free-handle in
   map slot @nr.  Does not mark the slot busy. */
void diva_init_dma_map_entry(struct _diva_dma_map_entry *pmap,
			     int nr, void *virt, dword phys,
			     void *addr_handle) {
	pmap[nr].phys_bus_addr = phys;
	pmap[nr].local_ram_addr = virt;
	pmap[nr].addr_handle = addr_handle;
}
/*
Allocate one single entry in the map
*/
/* Claim the first free slot.  Returns its index, or -1 when the map
   is exhausted (or pmap is NULL). */
int diva_alloc_dma_map_entry(struct _diva_dma_map_entry *pmap) {
	int i;

	/* Scan until the zero terminator entry (local_ram_addr == NULL). */
	for (i = 0; (pmap && pmap[i].local_ram_addr); i++) {
		if (!pmap[i].busy) {
			pmap[i].busy = 1;
			return (i);
		}
	}
	return (-1);
}
/*
Free one single entry in the map
*/
/* Release slot @nr; the addresses stay recorded for later reuse. */
void diva_free_dma_map_entry(struct _diva_dma_map_entry *pmap, int nr) {
	pmap[nr].busy = 0;
}
/*
  Get information saved on the map entry
*/
void diva_get_dma_map_entry(struct _diva_dma_map_entry *pmap, int nr,
			    void **pvirt, dword *pphys) {
	*pphys = pmap[nr].phys_bus_addr;
	*pvirt = pmap[nr].local_ram_addr;
}
/* Return the free-handle recorded for slot @nr. */
void *diva_get_entry_handle(struct _diva_dma_map_entry *pmap, int nr) {
	return (pmap[nr].addr_handle);
}
| gpl-2.0 |
ktoonsez/KTSGS5 | arch/powerpc/oprofile/backtrace.c | 10509 | 2953 | /**
* Copyright (C) 2005 Brian Rogan <bcr6@cornell.edu>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
**/
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/compat.h>
#define STACK_SP(STACK) *(STACK)
#define STACK_LR64(STACK) *((unsigned long *)(STACK) + 2)
#define STACK_LR32(STACK) *((unsigned int *)(STACK) + 1)
#ifdef CONFIG_PPC64
#define STACK_LR(STACK) STACK_LR64(STACK)
#else
#define STACK_LR(STACK) STACK_LR32(STACK)
#endif
/*
 * Walk one frame of a 32-bit user stack.  @sp is the (compat) user
 * stack pointer.  Copies the two-word frame {caller sp, saved lr}
 * from user memory and, except on the first frame, records the saved
 * link register with oprofile.  Returns the caller's stack pointer,
 * or 0 when the frame cannot be read (which stops the walk).
 */
static unsigned int user_getsp32(unsigned int sp, int is_first)
{
	unsigned int stack_frame[2];
	void __user *p = compat_ptr(sp);

	if (!access_ok(VERIFY_READ, p, sizeof(stack_frame)))
		return 0;

	/*
	 * The most likely reason for this is that we returned -EFAULT,
	 * which means that we've done all that we can do from
	 * interrupt context.
	 */
	if (__copy_from_user_inatomic(stack_frame, p, sizeof(stack_frame)))
		return 0;

	if (!is_first)
		oprofile_add_trace(STACK_LR32(stack_frame));

	/*
	 * We do not enforce increasing stack addresses here because
	 * we may transition to a different stack, eg a signal handler.
	 */
	return STACK_SP(stack_frame);
}
#ifdef CONFIG_PPC64
/*
 * 64-bit variant of user_getsp32: the frame is three doublewords with
 * the saved link register in the third (see STACK_LR64).  Returns the
 * caller's stack pointer, or 0 when the frame cannot be read.
 */
static unsigned long user_getsp64(unsigned long sp, int is_first)
{
	unsigned long stack_frame[3];

	if (!access_ok(VERIFY_READ, (void __user *)sp, sizeof(stack_frame)))
		return 0;

	if (__copy_from_user_inatomic(stack_frame, (void __user *)sp,
					sizeof(stack_frame)))
		return 0;

	if (!is_first)
		oprofile_add_trace(STACK_LR64(stack_frame));

	return STACK_SP(stack_frame);
}
#endif
/*
 * Walk one frame of the kernel stack.  The frame is read directly
 * once validate_sp() confirms @sp is a plausible stack location for
 * the current task.  Returns the caller's stack pointer, or 0 to
 * stop the walk.
 */
static unsigned long kernel_getsp(unsigned long sp, int is_first)
{
	unsigned long *stack_frame = (unsigned long *)sp;

	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return 0;

	if (!is_first)
		oprofile_add_trace(STACK_LR(stack_frame));

	/*
	 * We do not enforce increasing stack addresses here because
	 * we might be transitioning from an interrupt stack to a kernel
	 * stack. validate_sp() is designed to understand this, so just
	 * use it.
	 */
	return STACK_SP(stack_frame);
}
/*
 * Record a backtrace of up to @depth frames for the interrupted
 * context.  Kernel-mode samples walk the kernel stack; user-mode
 * samples walk the 64-bit or 32-bit user stack depending on the
 * task's personality.
 */
void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	unsigned long sp = regs->gpr[1];	/* r1 is the PowerPC stack pointer */
	int first_frame = 1;

	/* We ditch the top stackframe so need to loop through an extra time */
	depth += 1;
	if (!user_mode(regs)) {
		while (depth--) {
			sp = kernel_getsp(sp, first_frame);
			if (!sp)
				break;
			first_frame = 0;
		}
	} else {
#ifdef CONFIG_PPC64
		if (!is_32bit_task()) {
			while (depth--) {
				sp = user_getsp64(sp, first_frame);
				if (!sp)
					break;
				first_frame = 0;
			}
			return;
		}
#endif

		while (depth--) {
			sp = user_getsp32(sp, first_frame);
			if (!sp)
				break;
			first_frame = 0;
		}
	}
}
| gpl-2.0 |
jakew02/android_kernel_asus_fugu | drivers/media/pci/cx18/cx18-cards.c | 12301 | 20522 | /*
* cx18 functions to query card hardware
*
* Derived from ivtv-cards.c
*
* Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
* 02111-1307 USA
*/
#include "cx18-driver.h"
#include "cx18-cards.h"
#include "cx18-av-core.h"
#include "cx18-i2c.h"
#include <media/cs5345.h>
#define V4L2_STD_PAL_SECAM (V4L2_STD_PAL|V4L2_STD_SECAM)
/********************** card configuration *******************************/
/* usual i2c tuner addresses to probe */
static struct cx18_card_tuner_i2c cx18_i2c_std = {
	.radio = { I2C_CLIENT_END },		/* no separate radio tuner */
	.demod = { 0x43, I2C_CLIENT_END },
	.tv    = { 0x61, 0x60, I2C_CLIENT_END },
};
/*
* usual i2c tuner addresses to probe with additional demod address for
* an NXP TDA8295 at 0x42 (N.B. it can possibly be at 0x4b or 0x4c too).
*/
static struct cx18_card_tuner_i2c cx18_i2c_nxp = {
	.radio = { I2C_CLIENT_END },		/* no separate radio tuner */
	.demod = { 0x42, 0x43, I2C_CLIENT_END },	/* 0x42: NXP TDA8295 */
	.tv    = { 0x61, 0x60, I2C_CLIENT_END },
};
/* Please add new PCI IDs to: http://pci-ids.ucw.cz/
This keeps the PCI ID database up to date. Note that the entries
must be added under vendor 0x4444 (Conexant) as subsystem IDs.
New vendor IDs should still be added to the vendor ID list. */
/* Hauppauge HVR-1600 cards */
/* Note: for Hauppauge cards the tveeprom information is used instead
of PCI IDs */
/* Production HVR-1600 variant fitted with ESMT DDR memory; see the
 * _s5h1411 and _samsung entries below for the other board revisions. */
static const struct cx18_card cx18_card_hvr1600_esmt = {
	.type = CX18_CARD_HVR_1600_ESMT,
	.name = "Hauppauge HVR-1600",
	.comment = "Simultaneous Digital and Analog TV capture supported\n",
	.v4l2_capabilities = CX18_CAP_ENCODER,
	.hw_audio_ctrl = CX18_HW_418_AV,
	.hw_muxer = CX18_HW_CS5345,
	.hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER |
		  CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL |
		  CX18_HW_Z8F0811_IR_HAUP,
	.video_inputs = {
		{ CX18_CARD_INPUT_VID_TUNER,  0, CX18_AV_COMPOSITE7 },
		{ CX18_CARD_INPUT_SVIDEO1,    1, CX18_AV_SVIDEO1    },
		{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 },
		{ CX18_CARD_INPUT_SVIDEO2,    2, CX18_AV_SVIDEO2    },
		{ CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 },
	},
	.audio_inputs = {
		{ CX18_CARD_INPUT_AUD_TUNER,
		  CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 },
		{ CX18_CARD_INPUT_LINE_IN1,
		  CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 },
		{ CX18_CARD_INPUT_LINE_IN2,
		  CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 },
	},
	.radio_input = { CX18_CARD_INPUT_AUD_TUNER,
			 CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 },
	.ddr = {
		/* ESMT M13S128324A-5B memory */
		.chip_config = 0x003,
		.refresh = 0x30c,
		.timing1 = 0x44220e82,
		.timing2 = 0x08,
		.tune_lane = 0,
		.initial_emrs = 0,
	},
	.gpio_init.initial_value = 0x3001,
	.gpio_init.direction = 0x3001,
	.gpio_i2c_slave_reset = {
		.active_lo_mask = 0x3001,
		.msecs_asserted = 10,
		.msecs_recovery = 40,
		.ir_reset_mask  = 0x0001,
	},
	.i2c = &cx18_i2c_std,
};
static const struct cx18_card cx18_card_hvr1600_s5h1411 = {
.type = CX18_CARD_HVR_1600_S5H1411,
.name = "Hauppauge HVR-1600",
.comment = "Simultaneous Digital and Analog TV capture supported\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_418_AV,
.hw_muxer = CX18_HW_CS5345,
.hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER |
CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL |
CX18_HW_Z8F0811_IR_HAUP,
.video_inputs = {
{ CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 },
{ CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 },
{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 },
{ CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 },
{ CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER,
CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 },
{ CX18_CARD_INPUT_LINE_IN1,
CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 },
{ CX18_CARD_INPUT_LINE_IN2,
CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 },
},
.radio_input = { CX18_CARD_INPUT_AUD_TUNER,
CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 },
.ddr = {
/* ESMT M13S128324A-5B memory */
.chip_config = 0x003,
.refresh = 0x30c,
.timing1 = 0x44220e82,
.timing2 = 0x08,
.tune_lane = 0,
.initial_emrs = 0,
},
.gpio_init.initial_value = 0x3801,
.gpio_init.direction = 0x3801,
.gpio_i2c_slave_reset = {
.active_lo_mask = 0x3801,
.msecs_asserted = 10,
.msecs_recovery = 40,
.ir_reset_mask = 0x0001,
},
.i2c = &cx18_i2c_nxp,
};
static const struct cx18_card cx18_card_hvr1600_samsung = {
.type = CX18_CARD_HVR_1600_SAMSUNG,
.name = "Hauppauge HVR-1600 (Preproduction)",
.comment = "Simultaneous Digital and Analog TV capture supported\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_418_AV,
.hw_muxer = CX18_HW_CS5345,
.hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER |
CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL |
CX18_HW_Z8F0811_IR_HAUP,
.video_inputs = {
{ CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 },
{ CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 },
{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 },
{ CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 },
{ CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER,
CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 },
{ CX18_CARD_INPUT_LINE_IN1,
CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 },
{ CX18_CARD_INPUT_LINE_IN2,
CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 },
},
.radio_input = { CX18_CARD_INPUT_AUD_TUNER,
CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 },
.ddr = {
/* Samsung K4D263238G-VC33 memory */
.chip_config = 0x003,
.refresh = 0x30c,
.timing1 = 0x23230b73,
.timing2 = 0x08,
.tune_lane = 0,
.initial_emrs = 2,
},
.gpio_init.initial_value = 0x3001,
.gpio_init.direction = 0x3001,
.gpio_i2c_slave_reset = {
.active_lo_mask = 0x3001,
.msecs_asserted = 10,
.msecs_recovery = 40,
.ir_reset_mask = 0x0001,
},
.i2c = &cx18_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Compro VideoMate H900: note that this card is analog only! */
static const struct cx18_card_pci_info cx18_pci_h900[] = {
{ PCI_DEVICE_ID_CX23418, CX18_PCI_ID_COMPRO, 0xe100 },
{ 0, 0, 0 }
};
static const struct cx18_card cx18_card_h900 = {
.type = CX18_CARD_COMPRO_H900,
.name = "Compro VideoMate H900",
.comment = "Analog TV capture supported\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_418_AV,
.hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_RESET_CTRL,
.video_inputs = {
{ CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 },
{ CX18_CARD_INPUT_SVIDEO1, 1,
CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER,
CX18_AV_AUDIO5, 0 },
{ CX18_CARD_INPUT_LINE_IN1,
CX18_AV_AUDIO_SERIAL1, 0 },
},
.radio_input = { CX18_CARD_INPUT_AUD_TUNER,
CX18_AV_AUDIO_SERIAL1, 0 },
.tuners = {
{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
},
.ddr = {
/* EtronTech EM6A9160TS-5G memory */
.chip_config = 0x50003,
.refresh = 0x753,
.timing1 = 0x24330e84,
.timing2 = 0x1f,
.tune_lane = 0,
.initial_emrs = 0,
},
.xceive_pin = 15,
.pci_list = cx18_pci_h900,
.i2c = &cx18_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Yuan MPC718: not working at the moment! */
static const struct cx18_card_pci_info cx18_pci_mpc718[] = {
{ PCI_DEVICE_ID_CX23418, CX18_PCI_ID_YUAN, 0x0718 },
{ 0, 0, 0 }
};
static const struct cx18_card cx18_card_mpc718 = {
.type = CX18_CARD_YUAN_MPC718,
.name = "Yuan MPC718 MiniPCI DVB-T/Analog",
.comment = "Experimenters needed for device to work well.\n"
"\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_418_AV,
.hw_muxer = CX18_HW_GPIO_MUX,
.hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER |
CX18_HW_GPIO_MUX | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL,
.video_inputs = {
{ CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 },
{ CX18_CARD_INPUT_SVIDEO1, 1,
CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 },
{ CX18_CARD_INPUT_SVIDEO2, 2,
CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 },
{ CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
{ CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 },
{ CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL2, 1 },
},
.tuners = {
/* XC3028 tuner */
{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
},
/* FIXME - the FM radio is just a guess and driver doesn't use SIF */
.radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 },
.ddr = {
/* Hynix HY5DU283222B DDR RAM */
.chip_config = 0x303,
.refresh = 0x3bd,
.timing1 = 0x36320966,
.timing2 = 0x1f,
.tune_lane = 0,
.initial_emrs = 2,
},
.gpio_init.initial_value = 0x1,
.gpio_init.direction = 0x3,
/* FIXME - these GPIO's are just guesses */
.gpio_audio_input = { .mask = 0x3,
.tuner = 0x1,
.linein = 0x3,
.radio = 0x1 },
.xceive_pin = 0,
.pci_list = cx18_pci_mpc718,
.i2c = &cx18_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* GoTView PCI */
static const struct cx18_card_pci_info cx18_pci_gotview_dvd3[] = {
{ PCI_DEVICE_ID_CX23418, CX18_PCI_ID_GOTVIEW, 0x3343 },
{ 0, 0, 0 }
};
static const struct cx18_card cx18_card_gotview_dvd3 = {
.type = CX18_CARD_GOTVIEW_PCI_DVD3,
.name = "GoTView PCI DVD3 Hybrid",
.comment = "Experimenters needed for device to work well.\n"
"\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_418_AV,
.hw_muxer = CX18_HW_GPIO_MUX,
.hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER |
CX18_HW_GPIO_MUX | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL,
.video_inputs = {
{ CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 },
{ CX18_CARD_INPUT_SVIDEO1, 1,
CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 },
{ CX18_CARD_INPUT_SVIDEO2, 2,
CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 },
{ CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
{ CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 },
{ CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL2, 1 },
},
.tuners = {
/* XC3028 tuner */
{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
},
/* FIXME - the FM radio is just a guess and driver doesn't use SIF */
.radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 },
.ddr = {
/* Hynix HY5DU283222B DDR RAM */
.chip_config = 0x303,
.refresh = 0x3bd,
.timing1 = 0x36320966,
.timing2 = 0x1f,
.tune_lane = 0,
.initial_emrs = 2,
},
.gpio_init.initial_value = 0x1,
.gpio_init.direction = 0x3,
.gpio_audio_input = { .mask = 0x3,
.tuner = 0x1,
.linein = 0x2,
.radio = 0x1 },
.xceive_pin = 0,
.pci_list = cx18_pci_gotview_dvd3,
.i2c = &cx18_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Conexant Raptor PAL/SECAM: note that this card is analog only! */
static const struct cx18_card_pci_info cx18_pci_cnxt_raptor_pal[] = {
{ PCI_DEVICE_ID_CX23418, CX18_PCI_ID_CONEXANT, 0x0009 },
{ 0, 0, 0 }
};
static const struct cx18_card cx18_card_cnxt_raptor_pal = {
.type = CX18_CARD_CNXT_RAPTOR_PAL,
.name = "Conexant Raptor PAL/SECAM",
.comment = "Analog TV capture supported\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_418_AV,
.hw_muxer = CX18_HW_GPIO_MUX,
.hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX,
.video_inputs = {
{ CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 },
{ CX18_CARD_INPUT_SVIDEO1, 1,
CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 },
{ CX18_CARD_INPUT_SVIDEO2, 2,
CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 },
{ CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
{ CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 },
{ CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL2, 1 },
},
.tuners = {
{ .std = V4L2_STD_PAL_SECAM, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
},
.radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, 2 },
.ddr = {
/* MT 46V16M16 memory */
.chip_config = 0x50306,
.refresh = 0x753,
.timing1 = 0x33220953,
.timing2 = 0x09,
.tune_lane = 0,
.initial_emrs = 0,
},
.gpio_init.initial_value = 0x1002,
.gpio_init.direction = 0xf002,
.gpio_audio_input = { .mask = 0xf002,
.tuner = 0x1002, /* LED D1 Tuner AF */
.linein = 0x2000, /* LED D2 Line In 1 */
.radio = 0x4002 }, /* LED D3 Tuner AF */
.pci_list = cx18_pci_cnxt_raptor_pal,
.i2c = &cx18_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Toshiba Qosmio laptop internal DVB-T/Analog Hybrid Tuner */
static const struct cx18_card_pci_info cx18_pci_toshiba_qosmio_dvbt[] = {
{ PCI_DEVICE_ID_CX23418, CX18_PCI_ID_TOSHIBA, 0x0110 },
{ 0, 0, 0 }
};
static const struct cx18_card cx18_card_toshiba_qosmio_dvbt = {
.type = CX18_CARD_TOSHIBA_QOSMIO_DVBT,
.name = "Toshiba Qosmio DVB-T/Analog",
.comment = "Experimenters and photos needed for device to work well.\n"
"\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_418_AV,
.hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_RESET_CTRL,
.video_inputs = {
{ CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE6 },
{ CX18_CARD_INPUT_SVIDEO1, 1,
CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
{ CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 },
},
.tuners = {
{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
},
.ddr = {
.chip_config = 0x202,
.refresh = 0x3bb,
.timing1 = 0x33320a63,
.timing2 = 0x0a,
.tune_lane = 0,
.initial_emrs = 0x42,
},
.xceive_pin = 15,
.pci_list = cx18_pci_toshiba_qosmio_dvbt,
.i2c = &cx18_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Leadtek WinFast PVR2100 */
/* PCI subsystem IDs identifying the Leadtek WinFast PVR2100. */
static const struct cx18_card_pci_info cx18_pci_leadtek_pvr2100[] = {
	{ PCI_DEVICE_ID_CX23418, CX18_PCI_ID_LEADTEK, 0x6f27 }, /* PVR2100 */
	{ 0, 0, 0 } /* end-of-list sentinel */
};

/*
 * Leadtek WinFast PVR2100: analog-only CX23418 card with a GPIO based
 * audio mux and an XC2028 tuner.
 */
static const struct cx18_card cx18_card_leadtek_pvr2100 = {
	.type = CX18_CARD_LEADTEK_PVR2100,
	.name = "Leadtek WinFast PVR2100",
	.comment = "Experimenters and photos needed for device to work well.\n"
		"\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
	.v4l2_capabilities = CX18_CAP_ENCODER,
	.hw_audio_ctrl = CX18_HW_418_AV,
	.hw_muxer = CX18_HW_GPIO_MUX,
	.hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX |
		  CX18_HW_GPIO_RESET_CTRL,
	.video_inputs = {
		{ CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 },
		{ CX18_CARD_INPUT_SVIDEO1, 1,
			CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
		{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 },
		{ CX18_CARD_INPUT_COMPONENT1, 1, CX18_AV_COMPONENT1 },
	},
	.audio_inputs = {
		{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
		{ CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 },
	},
	.tuners = {
		/* XC2028 tuner */
		{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
	},
	.radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 },
	.ddr = {
		/* Pointer to proper DDR config values provided by Terry Wu */
		.chip_config = 0x303,
		.refresh = 0x3bb,
		.timing1 = 0x24220e83,
		.timing2 = 0x1f,
		.tune_lane = 0,
		.initial_emrs = 0x2,
	},
	.gpio_init.initial_value = 0x6,
	.gpio_init.direction = 0x7,
	.gpio_audio_input = { .mask = 0x7,
			      .tuner = 0x6, .linein = 0x2, .radio = 0x2 },
	.xceive_pin = 1, /* presumably the tuner reset GPIO - verify */
	.pci_list = cx18_pci_leadtek_pvr2100,
	.i2c = &cx18_i2c_std,
};
/* ------------------------------------------------------------------------- */
/* Leadtek WinFast DVR3100 H */
/* PCI subsystem IDs identifying the Leadtek WinFast DVR3100 H. */
static const struct cx18_card_pci_info cx18_pci_leadtek_dvr3100h[] = {
	{ PCI_DEVICE_ID_CX23418, CX18_PCI_ID_LEADTEK, 0x6690 }, /* DVR3100 H */
	{ 0, 0, 0 } /* end-of-list sentinel */
};

/*
 * Leadtek WinFast DVR3100 H: hybrid card (note CX18_HW_DVB in hw_all).
 * Shares tuner, DDR and GPIO settings with the PVR2100 above.
 */
static const struct cx18_card cx18_card_leadtek_dvr3100h = {
	.type = CX18_CARD_LEADTEK_DVR3100H,
	.name = "Leadtek WinFast DVR3100 H",
	.comment = "Simultaneous DVB-T and Analog capture supported,\n"
		"\texcept when capturing Analog from the antenna input.\n",
	.v4l2_capabilities = CX18_CAP_ENCODER,
	.hw_audio_ctrl = CX18_HW_418_AV,
	.hw_muxer = CX18_HW_GPIO_MUX,
	.hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX |
		  CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL,
	.video_inputs = {
		{ CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 },
		{ CX18_CARD_INPUT_SVIDEO1, 1,
			CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
		{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 },
		{ CX18_CARD_INPUT_COMPONENT1, 1, CX18_AV_COMPONENT1 },
	},
	.audio_inputs = {
		{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
		{ CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 },
	},
	.tuners = {
		/* XC3028 tuner */
		{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
	},
	.radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 },
	.ddr = {
		/* Pointer to proper DDR config values provided by Terry Wu */
		.chip_config = 0x303,
		.refresh = 0x3bb,
		.timing1 = 0x24220e83,
		.timing2 = 0x1f,
		.tune_lane = 0,
		.initial_emrs = 0x2,
	},
	.gpio_init.initial_value = 0x6,
	.gpio_init.direction = 0x7,
	.gpio_audio_input = { .mask = 0x7,
			      .tuner = 0x6, .linein = 0x2, .radio = 0x2 },
	.xceive_pin = 1, /* presumably the tuner reset GPIO - verify */
	.pci_list = cx18_pci_leadtek_dvr3100h,
	.i2c = &cx18_i2c_std,
};
/* ------------------------------------------------------------------------- */
/*
 * Master list of supported cards, indexed by cx18_get_card().
 * NOTE(review): the order appears intended to correspond to the
 * CX18_CARD_* type values -- confirm before inserting new entries.
 */
static const struct cx18_card *cx18_card_list[] = {
	&cx18_card_hvr1600_esmt,
	&cx18_card_hvr1600_samsung,
	&cx18_card_h900,
	&cx18_card_mpc718,
	&cx18_card_cnxt_raptor_pal,
	&cx18_card_toshiba_qosmio_dvbt,
	&cx18_card_leadtek_pvr2100,
	&cx18_card_leadtek_dvr3100h,
	&cx18_card_gotview_dvd3,
	&cx18_card_hvr1600_s5h1411
};
/*
 * Look up a card definition by its index in cx18_card_list.
 * Returns NULL when the index is out of range.
 */
const struct cx18_card *cx18_get_card(u16 index)
{
	return (index < ARRAY_SIZE(cx18_card_list)) ?
		cx18_card_list[index] : NULL;
}
/*
 * Fill in a v4l2_input description for video input @index of the card.
 *
 * Returns 0 on success or -EINVAL if @index is out of range.  The
 * bounds check is now done *before* indexing the card's input table so
 * no out-of-range pointer is ever formed (pointer arithmetic beyond
 * the array is undefined behavior in C).
 */
int cx18_get_input(struct cx18 *cx, u16 index, struct v4l2_input *input)
{
	const struct cx18_card_video_input *card_input;
	static const char * const input_strs[] = {
		"Tuner 1",
		"S-Video 1",
		"S-Video 2",
		"Composite 1",
		"Composite 2",
		"Component 1"
	};

	if (index >= cx->nof_inputs)
		return -EINVAL;

	card_input = cx->card->video_inputs + index;
	input->index = index;
	/* video_type values start at 1, hence the -1 for the name lookup */
	strlcpy(input->name, input_strs[card_input->video_type - 1],
			sizeof(input->name));
	input->type = (card_input->video_type == CX18_CARD_INPUT_VID_TUNER ?
			V4L2_INPUT_TYPE_TUNER : V4L2_INPUT_TYPE_CAMERA);
	input->audioset = (1 << cx->nof_audio_inputs) - 1;
	input->std = (input->type == V4L2_INPUT_TYPE_TUNER) ?
				cx->tuner_std : V4L2_STD_ALL;
	return 0;
}
/*
 * Fill in a v4l2_audio description for audio input @index of the card.
 *
 * Returns 0 on success or -EINVAL if @index is out of range.  *audio is
 * zeroed even on failure (preserving the original behavior), but the
 * input-table pointer is now only formed after the bounds check, so no
 * out-of-range pointer arithmetic (undefined behavior) is performed.
 */
int cx18_get_audio_input(struct cx18 *cx, u16 index, struct v4l2_audio *audio)
{
	const struct cx18_card_audio_input *aud_input;
	static const char * const input_strs[] = {
		"Tuner 1",
		"Line In 1",
		"Line In 2"
	};

	memset(audio, 0, sizeof(*audio));
	if (index >= cx->nof_audio_inputs)
		return -EINVAL;

	aud_input = cx->card->audio_inputs + index;
	/* audio_type values start at 1, hence the -1 for the name lookup */
	strlcpy(audio->name, input_strs[aud_input->audio_type - 1],
			sizeof(audio->name));
	audio->index = index;
	audio->capability = V4L2_AUDCAP_STEREO;
	return 0;
}
| gpl-2.0 |
VilleEvitaCake/android_kernel_htc_msm8960 | drivers/scsi/isci/remote_node_table.c | 13069 | 20105 | /*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* BSD LICENSE
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
* public, protected, and private methods.
*
*
*/
#include "remote_node_table.h"
#include "remote_node_context.h"
/**
*
* @remote_node_table: This is the remote node index table from which the
* selection will be made.
* @group_table_index: This is the index to the group table from which to
* search for an available selection.
*
* This routine will find the bit position in absolute bit terms of the next 32
* + bit position. If there are available bits in the first u32 then it is
* just bit position. u32 This is the absolute bit position for an available
* group.
*/
/*
 * Find the absolute bit position of the first set bit in the group
 * table selected by @group_table_index, or
 * SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX if the table is empty.
 */
static u32 sci_remote_node_table_get_group_index(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index)
{
	u32 *table = remote_node_table->remote_node_groups[group_table_index];
	u32 dword;
	u32 bit;

	for (dword = 0; dword < remote_node_table->group_array_size; dword++) {
		if (table[dword] == 0)
			continue;
		/* This dword has at least one set bit; locate the first. */
		for (bit = 0; bit < 32; bit++) {
			if (table[dword] & (1 << bit))
				return dword * 32 + bit;
		}
	}

	return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
}
/**
*
* @out]: remote_node_table This the remote node table in which to clear the
* selector.
* @set_index: This is the remote node selector in which the change will be
* made.
* @group_index: This is the bit index in the table to be modified.
*
* This method will clear the group index entry in the specified group index
* table. none
*/
/*
 * Clear bit @group_index in the group table selected by
 * @group_table_index.
 */
static void sci_remote_node_table_clear_group_index(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index,
	u32 group_index)
{
	u32 *table;

	BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
	BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));

	/* Clear bit (group_index % 32) of dword (group_index / 32). */
	table = remote_node_table->remote_node_groups[group_table_index];
	table[group_index / 32] &= ~(1 << (group_index % 32));
}
/**
*
* @out]: remote_node_table This the remote node table in which to set the
* selector.
* @group_table_index: This is the remote node selector in which the change
* will be made.
* @group_index: This is the bit position in the table to be modified.
*
* This method will set the group index bit entry in the specified gropu index
* table. none
*/
/*
 * Set bit @group_index in the group table selected by
 * @group_table_index.
 */
static void sci_remote_node_table_set_group_index(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index,
	u32 group_index)
{
	u32 *table;

	BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
	BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));

	/* Set bit (group_index % 32) of dword (group_index / 32). */
	table = remote_node_table->remote_node_groups[group_table_index];
	table[group_index / 32] |= 1 << (group_index % 32);
}
/**
*
* @out]: remote_node_table This is the remote node table in which to modify
* the remote node availability.
* @remote_node_index: This is the remote node index that is being returned to
* the table.
*
* This method will set the remote to available in the remote node allocation
* table. none
*/
/*
 * Mark @remote_node_index as available in the allocation bitmap.
 *
 * Each dword of available_remote_nodes packs several slots of
 * SCU_STP_REMOTE_NODE_COUNT node bits.  Note that slot_normalized is
 * scaled by sizeof(u32) (== 4) -- presumably 4 bits are reserved per
 * slot even though only 3 are used; verify against the hardware layout
 * before changing this arithmetic.
 */
static void sci_remote_node_table_set_node_index(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 slot_normalized;
	u32 slot_position;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
		);

	/* Which 32-bit word the node falls in, and its offset within it. */
	dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
	dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
	slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
	slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;

	remote_node_table->available_remote_nodes[dword_location] |=
		1 << (slot_normalized + slot_position);
}
/**
*
* @out]: remote_node_table This is the remote node table from which to clear
* the available remote node bit.
* @remote_node_index: This is the remote node index which is to be cleared
* from the table.
*
* This method clears the remote node index from the table of available remote
* nodes. none
*/
/*
 * Mark @remote_node_index as no longer available in the allocation
 * bitmap (the inverse of sci_remote_node_table_set_node_index()).
 * The same sizeof(u32) slot scaling applies -- see the note on the
 * set function above's counterpart: 4 bits per slot, 3 used.
 */
static void sci_remote_node_table_clear_node_index(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 slot_position;
	u32 slot_normalized;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
		);

	/* Which 32-bit word the node falls in, and its offset within it. */
	dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
	dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
	slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
	slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;

	remote_node_table->available_remote_nodes[dword_location] &=
		~(1 << (slot_normalized + slot_position));
}
/**
*
* @out]: remote_node_table The remote node table from which the slot will be
* cleared.
* @group_index: The index for the slot that is to be cleared.
*
* This method clears the entire table slot at the specified slot index. none
*/
/*
 * Clear the entire 4-bit availability slot for @group_index, marking
 * all nodes of that group unavailable.
 */
static void sci_remote_node_table_clear_group(
	struct sci_remote_node_table *remote_node_table,
	u32 group_index)
{
	u32 dword;
	u32 slot;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (group_index / SCU_STP_REMOTE_NODE_COUNT)
		);

	dword = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
	slot = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;

	/* Knock out the whole 4-bit slot belonging to this group. */
	remote_node_table->available_remote_nodes[dword] &=
		~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (slot * 4));
}
/**
*
* @remote_node_table:
*
* THis method sets an entire remote node group in the remote node table.
*/
/*
 * Set the entire 4-bit availability slot for @group_index, marking all
 * nodes of that group available (inverse of the clear_group routine).
 */
static void sci_remote_node_table_set_group(
	struct sci_remote_node_table *remote_node_table,
	u32 group_index)
{
	u32 dword;
	u32 slot;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (group_index / SCU_STP_REMOTE_NODE_COUNT)
		);

	dword = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
	slot = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;

	/* Turn on the whole 4-bit slot belonging to this group. */
	remote_node_table->available_remote_nodes[dword] |=
		SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (slot * 4);
}
/**
*
* @remote_node_table: This is the remote node table that for which the group
* value is to be returned.
* @group_index: This is the group index to use to find the group value.
*
* This method will return the group value for the specified group index. The
* bit values at the specified remote node group index.
*/
/*
 * Return the 4-bit availability value for @group_index.
 */
static u8 sci_remote_node_table_get_group_value(
	struct sci_remote_node_table *remote_node_table,
	u32 group_index)
{
	u32 dword = remote_node_table->available_remote_nodes[
		group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD];
	u32 shift = (group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD) * 4;

	/* Shift the group's slot down and mask off the 4 slot bits. */
	return (u8)((dword >> shift) & SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);
}
/**
*
* @out]: remote_node_table The remote that which is to be initialized.
* @remote_node_entries: The number of entries to put in the table.
*
* This method will initialize the remote node table for use. none
*/
/**
 * sci_remote_node_table_initialize() - prepare a remote node table for use
 * @remote_node_table: the table to be initialized
 * @remote_node_entries: the number of entries to put in the table
 *
 * Clears all bookkeeping bitmaps, marks every entry available, and
 * seeds the group tables: full groups of SCU_STP_REMOTE_NODE_COUNT
 * entries go into group table 2, with any remainder of 2 or 1 entries
 * placed into group table 1 or 0 respectively.
 */
void sci_remote_node_table_initialize(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_entries)
{
	u32 index;

	/*
	 * Initialize the raw data we could improve the speed by only initializing
	 * those entries that we are actually going to be used */
	memset(
		remote_node_table->available_remote_nodes,
		0x00,
		sizeof(remote_node_table->available_remote_nodes)
		);

	memset(
		remote_node_table->remote_node_groups,
		0x00,
		sizeof(remote_node_table->remote_node_groups)
		);

	/* Initialize the available remote node sets (round up to dwords) */
	remote_node_table->available_nodes_array_size = (u16)
		(remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
		+ ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);

	/* Initialize each full DWORD to a FULL SET of remote nodes */
	for (index = 0; index < remote_node_entries; index++) {
		sci_remote_node_table_set_node_index(remote_node_table, index);
	}

	/* Round the group table size up as well */
	remote_node_table->group_array_size = (u16)
		(remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
		+ ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);

	for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
		/*
		 * These are all guaranteed to be full slot values so fill them in the
		 * available sets of 3 remote nodes */
		sci_remote_node_table_set_group_index(remote_node_table, 2, index);
	}

	/* Now fill in any remainders that we may find */
	if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
		sci_remote_node_table_set_group_index(remote_node_table, 1, index);
	} else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
		sci_remote_node_table_set_group_index(remote_node_table, 0, index);
	}
}
/**
*
* @out]: remote_node_table The remote node table from which to allocate a
* remote node.
* @table_index: The group index that is to be used for the search.
*
* This method will allocate a single RNi from the remote node table. The
* table index will determine from which remote node group table to search.
* This search may fail and another group node table can be specified. The
* function is designed to allow a serach of the available single remote node
* group up to the triple remote node group. If an entry is found in the
* specified table the remote node is removed and the remote node groups are
* updated. The RNi value or an invalid remote node context if an RNi can not
* be found.
*/
/*
 * Allocate a single remote node index (RNi) from the group table
 * selected by @group_table_index.  On success the node is removed
 * from the availability bitmap and the group is demoted to the next
 * lower group table (it now has one fewer free node).  Returns the
 * RNi, or SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX if the selected
 * group table has no available entries.
 */
static u16 sci_remote_node_table_allocate_single_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index)
{
	u8 index;
	u8 group_value;
	u32 group_index;
	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;

	group_index = sci_remote_node_table_get_group_index(
		remote_node_table, group_table_index);

	/* We could not find an available slot in the table selector 0 */
	if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
		group_value = sci_remote_node_table_get_group_value(
			remote_node_table, group_index);

		/* Pick the first available node bit within the group. */
		for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
			if (((1 << index) & group_value) != 0) {
				/* We have selected a bit now clear it */
				remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
							  + index);

				sci_remote_node_table_clear_group_index(
					remote_node_table, group_table_index, group_index
					);

				sci_remote_node_table_clear_node_index(
					remote_node_table, remote_node_index
					);

				/* Demote the group: it has one less free node. */
				if (group_table_index > 0) {
					sci_remote_node_table_set_group_index(
						remote_node_table, group_table_index - 1, group_index
						);
				}

				break;
			}
		}
	}

	return remote_node_index;
}
/**
*
* @remote_node_table: This is the remote node table from which to allocate the
* remote node entries.
* @group_table_index: THis is the group table index which must equal two (2)
* for this operation.
*
* This method will allocate three consecutive remote node context entries. If
* there are no remaining triple entries the function will return a failure.
* The remote node index that represents three consecutive remote node entries
* or an invalid remote node context if none can be found.
*/
/*
 * Allocate three consecutive remote node context entries from the
 * group table selected by @group_table_index (callers pass 2).
 * Returns the first RNi of the triple, or
 * SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX when no triple is free.
 */
static u16 sci_remote_node_table_allocate_triple_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index)
{
	u32 group_index = sci_remote_node_table_get_group_index(
		remote_node_table, group_table_index);

	if (group_index == SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX)
		return SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;

	/* Claim the whole group: drop it from the group table and mark
	 * all of its node slots unavailable. */
	sci_remote_node_table_clear_group_index(
		remote_node_table, group_table_index, group_index);
	sci_remote_node_table_clear_group(remote_node_table, group_index);

	return (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;
}
/**
*
* @remote_node_table: This is the remote node table from which the remote node
* allocation is to take place.
* @remote_node_count: This is ther remote node count which is one of
* SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
*
* This method will allocate a remote node that mataches the remote node count
* specified by the caller. Valid values for remote node count is
* SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). u16 This is
* the remote node index that is returned or an invalid remote node context.
*/
/*
 * Allocate remote node(s) matching @remote_node_count, which must be
 * SCU_SSP_REMOTE_NODE_COUNT (1) or SCU_STP_REMOTE_NODE_COUNT (3).
 * Returns the allocated RNi or an invalid remote node context index.
 */
u16 sci_remote_node_table_allocate_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_count)
{
	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;

	if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
		u32 table;

		/* Try the single, dual, then triple slot group tables in
		 * turn until one yields a free node. */
		for (table = 0; table < 3; table++) {
			remote_node_index =
				sci_remote_node_table_allocate_single_remote_node(
					remote_node_table, table);
			if (remote_node_index !=
			    SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
				break;
		}
	} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
		remote_node_index =
			sci_remote_node_table_allocate_triple_remote_node(
				remote_node_table, 2);
	}

	return remote_node_index;
}
/**
*
* @remote_node_table:
*
* This method will free a single remote node index back to the remote node
* table. This routine will update the remote node groups
*/
/*
 * Return a single remote node index to the table and promote its group
 * to the next higher group table (it now has one more free node):
 * empty -> single table, single -> dual, dual -> triple.
 */
static void sci_remote_node_table_release_single_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u16 remote_node_index)
{
	u32 group_index;
	u8 group_value;

	group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;

	group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);

	/*
	 * Assert that we are not trying to add an entry to a slot that is already
	 * full. */
	BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);

	if (group_value == 0x00) {
		/*
		 * There are no entries in this slot so it must be added to the single
		 * slot table. */
		sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
	} else if ((group_value & (group_value - 1)) == 0) {
		/*
		 * There is only one entry in this slot so it must be moved from the
		 * single slot table to the dual slot table */
		sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
		sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
	} else {
		/*
		 * There are two entries in the slot so it must be moved from the dual
		 * slot table to the tripple slot table. */
		sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
		sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
	}

	/* Finally mark the node itself available again. */
	sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
}
/**
*
* @remote_node_table: This is the remote node table to which the remote node
* index is to be freed.
*
* This method will release a group of three consecutive remote nodes back to
* the free remote nodes.
*/
/*
 * Return a group of three consecutive remote nodes to the free pool.
 */
static void sci_remote_node_table_release_triple_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u16 remote_node_index)
{
	u32 group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;

	/* Re-enter the group in the triple-slot table and mark all of
	 * its node slots available again. */
	sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
	sci_remote_node_table_set_group(remote_node_table, group_index);
}
/**
*
* @remote_node_table: The remote node table to which the remote node index is
* to be freed.
* @remote_node_count: This is the count of consecutive remote nodes that are
* to be freed.
*
* This method will release the remote node index back into the remote node
* table free pool.
*/
/*
 * Release @remote_node_index back into the table free pool.
 * @remote_node_count selects single (1) or triple (3) release; any
 * other count is silently ignored, as in the original if/else chain.
 */
void sci_remote_node_table_release_remote_node_index(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_count,
	u16 remote_node_index)
{
	switch (remote_node_count) {
	case SCU_SSP_REMOTE_NODE_COUNT:
		sci_remote_node_table_release_single_remote_node(
			remote_node_table, remote_node_index);
		break;
	case SCU_STP_REMOTE_NODE_COUNT:
		sci_remote_node_table_release_triple_remote_node(
			remote_node_table, remote_node_index);
		break;
	default:
		break;
	}
}
| gpl-2.0 |
alexbevi/scummvm | graphics/macgui/macwindowborder.cpp | 14 | 2828 | /* ScummVM - Graphic Adventure Engine
*
* ScummVM is the legal property of its developers, whose names
* are too numerous to list here. Please refer to the COPYRIGHT
* file distributed with this source distribution.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#include "common/system.h"
#include "graphics/macgui/macwindowborder.h"
#include "graphics/macgui/macwindowmanager.h"
namespace Graphics {
using namespace Graphics::MacGUIConstants;
// All members start in the "no border loaded, no offsets" state.
MacWindowBorder::MacWindowBorder() :
		_activeInitialized(false), _inactiveInitialized(false),
		_activeBorder(nullptr), _inactiveBorder(nullptr),
		_hasOffsets(false) {
}
// Free any borders that were installed.  `delete` on a null pointer is
// a well-defined no-op, so the original `if (ptr)` guards were
// redundant and have been removed.
MacWindowBorder::~MacWindowBorder() {
	delete _activeBorder;
	delete _inactiveBorder;
}
// Report whether the requested border (active or inactive) is loaded.
bool MacWindowBorder::hasBorder(bool active) {
	if (active)
		return _activeInitialized;
	return _inactiveInitialized;
}
// Install the active-window border bitmap; may only be called once.
void MacWindowBorder::addActiveBorder(TransparentSurface *source) {
	assert(_activeBorder == nullptr);

	_activeInitialized = true;
	_activeBorder = new NinePatchBitmap(source, true);
}
// Install the inactive-window border bitmap; may only be called once.
void MacWindowBorder::addInactiveBorder(TransparentSurface *source) {
	assert(_inactiveBorder == nullptr);

	_inactiveInitialized = true;
	_inactiveBorder = new NinePatchBitmap(source, true);
}
// True once setOffsets() has been called at least once.
bool MacWindowBorder::hasOffsets() {
	return _hasOffsets;
}
// Record the border thicknesses.  Stored in the fixed order
// left, right, top, bottom -- presumably matching the MacBorderOffset
// enum used by getOffset(); verify against its declaration.
void MacWindowBorder::setOffsets(int left, int right, int top, int bottom) {
	_borderOffsets[0] = left;
	_borderOffsets[1] = right;
	_borderOffsets[2] = top;
	_borderOffsets[3] = bottom;
	_hasOffsets = true;
}
// Return one stored border thickness.  No bounds checking: the caller
// must pass a valid MacBorderOffset value.
int MacWindowBorder::getOffset(MacBorderOffset offset) {
	return _borderOffsets[offset];
}
// Draw the selected (active/inactive) nine-patch border into
// `destination`, using kColorGreen2 as the transparent key colour.
void MacWindowBorder::blitBorderInto(ManagedSurface &destination, bool active) {
	TransparentSurface srf;
	NinePatchBitmap *src = active ? _activeBorder : _inactiveBorder;

	srf.create(destination.w, destination.h, destination.format);
	srf.fillRect(Common::Rect(0, 0, srf.w, srf.h), kColorGreen2);

	// PaletteManager::grabPalette() writes interleaved RGB data, i.e.
	// 3 bytes per colour entry.  The original buffer was only
	// kColorCount bytes, which grabPalette() overran by
	// 2 * kColorCount bytes (stack buffer overflow).
	byte palette[kColorCount * 3];
	g_system->getPaletteManager()->grabPalette(palette, 0, kColorCount);

	src->blit(srf, 0, 0, srf.w, srf.h, palette, kColorCount);
	destination.transBlitFrom(srf, kColorGreen2);
	srf.free();
}
} // End of namespace Graphics
| gpl-2.0 |
sktjdgns1189/android_kernel_samsung_c1skt | drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_drv.c | 14 | 13814 | /* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_drv.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Samsung Graphics 2D driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <plat/cpu.h>
#include <plat/fimg2d.h>
#include <plat/s5p-sysmmu.h>
#include <mach/dev.h>
#ifdef CONFIG_PM_RUNTIME
#include <linux/pm_runtime.h>
#endif
#include "fimg2d.h"
#include "fimg2d_clk.h"
#include "fimg2d_ctx.h"
#include "fimg2d_helper.h"
#include "fimg2d_cache.h"
#define CTX_TIMEOUT msecs_to_jiffies(1000)
#define LV1_SHIFT 20
#define LV2_BASE_MASK 0x3ff
#define LV2_PT_MASK 0xff000
#define LV2_SHIFT 12
#define LV1_DESC_MASK 0x3
#define LV2_VALUE_META 0xc7f
#define LV2_VALUE_BASE_MASK 0xfff
static struct fimg2d_control *info;
/* Workqueue handler: runs the queued blit on the worker thread. */
static void fimg2d_worker(struct work_struct *work)
{
	fimg2d_debug("start kernel thread\n");
	info->blit(info);
}
static DECLARE_WORK(fimg2d_work, fimg2d_worker);
/**
 * fimg2d_irq - G2D interrupt handler
 * @irq: irq number
 * @dev_id: pointer to private data
 *
 * Stops the engine via info->stop().  If the gate clock is currently
 * off, it is switched on around the stop call so the device registers
 * can be accessed safely.
 */
static irqreturn_t fimg2d_irq(int irq, void *dev_id)
{
	fimg2d_debug("irq\n");
	if (!atomic_read(&info->clkon)) {
		fimg2d_clk_on(info);
		info->stop(info);
		fimg2d_clk_off(info);
	} else {
		info->stop(info);
	}

	return IRQ_HANDLED;
}
/*
 * System MMU fault handler for the G2D block.
 *
 * Logs the fault, dumps the command currently being processed, then
 * walks the faulting context's two-level page table.  If the level-2
 * descriptor is empty, the entry is patched to point at the driver's
 * dummy buffer (info->dbuffer_addr) so the engine can proceed, and
 * info->fault_addr is recorded so the ioctl path can report -EFAULT;
 * otherwise the outer cache is cleaned for the page-table entry.
 * Always returns 0.
 */
static int fimg2d_sysmmu_fault_handler(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
		unsigned long pgtable_base, unsigned long fault_addr)
{
	struct fimg2d_bltcmd *cmd;
	unsigned long *pgd;
	unsigned long *lv1d, *lv2d;

	if (itype == SYSMMU_PAGEFAULT) {
		printk(KERN_ERR "[%s] sysmmu page fault(0x%lx), pgd(0x%lx)\n",
				__func__, fault_addr, pgtable_base);
	} else {
		printk(KERN_ERR "[%s] sysmmu interrupt "
				"type(%d) pgd(0x%lx) addr(0x%lx)\n",
				__func__, itype, pgtable_base, fault_addr);
	}

	cmd = fimg2d_get_first_command(info);
	if (!cmd) {
		printk(KERN_ERR "[%s] null command\n", __func__);
		goto next;
	}

	/* The fault must belong to the command currently at the head. */
	if (cmd->ctx->mm->pgd != phys_to_virt(pgtable_base)) {
		printk(KERN_ERR "[%s] pgtable base is different from current command\n",
				__func__);
		goto next;
	}

	fimg2d_dump_command(cmd);

	/* Walk the level-1 table of the faulting context. */
	pgd = (unsigned long *)cmd->ctx->mm->pgd;
	lv1d = pgd + (fault_addr >> LV1_SHIFT);

	printk(KERN_ERR " Level 1 descriptor(0x%lx)\n", *lv1d);

	/* 0x1 marks a coarse page-table descriptor; otherwise just clean
	 * the outer cache for the entry and bail out. */
	if ((*lv1d & LV1_DESC_MASK) != 0x1) {
		fimg2d_clean_outer_pagetable(cmd->ctx->mm, fault_addr, 4);
		goto next;
	}

	/* Locate the level-2 descriptor for the faulting address. */
	lv2d = (unsigned long *)phys_to_virt(*lv1d & ~LV2_BASE_MASK) +
		((fault_addr & LV2_PT_MASK) >> LV2_SHIFT);

	printk(KERN_ERR " Level 2 descriptor(0x%lx)\n", *lv2d);

	if (*lv2d == 0) {
		/* Empty entry: redirect it to the dummy buffer and flag
		 * the fault so the ioctl returns -EFAULT. */
		fimg2d_mmutable_value_replace(cmd, fault_addr,
			(info->dbuffer_addr & ~LV2_VALUE_BASE_MASK) | LV2_VALUE_META);
		info->fault_addr = fault_addr;
	} else
		fimg2d_clean_outer_pagetable(cmd->ctx->mm, fault_addr, 4);

next:
	return 0;
}
/*
 * Block until every command queued by @ctx has completed
 * (ctx->ncmd == 0).  On timeout the blit worker is (re)kicked and the
 * wait is retried, so this function only returns once the context has
 * fully drained.
 */
static void fimg2d_context_wait(struct fimg2d_context *ctx)
{
	while (atomic_read(&ctx->ncmd)) {
		if (!wait_event_timeout(ctx->wait_q, !atomic_read(&ctx->ncmd), CTX_TIMEOUT)) {
			atomic_set(&info->active, 1);
			queue_work(info->work_q, &fimg2d_work);
			printk(KERN_ERR "[%s] ctx %p cmd wait timeout\n", __func__, ctx);
		}
	}
}
/*
 * Kick the blit worker for @ctx (unless one is already active) and
 * wait for the context's commands to finish.  The bltlock spinlock
 * makes the active-check plus queue_work step atomic with respect to
 * other requesters.
 */
static void fimg2d_request_bitblt(struct fimg2d_context *ctx)
{
	spin_lock(&info->bltlock);
	if (!atomic_read(&info->active)) {
		atomic_set(&info->active, 1);
		fimg2d_debug("dispatch ctx %p to kernel thread\n", ctx);
		queue_work(info->work_q, &fimg2d_work);
	}
	spin_unlock(&info->bltlock);
	fimg2d_context_wait(ctx);
}
static int fimg2d_open(struct inode *inode, struct file *file)
{
struct fimg2d_context *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
printk(KERN_ERR "[%s] not enough memory for ctx\n", __func__);
return -ENOMEM;
}
file->private_data = (void *)ctx;
ctx->mm = current->mm;
fimg2d_debug("ctx %p current pgd %p init_mm pgd %p\n",
ctx, (unsigned long *)ctx->mm->pgd,
(unsigned long *)init_mm.pgd);
ctx->pgd_clone = kzalloc(L1_DESCRIPTOR_SIZE, GFP_KERNEL);
fimg2d_add_context(info, ctx);
return 0;
}
/*
 * Per-fd release: busy-wait (2 ms steps) until every command of this
 * context has drained, then unregister the context and free its
 * memory.  Always returns 0.
 */
static int fimg2d_release(struct inode *inode, struct file *file)
{
	struct fimg2d_context *ctx = file->private_data;

	fimg2d_debug("ctx %p\n", ctx);
	while (1) {
		if (!atomic_read(&ctx->ncmd))
			break;

		mdelay(2);
	}
	fimg2d_del_context(info, ctx);

	kfree(ctx->pgd_clone);
	kfree(ctx);
	return 0;
}
/* No-op stub: mmap requests succeed but no mapping is established. */
static int fimg2d_mmap(struct file *file, struct vm_area_struct *vma)
{
	return 0;
}
/* No-op stub: poll always reports no events ready. */
static unsigned int fimg2d_poll(struct file *file, struct poll_table_struct *wait)
{
	return 0;
}
/*
 * fimg2d_ioctl - ioctl dispatcher for the fimg2d device
 *
 * BITBLT_BLIT     submit a blit command and block until it completes
 * BITBLT_SYNC     placeholder (see FIXME below)
 * BITBLT_VERSION  report hardware/software version to user space
 * BITBLT_SECURE   toggle secure mode, draining the command queue first
 * BITBLT_DBUFFER  record a debug/destination buffer address
 *
 * Returns 0 on success or a negative errno.
 */
static long fimg2d_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret = 0;
struct fimg2d_context *ctx;
struct fimg2d_platdata *pdata;
struct fimg2d_blit blit;
struct fimg2d_version ver;
struct fimg2d_image dst;
ctx = file->private_data;
if (!ctx) {
printk(KERN_ERR "[%s] missing ctx\n", __func__);
return -EFAULT;
}
switch (cmd) {
case FIMG2D_BITBLT_BLIT:
/* refuse normal blits while secure mode is enabled */
if (info->secure)
return -EFAULT;
if (copy_from_user(&blit, (void *)arg, sizeof(blit)))
return -EFAULT;
/* pull in the destination image descriptor, if one was given */
if (blit.dst)
if (copy_from_user(&dst, (void *)blit.dst, sizeof(dst)))
return -EFAULT;
#ifdef CONFIG_BUSFREQ_OPP
#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
#if defined(CONFIG_BUSFREQ_ELEVATION)
dev_lock(info->bus_dev, info->dev, 267160);
#else
#if defined(CONFIG_MACH_P4NOTE)
/* P4Note(SHV-E230x,SHW-M480x) : sometime screen-noise in vertical mode.
 * reason : when layer-composition changing between GPU and Overlay. insufficient bandwidth
 * solution : increase bandwidth
 * by : mingu85.jeon (LSI)
 */
if (blit.param.rotate == ROT_90 || blit.param.rotate == ROT_270)
dev_lock(info->bus_dev, info->dev, 267160);
else
#endif
dev_lock(info->bus_dev, info->dev, 160160);
#endif
#endif
#endif
/* Skia blits to user memory must not race the page allocator */
if ((blit.dst) && (dst.addr.type == ADDR_USER)
&& (blit.seq_no == SEQ_NO_BLT_SKIA))
if (!down_write_trylock(&page_alloc_slow_rwsem))
ret = -EAGAIN;
/* NOTE(review): when blit.dst is NULL, 'dst' is uninitialized
 * yet dst.addr.type is passed below - confirm callers always
 * set blit.dst for this path. */
if (ret != -EAGAIN)
ret = fimg2d_add_command(info, ctx, &blit, dst.addr.type);
if (!ret) {
/* command queued successfully: run it and wait */
fimg2d_request_bitblt(ctx);
}
#ifdef PERF_PROFILE
perf_print(ctx, blit.seq_no);
perf_clear(ctx);
#endif
if ((blit.dst) && (dst.addr.type == ADDR_USER)
&& (blit.seq_no == SEQ_NO_BLT_SKIA)
&& ret != -EAGAIN)
up_write(&page_alloc_slow_rwsem);
#ifdef CONFIG_BUSFREQ_OPP
#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
dev_unlock(info->bus_dev, info->dev);
#endif
#endif
/* a sysmmu fault during the blit overrides the return code */
if (info->fault_addr) {
printk(KERN_INFO "Return by G2D fault handler");
info->fault_addr = 0;
ret = -EFAULT;
}
break;
case FIMG2D_BITBLT_SYNC:
fimg2d_debug("FIMG2D_BITBLT_SYNC ctx: %p\n", ctx);
/* FIXME: */
break;
case FIMG2D_BITBLT_VERSION:
pdata = to_fimg2d_plat(info->dev);
ver.hw = pdata->hw_ver;
ver.sw = 0;
fimg2d_debug("fimg2d version, hw: 0x%x sw: 0x%x\n",
ver.hw, ver.sw);
if (copy_to_user((void *)arg, &ver, sizeof(ver)))
return -EFAULT;
break;
case FIMG2D_BITBLT_SECURE:
if (copy_from_user(&info->secure,
(unsigned int *)arg,
sizeof(unsigned int))) {
printk(KERN_ERR
"[%s] failed to FIMG2D_BITBLT_SECURE: copy_from_user error\n\n",
__func__);
return -EFAULT;
}
/* busy-wait until all queued commands have been consumed */
while (1) {
if (fimg2d_queue_is_empty(&info->cmd_q))
break;
mdelay(2);
}
break;
case FIMG2D_BITBLT_DBUFFER:
if (copy_from_user(&info->dbuffer_addr,
(unsigned long *)arg,
sizeof(unsigned long))) {
printk(KERN_ERR
"[%s] failed to FIMG2D_BITBLT_DBUFFER: copy_from_user error\n\n",
__func__);
return -EFAULT;
}
break;
default:
printk(KERN_ERR "[%s] unknown ioctl\n", __func__);
ret = -EFAULT;
break;
}
return ret;
}
/* fops */
/* file operations for the fimg2d misc device node */
static const struct file_operations fimg2d_fops = {
.owner = THIS_MODULE,
.open = fimg2d_open,
.release = fimg2d_release,
.mmap = fimg2d_mmap,
.poll = fimg2d_poll,
.unlocked_ioctl = fimg2d_ioctl,
};
/* miscdev */
/* misc device exposing the driver as /dev/fimg2d */
static struct miscdevice fimg2d_dev = {
.minor = FIMG2D_MINOR,
.name = "fimg2d",
.fops = &fimg2d_fops,
};
/* Initialise the freshly allocated controller state: status flags,
 * locks, command queue, wait queue, blitter ops and the single-threaded
 * worker queue.  Returns 0, or -ENOMEM if the workqueue cannot be
 * created (the caller then frees 'info'). */
static int fimg2d_setup_controller(struct fimg2d_control *info)
{
atomic_set(&info->suspended, 0);
atomic_set(&info->clkon, 0);
atomic_set(&info->busy, 0);
atomic_set(&info->nctx, 0);
atomic_set(&info->active, 0);
info->secure = 0;
info->fault_addr = 0;
spin_lock_init(&info->bltlock);
INIT_LIST_HEAD(&info->cmd_q);
init_waitqueue_head(&info->wait_q);
fimg2d_register_ops(info);
info->work_q = create_singlethread_workqueue("kfimg2dd");
if (!info->work_q)
return -ENOMEM;
return 0;
}
/*
 * fimg2d_probe - platform driver probe
 *
 * Allocates the global controller state, maps the SFR region, sets up
 * clocks and the IRQ, then registers the misc device node.
 *
 * Error-path fixes over the previous version:
 *  - free_irq() now passes the same dev_id (info) given to
 *    request_irq(); a NULL dev_id never matches the irqaction.
 *  - the region from request_mem_region() is released with
 *    release_mem_region() only; the old code additionally kfree'd the
 *    resource that release_mem_region() had already freed (double free).
 *  - release_resource() is no longer called on the platform device's
 *    own resource (res), which this driver does not own.
 */
static int fimg2d_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct fimg2d_platdata *pdata;
	int ret;

	pdata = to_fimg2d_plat(&pdev->dev);
	if (!pdata) {
		printk(KERN_ERR "FIMG2D failed to get platform data\n");
		ret = -ENOMEM;
		goto err_plat;
	}

	/* global controller structure */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		printk(KERN_ERR "FIMG2D failed to allocate memory for controller\n");
		ret = -ENOMEM;
		goto err_plat;
	}

	/* locks, queues, worker thread */
	ret = fimg2d_setup_controller(info);
	if (ret) {
		printk(KERN_ERR "FIMG2D failed to setup controller\n");
		goto err_setup;
	}
	info->dev = &pdev->dev;

	/* memory region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		printk(KERN_ERR "FIMG2D failed to get resource\n");
		ret = -ENOENT;
		goto err_res;
	}
	info->mem = request_mem_region(res->start, resource_size(res),
					pdev->name);
	if (!info->mem) {
		printk(KERN_ERR "FIMG2D failed to request memory region\n");
		ret = -ENOMEM;
		goto err_region;
	}

	/* map the SFR block */
	info->regs = ioremap(res->start, resource_size(res));
	if (!info->regs) {
		printk(KERN_ERR "FIMG2D failed to ioremap for SFR\n");
		ret = -ENOENT;
		goto err_map;
	}
	fimg2d_debug("device name: %s base address: 0x%lx\n",
			pdev->name, (unsigned long)res->start);

	/* clock setup */
	ret = fimg2d_clk_setup(info);
	if (ret) {
		printk(KERN_ERR "FIMG2D failed to setup clk\n");
		ret = -ENOENT;
		goto err_clk;
	}

	/* irq */
	info->irq = platform_get_irq(pdev, 0);
	if (!info->irq) {
		printk(KERN_ERR "FIMG2D failed to get irq resource\n");
		ret = -ENOENT;
		goto err_irq;
	}
	fimg2d_debug("irq: %d\n", info->irq);

	ret = request_irq(info->irq, fimg2d_irq, IRQF_DISABLED, pdev->name, info);
	if (ret) {
		printk(KERN_ERR "FIMG2D failed to request irq\n");
		ret = -ENOENT;
		goto err_irq;
	}

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_enable(info->dev);
	fimg2d_debug("enable runtime pm\n");
#endif

#ifdef CONFIG_BUSFREQ_OPP
#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
	/* To lock bus frequency in OPP mode */
	info->bus_dev = dev_get("exynos-busfreq");
#endif
#endif

	s5p_sysmmu_set_fault_handler(info->dev, fimg2d_sysmmu_fault_handler);
	fimg2d_debug("register sysmmu page fault handler\n");

	/* finally expose the device node */
	ret = misc_register(&fimg2d_dev);
	if (ret) {
		printk(KERN_ERR "FIMG2D failed to register misc driver\n");
		goto err_reg;
	}

	printk(KERN_INFO "Samsung Graphics 2D driver, (c) 2011 Samsung Electronics\n");
	return 0;

err_reg:
	/* dev_id must match the one given to request_irq() above */
	free_irq(info->irq, info);
err_irq:
	fimg2d_clk_release(info);
err_clk:
	iounmap(info->regs);
err_map:
	/* releases *and* frees the resource allocated by request_mem_region() */
	release_mem_region(res->start, resource_size(res));
err_region:
err_res:
	destroy_workqueue(info->work_q);
err_setup:
	kfree(info);
err_plat:
	return ret;
}
/*
 * fimg2d_remove - platform driver remove
 *
 * Tears down everything fimg2d_probe() set up.  The misc device is
 * deregistered first so no new opens can race the teardown, and the
 * global info structure is freed last.
 *
 * Fix over previous version: free_irq() now passes the dev_id (info)
 * used at request_irq() time; with NULL the irqaction was never
 * matched and the handler stayed installed.
 */
static int fimg2d_remove(struct platform_device *pdev)
{
	/* stop new file operations before freeing anything */
	misc_deregister(&fimg2d_dev);
	free_irq(info->irq, info);
	if (info->mem) {
		iounmap(info->regs);
		/* manual equivalent of release_mem_region() */
		release_resource(info->mem);
		kfree(info->mem);
	}
	destroy_workqueue(info->work_q);
#ifdef CONFIG_PM_RUNTIME
	pm_runtime_disable(&pdev->dev);
	fimg2d_debug("disable runtime pm\n");
#endif
	kfree(info);
	return 0;
}
/* System suspend: flag the device suspended and busy-wait (2 ms steps)
 * until the command queue drains. */
static int fimg2d_suspend(struct device *dev)
{
fimg2d_debug("suspend... start\n");
atomic_set(&info->suspended, 1);
while (1) {
if (fimg2d_queue_is_empty(&info->cmd_q))
break;
mdelay(2);
}
fimg2d_debug("suspend... done\n");
return 0;
}
/* System resume: simply clear the suspended flag. */
static int fimg2d_resume(struct device *dev)
{
fimg2d_debug("resume... start\n");
atomic_set(&info->suspended, 0);
fimg2d_debug("resume... done\n");
return 0;
}
#ifdef CONFIG_PM_RUNTIME
/* Runtime-PM suspend stub: nothing to do, clocks are managed elsewhere. */
static int fimg2d_runtime_suspend(struct device *dev)
{
fimg2d_debug("runtime suspend... done\n");
return 0;
}
/* Runtime-PM resume stub: nothing to do, clocks are managed elsewhere. */
static int fimg2d_runtime_resume(struct device *dev)
{
fimg2d_debug("runtime resume... done\n");
return 0;
}
#endif
/* power-management callbacks (system sleep + optional runtime PM) */
static const struct dev_pm_ops fimg2d_pm_ops = {
.suspend = fimg2d_suspend,
.resume = fimg2d_resume,
#ifdef CONFIG_PM_RUNTIME
.runtime_suspend = fimg2d_runtime_suspend,
.runtime_resume = fimg2d_runtime_resume,
#endif
};
/* platform driver binding for the "s5p-fimg2d" device */
static struct platform_driver fimg2d_driver = {
.probe = fimg2d_probe,
.remove = fimg2d_remove,
.driver = {
.owner = THIS_MODULE,
.name = "s5p-fimg2d",
.pm = &fimg2d_pm_ops,
},
};
/* Module init: register the platform driver. */
static int __init fimg2d_register(void)
{
return platform_driver_register(&fimg2d_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit fimg2d_unregister(void)
{
platform_driver_unregister(&fimg2d_driver);
}
module_init(fimg2d_register);
module_exit(fimg2d_unregister);
MODULE_AUTHOR("Eunseok Choi <es10.choi@samsung.com>");
MODULE_AUTHOR("Jinsung Yang <jsgood.yang@samsung.com>");
MODULE_DESCRIPTION("Samsung Graphics 2D driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
aib/glito | intl/finddomain.c | 14 | 5707 | /* Handle list of needed message catalogs
Copyright (C) 1995-1999, 2000, 2001 Free Software Foundation, Inc.
Written by Ulrich Drepper <drepper@gnu.org>, 1995.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#include <stdio.h>
#include <sys/types.h>
#include <stdlib.h>
#include <string.h>
#if defined HAVE_UNISTD_H || defined _LIBC
# include <unistd.h>
#endif
#include "gettextP.h"
#ifdef _LIBC
# include <libintl.h>
#else
# include "libgnuintl.h"
#endif
/* @@ end of prolog @@ */
/* List of already loaded domains. */
static struct loaded_l10nfile *_nl_loaded_domains;
/* Return a data structure describing the message catalog described by
the DOMAINNAME and CATEGORY parameters with respect to the currently
established bindings. */
struct loaded_l10nfile *
internal_function
_nl_find_domain (dirname, locale, domainname, domainbinding)
const char *dirname;
char *locale;
const char *domainname;
struct binding *domainbinding;
{
struct loaded_l10nfile *retval;
const char *language;
const char *modifier;
const char *territory;
const char *codeset;
const char *normalized_codeset;
const char *special;
const char *sponsor;
const char *revision;
const char *alias_value;
int mask;
/* LOCALE can consist of up to four recognized parts for the XPG syntax:
language[_territory[.codeset]][@modifier]
and six parts for the CEN syntax:
language[_territory][+audience][+special][,[sponsor][_revision]]
Beside the first part all of them are allowed to be missing. If
the full specified locale is not found, the less specific one are
looked for. The various parts will be stripped off according to
the following order:
(1) revision
(2) sponsor
(3) special
(4) codeset
(5) normalized codeset
(6) territory
(7) audience/modifier
*/
/* If we have already tested for this locale entry there has to
be one data set in the list of loaded domains. */
retval = _nl_make_l10nflist (&_nl_loaded_domains, dirname,
strlen (dirname) + 1, 0, locale, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, domainname, 0);
if (retval != NULL)
{
/* We know something about this locale. */
int cnt;
if (retval->decided == 0)
_nl_load_domain (retval, domainbinding);
if (retval->data != NULL)
return retval;
/* Walk the fallback chain, loading successors until one has data. */
for (cnt = 0; retval->successor[cnt] != NULL; ++cnt)
{
if (retval->successor[cnt]->decided == 0)
_nl_load_domain (retval->successor[cnt], domainbinding);
if (retval->successor[cnt]->data != NULL)
break;
}
/* NOTE(review): cnt >= 0 is always true after the loop, so this
unconditionally returns retval even when no successor had data;
this matches upstream gettext but looks like a leftover. */
return cnt >= 0 ? retval : NULL;
/* NOTREACHED */
}
/* See whether the locale value is an alias. If yes its value
*overwrites* the alias name. No test for the original value is
done. */
alias_value = _nl_expand_alias (locale);
if (alias_value != NULL)
{
/* Take a private copy so the parsed parts below point into
memory we own; freed at the end of this function. */
#if defined _LIBC || defined HAVE_STRDUP
locale = strdup (alias_value);
if (locale == NULL)
return NULL;
#else
size_t len = strlen (alias_value) + 1;
locale = (char *) malloc (len);
if (locale == NULL)
return NULL;
memcpy (locale, alias_value, len);
#endif
}
/* Now we determine the single parts of the locale name. First
look for the language. Termination symbols are `_' and `@' if
we use XPG4 style, and `_', `+', and `,' if we use CEN syntax. */
mask = _nl_explode_name (locale, &language, &modifier, &territory,
&codeset, &normalized_codeset, &special,
&sponsor, &revision);
/* Create all possible locale entries which might be interested in
generalization. */
retval = _nl_make_l10nflist (&_nl_loaded_domains, dirname,
strlen (dirname) + 1, mask, language, territory,
codeset, normalized_codeset, modifier, special,
sponsor, revision, domainname, 1);
if (retval == NULL)
/* This means we are out of core. */
return NULL;
if (retval->decided == 0)
_nl_load_domain (retval, domainbinding);
if (retval->data == NULL)
{
int cnt;
/* Best match had no catalog: pre-load the fallback chain so a
later lookup can use it. */
for (cnt = 0; retval->successor[cnt] != NULL; ++cnt)
{
if (retval->successor[cnt]->decided == 0)
_nl_load_domain (retval->successor[cnt], domainbinding);
if (retval->successor[cnt]->data != NULL)
break;
}
}
/* The room for an alias was dynamically allocated. Free it now. */
if (alias_value != NULL)
free (locale);
/* The space for normalized_codeset is dynamically allocated. Free it. */
if (mask & XPG_NORM_CODESET)
free ((void *) normalized_codeset);
return retval;
}
#ifdef _LIBC
static void __attribute__ ((unused))
free_mem (void)
{
  /* Tear down the cache of loaded message catalogs at process exit.  */
  struct loaded_l10nfile *node;
  struct loaded_l10nfile *next;

  for (node = _nl_loaded_domains; node != NULL; node = next)
    {
      next = node->next;
      /* Unload the catalog data if it was ever loaded.  */
      if (node->data != NULL)
	_nl_unload_domain ((struct loaded_domain *) node->data);
      free ((char *) node->filename);
      free (node);
    }
}
text_set_element (__libc_subfreeres, free_mem);
#endif
| gpl-2.0 |
rogersb11/android_kernel_samsung_smdk4x12 | drivers/hid/hid-picolcd.c | 14 | 78375 | /***************************************************************************
* Copyright (C) 2010 by Bruno Prémont <bonbons@linux-vserver.org> *
* *
* Based on Logitech G13 driver (v0.4) *
* Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu> *
* *
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, version 2 of the License. *
* *
* This driver is distributed in the hope that it will be useful, but *
* WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
* General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this software. If not see <http://www.gnu.org/licenses/>. *
***************************************************************************/
#include <linux/hid.h>
#include <linux/hid-debug.h>
#include <linux/input.h>
#include "hid-ids.h"
#include "usbhid/usbhid.h"
#include <linux/usb.h>
#include <linux/fb.h>
#include <linux/vmalloc.h>
#include <linux/backlight.h>
#include <linux/lcd.h>
#include <linux/leds.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/completion.h>
#include <linux/uaccess.h>
#define PICOLCD_NAME "PicoLCD (graphic)"
/* Report numbers */
#define REPORT_ERROR_CODE 0x10 /* LCD: IN[16] */
#define ERR_SUCCESS 0x00
#define ERR_PARAMETER_MISSING 0x01
#define ERR_DATA_MISSING 0x02
#define ERR_BLOCK_READ_ONLY 0x03
#define ERR_BLOCK_NOT_ERASABLE 0x04
#define ERR_BLOCK_TOO_BIG 0x05
#define ERR_SECTION_OVERFLOW 0x06
#define ERR_INVALID_CMD_LEN 0x07
#define ERR_INVALID_DATA_LEN 0x08
#define REPORT_KEY_STATE 0x11 /* LCD: IN[2] */
#define REPORT_IR_DATA 0x21 /* LCD: IN[63] */
#define REPORT_EE_DATA 0x32 /* LCD: IN[63] */
#define REPORT_MEMORY 0x41 /* LCD: IN[63] */
#define REPORT_LED_STATE 0x81 /* LCD: OUT[1] */
#define REPORT_BRIGHTNESS 0x91 /* LCD: OUT[1] */
#define REPORT_CONTRAST 0x92 /* LCD: OUT[1] */
#define REPORT_RESET 0x93 /* LCD: OUT[2] */
#define REPORT_LCD_CMD 0x94 /* LCD: OUT[63] */
#define REPORT_LCD_DATA 0x95 /* LCD: OUT[63] */
#define REPORT_LCD_CMD_DATA 0x96 /* LCD: OUT[63] */
#define REPORT_EE_READ 0xa3 /* LCD: OUT[63] */
#define REPORT_EE_WRITE 0xa4 /* LCD: OUT[63] */
#define REPORT_ERASE_MEMORY 0xb2 /* LCD: OUT[2] */
#define REPORT_READ_MEMORY 0xb3 /* LCD: OUT[3] */
#define REPORT_WRITE_MEMORY 0xb4 /* LCD: OUT[63] */
#define REPORT_SPLASH_RESTART 0xc1 /* LCD: OUT[1] */
#define REPORT_EXIT_KEYBOARD 0xef /* LCD: OUT[2] */
#define REPORT_VERSION 0xf1 /* LCD: IN[2],OUT[1] Bootloader: IN[2],OUT[1] */
#define REPORT_BL_ERASE_MEMORY 0xf2 /* Bootloader: IN[36],OUT[4] */
#define REPORT_BL_READ_MEMORY 0xf3 /* Bootloader: IN[36],OUT[4] */
#define REPORT_BL_WRITE_MEMORY 0xf4 /* Bootloader: IN[36],OUT[36] */
#define REPORT_DEVID 0xf5 /* LCD: IN[5], OUT[1] Bootloader: IN[5],OUT[1] */
#define REPORT_SPLASH_SIZE 0xf6 /* LCD: IN[4], OUT[1] */
#define REPORT_HOOK_VERSION 0xf7 /* LCD: IN[2], OUT[1] */
#define REPORT_EXIT_FLASHER 0xff /* Bootloader: OUT[2] */
#ifdef CONFIG_HID_PICOLCD_FB
/* Framebuffer
*
* The PicoLCD use a Topway LCD module of 256x64 pixel
* This display area is tiled over 4 controllers with 8 tiles
* each. Each tile has 8x64 pixel, each data byte representing
* a 1-bit wide vertical line of the tile.
*
* The display can be updated at a tile granularity.
*
* Chip 1 Chip 2 Chip 3 Chip 4
* +----------------+----------------+----------------+----------------+
* | Tile 1 | Tile 1 | Tile 1 | Tile 1 |
* +----------------+----------------+----------------+----------------+
* | Tile 2 | Tile 2 | Tile 2 | Tile 2 |
* +----------------+----------------+----------------+----------------+
* ...
* +----------------+----------------+----------------+----------------+
* | Tile 8 | Tile 8 | Tile 8 | Tile 8 |
* +----------------+----------------+----------------+----------------+
*/
#define PICOLCDFB_NAME "picolcdfb"
#define PICOLCDFB_WIDTH (256)
#define PICOLCDFB_HEIGHT (64)
#define PICOLCDFB_SIZE (PICOLCDFB_WIDTH * PICOLCDFB_HEIGHT / 8)
#define PICOLCDFB_UPDATE_RATE_LIMIT 10
#define PICOLCDFB_UPDATE_RATE_DEFAULT 2
/* Framebuffer visual structures */
static const struct fb_fix_screeninfo picolcdfb_fix = {
.id = PICOLCDFB_NAME,
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO01,
.xpanstep = 0,
.ypanstep = 0,
.ywrapstep = 0,
.line_length = PICOLCDFB_WIDTH / 8,
.accel = FB_ACCEL_NONE,
};
static const struct fb_var_screeninfo picolcdfb_var = {
.xres = PICOLCDFB_WIDTH,
.yres = PICOLCDFB_HEIGHT,
.xres_virtual = PICOLCDFB_WIDTH,
.yres_virtual = PICOLCDFB_HEIGHT,
.width = 103,
.height = 26,
.bits_per_pixel = 1,
.grayscale = 1,
.red = {
.offset = 0,
.length = 1,
.msb_right = 0,
},
.green = {
.offset = 0,
.length = 1,
.msb_right = 0,
},
.blue = {
.offset = 0,
.length = 1,
.msb_right = 0,
},
.transp = {
.offset = 0,
.length = 0,
.msb_right = 0,
},
};
#endif /* CONFIG_HID_PICOLCD_FB */
/* Input device
*
* The PicoLCD has an IR receiver header, a built-in keypad with 5 keys
* and header for 4x4 key matrix. The built-in keys are part of the matrix.
*/
static const unsigned short def_keymap[] = {
KEY_RESERVED, /* none */
KEY_BACK, /* col 4 + row 1 */
KEY_HOMEPAGE, /* col 3 + row 1 */
KEY_RESERVED, /* col 2 + row 1 */
KEY_RESERVED, /* col 1 + row 1 */
KEY_SCROLLUP, /* col 4 + row 2 */
KEY_OK, /* col 3 + row 2 */
KEY_SCROLLDOWN, /* col 2 + row 2 */
KEY_RESERVED, /* col 1 + row 2 */
KEY_RESERVED, /* col 4 + row 3 */
KEY_RESERVED, /* col 3 + row 3 */
KEY_RESERVED, /* col 2 + row 3 */
KEY_RESERVED, /* col 1 + row 3 */
KEY_RESERVED, /* col 4 + row 4 */
KEY_RESERVED, /* col 3 + row 4 */
KEY_RESERVED, /* col 2 + row 4 */
KEY_RESERVED, /* col 1 + row 4 */
};
#define PICOLCD_KEYS ARRAY_SIZE(def_keymap)
/* Description of in-progress IO operation, used for operations
* that trigger response from device */
struct picolcd_pending {
struct hid_report *out_report;
struct hid_report *in_report;
struct completion ready;
int raw_size;
u8 raw_data[64];
};
/* Per device data structure */
struct picolcd_data {
struct hid_device *hdev;
#ifdef CONFIG_DEBUG_FS
struct dentry *debug_reset;
struct dentry *debug_eeprom;
struct dentry *debug_flash;
struct mutex mutex_flash;
int addr_sz;
#endif
u8 version[2];
unsigned short opmode_delay;
/* input stuff */
u8 pressed_keys[2];
struct input_dev *input_keys;
struct input_dev *input_cir;
unsigned short keycode[PICOLCD_KEYS];
#ifdef CONFIG_HID_PICOLCD_FB
/* Framebuffer stuff */
u8 fb_update_rate;
u8 fb_bpp;
u8 fb_force;
u8 *fb_vbitmap; /* local copy of what was sent to PicoLCD */
u8 *fb_bitmap; /* framebuffer */
struct fb_info *fb_info;
struct fb_deferred_io fb_defio;
#endif /* CONFIG_HID_PICOLCD_FB */
#ifdef CONFIG_HID_PICOLCD_LCD
struct lcd_device *lcd;
u8 lcd_contrast;
#endif /* CONFIG_HID_PICOLCD_LCD */
#ifdef CONFIG_HID_PICOLCD_BACKLIGHT
struct backlight_device *backlight;
u8 lcd_brightness;
u8 lcd_power;
#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */
#ifdef CONFIG_HID_PICOLCD_LEDS
/* LED stuff */
u8 led_state;
struct led_classdev *led[8];
#endif /* CONFIG_HID_PICOLCD_LEDS */
/* Housekeeping stuff */
spinlock_t lock;
struct mutex mutex;
struct picolcd_pending *pending;
int status;
#define PICOLCD_BOOTLOADER 1
#define PICOLCD_FAILED 2
#define PICOLCD_READY_FB 4
};
/* Find a given report */
#define picolcd_in_report(id, dev) picolcd_report(id, dev, HID_INPUT_REPORT)
#define picolcd_out_report(id, dev) picolcd_report(id, dev, HID_OUTPUT_REPORT)
static struct hid_report *picolcd_report(int id, struct hid_device *hdev, int dir)
{
struct list_head *feature_report_list = &hdev->report_enum[dir].report_list;
struct hid_report *report = NULL;
list_for_each_entry(report, feature_report_list, list) {
if (report->id == id)
return report;
}
hid_warn(hdev, "No report with id 0x%x found\n", id);
return NULL;
}
#ifdef CONFIG_DEBUG_FS
static void picolcd_debug_out_report(struct picolcd_data *data,
struct hid_device *hdev, struct hid_report *report);
#define usbhid_submit_report(a, b, c) \
do { \
picolcd_debug_out_report(hid_get_drvdata(a), a, b); \
usbhid_submit_report(a, b, c); \
} while (0)
#endif
/* Submit a report and wait for a reply from device - if device fades away
* or does not respond in time, return NULL */
/* Submit @raw_data as output report @report_id and wait (up to 2 s) for
 * the device's reply, which the IRQ path stores in the returned
 * picolcd_pending.  data->mutex serialises concurrent senders;
 * data->lock protects the pending pointer against the IRQ handler.
 * Returns NULL if the device is gone/failed or allocation fails;
 * otherwise the caller owns (and must kfree) the returned work item. */
static struct picolcd_pending *picolcd_send_and_wait(struct hid_device *hdev,
int report_id, const u8 *raw_data, int size)
{
struct picolcd_data *data = hid_get_drvdata(hdev);
struct picolcd_pending *work;
struct hid_report *report = picolcd_out_report(report_id, hdev);
unsigned long flags;
int i, j, k;
if (!report || !data)
return NULL;
if (data->status & PICOLCD_FAILED)
return NULL;
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (!work)
return NULL;
init_completion(&work->ready);
work->out_report = report;
work->in_report = NULL;
work->raw_size = 0;
mutex_lock(&data->mutex);
spin_lock_irqsave(&data->lock, flags);
/* copy raw_data into the report fields, zero-padding the tail */
for (i = k = 0; i < report->maxfield; i++)
for (j = 0; j < report->field[i]->report_count; j++) {
hid_set_field(report->field[i], j, k < size ? raw_data[k] : 0);
k++;
}
data->pending = work;
usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
spin_unlock_irqrestore(&data->lock, flags);
wait_for_completion_interruptible_timeout(&work->ready, HZ*2);
/* detach the pending slot whether or not a reply arrived */
spin_lock_irqsave(&data->lock, flags);
data->pending = NULL;
spin_unlock_irqrestore(&data->lock, flags);
mutex_unlock(&data->mutex);
return work;
}
#ifdef CONFIG_HID_PICOLCD_FB
/* Send a given tile to PicoLCD */
/* Push one 64-byte tile from the shadow bitmap (fb_vbitmap) to the LCD.
 * The tile is split across two output reports: a CMD+DATA report that
 * positions the controller and carries the first 32 bytes, and a DATA
 * report with the remaining 32 bytes.  Returns 0 or -ENODEV when the
 * required reports are missing/malformed. */
static int picolcd_fb_send_tile(struct hid_device *hdev, int chip, int tile)
{
struct picolcd_data *data = hid_get_drvdata(hdev);
struct hid_report *report1 = picolcd_out_report(REPORT_LCD_CMD_DATA, hdev);
struct hid_report *report2 = picolcd_out_report(REPORT_LCD_DATA, hdev);
unsigned long flags;
u8 *tdata;
int i;
if (!report1 || report1->maxfield != 1 || !report2 || report2->maxfield != 1)
return -ENODEV;
spin_lock_irqsave(&data->lock, flags);
/* command header: select chip, set page (0xb8|tile) and column */
hid_set_field(report1->field[0], 0, chip << 2);
hid_set_field(report1->field[0], 1, 0x02);
hid_set_field(report1->field[0], 2, 0x00);
hid_set_field(report1->field[0], 3, 0x00);
hid_set_field(report1->field[0], 4, 0xb8 | tile);
hid_set_field(report1->field[0], 5, 0x00);
hid_set_field(report1->field[0], 6, 0x00);
hid_set_field(report1->field[0], 7, 0x40);
hid_set_field(report1->field[0], 8, 0x00);
hid_set_field(report1->field[0], 9, 0x00);
hid_set_field(report1->field[0], 10, 32);
/* second report continues with the remaining 32 data bytes */
hid_set_field(report2->field[0], 0, (chip << 2) | 0x01);
hid_set_field(report2->field[0], 1, 0x00);
hid_set_field(report2->field[0], 2, 0x00);
hid_set_field(report2->field[0], 3, 32);
tdata = data->fb_vbitmap + (tile * 4 + chip) * 64;
for (i = 0; i < 64; i++)
if (i < 32)
hid_set_field(report1->field[0], 11 + i, tdata[i]);
else
hid_set_field(report2->field[0], 4 + i - 32, tdata[i]);
usbhid_submit_report(data->hdev, report1, USB_DIR_OUT);
usbhid_submit_report(data->hdev, report2, USB_DIR_OUT);
spin_unlock_irqrestore(&data->lock, flags);
return 0;
}
/* Translate a single tile*/
static int picolcd_fb_update_tile(u8 *vbitmap, const u8 *bitmap, int bpp,
int chip, int tile)
{
int i, b, changed = 0;
u8 tdata[64];
u8 *vdata = vbitmap + (tile * 4 + chip) * 64;
if (bpp == 1) {
for (b = 7; b >= 0; b--) {
const u8 *bdata = bitmap + tile * 256 + chip * 8 + b * 32;
for (i = 0; i < 64; i++) {
tdata[i] <<= 1;
tdata[i] |= (bdata[i/8] >> (i % 8)) & 0x01;
}
}
} else if (bpp == 8) {
for (b = 7; b >= 0; b--) {
const u8 *bdata = bitmap + (tile * 256 + chip * 8 + b * 32) * 8;
for (i = 0; i < 64; i++) {
tdata[i] <<= 1;
tdata[i] |= (bdata[i] & 0x80) ? 0x01 : 0x00;
}
}
} else {
/* Oops, we should never get here! */
WARN_ON(1);
return 0;
}
for (i = 0; i < 64; i++)
if (tdata[i] != vdata[i]) {
changed = 1;
vdata[i] = tdata[i];
}
return changed;
}
/* Reconfigure LCD display */
static int picolcd_fb_reset(struct picolcd_data *data, int clear)
{
struct hid_report *report = picolcd_out_report(REPORT_LCD_CMD, data->hdev);
int i, j;
unsigned long flags;
static const u8 mapcmd[8] = { 0x00, 0x02, 0x00, 0x64, 0x3f, 0x00, 0x64, 0xc0 };
if (!report || report->maxfield != 1)
return -ENODEV;
spin_lock_irqsave(&data->lock, flags);
for (i = 0; i < 4; i++) {
for (j = 0; j < report->field[0]->maxusage; j++)
if (j == 0)
hid_set_field(report->field[0], j, i << 2);
else if (j < sizeof(mapcmd))
hid_set_field(report->field[0], j, mapcmd[j]);
else
hid_set_field(report->field[0], j, 0);
usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
}
data->status |= PICOLCD_READY_FB;
spin_unlock_irqrestore(&data->lock, flags);
if (data->fb_bitmap) {
if (clear) {
memset(data->fb_vbitmap, 0, PICOLCDFB_SIZE);
memset(data->fb_bitmap, 0, PICOLCDFB_SIZE*data->fb_bpp);
}
data->fb_force = 1;
}
/* schedule first output of framebuffer */
if (data->fb_info)
schedule_delayed_work(&data->fb_info->deferred_work, 0);
return 0;
}
/* Update fb_vbitmap from the screen_base and send changed tiles to device */
/* Update fb_vbitmap from the framebuffer and send changed tiles to the
 * device.  Re-initialises the LCD first if it is not yet marked ready.
 * Throttles by waiting for outstanding USB IO every few tiles so the
 * output FIFO is not flooded. */
static void picolcd_fb_update(struct picolcd_data *data)
{
int chip, tile, n;
unsigned long flags;
if (!data)
return;
spin_lock_irqsave(&data->lock, flags);
if (!(data->status & PICOLCD_READY_FB)) {
spin_unlock_irqrestore(&data->lock, flags);
picolcd_fb_reset(data, 0);
} else {
spin_unlock_irqrestore(&data->lock, flags);
}
/*
 * Translate the framebuffer into the format needed by the PicoLCD.
 * See display layout above.
 * Do this one tile after the other and push those tiles that changed.
 *
 * Wait for our IO to complete as otherwise we might flood the queue!
 */
n = 0;
for (chip = 0; chip < 4; chip++)
for (tile = 0; tile < 8; tile++)
if (picolcd_fb_update_tile(data->fb_vbitmap,
data->fb_bitmap, data->fb_bpp, chip, tile) ||
data->fb_force) {
n += 2;
if (!data->fb_info->par)
return; /* device lost! */
if (n >= HID_OUTPUT_FIFO_SIZE / 2) {
usbhid_wait_io(data->hdev);
n = 0;
}
picolcd_fb_send_tile(data->hdev, chip, tile);
}
data->fb_force = false;
if (n)
usbhid_wait_io(data->hdev);
}
/* Stub to call the system default and update the image on the picoLCD */
/* fb_fillrect: run the generic sys_fillrect, then schedule a deferred
 * flush to the device.  Bails out if the device is gone (par == NULL). */
static void picolcd_fb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
if (!info->par)
return;
sys_fillrect(info, rect);
schedule_delayed_work(&info->deferred_work, 0);
}
/* Stub to call the system default and update the image on the picoLCD */
/* fb_copyarea: run the generic sys_copyarea, then schedule a deferred
 * flush to the device.  Bails out if the device is gone (par == NULL). */
static void picolcd_fb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
if (!info->par)
return;
sys_copyarea(info, area);
schedule_delayed_work(&info->deferred_work, 0);
}
/* Stub to call the system default and update the image on the picoLCD */
/* fb_imageblit: run the generic sys_imageblit, then schedule a deferred
 * flush to the device.  Bails out if the device is gone (par == NULL). */
static void picolcd_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
if (!info->par)
return;
sys_imageblit(info, image);
schedule_delayed_work(&info->deferred_work, 0);
}
/*
* this is the slow path from userspace. they can seek and write to
* the fb. it's inefficient to do anything less than a full screen draw
*/
/* fb write() path from user space: delegate to fb_sys_write and, on
 * success, schedule a deferred flush of the whole screen. */
static ssize_t picolcd_fb_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos)
{
ssize_t ret;
if (!info->par)
return -ENODEV;
ret = fb_sys_write(info, buf, count, ppos);
if (ret >= 0)
schedule_delayed_work(&info->deferred_work, 0);
return ret;
}
/* fb_blank: deliberately a no-op; blanking is handled via the
 * lcd/backlight class devices through fb notification. */
static int picolcd_fb_blank(int blank, struct fb_info *info)
{
if (!info->par)
return -ENODEV;
/* We let fb notification do this for us via lcd/backlight device */
return 0;
}
/* fb_destroy callback: detach the fb_info from the driver data, stop
 * deferred IO, drop the device's reference on the shared refcount
 * (stored just before pseudo_palette) and free the framebuffer memory
 * once the last user is gone. */
static void picolcd_fb_destroy(struct fb_info *info)
{
struct picolcd_data *data = info->par;
u32 *ref_cnt = info->pseudo_palette;
int may_release;
info->par = NULL;
if (data)
data->fb_info = NULL;
fb_deferred_io_cleanup(info);
/* the refcount lives immediately before the pseudo_palette area */
ref_cnt--;
mutex_lock(&info->lock);
(*ref_cnt)--;
may_release = !*ref_cnt;
mutex_unlock(&info->lock);
if (may_release) {
vfree((u8 *)info->fix.smem_start);
framebuffer_release(info);
}
}
/* fb_check_var: force the fixed PicoLCD geometry and clamp the depth to
 * either 1 bpp (mono) or 8 bpp (grayscale); only 'activate' from the
 * caller's request survives. */
static int picolcd_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
__u32 bpp = var->bits_per_pixel;
__u32 activate = var->activate;
/* only allow 1/8 bit depth (8-bit is grayscale) */
*var = picolcdfb_var;
var->activate = activate;
if (bpp >= 8) {
var->bits_per_pixel = 8;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
} else {
var->bits_per_pixel = 1;
var->red.length = 1;
var->green.length = 1;
var->blue.length = 1;
}
return 0;
}
/* fb_set_par: switch the framebuffer between 1 bpp and 8 bpp, converting
 * the existing contents in place via a temporary buffer and updating the
 * fix-screeninfo to match.  Returns 0, -EINVAL for unsupported depths,
 * -ENOMEM/-ENODEV on failure. */
static int picolcd_set_par(struct fb_info *info)
{
struct picolcd_data *data = info->par;
u8 *tmp_fb, *o_fb;
if (!data)
return -ENODEV;
/* nothing to do when the depth is unchanged */
if (info->var.bits_per_pixel == data->fb_bpp)
return 0;
/* switch between 1/8 bit depths */
if (info->var.bits_per_pixel != 1 && info->var.bits_per_pixel != 8)
return -EINVAL;
o_fb = data->fb_bitmap;
tmp_fb = kmalloc(PICOLCDFB_SIZE*info->var.bits_per_pixel, GFP_KERNEL);
if (!tmp_fb)
return -ENOMEM;
/* translate FB content to new bits-per-pixel */
if (info->var.bits_per_pixel == 1) {
/* 8 bpp -> 1 bpp: pack each group of 8 bytes into one */
int i, b;
for (i = 0; i < PICOLCDFB_SIZE; i++) {
u8 p = 0;
for (b = 0; b < 8; b++) {
p <<= 1;
p |= o_fb[i*8+b] ? 0x01 : 0x00;
}
tmp_fb[i] = p;
}
memcpy(o_fb, tmp_fb, PICOLCDFB_SIZE);
info->fix.visual = FB_VISUAL_MONO01;
info->fix.line_length = PICOLCDFB_WIDTH / 8;
} else {
/* 1 bpp -> 8 bpp: expand each bit into a 0x00/0xff byte */
int i;
memcpy(tmp_fb, o_fb, PICOLCDFB_SIZE);
for (i = 0; i < PICOLCDFB_SIZE * 8; i++)
o_fb[i] = tmp_fb[i/8] & (0x01 << (7 - i % 8)) ? 0xff : 0x00;
info->fix.visual = FB_VISUAL_DIRECTCOLOR;
info->fix.line_length = PICOLCDFB_WIDTH;
}
kfree(tmp_fb);
data->fb_bpp = info->var.bits_per_pixel;
return 0;
}
/* Do refcounting on our FB and cleanup per worker if FB is
* closed after unplug of our device
* (fb_release holds info->lock and still touches info after
* we return so we can't release it immediately.
*/
/* Singly-linked list node queuing an fb_info for deferred release;
 * fb_pending is the list head, guarded by fb_pending_lock. */
struct picolcd_fb_cleanup_item {
struct fb_info *info;
struct picolcd_fb_cleanup_item *next;
};
static struct picolcd_fb_cleanup_item *fb_pending;
DEFINE_SPINLOCK(fb_pending_lock);
/* Worker that drains the fb_pending list, releasing each queued
 * framebuffer and its backing vmalloc'd memory outside of the fb core's
 * locking context. */
static void picolcd_fb_do_cleanup(struct work_struct *data)
{
struct picolcd_fb_cleanup_item *item;
unsigned long flags;
do {
/* pop one item under the lock */
spin_lock_irqsave(&fb_pending_lock, flags);
item = fb_pending;
fb_pending = item ? item->next : NULL;
spin_unlock_irqrestore(&fb_pending_lock, flags);
if (item) {
u8 *fb = (u8 *)item->info->fix.smem_start;
/* make sure we do not race against fb core when
 * releasing */
mutex_lock(&item->info->lock);
mutex_unlock(&item->info->lock);
framebuffer_release(item->info);
vfree(fb);
}
} while (item);
}
DECLARE_WORK(picolcd_fb_cleanup, picolcd_fb_do_cleanup);
/* fb_open callback: take one reference.  The shared u32 refcount
 * appears to be stored immediately before the pseudo_palette area
 * (hence the ref_cnt-- step); the release path relies on the same
 * layout. */
static int picolcd_fb_open(struct fb_info *info, int u)
{
u32 *ref_cnt = info->pseudo_palette;
ref_cnt--;
(*ref_cnt)++;
return 0;
}
/*
 * fb_release: drop the reference count stored in the u32 directly in front
 * of the pseudo palette; when it hits zero, queue the fb_info (via the
 * cleanup item placed in front of the refcount) for deferred release.
 */
static int picolcd_fb_release(struct fb_info *info, int u)
{
	u32 *ref_cnt = info->pseudo_palette;
	ref_cnt--;

	/* bugfix: this must DECREMENT — the old (*ref_cnt)++ meant the
	 * count could never reach zero, so the deferred cleanup never ran
	 * and the framebuffer memory leaked on unplug */
	(*ref_cnt)--;
	if (!*ref_cnt) {
		unsigned long flags;
		struct picolcd_fb_cleanup_item *item = (struct picolcd_fb_cleanup_item *)ref_cnt;
		item--;	/* cleanup item sits right before the refcount */
		spin_lock_irqsave(&fb_pending_lock, flags);
		item->next = fb_pending;
		fb_pending = item;
		spin_unlock_irqrestore(&fb_pending_lock, flags);
		schedule_work(&picolcd_fb_cleanup);
	}
	return 0;
}
/* Note this can't be const because of struct fb_info definition */
/* Framebuffer operations; open/release manage the hidden refcount in
 * front of pseudo_palette, the rest forward to the picolcd_fb_* helpers */
static struct fb_ops picolcdfb_ops = {
	.owner = THIS_MODULE,
	.fb_destroy = picolcd_fb_destroy,
	.fb_open = picolcd_fb_open,
	.fb_release = picolcd_fb_release,
	.fb_read = fb_sys_read,
	.fb_write = picolcd_fb_write,
	.fb_blank = picolcd_fb_blank,
	.fb_fillrect = picolcd_fb_fillrect,
	.fb_copyarea = picolcd_fb_copyarea,
	.fb_imageblit = picolcd_fb_imageblit,
	.fb_check_var = picolcd_fb_check_var,
	.fb_set_par = picolcd_set_par,
};
/* Callback from deferred IO workqueue */
static void picolcd_fb_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
	/* info->par is set to the driver data in picolcd_init_framebuffer */
	picolcd_fb_update(info->par);
}
/* Template deferred-IO config; a per-device copy is kept in picolcd_data
 * (data->fb_defio) so the delay can be tuned via the fb_update_rate
 * sysfs attribute */
static const struct fb_deferred_io picolcd_fb_defio = {
	.delay = HZ / PICOLCDFB_UPDATE_RATE_DEFAULT,
	.deferred_io = picolcd_fb_deferred_io,
};
/*
 * The "fb_update_rate" sysfs attribute
 */
/*
 * List all valid update rates (1..LIMIT), the active one in brackets,
 * e.g. "1 [2] 3 ...".  Returns the number of bytes placed in buf.
 */
static ssize_t picolcd_fb_update_rate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct picolcd_data *data = dev_get_drvdata(dev);
	unsigned i, fb_update_rate = data->fb_update_rate;
	size_t ret = 0;

	for (i = 1; i <= PICOLCDFB_UPDATE_RATE_LIMIT; i++) {
		if (ret >= PAGE_SIZE)
			break;
		/* scnprintf (not snprintf): returns bytes actually written,
		 * so ret can never run past PAGE_SIZE and the returned
		 * length always matches the buffer content */
		if (i == fb_update_rate)
			ret += scnprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
		else
			ret += scnprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
	}
	/* replace the trailing space with a newline */
	if (ret > 0)
		buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n';
	return ret;
}
/*
 * Parse an update rate from userspace; 0 selects the default rate and
 * values above the limit are rejected with -ERANGE.
 */
static ssize_t picolcd_fb_update_rate_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct picolcd_data *data = dev_get_drvdata(dev);
	unsigned rate;

	if (count < 1 || count > 10)
		return -EINVAL;
	if (sscanf(buf, "%u", &rate) != 1)
		return -EINVAL;
	if (rate > PICOLCDFB_UPDATE_RATE_LIMIT)
		return -ERANGE;
	if (rate == 0)
		rate = PICOLCDFB_UPDATE_RATE_DEFAULT;

	data->fb_update_rate = rate;
	data->fb_defio.delay = HZ / data->fb_update_rate;
	return count;
}
/* 0664 instead of 0666: a world-writable sysfs file would let any local
 * user change the refresh rate (checkpatch flags world-writable files) */
static DEVICE_ATTR(fb_update_rate, 0664, picolcd_fb_update_rate_show,
		picolcd_fb_update_rate_store);
/* initialize Framebuffer device */
/*
 * Allocate the framebuffer memory, set up deferred IO, create the
 * fb_update_rate sysfs attribute and register the fb device.
 * Returns 0 or a negative errno; on failure everything allocated here
 * is torn down again.
 */
static int picolcd_init_framebuffer(struct picolcd_data *data)
{
	struct device *dev = &data->hdev->dev;
	struct fb_info *info = NULL;
	int i, error = -ENOMEM;
	u8 *fb_vbitmap = NULL;
	u8 *fb_bitmap = NULL;
	u32 *palette;

	/* screen memory; sized PICOLCDFB_SIZE*8 so the 8bpp mode
	 * (one byte per pixel, see picolcd_set_par above) fits */
	fb_bitmap = vmalloc(PICOLCDFB_SIZE*8);
	if (fb_bitmap == NULL) {
		dev_err(dev, "can't get a free page for framebuffer\n");
		goto err_nomem;
	}

	/* shadow buffer used for transfers to the device
	 * (presumably device-native format — handled elsewhere) */
	fb_vbitmap = kmalloc(PICOLCDFB_SIZE, GFP_KERNEL);
	if (fb_vbitmap == NULL) {
		dev_err(dev, "can't alloc vbitmap image buffer\n");
		goto err_nomem;
	}

	data->fb_update_rate = PICOLCDFB_UPDATE_RATE_DEFAULT;
	data->fb_defio = picolcd_fb_defio;
	/* The extra memory is:
	 * - struct picolcd_fb_cleanup_item
	 * - u32 for ref_count
	 * - 256*u32 for pseudo_palette
	 */
	info = framebuffer_alloc(257 * sizeof(u32) + sizeof(struct picolcd_fb_cleanup_item), dev);
	if (info == NULL) {
		dev_err(dev, "failed to allocate a framebuffer\n");
		goto err_nomem;
	}

	/* info->par initially points at the extra memory; lay it out as
	 * [cleanup item][ref_count][256-entry palette] and point
	 * pseudo_palette at the palette, so fb_open/fb_release can walk
	 * backwards from it to the ref_count (and the cleanup item) */
	palette = info->par + sizeof(struct picolcd_fb_cleanup_item);
	*palette = 1;	/* initial reference */
	palette++;
	for (i = 0; i < 256; i++)
		palette[i] = i > 0 && i < 16 ? 0xff : 0;
	info->pseudo_palette = palette;
	info->fbdefio = &data->fb_defio;
	info->screen_base = (char __force __iomem *)fb_bitmap;
	info->fbops = &picolcdfb_ops;
	info->var = picolcdfb_var;
	info->fix = picolcdfb_fix;
	info->fix.smem_len = PICOLCDFB_SIZE*8;
	info->fix.smem_start = (unsigned long)fb_bitmap;
	info->par = data;	/* from here on, par is the driver data */
	info->flags = FBINFO_FLAG_DEFAULT;

	data->fb_vbitmap = fb_vbitmap;
	data->fb_bitmap = fb_bitmap;
	data->fb_bpp = picolcdfb_var.bits_per_pixel;
	error = picolcd_fb_reset(data, 1);
	if (error) {
		dev_err(dev, "failed to configure display\n");
		goto err_cleanup;
	}
	error = device_create_file(dev, &dev_attr_fb_update_rate);
	if (error) {
		dev_err(dev, "failed to create sysfs attributes\n");
		goto err_cleanup;
	}
	fb_deferred_io_init(info);
	data->fb_info = info;
	error = register_framebuffer(info);
	if (error) {
		dev_err(dev, "failed to register framebuffer\n");
		goto err_sysfs;
	}
	/* schedule first output of framebuffer */
	data->fb_force = 1;
	schedule_delayed_work(&info->deferred_work, 0);
	return 0;

err_sysfs:
	fb_deferred_io_cleanup(info);
	device_remove_file(dev, &dev_attr_fb_update_rate);
err_cleanup:
	data->fb_vbitmap = NULL;
	data->fb_bitmap = NULL;
	data->fb_bpp = 0;
	data->fb_info = NULL;
err_nomem:
	/* NOTE(review): info is NULL here if framebuffer_alloc() failed —
	 * verify framebuffer_release() tolerates NULL on this kernel */
	framebuffer_release(info);
	vfree(fb_bitmap);
	kfree(fb_vbitmap);
	return error;
}
/*
 * Unregister the framebuffer.  The screen memory (fb_bitmap) is NOT freed
 * here — it is released by the deferred cleanup worker once the last fb
 * user is gone (see picolcd_fb_release / picolcd_fb_do_cleanup).
 */
static void picolcd_exit_framebuffer(struct picolcd_data *data)
{
	struct fb_info *info = data->fb_info;
	u8 *vbitmap = data->fb_vbitmap;

	if (!info)
		return;

	/* detach driver data before the fb core can call back into us */
	info->par = NULL;
	device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
	unregister_framebuffer(info);

	data->fb_info = NULL;
	data->fb_bitmap = NULL;
	data->fb_vbitmap = NULL;
	data->fb_bpp = 0;
	kfree(vbitmap);
}
#define picolcd_fbinfo(d) ((d)->fb_info)
#else
/* Framebuffer support compiled out: no-op stubs so callers need no #ifdefs */
static inline int picolcd_fb_reset(struct picolcd_data *data, int clear)
{
	return 0;
}
static inline int picolcd_init_framebuffer(struct picolcd_data *data)
{
	return 0;
}
static inline void picolcd_exit_framebuffer(struct picolcd_data *data)
{
}
#define picolcd_fbinfo(d) NULL
#endif /* CONFIG_HID_PICOLCD_FB */
#ifdef CONFIG_HID_PICOLCD_BACKLIGHT
/*
 * backlight class device
 */
/* Report the cached brightness; the device is not queried */
static int picolcd_get_brightness(struct backlight_device *bdev)
{
	struct picolcd_data *data = bl_get_data(bdev);
	return data->lcd_brightness;
}
/*
 * Push brightness/power state to the device via REPORT_BRIGHTNESS.
 * A blanked display is sent brightness 0 regardless of the cached value.
 */
static int picolcd_set_brightness(struct backlight_device *bdev)
{
	struct picolcd_data *data = bl_get_data(bdev);
	struct hid_report *report = picolcd_out_report(REPORT_BRIGHTNESS, data->hdev);
	unsigned long irqflags;
	int value;

	if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
		return -ENODEV;

	data->lcd_brightness = bdev->props.brightness & 0x0ff;
	data->lcd_power = bdev->props.power;
	value = data->lcd_power == FB_BLANK_UNBLANK ? data->lcd_brightness : 0;

	spin_lock_irqsave(&data->lock, irqflags);
	hid_set_field(report->field[0], 0, value);
	usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
	spin_unlock_irqrestore(&data->lock, irqflags);
	return 0;
}
static int picolcd_check_bl_fb(struct backlight_device *bdev, struct fb_info *fb)
{
return fb && fb == picolcd_fbinfo((struct picolcd_data *)bl_get_data(bdev));
}
/* backlight class callbacks; brightness updates go out via REPORT_BRIGHTNESS */
static const struct backlight_ops picolcd_blops = {
	.update_status  = picolcd_set_brightness,
	.get_brightness = picolcd_get_brightness,
	.check_fb       = picolcd_check_bl_fb,
};
/*
 * Register a backlight class device for the PicoLCD and push an initial
 * full-brightness setting.  Returns 0 or a negative errno.
 */
static int picolcd_init_backlight(struct picolcd_data *data, struct hid_report *report)
{
	struct device *dev = &data->hdev->dev;
	struct backlight_device *bdev;
	struct backlight_properties props;

	if (!report)
		return -ENODEV;
	/* expect exactly one 8-bit brightness field */
	if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
			report->field[0]->report_size != 8) {
		/* bugfix: message was missing its trailing newline */
		dev_err(dev, "unsupported BRIGHTNESS report\n");
		return -EINVAL;
	}

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 0xff;
	bdev = backlight_device_register(dev_name(dev), dev, data,
			&picolcd_blops, &props);
	if (IS_ERR(bdev)) {
		dev_err(dev, "failed to register backlight\n");
		return PTR_ERR(bdev);
	}
	/* start at full brightness and push it to the device */
	bdev->props.brightness = 0xff;
	data->lcd_brightness = 0xff;
	data->backlight = bdev;
	picolcd_set_brightness(bdev);
	return 0;
}
/* Unregister the backlight device (if one was registered) */
static void picolcd_exit_backlight(struct picolcd_data *data)
{
	struct backlight_device *bdev = data->backlight;

	data->backlight = NULL;
	if (!bdev)
		return;
	backlight_device_unregister(bdev);
}
/* Re-apply the cached brightness/power state after reset or resume */
static inline int picolcd_resume_backlight(struct picolcd_data *data)
{
	if (!data->backlight)
		return 0;
	return picolcd_set_brightness(data->backlight);
}
#ifdef CONFIG_PM
/* Power down the backlight for suspend while keeping the configured
 * power state cached so picolcd_resume_backlight() can restore it */
static void picolcd_suspend_backlight(struct picolcd_data *data)
{
	int bl_power = data->lcd_power;
	if (!data->backlight)
		return;

	/* temporarily force power-down, then restore the cached values */
	data->backlight->props.power = FB_BLANK_POWERDOWN;
	picolcd_set_brightness(data->backlight);
	data->lcd_power = data->backlight->props.power = bl_power;
}
#endif /* CONFIG_PM */
#else
/* Backlight support compiled out: no-op stubs so callers need no #ifdefs */
static inline int picolcd_init_backlight(struct picolcd_data *data,
		struct hid_report *report)
{
	return 0;
}
static inline void picolcd_exit_backlight(struct picolcd_data *data)
{
}
static inline int picolcd_resume_backlight(struct picolcd_data *data)
{
	return 0;
}
static inline void picolcd_suspend_backlight(struct picolcd_data *data)
{
}
#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */
#ifdef CONFIG_HID_PICOLCD_LCD
/*
 * lcd class device
 */
/* Report the cached contrast; the device is not queried */
static int picolcd_get_contrast(struct lcd_device *ldev)
{
	struct picolcd_data *data = lcd_get_data(ldev);
	return data->lcd_contrast;
}
/* Cache the new contrast (low byte only) and push it via REPORT_CONTRAST */
static int picolcd_set_contrast(struct lcd_device *ldev, int contrast)
{
	struct picolcd_data *data = lcd_get_data(ldev);
	struct hid_report *report = picolcd_out_report(REPORT_CONTRAST, data->hdev);
	unsigned long irqflags;

	if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
		return -ENODEV;

	data->lcd_contrast = contrast & 0x0ff;
	spin_lock_irqsave(&data->lock, irqflags);
	hid_set_field(report->field[0], 0, data->lcd_contrast);
	usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
	spin_unlock_irqrestore(&data->lock, irqflags);
	return 0;
}
static int picolcd_check_lcd_fb(struct lcd_device *ldev, struct fb_info *fb)
{
return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev));
}
/* lcd class callbacks; NOTE(review): not const — lcd_device_register()
 * appears to take a non-const struct lcd_ops *, verify before constifying */
static struct lcd_ops picolcd_lcdops = {
	.get_contrast   = picolcd_get_contrast,
	.set_contrast   = picolcd_set_contrast,
	.check_fb       = picolcd_check_lcd_fb,
};
/*
 * Register an lcd class device for the PicoLCD and push the default
 * contrast to the hardware.  Returns 0 or a negative errno.
 */
static int picolcd_init_lcd(struct picolcd_data *data, struct hid_report *report)
{
	struct device *dev = &data->hdev->dev;
	struct lcd_device *ldev;

	if (!report)
		return -ENODEV;
	/* expect exactly one 8-bit contrast field */
	if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
			report->field[0]->report_size != 8) {
		/* bugfix: message was missing its trailing newline */
		dev_err(dev, "unsupported CONTRAST report\n");
		return -EINVAL;
	}

	ldev = lcd_device_register(dev_name(dev), dev, data, &picolcd_lcdops);
	if (IS_ERR(ldev)) {
		dev_err(dev, "failed to register LCD\n");
		return PTR_ERR(ldev);
	}
	ldev->props.max_contrast = 0x0ff;
	/* default contrast, cached once and pushed from the cache so the
	 * magic value lives in a single place */
	data->lcd_contrast = 0xe5;
	data->lcd = ldev;
	picolcd_set_contrast(ldev, data->lcd_contrast);
	return 0;
}
/* Unregister the lcd device (if one was registered) */
static void picolcd_exit_lcd(struct picolcd_data *data)
{
	struct lcd_device *ldev = data->lcd;

	data->lcd = NULL;
	if (!ldev)
		return;
	lcd_device_unregister(ldev);
}
/* Re-apply the cached contrast after reset or resume */
static inline int picolcd_resume_lcd(struct picolcd_data *data)
{
	if (!data->lcd)
		return 0;
	return picolcd_set_contrast(data->lcd, data->lcd_contrast);
}
#else
/* LCD support compiled out: no-op stubs so callers need no #ifdefs */
static inline int picolcd_init_lcd(struct picolcd_data *data,
		struct hid_report *report)
{
	return 0;
}
static inline void picolcd_exit_lcd(struct picolcd_data *data)
{
}
static inline int picolcd_resume_lcd(struct picolcd_data *data)
{
	return 0;
}
#endif /* CONFIG_HID_PICOLCD_LCD */
#ifdef CONFIG_HID_PICOLCD_LEDS
/*
 * LED class device
 */
/* Push the cached GPO bitmask (data->led_state) via REPORT_LED_STATE */
static void picolcd_leds_set(struct picolcd_data *data)
{
	unsigned long irqflags;
	struct hid_report *report;

	/* no LEDs registered yet -> nothing to drive */
	if (!data->led[0])
		return;
	report = picolcd_out_report(REPORT_LED_STATE, data->hdev);
	if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
		return;

	spin_lock_irqsave(&data->lock, irqflags);
	hid_set_field(report->field[0], 0, data->led_state);
	usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
	spin_unlock_irqrestore(&data->lock, irqflags);
}
/*
 * LED core callback: flip the bit for this LED in led_state and push the
 * new mask to the device, but only when the state actually changes.
 */
static void picolcd_led_set_brightness(struct led_classdev *led_cdev,
			enum led_brightness value)
{
	struct device *dev = led_cdev->dev->parent;
	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
	struct picolcd_data *data = hid_get_drvdata(hdev);
	int idx;

	for (idx = 0; idx < 8; idx++) {
		int was_on;

		if (data->led[idx] != led_cdev)
			continue;
		was_on = (data->led_state >> idx) & 1;
		if (value == LED_OFF && was_on) {
			data->led_state &= ~(1 << idx);
			picolcd_leds_set(data);
		} else if (value != LED_OFF && !was_on) {
			data->led_state |= 1 << idx;
			picolcd_leds_set(data);
		}
		break;
	}
}
/* LED core callback: report the cached on/off state for this LED */
static enum led_brightness picolcd_led_get_brightness(struct led_classdev *led_cdev)
{
	struct device *dev = led_cdev->dev->parent;
	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
	struct picolcd_data *data = hid_get_drvdata(hdev);
	int idx;

	for (idx = 0; idx < 8; idx++)
		if (data->led[idx] == led_cdev)
			return ((data->led_state >> idx) & 1) ? LED_FULL : LED_OFF;
	/* unknown classdev: treat as off (matches original value = 0) */
	return LED_OFF;
}
/*
 * Register eight LED class devices ("<dev>::GPO0".."<dev>::GPO7") driving
 * the device's general purpose outputs.  On any failure all LEDs
 * registered so far are torn down again.  Returns 0 or a negative errno.
 */
static int picolcd_init_leds(struct picolcd_data *data, struct hid_report *report)
{
	struct device *dev = &data->hdev->dev;
	struct led_classdev *led;
	size_t name_sz = strlen(dev_name(dev)) + 8;	/* "::GPOn" + NUL */
	char *name;
	int i, ret = 0;

	if (!report)
		return -ENODEV;
	/* expect exactly one 8-bit GPO state field */
	if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
			report->field[0]->report_size != 8) {
		/* bugfix: message was missing its trailing newline */
		dev_err(dev, "unsupported LED_STATE report\n");
		return -EINVAL;
	}

	for (i = 0; i < 8; i++) {
		/* classdev and its name string share one allocation */
		led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
		if (!led) {
			dev_err(dev, "can't allocate memory for LED %d\n", i);
			ret = -ENOMEM;
			goto err;
		}
		name = (void *)(&led[1]);
		snprintf(name, name_sz, "%s::GPO%d", dev_name(dev), i);
		led->name = name;
		led->brightness = 0;
		led->max_brightness = 1;
		led->brightness_get = picolcd_led_get_brightness;
		led->brightness_set = picolcd_led_set_brightness;
		data->led[i] = led;
		ret = led_classdev_register(dev, data->led[i]);
		if (ret) {
			data->led[i] = NULL;
			kfree(led);
			dev_err(dev, "can't register LED %d\n", i);
			goto err;
		}
	}
	return 0;
err:
	/* unwind any LEDs that made it through registration */
	for (i = 0; i < 8; i++)
		if (data->led[i]) {
			led = data->led[i];
			data->led[i] = NULL;
			led_classdev_unregister(led);
			kfree(led);
		}
	return ret;
}
/* Unregister and free all LED class devices */
static void picolcd_exit_leds(struct picolcd_data *data)
{
	int i;

	for (i = 0; i < 8; i++) {
		struct led_classdev *led = data->led[i];

		data->led[i] = NULL;
		if (!led)
			continue;
		led_classdev_unregister(led);
		kfree(led);
	}
}
#else
/* LED support compiled out: no-op stubs so callers need no #ifdefs */
static inline int picolcd_init_leds(struct picolcd_data *data,
		struct hid_report *report)
{
	return 0;
}
static inline void picolcd_exit_leds(struct picolcd_data *data)
{
}
/* consistency fix: the real picolcd_leds_set() returns void, but this
 * stub returned int — the visible caller ignores the result either way */
static inline void picolcd_leds_set(struct picolcd_data *data)
{
}
#endif /* CONFIG_HID_PICOLCD_LEDS */
/*
 * input class device
 */
/*
 * Decode a keypad input report: compare the reported key list against
 * data->pressed_keys, emit press events for new keys and release events
 * for keys no longer listed.  Always returns 1 (event consumed).
 */
static int picolcd_raw_keypad(struct picolcd_data *data,
		struct hid_report *report, u8 *raw_data, int size)
{
	/*
	 * Keypad event
	 * First and second data bytes list currently pressed keys,
	 * 0x00 means no key and at most 2 keys may be pressed at same time
	 */
	int i, j;

	/* determine newly pressed keys */
	for (i = 0; i < size; i++) {
		unsigned int key_code;
		if (raw_data[i] == 0)
			continue;
		/* skip keys we already reported as down */
		for (j = 0; j < sizeof(data->pressed_keys); j++)
			if (data->pressed_keys[j] == raw_data[i])
				goto key_already_down;
		/* remember this key in the first free slot */
		for (j = 0; j < sizeof(data->pressed_keys); j++)
			if (data->pressed_keys[j] == 0) {
				data->pressed_keys[j] = raw_data[i];
				break;
			}
		input_event(data->input_keys, EV_MSC, MSC_SCAN, raw_data[i]);
		if (raw_data[i] < PICOLCD_KEYS)
			key_code = data->keycode[raw_data[i]];
		else
			key_code = KEY_UNKNOWN;
		if (key_code != KEY_UNKNOWN) {
			dbg_hid(PICOLCD_NAME " got key press for %u:%d",
					raw_data[i], key_code);
			input_report_key(data->input_keys, key_code, 1);
		}
		input_sync(data->input_keys);
key_already_down:
		continue;
	}

	/* determine newly released keys */
	for (j = 0; j < sizeof(data->pressed_keys); j++) {
		unsigned int key_code;
		if (data->pressed_keys[j] == 0)
			continue;
		/* still listed in this report -> still held down */
		for (i = 0; i < size; i++)
			if (data->pressed_keys[j] == raw_data[i])
				goto key_still_down;
		input_event(data->input_keys, EV_MSC, MSC_SCAN, data->pressed_keys[j]);
		if (data->pressed_keys[j] < PICOLCD_KEYS)
			key_code = data->keycode[data->pressed_keys[j]];
		else
			key_code = KEY_UNKNOWN;
		if (key_code != KEY_UNKNOWN) {
			dbg_hid(PICOLCD_NAME " got key release for %u:%d",
					data->pressed_keys[j], key_code);
			input_report_key(data->input_keys, key_code, 0);
		}
		input_sync(data->input_keys);
		data->pressed_keys[j] = 0;
key_still_down:
		continue;
	}
	return 1;
}
/* Consumer-IR input report handler — not implemented yet */
static int picolcd_raw_cir(struct picolcd_data *data,
		struct hid_report *report, u8 *raw_data, int size)
{
	/* Need understanding of CIR data format to implement ... */
	return 1;	/* consume the event anyway */
}
/*
 * Query the device via REPORT_VERSION and cache the reported
 * firmware/bootloader version in data->version.
 * Returns 0 on success, -ENODEV / -EINVAL on failure.
 */
static int picolcd_check_version(struct hid_device *hdev)
{
	struct picolcd_data *data = hid_get_drvdata(hdev);
	struct picolcd_pending *resp;
	int ret = 0;

	if (!data)
		return -ENODEV;

	resp = picolcd_send_and_wait(hdev, REPORT_VERSION, NULL, 0);
	if (!resp) {
		hid_err(hdev, "no version response from PicoLCD\n");
		return -ENODEV;
	}

	if (resp->raw_size != 2) {
		hid_err(hdev, "confused, got unexpected version response from PicoLCD\n");
		ret = -EINVAL;
	} else {
		/* bytes arrive minor-first; store major in version[0] */
		data->version[0] = resp->raw_data[1];
		data->version[1] = resp->raw_data[0];
		if (data->status & PICOLCD_BOOTLOADER)
			hid_info(hdev, "PicoLCD, bootloader version %d.%d\n",
					resp->raw_data[1], resp->raw_data[0]);
		else
			hid_info(hdev, "PicoLCD, firmware version %d.%d\n",
					resp->raw_data[1], resp->raw_data[0]);
	}
	kfree(resp);
	return ret;
}
/*
 * Reset our device and wait for answer to VERSION request
 */
static int picolcd_reset(struct hid_device *hdev)
{
	struct picolcd_data *data = hid_get_drvdata(hdev);
	struct hid_report *report = picolcd_out_report(REPORT_RESET, hdev);
	unsigned long flags;
	int error;

	if (!data || !report || report->maxfield != 1)
		return -ENODEV;

	spin_lock_irqsave(&data->lock, flags);
	/* the bootloader-only device enumerates with its own product id */
	if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER)
		data->status |= PICOLCD_BOOTLOADER;

	/* perform the reset */
	hid_set_field(report->field[0], 0, 1);
	usbhid_submit_report(hdev, report, USB_DIR_OUT);
	spin_unlock_irqrestore(&data->lock, flags);

	error = picolcd_check_version(hdev);
	if (error)
		return error;

	/* the reset wiped the device's output state: restore contrast,
	 * backlight, framebuffer content and GPO/LED state */
	picolcd_resume_lcd(data);
	picolcd_resume_backlight(data);
#ifdef CONFIG_HID_PICOLCD_FB
	if (data->fb_info)
		schedule_delayed_work(&data->fb_info->deferred_work, 0);
#endif /* CONFIG_HID_PICOLCD_FB */
	picolcd_leds_set(data);
	return 0;
}
/*
 * The "operation_mode" sysfs attribute
 */
/* Show both modes with the active one in brackets, e.g. "[bootloader] lcd" */
static ssize_t picolcd_operation_mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct picolcd_data *data = dev_get_drvdata(dev);

	if (data->status & PICOLCD_BOOTLOADER)
		return snprintf(buf, PAGE_SIZE, "[bootloader] lcd\n");
	else
		return snprintf(buf, PAGE_SIZE, "bootloader [lcd]\n");
}
/*
 * Switch the device between "lcd" (normal) and "bootloader" (flasher)
 * operation mode.  The switch is sent with the configured delay (ms);
 * writing the mode the device is already in yields -EINVAL.
 */
static ssize_t picolcd_operation_mode_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct picolcd_data *data = dev_get_drvdata(dev);
	struct hid_report *report = NULL;
	size_t cnt = count;
	int timeout = data->opmode_delay;
	unsigned long flags;

	/* pick the mode-switch report; report stays NULL when the device
	 * is already in the requested mode */
	if (cnt >= 3 && strncmp("lcd", buf, 3) == 0) {
		if (data->status & PICOLCD_BOOTLOADER)
			report = picolcd_out_report(REPORT_EXIT_FLASHER, data->hdev);
		buf += 3;
		cnt -= 3;
	} else if (cnt >= 10 && strncmp("bootloader", buf, 10) == 0) {
		if (!(data->status & PICOLCD_BOOTLOADER))
			report = picolcd_out_report(REPORT_EXIT_KEYBOARD, data->hdev);
		buf += 10;
		cnt -= 10;
	}
	if (!report)
		return -EINVAL;

	/* only trailing newlines/CRs may follow the keyword */
	while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
		cnt--;
	if (cnt != 0)
		return -EINVAL;

	/* delay before switching, little-endian 16 bit in the report */
	spin_lock_irqsave(&data->lock, flags);
	hid_set_field(report->field[0], 0, timeout & 0xff);
	hid_set_field(report->field[0], 1, (timeout >> 8) & 0xff);
	usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
	spin_unlock_irqrestore(&data->lock, flags);
	return count;
}
static DEVICE_ATTR(operation_mode, 0644, picolcd_operation_mode_show,
		picolcd_operation_mode_store);
/*
 * The "operation_mode_delay" sysfs attribute
 */
/* Show the delay (ms) applied before an operation-mode switch */
static ssize_t picolcd_operation_mode_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct picolcd_data *data = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%hu\n", data->opmode_delay);
}
/* Set the mode-switch delay; valid range is 0..30000 ms */
static ssize_t picolcd_operation_mode_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct picolcd_data *data = dev_get_drvdata(dev);
	unsigned delay;

	if (sscanf(buf, "%u", &delay) != 1)
		return -EINVAL;
	if (delay > 30000)
		return -EINVAL;

	data->opmode_delay = delay;
	return count;
}
static DEVICE_ATTR(operation_mode_delay, 0644, picolcd_operation_mode_delay_show,
		picolcd_operation_mode_delay_store);
#ifdef CONFIG_DEBUG_FS
/*
 * The "reset" file
 */
/* List accepted reset targets: "all" always, plus "fb" when an fb exists */
static int picolcd_debug_reset_show(struct seq_file *f, void *p)
{
	struct picolcd_data *data = f->private;

	if (picolcd_fbinfo(data))
		seq_printf(f, "all fb\n");
	else
		seq_printf(f, "all\n");
	return 0;
}
/* seq_file boilerplate: bind the picolcd_data (i_private) to the file */
static int picolcd_debug_reset_open(struct inode *inode, struct file *f)
{
	return single_open(f, picolcd_debug_reset_show, inode->i_private);
}
/* Accept "all" (device + fb reset) or "fb" (fb reset only) */
static ssize_t picolcd_debug_reset_write(struct file *f, const char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct picolcd_data *data = ((struct seq_file *)f->private_data)->private;
	char kbuf[32];
	size_t len = min(count, sizeof(kbuf)-1);

	if (copy_from_user(kbuf, user_buf, len))
		return -EFAULT;

	/* strip trailing spaces/newlines and terminate */
	while (len > 0 && (kbuf[len-1] == ' ' || kbuf[len-1] == '\n'))
		len--;
	kbuf[len] = '\0';

	if (strcmp(kbuf, "all") == 0) {
		picolcd_reset(data->hdev);
		picolcd_fb_reset(data, 1);
		return count;
	}
	if (strcmp(kbuf, "fb") == 0) {
		picolcd_fb_reset(data, 1);
		return count;
	}
	return -EINVAL;
}
/* debugfs "reset" file operations (seq_file based, writable trigger) */
static const struct file_operations picolcd_debug_reset_fops = {
	.owner    = THIS_MODULE,
	.open     = picolcd_debug_reset_open,
	.read     = seq_read,
	.llseek   = seq_lseek,
	.write    = picolcd_debug_reset_write,
	.release  = single_release,
};
/*
 * The "eeprom" file
 */
/* stash the picolcd_data (i_private) for the read/write handlers */
static int picolcd_debug_eeprom_open(struct inode *i, struct file *f)
{
	f->private_data = i->i_private;
	return 0;
}
/*
 * Read up to 20 bytes of EEPROM at *off via a REPORT_EE_READ /
 * REPORT_EE_DATA exchange; valid offsets are 0x00..0xff.
 */
static ssize_t picolcd_debug_eeprom_read(struct file *f, char __user *u,
		size_t s, loff_t *off)
{
	struct picolcd_data *data = f->private_data;
	struct picolcd_pending *resp;
	u8 raw_data[3];
	ssize_t ret = -EIO;

	if (s == 0)
		return -EINVAL;
	if (*off > 0x0ff)
		return 0;	/* EOF past end of EEPROM */

	/* prepare buffer with info about what we want to read (addr & len) */
	raw_data[0] = *off & 0xff;
	raw_data[1] = (*off >> 8) & 0xff;
	raw_data[2] = s < 20 ? s : 20;	/* protocol chunk limit */
	if (*off + raw_data[2] > 0xff)
		raw_data[2] = 0x100 - *off;	/* clamp to EEPROM end */

	resp = picolcd_send_and_wait(data->hdev, REPORT_EE_READ, raw_data,
			sizeof(raw_data));
	if (!resp)
		return -EIO;

	if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) {
		/* successful read :) */
		ret = resp->raw_data[2];	/* length echoed by device */
		if (ret > s)
			ret = s;
		if (copy_to_user(u, resp->raw_data+3, ret))
			ret = -EFAULT;
		else
			*off += ret;
	} /* anything else is some kind of IO error */

	kfree(resp);
	return ret;
}
/*
 * Write up to 20 bytes of EEPROM at *off via a REPORT_EE_WRITE /
 * REPORT_EE_DATA exchange; the device echoes the data back, which is
 * compared for verification.
 */
static ssize_t picolcd_debug_eeprom_write(struct file *f, const char __user *u,
		size_t s, loff_t *off)
{
	struct picolcd_data *data = f->private_data;
	struct picolcd_pending *resp;
	ssize_t ret = -EIO;
	u8 raw_data[23];

	if (s == 0)
		return -EINVAL;
	if (*off > 0x0ff)
		return -ENOSPC;	/* past end of 256-byte EEPROM */

	memset(raw_data, 0, sizeof(raw_data));
	/* header: little-endian address + chunk length (max 20 bytes) */
	raw_data[0] = *off & 0xff;
	raw_data[1] = (*off >> 8) & 0xff;
	raw_data[2] = min((size_t)20, s);
	if (*off + raw_data[2] > 0xff)
		raw_data[2] = 0x100 - *off;	/* clamp to EEPROM end */
	if (copy_from_user(raw_data+3, u, min((u8)20, raw_data[2])))
		return -EFAULT;
	resp = picolcd_send_and_wait(data->hdev, REPORT_EE_WRITE, raw_data,
			sizeof(raw_data));
	if (!resp)
		return -EIO;

	if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) {
		/* check if written data matches */
		if (memcmp(raw_data, resp->raw_data, 3+raw_data[2]) == 0) {
			*off += raw_data[2];
			ret = raw_data[2];
		}
	}
	kfree(resp);
	return ret;
}
/*
 * Notes:
 * - read/write happens in chunks of at most 20 bytes, it's up to userspace
 *   to loop in order to get more data.
 * - on write errors on otherwise correct write request the bytes
 *   that should have been written are in undefined state.
 */
static const struct file_operations picolcd_debug_eeprom_fops = {
	.owner    = THIS_MODULE,
	.open     = picolcd_debug_eeprom_open,
	.read     = picolcd_debug_eeprom_read,
	.write    = picolcd_debug_eeprom_write,
	.llseek   = generic_file_llseek,
};
/*
 * The "flash" file
 */
/* stash the picolcd_data (i_private) for the read/write handlers */
static int picolcd_debug_flash_open(struct inode *i, struct file *f)
{
	f->private_data = i->i_private;
	return 0;
}
/* record a flash address to buf (bounds check to be done by caller);
 * returns the number of address bytes in the request header (2 or 3) */
static int _picolcd_flash_setaddr(struct picolcd_data *data, u8 *buf, long off)
{
	buf[0] = off & 0xff;
	buf[1] = (off >> 8) & 0xff;
	if (data->addr_sz == 2)
		return 2;
	if (data->addr_sz == 3)
		buf[2] = (off >> 16) & 0xff;
	return 3;
}
/* read a given size of data (bounds check to be done by caller) */
/*
 * Flash is read in chunks of at most 32 bytes; *off is advanced for each
 * successfully transferred chunk.  Short reads are returned as success
 * when any data was transferred.
 */
static ssize_t _picolcd_flash_read(struct picolcd_data *data, int report_id,
		char __user *u, size_t s, loff_t *off)
{
	struct picolcd_pending *resp;
	u8 raw_data[4];
	ssize_t ret = 0;
	int len_off, err = -EIO;

	while (s > 0) {
		err = -EIO;
		/* request header: flash address followed by chunk length */
		len_off = _picolcd_flash_setaddr(data, raw_data, *off);
		raw_data[len_off] = s > 32 ? 32 : s;
		resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off+1);
		if (!resp || !resp->in_report)
			goto skip;
		if (resp->in_report->id == REPORT_MEMORY ||
			resp->in_report->id == REPORT_BL_READ_MEMORY) {
			/* device must echo our address/length header back */
			if (memcmp(raw_data, resp->raw_data, len_off+1) != 0)
				goto skip;
			if (copy_to_user(u+ret, resp->raw_data+len_off+1, raw_data[len_off])) {
				err = -EFAULT;
				goto skip;
			}
			*off += raw_data[len_off];
			s -= raw_data[len_off];
			ret += raw_data[len_off];
			err = 0;
		}
skip:
		kfree(resp);
		if (err)
			return ret > 0 ? ret : err;
	}
	return ret;
}
/* Bounds-check the request against the 0x6000-byte flash, then dispatch
 * to the read helper with the mode-appropriate report id */
static ssize_t picolcd_debug_flash_read(struct file *f, char __user *u,
		size_t s, loff_t *off)
{
	struct picolcd_data *data = f->private_data;
	int report_id;

	if (s == 0)
		return -EINVAL;
	if (*off > 0x05fff)
		return 0;	/* EOF past end of flash */
	if (*off + s > 0x05fff)
		s = 0x06000 - *off;

	report_id = (data->status & PICOLCD_BOOTLOADER) ?
			REPORT_BL_READ_MEMORY : REPORT_READ_MEMORY;
	return _picolcd_flash_read(data, report_id, u, s, off);
}
/* erase block aligned to 64bytes boundary */
/*
 * Send an erase request for the 64-byte block at *off; the device is
 * expected to echo the address back.  Returns 0 or a negative errno.
 */
static ssize_t _picolcd_flash_erase64(struct picolcd_data *data, int report_id,
		loff_t *off)
{
	struct picolcd_pending *resp;
	u8 raw_data[3];
	int len_off;
	ssize_t ret = -EIO;

	if (*off & 0x3f)
		return -EINVAL;	/* not 64-byte aligned */

	/* request consists of the flash address only */
	len_off = _picolcd_flash_setaddr(data, raw_data, *off);
	resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off);
	if (!resp || !resp->in_report)
		goto skip;
	if (resp->in_report->id == REPORT_MEMORY ||
		resp->in_report->id == REPORT_BL_ERASE_MEMORY) {
		/* device must echo the address back on success */
		if (memcmp(raw_data, resp->raw_data, len_off) != 0)
			goto skip;
		ret = 0;
	}
skip:
	kfree(resp);
	return ret;
}
/* write a given size of data (bounds check to be done by caller) */
/*
 * Flash is written in chunks of at most 32 bytes; *off is advanced here
 * for each successfully written chunk and the device's echo of
 * header+data is verified.
 *
 * NOTE(review): u is never advanced, so for s > 32 every chunk copies
 * the same first bytes from userspace — verify whether callers rely on
 * this (the visible caller passes s == 64).
 */
static ssize_t _picolcd_flash_write(struct picolcd_data *data, int report_id,
		const char __user *u, size_t s, loff_t *off)
{
	struct picolcd_pending *resp;
	u8 raw_data[36];
	ssize_t ret = 0;
	int len_off, err = -EIO;

	while (s > 0) {
		err = -EIO;
		len_off = _picolcd_flash_setaddr(data, raw_data, *off);
		raw_data[len_off] = s > 32 ? 32 : s;
		if (copy_from_user(raw_data+len_off+1, u, raw_data[len_off])) {
			err = -EFAULT;
			break;
		}
		resp = picolcd_send_and_wait(data->hdev, report_id, raw_data,
				len_off+1+raw_data[len_off]);
		if (!resp || !resp->in_report)
			goto skip;
		if (resp->in_report->id == REPORT_MEMORY ||
			resp->in_report->id == REPORT_BL_WRITE_MEMORY) {
			/* device echoes header + data back on success */
			if (memcmp(raw_data, resp->raw_data, len_off+1+raw_data[len_off]) != 0)
				goto skip;
			*off += raw_data[len_off];
			s -= raw_data[len_off];
			ret += raw_data[len_off];
			err = 0;
		}
skip:
		kfree(resp);
		if (err)
			break;
	}
	return ret > 0 ? ret : err;
}
/*
 * Write flash in erase+write cycles of one 64-byte block each.  Writes
 * must be 64-byte aligned and a multiple of 64 bytes long.
 *
 * NOTE(review): _picolcd_flash_write() already advances *off internally,
 * yet *off is advanced by err again below — this looks like a double
 * advance (and u is never advanced either); verify against hardware /
 * upstream before relying on multi-block writes.
 */
static ssize_t picolcd_debug_flash_write(struct file *f, const char __user *u,
		size_t s, loff_t *off)
{
	struct picolcd_data *data = f->private_data;
	ssize_t err, ret = 0;
	int report_erase, report_write;

	if (s == 0)
		return -EINVAL;
	if (*off > 0x5fff)
		return -ENOSPC;	/* past end of flash */
	if (s & 0x3f)
		return -EINVAL;	/* length must be n*64 */
	if (*off & 0x3f)
		return -EINVAL;	/* must start on a 64-byte boundary */

	if (data->status & PICOLCD_BOOTLOADER) {
		report_erase = REPORT_BL_ERASE_MEMORY;
		report_write = REPORT_BL_WRITE_MEMORY;
	} else {
		report_erase = REPORT_ERASE_MEMORY;
		report_write = REPORT_WRITE_MEMORY;
	}
	/* serialize erase/write cycles against concurrent writers */
	mutex_lock(&data->mutex_flash);
	while (s > 0) {
		err = _picolcd_flash_erase64(data, report_erase, off);
		if (err)
			break;
		err = _picolcd_flash_write(data, report_write, u, 64, off);
		if (err < 0)
			break;
		ret += err;
		*off += err;
		s -= err;
		if (err != 64)
			break;	/* short write: stop here */
	}
	mutex_unlock(&data->mutex_flash);
	return ret > 0 ? ret : err;
}
/*
 * Notes:
 * - concurrent writing is prevented by mutex and all writes must be
 *   n*64 bytes and 64-byte aligned, each write being preceded by an
 *   ERASE which erases a 64byte block.
 *   If less than requested was written or an error is returned for an
 *   otherwise correct write request the next 64-byte block which should
 *   have been written is in undefined state (mostly: original, erased,
 *   (half-)written with write error)
 * - reading can happen without special restriction
 */
static const struct file_operations picolcd_debug_flash_fops = {
	.owner    = THIS_MODULE,
	.open     = picolcd_debug_flash_open,
	.read     = picolcd_debug_flash_read,
	.write    = picolcd_debug_flash_write,
	.llseek   = generic_file_llseek,
};
/*
 * Helper code for HID report level dumping/debugging
 */
/* Device status/error strings, indexed by the code the device reports.
 * NOTE(review): presumably consumed by the report-dump code further down
 * in this file — keep the order in sync with the firmware's numbering. */
static const char *error_codes[] = {
	"success", "parameter missing", "data_missing", "block readonly",
	"block not erasable", "block too big", "section overflow",
	"invalid command length", "invalid data length",
};
/*
 * Render data as space-separated hex bytes into dst, terminated by a
 * newline and NUL; output is truncated to what fits in dst_sz bytes.
 *
 * Fixes two out-of-bounds writes in the old code: data_len == 0 wrote
 * dst[-1] (buffer underflow), and dst_sz == 0 wrote dst[0] past a
 * zero-sized buffer.  Also uses size_t counters to avoid signed/unsigned
 * comparison with the size_t arguments.
 */
static void dump_buff_as_hex(char *dst, size_t dst_sz, const u8 *data,
		const size_t data_len)
{
	size_t i, j;

	if (dst_sz == 0)
		return;
	/* "XX " per byte; stop while there is room for "XX " plus the NUL
	 * (j + 4 <= dst_sz is the old j + 3 < dst_sz condition) */
	for (i = j = 0; i < data_len && j + 4 <= dst_sz; i++) {
		dst[j++] = hex_asc[(data[i] >> 4) & 0x0f];
		dst[j++] = hex_asc[data[i] & 0x0f];
		dst[j++] = ' ';
	}
	/* replace the trailing space with a newline, as before */
	if (j > 0)
		dst[j - 1] = '\n';
	dst[j] = '\0';	/* j < dst_sz is guaranteed by the loop bound */
}
/*
 * Decode an outgoing report into the hid debugfs event stream.
 *
 * Builds the raw report image (report id in raw_data[0], payload from
 * raw_data[1] on) and emits both a hex dump and a per-report-id decoded
 * description via hid_debug_event().
 *
 * Fixes vs. the previous version: the REPORT_LCD_DATA, REPORT_LCD_CMD_DATA
 * and REPORT_EXIT_FLASHER cases printed copy-pasted wrong report names
 * ("REPORT_LCD_CMD" / "REPORT_VERSION").
 */
static void picolcd_debug_out_report(struct picolcd_data *data,
        struct hid_device *hdev, struct hid_report *report)
{
    u8 raw_data[70];
    int raw_size = (report->size >> 3) + 1;  /* bits -> bytes, + report id */
    char *buff;
#define BUFF_SZ 256
    /* Avoid unnecessary overhead if debugfs is disabled */
    if (!hdev->debug_events)
        return;

    buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
    if (!buff)
        return;

    snprintf(buff, BUFF_SZ, "\nout report %d (size %d) = ",
            report->id, raw_size);
    hid_debug_event(hdev, buff);
    /* reject reports that would not fit our stack buffer */
    if (raw_size + 5 > sizeof(raw_data)) {
        kfree(buff);
        hid_debug_event(hdev, " TOO BIG\n");
        return;
    } else {
        raw_data[0] = report->id;
        hid_output_report(report, raw_data);
        dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size);
        hid_debug_event(hdev, buff);
    }

    switch (report->id) {
    case REPORT_LED_STATE:
        /* 1 data byte with GPO state */
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_LED_STATE", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tGPO state: 0x%02x\n", raw_data[1]);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_BRIGHTNESS:
        /* 1 data byte with brightness */
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_BRIGHTNESS", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tBrightness: 0x%02x\n", raw_data[1]);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_CONTRAST:
        /* 1 data byte with contrast */
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_CONTRAST", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tContrast: 0x%02x\n", raw_data[1]);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_RESET:
        /* 2 data bytes with reset duration in ms */
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_RESET", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tDuration: 0x%02x%02x (%dms)\n",
                raw_data[2], raw_data[1], raw_data[2] << 8 | raw_data[1]);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_LCD_CMD:
        /* 63 data bytes with LCD commands */
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_LCD_CMD", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        /* TODO: format decoding */
        break;
    case REPORT_LCD_DATA:
        /* 63 data bytes with LCD data */
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_LCD_DATA", report->id, raw_size-1);
        /* TODO: format decoding */
        hid_debug_event(hdev, buff);
        break;
    case REPORT_LCD_CMD_DATA:
        /* 63 data bytes with LCD commands and data */
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_LCD_CMD_DATA", report->id, raw_size-1);
        /* TODO: format decoding */
        hid_debug_event(hdev, buff);
        break;
    case REPORT_EE_READ:
        /* 3 data bytes with read area description */
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_EE_READ", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
                raw_data[2], raw_data[1]);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_EE_WRITE:
        /* 3+1..20 data bytes with write area description */
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_EE_WRITE", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
                raw_data[2], raw_data[1]);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
        hid_debug_event(hdev, buff);
        if (raw_data[3] == 0) {
            snprintf(buff, BUFF_SZ, "\tNo data\n");
        } else if (raw_data[3] + 4 <= raw_size) {
            snprintf(buff, BUFF_SZ, "\tData: ");
            hid_debug_event(hdev, buff);
            dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
        } else {
            snprintf(buff, BUFF_SZ, "\tData overflowed\n");
        }
        hid_debug_event(hdev, buff);
        break;
    case REPORT_ERASE_MEMORY:
    case REPORT_BL_ERASE_MEMORY:
        /* 3 data bytes with pointer inside erase block */
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_ERASE_MEMORY", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        switch (data->addr_sz) {
        case 2:
            snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x\n",
                    raw_data[2], raw_data[1]);
            break;
        case 3:
            snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x%02x\n",
                    raw_data[3], raw_data[2], raw_data[1]);
            break;
        default:
            snprintf(buff, BUFF_SZ, "\tNot supported\n");
        }
        hid_debug_event(hdev, buff);
        break;
    case REPORT_READ_MEMORY:
    case REPORT_BL_READ_MEMORY:
        /* 4 data bytes with read area description */
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_READ_MEMORY", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        switch (data->addr_sz) {
        case 2:
            snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
                    raw_data[2], raw_data[1]);
            hid_debug_event(hdev, buff);
            snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
            break;
        case 3:
            snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
                    raw_data[3], raw_data[2], raw_data[1]);
            hid_debug_event(hdev, buff);
            snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
            break;
        default:
            snprintf(buff, BUFF_SZ, "\tNot supported\n");
        }
        hid_debug_event(hdev, buff);
        break;
    case REPORT_WRITE_MEMORY:
    case REPORT_BL_WRITE_MEMORY:
        /* 4+1..32 data bytes with write area description */
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_WRITE_MEMORY", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        switch (data->addr_sz) {
        case 2:
            snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
                    raw_data[2], raw_data[1]);
            hid_debug_event(hdev, buff);
            snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
            hid_debug_event(hdev, buff);
            if (raw_data[3] == 0) {
                snprintf(buff, BUFF_SZ, "\tNo data\n");
            } else if (raw_data[3] + 4 <= raw_size) {
                snprintf(buff, BUFF_SZ, "\tData: ");
                hid_debug_event(hdev, buff);
                dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
            } else {
                snprintf(buff, BUFF_SZ, "\tData overflowed\n");
            }
            break;
        case 3:
            snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
                    raw_data[3], raw_data[2], raw_data[1]);
            hid_debug_event(hdev, buff);
            snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
            hid_debug_event(hdev, buff);
            if (raw_data[4] == 0) {
                snprintf(buff, BUFF_SZ, "\tNo data\n");
            } else if (raw_data[4] + 5 <= raw_size) {
                snprintf(buff, BUFF_SZ, "\tData: ");
                hid_debug_event(hdev, buff);
                dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]);
            } else {
                snprintf(buff, BUFF_SZ, "\tData overflowed\n");
            }
            break;
        default:
            snprintf(buff, BUFF_SZ, "\tNot supported\n");
        }
        hid_debug_event(hdev, buff);
        break;
    case REPORT_SPLASH_RESTART:
        /* TODO */
        break;
    case REPORT_EXIT_KEYBOARD:
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_EXIT_KEYBOARD", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n",
                raw_data[1] | (raw_data[2] << 8),
                raw_data[2], raw_data[1]);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_VERSION:
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_VERSION", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_DEVID:
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_DEVID", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_SPLASH_SIZE:
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_SPLASH_SIZE", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_HOOK_VERSION:
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_HOOK_VERSION", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_EXIT_FLASHER:
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "REPORT_EXIT_FLASHER", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n",
                raw_data[1] | (raw_data[2] << 8),
                raw_data[2], raw_data[1]);
        hid_debug_event(hdev, buff);
        break;
    default:
        snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
                "<unknown>", report->id, raw_size-1);
        hid_debug_event(hdev, buff);
        break;
    }
    wake_up_interruptible(&hdev->debug_wait);
    kfree(buff);
}
/*
 * Decode an incoming raw report into the hid debugfs event stream.
 *
 * raw_data[0] holds the report id; the decoded payload starts at
 * raw_data[1] (the caller passes the full id-prefixed buffer, see
 * picolcd_raw_event).  Each known report id gets a human-readable
 * description via hid_debug_event(); unknown ids are dumped generically.
 */
static void picolcd_debug_raw_event(struct picolcd_data *data,
        struct hid_device *hdev, struct hid_report *report,
        u8 *raw_data, int size)
{
    char *buff;
#define BUFF_SZ 256
    /* Avoid unnecessary overhead if debugfs is disabled */
    if (!hdev->debug_events)
        return;
    buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
    if (!buff)
        return;
    switch (report->id) {
    case REPORT_ERROR_CODE:
        /* 2 data bytes with affected report and error code */
        snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
                "REPORT_ERROR_CODE", report->id, size-1);
        hid_debug_event(hdev, buff);
        /* translate the error byte via error_codes[] when in range */
        if (raw_data[2] < ARRAY_SIZE(error_codes))
            snprintf(buff, BUFF_SZ, "\tError code 0x%02x (%s) in reply to report 0x%02x\n",
                    raw_data[2], error_codes[raw_data[2]], raw_data[1]);
        else
            snprintf(buff, BUFF_SZ, "\tError code 0x%02x in reply to report 0x%02x\n",
                    raw_data[2], raw_data[1]);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_KEY_STATE:
        /* 2 data bytes with key state */
        snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
                "REPORT_KEY_STATE", report->id, size-1);
        hid_debug_event(hdev, buff);
        if (raw_data[1] == 0)
            snprintf(buff, BUFF_SZ, "\tNo key pressed\n");
        else if (raw_data[2] == 0)
            snprintf(buff, BUFF_SZ, "\tOne key pressed: 0x%02x (%d)\n",
                    raw_data[1], raw_data[1]);
        else
            snprintf(buff, BUFF_SZ, "\tTwo keys pressed: 0x%02x (%d), 0x%02x (%d)\n",
                    raw_data[1], raw_data[1], raw_data[2], raw_data[2]);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_IR_DATA:
        /* Up to 20 bytes of IR scancode data */
        snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
                "REPORT_IR_DATA", report->id, size-1);
        hid_debug_event(hdev, buff);
        if (raw_data[1] == 0) {
            snprintf(buff, BUFF_SZ, "\tUnexpectedly 0 data length\n");
            hid_debug_event(hdev, buff);
        } else if (raw_data[1] + 1 <= size) {
            snprintf(buff, BUFF_SZ, "\tData length: %d\n\tIR Data: ",
                    raw_data[1]-1);
            hid_debug_event(hdev, buff);
            dump_buff_as_hex(buff, BUFF_SZ, raw_data+2, raw_data[1]-1);
            hid_debug_event(hdev, buff);
        } else {
            snprintf(buff, BUFF_SZ, "\tOverflowing data length: %d\n",
                    raw_data[1]-1);
            hid_debug_event(hdev, buff);
        }
        break;
    case REPORT_EE_DATA:
        /* Data buffer in response to REPORT_EE_READ or REPORT_EE_WRITE */
        snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
                "REPORT_EE_DATA", report->id, size-1);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
                raw_data[2], raw_data[1]);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
        hid_debug_event(hdev, buff);
        if (raw_data[3] == 0) {
            snprintf(buff, BUFF_SZ, "\tNo data\n");
            hid_debug_event(hdev, buff);
        } else if (raw_data[3] + 4 <= size) {
            snprintf(buff, BUFF_SZ, "\tData: ");
            hid_debug_event(hdev, buff);
            dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
            hid_debug_event(hdev, buff);
        } else {
            snprintf(buff, BUFF_SZ, "\tData overflowed\n");
            hid_debug_event(hdev, buff);
        }
        break;
    case REPORT_MEMORY:
        /* Data buffer in response to REPORT_READ_MEMORY or REPORT_WRTIE_MEMORY */
        snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
                "REPORT_MEMORY", report->id, size-1);
        hid_debug_event(hdev, buff);
        /* address width depends on device flavour, set in picolcd_init_devfs */
        switch (data->addr_sz) {
        case 2:
            snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
                    raw_data[2], raw_data[1]);
            hid_debug_event(hdev, buff);
            snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
            hid_debug_event(hdev, buff);
            if (raw_data[3] == 0) {
                snprintf(buff, BUFF_SZ, "\tNo data\n");
            } else if (raw_data[3] + 4 <= size) {
                snprintf(buff, BUFF_SZ, "\tData: ");
                hid_debug_event(hdev, buff);
                dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
            } else {
                snprintf(buff, BUFF_SZ, "\tData overflowed\n");
            }
            break;
        case 3:
            snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
                    raw_data[3], raw_data[2], raw_data[1]);
            hid_debug_event(hdev, buff);
            snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
            hid_debug_event(hdev, buff);
            if (raw_data[4] == 0) {
                snprintf(buff, BUFF_SZ, "\tNo data\n");
            } else if (raw_data[4] + 5 <= size) {
                snprintf(buff, BUFF_SZ, "\tData: ");
                hid_debug_event(hdev, buff);
                dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]);
            } else {
                snprintf(buff, BUFF_SZ, "\tData overflowed\n");
            }
            break;
        default:
            snprintf(buff, BUFF_SZ, "\tNot supported\n");
        }
        hid_debug_event(hdev, buff);
        break;
    case REPORT_VERSION:
        snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
                "REPORT_VERSION", report->id, size-1);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n",
                raw_data[2], raw_data[1]);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_BL_ERASE_MEMORY:
        snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
                "REPORT_BL_ERASE_MEMORY", report->id, size-1);
        hid_debug_event(hdev, buff);
        /* TODO */
        break;
    case REPORT_BL_READ_MEMORY:
        snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
                "REPORT_BL_READ_MEMORY", report->id, size-1);
        hid_debug_event(hdev, buff);
        /* TODO */
        break;
    case REPORT_BL_WRITE_MEMORY:
        snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
                "REPORT_BL_WRITE_MEMORY", report->id, size-1);
        hid_debug_event(hdev, buff);
        /* TODO */
        break;
    case REPORT_DEVID:
        snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
                "REPORT_DEVID", report->id, size-1);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tSerial: 0x%02x%02x%02x%02x\n",
                raw_data[1], raw_data[2], raw_data[3], raw_data[4]);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tType: 0x%02x\n",
                raw_data[5]);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_SPLASH_SIZE:
        snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
                "REPORT_SPLASH_SIZE", report->id, size-1);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tTotal splash space: %d\n",
                (raw_data[2] << 8) | raw_data[1]);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tUsed splash space: %d\n",
                (raw_data[4] << 8) | raw_data[3]);
        hid_debug_event(hdev, buff);
        break;
    case REPORT_HOOK_VERSION:
        snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
                "REPORT_HOOK_VERSION", report->id, size-1);
        hid_debug_event(hdev, buff);
        snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n",
                raw_data[1], raw_data[2]);
        hid_debug_event(hdev, buff);
        break;
    default:
        snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
                "<unknown>", report->id, size-1);
        hid_debug_event(hdev, buff);
        break;
    }
    wake_up_interruptible(&hdev->debug_wait);
    kfree(buff);
}
/*
 * Create the per-device debugfs nodes ("reset", "eeprom", "flash") under
 * the hid debug directory.  Which nodes appear, and with which access
 * bits, depends on which reports the device actually exposes.
 */
static void picolcd_init_devfs(struct picolcd_data *data,
        struct hid_report *eeprom_r, struct hid_report *eeprom_w,
        struct hid_report *flash_r, struct hid_report *flash_w,
        struct hid_report *reset)
{
    struct hid_device *hdev = data->hdev;

    mutex_init(&data->mutex_flash);

    /* reset node: root-only */
    if (reset)
        data->debug_reset = debugfs_create_file("reset", 0600,
                hdev->debug_dir, data, &picolcd_debug_reset_fops);

    /* eeprom node: permission bits mirror which reports exist */
    if (eeprom_r || eeprom_w)
        data->debug_eeprom = debugfs_create_file("eeprom",
                (eeprom_w ? S_IWUSR : 0) | (eeprom_r ? S_IRUSR : 0),
                hdev->debug_dir, data, &picolcd_debug_eeprom_fops);

    /* flash node: derive the address width from the READ_MEMORY layout */
    data->addr_sz = -1;
    if (flash_r && flash_r->maxfield == 1 &&
            flash_r->field[0]->report_size == 8)
        data->addr_sz = flash_r->field[0]->report_count - 1;

    switch (data->addr_sz) {
    case 2:
    case 3:
        data->debug_flash = debugfs_create_file("flash",
                (flash_w ? S_IWUSR : 0) | (flash_r ? S_IRUSR : 0),
                hdev->debug_dir, data, &picolcd_debug_flash_fops);
        break;
    default:
        if (flash_r || flash_w)
            hid_warn(hdev, "Unexpected FLASH access reports, please submit rdesc for review\n");
    }
}
/*
 * Tear down the debugfs nodes created by picolcd_init_devfs().  Each
 * pointer is cleared before the dentry is removed so concurrent users
 * see NULL rather than a dying dentry.
 */
static void picolcd_exit_devfs(struct picolcd_data *data)
{
    struct dentry *reset_dent = data->debug_reset;
    struct dentry *eeprom_dent = data->debug_eeprom;
    struct dentry *flash_dent = data->debug_flash;

    data->debug_reset = NULL;
    if (reset_dent)
        debugfs_remove(reset_dent);
    data->debug_eeprom = NULL;
    if (eeprom_dent)
        debugfs_remove(eeprom_dent);
    data->debug_flash = NULL;
    if (flash_dent)
        debugfs_remove(flash_dent);
    mutex_destroy(&data->mutex_flash);
}
#else
/* No-op stubs used when CONFIG_DEBUG_FS is disabled (this is the #else
 * branch of the debugfs conditional); they keep callers unconditional. */
static inline void picolcd_debug_raw_event(struct picolcd_data *data,
        struct hid_device *hdev, struct hid_report *report,
        u8 *raw_data, int size)
{
}
static inline void picolcd_init_devfs(struct picolcd_data *data,
        struct hid_report *eeprom_r, struct hid_report *eeprom_w,
        struct hid_report *flash_r, struct hid_report *flash_w,
        struct hid_report *reset)
{
}
static inline void picolcd_exit_devfs(struct picolcd_data *data)
{
}
#endif /* CONFIG_DEBUG_FS */
/*
* Handle raw report as sent by device
*/
/*
 * Handle raw report as sent by device.
 *
 * Key/IR reports are routed to the input handlers; anything else may be
 * the reply a picolcd_send_and_wait() caller is blocked on, in which case
 * the payload is copied into the pending buffer and the waiter completed.
 *
 * Fix: sanity-check the report size before the memcpy into the fixed
 * 64-byte pending->raw_data buffer — a malicious or buggy device could
 * otherwise overflow it (CVE-2014-3186).
 */
static int picolcd_raw_event(struct hid_device *hdev,
        struct hid_report *report, u8 *raw_data, int size)
{
    struct picolcd_data *data = hid_get_drvdata(hdev);
    unsigned long flags;
    int ret = 0;

    if (!data)
        return 1;

    if (size > 64) {
        hid_warn(hdev, "invalid size value (%d) for picolcd raw event (%d)\n",
                size, report->id);
        return 0;
    }

    if (report->id == REPORT_KEY_STATE) {
        if (data->input_keys)
            ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
    } else if (report->id == REPORT_IR_DATA) {
        if (data->input_cir)
            ret = picolcd_raw_cir(data, report, raw_data+1, size-1);
    } else {
        spin_lock_irqsave(&data->lock, flags);
        /*
         * We let the caller of picolcd_send_and_wait() check if the
         * report we got is one of the expected ones or not.
         */
        if (data->pending) {
            memcpy(data->pending->raw_data, raw_data+1, size-1);
            data->pending->raw_size  = size-1;
            data->pending->in_report = report;
            complete(&data->pending->ready);
        }
        spin_unlock_irqrestore(&data->lock, flags);
    }
    picolcd_debug_raw_event(data, hdev, report, raw_data, size);
    return 1;
}
#ifdef CONFIG_PM
/*
 * System suspend hook: park the backlight unless this is a runtime
 * (auto) suspend, where the display state must be left alone.
 */
static int picolcd_suspend(struct hid_device *hdev, pm_message_t message)
{
    if (!(message.event & PM_EVENT_AUTO)) {
        picolcd_suspend_backlight(hid_get_drvdata(hdev));
        dbg_hid(PICOLCD_NAME " device ready for suspend\n");
    }
    return 0;
}
/* Resume hook: restore the backlight; a failure is only logged. */
static int picolcd_resume(struct hid_device *hdev)
{
    int err = picolcd_resume_backlight(hid_get_drvdata(hdev));

    if (err)
        dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", err);
    return 0;
}
/*
 * Resume after the device lost state (USB reset-resume): reset the
 * device, then re-establish framebuffer content, LCD, backlight and LED
 * state in that order.  Each step's failure is logged but does not abort
 * the remaining restores.
 */
static int picolcd_reset_resume(struct hid_device *hdev)
{
    int ret;
    ret = picolcd_reset(hdev);
    if (ret)
        dbg_hid(PICOLCD_NAME " resetting our device failed: %d\n", ret);
    ret = picolcd_fb_reset(hid_get_drvdata(hdev), 0);
    if (ret)
        dbg_hid(PICOLCD_NAME " restoring framebuffer content failed: %d\n", ret);
    ret = picolcd_resume_lcd(hid_get_drvdata(hdev));
    if (ret)
        dbg_hid(PICOLCD_NAME " restoring lcd failed: %d\n", ret);
    ret = picolcd_resume_backlight(hid_get_drvdata(hdev));
    if (ret)
        dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret);
    picolcd_leds_set(hid_get_drvdata(hdev));
    return 0;
}
#endif
/* initialize keypad input device */
/*
 * Register an input device for the keypad, backed by the KEY_STATE
 * report.  Returns 0 on success or a negative errno; on any failure the
 * allocated input device is released again.
 */
static int picolcd_init_keys(struct picolcd_data *data,
        struct hid_report *report)
{
    struct hid_device *hdev = data->hdev;
    struct input_dev *input;
    int err, key;

    if (!report)
        return -ENODEV;
    /* the keypad state must be exactly two 8-bit key codes */
    if (report->maxfield != 1 || report->field[0]->report_count != 2 ||
            report->field[0]->report_size != 8) {
        hid_err(hdev, "unsupported KEY_STATE report\n");
        return -EINVAL;
    }

    input = input_allocate_device();
    if (!input) {
        hid_err(hdev, "failed to allocate input device\n");
        return -ENOMEM;
    }

    input_set_drvdata(input, hdev);
    memcpy(data->keycode, def_keymap, sizeof(def_keymap));

    /* identify the input device via the parent HID device */
    input->name        = hdev->name;
    input->phys        = hdev->phys;
    input->uniq        = hdev->uniq;
    input->id.bustype  = hdev->bus;
    input->id.vendor   = hdev->vendor;
    input->id.product  = hdev->product;
    input->id.version  = hdev->version;
    input->dev.parent  = hdev->dev.parent;
    input->keycode     = &data->keycode;
    input->keycodemax  = PICOLCD_KEYS;
    input->keycodesize = sizeof(data->keycode[0]);

    input_set_capability(input, EV_MSC, MSC_SCAN);
    set_bit(EV_REP, input->evbit);
    for (key = 0; key < PICOLCD_KEYS; key++)
        input_set_capability(input, EV_KEY, data->keycode[key]);

    err = input_register_device(input);
    if (err) {
        hid_err(hdev, "error registering the input device\n");
        input_free_device(input);
        return err;
    }
    data->input_keys = input;
    return 0;
}
/* Unregister the keypad input device, clearing the pointer first. */
static void picolcd_exit_keys(struct picolcd_data *data)
{
    struct input_dev *keys = data->input_keys;

    data->input_keys = NULL;
    if (keys)
        input_unregister_device(keys);
}
/* initialize CIR input device */
/* CIR (consumer IR) input: placeholder stubs, support not implemented yet. */
static inline int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report)
{
    /* support not implemented yet */
    return 0;
}
static inline void picolcd_exit_cir(struct picolcd_data *data)
{
}
/*
 * Firmware-mode probe: verify firmware version, then bring up keypad,
 * CIR, framebuffer, lcd/backlight/LED class devices and debugfs nodes.
 * On failure everything already set up is torn down (the exit helpers
 * are no-ops for parts that were never initialized).
 *
 * Fix: the untested-firmware warning used '&&', which only fired when
 * BOTH the major and minor differed from the tested 0.3 revision (so
 * e.g. 0.4 or 1.3 were silently accepted); '||' warns for anything
 * other than exactly 0.3.
 */
static int picolcd_probe_lcd(struct hid_device *hdev, struct picolcd_data *data)
{
    int error;

    error = picolcd_check_version(hdev);
    if (error)
        return error;

    if (data->version[0] != 0 || data->version[1] != 3)
        hid_info(hdev, "Device with untested firmware revision, please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
                dev_name(&hdev->dev));

    /* Setup keypad input device */
    error = picolcd_init_keys(data, picolcd_in_report(REPORT_KEY_STATE, hdev));
    if (error)
        goto err;

    /* Setup CIR input device */
    error = picolcd_init_cir(data, picolcd_in_report(REPORT_IR_DATA, hdev));
    if (error)
        goto err;

    /* Set up the framebuffer device */
    error = picolcd_init_framebuffer(data);
    if (error)
        goto err;

    /* Setup lcd class device */
    error = picolcd_init_lcd(data, picolcd_out_report(REPORT_CONTRAST, hdev));
    if (error)
        goto err;

    /* Setup backlight class device */
    error = picolcd_init_backlight(data, picolcd_out_report(REPORT_BRIGHTNESS, hdev));
    if (error)
        goto err;

    /* Setup the LED class devices */
    error = picolcd_init_leds(data, picolcd_out_report(REPORT_LED_STATE, hdev));
    if (error)
        goto err;

    picolcd_init_devfs(data, picolcd_out_report(REPORT_EE_READ, hdev),
            picolcd_out_report(REPORT_EE_WRITE, hdev),
            picolcd_out_report(REPORT_READ_MEMORY, hdev),
            picolcd_out_report(REPORT_WRITE_MEMORY, hdev),
            picolcd_out_report(REPORT_RESET, hdev));
    return 0;
err:
    /* tear down in reverse order of initialization */
    picolcd_exit_leds(data);
    picolcd_exit_backlight(data);
    picolcd_exit_lcd(data);
    picolcd_exit_framebuffer(data);
    picolcd_exit_cir(data);
    picolcd_exit_keys(data);
    return error;
}
/*
 * Bootloader-mode probe: verify bootloader version and expose only the
 * bootloader flash read/write reports via debugfs.
 *
 * Fix: as in picolcd_probe_lcd(), the version warning used '&&' and so
 * only fired when BOTH components differed from the tested 1.0 revision;
 * '||' warns for anything other than exactly 1.0.
 */
static int picolcd_probe_bootloader(struct hid_device *hdev, struct picolcd_data *data)
{
    int error;

    error = picolcd_check_version(hdev);
    if (error)
        return error;

    if (data->version[0] != 1 || data->version[1] != 0)
        hid_info(hdev, "Device with untested bootloader revision, please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
                dev_name(&hdev->dev));

    picolcd_init_devfs(data, NULL, NULL,
            picolcd_out_report(REPORT_BL_READ_MEMORY, hdev),
            picolcd_out_report(REPORT_BL_WRITE_MEMORY, hdev), NULL);
    return 0;
}
/*
 * Device probe: allocate per-device state, start the HID hardware, open
 * the interrupt pipe (for key/IR events), create sysfs attributes and
 * then dispatch to the bootloader- or firmware-mode probe depending on
 * the USB product id.  The goto ladder unwinds in strict reverse order.
 */
static int picolcd_probe(struct hid_device *hdev,
        const struct hid_device_id *id)
{
    struct picolcd_data *data;
    int error = -ENOMEM;

    dbg_hid(PICOLCD_NAME " hardware probe...\n");

    /*
     * Let's allocate the picolcd data structure, set some reasonable
     * defaults, and associate it with the device
     */
    data = kzalloc(sizeof(struct picolcd_data), GFP_KERNEL);
    if (data == NULL) {
        hid_err(hdev, "can't allocate space for Minibox PicoLCD device data\n");
        error = -ENOMEM;
        goto err_no_cleanup;
    }

    spin_lock_init(&data->lock);
    mutex_init(&data->mutex);
    data->hdev = hdev;
    data->opmode_delay = 5000;  /* default mode-switch delay in ms */
    if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER)
        data->status |= PICOLCD_BOOTLOADER;
    hid_set_drvdata(hdev, data);

    /* Parse the device reports and start it up */
    error = hid_parse(hdev);
    if (error) {
        hid_err(hdev, "device report parse failed\n");
        goto err_cleanup_data;
    }

    error = hid_hw_start(hdev, 0);
    if (error) {
        hid_err(hdev, "hardware start failed\n");
        goto err_cleanup_data;
    }

    error = hid_hw_open(hdev);
    if (error) {
        hid_err(hdev, "failed to open input interrupt pipe for key and IR events\n");
        goto err_cleanup_hid_hw;
    }

    error = device_create_file(&hdev->dev, &dev_attr_operation_mode_delay);
    if (error) {
        hid_err(hdev, "failed to create sysfs attributes\n");
        goto err_cleanup_hid_ll;
    }

    error = device_create_file(&hdev->dev, &dev_attr_operation_mode);
    if (error) {
        hid_err(hdev, "failed to create sysfs attributes\n");
        goto err_cleanup_sysfs1;
    }

    if (data->status & PICOLCD_BOOTLOADER)
        error = picolcd_probe_bootloader(hdev, data);
    else
        error = picolcd_probe_lcd(hdev, data);
    if (error)
        goto err_cleanup_sysfs2;

    dbg_hid(PICOLCD_NAME " activated and initialized\n");
    return 0;

err_cleanup_sysfs2:
    device_remove_file(&hdev->dev, &dev_attr_operation_mode);
err_cleanup_sysfs1:
    device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
err_cleanup_hid_ll:
    hid_hw_close(hdev);
err_cleanup_hid_hw:
    hid_hw_stop(hdev);
err_cleanup_data:
    kfree(data);
err_no_cleanup:
    hid_set_drvdata(hdev, NULL);
    return error;
}
/*
 * Device removal: mark the device failed, detach the framebuffer early,
 * remove the user-visible interfaces, stop the HID layer, then wake any
 * waiter still blocked on a reply before freeing the subsystems and the
 * per-device state.  The teardown order mirrors probe in reverse.
 */
static void picolcd_remove(struct hid_device *hdev)
{
    struct picolcd_data *data = hid_get_drvdata(hdev);
    unsigned long flags;

    dbg_hid(PICOLCD_NAME " hardware remove...\n");
    /* flag failure so in-flight operations bail out early */
    spin_lock_irqsave(&data->lock, flags);
    data->status |= PICOLCD_FAILED;
    spin_unlock_irqrestore(&data->lock, flags);
#ifdef CONFIG_HID_PICOLCD_FB
    /* short-circuit FB as early as possible in order to
     * avoid long delays if we host console.
     */
    if (data->fb_info)
        data->fb_info->par = NULL;
#endif

    picolcd_exit_devfs(data);
    device_remove_file(&hdev->dev, &dev_attr_operation_mode);
    device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
    hid_hw_close(hdev);
    hid_hw_stop(hdev);
    hid_set_drvdata(hdev, NULL);

    /* Shortcut potential pending reply that will never arrive */
    spin_lock_irqsave(&data->lock, flags);
    if (data->pending)
        complete(&data->pending->ready);
    spin_unlock_irqrestore(&data->lock, flags);

    /* Cleanup LED */
    picolcd_exit_leds(data);
    /* Clean up the framebuffer */
    picolcd_exit_backlight(data);
    picolcd_exit_lcd(data);
    picolcd_exit_framebuffer(data);
    /* Cleanup input */
    picolcd_exit_cir(data);
    picolcd_exit_keys(data);
    mutex_destroy(&data->mutex);
    /* Finally, clean up the picolcd data itself */
    kfree(data);
}
/* USB ids handled by this driver: the PicoLCD itself and its bootloader. */
static const struct hid_device_id picolcd_devices[] = {
    { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
    { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
    { }
};
MODULE_DEVICE_TABLE(hid, picolcd_devices);
/* HID driver glue; PM callbacks only exist when CONFIG_PM is set. */
static struct hid_driver picolcd_driver = {
    .name = "hid-picolcd",
    .id_table = picolcd_devices,
    .probe = picolcd_probe,
    .remove = picolcd_remove,
    .raw_event = picolcd_raw_event,
#ifdef CONFIG_PM
    .suspend = picolcd_suspend,
    .resume = picolcd_resume,
    .reset_resume = picolcd_reset_resume,
#endif
};
/* Module init: register the HID driver. */
static int __init picolcd_init(void)
{
    return hid_register_driver(&picolcd_driver);
}
/*
 * Module exit: unregister the driver, then flush any still-queued
 * framebuffer cleanup work so no deferred work outlives the module.
 */
static void __exit picolcd_exit(void)
{
    hid_unregister_driver(&picolcd_driver);
#ifdef CONFIG_HID_PICOLCD_FB
    flush_work_sync(&picolcd_fb_cleanup);
    WARN_ON(fb_pending);
#endif
}
/* Standard module entry/exit hooks and metadata. */
module_init(picolcd_init);
module_exit(picolcd_exit);
MODULE_DESCRIPTION("Minibox graphics PicoLCD Driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
openRPi/linux | drivers/gpu/drm/radeon/ni_dpm.c | 14 | 133514 | /*
* Copyright 2012 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "radeon.h"
#include "nid.h"
#include "r600_dpm.h"
#include "ni_dpm.h"
#include "atom.h"
#include <linux/math64.h>
#include <linux/seq_file.h>
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
#define MC_CG_ARB_FREQ_F3 0x0d
#define SMC_RAM_END 0xC000
/*
 * CAC (capacitance-based power estimation) weight table for the
 * Cayman XT SKU.  Field meanings are defined by struct ni_cac_weights
 * in ni_dpm.h; the values are opaque per-SKU calibration data and must
 * not be edited by hand.
 */
static const struct ni_cac_weights cac_weights_cayman_xt =
{
    0x15,
    0x2,
    0x19,
    0x2,
    0x8,
    0x14,
    0x2,
    0x16,
    0xE,
    0x17,
    0x13,
    0x2B,
    0x10,
    0x7,
    0x5,
    0x5,
    0x5,
    0x2,
    0x3,
    0x9,
    0x10,
    0x10,
    0x2B,
    0xA,
    0x9,
    0x4,
    0xD,
    0xD,
    0x3E,
    0x18,
    0x14,
    0,
    0x3,
    0x3,
    0x5,
    0,
    0x2,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    0x1CC,
    0,
    0x164,
    1,
    1,
    1,
    1,
    12,
    12,
    12,
    0x12,
    0x1F,
    132,
    5,
    7,
    0,
    { 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0 },
    true
};
/*
 * CAC weight table for the Cayman PRO SKU; see cac_weights_cayman_xt
 * and struct ni_cac_weights in ni_dpm.h.  Opaque calibration data.
 */
static const struct ni_cac_weights cac_weights_cayman_pro =
{
    0x16,
    0x4,
    0x10,
    0x2,
    0xA,
    0x16,
    0x2,
    0x18,
    0x10,
    0x1A,
    0x16,
    0x2D,
    0x12,
    0xA,
    0x6,
    0x6,
    0x6,
    0x2,
    0x4,
    0xB,
    0x11,
    0x11,
    0x2D,
    0xC,
    0xC,
    0x7,
    0x10,
    0x10,
    0x3F,
    0x1A,
    0x16,
    0,
    0x7,
    0x4,
    0x6,
    1,
    0x2,
    0x1,
    0,
    0,
    0,
    0,
    0,
    0,
    0x30,
    0,
    0x1CF,
    0,
    0x166,
    1,
    1,
    1,
    1,
    12,
    12,
    12,
    0x15,
    0x1F,
    132,
    6,
    6,
    0,
    { 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0 },
    true
};
/*
 * CAC weight table for the Cayman LE SKU; see cac_weights_cayman_xt
 * and struct ni_cac_weights in ni_dpm.h.  Opaque calibration data.
 */
static const struct ni_cac_weights cac_weights_cayman_le =
{
    0x7,
    0xE,
    0x1,
    0xA,
    0x1,
    0x3F,
    0x2,
    0x18,
    0x10,
    0x1A,
    0x1,
    0x3F,
    0x1,
    0xE,
    0x6,
    0x6,
    0x6,
    0x2,
    0x4,
    0x9,
    0x1A,
    0x1A,
    0x2C,
    0xA,
    0x11,
    0x8,
    0x19,
    0x19,
    0x1,
    0x1,
    0x1A,
    0,
    0x8,
    0x5,
    0x8,
    0x1,
    0x3,
    0x1,
    0,
    0,
    0,
    0,
    0,
    0,
    0x38,
    0x38,
    0x239,
    0x3,
    0x18A,
    1,
    1,
    1,
    1,
    12,
    12,
    12,
    0x15,
    0x22,
    132,
    6,
    6,
    0,
    { 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0 },
    true
};
#define NISLANDS_MGCG_SEQUENCE 300
/*
 * Coarse/medium clock-gating (CGCG/CGLS) default register sequence for
 * Cayman, stored as {register offset, value, mask} triples (hence the
 * divide-by-3 in the *_LENGTH macro below).  The alternating
 * 0x8f8/0x8fc entries appear to be an index/data register pair —
 * confirm against the Cayman register documentation before editing.
 */
static const u32 cayman_cgcg_cgls_default[] =
{
    0x000008f8, 0x00000010, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000011, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000012, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000013, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000014, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000015, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000016, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000017, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000018, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000019, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x0000001a, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x0000001b, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000020, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000021, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000022, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000023, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000024, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000025, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000026, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000027, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000028, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000029, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x0000002a, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x0000002b, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff
};
/* number of {reg, value, mask} triples; parenthesized so the macro is
 * safe inside larger expressions (e.g. `x / LENGTH`) */
#define CAYMAN_CGCG_CGLS_DEFAULT_LENGTH (sizeof(cayman_cgcg_cgls_default) / (3 * sizeof(u32)))
/*
 * CGCG/CGLS "disable" register sequence for Cayman: same
 * {register offset, value, mask} triple layout as
 * cayman_cgcg_cgls_default.  Opaque hardware data — do not hand-edit.
 */
static const u32 cayman_cgcg_cgls_disable[] =
{
    0x000008f8, 0x00000010, 0xffffffff,
    0x000008fc, 0xffffffff, 0xffffffff,
    0x000008f8, 0x00000011, 0xffffffff,
    0x000008fc, 0xffffffff, 0xffffffff,
    0x000008f8, 0x00000012, 0xffffffff,
    0x000008fc, 0xffffffff, 0xffffffff,
    0x000008f8, 0x00000013, 0xffffffff,
    0x000008fc, 0xffffffff, 0xffffffff,
    0x000008f8, 0x00000014, 0xffffffff,
    0x000008fc, 0xffffffff, 0xffffffff,
    0x000008f8, 0x00000015, 0xffffffff,
    0x000008fc, 0xffffffff, 0xffffffff,
    0x000008f8, 0x00000016, 0xffffffff,
    0x000008fc, 0xffffffff, 0xffffffff,
    0x000008f8, 0x00000017, 0xffffffff,
    0x000008fc, 0xffffffff, 0xffffffff,
    0x000008f8, 0x00000018, 0xffffffff,
    0x000008fc, 0xffffffff, 0xffffffff,
    0x000008f8, 0x00000019, 0xffffffff,
    0x000008fc, 0xffffffff, 0xffffffff,
    0x000008f8, 0x0000001a, 0xffffffff,
    0x000008fc, 0xffffffff, 0xffffffff,
    0x000008f8, 0x0000001b, 0xffffffff,
    0x000008fc, 0xffffffff, 0xffffffff,
    0x000008f8, 0x00000020, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000021, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000022, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000023, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000024, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000025, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000026, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000027, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000028, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x00000029, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x0000002a, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x000008f8, 0x0000002b, 0xffffffff,
    0x000008fc, 0x00000000, 0xffffffff,
    0x00000644, 0x000f7902, 0x001f4180,
    0x00000644, 0x000f3802, 0x001f4180
};
#define CAYMAN_CGCG_CGLS_DISABLE_LENGTH sizeof(cayman_cgcg_cgls_disable) / (3 * sizeof(u32))
static const u32 cayman_cgcg_cgls_enable[] =
{
0x00000644, 0x000f7882, 0x001f4080,
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000011, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000012, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000013, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000014, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000015, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000016, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000017, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000018, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000019, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x0000001a, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x0000001b, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000020, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000021, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000022, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000023, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000024, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000025, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000026, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000027, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000028, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000029, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x0000002a, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x0000002b, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff
};
#define CAYMAN_CGCG_CGLS_ENABLE_LENGTH sizeof(cayman_cgcg_cgls_enable) / (3 * sizeof(u32))
static const u32 cayman_mgcg_default[] =
{
0x0000802c, 0xc0000000, 0xffffffff,
0x00003fc4, 0xc0000000, 0xffffffff,
0x00005448, 0x00000100, 0xffffffff,
0x000055e4, 0x00000100, 0xffffffff,
0x0000160c, 0x00000100, 0xffffffff,
0x00008984, 0x06000100, 0xffffffff,
0x0000c164, 0x00000100, 0xffffffff,
0x00008a18, 0x00000100, 0xffffffff,
0x0000897c, 0x06000100, 0xffffffff,
0x00008b28, 0x00000100, 0xffffffff,
0x00009144, 0x00800200, 0xffffffff,
0x00009a60, 0x00000100, 0xffffffff,
0x00009868, 0x00000100, 0xffffffff,
0x00008d58, 0x00000100, 0xffffffff,
0x00009510, 0x00000100, 0xffffffff,
0x0000949c, 0x00000100, 0xffffffff,
0x00009654, 0x00000100, 0xffffffff,
0x00009030, 0x00000100, 0xffffffff,
0x00009034, 0x00000100, 0xffffffff,
0x00009038, 0x00000100, 0xffffffff,
0x0000903c, 0x00000100, 0xffffffff,
0x00009040, 0x00000100, 0xffffffff,
0x0000a200, 0x00000100, 0xffffffff,
0x0000a204, 0x00000100, 0xffffffff,
0x0000a208, 0x00000100, 0xffffffff,
0x0000a20c, 0x00000100, 0xffffffff,
0x00009744, 0x00000100, 0xffffffff,
0x00003f80, 0x00000100, 0xffffffff,
0x0000a210, 0x00000100, 0xffffffff,
0x0000a214, 0x00000100, 0xffffffff,
0x000004d8, 0x00000100, 0xffffffff,
0x00009664, 0x00000100, 0xffffffff,
0x00009698, 0x00000100, 0xffffffff,
0x000004d4, 0x00000200, 0xffffffff,
0x000004d0, 0x00000000, 0xffffffff,
0x000030cc, 0x00000104, 0xffffffff,
0x0000d0c0, 0x00000100, 0xffffffff,
0x0000d8c0, 0x00000100, 0xffffffff,
0x0000802c, 0x40000000, 0xffffffff,
0x00003fc4, 0x40000000, 0xffffffff,
0x0000915c, 0x00010000, 0xffffffff,
0x00009160, 0x00030002, 0xffffffff,
0x00009164, 0x00050004, 0xffffffff,
0x00009168, 0x00070006, 0xffffffff,
0x00009178, 0x00070000, 0xffffffff,
0x0000917c, 0x00030002, 0xffffffff,
0x00009180, 0x00050004, 0xffffffff,
0x0000918c, 0x00010006, 0xffffffff,
0x00009190, 0x00090008, 0xffffffff,
0x00009194, 0x00070000, 0xffffffff,
0x00009198, 0x00030002, 0xffffffff,
0x0000919c, 0x00050004, 0xffffffff,
0x000091a8, 0x00010006, 0xffffffff,
0x000091ac, 0x00090008, 0xffffffff,
0x000091b0, 0x00070000, 0xffffffff,
0x000091b4, 0x00030002, 0xffffffff,
0x000091b8, 0x00050004, 0xffffffff,
0x000091c4, 0x00010006, 0xffffffff,
0x000091c8, 0x00090008, 0xffffffff,
0x000091cc, 0x00070000, 0xffffffff,
0x000091d0, 0x00030002, 0xffffffff,
0x000091d4, 0x00050004, 0xffffffff,
0x000091e0, 0x00010006, 0xffffffff,
0x000091e4, 0x00090008, 0xffffffff,
0x000091e8, 0x00000000, 0xffffffff,
0x000091ec, 0x00070000, 0xffffffff,
0x000091f0, 0x00030002, 0xffffffff,
0x000091f4, 0x00050004, 0xffffffff,
0x00009200, 0x00010006, 0xffffffff,
0x00009204, 0x00090008, 0xffffffff,
0x00009208, 0x00070000, 0xffffffff,
0x0000920c, 0x00030002, 0xffffffff,
0x00009210, 0x00050004, 0xffffffff,
0x0000921c, 0x00010006, 0xffffffff,
0x00009220, 0x00090008, 0xffffffff,
0x00009224, 0x00070000, 0xffffffff,
0x00009228, 0x00030002, 0xffffffff,
0x0000922c, 0x00050004, 0xffffffff,
0x00009238, 0x00010006, 0xffffffff,
0x0000923c, 0x00090008, 0xffffffff,
0x00009240, 0x00070000, 0xffffffff,
0x00009244, 0x00030002, 0xffffffff,
0x00009248, 0x00050004, 0xffffffff,
0x00009254, 0x00010006, 0xffffffff,
0x00009258, 0x00090008, 0xffffffff,
0x0000925c, 0x00070000, 0xffffffff,
0x00009260, 0x00030002, 0xffffffff,
0x00009264, 0x00050004, 0xffffffff,
0x00009270, 0x00010006, 0xffffffff,
0x00009274, 0x00090008, 0xffffffff,
0x00009278, 0x00070000, 0xffffffff,
0x0000927c, 0x00030002, 0xffffffff,
0x00009280, 0x00050004, 0xffffffff,
0x0000928c, 0x00010006, 0xffffffff,
0x00009290, 0x00090008, 0xffffffff,
0x000092a8, 0x00070000, 0xffffffff,
0x000092ac, 0x00030002, 0xffffffff,
0x000092b0, 0x00050004, 0xffffffff,
0x000092bc, 0x00010006, 0xffffffff,
0x000092c0, 0x00090008, 0xffffffff,
0x000092c4, 0x00070000, 0xffffffff,
0x000092c8, 0x00030002, 0xffffffff,
0x000092cc, 0x00050004, 0xffffffff,
0x000092d8, 0x00010006, 0xffffffff,
0x000092dc, 0x00090008, 0xffffffff,
0x00009294, 0x00000000, 0xffffffff,
0x0000802c, 0x40010000, 0xffffffff,
0x00003fc4, 0x40010000, 0xffffffff,
0x0000915c, 0x00010000, 0xffffffff,
0x00009160, 0x00030002, 0xffffffff,
0x00009164, 0x00050004, 0xffffffff,
0x00009168, 0x00070006, 0xffffffff,
0x00009178, 0x00070000, 0xffffffff,
0x0000917c, 0x00030002, 0xffffffff,
0x00009180, 0x00050004, 0xffffffff,
0x0000918c, 0x00010006, 0xffffffff,
0x00009190, 0x00090008, 0xffffffff,
0x00009194, 0x00070000, 0xffffffff,
0x00009198, 0x00030002, 0xffffffff,
0x0000919c, 0x00050004, 0xffffffff,
0x000091a8, 0x00010006, 0xffffffff,
0x000091ac, 0x00090008, 0xffffffff,
0x000091b0, 0x00070000, 0xffffffff,
0x000091b4, 0x00030002, 0xffffffff,
0x000091b8, 0x00050004, 0xffffffff,
0x000091c4, 0x00010006, 0xffffffff,
0x000091c8, 0x00090008, 0xffffffff,
0x000091cc, 0x00070000, 0xffffffff,
0x000091d0, 0x00030002, 0xffffffff,
0x000091d4, 0x00050004, 0xffffffff,
0x000091e0, 0x00010006, 0xffffffff,
0x000091e4, 0x00090008, 0xffffffff,
0x000091e8, 0x00000000, 0xffffffff,
0x000091ec, 0x00070000, 0xffffffff,
0x000091f0, 0x00030002, 0xffffffff,
0x000091f4, 0x00050004, 0xffffffff,
0x00009200, 0x00010006, 0xffffffff,
0x00009204, 0x00090008, 0xffffffff,
0x00009208, 0x00070000, 0xffffffff,
0x0000920c, 0x00030002, 0xffffffff,
0x00009210, 0x00050004, 0xffffffff,
0x0000921c, 0x00010006, 0xffffffff,
0x00009220, 0x00090008, 0xffffffff,
0x00009224, 0x00070000, 0xffffffff,
0x00009228, 0x00030002, 0xffffffff,
0x0000922c, 0x00050004, 0xffffffff,
0x00009238, 0x00010006, 0xffffffff,
0x0000923c, 0x00090008, 0xffffffff,
0x00009240, 0x00070000, 0xffffffff,
0x00009244, 0x00030002, 0xffffffff,
0x00009248, 0x00050004, 0xffffffff,
0x00009254, 0x00010006, 0xffffffff,
0x00009258, 0x00090008, 0xffffffff,
0x0000925c, 0x00070000, 0xffffffff,
0x00009260, 0x00030002, 0xffffffff,
0x00009264, 0x00050004, 0xffffffff,
0x00009270, 0x00010006, 0xffffffff,
0x00009274, 0x00090008, 0xffffffff,
0x00009278, 0x00070000, 0xffffffff,
0x0000927c, 0x00030002, 0xffffffff,
0x00009280, 0x00050004, 0xffffffff,
0x0000928c, 0x00010006, 0xffffffff,
0x00009290, 0x00090008, 0xffffffff,
0x000092a8, 0x00070000, 0xffffffff,
0x000092ac, 0x00030002, 0xffffffff,
0x000092b0, 0x00050004, 0xffffffff,
0x000092bc, 0x00010006, 0xffffffff,
0x000092c0, 0x00090008, 0xffffffff,
0x000092c4, 0x00070000, 0xffffffff,
0x000092c8, 0x00030002, 0xffffffff,
0x000092cc, 0x00050004, 0xffffffff,
0x000092d8, 0x00010006, 0xffffffff,
0x000092dc, 0x00090008, 0xffffffff,
0x00009294, 0x00000000, 0xffffffff,
0x0000802c, 0xc0000000, 0xffffffff,
0x00003fc4, 0xc0000000, 0xffffffff,
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000011, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000012, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000013, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000014, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000015, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000016, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000017, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000018, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000019, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x0000001a, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x0000001b, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff
};
#define CAYMAN_MGCG_DEFAULT_LENGTH sizeof(cayman_mgcg_default) / (3 * sizeof(u32))
/*
 * (reg, value, mask) triples that turn medium grain clock gating (MGCG)
 * off; applied by ni_mg_clockgating_enable(false) via
 * btc_program_mgcg_hw_sequence().
 */
static const u32 cayman_mgcg_disable[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000002, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000003, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x00009150, 0x00600000, 0xffffffff
};
/* number of (reg, value, mask) triples in the table above */
#define CAYMAN_MGCG_DISABLE_LENGTH sizeof(cayman_mgcg_disable) / (3 * sizeof(u32))
/*
 * (reg, value, mask) triples that turn medium grain clock gating (MGCG)
 * on; applied by ni_mg_clockgating_enable(true) via
 * btc_program_mgcg_hw_sequence().
 */
static const u32 cayman_mgcg_enable[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000002, 0xffffffff,
	0x000008fc, 0x00600000, 0xffffffff,
	0x000008f8, 0x00000003, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x00009150, 0x96944200, 0xffffffff
};
/* number of (reg, value, mask) triples in the table above */
#define CAYMAN_MGCG_ENABLE_LENGTH sizeof(cayman_mgcg_enable) / (3 * sizeof(u32))
#define NISLANDS_SYSLS_SEQUENCE 100
/*
 * Default (reset) values for the system light-sleep (LS) registers;
 * applied by ni_ls_clockgating_default() via btc_program_mgcg_hw_sequence().
 */
static const u32 cayman_sysls_default[] =
{
	/* Register, Value, Mask bits */
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x0000d8bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x00002f50, 0x00000404, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00008dfc, 0x00000000, 0xffffffff
};
/* number of (reg, value, mask) triples in the table above */
#define CAYMAN_SYSLS_DEFAULT_LENGTH sizeof(cayman_sysls_default) / (3 * sizeof(u32))
/*
 * (reg, value, mask) triples that disable system light sleep (LS);
 * applied by ni_ls_clockgating_enable(false) via btc_program_mgcg_hw_sequence().
 */
static const u32 cayman_sysls_disable[] =
{
	/* Register, Value, Mask bits */
	0x0000d0c0, 0x00000000, 0xffffffff,
	0x0000d8c0, 0x00000000, 0xffffffff,
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x0000d8bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x00041401, 0xffffffff,
	0x0000264c, 0x00040400, 0xffffffff,
	0x00002648, 0x00040400, 0xffffffff,
	0x00002650, 0x00040400, 0xffffffff,
	0x000020b8, 0x00040400, 0xffffffff,
	0x000020bc, 0x00040400, 0xffffffff,
	0x000020c0, 0x00040c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680000, 0xffffffff,
	0x00002f50, 0x00000404, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00007ffd, 0xffffffff,
	0x00000c7c, 0x0000ff00, 0xffffffff,
	0x00008dfc, 0x0000007f, 0xffffffff
};
/* number of (reg, value, mask) triples in the table above */
#define CAYMAN_SYSLS_DISABLE_LENGTH sizeof(cayman_sysls_disable) / (3 * sizeof(u32))
/*
 * (reg, value, mask) triples that enable system light sleep (LS);
 * applied by ni_ls_clockgating_enable(true) via btc_program_mgcg_hw_sequence().
 */
static const u32 cayman_sysls_enable[] =
{
	/* Register, Value, Mask bits */
	0x000055e8, 0x00000001, 0xffffffff,
	0x0000d0bc, 0x00000100, 0xffffffff,
	0x0000d8bc, 0x00000100, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x00002f50, 0x00000903, 0xffffffff,
	0x000004c8, 0x00000000, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00008dfc, 0x00000000, 0xffffffff
};
/* number of (reg, value, mask) triples in the table above */
#define CAYMAN_SYSLS_ENABLE_LENGTH sizeof(cayman_sysls_enable) / (3 * sizeof(u32))
struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
/* Fetch the NI-specific power info stashed in rdev->pm.dpm.priv. */
struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
{
	return rdev->pm.dpm.priv;
}
/* Fetch the NI-specific state data stashed in rps->ps_priv. */
struct ni_ps *ni_get_ps(struct radeon_ps *rps)
{
	return rps->ps_priv;
}
/*
 * Evaluate the leakage-power formula in drm 32.32 fixed point:
 *   leakage = i_leakage * kt * kv * vddc
 * with kt = (at/1000) * exp((bt/1000) * t) and
 *      kv = (av/1000) * exp((bv/1000) * v).
 * Inputs ileakage, v and t are scaled down by 1000 before use and the
 * result is scaled back up by 1000, then truncated to an integer in
 * *leakage.  (Units of v/t/ileakage are presumably mV/mC/mA — TODO
 * confirm against the callers of this helper.)
 */
static void ni_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
						     u16 v, s32 t,
						     u32 ileakage,
						     u32 *leakage)
{
	s64 kt, kv, leakage_w, i_leakage, vddc, temperature;

	/* convert the raw integer inputs to fixed point, pre-scaled by 1/1000 */
	i_leakage = div64_s64(drm_int2fixp(ileakage), 1000);
	vddc = div64_s64(drm_int2fixp(v), 1000);
	temperature = div64_s64(drm_int2fixp(t), 1000);

	/* temperature-dependent factor */
	kt = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->at), 1000),
			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bt), 1000), temperature)));
	/* voltage-dependent factor */
	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 1000),
			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 1000), vddc)));

	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);

	/* undo the 1/1000 pre-scaling and truncate to integer */
	*leakage = drm_fixp2int(leakage_w * 1000);
}
/*
 * Thin wrapper around ni_calculate_leakage_for_v_and_t_formula(); rdev is
 * accepted (unused here) to keep the signature uniform with other
 * leakage-calculation entry points.
 */
static void ni_calculate_leakage_for_v_and_t(struct radeon_device *rdev,
					     const struct ni_leakage_coeffients *coeff,
					     u16 v,
					     s32 t,
					     u32 i_leakage,
					     u32 *leakage)
{
	ni_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
}
/*
 * Report whether the current vblank interval is too short to hide an
 * mclk switch.  Only GDDR5 boards have a non-zero limit (450 us); the
 * non-GDDR5 limit was never hit in practice, so it is disabled (0).
 */
bool ni_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 limit_us = pi->mem_gddr5 ? 450 : 0;

	return r600_dpm_get_vblank_time(rdev) < limit_us;
}
/*
 * Clamp and massage the requested power state in place so that every
 * performance level obeys the active (AC or DC) clock/voltage limits,
 * the voltage dependency tables, the blacklist, and the mclk-switching
 * constraints.  Levels are also forced to be monotonically non-decreasing
 * in sclk/vddc (and mclk/vddci when switching is allowed).
 */
static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ni_ps *ps = ni_get_ps(rps);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 mclk, sclk;
	u16 vddc, vddci;
	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
	int i;

	/* mclk switching is unsafe with more than one active crtc or a
	 * vblank too short to hide the switch */
	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ni_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* on DC power, clamp every level to the DC limits */
	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
			if (ps->performance_levels[i].vddc > max_limits->vddc)
				ps->performance_levels[i].vddc = max_limits->vddc;
			if (ps->performance_levels[i].vddci > max_limits->vddci)
				ps->performance_levels[i].vddci = max_limits->vddci;
		}
	}

	/* limit clocks to max supported clocks based on voltage dependency tables */
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							&max_sclk_vddc);
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							&max_mclk_vddci);
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							&max_mclk_vddc);

	/* a limit of 0 means "no limit known" — skip the clamp */
	for (i = 0; i < ps->performance_level_count; i++) {
		if (max_sclk_vddc) {
			if (ps->performance_levels[i].sclk > max_sclk_vddc)
				ps->performance_levels[i].sclk = max_sclk_vddc;
		}
		if (max_mclk_vddci) {
			if (ps->performance_levels[i].mclk > max_mclk_vddci)
				ps->performance_levels[i].mclk = max_mclk_vddci;
		}
		if (max_mclk_vddc) {
			if (ps->performance_levels[i].mclk > max_mclk_vddc)
				ps->performance_levels[i].mclk = max_mclk_vddc;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		/* pin mclk/vddci at the highest level so no switch is needed */
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
		vddc = ps->performance_levels[0].vddc;
		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
	} else {
		sclk = ps->performance_levels[0].sclk;
		mclk = ps->performance_levels[0].mclk;
		vddc = ps->performance_levels[0].vddc;
		vddci = ps->performance_levels[0].vddci;
	}

	/* adjusted low state */
	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;
	ps->performance_levels[0].vddc = vddc;
	ps->performance_levels[0].vddci = vddci;

	btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
				  &ps->performance_levels[0].sclk,
				  &ps->performance_levels[0].mclk);

	/* sclk/vddc must not decrease from one level to the next */
	for (i = 1; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
			ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
		if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
			ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
	}

	if (disable_mclk_switching) {
		/* force every level to the single highest mclk/vddci */
		mclk = ps->performance_levels[0].mclk;
		for (i = 1; i < ps->performance_level_count; i++) {
			if (mclk < ps->performance_levels[i].mclk)
				mclk = ps->performance_levels[i].mclk;
		}
		for (i = 0; i < ps->performance_level_count; i++) {
			ps->performance_levels[i].mclk = mclk;
			ps->performance_levels[i].vddci = vddci;
		}
	} else {
		/* mclk/vddci must not decrease from one level to the next */
		for (i = 1; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
		}
	}

	for (i = 1; i < ps->performance_level_count; i++)
		btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
					  &ps->performance_levels[i].sclk,
					  &ps->performance_levels[i].mclk);

	for (i = 0; i < ps->performance_level_count; i++)
		btc_adjust_clock_combinations(rdev, max_limits,
					      &ps->performance_levels[i]);

	/* raise voltages to satisfy the dependency tables */
	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
						   ps->performance_levels[i].sclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddci, &ps->performance_levels[i].vddci);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
						   rdev->clock.current_dispclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
	}

	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_delta_rules(rdev,
					      max_limits->vddc, max_limits->vddci,
					      &ps->performance_levels[i].vddc,
					      &ps->performance_levels[i].vddci);
	}

	/* the state is DC compatible only if no level exceeds the DC vddc limit */
	ps->dc_compatible = true;
	for (i = 0; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].vddc > rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
			ps->dc_compatible = false;

		/* PCIe gen2 needs a minimum vddc */
		if (ps->performance_levels[i].vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
			ps->performance_levels[i].flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
	}
}
/* Program the default coarse grain clock gating (CGCG/CGLS) register state. */
static void ni_cg_clockgating_default(struct radeon_device *rdev)
{
	btc_program_mgcg_hw_sequence(rdev,
				     (const u32 *)&cayman_cgcg_cgls_default,
				     CAYMAN_CGCG_CGLS_DEFAULT_LENGTH);
}
/* Enable or disable coarse grain clock gating (CGCG/CGLS) for the GFX block. */
static void ni_gfx_clockgating_enable(struct radeon_device *rdev,
				      bool enable)
{
	const u32 *seq;
	u32 seq_len;

	if (enable) {
		seq = (const u32 *)&cayman_cgcg_cgls_enable;
		seq_len = CAYMAN_CGCG_CGLS_ENABLE_LENGTH;
	} else {
		seq = (const u32 *)&cayman_cgcg_cgls_disable;
		seq_len = CAYMAN_CGCG_CGLS_DISABLE_LENGTH;
	}

	btc_program_mgcg_hw_sequence(rdev, seq, seq_len);
}
/* Program the default medium grain clock gating (MGCG) register state. */
static void ni_mg_clockgating_default(struct radeon_device *rdev)
{
	btc_program_mgcg_hw_sequence(rdev,
				     (const u32 *)&cayman_mgcg_default,
				     CAYMAN_MGCG_DEFAULT_LENGTH);
}
/* Enable or disable medium grain clock gating (MGCG). */
static void ni_mg_clockgating_enable(struct radeon_device *rdev,
				     bool enable)
{
	const u32 *seq;
	u32 seq_len;

	if (enable) {
		seq = (const u32 *)&cayman_mgcg_enable;
		seq_len = CAYMAN_MGCG_ENABLE_LENGTH;
	} else {
		seq = (const u32 *)&cayman_mgcg_disable;
		seq_len = CAYMAN_MGCG_DISABLE_LENGTH;
	}

	btc_program_mgcg_hw_sequence(rdev, seq, seq_len);
}
/* Program the default system light sleep (LS) register state. */
static void ni_ls_clockgating_default(struct radeon_device *rdev)
{
	btc_program_mgcg_hw_sequence(rdev,
				     (const u32 *)&cayman_sysls_default,
				     CAYMAN_SYSLS_DEFAULT_LENGTH);
}
/* Enable or disable system light sleep (LS). */
static void ni_ls_clockgating_enable(struct radeon_device *rdev,
				     bool enable)
{
	const u32 *seq;
	u32 seq_len;

	if (enable) {
		seq = (const u32 *)&cayman_sysls_enable;
		seq_len = CAYMAN_SYSLS_ENABLE_LENGTH;
	} else {
		seq = (const u32 *)&cayman_sysls_disable;
		seq_len = CAYMAN_SYSLS_DISABLE_LENGTH;
	}

	btc_program_mgcg_hw_sequence(rdev, seq, seq_len);
}
/*
 * Replace the 0xff01 leakage-voltage placeholder entries in a clock/voltage
 * dependency table with the board's real max vddc.  Returns 0 on success
 * (a NULL table is treated as nothing to patch), -EINVAL when a placeholder
 * is found but no max vddc is known.
 */
static int ni_patch_single_dependency_table_based_on_leakage(struct radeon_device *rdev,
							     struct radeon_clock_voltage_dependency_table *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 idx;

	if (!table)
		return 0;

	for (idx = 0; idx < table->count; idx++) {
		if (table->entries[idx].v == 0xff01) {
			if (pi->max_vddc == 0)
				return -EINVAL;
			table->entries[idx].v = pi->max_vddc;
		}
	}

	return 0;
}
/*
 * Patch the leakage-voltage placeholders in both vddc dependency tables
 * (on sclk and on mclk).  Returns 0 on success or the first error
 * encountered.
 *
 * Fix: the original discarded the return value of the first patch call by
 * unconditionally overwriting it with the second, so an -EINVAL from the
 * sclk table was silently lost.  Errors now propagate immediately.
 */
static int ni_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev)
{
	int ret;

	ret = ni_patch_single_dependency_table_based_on_leakage(rdev,
								&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	if (ret)
		return ret;

	return ni_patch_single_dependency_table_based_on_leakage(rdev,
								 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
}
/* Disable dynamic power management by clearing GLOBAL_PWRMGT_EN. */
static void ni_stop_dpm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}
#if 0
/*
 * Currently unused (compiled out): notify the SMC that we are running on
 * AC power.  No message is defined for the DC case, so DC is a no-op.
 */
static int ni_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	if (ac_power)
		return (rv770_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
			0 : -EINVAL;

	return 0;
}
#endif
/*
 * Send a message to the SMC with a single u32 argument.  The parameter is
 * passed by writing it to SMC_SCRATCH0 before issuing the message.
 */
static PPSMC_Result ni_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_SCRATCH0, parameter);
	return rv770_send_msg_to_smc(rdev, msg);
}
static int ni_restrict_performance_levels_before_switch(struct radeon_device *rdev)
{
if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
return -EINVAL;
return (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
0 : -EINVAL;
}
/*
 * Force the SMC to a specific performance level (high, low, or automatic
 * selection).  On success the forced level is recorded in
 * rdev->pm.dpm.forced_level; unknown levels are recorded without any SMC
 * programming.  Returns 0 on success, -EINVAL if an SMC message fails.
 */
int ni_dpm_force_performance_level(struct radeon_device *rdev,
				   enum radeon_dpm_forced_level level)
{
	switch (level) {
	case RADEON_DPM_FORCED_LEVEL_HIGH:
		/* enable all levels, then force the top one */
		if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
			return -EINVAL;
		if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
			return -EINVAL;
		break;
	case RADEON_DPM_FORCED_LEVEL_LOW:
		/* clear any forcing, then restrict to the lowest level */
		if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
			return -EINVAL;
		if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
			return -EINVAL;
		break;
	case RADEON_DPM_FORCED_LEVEL_AUTO:
		/* clear forcing and enable every level */
		if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
			return -EINVAL;
		if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
			return -EINVAL;
		break;
	default:
		break;
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}
/*
 * Stop the SMC: poll (bounded by rdev->usec_timeout iterations) for
 * LB_SYNC_RESET_SEL to leave state 1, let things settle for 100us, then
 * halt the SMC via r7xx_stop_smc().  A poll timeout is not treated as an
 * error — we stop the SMC regardless.
 */
static void ni_stop_smc(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(LB_SYNC_RESET_SEL) & LB_SYNC_RESET_SEL_MASK;
		if (tmp != 1)
			break;
		udelay(1);
	}

	udelay(100);

	r7xx_stop_smc(rdev);
}
static int ni_process_firmware_header(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
u32 tmp;
int ret;
ret = rv770_read_smc_sram_dword(rdev,
NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
NISLANDS_SMC_FIRMWARE_HEADER_stateTable,
&tmp, pi->sram_end);
if (ret)
return ret;
pi->state_table_start = (u16)tmp;
ret = rv770_read_smc_sram_dword(rdev,
NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
NISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
&tmp, pi->sram_end);
if (ret)
return ret;
pi->soft_regs_start = (u16)tmp;
ret = rv770_read_smc_sram_dword(rdev,
NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
NISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
&tmp, pi->sram_end);
if (ret)
return ret;
eg_pi->mc_reg_table_start = (u16)tmp;
ret = rv770_read_smc_sram_dword(rdev,
NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
NISLANDS_SMC_FIRMWARE_HEADER_fanTable,
&tmp, pi->sram_end);
if (ret)
return ret;
ni_pi->fan_table_start = (u16)tmp;
ret = rv770_read_smc_sram_dword(rdev,
NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
NISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
&tmp, pi->sram_end);
if (ret)
return ret;
ni_pi->arb_table_start = (u16)tmp;
ret = rv770_read_smc_sram_dword(rdev,
NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
NISLANDS_SMC_FIRMWARE_HEADER_cacTable,
&tmp, pi->sram_end);
if (ret)
return ret;
ni_pi->cac_table_start = (u16)tmp;
ret = rv770_read_smc_sram_dword(rdev,
NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
NISLANDS_SMC_FIRMWARE_HEADER_spllTable,
&tmp, pi->sram_end);
if (ret)
return ret;
ni_pi->spll_table_start = (u16)tmp;
return ret;
}
/*
 * Snapshot the current SPLL/MPLL/MCLK clock-control registers into
 * ni_pi->clock_registers so they can be used as a baseline when building
 * SMC state tables.
 */
static void ni_read_clock_registers(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);

	ni_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
	ni_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
	ni_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
	ni_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
	ni_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
	ni_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
	ni_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	ni_pi->clock_registers.mpll_ad_func_cntl_2 = RREG32(MPLL_AD_FUNC_CNTL_2);
	ni_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	ni_pi->clock_registers.mpll_dq_func_cntl_2 = RREG32(MPLL_DQ_FUNC_CNTL_2);
	ni_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	ni_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	ni_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	ni_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
#if 0
/*
 * Currently unused (compiled out): ask the SMC to switch to minimum power
 * (ULP).  If gfx clock gating is enabled, the gfx clock is pulsed on/off
 * first; the GB_ADDR_CONFIG read acts as a posting read.
 */
static int ni_enter_ulp_state(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	if (pi->gfx_clock_gating) {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
		RREG32(GB_ADDR_CONFIG);
	}

	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
		 ~HOST_SMC_MSG_MASK);

	udelay(25000);

	return 0;
}
#endif
/*
 * Convert the board's voltage/backbias response times, the ACPI delay and
 * the vblank timeout into reference-clock ticks and program them into the
 * SMC soft registers, along with the fixed MC block delay and the mclk
 * switch limit.
 */
static void ni_program_response_times(struct radeon_device *rdev)
{
	u32 vddc_resp, bb_resp;
	u32 acpi_time, vbi_timeout;
	u32 vddc_dly, bb_dly, acpi_dly, vbi_dly, mclk_switch_limit;
	u32 ref_clk;

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);

	vddc_resp = (u32)rdev->pm.dpm.voltage_response_time;
	bb_resp = (u32)rdev->pm.dpm.backbias_response_time;

	/* fall back to sane defaults when the board tables provide none */
	if (vddc_resp == 0)
		vddc_resp = 1000;
	if (bb_resp == 0)
		bb_resp = 1000;

	acpi_time = 15000;
	vbi_timeout = 100000;

	ref_clk = radeon_get_xclk(rdev);

	/* scale each interval into reference-clock ticks */
	vddc_dly = (vddc_resp * ref_clk) / 1600;
	bb_dly = (bb_resp * ref_clk) / 1600;
	acpi_dly = (acpi_time * ref_clk) / 1600;
	vbi_dly = (vbi_timeout * ref_clk) / 1600;
	mclk_switch_limit = (460 * ref_clk) / 100;

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_bbias, bb_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_switch_lim, mclk_switch_limit);
}
/*
 * Copy one atom voltage table into the SMC state table's SMIO arrays:
 * the high SMIO entries are cleared and each low SMIO pattern is OR-ed
 * in, in big-endian form.
 */
static void ni_populate_smc_voltage_table(struct radeon_device *rdev,
					  struct atom_voltage_table *voltage_table,
					  NISLANDS_SMC_STATETABLE *table)
{
	unsigned int idx;

	for (idx = 0; idx < voltage_table->count; idx++) {
		table->highSMIO[idx] = 0;
		table->lowSMIO[idx] |= cpu_to_be32(voltage_table->entries[idx].smio_low);
	}
}
/*
 * Fill the SMC state table's voltage (VDDC and VDDCI) SMIO tables and
 * masks from the atom voltage tables, and record the highest usable VDDC
 * index.
 *
 * Fix: the VDDCI low mask was populated from the *VDDC* table's mask_low
 * (copy/paste typo); it now correctly uses vddci_voltage_table.mask_low,
 * matching the upstream radeon dpm fix.
 */
static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
					   NISLANDS_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	unsigned char i;

	if (eg_pi->vddc_voltage_table.count) {
		ni_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
		table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] = 0;
		table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] =
			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);

		/* first table entry at or above max_vddc_in_table bounds the index */
		for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
			if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
				table->maxVDDCIndexInPPTable = i;
				break;
			}
		}
	}

	if (eg_pi->vddci_voltage_table.count) {
		ni_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
		table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
		table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
	}
}
/*
 * Find the first voltage-table entry at or above the requested value and
 * store its index and (big-endian) value in *voltage.  Returns 0 on
 * success, -EINVAL if every entry is below the requested value.
 */
static int ni_populate_voltage_value(struct radeon_device *rdev,
				     struct atom_voltage_table *table,
				     u16 value,
				     NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	unsigned int idx;

	for (idx = 0; idx < table->count; idx++) {
		if (value <= table->entries[idx].value) {
			voltage->index = (u8)idx;
			voltage->value = cpu_to_be16(table->entries[idx].value);
			return 0;
		}
	}

	return -EINVAL;
}
/*
 * Select the MVDD level for a given memory clock.  The low level is used
 * only when MVDD is controllable and mclk is at or below the split
 * frequency; in every other case (no control, or mclk above the split)
 * the high level is used.
 */
static void ni_populate_mvdd_value(struct radeon_device *rdev,
				   u32 mclk,
				   NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	if (!pi->mvdd_control || mclk > pi->mvdd_split_frequency) {
		voltage->index = eg_pi->mvdd_high_index;
		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
	} else {
		voltage->index = eg_pi->mvdd_low_index;
		voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
	}
}
/*
 * Translate an SMC voltage entry into a standardized voltage: prefer the
 * CAC leakage table's vddc for the entry's index when one exists,
 * otherwise fall back to the entry's own (big-endian) value.  Always
 * returns 0.
 */
static int ni_get_std_voltage_value(struct radeon_device *rdev,
				    NISLANDS_SMC_VOLTAGE_VALUE *voltage,
				    u16 *std_voltage)
{
	u32 idx = voltage->index;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries &&
	    idx < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
		*std_voltage = rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc;
	else
		*std_voltage = be16_to_cpu(voltage->value);

	return 0;
}
/* Store an index/value pair in an SMC voltage entry (value in big endian). */
static void ni_populate_std_voltage_value(struct radeon_device *rdev,
					  u16 value, u8 index,
					  NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	voltage->value = cpu_to_be16(value);
	voltage->index = index;
}
/*
 * Derive the SMC power scaling factor: the CAC TID count multiplied by
 * the xclk period (in units of 100ns / 10000, matching the original
 * integer-division order).
 */
static u32 ni_get_smc_power_scaling_factor(struct radeon_device *rdev)
{
	u32 xclk = radeon_get_xclk(rdev);
	u32 tid_count = RREG32(CG_CAC_CTRL) & TID_CNT_MASK;
	u32 period;

	period = 1000000000UL / xclk;
	period /= 10000UL;

	return tid_count * period;
}
/* Convert watts into the SMC's fixed-point power unit (scaled, times 4). */
static u32 ni_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
{
	u32 scaled = power_in_watts * scaling_factor;

	return scaled * 4;
}
/*
 * Compute the DPM2 power-boost limit for @radeon_state.
 *
 * Power scales roughly with voltage squared, so the boost limit is
 * @near_tdp_limit scaled by 90% of (Vmed / Vhigh)^2, where Vmed and
 * Vhigh are the standard VDDC of the second-highest and highest
 * performance levels.  Returns 0 (no boost) when power containment or
 * boost is disabled, fewer than three performance levels exist, any
 * voltage lookup fails, or the result does not fit in 32 bits.
 */
static u32 ni_calculate_power_boost_limit(struct radeon_device *rdev,
struct radeon_ps *radeon_state,
u32 near_tdp_limit)
{
struct ni_ps *state = ni_get_ps(radeon_state);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
u32 power_boost_limit = 0;
int ret;
if (ni_pi->enable_power_containment &&
ni_pi->use_power_boost_limit) {
NISLANDS_SMC_VOLTAGE_VALUE vddc;
u16 std_vddc_med;
u16 std_vddc_high;
u64 tmp, n, d;
/* Boost needs at least low/medium/high levels. */
if (state->performance_level_count < 3)
return 0;
/* Standard VDDC of the second-highest performance level. */
ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
state->performance_levels[state->performance_level_count - 2].vddc,
&vddc);
if (ret)
return 0;
ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_med);
if (ret)
return 0;
/* Standard VDDC of the highest performance level. */
ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
state->performance_levels[state->performance_level_count - 1].vddc,
&vddc);
if (ret)
return 0;
ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_high);
if (ret)
return 0;
/* limit = near_tdp * Vmed^2 * 90 / (Vhigh^2 * 100), 64-bit safe. */
n = ((u64)near_tdp_limit * ((u64)std_vddc_med * (u64)std_vddc_med) * 90);
d = ((u64)std_vddc_high * (u64)std_vddc_high * 100);
tmp = div64_u64(n, d);
/* Reject values that overflow the 32-bit SMC field. */
if (tmp >> 32)
return 0;
power_boost_limit = (u32)tmp;
}
return power_boost_limit;
}
/*
 * Apply the user TDP adjustment (in percent) to the board limits.
 * @adjust_polarity raises (true) or lowers (false) the limits; the near
 * limit shifts by the same absolute amount as the full limit.
 * Returns -EINVAL if the adjustment exceeds the overdrive cap.
 */
static int ni_calculate_adjusted_tdp_limits(struct radeon_device *rdev,
					    bool adjust_polarity,
					    u32 tdp_adjustment,
					    u32 *tdp_limit,
					    u32 *near_tdp_limit)
{
	u32 base_tdp = rdev->pm.dpm.tdp_limit;

	if (tdp_adjustment > (u32)rdev->pm.dpm.tdp_od_limit)
		return -EINVAL;

	if (adjust_polarity) {
		*tdp_limit = ((100 + tdp_adjustment) * base_tdp) / 100;
		*near_tdp_limit = rdev->pm.dpm.near_tdp_limit +
			(*tdp_limit - base_tdp);
	} else {
		*tdp_limit = ((100 - tdp_adjustment) * base_tdp) / 100;
		*near_tdp_limit = rdev->pm.dpm.near_tdp_limit -
			(base_tdp - *tdp_limit);
	}

	return 0;
}
/*
 * Fill the DPM2 TDP parameters of the SMC state table and upload them
 * to SMC RAM.  Succeeds as a no-op when power containment is disabled.
 */
static int ni_populate_smc_tdp_limits(struct radeon_device *rdev,
struct radeon_ps *radeon_state)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
if (ni_pi->enable_power_containment) {
NISLANDS_SMC_STATETABLE *smc_table = &ni_pi->smc_statetable;
u32 scaling_factor = ni_get_smc_power_scaling_factor(rdev);
u32 tdp_limit;
u32 near_tdp_limit;
u32 power_boost_limit;
int ret;
if (scaling_factor == 0)
return -EINVAL;
memset(smc_table, 0, sizeof(NISLANDS_SMC_STATETABLE));
/* Polarity hard-coded to "lower" — original author marked it ???. */
ret = ni_calculate_adjusted_tdp_limits(rdev,
false, /* ??? */
rdev->pm.dpm.tdp_adjustment,
&tdp_limit,
&near_tdp_limit);
if (ret)
return ret;
power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state,
near_tdp_limit);
smc_table->dpm2Params.TDPLimit =
cpu_to_be32(ni_scale_power_for_smc(tdp_limit, scaling_factor));
smc_table->dpm2Params.NearTDPLimit =
cpu_to_be32(ni_scale_power_for_smc(near_tdp_limit, scaling_factor));
smc_table->dpm2Params.SafePowerLimit =
cpu_to_be32(ni_scale_power_for_smc((near_tdp_limit * NISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100,
scaling_factor));
smc_table->dpm2Params.PowerBoostLimit =
cpu_to_be32(ni_scale_power_for_smc(power_boost_limit, scaling_factor));
/* Upload the four consecutive dwords starting at TDPLimit. */
ret = rv770_copy_bytes_to_smc(rdev,
(u16)(pi->state_table_start + offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
offsetof(PP_NIslands_DPM2Parameters, TDPLimit)),
(u8 *)(&smc_table->dpm2Params.TDPLimit),
sizeof(u32) * 4, pi->sram_end);
if (ret)
return ret;
}
return 0;
}
/*
 * Copy the MC arbiter DRAM timing set @arb_freq_src into @arb_freq_dest
 * and request the destination set as the active one.
 * Returns -EINVAL for an unrecognized set id.
 */
int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
u32 arb_freq_src, u32 arb_freq_dest)
{
u32 mc_arb_dram_timing;
u32 mc_arb_dram_timing2;
u32 burst_time;
u32 mc_cg_config;
/* Read the source set's timings and its burst-time field. */
switch (arb_freq_src) {
case MC_CG_ARB_FREQ_F0:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
break;
case MC_CG_ARB_FREQ_F1:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
break;
case MC_CG_ARB_FREQ_F2:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
break;
case MC_CG_ARB_FREQ_F3:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
break;
default:
return -EINVAL;
}
/* Write them into the destination set's registers. */
switch (arb_freq_dest) {
case MC_CG_ARB_FREQ_F0:
WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
break;
case MC_CG_ARB_FREQ_F1:
WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
break;
case MC_CG_ARB_FREQ_F2:
WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
break;
case MC_CG_ARB_FREQ_F3:
WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
break;
default:
return -EINVAL;
}
/* Enable arb-set clock gating config and request the new active set. */
mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
WREG32(MC_CG_CONFIG, mc_cg_config);
WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
return 0;
}
/*
 * Initialize the arb-set selector in the SMC-resident arb table: the
 * top byte of the first dword is set to MC_CG_ARB_FREQ_F1, preserving
 * the remaining bytes.
 */
static int ni_init_arb_table_index(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 val;
	int ret;

	ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start,
					&val, pi->sram_end);
	if (ret)
		return ret;

	val = (val & 0x00FFFFFF) | (((u32)MC_CG_ARB_FREQ_F1) << 24);

	return rv770_write_smc_sram_dword(rdev, ni_pi->arb_table_start,
					  val, pi->sram_end);
}
/* One-time switch from the power-on arb set (F0) to the driver set (F1). */
static int ni_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
{
return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
/*
 * Switch the MC back to arb set F0 unless it is already active.  The
 * currently selected set is read from the top byte of the first dword
 * of the SMC-resident arb table.
 */
static int ni_force_switch_to_arb_f0(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 current_set;
	int ret;

	ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start,
					&current_set, pi->sram_end);
	if (ret)
		return ret;

	current_set = (current_set >> 24) & 0xff;
	if (current_set == MC_CG_ARB_FREQ_F0)
		return 0;

	return ni_copy_and_switch_arb_sets(rdev, current_set, MC_CG_ARB_FREQ_F0);
}
/*
 * Fill @arb_regs with the MC arbiter timings for one performance level:
 * derive the refresh rate from sclk, program the ASIC timings for the
 * level's clocks via ATOM, then capture the resulting DRAM timing
 * registers in big-endian form for the SMC.
 */
static int ni_populate_memory_timing_parameters(struct radeon_device *rdev,
						struct rv7xx_pl *pl,
						SMC_NIslands_MCArbDramTimingRegisterSet *arb_regs)
{
	arb_regs->mc_arb_rfsh_rate =
		(u8)rv770_calculate_memory_refresh_rate(rdev, pl->sclk);

	radeon_atom_set_engine_dram_timings(rdev, pl->sclk, pl->mclk);

	arb_regs->mc_arb_dram_timing = cpu_to_be32(RREG32(MC_ARB_DRAM_TIMING));
	arb_regs->mc_arb_dram_timing2 = cpu_to_be32(RREG32(MC_ARB_DRAM_TIMING2));

	return 0;
}
/*
 * For each performance level of @radeon_state, compute the MC arbiter
 * timing register set and upload it into the SMC arb table, one slot
 * per level starting at slot @first_arb_set.
 */
static int ni_do_program_memory_timing_parameters(struct radeon_device *rdev,
struct radeon_ps *radeon_state,
unsigned int first_arb_set)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
struct ni_ps *state = ni_get_ps(radeon_state);
SMC_NIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
int i, ret = 0;
for (i = 0; i < state->performance_level_count; i++) {
ret = ni_populate_memory_timing_parameters(rdev, &state->performance_levels[i], &arb_regs);
if (ret)
break;
/* Destination: slot (first_arb_set + i) of the SMC arb table. */
ret = rv770_copy_bytes_to_smc(rdev,
(u16)(ni_pi->arb_table_start +
offsetof(SMC_NIslands_MCArbDramTimingRegisters, data) +
sizeof(SMC_NIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i)),
(u8 *)&arb_regs,
(u16)sizeof(SMC_NIslands_MCArbDramTimingRegisterSet),
pi->sram_end);
if (ret)
break;
}
return ret;
}
/*
 * Upload the MC arb timings for a new driver state into the
 * driver-state slots of the SMC arb table.
 */
static int ni_program_memory_timing_parameters(struct radeon_device *rdev,
struct radeon_ps *radeon_new_state)
{
return ni_do_program_memory_timing_parameters(rdev, radeon_new_state,
NISLANDS_DRIVER_STATE_ARB_INDEX);
}
/* The initial (boot) state always runs MVDD at the high level. */
static void ni_populate_initial_mvdd_value(struct radeon_device *rdev,
					   struct NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
	voltage->index = eg_pi->mvdd_high_index;
}
/*
 * Fill the initialState entry of the SMC state table from the boot
 * state's single performance level: captured MPLL/SPLL registers, the
 * boot clocks, voltages (VDDC + standard VDDC, optional VDDCI, MVDD),
 * arb index, PCIe gen2 flag, GDDR5 strobe/EDC flags, and DPM2 defaults.
 * Always returns 0.
 */
static int ni_populate_smc_initial_state(struct radeon_device *rdev,
struct radeon_ps *radeon_initial_state,
NISLANDS_SMC_STATETABLE *table)
{
struct ni_ps *initial_state = ni_get_ps(radeon_initial_state);
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
u32 reg;
int ret;
/* Memory PLL registers captured at init, plus the boot mclk. */
table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl);
table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 =
cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl_2);
table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl);
table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 =
cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl_2);
table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
cpu_to_be32(ni_pi->clock_registers.mclk_pwrmgt_cntl);
table->initialState.levels[0].mclk.vDLL_CNTL =
cpu_to_be32(ni_pi->clock_registers.dll_cntl);
table->initialState.levels[0].mclk.vMPLL_SS =
cpu_to_be32(ni_pi->clock_registers.mpll_ss1);
table->initialState.levels[0].mclk.vMPLL_SS2 =
cpu_to_be32(ni_pi->clock_registers.mpll_ss2);
table->initialState.levels[0].mclk.mclk_value =
cpu_to_be32(initial_state->performance_levels[0].mclk);
/* Engine PLL registers captured at init, plus the boot sclk. */
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl);
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_2);
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_3);
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_4);
table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum);
table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum_2);
table->initialState.levels[0].sclk.sclk_value =
cpu_to_be32(initial_state->performance_levels[0].sclk);
table->initialState.levels[0].arbRefreshState =
NISLANDS_INITIAL_STATE_ARB_INDEX;
table->initialState.levels[0].ACIndex = 0;
/* VDDC plus the derived standard VDDC (used for power accounting). */
ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
initial_state->performance_levels[0].vddc,
&table->initialState.levels[0].vddc);
if (!ret) {
u16 std_vddc;
ret = ni_get_std_voltage_value(rdev,
&table->initialState.levels[0].vddc,
&std_vddc);
if (!ret)
ni_populate_std_voltage_value(rdev, std_vddc,
table->initialState.levels[0].vddc.index,
&table->initialState.levels[0].std_vddc);
}
if (eg_pi->vddci_control)
ni_populate_voltage_value(rdev,
&eg_pi->vddci_voltage_table,
initial_state->performance_levels[0].vddci,
&table->initialState.levels[0].vddci);
ni_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);
/* aT: full CG_R window, zero CG_L; bSP: default behavior period. */
reg = CG_R(0xffff) | CG_L(0);
table->initialState.levels[0].aT = cpu_to_be32(reg);
table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
if (pi->boot_in_gen2)
table->initialState.levels[0].gen2PCIE = 1;
else
table->initialState.levels[0].gen2PCIE = 0;
/* GDDR5: strobe mode and EDC flags above the enable threshold. */
if (pi->mem_gddr5) {
table->initialState.levels[0].strobeMode =
cypress_get_strobe_mode_settings(rdev,
initial_state->performance_levels[0].mclk);
if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
table->initialState.levels[0].mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG;
else
table->initialState.levels[0].mcFlags = 0;
}
table->initialState.levelCount = 1;
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
/* DPM2 is inert for the initial state; clamp power throttling wide open. */
table->initialState.levels[0].dpm2.MaxPS = 0;
table->initialState.levels[0].dpm2.NearTDPDec = 0;
table->initialState.levels[0].dpm2.AboveSafeInc = 0;
table->initialState.levels[0].dpm2.BelowSafeInc = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
/*
 * Build the ACPI (lowest-power) state from a copy of the initial state:
 * pick the ACPI (or minimum) VDDC, power down the memory PLLs and DLLs,
 * bypass the engine PLL (SCLK_MUX_SEL(4)), and zero the clock values so
 * the SMC treats the clocks as off.  Always returns 0.
 */
static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
NISLANDS_SMC_STATETABLE *table)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
u32 mpll_ad_func_cntl = ni_pi->clock_registers.mpll_ad_func_cntl;
u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2;
u32 mpll_dq_func_cntl = ni_pi->clock_registers.mpll_dq_func_cntl;
u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2;
u32 spll_func_cntl = ni_pi->clock_registers.cg_spll_func_cntl;
u32 spll_func_cntl_2 = ni_pi->clock_registers.cg_spll_func_cntl_2;
u32 spll_func_cntl_3 = ni_pi->clock_registers.cg_spll_func_cntl_3;
u32 spll_func_cntl_4 = ni_pi->clock_registers.cg_spll_func_cntl_4;
u32 mclk_pwrmgt_cntl = ni_pi->clock_registers.mclk_pwrmgt_cntl;
u32 dll_cntl = ni_pi->clock_registers.dll_cntl;
u32 reg;
int ret;
table->ACPIState = table->initialState;
table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
/* Prefer the dedicated ACPI VDDC; fall back to the table minimum. */
if (pi->acpi_vddc) {
ret = ni_populate_voltage_value(rdev,
&eg_pi->vddc_voltage_table,
pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
if (!ret) {
u16 std_vddc;
ret = ni_get_std_voltage_value(rdev,
&table->ACPIState.levels[0].vddc, &std_vddc);
if (!ret)
ni_populate_std_voltage_value(rdev, std_vddc,
table->ACPIState.levels[0].vddc.index,
&table->ACPIState.levels[0].std_vddc);
}
if (pi->pcie_gen2) {
if (pi->acpi_pcie_gen2)
table->ACPIState.levels[0].gen2PCIE = 1;
else
table->ACPIState.levels[0].gen2PCIE = 0;
} else {
table->ACPIState.levels[0].gen2PCIE = 0;
}
} else {
ret = ni_populate_voltage_value(rdev,
&eg_pi->vddc_voltage_table,
pi->min_vddc_in_table,
&table->ACPIState.levels[0].vddc);
if (!ret) {
u16 std_vddc;
ret = ni_get_std_voltage_value(rdev,
&table->ACPIState.levels[0].vddc,
&std_vddc);
if (!ret)
ni_populate_std_voltage_value(rdev, std_vddc,
table->ACPIState.levels[0].vddc.index,
&table->ACPIState.levels[0].std_vddc);
}
table->ACPIState.levels[0].gen2PCIE = 0;
}
if (eg_pi->acpi_vddci) {
if (eg_pi->vddci_control)
ni_populate_voltage_value(rdev,
&eg_pi->vddci_voltage_table,
eg_pi->acpi_vddci,
&table->ACPIState.levels[0].vddci);
}
/* Power down the memory PLLs (keep bias generators reset). */
mpll_ad_func_cntl &= ~PDNB;
mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
if (pi->mem_gddr5)
mpll_dq_func_cntl &= ~PDNB;
mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
/* Reset and power down every memory read DLL, and bypass them. */
mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
MRDCKA1_RESET |
MRDCKB0_RESET |
MRDCKB1_RESET |
MRDCKC0_RESET |
MRDCKC1_RESET |
MRDCKD0_RESET |
MRDCKD1_RESET);
mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
MRDCKA1_PDNB |
MRDCKB0_PDNB |
MRDCKB1_PDNB |
MRDCKC0_PDNB |
MRDCKC1_PDNB |
MRDCKD0_PDNB |
MRDCKD1_PDNB);
dll_cntl |= (MRDCKA0_BYPASS |
MRDCKA1_BYPASS |
MRDCKB0_BYPASS |
MRDCKB1_BYPASS |
MRDCKC0_BYPASS |
MRDCKC1_BYPASS |
MRDCKD0_BYPASS |
MRDCKD1_BYPASS);
/* Route sclk away from the SPLL output. */
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(4);
table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
table->ACPIState.levels[0].mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);
table->ACPIState.levels[0].mclk.mclk_value = 0;
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);
table->ACPIState.levels[0].sclk.sclk_value = 0;
/* mclk of 0 selects the low MVDD level when MVDD control is enabled. */
ni_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
if (eg_pi->dynamic_ac_timing)
table->ACPIState.levels[0].ACIndex = 1;
/* DPM2 is inert for the ACPI state; clamp power throttling wide open. */
table->ACPIState.levels[0].dpm2.MaxPS = 0;
table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
/*
 * Build the whole SMC state table — voltage tables, thermal-protect
 * type, system flags, initial and ACPI states — program the boot
 * state's arb timings, and upload the table to SMC RAM.
 */
static int ni_init_smc_table(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
int ret;
struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
NISLANDS_SMC_STATETABLE *table = &ni_pi->smc_statetable;
memset(table, 0, sizeof(NISLANDS_SMC_STATETABLE));
ni_populate_smc_voltage_tables(rdev, table);
/* Map the board's thermal sensor onto the SMC protect type. */
switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_NI:
case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
break;
case THERMAL_TYPE_NONE:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
break;
default:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
break;
}
/* Translate platform capability bits into SMC system flags. */
if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
if (pi->mem_gddr5)
table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
ret = ni_populate_smc_initial_state(rdev, radeon_boot_state, table);
if (ret)
return ret;
ret = ni_populate_smc_acpi_state(rdev, table);
if (ret)
return ret;
/* Driver and ULV states start as copies of the initial state. */
table->driverState = table->initialState;
table->ULVState = table->initialState;
ret = ni_do_program_memory_timing_parameters(rdev, radeon_boot_state,
NISLANDS_INITIAL_STATE_ARB_INDEX);
if (ret)
return ret;
return rv770_copy_bytes_to_smc(rdev, pi->state_table_start, (u8 *)table,
sizeof(NISLANDS_SMC_STATETABLE), pi->sram_end);
}
/*
 * Compute the SPLL (engine clock PLL) register values for @engine_clock
 * and store them, in CPU byte order, in @sclk.  Spread spectrum is
 * programmed only when sclk SS is enabled and the vbios carries SS info
 * for the resulting VCO frequency.
 *
 * Returns 0 on success, or the error from the ATOM divider lookup.
 *
 * Fix: the third argument marker of radeon_atom_get_clock_dividers had
 * been corrupted to the mojibake "÷rs" (HTML-entity damage of
 * "&dividers"), which does not compile; restored to &dividers.
 */
static int ni_calculate_sclk_params(struct radeon_device *rdev,
				    u32 engine_clock,
				    NISLANDS_SMC_SCLK_VALUE *sclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl = ni_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = ni_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = ni_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = ni_pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = ni_pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = ni_pi->clock_registers.cg_spll_spread_spectrum_2;
	u64 tmp;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;

	/*
	 * Fractional feedback divider.
	 * NOTE(review): the 16834 scale looks like a typo for 16384
	 * (1 << 14 fractional bits) — the analogous SI code uses 16384.
	 * Left unchanged here since the shipped SMC tables were built
	 * with this value; confirm against the register spec first.
	 */
	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16834;
	do_div(tmp, reference_clock);
	fbdiv = (u32) tmp;

	/* Reference and post dividers. */
	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);

	/* Source sclk from the SPLL output. */
	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(2);

	/* Feedback divider with dithering enabled. */
	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	if (pi->sclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	sclk->sclk_value = engine_clock;
	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;

	return 0;
}
/*
 * Compute the SPLL settings for @engine_clock (via
 * ni_calculate_sclk_params()) and store them byte-swapped into the
 * big-endian layout the SMC expects.
 */
static int ni_populate_sclk_value(struct radeon_device *rdev,
				  u32 engine_clock,
				  NISLANDS_SMC_SCLK_VALUE *sclk)
{
	NISLANDS_SMC_SCLK_VALUE raw;
	int ret;

	ret = ni_calculate_sclk_params(rdev, engine_clock, &raw);
	if (ret)
		return ret;

	sclk->sclk_value = cpu_to_be32(raw.sclk_value);
	sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(raw.vCG_SPLL_FUNC_CNTL);
	sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(raw.vCG_SPLL_FUNC_CNTL_2);
	sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(raw.vCG_SPLL_FUNC_CNTL_3);
	sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(raw.vCG_SPLL_FUNC_CNTL_4);
	sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(raw.vCG_SPLL_SPREAD_SPECTRUM);
	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(raw.vCG_SPLL_SPREAD_SPECTRUM_2);

	return 0;
}
static int ni_init_smc_spll_table(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
SMC_NISLANDS_SPLL_DIV_TABLE *spll_table;
NISLANDS_SMC_SCLK_VALUE sclk_params;
u32 fb_div;
u32 p_div;
u32 clk_s;
u32 clk_v;
u32 sclk = 0;
int i, ret;
u32 tmp;
if (ni_pi->spll_table_start == 0)
return -EINVAL;
spll_table = kzalloc(sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
if (spll_table == NULL)
return -ENOMEM;
for (i = 0; i < 256; i++) {
ret = ni_calculate_sclk_params(rdev, sclk, &sclk_params);
if (ret)
break;
p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
fb_div &= ~0x00001FFF;
fb_div >>= 1;
clk_v >>= 6;
if (p_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
ret = -EINVAL;
if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
ret = -EINVAL;
if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
ret = -EINVAL;
if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
ret = -EINVAL;
if (ret)
break;
tmp = ((fb_div << SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
((p_div << SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
spll_table->freq[i] = cpu_to_be32(tmp);
tmp = ((clk_v << SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
((clk_s << SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
spll_table->ss[i] = cpu_to_be32(tmp);
sclk += 512;
}
if (!ret)
ret = rv770_copy_bytes_to_smc(rdev, ni_pi->spll_table_start, (u8 *)spll_table,
sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), pi->sram_end);
kfree(spll_table);
return ret;
}
/*
 * Compute the memory PLL register values for @memory_clock and store
 * them, big-endian, in @mclk.
 *
 * @strobe_mode:  program the DQ PLL for strobe mode (PDNB cleared)
 * @dll_state_on: keep the memory read DLLs powered (PDNB bits set)
 *
 * Returns 0 on success, or the error from the ATOM divider lookup.
 *
 * Fix: the third argument marker of radeon_atom_get_clock_dividers had
 * been corrupted to the mojibake "÷rs" (HTML-entity damage of
 * "&dividers"), which does not compile; restored to &dividers.
 */
static int ni_populate_mclk_value(struct radeon_device *rdev,
				  u32 engine_clock,
				  u32 memory_clock,
				  NISLANDS_SMC_MCLK_VALUE *mclk,
				  bool strobe_mode,
				  bool dll_state_on)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 mpll_ad_func_cntl = ni_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl = ni_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2;
	u32 mclk_pwrmgt_cntl = ni_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 dll_cntl = ni_pi->clock_registers.dll_cntl;
	u32 mpll_ss1 = ni_pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = ni_pi->clock_registers.mpll_ss2;
	struct atom_clock_dividers dividers;
	u32 ibias;
	u32 dll_speed;
	int ret;
	u32 mc_seq_misc7;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
					     memory_clock, strobe_mode, &dividers);
	if (ret)
		return ret;

	if (!strobe_mode) {
		/* Bit 27 of MC_SEQ_MISC7 forces post divider 1 — TODO confirm. */
		mc_seq_misc7 = RREG32(MC_SEQ_MISC7);
		if (mc_seq_misc7 & 0x8000000)
			dividers.post_div = 1;
	}

	ibias = cypress_map_clkf_to_ibias(rdev, dividers.whole_fb_div);

	/* Address/command PLL dividers. */
	mpll_ad_func_cntl &= ~(CLKR_MASK |
			       YCLK_POST_DIV_MASK |
			       CLKF_MASK |
			       CLKFRAC_MASK |
			       IBIAS_MASK);
	mpll_ad_func_cntl |= CLKR(dividers.ref_div);
	mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
	mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
	mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
	mpll_ad_func_cntl |= IBIAS(ibias);

	if (dividers.vco_mode)
		mpll_ad_func_cntl_2 |= VCO_MODE;
	else
		mpll_ad_func_cntl_2 &= ~VCO_MODE;

	if (pi->mem_gddr5) {
		/* The DQ PLL mirrors the AD PLL settings on GDDR5. */
		mpll_dq_func_cntl &= ~(CLKR_MASK |
				       YCLK_POST_DIV_MASK |
				       CLKF_MASK |
				       CLKFRAC_MASK |
				       IBIAS_MASK);
		mpll_dq_func_cntl |= CLKR(dividers.ref_div);
		mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
		mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
		mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
		mpll_dq_func_cntl |= IBIAS(ibias);

		if (strobe_mode)
			mpll_dq_func_cntl &= ~PDNB;
		else
			mpll_dq_func_cntl |= PDNB;

		if (dividers.vco_mode)
			mpll_dq_func_cntl_2 |= VCO_MODE;
		else
			mpll_dq_func_cntl_2 &= ~VCO_MODE;
	}

	if (pi->mclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = memory_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
			u32 reference_clock = rdev->clock.mpll.reference_freq;
			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
			u32 clk_v = ss.percentage *
				(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clk_v);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clk_s);
		}
	}

	dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
					memory_clock);

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);
	if (dll_state_on)
		mclk_pwrmgt_cntl |= (MRDCKA0_PDNB |
				     MRDCKA1_PDNB |
				     MRDCKB0_PDNB |
				     MRDCKB1_PDNB |
				     MRDCKC0_PDNB |
				     MRDCKC1_PDNB |
				     MRDCKD0_PDNB |
				     MRDCKD1_PDNB);
	else
		mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
				      MRDCKA1_PDNB |
				      MRDCKB0_PDNB |
				      MRDCKB1_PDNB |
				      MRDCKC0_PDNB |
				      MRDCKC1_PDNB |
				      MRDCKD0_PDNB |
				      MRDCKD1_PDNB);

	/* Byte-swap everything into the SMC's big-endian layout. */
	mclk->mclk_value = cpu_to_be32(memory_clock);
	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	mclk->vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	mclk->vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);

	return 0;
}
/*
 * Set the behavior sample period (bSP) for each level: the highest
 * level uses the performance period, all lower levels the default one.
 */
static void ni_populate_smc_sp(struct radeon_device *rdev,
			       struct radeon_ps *radeon_state,
			       NISLANDS_SMC_SWSTATE *smc_state)
{
	struct ni_ps *ps = ni_get_ps(radeon_state);
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	int last = ps->performance_level_count - 1;
	int i;

	for (i = 0; i < last; i++)
		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);

	smc_state->levels[last].bSP = cpu_to_be32(pi->psp);
}
/*
 * Convert one rv7xx performance level @pl into an SMC hardware level:
 * PCIe gen2 flag, SPLL settings, MC flags (display stutter, GDDR5
 * EDC/RTT/strobe), MPLL settings, and the VDDC/std-VDDC/VDDCI/MVDD
 * voltages.  Returns 0 on success or the first helper error.
 */
static int ni_convert_power_level_to_smc(struct radeon_device *rdev,
struct rv7xx_pl *pl,
NISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
int ret;
bool dll_state_on;
u16 std_vddc;
u32 tmp = RREG32(DC_STUTTER_CNTL);
level->gen2PCIE = pi->pcie_gen2 ?
((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
ret = ni_populate_sclk_value(rdev, pl->sclk, &level->sclk);
if (ret)
return ret;
level->mcFlags = 0;
/* Memory self-refresh during display stutter, when below threshold,
 * UVD idle, and stutter enabled for both display controllers. */
if (pi->mclk_stutter_mode_threshold &&
(pl->mclk <= pi->mclk_stutter_mode_threshold) &&
!eg_pi->uvd_enabled &&
(tmp & DC_STUTTER_ENABLE_A) &&
(tmp & DC_STUTTER_ENABLE_B))
level->mcFlags |= NISLANDS_SMC_MC_STUTTER_EN;
if (pi->mem_gddr5) {
if (pl->mclk > pi->mclk_edc_enable_threshold)
level->mcFlags |= NISLANDS_SMC_MC_EDC_RD_FLAG;
if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
level->mcFlags |= NISLANDS_SMC_MC_EDC_WR_FLAG;
level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);
/* In strobe mode the DLL state follows the MC_SEQ fuse bits,
 * selected by the mclk frequency ratio. */
if (level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) {
if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
else
dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
} else {
dll_state_on = false;
if (pl->mclk > ni_pi->mclk_rtt_mode_threshold)
level->mcFlags |= NISLANDS_SMC_MC_RTT_ENABLE;
}
ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk,
&level->mclk,
(level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) != 0,
dll_state_on);
} else
/* Non-GDDR5: strobe mode and DLLs always on. */
ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk, &level->mclk, 1, 1);
if (ret)
return ret;
ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
pl->vddc, &level->vddc);
if (ret)
return ret;
ret = ni_get_std_voltage_value(rdev, &level->vddc, &std_vddc);
if (ret)
return ret;
ni_populate_std_voltage_value(rdev, std_vddc,
level->vddc.index, &level->std_vddc);
if (eg_pi->vddci_control) {
ret = ni_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
pl->vddci, &level->vddci);
if (ret)
return ret;
}
ni_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
return ret;
}
/*
 * Fill the aT (activity threshold) fields of @smc_state: each level's
 * CG_R/CG_L pair encodes the down/up switch thresholds between adjacent
 * performance levels, computed by r600_calculate_at() from the sclk
 * ratio (scaled by bsp/pbsp).  UVD states use a different hysteresis
 * spacing.  Returns -EINVAL when the level count exceeds the supported
 * bound (9 — presumably the swstate table limit; TODO confirm).
 */
static int ni_populate_smc_t(struct radeon_device *rdev,
struct radeon_ps *radeon_state,
NISLANDS_SMC_SWSTATE *smc_state)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_ps *state = ni_get_ps(radeon_state);
u32 a_t;
u32 t_l, t_h;
u32 high_bsp;
int i, ret;
if (state->performance_level_count >= 9)
return -EINVAL;
/* Single level: full CG_R window, no CG_L threshold. */
if (state->performance_level_count < 2) {
a_t = CG_R(0xffff) | CG_L(0);
smc_state->levels[0].aT = cpu_to_be32(a_t);
return 0;
}
smc_state->levels[0].aT = cpu_to_be32(0);
for (i = 0; i <= state->performance_level_count - 2; i++) {
if (eg_pi->uvd_enabled)
ret = r600_calculate_at(
1000 * (i * (eg_pi->smu_uvd_hs ? 2 : 8) + 2),
100 * R600_AH_DFLT,
state->performance_levels[i + 1].sclk,
state->performance_levels[i].sclk,
&t_l,
&t_h);
else
ret = r600_calculate_at(
1000 * (i + 1),
100 * R600_AH_DFLT,
state->performance_levels[i + 1].sclk,
state->performance_levels[i].sclk,
&t_l,
&t_h);
/* Fall back to fixed hysteresis around (i+1)*1000 on failure. */
if (ret) {
t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
}
/* Down-threshold (CG_R) of level i, scaled by the default bsp. */
a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
a_t |= CG_R(t_l * pi->bsp / 20000);
smc_state->levels[i].aT = cpu_to_be32(a_t);
/* Up-threshold (CG_L) of level i+1; the top level uses pbsp. */
high_bsp = (i == state->performance_level_count - 2) ?
pi->pbsp : pi->bsp;
a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
}
return 0;
}
/*
 * Fill in the DPM2 power-containment parameters (MaxPS, NearTDPDec,
 * AboveSafeInc, BelowSafeInc) and the POWERBOOST flag for each
 * performance level, and upload the computed power-boost limit to SMC
 * SRAM.  Level 0 is left with all containment parameters at zero.
 *
 * Returns 0 on success (or when containment is disabled), negative
 * errno on invalid state or unadjustable TDP limits.
 */
static int ni_populate_power_containment_values(struct radeon_device *rdev,
						struct radeon_ps *radeon_state,
						NISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 prev_sclk;
	u32 max_sclk;
	u32 min_sclk;
	int i, ret;
	u32 tdp_limit;
	u32 near_tdp_limit;
	u32 power_boost_limit;
	u8 max_ps_percent;

	if (ni_pi->enable_power_containment == false)
		return 0;

	if (state->performance_level_count == 0)
		return -EINVAL;

	if (smc_state->levelCount != state->performance_level_count)
		return -EINVAL;

	ret = ni_calculate_adjusted_tdp_limits(rdev,
					       false, /* ??? */
					       rdev->pm.dpm.tdp_adjustment,
					       &tdp_limit,
					       &near_tdp_limit);
	if (ret)
		return ret;

	power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state, near_tdp_limit);

	/* if the boost limit cannot be written to SMC SRAM, disable boosting */
	ret = rv770_write_smc_sram_dword(rdev,
					 pi->state_table_start +
					 offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
					 offsetof(PP_NIslands_DPM2Parameters, PowerBoostLimit),
					 ni_scale_power_for_smc(power_boost_limit, ni_get_smc_power_scaling_factor(rdev)),
					 pi->sram_end);
	if (ret)
		power_boost_limit = 0;

	/* level 0: no pulse skipping, no TDP adjustment */
	smc_state->levels[0].dpm2.MaxPS = 0;
	smc_state->levels[0].dpm2.NearTDPDec = 0;
	smc_state->levels[0].dpm2.AboveSafeInc = 0;
	smc_state->levels[0].dpm2.BelowSafeInc = 0;
	smc_state->levels[0].stateFlags |= power_boost_limit ? PPSMC_STATEFLAG_POWERBOOST : 0;

	for (i = 1; i < state->performance_level_count; i++) {
		prev_sclk = state->performance_levels[i-1].sclk;
		max_sclk = state->performance_levels[i].sclk;
		/* middle levels use the M percentage, the top level H */
		max_ps_percent = (i != (state->performance_level_count - 1)) ?
			NISLANDS_DPM2_MAXPS_PERCENT_M : NISLANDS_DPM2_MAXPS_PERCENT_H;

		/* levels must be ordered by ascending sclk */
		if (max_sclk < prev_sclk)
			return -EINVAL;

		if ((max_ps_percent == 0) || (prev_sclk == max_sclk) || eg_pi->uvd_enabled)
			min_sclk = max_sclk;
		else if (1 == i)
			min_sclk = prev_sclk;
		else
			min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;

		/* never throttle below the lowest performance level */
		if (min_sclk < state->performance_levels[0].sclk)
			min_sclk = state->performance_levels[0].sclk;

		if (min_sclk == 0)
			return -EINVAL;

		/* MaxPS: fraction of pulses that may be skipped for this level */
		smc_state->levels[i].dpm2.MaxPS =
			(u8)((NISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
		smc_state->levels[i].dpm2.NearTDPDec = NISLANDS_DPM2_NEAR_TDP_DEC;
		smc_state->levels[i].dpm2.AboveSafeInc = NISLANDS_DPM2_ABOVE_SAFE_INC;
		smc_state->levels[i].dpm2.BelowSafeInc = NISLANDS_DPM2_BELOW_SAFE_INC;
		/* power boost is not applied to the highest level */
		smc_state->levels[i].stateFlags |=
			((i != (state->performance_level_count - 1)) && power_boost_limit) ?
			PPSMC_STATEFLAG_POWERBOOST : 0;
	}

	return 0;
}
/*
 * Program per-level SQ (shader) power throttle values.  When SQ ramping
 * is enabled and the level's sclk is at or above the configured ramping
 * threshold, the tuned min/max power window is used; otherwise the
 * fields are saturated to their full masks (presumably an "unlimited"
 * setting — TODO confirm against the SQ_POWER_THROTTLE register spec).
 *
 * Ramping is silently disabled if any tuning constant would not fit in
 * its register field.
 */
static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
					 struct radeon_ps *radeon_state,
					 NISLANDS_SMC_SWSTATE *smc_state)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 sq_power_throttle;
	u32 sq_power_throttle2;
	bool enable_sq_ramping = ni_pi->enable_sq_ramping;
	int i;

	if (state->performance_level_count == 0)
		return -EINVAL;

	if (smc_state->levelCount != state->performance_level_count)
		return -EINVAL;

	if (rdev->pm.dpm.sq_ramping_threshold == 0)
		return -EINVAL;

	/* sanity-check that each constant fits its register field */
	if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
		enable_sq_ramping = false;

	if (NISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
		enable_sq_ramping = false;

	if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
		enable_sq_ramping = false;

	if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
		enable_sq_ramping = false;

	if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
		enable_sq_ramping = false;

	for (i = 0; i < state->performance_level_count; i++) {
		sq_power_throttle = 0;
		sq_power_throttle2 = 0;

		if ((state->performance_levels[i].sclk >= rdev->pm.dpm.sq_ramping_threshold) &&
		    enable_sq_ramping) {
			sq_power_throttle |= MAX_POWER(NISLANDS_DPM2_SQ_RAMP_MAX_POWER);
			sq_power_throttle |= MIN_POWER(NISLANDS_DPM2_SQ_RAMP_MIN_POWER);
			sq_power_throttle2 |= MAX_POWER_DELTA(NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
			sq_power_throttle2 |= STI_SIZE(NISLANDS_DPM2_SQ_RAMP_STI_SIZE);
			sq_power_throttle2 |= LTI_RATIO(NISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
		} else {
			/* ramping off for this level: saturate all fields */
			sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
			sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
		}

		smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
		smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
	}

	return 0;
}
/*
 * Activate or deactivate TDP clamping on the SMC.  Tracks the resulting
 * state in ni_pi->pc_enabled.  No-op when power containment is not
 * enabled for this asic, and activation is skipped for UVD states.
 *
 * Returns 0 on success, -EINVAL if the SMC rejects the message.
 */
static int ni_enable_power_containment(struct radeon_device *rdev,
				       struct radeon_ps *radeon_new_state,
				       bool enable)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	PPSMC_Result result;

	if (!ni_pi->enable_power_containment)
		return 0;

	if (!enable) {
		result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingInactive);
		ni_pi->pc_enabled = false;
		return (result == PPSMC_Result_OK) ? 0 : -EINVAL;
	}

	/* clamping is not engaged for UVD states */
	if (r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2))
		return 0;

	result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingActive);
	ni_pi->pc_enabled = (result == PPSMC_Result_OK);

	return ni_pi->pc_enabled ? 0 : -EINVAL;
}
/*
 * Convert a driver power state into the SMC software-state structure:
 * per-level clocks/voltages, arbitration index, display watermark and
 * AC timing index, followed by the dependent tables (SP, power
 * containment, SQ ramping, transition times).
 *
 * Failures in the containment/ramping tables are non-fatal: the
 * corresponding feature is disabled and conversion continues.
 */
static int ni_convert_power_state_to_smc(struct radeon_device *rdev,
					 struct radeon_ps *radeon_state,
					 NISLANDS_SMC_SWSTATE *smc_state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	int i, ret;
	/* 100% of the top level's sclk (the *100/100 leaves a percent hook) */
	u32 threshold = state->performance_levels[state->performance_level_count - 1].sclk * 100 / 100;

	if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;

	smc_state->levelCount = 0;

	if (state->performance_level_count > NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE)
		return -EINVAL;

	for (i = 0; i < state->performance_level_count; i++) {
		ret = ni_convert_power_level_to_smc(rdev, &state->performance_levels[i],
						    &smc_state->levels[i]);
		smc_state->levels[i].arbRefreshState =
			(u8)(NISLANDS_DRIVER_STATE_ARB_INDEX + i);

		if (ret)
			return ret;

		/* watermark selection depends on whether containment is in use */
		if (ni_pi->enable_power_containment)
			smc_state->levels[i].displayWatermark =
				(state->performance_levels[i].sclk < threshold) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
		else
			smc_state->levels[i].displayWatermark = (i < 2) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;

		/* AC timing: point at this level's driver-state MC register slot */
		if (eg_pi->dynamic_ac_timing)
			smc_state->levels[i].ACIndex = NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
		else
			smc_state->levels[i].ACIndex = 0;

		smc_state->levelCount++;
	}

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_watermark_threshold,
				      cpu_to_be32(threshold / 512));

	ni_populate_smc_sp(rdev, radeon_state, smc_state);

	ret = ni_populate_power_containment_values(rdev, radeon_state, smc_state);
	if (ret)
		ni_pi->enable_power_containment = false;

	ret = ni_populate_sq_ramping_values(rdev, radeon_state, smc_state);
	if (ret)
		ni_pi->enable_sq_ramping = false;

	return ni_populate_smc_t(rdev, radeon_state, smc_state);
}
static int ni_upload_sw_state(struct radeon_device *rdev,
struct radeon_ps *radeon_new_state)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u16 address = pi->state_table_start +
offsetof(NISLANDS_SMC_STATETABLE, driverState);
u16 state_size = sizeof(NISLANDS_SMC_SWSTATE) +
((NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1) * sizeof(NISLANDS_SMC_HW_PERFORMANCE_LEVEL));
int ret;
NISLANDS_SMC_SWSTATE *smc_state = kzalloc(state_size, GFP_KERNEL);
if (smc_state == NULL)
return -ENOMEM;
ret = ni_convert_power_state_to_smc(rdev, radeon_new_state, smc_state);
if (ret)
goto done;
ret = rv770_copy_bytes_to_smc(rdev, address, (u8 *)smc_state, state_size, pi->sram_end);
done:
kfree(smc_state);
return ret;
}
static int ni_set_mc_special_registers(struct radeon_device *rdev,
struct ni_mc_reg_table *table)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u8 i, j, k;
u32 temp_reg;
for (i = 0, j = table->last; i < table->last; i++) {
switch (table->mc_reg_address[i].s1) {
case MC_SEQ_MISC1 >> 2:
if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
temp_reg = RREG32(MC_PMG_CMD_EMRS);
table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
for (k = 0; k < table->num_entries; k++)
table->mc_reg_table_entry[k].mc_data[j] =
((temp_reg & 0xffff0000)) |
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
j++;
if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
temp_reg = RREG32(MC_PMG_CMD_MRS);
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
for(k = 0; k < table->num_entries; k++) {
table->mc_reg_table_entry[k].mc_data[j] =
(temp_reg & 0xffff0000) |
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
if (!pi->mem_gddr5)
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
}
j++;
if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
break;
case MC_SEQ_RESERVE_M >> 2:
temp_reg = RREG32(MC_PMG_CMD_MRS1);
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
for (k = 0; k < table->num_entries; k++)
table->mc_reg_table_entry[k].mc_data[j] =
(temp_reg & 0xffff0000) |
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
j++;
if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
break;
default:
break;
}
}
table->last = j;
return 0;
}
/*
 * Translate a live MC sequencer register index into its LP shadow
 * register index.  Returns true and stores the shadow index in
 * *out_reg on a match; returns false (leaving *out_reg untouched)
 * for registers without a shadow.
 */
static bool ni_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
	static const struct {
		u16 reg;
		u16 lp_reg;
	} shadow_map[] = {
		{ MC_SEQ_RAS_TIMING >> 2,   MC_SEQ_RAS_TIMING_LP >> 2 },
		{ MC_SEQ_CAS_TIMING >> 2,   MC_SEQ_CAS_TIMING_LP >> 2 },
		{ MC_SEQ_MISC_TIMING >> 2,  MC_SEQ_MISC_TIMING_LP >> 2 },
		{ MC_SEQ_MISC_TIMING2 >> 2, MC_SEQ_MISC_TIMING2_LP >> 2 },
		{ MC_SEQ_RD_CTL_D0 >> 2,    MC_SEQ_RD_CTL_D0_LP >> 2 },
		{ MC_SEQ_RD_CTL_D1 >> 2,    MC_SEQ_RD_CTL_D1_LP >> 2 },
		{ MC_SEQ_WR_CTL_D0 >> 2,    MC_SEQ_WR_CTL_D0_LP >> 2 },
		{ MC_SEQ_WR_CTL_D1 >> 2,    MC_SEQ_WR_CTL_D1_LP >> 2 },
		{ MC_PMG_CMD_EMRS >> 2,     MC_SEQ_PMG_CMD_EMRS_LP >> 2 },
		{ MC_PMG_CMD_MRS >> 2,      MC_SEQ_PMG_CMD_MRS_LP >> 2 },
		{ MC_PMG_CMD_MRS1 >> 2,     MC_SEQ_PMG_CMD_MRS1_LP >> 2 },
		{ MC_SEQ_PMG_TIMING >> 2,   MC_SEQ_PMG_TIMING_LP >> 2 },
		{ MC_PMG_CMD_MRS2 >> 2,     MC_SEQ_PMG_CMD_MRS2_LP >> 2 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(shadow_map) / sizeof(shadow_map[0]); i++) {
		if (shadow_map[i].reg == in_reg) {
			*out_reg = shadow_map[i].lp_reg;
			return true;
		}
	}

	return false;
}
static void ni_set_valid_flag(struct ni_mc_reg_table *table)
{
u8 i, j;
for (i = 0; i < table->last; i++) {
for (j = 1; j < table->num_entries; j++) {
if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
table->valid_flag |= 1 << i;
break;
}
}
}
}
/*
 * Resolve the s0 (shadow) index for every entry in the MC register
 * table: use the LP shadow register where one exists, otherwise fall
 * back to the live register index s1.
 */
static void ni_set_s0_mc_reg_index(struct ni_mc_reg_table *table)
{
	u32 i;

	for (i = 0; i < table->last; i++) {
		u16 lp_reg;

		if (ni_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &lp_reg))
			table->mc_reg_address[i].s0 = lp_reg;
		else
			table->mc_reg_address[i].s0 = table->mc_reg_address[i].s1;
	}
}
/*
 * Copy the VBIOS-provided AC timing table into the driver's own MC
 * register table.  Returns -EINVAL if the source exceeds either the
 * register-column or entry-count capacity of the destination.
 */
static int ni_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
				      struct ni_mc_reg_table *ni_table)
{
	u8 ent, reg;

	if (table->last > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
		return -EINVAL;
	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
		return -EINVAL;

	/* register addresses */
	for (reg = 0; reg < table->last; reg++)
		ni_table->mc_reg_address[reg].s1 = table->mc_reg_address[reg].s1;
	ni_table->last = table->last;

	/* per-entry mclk ceiling and register data */
	for (ent = 0; ent < table->num_entries; ent++) {
		ni_table->mc_reg_table_entry[ent].mclk_max =
			table->mc_reg_table_entry[ent].mclk_max;
		for (reg = 0; reg < table->last; reg++)
			ni_table->mc_reg_table_entry[ent].mc_data[reg] =
				table->mc_reg_table_entry[ent].mc_data[reg];
	}
	ni_table->num_entries = table->num_entries;

	return 0;
}
/*
 * Build the driver's MC (memory controller) register table:
 *  1. seed every LP shadow register from its live counterpart,
 *  2. read the AC timing table from the VBIOS via atombios,
 *  3. copy it into the driver table,
 *  4. resolve shadow (s0) register indices,
 *  5. append the special EMRS/MRS/MRS1 columns,
 *  6. mark which columns actually vary between entries.
 *
 * Returns 0 on success or a negative errno from any step.
 */
static int ni_initialize_mc_reg_table(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	int ret;
	struct atom_mc_reg_table *table;
	struct ni_mc_reg_table *ni_table = &ni_pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(rdev);

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* copy the live sequencer registers into their LP shadows */
	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));

	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);

	if (ret)
		goto init_mc_done;

	ret = ni_copy_vbios_mc_reg_table(table, ni_table);

	if (ret)
		goto init_mc_done;

	ni_set_s0_mc_reg_index(ni_table);

	ret = ni_set_mc_special_registers(rdev, ni_table);

	if (ret)
		goto init_mc_done;

	ni_set_valid_flag(ni_table);

init_mc_done:
	kfree(table);

	return ret;
}
/*
 * Compact the valid MC register addresses into the SMC register table:
 * only columns flagged in valid_flag are copied (big-endian for the
 * SMC), and the output is truncated at the SMC array capacity.
 */
static void ni_populate_mc_reg_addresses(struct radeon_device *rdev,
					 SMC_NIslands_MCRegisters *mc_reg_table)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 src, dst = 0;

	for (src = 0; src < ni_pi->mc_reg_table.last; src++) {
		if (!(ni_pi->mc_reg_table.valid_flag & (1 << src)))
			continue;
		if (dst >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
			break;
		mc_reg_table->address[dst].s0 =
			cpu_to_be16(ni_pi->mc_reg_table.mc_reg_address[src].s0);
		mc_reg_table->address[dst].s1 =
			cpu_to_be16(ni_pi->mc_reg_table.mc_reg_address[src].s1);
		dst++;
	}
	mc_reg_table->last = (u8)dst;
}
/*
 * Copy one AC timing entry into an SMC register set, keeping only the
 * columns flagged in valid_flag and converting to big-endian.
 */
static void ni_convert_mc_registers(struct ni_mc_reg_entry *entry,
				    SMC_NIslands_MCRegisterSet *data,
				    u32 num_entries, u32 valid_flag)
{
	u32 src, dst = 0;

	for (src = 0; src < num_entries; src++) {
		if (!(valid_flag & (1 << src)))
			continue;
		data->value[dst++] = cpu_to_be32(entry->mc_data[src]);
	}
}
/*
 * Select the AC timing entry matching a performance level's memory
 * clock and convert it into an SMC register set.  Picks the first
 * entry whose mclk_max covers pl->mclk; falls back to the last entry
 * when none does.
 */
static void ni_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
						 struct rv7xx_pl *pl,
						 SMC_NIslands_MCRegisterSet *mc_reg_table_data)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 sel;

	for (sel = 0; sel < ni_pi->mc_reg_table.num_entries; sel++) {
		if (pl->mclk <= ni_pi->mc_reg_table.mc_reg_table_entry[sel].mclk_max)
			break;
	}

	/* no entry covers this clock: use the highest one */
	if ((sel == ni_pi->mc_reg_table.num_entries) && (sel > 0))
		sel--;

	ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[sel],
				mc_reg_table_data,
				ni_pi->mc_reg_table.last,
				ni_pi->mc_reg_table.valid_flag);
}
/*
 * Emit one SMC MC register set per performance level of the given
 * state, filling the driver-state slots of the SMC register table.
 */
static void ni_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
					   struct radeon_ps *radeon_state,
					   SMC_NIslands_MCRegisters *mc_reg_table)
{
	struct ni_ps *state = ni_get_ps(radeon_state);
	int level;

	for (level = 0; level < state->performance_level_count; level++)
		ni_convert_mc_reg_table_entry_to_smc(rdev,
			&state->performance_levels[level],
			&mc_reg_table->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + level]);
}
/*
 * Build and upload the full SMC MC register table for the boot state:
 * the address list, slot 0 from the boot state's first performance
 * level, slot 1 from AC timing entry 0 (presumably the initial/ULV
 * slot — TODO confirm slot layout against the SMC table spec), then
 * one driver-state slot per boot performance level.
 */
static int ni_populate_mc_reg_table(struct radeon_device *rdev,
				    struct radeon_ps *radeon_boot_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
	SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;

	memset(mc_reg_table, 0, sizeof(SMC_NIslands_MCRegisters));

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_seq_index, 1);

	ni_populate_mc_reg_addresses(rdev, mc_reg_table);

	/* slot 0: boot state values */
	ni_convert_mc_reg_table_entry_to_smc(rdev, &boot_state->performance_levels[0],
					     &mc_reg_table->data[0]);

	/* slot 1: values of the first AC timing entry */
	ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[0],
				&mc_reg_table->data[1],
				ni_pi->mc_reg_table.last,
				ni_pi->mc_reg_table.valid_flag);

	/* driver-state slots: one per performance level */
	ni_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, mc_reg_table);

	return rv770_copy_bytes_to_smc(rdev, eg_pi->mc_reg_table_start,
				       (u8 *)mc_reg_table,
				       sizeof(SMC_NIslands_MCRegisters),
				       pi->sram_end);
}
/*
 * Rebuild the driver-state MC register slots for a new power state and
 * upload only that portion of the table into SMC SRAM.
 */
static int ni_upload_mc_reg_table(struct radeon_device *rdev,
				  struct radeon_ps *radeon_new_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *ni_new_state = ni_get_ps(radeon_new_state);
	SMC_NIslands_MCRegisters *regs = &ni_pi->smc_mc_reg_table;
	u16 dst;

	memset(regs, 0, sizeof(SMC_NIslands_MCRegisters));
	ni_convert_mc_reg_table_to_smc(rdev, radeon_new_state, regs);

	/* only the driver-state slots need re-uploading */
	dst = eg_pi->mc_reg_table_start +
		(u16)offsetof(SMC_NIslands_MCRegisters,
			      data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);

	return rv770_copy_bytes_to_smc(rdev, dst,
				       (u8 *)&regs->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
				       sizeof(SMC_NIslands_MCRegisterSet) *
				       ni_new_state->performance_level_count,
				       pi->sram_end);
}
/*
 * Fill the SMC CAC leakage LUT using the driver's analytic leakage
 * model: for each (temperature row, voltage column) cell, compute the
 * leakage at that temperature (clamped to the configured minimum) and
 * VDDC entry, then scale it into SMC power units.  Voltage columns
 * beyond the voltage table are padded with the maximum leakage seen.
 *
 * Temperature rows step by 8 units, multiplied by 1000 — presumably
 * millidegrees per the t comparison with leakage_minimum_temperature;
 * TODO confirm units.
 */
static int ni_init_driver_calculated_leakage_table(struct radeon_device *rdev,
						   PP_NIslands_CACTABLES *cac_tables)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	u32 leakage = 0;
	unsigned int i, j, table_size;
	s32 t;
	u32 smc_leakage, max_leakage = 0;
	u32 scaling_factor;

	table_size = eg_pi->vddc_voltage_table.count;

	if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
		table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;

	scaling_factor = ni_get_smc_power_scaling_factor(rdev);

	for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
		for (j = 0; j < table_size; j++) {
			/* row temperature, clamped to the model's minimum */
			t = (1000 * ((i + 1) * 8));

			if (t < ni_pi->cac_data.leakage_minimum_temperature)
				t = ni_pi->cac_data.leakage_minimum_temperature;

			ni_calculate_leakage_for_v_and_t(rdev,
							 &ni_pi->cac_data.leakage_coefficients,
							 eg_pi->vddc_voltage_table.entries[j].value,
							 t,
							 ni_pi->cac_data.i_leakage,
							 &leakage);

			smc_leakage = ni_scale_power_for_smc(leakage, scaling_factor) / 1000;
			if (smc_leakage > max_leakage)
				max_leakage = smc_leakage;

			cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(smc_leakage);
		}
	}

	/* pad unused voltage columns with the worst-case leakage */
	for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
		for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
			cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(max_leakage);
	}
	return 0;
}
/*
 * Fill the SMC CAC leakage LUT from the platform-provided leakage
 * table (one leakage value per voltage, replicated across all
 * temperature rows).  Columns beyond the table are padded with the
 * maximum leakage seen.
 */
static int ni_init_simplified_leakage_table(struct radeon_device *rdev,
					    PP_NIslands_CACTABLES *cac_tables)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_cac_leakage_table *leakage_table =
		&rdev->pm.dpm.dyn_state.cac_leakage_table;
	u32 i, j, table_size;
	u32 smc_leakage, max_leakage = 0;
	u32 scaling_factor;

	/*
	 * NOTE(review): this can never be NULL — it is the address of an
	 * embedded struct member; the real emptiness check is table_size
	 * == 0 below.
	 */
	if (!leakage_table)
		return -EINVAL;

	/* use the smaller of the voltage table and the leakage table */
	table_size = leakage_table->count;

	if (eg_pi->vddc_voltage_table.count != table_size)
		table_size = (eg_pi->vddc_voltage_table.count < leakage_table->count) ?
			eg_pi->vddc_voltage_table.count : leakage_table->count;

	if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
		table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;

	if (table_size == 0)
		return -EINVAL;

	scaling_factor = ni_get_smc_power_scaling_factor(rdev);

	for (j = 0; j < table_size; j++) {
		smc_leakage = leakage_table->entries[j].leakage;

		if (smc_leakage > max_leakage)
			max_leakage = smc_leakage;

		/* same leakage for every temperature row */
		for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
			cac_tables->cac_lkge_lut[i][j] =
				cpu_to_be32(ni_scale_power_for_smc(smc_leakage, scaling_factor));
	}

	/* pad unused voltage columns with the worst-case leakage */
	for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
		for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
			cac_tables->cac_lkge_lut[i][j] =
				cpu_to_be32(ni_scale_power_for_smc(max_leakage, scaling_factor));
	}
	return 0;
}
/*
 * Build and upload the SMC CAC (capacitance/power estimation) tables:
 * program the CAC control register, copy the per-level DC CAC and BIF
 * weights, build the leakage LUT (driver-calculated or simplified),
 * then copy everything into SMC SRAM.
 *
 * Note: a failure anywhere (other than allocation) does not propagate
 * — CAC and power containment are simply disabled and 0 is returned.
 */
static int ni_initialize_smc_cac_tables(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	PP_NIslands_CACTABLES *cac_tables = NULL;
	int i, ret;
	u32 reg;

	if (ni_pi->enable_cac == false)
		return 0;

	cac_tables = kzalloc(sizeof(PP_NIslands_CACTABLES), GFP_KERNEL);
	if (!cac_tables)
		return -ENOMEM;

	/* program the CAC timing parameters */
	reg = RREG32(CG_CAC_CTRL) & ~(TID_CNT_MASK | TID_UNIT_MASK);
	reg |= (TID_CNT(ni_pi->cac_weights->tid_cnt) |
		TID_UNIT(ni_pi->cac_weights->tid_unit));
	WREG32(CG_CAC_CTRL, reg);

	for (i = 0; i < NISLANDS_DCCAC_MAX_LEVELS; i++)
		ni_pi->dc_cac_table[i] = ni_pi->cac_weights->dc_cac[i];

	for (i = 0; i < SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES; i++)
		cac_tables->cac_bif_lut[i] = ni_pi->cac_weights->pcie_cac[i];

	/* seed the cac_data working set from the asic weights */
	ni_pi->cac_data.i_leakage = rdev->pm.dpm.cac_leakage;
	ni_pi->cac_data.pwr_const = 0;
	ni_pi->cac_data.dc_cac_value = ni_pi->dc_cac_table[NISLANDS_DCCAC_LEVEL_0];
	ni_pi->cac_data.bif_cac_value = 0;
	ni_pi->cac_data.mc_wr_weight = ni_pi->cac_weights->mc_write_weight;
	ni_pi->cac_data.mc_rd_weight = ni_pi->cac_weights->mc_read_weight;
	ni_pi->cac_data.allow_ovrflw = 0;
	ni_pi->cac_data.l2num_win_tdp = ni_pi->lta_window_size;
	ni_pi->cac_data.num_win_tdp = 0;
	ni_pi->cac_data.lts_truncate_n = ni_pi->lts_truncate;

	if (ni_pi->driver_calculate_cac_leakage)
		ret = ni_init_driver_calculated_leakage_table(rdev, cac_tables);
	else
		ret = ni_init_simplified_leakage_table(rdev, cac_tables);

	if (ret)
		goto done_free;

	cac_tables->pwr_const = cpu_to_be32(ni_pi->cac_data.pwr_const);
	cac_tables->dc_cacValue = cpu_to_be32(ni_pi->cac_data.dc_cac_value);
	cac_tables->bif_cacValue = cpu_to_be32(ni_pi->cac_data.bif_cac_value);
	cac_tables->AllowOvrflw = ni_pi->cac_data.allow_ovrflw;
	cac_tables->MCWrWeight = ni_pi->cac_data.mc_wr_weight;
	cac_tables->MCRdWeight = ni_pi->cac_data.mc_rd_weight;
	cac_tables->numWin_TDP = ni_pi->cac_data.num_win_tdp;
	cac_tables->l2numWin_TDP = ni_pi->cac_data.l2num_win_tdp;
	cac_tables->lts_truncate_n = ni_pi->cac_data.lts_truncate_n;

	ret = rv770_copy_bytes_to_smc(rdev, ni_pi->cac_table_start, (u8 *)cac_tables,
				      sizeof(PP_NIslands_CACTABLES), pi->sram_end);

done_free:
	/* failure is non-fatal: disable CAC and containment instead */
	if (ret) {
		ni_pi->enable_cac = false;
		ni_pi->enable_power_containment = false;
	}

	kfree(cac_tables);

	return 0;
}
/*
 * Program the hardware CAC manager's per-block activity weights
 * (regions 1-5) and the SQ CAC thresholds from the asic-specific
 * weight table, then configure the MC read/write weights.  Pure
 * register-write sequence; only runs when CAC is enabled and the
 * board requires explicit CAC configuration.
 */
static int ni_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 reg;

	if (!ni_pi->enable_cac ||
	    !ni_pi->cac_configuration_required)
		return 0;

	if (ni_pi->cac_weights == NULL)
		return -EINVAL;

	/* CAC region 1 weights */
	reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_0) & ~(WEIGHT_TCP_SIG0_MASK |
						      WEIGHT_TCP_SIG1_MASK |
						      WEIGHT_TA_SIG_MASK);
	reg |= (WEIGHT_TCP_SIG0(ni_pi->cac_weights->weight_tcp_sig0) |
		WEIGHT_TCP_SIG1(ni_pi->cac_weights->weight_tcp_sig1) |
		WEIGHT_TA_SIG(ni_pi->cac_weights->weight_ta_sig));
	WREG32_CG(CG_CAC_REGION_1_WEIGHT_0, reg);

	reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_1) & ~(WEIGHT_TCC_EN0_MASK |
						      WEIGHT_TCC_EN1_MASK |
						      WEIGHT_TCC_EN2_MASK);
	reg |= (WEIGHT_TCC_EN0(ni_pi->cac_weights->weight_tcc_en0) |
		WEIGHT_TCC_EN1(ni_pi->cac_weights->weight_tcc_en1) |
		WEIGHT_TCC_EN2(ni_pi->cac_weights->weight_tcc_en2));
	WREG32_CG(CG_CAC_REGION_1_WEIGHT_1, reg);

	/* CAC region 2 weights */
	reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_0) & ~(WEIGHT_CB_EN0_MASK |
						      WEIGHT_CB_EN1_MASK |
						      WEIGHT_CB_EN2_MASK |
						      WEIGHT_CB_EN3_MASK);
	reg |= (WEIGHT_CB_EN0(ni_pi->cac_weights->weight_cb_en0) |
		WEIGHT_CB_EN1(ni_pi->cac_weights->weight_cb_en1) |
		WEIGHT_CB_EN2(ni_pi->cac_weights->weight_cb_en2) |
		WEIGHT_CB_EN3(ni_pi->cac_weights->weight_cb_en3));
	WREG32_CG(CG_CAC_REGION_2_WEIGHT_0, reg);

	reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_1) & ~(WEIGHT_DB_SIG0_MASK |
						      WEIGHT_DB_SIG1_MASK |
						      WEIGHT_DB_SIG2_MASK |
						      WEIGHT_DB_SIG3_MASK);
	reg |= (WEIGHT_DB_SIG0(ni_pi->cac_weights->weight_db_sig0) |
		WEIGHT_DB_SIG1(ni_pi->cac_weights->weight_db_sig1) |
		WEIGHT_DB_SIG2(ni_pi->cac_weights->weight_db_sig2) |
		WEIGHT_DB_SIG3(ni_pi->cac_weights->weight_db_sig3));
	WREG32_CG(CG_CAC_REGION_2_WEIGHT_1, reg);

	reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_2) & ~(WEIGHT_SXM_SIG0_MASK |
						      WEIGHT_SXM_SIG1_MASK |
						      WEIGHT_SXM_SIG2_MASK |
						      WEIGHT_SXS_SIG0_MASK |
						      WEIGHT_SXS_SIG1_MASK);
	reg |= (WEIGHT_SXM_SIG0(ni_pi->cac_weights->weight_sxm_sig0) |
		WEIGHT_SXM_SIG1(ni_pi->cac_weights->weight_sxm_sig1) |
		WEIGHT_SXM_SIG2(ni_pi->cac_weights->weight_sxm_sig2) |
		WEIGHT_SXS_SIG0(ni_pi->cac_weights->weight_sxs_sig0) |
		WEIGHT_SXS_SIG1(ni_pi->cac_weights->weight_sxs_sig1));
	WREG32_CG(CG_CAC_REGION_2_WEIGHT_2, reg);

	/* CAC region 3 weights */
	reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_0) & ~(WEIGHT_XBR_0_MASK |
						      WEIGHT_XBR_1_MASK |
						      WEIGHT_XBR_2_MASK |
						      WEIGHT_SPI_SIG0_MASK);
	reg |= (WEIGHT_XBR_0(ni_pi->cac_weights->weight_xbr_0) |
		WEIGHT_XBR_1(ni_pi->cac_weights->weight_xbr_1) |
		WEIGHT_XBR_2(ni_pi->cac_weights->weight_xbr_2) |
		WEIGHT_SPI_SIG0(ni_pi->cac_weights->weight_spi_sig0));
	WREG32_CG(CG_CAC_REGION_3_WEIGHT_0, reg);

	reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_1) & ~(WEIGHT_SPI_SIG1_MASK |
						      WEIGHT_SPI_SIG2_MASK |
						      WEIGHT_SPI_SIG3_MASK |
						      WEIGHT_SPI_SIG4_MASK |
						      WEIGHT_SPI_SIG5_MASK);
	reg |= (WEIGHT_SPI_SIG1(ni_pi->cac_weights->weight_spi_sig1) |
		WEIGHT_SPI_SIG2(ni_pi->cac_weights->weight_spi_sig2) |
		WEIGHT_SPI_SIG3(ni_pi->cac_weights->weight_spi_sig3) |
		WEIGHT_SPI_SIG4(ni_pi->cac_weights->weight_spi_sig4) |
		WEIGHT_SPI_SIG5(ni_pi->cac_weights->weight_spi_sig5));
	WREG32_CG(CG_CAC_REGION_3_WEIGHT_1, reg);

	/* CAC region 4 weights */
	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_0) & ~(WEIGHT_LDS_SIG0_MASK |
						      WEIGHT_LDS_SIG1_MASK |
						      WEIGHT_SC_MASK);
	reg |= (WEIGHT_LDS_SIG0(ni_pi->cac_weights->weight_lds_sig0) |
		WEIGHT_LDS_SIG1(ni_pi->cac_weights->weight_lds_sig1) |
		WEIGHT_SC(ni_pi->cac_weights->weight_sc));
	WREG32_CG(CG_CAC_REGION_4_WEIGHT_0, reg);

	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_1) & ~(WEIGHT_BIF_MASK |
						      WEIGHT_CP_MASK |
						      WEIGHT_PA_SIG0_MASK |
						      WEIGHT_PA_SIG1_MASK |
						      WEIGHT_VGT_SIG0_MASK);
	reg |= (WEIGHT_BIF(ni_pi->cac_weights->weight_bif) |
		WEIGHT_CP(ni_pi->cac_weights->weight_cp) |
		WEIGHT_PA_SIG0(ni_pi->cac_weights->weight_pa_sig0) |
		WEIGHT_PA_SIG1(ni_pi->cac_weights->weight_pa_sig1) |
		WEIGHT_VGT_SIG0(ni_pi->cac_weights->weight_vgt_sig0));
	WREG32_CG(CG_CAC_REGION_4_WEIGHT_1, reg);

	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_2) & ~(WEIGHT_VGT_SIG1_MASK |
						      WEIGHT_VGT_SIG2_MASK |
						      WEIGHT_DC_SIG0_MASK |
						      WEIGHT_DC_SIG1_MASK |
						      WEIGHT_DC_SIG2_MASK);
	reg |= (WEIGHT_VGT_SIG1(ni_pi->cac_weights->weight_vgt_sig1) |
		WEIGHT_VGT_SIG2(ni_pi->cac_weights->weight_vgt_sig2) |
		WEIGHT_DC_SIG0(ni_pi->cac_weights->weight_dc_sig0) |
		WEIGHT_DC_SIG1(ni_pi->cac_weights->weight_dc_sig1) |
		WEIGHT_DC_SIG2(ni_pi->cac_weights->weight_dc_sig2));
	WREG32_CG(CG_CAC_REGION_4_WEIGHT_2, reg);

	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_3) & ~(WEIGHT_DC_SIG3_MASK |
						      WEIGHT_UVD_SIG0_MASK |
						      WEIGHT_UVD_SIG1_MASK |
						      WEIGHT_SPARE0_MASK |
						      WEIGHT_SPARE1_MASK);
	reg |= (WEIGHT_DC_SIG3(ni_pi->cac_weights->weight_dc_sig3) |
		WEIGHT_UVD_SIG0(ni_pi->cac_weights->weight_uvd_sig0) |
		WEIGHT_UVD_SIG1(ni_pi->cac_weights->weight_uvd_sig1) |
		WEIGHT_SPARE0(ni_pi->cac_weights->weight_spare0) |
		WEIGHT_SPARE1(ni_pi->cac_weights->weight_spare1));
	WREG32_CG(CG_CAC_REGION_4_WEIGHT_3, reg);

	/* CAC region 5 weights */
	reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_0) & ~(WEIGHT_SQ_VSP_MASK |
						      WEIGHT_SQ_VSP0_MASK);
	reg |= (WEIGHT_SQ_VSP(ni_pi->cac_weights->weight_sq_vsp) |
		WEIGHT_SQ_VSP0(ni_pi->cac_weights->weight_sq_vsp0));
	WREG32_CG(CG_CAC_REGION_5_WEIGHT_0, reg);

	reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_1) & ~(WEIGHT_SQ_GPR_MASK);
	reg |= WEIGHT_SQ_GPR(ni_pi->cac_weights->weight_sq_gpr);
	WREG32_CG(CG_CAC_REGION_5_WEIGHT_1, reg);

	/* region 4 spare-signal overrides */
	reg = RREG32_CG(CG_CAC_REGION_4_OVERRIDE_4) & ~(OVR_MODE_SPARE_0_MASK |
							OVR_VAL_SPARE_0_MASK |
							OVR_MODE_SPARE_1_MASK |
							OVR_VAL_SPARE_1_MASK);
	reg |= (OVR_MODE_SPARE_0(ni_pi->cac_weights->ovr_mode_spare_0) |
		OVR_VAL_SPARE_0(ni_pi->cac_weights->ovr_val_spare_0) |
		OVR_MODE_SPARE_1(ni_pi->cac_weights->ovr_mode_spare_1) |
		OVR_VAL_SPARE_1(ni_pi->cac_weights->ovr_val_spare_1));
	WREG32_CG(CG_CAC_REGION_4_OVERRIDE_4, reg);

	/* SQ thresholds */
	reg = RREG32(SQ_CAC_THRESHOLD) & ~(VSP_MASK |
					   VSP0_MASK |
					   GPR_MASK);
	reg |= (VSP(ni_pi->cac_weights->vsp) |
		VSP0(ni_pi->cac_weights->vsp0) |
		GPR(ni_pi->cac_weights->gpr));
	WREG32(SQ_CAC_THRESHOLD, reg);

	/* MC clock-gating config and read/write weights */
	reg = (MCDW_WR_ENABLE |
	       MCDX_WR_ENABLE |
	       MCDY_WR_ENABLE |
	       MCDZ_WR_ENABLE |
	       INDEX(0x09D4));
	WREG32(MC_CG_CONFIG, reg);

	reg = (READ_WEIGHT(ni_pi->cac_weights->mc_read_weight) |
	       WRITE_WEIGHT(ni_pi->cac_weights->mc_write_weight) |
	       ALLOW_OVERFLOW);
	WREG32(MC_CG_DATAPORT, reg);

	return 0;
}
/*
 * Enable or disable CAC (power estimation) in the SMC firmware.
 * Enabling is skipped for UVD states; long-term averaging is requested
 * opportunistically and disabled in the driver if the SMC rejects it.
 *
 * Returns 0 on success, -EINVAL if the EnableCac message fails; the
 * disable path reports success regardless of SMC acknowledgement.
 */
static int ni_enable_smc_cac(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     bool enable)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	int ret = 0;
	PPSMC_Result smc_result;

	if (ni_pi->enable_cac) {
		if (enable) {
			if (!r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
				/* result deliberately not checked — best effort */
				smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_CollectCAC_PowerCorreln);

				if (ni_pi->support_cac_long_term_average) {
					smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgEnable);
					if (PPSMC_Result_OK != smc_result)
						ni_pi->support_cac_long_term_average = false;
				}

				smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
				if (PPSMC_Result_OK != smc_result)
					ret = -EINVAL;

				ni_pi->cac_enabled = (PPSMC_Result_OK == smc_result) ? true : false;
			}
		} else if (ni_pi->cac_enabled) {
			smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);

			ni_pi->cac_enabled = false;

			if (ni_pi->support_cac_long_term_average) {
				smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgDisable);
				if (PPSMC_Result_OK != smc_result)
					ni_pi->support_cac_long_term_average = false;
			}
		}
	}

	return ret;
}
/*
 * Forward a PCIe performance request to the platform via ACPI.
 * Registers the device with the platform on the first GEN1/GEN2
 * request and deregisters on REMOVE_REGISTRY.  Compiles to a no-op
 * when ACPI support is not configured.
 */
static int ni_pcie_performance_request(struct radeon_device *rdev,
				       u8 perf_req, bool advertise)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

#if defined(CONFIG_ACPI)
	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
	    (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
		if (eg_pi->pcie_performance_request_registered == false)
			radeon_acpi_pcie_notify_device_ready(rdev);
		eg_pi->pcie_performance_request_registered = true;
		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
	} else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
		   eg_pi->pcie_performance_request_registered) {
		eg_pi->pcie_performance_request_registered = false;
		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
	}
#endif
	return 0;
}
/*
 * Record whether the PCIe link partner negotiated gen2, and if not,
 * advertise our gen2 capability to the platform.  Always returns 0.
 */
static int ni_advertise_gen2_capability(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 lc_speed;

	lc_speed = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);

	/* gen2 is live only if the other side both sent and supports it */
	pi->pcie_gen2 = (lc_speed & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
			(lc_speed & LC_OTHER_SIDE_SUPPORTS_GEN2);

	if (!pi->pcie_gen2)
		ni_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, true);

	return 0;
}
/*
 * Enable or disable dynamic PCIe gen2 speed switching in the BIF.
 * Only acts when the link partner negotiated gen2.  The enable path
 * sets the hardware voltage-interface control and gen2 strap, toggling
 * the failed-speed-change counter clear around the write; the disable
 * path only clears those bits when the board did not boot in gen2.
 */
static void ni_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
					    bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 tmp, bif;

	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);

	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
		if (enable) {
			if (!pi->boot_in_gen2) {
				/* 0xd client request — purpose per BIF spec, TODO confirm */
				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
				bif |= CG_CLIENT_REQ(0xd);
				WREG32(CG_BIF_REQ_AND_RSP, bif);
			}
			tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
			tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
			tmp |= LC_GEN2_EN_STRAP;

			/* pulse the failed-speed-change counter clear */
			tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
			udelay(10);
			tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
		} else {
			if (!pi->boot_in_gen2) {
				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
				bif |= CG_CLIENT_REQ(0xd);
				WREG32(CG_BIF_REQ_AND_RSP, bif);

				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
				tmp &= ~LC_GEN2_EN_STRAP;
			}
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
		}
	}
}
/*
 * Toggle dynamic PCIe gen2 support: configure the BIF side, then set
 * or clear the ENABLE_GEN2PCIE bit in GENERAL_PWRMGT accordingly.
 */
static void ni_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
					bool enable)
{
	u32 set_bits = enable ? ENABLE_GEN2PCIE : 0;

	ni_enable_bif_dynamic_pcie_gen2(rdev, enable);
	WREG32_P(GENERAL_PWRMGT, set_bits, ~ENABLE_GEN2PCIE);
}
void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
struct radeon_ps *new_ps,
struct radeon_ps *old_ps)
{
struct ni_ps *new_state = ni_get_ps(new_ps);
struct ni_ps *current_state = ni_get_ps(old_ps);
if ((new_ps->vclk == old_ps->vclk) &&
(new_ps->dclk == old_ps->dclk))
return;
if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
current_state->performance_levels[current_state->performance_level_count - 1].sclk)
return;
radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
}
void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
struct radeon_ps *new_ps,
struct radeon_ps *old_ps)
{
struct ni_ps *new_state = ni_get_ps(new_ps);
struct ni_ps *current_state = ni_get_ps(old_ps);
if ((new_ps->vclk == old_ps->vclk) &&
(new_ps->dclk == old_ps->dclk))
return;
if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
current_state->performance_levels[current_state->performance_level_count - 1].sclk)
return;
radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
}
/*
 * One-time DPM setup for the ASIC: snapshot the clock and arbitration
 * registers, detect the memory type, optionally advertise PCIe gen2
 * capability, then query gen2 status and enable ACPI power management.
 */
void ni_dpm_setup_asic(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	ni_read_clock_registers(rdev);
	btc_read_arb_registers(rdev);
	rv770_get_memory_type(rdev);
	if (eg_pi->pcie_performance_request)
		ni_advertise_gen2_capability(rdev);
	rv770_get_pcie_gen2_status(rdev);
	rv770_enable_acpi_pm(rdev);
}
/*
 * Cache @rps as the current power state.  Both the generic radeon_ps and
 * the NI-specific ni_ps are copied by value, and the cached radeon_ps is
 * re-pointed at the cached ni_ps so the pair stays self-contained.
 */
void ni_update_current_ps(struct radeon_device *rdev,
			  struct radeon_ps *rps)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *ps = ni_get_ps(rps);

	ni_pi->current_ps = *ps;
	eg_pi->current_rps = *rps;
	eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
}
/*
 * Cache @rps as the requested power state.  Mirrors
 * ni_update_current_ps(): copy both structs by value and re-point the
 * cached radeon_ps at the cached ni_ps.
 */
void ni_update_requested_ps(struct radeon_device *rdev,
			    struct radeon_ps *rps)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *ps = ni_get_ps(rps);

	ni_pi->requested_ps = *ps;
	eg_pi->requested_rps = *rps;
	eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
}
/*
 * Full DPM bring-up.  The ordering below matters: clock gating and the
 * voltage/timing tables must be in place before the SMC firmware is
 * uploaded and started, and CAC/power containment is configured before
 * DPM itself is started.  Returns 0 on success or a negative error code.
 */
int ni_dpm_enable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	if (pi->gfx_clock_gating)
		ni_cg_clockgating_default(rdev);
	/* Refuse to enable twice.  NOTE(review): the gfx clock-gating
	 * default above is programmed even when this bails out -- confirm
	 * that ordering is intended. */
	if (btc_dpm_enabled(rdev))
		return -EINVAL;
	if (pi->mg_clock_gating)
		ni_mg_clockgating_default(rdev);
	if (eg_pi->ls_clock_gating)
		ni_ls_clockgating_default(rdev);
	/* Build the voltage tables if VDDC is software controlled. */
	if (pi->voltage_control) {
		rv770_enable_voltage_control(rdev, true);
		ret = cypress_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("cypress_construct_voltage_tables failed\n");
			return ret;
		}
	}
	/* Dynamic AC timing is optional: fall back instead of failing. */
	if (eg_pi->dynamic_ac_timing) {
		ret = ni_initialize_mc_reg_table(rdev);
		if (ret)
			eg_pi->dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		cypress_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		rv770_enable_thermal_protection(rdev, true);
	/* Program the state-transition timing parameters. */
	rv770_setup_bsp(rdev);
	rv770_program_git(rdev);
	rv770_program_tp(rdev);
	rv770_program_tpp(rdev);
	rv770_program_sstp(rdev);
	cypress_enable_display_gap(rdev);
	rv770_program_vc(rdev);
	if (pi->dynamic_pcie_gen2)
		ni_enable_dynamic_pcie_gen2(rdev, true);
	/* Upload the SMC firmware and populate its tables. */
	ret = rv770_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("rv770_upload_firmware failed\n");
		return ret;
	}
	ret = ni_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("ni_process_firmware_header failed\n");
		return ret;
	}
	ret = ni_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("ni_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ni_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("ni_init_smc_table failed\n");
		return ret;
	}
	ret = ni_init_smc_spll_table(rdev);
	if (ret) {
		DRM_ERROR("ni_init_smc_spll_table failed\n");
		return ret;
	}
	ret = ni_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("ni_init_arb_table_index failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = ni_populate_mc_reg_table(rdev, boot_ps);
		if (ret) {
			DRM_ERROR("ni_populate_mc_reg_table failed\n");
			return ret;
		}
	}
	/* Configure CAC (leakage) handling and TDP limits. */
	ret = ni_initialize_smc_cac_tables(rdev);
	if (ret) {
		DRM_ERROR("ni_initialize_smc_cac_tables failed\n");
		return ret;
	}
	ret = ni_initialize_hardware_cac_manager(rdev);
	if (ret) {
		DRM_ERROR("ni_initialize_hardware_cac_manager failed\n");
		return ret;
	}
	ret = ni_populate_smc_tdp_limits(rdev, boot_ps);
	if (ret) {
		DRM_ERROR("ni_populate_smc_tdp_limits failed\n");
		return ret;
	}
	ni_program_response_times(rdev);
	/* Start the SMC and then DPM proper. */
	r7xx_start_smc(rdev);
	ret = cypress_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("cypress_notify_smc_display_change failed\n");
		return ret;
	}
	cypress_enable_sclk_control(rdev, true);
	if (eg_pi->memory_transition)
		cypress_enable_mclk_control(rdev, true);
	cypress_start_dpm(rdev);
	if (pi->gfx_clock_gating)
		ni_gfx_clockgating_enable(rdev, true);
	if (pi->mg_clock_gating)
		ni_mg_clockgating_enable(rdev, true);
	if (eg_pi->ls_clock_gating)
		ni_ls_clockgating_enable(rdev, true);
	/* Hook up the internal thermal sensor interrupt if available. */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		PPSMC_Result result;

		ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
		result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);

		if (result != PPSMC_Result_OK)
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
	}
	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	/* The boot state is now the current state. */
	ni_update_current_ps(rdev, boot_ps);

	return 0;
}
/*
 * Tear down DPM, roughly in the reverse order of ni_dpm_enable(): undo
 * the power features, stop DPM, restore hardware defaults, halt the SMC
 * and fall back to the boot power state.  No-op if DPM is not enabled.
 */
void ni_dpm_disable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	if (!btc_dpm_enabled(rdev))
		return;
	rv770_clear_vc(rdev);
	if (pi->thermal_protection)
		rv770_enable_thermal_protection(rdev, false);
	ni_enable_power_containment(rdev, boot_ps, false);
	ni_enable_smc_cac(rdev, boot_ps, false);
	cypress_enable_spread_spectrum(rdev, false);
	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	if (pi->dynamic_pcie_gen2)
		ni_enable_dynamic_pcie_gen2(rdev, false);
	/* Detach the thermal interrupt before stopping the SMC. */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		rdev->irq.dpm_thermal = false;
		radeon_irq_set(rdev);
	}
	if (pi->gfx_clock_gating)
		ni_gfx_clockgating_enable(rdev, false);
	if (pi->mg_clock_gating)
		ni_mg_clockgating_enable(rdev, false);
	if (eg_pi->ls_clock_gating)
		ni_ls_clockgating_enable(rdev, false);
	ni_stop_dpm(rdev);
	btc_reset_to_default(rdev);
	ni_stop_smc(rdev);
	ni_force_switch_to_arb_f0(rdev);

	/* The boot state becomes the current state again. */
	ni_update_current_ps(rdev, boot_ps);
}
/*
 * Re-apply the TDP limits for the requested power state.  The SMC has to
 * be halted while the limits are rewritten, then resumed and switched to
 * the software state.  Returns 0 on success or the first error code.
 */
static int ni_power_control_set_level(struct radeon_device *rdev)
{
	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
	int ret;

	/* Each step is skipped once a previous one has failed. */
	ret = ni_restrict_performance_levels_before_switch(rdev);
	if (!ret)
		ret = rv770_halt_smc(rdev);
	if (!ret)
		ret = ni_populate_smc_tdp_limits(rdev, new_ps);
	if (!ret)
		ret = rv770_resume_smc(rdev);
	if (!ret)
		ret = rv770_set_sw_state(rdev);

	return ret;
}
/*
 * Prepare for a power-state switch: take a stack copy of the requested
 * state, cache it via ni_update_requested_ps(), and let the adjust rules
 * mutate the cached copy (never the table entry itself).
 */
int ni_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	ni_update_requested_ps(rdev, new_ps);

	ni_apply_state_adjust_rules(rdev, &eg_pi->requested_rps);

	return 0;
}
/*
 * Commit the cached requested power state to the hardware.  The sequence
 * is order-critical: power containment and CAC must be off around the
 * switch, the SMC halted while the new state and MC registers are
 * uploaded, then resumed and switched, and finally CAC/containment and
 * the TDP limits are re-enabled for the new state.
 */
int ni_dpm_set_power_state(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *new_ps = &eg_pi->requested_rps;
	struct radeon_ps *old_ps = &eg_pi->current_rps;
	int ret;

	ret = ni_restrict_performance_levels_before_switch(rdev);
	if (ret) {
		DRM_ERROR("ni_restrict_performance_levels_before_switch failed\n");
		return ret;
	}
	/* UVD clocks are lowered before the engine clock change... */
	ni_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
	ret = ni_enable_power_containment(rdev, new_ps, false);
	if (ret) {
		DRM_ERROR("ni_enable_power_containment failed\n");
		return ret;
	}
	ret = ni_enable_smc_cac(rdev, new_ps, false);
	if (ret) {
		DRM_ERROR("ni_enable_smc_cac failed\n");
		return ret;
	}
	ret = rv770_halt_smc(rdev);
	if (ret) {
		DRM_ERROR("rv770_halt_smc failed\n");
		return ret;
	}
	if (eg_pi->smu_uvd_hs)
		btc_notify_uvd_to_smc(rdev, new_ps);
	ret = ni_upload_sw_state(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ni_upload_sw_state failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = ni_upload_mc_reg_table(rdev, new_ps);
		if (ret) {
			DRM_ERROR("ni_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ni_program_memory_timing_parameters(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ni_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = rv770_resume_smc(rdev);
	if (ret) {
		DRM_ERROR("rv770_resume_smc failed\n");
		return ret;
	}
	ret = rv770_set_sw_state(rdev);
	if (ret) {
		DRM_ERROR("rv770_set_sw_state failed\n");
		return ret;
	}
	/* ...and raised, if applicable, after it. */
	ni_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
	ret = ni_enable_smc_cac(rdev, new_ps, true);
	if (ret) {
		DRM_ERROR("ni_enable_smc_cac failed\n");
		return ret;
	}
	ret = ni_enable_power_containment(rdev, new_ps, true);
	if (ret) {
		DRM_ERROR("ni_enable_power_containment failed\n");
		return ret;
	}
	/* update tdp */
	ret = ni_power_control_set_level(rdev);
	if (ret) {
		DRM_ERROR("ni_power_control_set_level failed\n");
		return ret;
	}

	return 0;
}
/*
 * The requested state has been committed to the hardware; make it the
 * current state.
 */
void ni_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	ni_update_current_ps(rdev, &eg_pi->requested_rps);
}
/*
 * Force the ASIC back to its boot power state (used on reset paths).
 */
void ni_dpm_reset_asic(struct radeon_device *rdev)
{
	ni_restrict_performance_levels_before_switch(rdev);
	rv770_set_boot_state(rdev);
}
/* Overlay of the possible ATOM PowerPlay table layouts found in the
 * video BIOS; the actual variant is selected by the table revision. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Overlay of the per-ASIC-family clock info entries. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

/* Overlay of the two power-state table formats. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
/*
 * Fill in the non-clock parts of @rps from a BIOS NONCLOCK_INFO entry:
 * caps/classification flags and the UVD vclk/dclk.  Newer table
 * revisions carry explicit UVD clocks; for older ones a default is used
 * for UVD states and 0 otherwise.  Also records the boot and UVD states
 * on the device as they are encountered.
 */
static void ni_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
		rps->vclk = RV770_DEFAULT_VCLK_FREQ;
		rps->dclk = RV770_DEFAULT_DCLK_FREQ;
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		rdev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}
/*
 * Decode one evergreen-format clock info entry into performance level
 * @index of @rps, patching up special values along the way:
 *  - vddc 0xff01 means "use the board maximum";
 *  - ACPI states record their voltages/pcie-gen for later use;
 *  - ULV states register themselves as the ULV level;
 *  - the boot state is overridden with the default clocks/voltages;
 *  - performance states update the max-on-AC limits.
 */
static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *ps = ni_get_ps(rps);
	u16 vddc;
	struct rv7xx_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	/* Clocks are split into a 16-bit low and an 8-bit high part. */
	pl->sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
	pl->sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
	pl->mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;

	pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC);
	pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI);
	pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags);

	/* patch up vddc if necessary */
	if (pl->vddc == 0xff01) {
		if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
			pl->vddc = vddc;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_vddc = pl->vddc;
		eg_pi->acpi_vddci = pl->vddci;
		if (ps->performance_levels[0].flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
			pi->acpi_pcie_gen2 = true;
		else
			pi->acpi_pcie_gen2 = false;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		eg_pi->ulv.supported = true;
		eg_pi->ulv.pl = pl;
	}

	/* Track the vddc range seen across all table entries. */
	if (pi->min_vddc_in_table > pl->vddc)
		pi->min_vddc_in_table = pl->vddc;

	if (pi->max_vddc_in_table < pl->vddc)
		pi->max_vddc_in_table = pl->vddc;

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		u16 vddc, vddci, mvdd;
		radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
		pl->mclk = rdev->clock.default_mclk;
		pl->sclk = rdev->clock.default_sclk;
		pl->vddc = vddc;
		pl->vddci = vddci;
	}

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
	}
}
/*
 * Parse the BIOS PowerPlay table into rdev->pm.dpm.ps.  Each state gets
 * a kzalloc'd struct ni_ps hung off ps_priv; on success num_ps is set.
 * Returns 0, -EINVAL on a bad table, or -ENOMEM.
 */
static int ni_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j;
	union pplib_clock_info *clock_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	struct ni_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* kcalloc zeroes the array and checks the count * size
	 * multiplication for overflow, unlike kzalloc(n * size). */
	rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
				  sizeof(struct radeon_ps), GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
		power_state = (union pplib_power_state *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
			 i * power_info->pplib.ucStateEntrySize);
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
			 (power_state->v1.ucNonClockStateIndex *
			  power_info->pplib.ucNonClockSize));
		if (power_info->pplib.ucStateEntrySize - 1) {
			u8 *idx;
			ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
			if (ps == NULL) {
				/* Bug fix: the ps_priv structs allocated on
				 * earlier iterations were leaked here, and
				 * rdev->pm.dpm.ps was left dangling. */
				while (--i >= 0)
					kfree(rdev->pm.dpm.ps[i].ps_priv);
				kfree(rdev->pm.dpm.ps);
				rdev->pm.dpm.ps = NULL;
				return -ENOMEM;
			}
			rdev->pm.dpm.ps[i].ps_priv = ps;
			ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
						      non_clock_info,
						      power_info->pplib.ucNonClockSize);
			idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
			for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
				clock_info = (union pplib_clock_info *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
					 (idx[j] * power_info->pplib.ucClockInfoSize));
				ni_parse_pplib_clock_info(rdev,
							  &rdev->pm.dpm.ps[i], j,
							  clock_info);
			}
		}
	}
	rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
	return 0;
}
/*
 * One-time software initialization of the NI DPM code: allocate the
 * private info struct, parse the BIOS power tables, and fill in all the
 * driver defaults and per-device tuning.  Returns 0 on success or a
 * negative error code (with everything allocated here freed again).
 */
int ni_dpm_init(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi;
	struct evergreen_power_info *eg_pi;
	struct ni_power_info *ni_pi;
	struct atom_clock_dividers dividers;
	int ret;
	int i;

	/* The NI private struct embeds the evergreen and rv7xx ones. */
	ni_pi = kzalloc(sizeof(struct ni_power_info), GFP_KERNEL);
	if (ni_pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = ni_pi;
	eg_pi = &ni_pi->eg;
	pi = &eg_pi->rv7xx;

	rv770_get_max_vddc(rdev);

	eg_pi->ulv.supported = false;
	pi->acpi_vddc = 0;
	eg_pi->acpi_vddci = 0;
	pi->min_vddc_in_table = 0;
	pi->max_vddc_in_table = 0;
	ret = ni_parse_power_table(rdev);
	if (ret) {
		/* Bug fix: ni_pi leaked on this error path. */
		kfree(rdev->pm.dpm.priv);
		rdev->pm.dpm.priv = NULL;
		return ret;
	}
	ret = r600_parse_extended_power_table(rdev);
	if (ret) {
		/* Bug fix: the parsed power states and ni_pi leaked here. */
		for (i = 0; i < rdev->pm.dpm.num_ps; i++)
			kfree(rdev->pm.dpm.ps[i].ps_priv);
		kfree(rdev->pm.dpm.ps);
		rdev->pm.dpm.ps = NULL;
		kfree(rdev->pm.dpm.priv);
		rdev->pm.dpm.priv = NULL;
		return ret;
	}
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		r600_free_extended_power_table(rdev);
		/* Bug fix: the parsed power states and ni_pi leaked here. */
		for (i = 0; i < rdev->pm.dpm.num_ps; i++)
			kfree(rdev->pm.dpm.ps[i].ps_priv);
		kfree(rdev->pm.dpm.ps);
		rdev->pm.dpm.ps = NULL;
		kfree(rdev->pm.dpm.priv);
		rdev->pm.dpm.priv = NULL;
		return -ENOMEM;
	}
	/* Built-in VDDC-vs-display-clock dependency table. */
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	ni_patch_dependency_tables_based_on_leakage(rdev);

	if (rdev->pm.dpm.voltage_response_time == 0)
		rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
	if (rdev->pm.dpm.backbias_response_time == 0)
		rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     0, false, &dividers);
	/* NOTE(review): this condition looks inverted (a non-zero ret means
	 * the atom call failed, yet that is when the returned dividers are
	 * consumed).  The same pattern exists in the other radeon dpm init
	 * paths, so behavior is deliberately kept as-is -- confirm against
	 * the upstream driver before changing. */
	if (ret)
		pi->ref_div = dividers.ref_div + 1;
	else
		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;

	/* State-transition timing defaults; slot 1 is the UVD variant. */
	pi->rlp = RV770_RLP_DFLT;
	pi->rmp = RV770_RMP_DFLT;
	pi->lhp = RV770_LHP_DFLT;
	pi->lmp = RV770_LMP_DFLT;

	eg_pi->ats[0].rlp = RV770_RLP_DFLT;
	eg_pi->ats[0].rmp = RV770_RMP_DFLT;
	eg_pi->ats[0].lhp = RV770_LHP_DFLT;
	eg_pi->ats[0].lmp = RV770_LMP_DFLT;

	eg_pi->ats[1].rlp = BTC_RLP_UVD_DFLT;
	eg_pi->ats[1].rmp = BTC_RMP_UVD_DFLT;
	eg_pi->ats[1].lhp = BTC_LHP_UVD_DFLT;
	eg_pi->ats[1].lmp = BTC_LMP_UVD_DFLT;

	eg_pi->smu_uvd_hs = true;

	/* Per-device memory clock thresholds. */
	if (rdev->pdev->device == 0x6707) {
		pi->mclk_strobe_mode_threshold = 55000;
		pi->mclk_edc_enable_threshold = 55000;
		eg_pi->mclk_edc_wr_enable_threshold = 55000;
	} else {
		pi->mclk_strobe_mode_threshold = 40000;
		pi->mclk_edc_enable_threshold = 40000;
		eg_pi->mclk_edc_wr_enable_threshold = 40000;
	}
	ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;

	/* Which rails the driver can control through GPIO. */
	pi->voltage_control =
		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);

	pi->mvdd_control =
		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);

	eg_pi->vddci_control =
		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);

	rv770_get_engine_memory_ss(rdev);

	pi->asi = RV770_ASI_DFLT;
	pi->pasi = CYPRESS_HASI_DFLT;
	pi->vrc = CYPRESS_VRC_DFLT;

	/* Feature enables/disables for this generation. */
	pi->power_gating = false;

	pi->gfx_clock_gating = true;

	pi->mg_clock_gating = true;
	pi->mgcgtssm = true;
	eg_pi->ls_clock_gating = false;
	eg_pi->sclk_deep_sleep = false;

	pi->dynamic_pcie_gen2 = true;

	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->display_gap = true;

	pi->dcodt = true;

	pi->ulps = true;

	eg_pi->dynamic_ac_timing = true;
	eg_pi->abm = true;
	eg_pi->mcls = true;
	eg_pi->light_sleep = true;
	eg_pi->memory_transition = true;
#if defined(CONFIG_ACPI)
	eg_pi->pcie_performance_request =
		radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
	eg_pi->pcie_performance_request = false;
#endif

	eg_pi->dll_default_on = false;

	eg_pi->sclk_deep_sleep = false;

	pi->mclk_stutter_mode_threshold = 0;

	pi->sram_end = SMC_RAM_END;

	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 3;
	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
	rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2 = 900;
	rdev->pm.dpm.dyn_state.valid_sclk_values.count = ARRAY_SIZE(btc_valid_sclk);
	rdev->pm.dpm.dyn_state.valid_sclk_values.values = btc_valid_sclk;
	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 12500;

	/* CAC leakage model coefficients. */
	ni_pi->cac_data.leakage_coefficients.at = 516;
	ni_pi->cac_data.leakage_coefficients.bt = 18;
	ni_pi->cac_data.leakage_coefficients.av = 51;
	ni_pi->cac_data.leakage_coefficients.bv = 2957;

	/* Pick the CAC weight table by device id (XT/LE/pro variants). */
	switch (rdev->pdev->device) {
	case 0x6700:
	case 0x6701:
	case 0x6702:
	case 0x6703:
	case 0x6718:
		ni_pi->cac_weights = &cac_weights_cayman_xt;
		break;
	case 0x6705:
	case 0x6719:
	case 0x671D:
	case 0x671C:
	default:
		ni_pi->cac_weights = &cac_weights_cayman_pro;
		break;
	case 0x6704:
	case 0x6706:
	case 0x6707:
	case 0x6708:
	case 0x6709:
		ni_pi->cac_weights = &cac_weights_cayman_le;
		break;
	}

	if (ni_pi->cac_weights->enable_power_containment_by_default) {
		ni_pi->enable_power_containment = true;
		ni_pi->enable_cac = true;
		ni_pi->enable_sq_ramping = true;
	} else {
		ni_pi->enable_power_containment = false;
		ni_pi->enable_cac = false;
		ni_pi->enable_sq_ramping = false;
	}

	ni_pi->driver_calculate_cac_leakage = false;
	ni_pi->cac_configuration_required = true;

	if (ni_pi->cac_configuration_required) {
		ni_pi->support_cac_long_term_average = true;
		ni_pi->lta_window_size = ni_pi->cac_weights->l2_lta_window_size;
		ni_pi->lts_truncate = ni_pi->cac_weights->lts_truncate;
	} else {
		ni_pi->support_cac_long_term_average = false;
		ni_pi->lta_window_size = 0;
		ni_pi->lts_truncate = 0;
	}

	ni_pi->use_power_boost_limit = true;

	/* make sure dc limits are valid */
	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	return 0;
}
/*
 * Free everything ni_dpm_init() allocated: the per-state NI private
 * structs, the power-state table, the private info struct, the dispclk
 * dependency table and the extended power tables.
 */
void ni_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++)
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	r600_free_extended_power_table(rdev);
}
/*
 * Dump a power state to the kernel log: class/caps info, UVD clocks and
 * one line per performance level (with the PCIe gen on SI and newer,
 * where the field is meaningful).
 */
void ni_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	struct ni_ps *ps = ni_get_ps(rps);
	struct rv7xx_pl *pl;
	int i;

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		if (rdev->family >= CHIP_TAHITI)
			printk("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
			       i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
		else
			printk("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
			       i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
	}
	r600_dpm_print_ps_status(rdev, rps);
}
/*
 * debugfs helper: read the currently selected performance level index
 * from the hardware and print that level of the current power state.
 */
void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
	struct ni_ps *ps = ni_get_ps(rps);
	struct rv7xx_pl *pl;
	u32 current_index =
		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
		CURRENT_STATE_INDEX_SHIFT;

	/* Guard against a stale/bogus hardware index. */
	if (current_index >= ps->performance_level_count) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		pl = &ps->performance_levels[current_index];
		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
		seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
			   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
	}
}
/*
 * Return the engine clock of the requested state: the lowest level when
 * @low is set, otherwise the highest.
 */
u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *ps = ni_get_ps(&eg_pi->requested_rps);
	u32 level = low ? 0 : ps->performance_level_count - 1;

	return ps->performance_levels[level].sclk;
}
/*
 * Return the memory clock of the requested state: the lowest level when
 * @low is set, otherwise the highest.
 */
u32 ni_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *ps = ni_get_ps(&eg_pi->requested_rps);
	u32 level = low ? 0 : ps->performance_level_count - 1;

	return ps->performance_levels[level].mclk;
}
| gpl-2.0 |
elliott10/decaf-platform | shared/function_map.cpp | 14 | 5306 |
/*
Copyright (C) <2012> <Syracuse System Security (Sycure) Lab>
DECAF is based on QEMU, a whole-system emulator. You can redistribute
and modify it under the terms of the GNU GPL, version 3 or later,
but it is made available WITHOUT ANY WARRANTY. See the top-level
README file for more details.
For more information about DECAF and other softwares, see our
web site at:
http://sycurelab.ecs.syr.edu/
If you have any questions about DECAF,please post it on
http://code.google.com/p/decaf-platform/
*/
/********************************************************************
** function_map.cpp
** Author: Heng Yin <heyin@syr.edu>
**
**
** used to map eip to function name. this file uses the fact
** that TEMU knows module information for loaded modules.
** using this, and the print_funcs_on command, we can print
** every library call that is made within the program.
**
*/
#include <inttypes.h>
#include <map>
#include <vector>
#include <list>
#include <string>
#include <iostream>
#include <fstream>
#include <iomanip>
#include <cassert>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "DECAF_main.h"
#include "DECAF_vm_compress.h"
#include "hw/hw.h" // AWH
#include "shared/vmi.h"
#include "function_map.h"
#include "shared/hookapi.h"
using namespace std;
// Forward map: module name -> function name -> offset of the function
// inside the module image (rebased against the load address at lookup).
map<string, map<string, uint32_t> > map_function_offset;
// Reverse map: module name -> offset -> function name, used to resolve
// a program counter back to a symbolic name.
map<string, map<uint32_t, string> > map_offset_function;
// NOTE(review): forced to -O0, presumably to work around an optimizer
// problem when this is invoked from instrumented code -- confirm why.
target_ulong funcmap_get_pc(const char *module_name, const char *function_name, target_ulong cr3) __attribute__((optimize("O0")));

// Resolve "module!function" to an absolute pc inside the address space
// identified by cr3.  Returns 0 when the module is not currently loaded
// there or the function is unknown.
target_ulong funcmap_get_pc(const char *module_name, const char *function_name, target_ulong cr3)
{
	target_ulong base;
	// Locate the module's load base in this address space.
	module *mod = VMI_find_module_by_name(module_name, cr3, &base);
	if(!mod)
		return 0;

	map<string, map<string, uint32_t> >::iterator iter = map_function_offset.find(module_name);
	if(iter == map_function_offset.end())
		return 0;

	map<string, uint32_t>::iterator iter2 = iter->second.find(function_name);
	if(iter2 == iter->second.end())
		return 0;

	// Offsets are stored module-relative; rebase onto the mapping.
	return iter2->second + base;
}
// Resolve a pc in the address space identified by cr3 back to a
// (module name, function name) pair.  Returns 0 on success, -1 when the
// pc does not fall inside a known module or the offset has no symbol.
int funcmap_get_name(target_ulong pc, target_ulong cr3, string &mod_name, string &func_name)
{
	target_ulong base;
	module *mod = VMI_find_module_by_pc(pc, cr3, &base);
	if (mod == NULL)
		return -1;

	map<string, map<uint32_t, string> >::iterator mod_it =
		map_offset_function.find(mod->name);
	if (mod_it == map_offset_function.end())
		return -1;

	// Look up the module-relative offset of pc.
	map<uint32_t, string>::iterator func_it =
		mod_it->second.find(pc - base);
	if (func_it == mod_it->second.end())
		return -1;

	mod_name = mod->name;
	func_name = func_it->second;
	return 0;
}
// C-friendly wrapper around funcmap_get_name().  On success the names
// are copied into the caller-supplied buffers, which are assumed to be
// at least 512 bytes (matching the 512-byte buffers used elsewhere in
// this file) -- TODO confirm all callers meet that contract.
int funcmap_get_name_c(target_ulong pc, target_ulong cr3, char *mod_name, char *func_name)
{
	string mod, func;
	int ret = funcmap_get_name(pc, cr3, mod, func);
	if(ret == 0) {
		// Bug fix: strncpy(dst, src, 512) leaves dst without a NUL
		// terminator when the name is 512+ chars; snprintf always
		// terminates (truncating if necessary).
		snprintf(mod_name, 512, "%s", mod.c_str());
		snprintf(func_name, 512, "%s", func.c_str());
	}
	return ret;
}
// Bounded scanf format helpers: expand to "%511s" / "%511[^\"]" so that
// sscanf cannot overflow the 512-byte buffers used below.  The *_x
// indirection forces macro expansion of the numeric argument before
// stringization.
#define BOUNDED_STR(len) "%" #len "s"
#define BOUNDED_QUOTED(len) "%" #len "[^\"]"
#define BOUNDED_STR_x(len) BOUNDED_STR(len)
#define BOUNDED_QUOTED_x(len) BOUNDED_QUOTED(len)
#define BSTR BOUNDED_STR_x(511)
#define BQUOT BOUNDED_QUOTED_x(511)

// Parse one "F <module> <function> <hex offset>" record (as emitted to
// guest.log) and add it to the lookup maps.  Malformed lines are
// silently ignored.
void parse_function(const char *message)
{
	char module[512];
	char fname[512];
	uint32_t offset;

	if (sscanf(message, " F " BSTR " " BSTR " %x ", module, fname, &offset) != 3)
		return;

	funcmap_insert_function(module, fname, offset);
}
// void funcmap_insert_function(const char *module, const char *fname, uint32_t offset) __attribute__((optimize("O0")));
// Record a module-relative function symbol in both lookup directions.
// std::map::insert() keeps the first mapping when the same key appears
// twice, matching the original duplicate-handling behavior; operator[]
// on the module name creates the inner map on first use.
void funcmap_insert_function(const char *module, const char *fname, uint32_t offset)
{
	map_function_offset[module].insert(
		pair<string, uint32_t>(string(fname), offset));
	map_offset_function[module].insert(
		pair<uint32_t, string>(offset, string(fname)));
}
// savevm callback: intentionally empty -- the maps are not serialized
// into the snapshot; they are rebuilt from guest.log on load (see
// function_map_load below).
static void function_map_save(QEMUFile * f, void *opaque)
{
	/* Nothing here if we are loading from guest.log */ //Aravind
}
// loadvm callback: rebuild the symbol maps by replaying the "F ..."
// records from guest.log.  This only works if the log was generated
// during the earlier run; serializing the maps into the snapshot image
// would be the more robust approach.
static int function_map_load(QEMUFile * f, void *opaque, int version_id)
{
	//Aravind start
	FILE *fp = fopen("guest.log", "r");
	char line[1024] = {'\0'};

	// Bug fix: fp was dereferenced without a NULL check, crashing the
	// emulator when guest.log is missing or unreadable.
	if (fp == NULL) {
		fprintf(stderr, "function_map_load: cannot open guest.log: %s\n",
			strerror(errno));
		return -1;
	}

	while (fgets(line, sizeof(line), fp)) {
		if (line[0] != 'F')
			continue;
		parse_function(line);
	}

	fclose(fp);
	return 0;
	//end
}
// Register the (empty) save handler and the guest.log-based load
// handler with QEMU's snapshot (savevm) machinery under the name
// "funmap".
void function_map_init()
{
	register_savevm(NULL, "funmap", 0, 1,
			function_map_save, function_map_load, NULL);
}
// Drop all cached symbol information and detach from the savevm
// machinery.
void function_map_cleanup()
{
	map_function_offset.clear();
	map_offset_function.clear();
	unregister_savevm(NULL, "funmap", 0);
}
| gpl-2.0 |
bogzybodo/CatalystCore-4.3.4 | dep/acelite/ace/OS_NS_wchar.cpp | 270 | 9857 | // $Id: OS_NS_wchar.cpp 92712 2010-11-25 12:22:13Z johnnyw $
#include "ace/OS_NS_wchar.h"
#if !defined (ACE_HAS_INLINED_OSCALLS)
# include "ace/OS_NS_wchar.inl"
#endif /* ACE_HAS_INLINED_OSCALLS */
#if defined (ACE_HAS_WCHAR)
# include "ace/OS_NS_ctype.h"
# include "ace/OS_NS_string.h"
#endif /* ACE_HAS_WCHAR */
// The following wcs*_emulation methods were created based on BSD code:
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* James W. Williams of NASA Goddard Space Flight Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
ACE_BEGIN_VERSIONED_NAMESPACE_DECL
#if defined (ACE_HAS_WCHAR) && defined (ACE_LACKS_WCSCAT)
/**
 * Emulated wcscat(): append the NUL-terminated wide string @a source to
 * the end of @a destination (including the terminator) and return
 * @a destination.
 */
wchar_t *
ACE_OS::wcscat_emulation (wchar_t *destination,
                          const wchar_t *source)
{
  wchar_t * const result = destination;

  // Advance to the terminating NUL of the existing string.
  while (*destination != 0)
    ++destination;

  // Copy source, terminator included.
  do
    *destination++ = *source;
  while (*source++ != 0);

  return result;
}
#endif /* ACE_HAS_WCHAR && ACE_LACKS_WCSCAT */
#if defined (ACE_HAS_WCHAR) && defined (ACE_LACKS_WCSCHR)
/**
 * Emulated wcschr(): return a pointer to the first occurrence of @a c
 * in @a string, or 0 when it is absent.  Note this emulation does not
 * match the terminating NUL (unlike the standard function).
 */
wchar_t *
ACE_OS::wcschr_emulation (const wchar_t *string, wchar_t c)
{
  while (*string != 0)
    {
      if (*string == c)
        return const_cast<wchar_t *> (string);
      ++string;
    }

  return 0;
}
#endif /* ACE_HAS_WCHAR && ACE_LACKS_WCSCHR */
#if !defined (ACE_HAS_WCHAR) || defined (ACE_LACKS_WCSCMP)
/**
 * Emulated wcscmp(): lexicographically compare two NUL-terminated wide
 * strings; returns <0, 0 or >0.
 */
int
ACE_OS::wcscmp_emulation (const ACE_WCHAR_T *string1,
                          const ACE_WCHAR_T *string2)
{
  // Walk past the common prefix.
  while (*string1 != 0 && *string1 == *string2)
    {
      ++string1;
      ++string2;
    }

  // Difference of the first differing characters (0 when both ended).
  return *string1 - *string2;
}
#endif /* !ACE_HAS_WCHAR || ACE_LACKS_WCSCMP */
#if defined (ACE_HAS_WCHAR) && defined (ACE_LACKS_WCSCPY)
/**
 * Emulated wcscpy(): copy @a source (terminator included) into
 * @a destination and return @a destination.
 */
wchar_t *
ACE_OS::wcscpy_emulation (wchar_t *destination,
                          const wchar_t *source)
{
  wchar_t * const start = destination;

  while ((*destination++ = *source++) != 0)
    continue;

  return start;
}
#endif /* ACE_HAS_WCHAR && ACE_LACKS_WCSCPY */
#if defined (ACE_HAS_WCHAR) && defined (ACE_LACKS_WCSCSPN)
/**
 * Emulated wcscspn(): return the length of the initial segment of @a s
 * that contains no character from @a reject.
 */
size_t
ACE_OS::wcscspn_emulation (const wchar_t *s, const wchar_t *reject)
{
  const wchar_t *scan = 0;
  const wchar_t *rej_scan = 0;
  // Bug fix: the counter was declared int while the function returns
  // size_t, giving a signed/unsigned mismatch and potential truncation
  // for very long strings.
  size_t count = 0;

  for (scan = s; *scan; scan++)
    {
      for (rej_scan = reject; *rej_scan; rej_scan++)
        if (*scan == *rej_scan)
          return count;
      count++;
    }

  return count;
}
#endif /* ACE_HAS_WCHAR && ACE_LACKS_WCSCSPN */
#if defined (ACE_HAS_WCHAR) && defined (ACE_LACKS_WCSICMP)
/**
 * Emulated wcsicmp(): case-insensitive comparison of two NUL-terminated
 * wide strings; returns <0, 0 or >0.
 */
int
ACE_OS::wcsicmp_emulation (const wchar_t *s, const wchar_t *t)
{
  const wchar_t *scan1 = s;
  const wchar_t *scan2 = t;

  while (*scan1 != 0
         && ACE_OS::ace_towlower (*scan1)
            == ACE_OS::ace_towlower (*scan2))
    {
      ++scan1;
      ++scan2;
    }

  // The following case analysis is necessary so that characters which
  // look negative collate low against normal characters but high
  // against the end-of-string NUL.
  if (*scan1 == '\0' && *scan2 == '\0')
    return 0;
  else if (*scan1 == '\0')
    return -1;
  else if (*scan2 == '\0')
    return 1;
  else
    // Bug fix: the narrow-char ace_tolower() was applied to *scan1
    // while *scan2 went through the wide ace_towlower(), producing
    // wrong results for non-ASCII wide characters.  Use ace_towlower()
    // on both sides.
    return ACE_OS::ace_towlower (*scan1) - ACE_OS::ace_towlower (*scan2);
}
#endif /* ACE_HAS_WCHAR && ACE_LACKS_WCSICMP */
#if !defined (ACE_HAS_WCHAR) || defined (ACE_LACKS_WCSLEN)
size_t
ACE_OS::wcslen_emulation (const ACE_WCHAR_T *string)
{
  // Count the wide characters in <string>, excluding the terminating NUL.
  size_t len = 0;
  while (string[len] != 0)
    ++len;
  return len;
}
#endif /* !ACE_HAS_WCHAR || ACE_LACKS_WCSLEN */
#if !defined (ACE_HAS_WCHAR) || defined (ACE_LACKS_WCSNCAT)
ACE_WCHAR_T *
ACE_OS::wcsncat_emulation (ACE_WCHAR_T *destination,
                           const ACE_WCHAR_T *source,
                           size_t count)
{
  // Append at most <count> wide characters of <source> to the end of
  // <destination>, always NUL-terminating the result (standard
  // wcsncat() semantics).  Returns <destination>.
  if (count == 0)
    return destination;

  ACE_WCHAR_T *out = destination;
  while (*out != 0)             // find the end of the existing string
    ++out;

  for (; count != 0; --count)
    {
      if ((*out = *source++) == 0)
        return destination;     // source exhausted; NUL already copied
      ++out;
    }
  *out = 0;                     // copied <count> chars: terminate explicitly
  return destination;
}
#endif /* !ACE_HAS_WCHAR || ACE_LACKS_WCSNCAT */
#if !defined (ACE_HAS_WCHAR) || defined (ACE_LACKS_WCSNCMP)
int
ACE_OS::wcsncmp_emulation (const ACE_WCHAR_T *s1,
                           const ACE_WCHAR_T *s2,
                           size_t len)
{
  // Compare at most <len> wide characters of <s1> and <s2>.  Returns 0
  // when the prefixes match (or <len> is 0), otherwise the difference
  // between the first differing characters.
  for (; len != 0; --len, ++s1, ++s2)
    {
      if (*s1 != *s2)
        return *s1 - *s2;
      if (*s1 == 0)
        break;                  // both strings ended before <len>
    }
  return 0;
}
#endif /* !ACE_HAS_WCHAR || ACE_LACKS_WCSNCMP */
#if !defined (ACE_HAS_WCHAR) || defined (ACE_LACKS_WCSNCPY)
ACE_WCHAR_T *
ACE_OS::wcsncpy_emulation (ACE_WCHAR_T *destination,
                           const ACE_WCHAR_T *source,
                           size_t len)
{
  // Copy at most <len> wide characters from <source> to <destination>.
  // If <source> is shorter than <len>, the remainder of the buffer is
  // NUL-padded; otherwise the result may be unterminated (standard
  // wcsncpy() semantics).  Returns <destination>.
  ACE_WCHAR_T *out = destination;
  size_t remaining = len;

  while (remaining != 0)
    {
      --remaining;
      if ((*out++ = *source++) == 0)
        {
          while (remaining != 0)        // NUL-pad the rest of the buffer
            {
              *out++ = 0;
              --remaining;
            }
          break;
        }
    }
  return destination;
}
#endif /* !ACE_HAS_WCHAR || ACE_LACKS_WCSNCPY */
#if defined (ACE_HAS_WCHAR) && defined (ACE_LACKS_WCSNICMP)
int
ACE_OS::wcsnicmp_emulation (const wchar_t *s,
                            const wchar_t *t,
                            size_t len)
{
  // Case-insensitive comparison of at most <len> wide characters of
  // <s> and <t> (like _wcsnicmp()).  Returns <0, 0, or >0.
  size_t i = 0;
  while (i < len
         && s[i] != 0
         && ACE_OS::ace_towlower (s[i]) == ACE_OS::ace_towlower (t[i]))
    ++i;

  if (i == len)
    return 0;                   // the first <len> characters all matched

  // The following case analysis is necessary so that characters which
  // look negative collate low against normal characters but high
  // against the end-of-string NUL.
  if (s[i] == '\0' && t[i] == '\0')
    return 0;
  else if (s[i] == '\0')
    return -1;
  else if (t[i] == '\0')
    return 1;
  else
    return ACE_OS::ace_towlower (s[i]) - ACE_OS::ace_towlower (t[i]);
}
#endif /* ACE_HAS_WCHAR && ACE_LACKS_WCSNICMP */
#if defined (ACE_HAS_WCHAR) && defined (ACE_LACKS_WCSPBRK)
wchar_t *
ACE_OS::wcspbrk_emulation (const wchar_t *string,
                           const wchar_t *charset)
{
  // Return a pointer to the first character in <string> that also
  // appears in <charset>, or 0 when none does.
  for (; *string != 0; ++string)
    {
      for (const wchar_t *probe = charset; *probe != 0; ++probe)
        if (*probe == *string)
          return const_cast<wchar_t *> (string);
    }
  return 0;
}
#endif /* ACE_HAS_WCHAR && ACE_LACKS_WCSPBRK */
#if defined (ACE_HAS_WCHAR) && defined (ACE_LACKS_WCSRCHR)
const wchar_t *
ACE_OS::wcsrchr_emulation (const wchar_t *s, wint_t c)
{
  // Return a pointer to the LAST occurrence of <c> in <s>, scanning
  // backwards from the terminating NUL (which is itself matched when
  // <c> is 0); 0 if <c> does not occur.
  const wchar_t *p = s + ACE_OS::strlen (s);
  for (;;)
    {
      if (*p == static_cast<wchar_t> (c))
        return p;
      if (p == s)
        return 0;
      --p;
    }
}
wchar_t *
ACE_OS::wcsrchr_emulation (wchar_t *s, wint_t c)
{
  // Non-const overload: return a pointer to the LAST occurrence of <c>
  // in <s> (the terminating NUL matches when <c> is 0); 0 if absent.
  wchar_t *p = s + ACE_OS::strlen (s);
  for (;;)
    {
      if (*p == static_cast<wchar_t> (c))
        return p;
      if (p == s)
        return 0;
      --p;
    }
}
#endif /* ACE_HAS_WCHAR && ACE_LACKS_WCSRCHR */
#if defined (ACE_HAS_WCHAR) && defined (ACE_LACKS_WCSSPN)
size_t
ACE_OS::wcsspn_emulation (const wchar_t *string,
                          const wchar_t *charset)
{
  // Return the length of the initial segment of <string> consisting
  // only of characters from <charset> (wide strspn()).  Rewritten
  // without the goto of the classic BSD implementation.
  const wchar_t *p = string;
  for (;; ++p)
    {
      // Scan <charset> (excluding its terminating NUL) for *p.
      const wchar_t *span = charset;
      while (*span != 0 && *span != *p)
        ++span;
      if (*span == 0)           // *p not in charset (incl. *p == 0)
        return p - string;
    }
}
#endif /* ACE_HAS_WCHAR && ACE_LACKS_WCSSPN */
#if defined (ACE_HAS_WCHAR) && defined (ACE_LACKS_WCSSTR)
wchar_t *
ACE_OS::wcsstr_emulation (const wchar_t *string,
                          const wchar_t *charset)
{
  // Return a pointer to the first occurrence of the substring <charset>
  // in <string>, or 0 if there is none.  An empty <charset> matches at
  // the start of <string> (standard wcsstr() semantics).
  wchar_t first = *charset;
  if (first == 0)
    return const_cast<wchar_t *> (string);

  ++charset;                    // compare the remainder separately
  size_t rest_len = ACE_OS::strlen (charset);

  for (;;)
    {
      // Advance to the next occurrence of the pattern's first character.
      wchar_t cur;
      do
        {
          cur = *string++;
          if (cur == 0)
            return 0;           // ran off the end: no match
        }
      while (cur != first);

      // <string> now points just past the candidate's first character;
      // check whether the rest of the pattern follows.
      if (ACE_OS::strncmp (string, charset, rest_len) == 0)
        return const_cast<wchar_t *> (string - 1);
    }
}
#endif /* ACE_HAS_WCHAR && ACE_LACKS_WCSSTR */
ACE_END_VERSIONED_NAMESPACE_DECL
| gpl-2.0 |
Hacker432-Y550/android_kernel_huawei_msm8916 | drivers/media/platform/exynos4-is/fimc-is.c | 2062 | 23513 | /*
* Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
*
* Authors: Sylwester Nawrocki <s.nawrocki@samsung.com>
* Younghwan Joo <yhwan.joo@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-contiguous.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_i2c.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include <media/v4l2-of.h>
#include <media/videobuf2-dma-contig.h>
#include "media-dev.h"
#include "fimc-is.h"
#include "fimc-is-command.h"
#include "fimc-is-errno.h"
#include "fimc-is-i2c.h"
#include "fimc-is-param.h"
#include "fimc-is-regs.h"
/*
 * Clock connection-id names, indexed by the ISS_CLK_* enum; passed to
 * clk_get() in fimc_is_get_clocks().
 * NOTE(review): could be "static const char *const" — the strings are
 * never modified; verify no clk API signature prevents that.
 */
static char *fimc_is_clocks[ISS_CLKS_MAX] = {
	[ISS_CLK_PPMUISPX]		= "ppmuispx",
	[ISS_CLK_PPMUISPMX]		= "ppmuispmx",
	[ISS_CLK_LITE0]			= "lite0",
	[ISS_CLK_LITE1]			= "lite1",
	[ISS_CLK_MPLL]			= "mpll",
	[ISS_CLK_ISP]			= "isp",
	[ISS_CLK_DRC]			= "drc",
	[ISS_CLK_FD]			= "fd",
	[ISS_CLK_MCUISP]		= "mcuisp",
	[ISS_CLK_UART]			= "uart",
	[ISS_CLK_ISP_DIV0]		= "ispdiv0",
	[ISS_CLK_ISP_DIV1]		= "ispdiv1",
	[ISS_CLK_MCUISP_DIV0]		= "mcuispdiv0",
	[ISS_CLK_MCUISP_DIV1]		= "mcuispdiv1",
	[ISS_CLK_ACLK200]		= "aclk200",
	[ISS_CLK_ACLK200_DIV]		= "div_aclk200",
	[ISS_CLK_ACLK400MCUISP]		= "aclk400mcuisp",
	[ISS_CLK_ACLK400MCUISP_DIV]	= "div_aclk400mcuisp",
};
static void fimc_is_put_clocks(struct fimc_is *is)
{
	/* Drop every valid clock reference and mark its slot invalid so a
	 * repeated call (or a later disable) skips it. */
	int idx;

	for (idx = 0; idx < ISS_CLKS_MAX; idx++) {
		if (IS_ERR(is->clocks[idx]))
			continue;
		clk_put(is->clocks[idx]);
		is->clocks[idx] = ERR_PTR(-EINVAL);
	}
}
static int fimc_is_get_clocks(struct fimc_is *is)
{
	/*
	 * Acquire every clock named in fimc_is_clocks[].  On the first
	 * failure all clocks taken so far are released, the missing clock
	 * is reported and the clk_get() error is returned.
	 */
	int idx;

	for (idx = 0; idx < ISS_CLKS_MAX; idx++)
		is->clocks[idx] = ERR_PTR(-EINVAL);

	for (idx = 0; idx < ISS_CLKS_MAX; idx++) {
		struct clk *clk = clk_get(&is->pdev->dev,
					  fimc_is_clocks[idx]);

		if (IS_ERR(clk)) {
			int err = PTR_ERR(clk);

			fimc_is_put_clocks(is);
			dev_err(&is->pdev->dev, "failed to get clock: %s\n",
				fimc_is_clocks[idx]);
			return err;
		}
		is->clocks[idx] = clk;
	}
	return 0;
}
/*
 * Configure the IS clock tree: reparent the ACLK200 and ACLK400MCUISP
 * clocks to their respective divider outputs and program the ISP/MCUISP
 * divider rates.  Returns 0 or the first clk API error.
 */
static int fimc_is_setup_clocks(struct fimc_is *is)
{
	int ret;

	ret = clk_set_parent(is->clocks[ISS_CLK_ACLK200],
			     is->clocks[ISS_CLK_ACLK200_DIV]);
	if (ret < 0)
		return ret;

	ret = clk_set_parent(is->clocks[ISS_CLK_ACLK400MCUISP],
			     is->clocks[ISS_CLK_ACLK400MCUISP_DIV]);
	if (ret < 0)
		return ret;

	/* Both ISP dividers run at the AXI bus frequency. */
	ret = clk_set_rate(is->clocks[ISS_CLK_ISP_DIV0], ACLK_AXI_FREQUENCY);
	if (ret < 0)
		return ret;

	ret = clk_set_rate(is->clocks[ISS_CLK_ISP_DIV1], ACLK_AXI_FREQUENCY);
	if (ret < 0)
		return ret;

	/* Both MCUISP dividers run at the ATCLK frequency. */
	ret = clk_set_rate(is->clocks[ISS_CLK_MCUISP_DIV0],
			   ATCLK_MCUISP_FREQUENCY);
	if (ret < 0)
		return ret;

	return clk_set_rate(is->clocks[ISS_CLK_MCUISP_DIV1],
			    ATCLK_MCUISP_FREQUENCY);
}
/*
 * Prepare and enable all gate clocks (slots holding ERR_PTR are
 * skipped).  On failure all clocks enabled so far are rolled back.
 *
 * Fix: the rollback used clk_disable(), leaving each clock prepared
 * after clk_prepare_enable() — the unwind must use
 * clk_disable_unprepare() to balance it.  It also has to skip ERR_PTR
 * slots, mirroring the forward loop, so an invalid clk pointer is never
 * dereferenced during unwind.
 */
int fimc_is_enable_clocks(struct fimc_is *is)
{
	int i, ret;

	for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
		if (IS_ERR(is->clocks[i]))
			continue;
		ret = clk_prepare_enable(is->clocks[i]);
		if (ret < 0) {
			dev_err(&is->pdev->dev, "clock %s enable failed\n",
				fimc_is_clocks[i]);
			for (--i; i >= 0; i--) {
				if (IS_ERR(is->clocks[i]))
					continue;
				clk_disable_unprepare(is->clocks[i]);
			}
			return ret;
		}
		pr_debug("enabled clock: %s\n", fimc_is_clocks[i]);
	}
	return 0;
}
void fimc_is_disable_clocks(struct fimc_is *is)
{
	/* Disable and unprepare all valid gate clocks (inverse of
	 * fimc_is_enable_clocks()). */
	int idx;

	for (idx = 0; idx < ISS_GATE_CLKS_MAX; idx++) {
		if (IS_ERR(is->clocks[idx]))
			continue;
		clk_disable_unprepare(is->clocks[idx]);
		pr_debug("disabled clock: %s\n", fimc_is_clocks[idx]);
	}
}
/*
 * Derive the ISP I2C bus index for @sensor from the device tree:
 * follow the sensor node's first endpoint to the remote (MIPI-CSIS)
 * port and compute the bus index from that port's "reg" property.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the device_node references returned by the two OF
 * helpers are never dropped (no of_node_put()) — looks like a refcount
 * leak; confirm against the v4l2_of helper contracts.
 */
static int fimc_is_parse_sensor_config(struct fimc_is_sensor *sensor,
				       struct device_node *np)
{
	u32 tmp = 0;
	int ret;

	np = v4l2_of_get_next_endpoint(np, NULL);
	if (!np)
		return -ENXIO;

	np = v4l2_of_get_remote_port(np);
	if (!np)
		return -ENXIO;

	/* Use MIPI-CSIS channel id to determine the ISP I2C bus index. */
	ret = of_property_read_u32(np, "reg", &tmp);
	sensor->i2c_bus = tmp - FIMC_INPUT_MIPI_CSI2_0;
	return ret;
}
/*
 * Create the ISP subdev and look up the sensor subdev(s) behind the
 * FIMC-IS I2C adapters described in the device tree.  Returns
 * -EPROBE_DEFER when an adapter or its I2C client has not been
 * instantiated yet, so the driver core retries the probe later.
 */
static int fimc_is_register_subdevs(struct fimc_is *is)
{
	struct device_node *adapter, *child;
	int ret;

	ret = fimc_isp_subdev_create(&is->isp);
	if (ret < 0)
		return ret;

	for_each_compatible_node(adapter, NULL, FIMC_IS_I2C_COMPATIBLE) {
		if (!of_find_device_by_node(adapter)) {
			of_node_put(adapter);
			/* Adapter not instantiated yet — defer probing. */
			return -EPROBE_DEFER;
		}

		for_each_available_child_of_node(adapter, child) {
			struct i2c_client *client;
			struct v4l2_subdev *sd;

			client = of_find_i2c_device_by_node(child);
			if (!client)
				goto e_retry;

			sd = i2c_get_clientdata(client);
			if (!sd)
				goto e_retry;

			/* FIXME: Add support for multiple sensors. */
			if (WARN_ON(is->sensor))
				continue;

			is->sensor = sd_to_fimc_is_sensor(sd);

			if (fimc_is_parse_sensor_config(is->sensor, child)) {
				dev_warn(&is->pdev->dev, "DT parse error: %s\n",
					 child->full_name);
			}
			/* Fix: sd->name is a string — it was printed with
			 * %p (its address) instead of %s. */
			pr_debug("%s(): registered subdev: %s\n",
				 __func__, sd->name);
		}
	}
	return 0;

e_retry:
	of_node_put(child);
	return -EPROBE_DEFER;
}
static int fimc_is_unregister_subdevs(struct fimc_is *is)
{
	/* Drop the cached sensor reference and tear down the ISP subdev
	 * (inverse of fimc_is_register_subdevs()).  Always succeeds. */
	is->sensor = NULL;
	fimc_isp_subdev_destroy(&is->isp);
	return 0;
}
static int fimc_is_load_setfile(struct fimc_is *is, char *file_name)
{
const struct firmware *fw;
void *buf;
int ret;
ret = request_firmware(&fw, file_name, &is->pdev->dev);
if (ret < 0) {
dev_err(&is->pdev->dev, "firmware request failed (%d)\n", ret);
return ret;
}
buf = is->memory.vaddr + is->setfile.base;
memcpy(buf, fw->data, fw->size);
fimc_is_mem_barrier();
is->setfile.size = fw->size;
pr_debug("mem vaddr: %p, setfile buf: %p\n", is->memory.vaddr, buf);
memcpy(is->fw.setfile_info,
fw->data + fw->size - FIMC_IS_SETFILE_INFO_LEN,
FIMC_IS_SETFILE_INFO_LEN - 1);
is->fw.setfile_info[FIMC_IS_SETFILE_INFO_LEN - 1] = '\0';
is->setfile.state = 1;
pr_debug("FIMC-IS setfile loaded: base: %#x, size: %zu B\n",
is->setfile.base, fw->size);
release_firmware(fw);
return ret;
}
/*
 * Power the IS Cortex-A5 CPU up or down through the MCUCTL/ISP PMU
 * registers.  @on != 0: disable the ISP watchdog, program the A5 boot
 * address with the firmware memory base and start the core.  @on == 0:
 * request power-off and busy-wait — FIMC_IS_POWER_ON_TIMEOUT iterations
 * of udelay(1) — for the A5 status bit to clear.
 * Returns 0 on success, -ETIME if the core does not power down in time.
 */
int fimc_is_cpu_set_power(struct fimc_is *is, int on)
{
	unsigned int timeout = FIMC_IS_POWER_ON_TIMEOUT;

	if (on) {
		/* Disable watchdog */
		mcuctl_write(0, is, REG_WDT_ISP);

		/* Cortex-A5 start address setting */
		mcuctl_write(is->memory.paddr, is, MCUCTL_REG_BBOAR);

		/* Enable and start Cortex-A5 */
		pmuisp_write(0x18000, is, REG_PMU_ISP_ARM_OPTION);
		pmuisp_write(0x1, is, REG_PMU_ISP_ARM_CONFIGURATION);
	} else {
		/* A5 power off */
		pmuisp_write(0x10000, is, REG_PMU_ISP_ARM_OPTION);
		pmuisp_write(0x0, is, REG_PMU_ISP_ARM_CONFIGURATION);

		/* Poll until the ARM core reports powered-down. */
		while (pmuisp_read(is, REG_PMU_ISP_ARM_STATUS) & 1) {
			if (timeout == 0)
				return -ETIME;
			timeout--;
			udelay(1);
		}
	}
	return 0;
}
/* Wait until @bit of @is->state is set to @state in the interrupt handler. */
int fimc_is_wait_event(struct fimc_is *is, unsigned long bit,
unsigned int state, unsigned int timeout)
{
int ret = wait_event_timeout(is->irq_queue,
!state ^ test_bit(bit, &is->state),
timeout);
if (ret == 0) {
dev_WARN(&is->pdev->dev, "%s() timed out\n", __func__);
return -ETIME;
}
return 0;
}
/*
 * Copy the cached firmware image into the IS working memory, power up
 * the Cortex-A5 and wait for the IS_ST_A5_PWR_ON state bit (set by the
 * general IRQ handler on IHC_GET_SENSOR_NUM).
 * Returns 0 on success, -EINVAL if no firmware has been loaded yet, or
 * a negative errno from the power-up/wait steps.
 */
int fimc_is_start_firmware(struct fimc_is *is)
{
	struct device *dev = &is->pdev->dev;
	int ret;

	if (is->fw.f_w == NULL) {
		dev_err(dev, "firmware is not loaded\n");
		return -EINVAL;
	}

	memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size);
	wmb();	/* ensure the copy is visible before the A5 starts */

	ret = fimc_is_cpu_set_power(is, 1);
	if (ret < 0)
		return ret;

	ret = fimc_is_wait_event(is, IS_ST_A5_PWR_ON, 1,
				 msecs_to_jiffies(FIMC_IS_FW_LOAD_TIMEOUT));
	if (ret < 0)
		dev_err(dev, "FIMC-IS CPU power on failed\n");

	return ret;
}
/* Allocate working memory for the FIMC-IS CPU. */
static int fimc_is_alloc_cpu_memory(struct fimc_is *is)
{
struct device *dev = &is->pdev->dev;
is->memory.vaddr = dma_alloc_coherent(dev, FIMC_IS_CPU_MEM_SIZE,
&is->memory.paddr, GFP_KERNEL);
if (is->memory.vaddr == NULL)
return -ENOMEM;
is->memory.size = FIMC_IS_CPU_MEM_SIZE;
memset(is->memory.vaddr, 0, is->memory.size);
dev_info(dev, "FIMC-IS CPU memory base: %#x\n", (u32)is->memory.paddr);
if (((u32)is->memory.paddr) & FIMC_IS_FW_ADDR_MASK) {
dev_err(dev, "invalid firmware memory alignment: %#x\n",
(u32)is->memory.paddr);
dma_free_coherent(dev, is->memory.size, is->memory.vaddr,
is->memory.paddr);
return -EIO;
}
is->is_p_region = (struct is_region *)(is->memory.vaddr +
FIMC_IS_CPU_MEM_SIZE - FIMC_IS_REGION_SIZE);
is->is_dma_p_region = is->memory.paddr +
FIMC_IS_CPU_MEM_SIZE - FIMC_IS_REGION_SIZE;
is->is_shared_region = (struct is_share_region *)(is->memory.vaddr +
FIMC_IS_SHARED_REGION_OFFSET);
return 0;
}
static void fimc_is_free_cpu_memory(struct fimc_is *is)
{
	/* Release the coherent DMA area allocated for the IS CPU. */
	dma_free_coherent(&is->pdev->dev, is->memory.size,
			  is->memory.vaddr, is->memory.paddr);
}
/*
 * Completion callback of the asynchronous firmware request.  Validates
 * the image size, allocates the IS CPU memory, copies the image in and
 * extracts the info/version strings from its tail.  The firmware object
 * is kept in is->fw.f_w because it is copied into the working memory
 * again each time the Cortex-A5 is restarted.
 *
 * Fix: fw->size is a size_t and must be printed with %zu (was %d), and
 * the dma_addr_t must be printed with %pad (was %#x) — both mismatched
 * printk format specifiers.
 */
static void fimc_is_load_firmware(const struct firmware *fw, void *context)
{
	struct fimc_is *is = context;
	struct device *dev = &is->pdev->dev;
	void *buf;
	int ret;

	if (fw == NULL) {
		dev_err(dev, "firmware request failed\n");
		return;
	}
	mutex_lock(&is->lock);

	if (fw->size < FIMC_IS_FW_SIZE_MIN || fw->size > FIMC_IS_FW_SIZE_MAX) {
		dev_err(dev, "wrong firmware size: %zu\n", fw->size);
		goto done;
	}

	is->fw.size = fw->size;

	ret = fimc_is_alloc_cpu_memory(is);
	if (ret < 0) {
		dev_err(dev, "failed to allocate FIMC-IS CPU memory\n");
		goto done;
	}

	memcpy(is->memory.vaddr, fw->data, fw->size);
	wmb();	/* image must be visible before the description is read */

	/* Read firmware description. */
	buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_DESC_LEN);
	memcpy(&is->fw.info, buf, FIMC_IS_FW_INFO_LEN);
	/* NOTE(review): indexing info[FIMC_IS_FW_INFO_LEN] assumes the
	 * array is declared one byte larger — confirm in fimc-is.h. */
	is->fw.info[FIMC_IS_FW_INFO_LEN] = 0;

	buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_VER_LEN);
	memcpy(&is->fw.version, buf, FIMC_IS_FW_VER_LEN);
	is->fw.version[FIMC_IS_FW_VER_LEN - 1] = 0;

	is->fw.state = 1;

	dev_info(dev, "loaded firmware: %s, rev. %s\n",
		 is->fw.info, is->fw.version);
	dev_dbg(dev, "FW size: %zu, paddr: %pad\n", fw->size,
		&is->memory.paddr);

	is->is_shared_region->chip_id = 0xe4412;
	is->is_shared_region->chip_rev_no = 1;

	fimc_is_mem_barrier();

	/*
	 * FIXME: The firmware is not being released for now, as it is
	 * needed around for copying to the IS working memory every
	 * time before the Cortex-A5 is restarted.
	 */
	if (is->fw.f_w)
		release_firmware(is->fw.f_w);
	is->fw.f_w = fw;
done:
	mutex_unlock(&is->lock);
}
static int fimc_is_request_firmware(struct fimc_is *is, const char *fw_name)
{
	/* Kick off an asynchronous firmware load; fimc_is_load_firmware()
	 * runs as the completion callback with @is as its context. */
	struct device *dev = &is->pdev->dev;

	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       fw_name, dev, GFP_KERNEL, is,
				       fimc_is_load_firmware);
}
/* General IS interrupt handler */
/*
 * Handles the "general" mailbox interrupt from the IS firmware: reads
 * the command from ISSR(10), fetches its arguments, acknowledges the
 * IRQ, updates driver state bits for the command, and finally wakes any
 * thread sleeping in fimc_is_wait_event().  Called from
 * fimc_is_irq_handler() with is->slock held.
 */
static void fimc_is_general_irq_handler(struct fimc_is *is)
{
	is->i2h_cmd.cmd = mcuctl_read(is, MCUCTL_REG_ISSR(10));

	/* First pass: fetch the number of arguments each command carries. */
	switch (is->i2h_cmd.cmd) {
	case IHC_GET_SENSOR_NUM:
		fimc_is_hw_get_params(is, 1);
		fimc_is_hw_wait_intmsr0_intmsd0(is);
		fimc_is_hw_set_sensor_num(is);
		pr_debug("ISP FW version: %#x\n", is->i2h_cmd.args[0]);
		break;
	case IHC_SET_FACE_MARK:
	case IHC_FRAME_DONE:
		fimc_is_hw_get_params(is, 2);
		break;
	case IHC_SET_SHOT_MARK:
	case IHC_AA_DONE:
	case IH_REPLY_DONE:
		fimc_is_hw_get_params(is, 3);
		break;
	case IH_REPLY_NOT_DONE:
		fimc_is_hw_get_params(is, 4);
		break;
	case IHC_NOT_READY:
		break;
	default:
		pr_info("unknown command: %#x\n", is->i2h_cmd.cmd);
	}

	/* Acknowledge the interrupt before acting on the command. */
	fimc_is_fw_clear_irq1(is, FIMC_IS_INT_GENERAL);

	/* Second pass: update driver state according to the command. */
	switch (is->i2h_cmd.cmd) {
	case IHC_GET_SENSOR_NUM:
		fimc_is_hw_set_intgr0_gd0(is);
		set_bit(IS_ST_A5_PWR_ON, &is->state);
		break;

	case IHC_SET_SHOT_MARK:
		break;

	case IHC_SET_FACE_MARK:
		is->fd_header.count = is->i2h_cmd.args[0];
		is->fd_header.index = is->i2h_cmd.args[1];
		is->fd_header.offset = 0;
		break;

	case IHC_FRAME_DONE:
		break;

	case IHC_AA_DONE:
		pr_debug("AA_DONE - %d, %d, %d\n", is->i2h_cmd.args[0],
			 is->i2h_cmd.args[1], is->i2h_cmd.args[2]);
		break;

	case IH_REPLY_DONE:
		pr_debug("ISR_DONE: args[0]: %#x\n", is->i2h_cmd.args[0]);

		/* args[0] is the host command being acknowledged. */
		switch (is->i2h_cmd.args[0]) {
		case HIC_PREVIEW_STILL...HIC_CAPTURE_VIDEO:
			/* Get CAC margin */
			set_bit(IS_ST_CHANGE_MODE, &is->state);
			is->isp.cac_margin_x = is->i2h_cmd.args[1];
			is->isp.cac_margin_y = is->i2h_cmd.args[2];
			pr_debug("CAC margin (x,y): (%d,%d)\n",
				 is->isp.cac_margin_x, is->isp.cac_margin_y);
			break;

		case HIC_STREAM_ON:
			clear_bit(IS_ST_STREAM_OFF, &is->state);
			set_bit(IS_ST_STREAM_ON, &is->state);
			break;

		case HIC_STREAM_OFF:
			clear_bit(IS_ST_STREAM_ON, &is->state);
			set_bit(IS_ST_STREAM_OFF, &is->state);
			break;

		case HIC_SET_PARAMETER:
			/* Parameter update consumed: clear the indices. */
			is->config[is->config_index].p_region_index1 = 0;
			is->config[is->config_index].p_region_index2 = 0;
			set_bit(IS_ST_BLOCK_CMD_CLEARED, &is->state);
			pr_debug("HIC_SET_PARAMETER\n");
			break;

		case HIC_GET_PARAMETER:
			break;

		case HIC_SET_TUNE:
			break;

		case HIC_GET_STATUS:
			break;

		case HIC_OPEN_SENSOR:
			set_bit(IS_ST_OPEN_SENSOR, &is->state);
			pr_debug("data lanes: %d, settle line: %d\n",
				 is->i2h_cmd.args[2], is->i2h_cmd.args[1]);
			break;

		case HIC_CLOSE_SENSOR:
			clear_bit(IS_ST_OPEN_SENSOR, &is->state);
			is->sensor_index = 0;
			break;

		case HIC_MSG_TEST:
			pr_debug("config MSG level completed\n");
			break;

		case HIC_POWER_DOWN:
			clear_bit(IS_ST_PWR_SUBIP_ON, &is->state);
			break;

		case HIC_GET_SET_FILE_ADDR:
			is->setfile.base = is->i2h_cmd.args[1];
			set_bit(IS_ST_SETFILE_LOADED, &is->state);
			break;

		case HIC_LOAD_SET_FILE:
			set_bit(IS_ST_SETFILE_LOADED, &is->state);
			break;
		}
		break;

	case IH_REPLY_NOT_DONE:
		pr_err("ISR_NDONE: %d: %#x, %s\n", is->i2h_cmd.args[0],
		       is->i2h_cmd.args[1],
		       fimc_is_strerr(is->i2h_cmd.args[1]));

		if (is->i2h_cmd.args[1] & IS_ERROR_TIME_OUT_FLAG)
			pr_err("IS_ERROR_TIME_OUT\n");

		switch (is->i2h_cmd.args[1]) {
		case IS_ERROR_SET_PARAMETER:
			fimc_is_mem_barrier();
		}

		switch (is->i2h_cmd.args[0]) {
		case HIC_SET_PARAMETER:
			is->config[is->config_index].p_region_index1 = 0;
			is->config[is->config_index].p_region_index2 = 0;
			set_bit(IS_ST_BLOCK_CMD_CLEARED, &is->state);
			break;
		}
		break;

	case IHC_NOT_READY:
		pr_err("IS control sequence error: Not Ready\n");
		break;
	}

	/* Release any waiter in fimc_is_wait_event(). */
	wake_up(&is->irq_queue);
}
static irqreturn_t fimc_is_irq_handler(int irq, void *priv)
{
	/* Hard IRQ entry point: read the interrupt status word under the
	 * driver spinlock and dispatch to the per-source handlers. */
	struct fimc_is *is = priv;
	unsigned long flags;
	u32 intsrc;

	spin_lock_irqsave(&is->slock, flags);
	intsrc = mcuctl_read(is, MCUCTL_REG_INTSR1);

	if (intsrc & (1UL << FIMC_IS_INT_GENERAL))
		fimc_is_general_irq_handler(is);

	if (intsrc & (1UL << FIMC_IS_INT_FRAME_DONE_ISP))
		fimc_isp_irq_handler(is);

	spin_unlock_irqrestore(&is->slock, flags);
	return IRQ_HANDLED;
}
/*
 * Issue the HIC_OPEN_SENSOR command to the IS firmware: fill the shared
 * sensor_open_extended block, write the command and its arguments to
 * the ISSR mailbox registers, trigger the doorbell and wait for the
 * IS_ST_OPEN_SENSOR state bit.  Returns 0 or a negative errno.
 */
static int fimc_is_hw_open_sensor(struct fimc_is *is,
				  struct fimc_is_sensor *sensor)
{
	struct sensor_open_extended *soe = (void *)&is->is_p_region->shared;

	fimc_is_hw_wait_intmsr0_intmsd0(is);

	soe->self_calibration_mode = 1;
	soe->actuator_type = 0;
	soe->mipi_lane_num = 0;
	soe->mclk = 0;
	soe->mipi_speed = 0;
	soe->fast_open_sensor = 0;
	soe->i2c_sclk = 88000000;	/* I2C SCL in Hz */

	fimc_is_mem_barrier();

	/* Command + arguments in the mailbox, then ring the doorbell. */
	mcuctl_write(HIC_OPEN_SENSOR, is, MCUCTL_REG_ISSR(0));
	mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
	mcuctl_write(sensor->drvdata->id, is, MCUCTL_REG_ISSR(2));
	mcuctl_write(sensor->i2c_bus, is, MCUCTL_REG_ISSR(3));
	mcuctl_write(is->is_dma_p_region, is, MCUCTL_REG_ISSR(4));

	fimc_is_hw_set_intgr0_gd0(is);

	return fimc_is_wait_event(is, IS_ST_OPEN_SENSOR, 1,
				  FIMC_IS_SENSOR_OPEN_TIMEOUT);
}
/*
 * Bring the IS firmware to its operational state: open the sensor,
 * retrieve the setfile address, load the setfile, verify the shared
 * region magic number, stop streaming, and push the initial parameter
 * sets for all scenario modes.  Returns 0 or a negative errno.
 */
int fimc_is_hw_initialize(struct fimc_is *is)
{
	/* Scenario (mode) ids whose parameters get initialized below. */
	const int config_ids[] = {
		IS_SC_PREVIEW_STILL, IS_SC_PREVIEW_VIDEO,
		IS_SC_CAPTURE_STILL, IS_SC_CAPTURE_VIDEO
	};
	struct device *dev = &is->pdev->dev;
	u32 prev_id;
	int i, ret;

	/* Sensor initialization. */
	ret = fimc_is_hw_open_sensor(is, is->sensor);
	if (ret < 0)
		return ret;

	/* Get the setfile address. */
	fimc_is_hw_get_setfile_addr(is);

	ret = fimc_is_wait_event(is, IS_ST_SETFILE_LOADED, 1,
				 FIMC_IS_CONFIG_TIMEOUT);
	if (ret < 0) {
		dev_err(dev, "get setfile address timed out\n");
		return ret;
	}
	pr_debug("setfile.base: %#x\n", is->setfile.base);

	/* Load the setfile. */
	/* NOTE(review): fimc_is_load_setfile()'s return value is ignored
	 * here — a failed load is only caught by the wait timeout below. */
	fimc_is_load_setfile(is, FIMC_IS_SETFILE_6A3);

	clear_bit(IS_ST_SETFILE_LOADED, &is->state);

	fimc_is_hw_load_setfile(is);
	ret = fimc_is_wait_event(is, IS_ST_SETFILE_LOADED, 1,
				 FIMC_IS_CONFIG_TIMEOUT);
	if (ret < 0) {
		dev_err(dev, "loading setfile timed out\n");
		return ret;
	}

	pr_debug("setfile: base: %#x, size: %d\n",
		 is->setfile.base, is->setfile.size);
	pr_info("FIMC-IS Setfile info: %s\n", is->fw.setfile_info);

	/* Check magic number. */
	if (is->is_p_region->shared[MAX_SHARED_COUNT - 1] !=
	    FIMC_IS_MAGIC_NUMBER) {
		dev_err(dev, "magic number error!\n");
		return -EIO;
	}

	pr_debug("shared region: %#x, parameter region: %#x\n",
		 is->memory.paddr + FIMC_IS_SHARED_REGION_OFFSET,
		 is->is_dma_p_region);

	is->setfile.sub_index = 0;

	/* Stream off. */
	fimc_is_hw_stream_off(is);
	ret = fimc_is_wait_event(is, IS_ST_STREAM_OFF, 1,
				 FIMC_IS_CONFIG_TIMEOUT);
	if (ret < 0) {
		dev_err(dev, "stream off timeout\n");
		return ret;
	}

	/* Preserve previous mode. */
	prev_id = is->config_index;

	/* Set initial parameter values. */
	for (i = 0; i < ARRAY_SIZE(config_ids); i++) {
		is->config_index = config_ids[i];
		fimc_is_set_initial_params(is);
		ret = fimc_is_itf_s_param(is, true);
		if (ret < 0) {
			is->config_index = prev_id;
			return ret;
		}
	}
	is->config_index = prev_id;

	set_bit(IS_ST_INIT_DONE, &is->state);
	dev_info(dev, "initialization sequence completed (%d)\n",
		 is->config_index);
	return 0;
}
/*
 * seq_file show callback for the "fw_log" debugfs entry: dump the
 * firmware debug region as a string.
 *
 * Fix: the debug-region pointer was computed from is->memory.vaddr
 * BEFORE the NULL check, i.e. pointer arithmetic was performed on a
 * potentially NULL pointer.  Validate the base address first.
 */
static int fimc_is_log_show(struct seq_file *s, void *data)
{
	struct fimc_is *is = s->private;
	const u8 *buf;

	if (is->memory.vaddr == NULL) {
		dev_err(&is->pdev->dev, "firmware memory is not initialized\n");
		return -EIO;
	}

	buf = is->memory.vaddr + FIMC_IS_DEBUG_REGION_OFFSET;
	seq_printf(s, "%s\n", buf);
	return 0;
}
static int fimc_is_debugfs_open(struct inode *inode, struct file *file)
{
	/* Bind the log seq_file show routine to this entry's fimc_is. */
	void *priv = inode->i_private;

	return single_open(file, fimc_is_log_show, priv);
}
/* File operations for the read-only "fw_log" debugfs entry. */
static const struct file_operations fimc_is_debugfs_fops = {
	.open		= fimc_is_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void fimc_is_debugfs_remove(struct fimc_is *is)
{
	/* Tear down the whole debugfs directory and clear the handle so
	 * the create/remove pair can be called again safely. */
	struct dentry *entry = is->debugfs_entry;

	is->debugfs_entry = NULL;
	debugfs_remove_recursive(entry);
}
/*
 * Create the "fimc_is" debugfs directory with a read-only "fw_log"
 * entry exposing the firmware debug region.  If the file cannot be
 * created the directory is removed again.  Returns 0 on success or
 * -EIO when the directory is absent/was removed.
 */
static int fimc_is_debugfs_create(struct fimc_is *is)
{
	struct dentry *dentry;

	is->debugfs_entry = debugfs_create_dir("fimc_is", NULL);

	dentry = debugfs_create_file("fw_log", S_IRUGO, is->debugfs_entry,
				     is, &fimc_is_debugfs_fops);
	if (!dentry)
		fimc_is_debugfs_remove(is);

	return is->debugfs_entry == NULL ? -EIO : 0;
}
static int fimc_is_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimc_is *is;
struct resource res;
struct device_node *node;
int ret;
is = devm_kzalloc(&pdev->dev, sizeof(*is), GFP_KERNEL);
if (!is)
return -ENOMEM;
is->pdev = pdev;
is->isp.pdev = pdev;
init_waitqueue_head(&is->irq_queue);
spin_lock_init(&is->slock);
mutex_init(&is->lock);
ret = of_address_to_resource(dev->of_node, 0, &res);
if (ret < 0)
return ret;
is->regs = devm_ioremap_resource(dev, &res);
if (IS_ERR(is->regs))
return PTR_ERR(is->regs);
node = of_get_child_by_name(dev->of_node, "pmu");
if (!node)
return -ENODEV;
is->pmu_regs = of_iomap(node, 0);
if (!is->pmu_regs)
return -ENOMEM;
is->irq = irq_of_parse_and_map(dev->of_node, 0);
if (is->irq < 0) {
dev_err(dev, "no irq found\n");
return is->irq;
}
ret = fimc_is_get_clocks(is);
if (ret < 0)
return ret;
platform_set_drvdata(pdev, is);
ret = request_irq(is->irq, fimc_is_irq_handler, 0, dev_name(dev), is);
if (ret < 0) {
dev_err(dev, "irq request failed\n");
goto err_clk;
}
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_irq;
is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
if (IS_ERR(is->alloc_ctx)) {
ret = PTR_ERR(is->alloc_ctx);
goto err_irq;
}
/*
* Register FIMC-IS V4L2 subdevs to this driver. The video nodes
* will be created within the subdev's registered() callback.
*/
ret = fimc_is_register_subdevs(is);
if (ret < 0)
goto err_vb;
ret = fimc_is_debugfs_create(is);
if (ret < 0)
goto err_sd;
ret = fimc_is_request_firmware(is, FIMC_IS_FW_FILENAME);
if (ret < 0)
goto err_dfs;
pm_runtime_put_sync(dev);
dev_dbg(dev, "FIMC-IS registered successfully\n");
return 0;
err_dfs:
fimc_is_debugfs_remove(is);
err_vb:
vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
err_sd:
fimc_is_unregister_subdevs(is);
err_irq:
free_irq(is->irq, is);
err_clk:
fimc_is_put_clocks(is);
return ret;
}
static int fimc_is_runtime_resume(struct device *dev)
{
	/* Runtime-resume: reconfigure the clock tree, then gate the
	 * clocks on.  Returns the first failing step's error. */
	struct fimc_is *is = dev_get_drvdata(dev);
	int ret = fimc_is_setup_clocks(is);

	return ret ? ret : fimc_is_enable_clocks(is);
}
static int fimc_is_runtime_suspend(struct device *dev)
{
	/* Runtime-suspend: simply gate all IS clocks off. */
	fimc_is_disable_clocks(dev_get_drvdata(dev));
	return 0;
}
#ifdef CONFIG_PM_SLEEP
/* System resume: currently a no-op stub (full PM not implemented). */
static int fimc_is_resume(struct device *dev)
{
	/* TODO: */
	return 0;
}
static int fimc_is_suspend(struct device *dev)
{
	struct fimc_is *is = dev_get_drvdata(dev);

	/* Refuse to suspend while the IS CPU is powered on.
	 * TODO: implement proper suspend support. */
	return test_bit(IS_ST_A5_PWR_ON, &is->state) ? -EBUSY : 0;
}
#endif /* CONFIG_PM_SLEEP */
/*
 * Platform driver remove: disable runtime PM, release the IRQ, tear
 * down subdevs and the vb2 DMA context, release clocks, debugfs, the
 * cached firmware and the IS CPU memory.
 */
static int fimc_is_remove(struct platform_device *pdev)
{
	struct fimc_is *is = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	free_irq(is->irq, is);
	fimc_is_unregister_subdevs(is);
	vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
	fimc_is_put_clocks(is);
	fimc_is_debugfs_remove(is);
	/* f_w may be NULL if the async firmware load never completed. */
	if (is->fw.f_w)
		release_firmware(is->fw.f_w);
	fimc_is_free_cpu_memory(is);

	return 0;
}
/* Device tree match table: binds to the Exynos4212 FIMC-IS node. */
static const struct of_device_id fimc_is_of_match[] = {
	{ .compatible = "samsung,exynos4212-fimc-is" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, fimc_is_of_match);
/* System-sleep and runtime PM callbacks for the platform driver. */
static const struct dev_pm_ops fimc_is_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fimc_is_suspend, fimc_is_resume)
	SET_RUNTIME_PM_OPS(fimc_is_runtime_suspend, fimc_is_runtime_resume,
			   NULL)
};
/* Platform driver descriptor for the FIMC-IS device. */
static struct platform_driver fimc_is_driver = {
	.probe		= fimc_is_probe,
	.remove		= fimc_is_remove,
	.driver = {
		.of_match_table	= fimc_is_of_match,
		.name		= FIMC_IS_DRV_NAME,
		.owner		= THIS_MODULE,
		.pm		= &fimc_is_pm_ops,
	}
};
static int fimc_is_module_init(void)
{
	/* Register the sensor and ISP I2C drivers before the platform
	 * driver, whose probe depends on them being available; unwind in
	 * reverse on failure. */
	int ret;

	ret = fimc_is_register_sensor_driver();
	if (ret < 0)
		return ret;

	ret = fimc_is_register_i2c_driver();
	if (ret < 0)
		goto err_sensor;

	ret = platform_driver_register(&fimc_is_driver);
	if (ret == 0)
		return 0;

	fimc_is_unregister_i2c_driver();
err_sensor:
	fimc_is_unregister_sensor_driver();
	return ret;
}
/*
 * Module exit.  Fix: tear down in the reverse order of
 * fimc_is_module_init() — the platform driver must be unregistered
 * first, while the I2C and sensor drivers it depends on are still
 * registered; the original removed them in registration order.
 */
static void fimc_is_module_exit(void)
{
	platform_driver_unregister(&fimc_is_driver);
	fimc_is_unregister_i2c_driver();
	fimc_is_unregister_sensor_driver();
}
module_init(fimc_is_module_init);
module_exit(fimc_is_module_exit);

/* Module metadata. */
MODULE_ALIAS("platform:" FIMC_IS_DRV_NAME);
MODULE_AUTHOR("Younghwan Joo <yhwan.joo@samsung.com>");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
| gpl-2.0 |
friedrich420/Note-4-TMO-AEL-Kernel-Lollipop-Source | drivers/spi/spi-sh-hspi.c | 2062 | 7830 | /*
* SuperH HSPI bus driver
*
* Copyright (C) 2011 Kuninori Morimoto
*
* Based on spi-sh.c:
* Based on pxa2xx_spi.c:
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/spi/sh_hspi.h>
/* HSPI register offsets from the mapped base address. */
#define SPCR	0x00		/* written with clock/mode config */
#define SPSR	0x04		/* polled for transfer status */
#define SPSCR	0x08		/* master mode / chip-select control */
#define SPTBR	0x0C		/* transmit data buffer */
#define SPRBR	0x10		/* receive data buffer */
#define SPCR2	0x14		/* not used by this driver */

/* SPSR */
#define RXFL	(1 << 2)

#define hspi2info(h)	(h->dev->platform_data)

/* Per-controller driver state. */
struct hspi_priv {
	void __iomem *addr;		/* mapped register base */
	struct spi_master *master;
	struct device *dev;
	struct clk *clk;		/* "shyway_clk" input clock */
};
/*
* basic function
*/
static void hspi_write(struct hspi_priv *hspi, int reg, u32 val)
{
	/* 32-bit MMIO write to the register at offset <reg>. */
	void __iomem *target = hspi->addr + reg;

	iowrite32(val, target);
}
static u32 hspi_read(struct hspi_priv *hspi, int reg)
{
return ioread32(hspi->addr + reg);
}
static void hspi_bit_set(struct hspi_priv *hspi, int reg, u32 mask, u32 set)
{
	/* Read-modify-write: replace the <mask> bits of <reg> with the
	 * corresponding bits of <set>; other bits are preserved. */
	u32 cur = hspi_read(hspi, reg);

	cur = (cur & ~mask) | (set & mask);
	hspi_write(hspi, reg, cur);
}
/*
* transfer function
*/
static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
{
	/* Poll SPSR until (SPSR & mask) == val, up to 256 tries with a
	 * 10us pause between them.  Returns 0 or -ETIMEDOUT. */
	int tries;

	for (tries = 0; tries < 256; tries++) {
		if ((hspi_read(hspi, SPSR) & mask) == val)
			return 0;
		udelay(10);
	}

	dev_err(hspi->dev, "timeout\n");
	return -ETIMEDOUT;
}
/*
* spi master function
*/
static int hspi_prepare_transfer(struct spi_master *master)
{
	/* Take a runtime-PM reference before a message batch starts. */
	struct hspi_priv *hspi = spi_master_get_devdata(master);

	pm_runtime_get_sync(hspi->dev);
	return 0;
}
static int hspi_unprepare_transfer(struct spi_master *master)
{
	/* Drop the runtime-PM reference taken in hspi_prepare_transfer(). */
	struct hspi_priv *hspi = spi_master_get_devdata(master);

	pm_runtime_put_sync(hspi->dev);
	return 0;
}
/*
 * Chip-select control: bit 6 of SPSCR drives the CS line.  "Enable"
 * writes 0 and "disable" writes 1, i.e. the line is treated as
 * active-low by this driver.
 */
#define hspi_hw_cs_enable(hspi)		hspi_hw_cs_ctrl(hspi, 0)
#define hspi_hw_cs_disable(hspi)	hspi_hw_cs_ctrl(hspi, 1)

static void hspi_hw_cs_ctrl(struct hspi_priv *hspi, int hi)
{
	/* Set SPSCR bit 6 to <hi>; all other bits are preserved. */
	hspi_bit_set(hspi, SPSCR, (1 << 6), (hi) << 6);
}
/*
 * Program SPCR/SPSR/SPSCR for the upcoming transfer: search all 64
 * IDIV/CLKCx divider combinations for the rate closest to the requested
 * speed (t->speed_hz, falling back to the device's max_speed_hz), merge
 * in the SPI mode bits, and write the registers.
 *
 * Fix: clk_get_rate() was called on every iteration of the divider
 * search even though the input clock rate is loop-invariant; it is now
 * read once before the loop.
 */
static void hspi_hw_setup(struct hspi_priv *hspi,
			  struct spi_message *msg,
			  struct spi_transfer *t)
{
	struct spi_device *spi = msg->spi;
	struct device *dev = hspi->dev;
	u32 target_rate;
	u32 spcr, idiv_clk;
	u32 rate, best_rate, min, tmp;
	u32 parent_rate;

	target_rate = t ? t->speed_hz : 0;
	if (!target_rate)
		target_rate = spi->max_speed_hz;

	/*
	 * find best IDIV/CLKCx settings
	 */
	min = ~0;
	best_rate = 0;
	spcr = 0;
	parent_rate = clk_get_rate(hspi->clk);
	for (idiv_clk = 0x00; idiv_clk <= 0x3F; idiv_clk++) {
		rate = parent_rate;

		/* IDIV calculation */
		if (idiv_clk & (1 << 5))
			rate /= 128;
		else
			rate /= 16;

		/* CLKCx calculation */
		rate /= (((idiv_clk & 0x1F) + 1) * 2);

		/* save best settings */
		tmp = abs(target_rate - rate);
		if (tmp < min) {
			min = tmp;
			spcr = idiv_clk;
			best_rate = rate;
		}
	}

	if (spi->mode & SPI_CPHA)
		spcr |= 1 << 7;
	if (spi->mode & SPI_CPOL)
		spcr |= 1 << 6;

	dev_dbg(dev, "speed %d/%d\n", target_rate, best_rate);

	hspi_write(hspi, SPCR, spcr);
	hspi_write(hspi, SPSR, 0x0);
	hspi_write(hspi, SPSCR, 0x21);	/* master mode / CS control */
}
/*
 * Execute every transfer of @msg by PIO: per byte, wait for the
 * transmitter to drain, write SPTBR, wait for receive data and read
 * SPRBR.  Chip-select is (re)asserted at message start and whenever the
 * previous transfer requested cs_change; t->delay_usecs is honoured
 * after each transfer.  Stores the result in msg->status and always
 * finalizes the message.  Returns 0 or -ETIMEDOUT from the polls.
 */
static int hspi_transfer_one_message(struct spi_master *master,
				     struct spi_message *msg)
{
	struct hspi_priv *hspi = spi_master_get_devdata(master);
	struct spi_transfer *t;
	u32 tx;
	u32 rx;
	int ret, i;
	unsigned int cs_change;
	const int nsecs = 50;	/* delay around CS transitions */

	dev_dbg(hspi->dev, "%s\n", __func__);

	cs_change = 1;
	ret = 0;
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		if (cs_change) {
			/* (Re)program the clock/mode and assert CS. */
			hspi_hw_setup(hspi, msg, t);
			hspi_hw_cs_enable(hspi);
			ndelay(nsecs);
		}
		cs_change = t->cs_change;

		for (i = 0; i < t->len; i++) {
			/* wait until the transmit stage is empty */
			ret = hspi_status_check_timeout(hspi, 0x1, 0);
			if (ret < 0)
				break;

			tx = 0;
			if (t->tx_buf)
				tx = (u32)((u8 *)t->tx_buf)[i];

			hspi_write(hspi, SPTBR, tx);

			/* wait for receive data */
			ret = hspi_status_check_timeout(hspi, 0x4, 0x4);
			if (ret < 0)
				break;

			rx = hspi_read(hspi, SPRBR);
			if (t->rx_buf)
				((u8 *)t->rx_buf)[i] = (u8)rx;
		}

		msg->actual_length += t->len;

		if (t->delay_usecs)
			udelay(t->delay_usecs);

		if (cs_change) {
			ndelay(nsecs);
			hspi_hw_cs_disable(hspi);
			ndelay(nsecs);
		}
	}

	msg->status = ret;
	/* If the last transfer did not toggle CS, deassert it now. */
	if (!cs_change) {
		ndelay(nsecs);
		hspi_hw_cs_disable(hspi);
	}
	spi_finalize_current_message(master);

	return ret;
}
static int hspi_setup(struct spi_device *spi)
{
	/* Per-device setup hook: this controller only handles 8-bit
	 * words, so reject anything else. */
	struct hspi_priv *hspi = spi_master_get_devdata(spi->master);
	struct device *dev = hspi->dev;

	if (spi->bits_per_word != 8) {
		dev_err(dev, "bits_per_word should be 8\n");
		return -EIO;
	}

	dev_dbg(dev, "%s setup\n", spi->modalias);
	return 0;
}
static void hspi_cleanup(struct spi_device *spi)
{
	/* Per-device cleanup hook: nothing to release, just trace. */
	struct hspi_priv *hspi = spi_master_get_devdata(spi->master);

	dev_dbg(hspi->dev, "%s cleanup\n", spi->modalias);
}
/*
 * hspi_probe() - bind the sh-hspi platform device.
 *
 * Maps the register window, acquires the SHway clock and registers an
 * SPI master; failures unwind in reverse order through the error labels.
 */
static int hspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
struct hspi_priv *hspi;
struct clk *clk;
int ret;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "invalid resource\n");
return -EINVAL;
}
/* private data (hspi_priv) is carved out of the master allocation */
master = spi_alloc_master(&pdev->dev, sizeof(*hspi));
if (!master) {
dev_err(&pdev->dev, "spi_alloc_master error.\n");
return -ENOMEM;
}
/* the controller is fed from the shared "shyway" clock */
clk = clk_get(NULL, "shyway_clk");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "shyway_clk is required\n");
ret = -EINVAL;
goto error0;
}
hspi = spi_master_get_devdata(master);
dev_set_drvdata(&pdev->dev, hspi);
/* init hspi */
hspi->master = master;
hspi->dev = &pdev->dev;
hspi->clk = clk;
/* devm-managed mapping: released automatically on unbind */
hspi->addr = devm_ioremap(hspi->dev,
res->start, resource_size(res));
if (!hspi->addr) {
dev_err(&pdev->dev, "ioremap error.\n");
ret = -ENOMEM;
goto error1;
}
/* single chip select; only clock polarity/phase are configurable */
master->num_chipselect = 1;
master->bus_num = pdev->id;
master->setup = hspi_setup;
master->cleanup = hspi_cleanup;
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->prepare_transfer_hardware = hspi_prepare_transfer;
master->transfer_one_message = hspi_transfer_one_message;
master->unprepare_transfer_hardware = hspi_unprepare_transfer;
ret = spi_register_master(master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
goto error1;
}
pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "probed\n");
return 0;
error1:
clk_put(clk);
error0:
spi_master_put(master);
return ret;
}
/*
 * hspi_remove() - platform driver removal.
 *
 * Tear down in reverse order of probe: disable runtime PM, unregister
 * the SPI master (quiescing any in-flight transfers) and only then
 * release the clock.  Pointers are cached first because
 * spi_unregister_master() drops the reference that keeps the devdata
 * (and therefore hspi) alive; the original code also released the
 * clock while the master was still registered and able to start
 * transfers.
 */
static int hspi_remove(struct platform_device *pdev)
{
	struct hspi_priv *hspi = dev_get_drvdata(&pdev->dev);
	struct spi_master *master = hspi->master;
	struct clk *clk = hspi->clk;

	pm_runtime_disable(&pdev->dev);

	spi_unregister_master(master);
	clk_put(clk);

	return 0;
}
/* Platform glue: bound by name to the "sh-hspi" platform device. */
static struct platform_driver hspi_driver = {
.probe = hspi_probe,
.remove = hspi_remove,
.driver = {
.name = "sh-hspi",
.owner = THIS_MODULE,
},
};
module_platform_driver(hspi_driver);
MODULE_DESCRIPTION("SuperH HSPI bus driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_ALIAS("platform:sh_spi");
| gpl-2.0 |
goodhanrry/G9250_goodhanrry_kernel | drivers/media/dvb-frontends/or51211.c | 2830 | 14254 | /*
* Support for OR51211 (pcHDTV HD-2000) - VSB
*
* Copyright (C) 2005 Kirk Lapray <kirk_lapray@bigfoot.com>
*
* Based on code from Jack Kelliher (kelliher@xmission.com)
* Copyright (C) 2002 & pcHDTV, inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
/*
* This driver needs external firmware. Please use the command
* "<kerneldir>/Documentation/dvb/get_dvb_firmware or51211" to
* download/extract it, and then copy it to /usr/lib/hotplug/firmware
* or /lib/firmware (depending on configuration of firmware hotplug).
*/
#define OR51211_DEFAULT_FIRMWARE "dvb-fe-or51211.fw"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include "dvb_math.h"
#include "dvb_frontend.h"
#include "or51211.h"
static int debug;
#define dprintk(args...) \
do { if (debug) pr_debug(args); } while (0)
/* command written (twice) to start/restart the demod microcode */
static u8 run_buf[] = {0x7f,0x01};
/* Receiver 1 mode command; 0x50 selects automatic channel conditions,
 * NTSC rejection and serial MPEG output (see or51211_setmode()) */
static u8 cmd_buf[] = {0x04,0x01,0x50,0x80,0x06}; // ATSC
struct or51211_state {
struct i2c_adapter* i2c;
/* Configuration settings */
const struct or51211_config* config;
struct dvb_frontend frontend;
struct bt878* bt;
/* Demodulator private data */
u8 initialized:1; /* set once firmware upload succeeded */
u32 snr; /* Result of last SNR calculation (8.24 fixed point) */
/* Tuner private data */
u32 current_frequency;
};
/*
 * Write @len bytes from @buf to the I2C device at address @reg.
 * Returns 0 on success, -EREMOTEIO if the transfer did not complete.
 */
static int i2c_writebytes (struct or51211_state* state, u8 reg, const u8 *buf,
			   int len)
{
	struct i2c_msg msg = {
		.addr	= reg,
		.flags	= 0,
		.len	= len,
		.buf	= (u8 *)buf,
	};
	int err = i2c_transfer(state->i2c, &msg, 1);

	if (err != 1) {
		pr_warn("error (addr %02x, err == %i)\n", reg, err);
		return -EREMOTEIO;
	}

	return 0;
}
/*
 * Read @len bytes from the I2C device at address @reg into @buf.
 * Returns 0 on success, -EREMOTEIO if the transfer did not complete.
 */
static int i2c_readbytes(struct or51211_state *state, u8 reg, u8 *buf, int len)
{
	struct i2c_msg msg = {
		.addr	= reg,
		.flags	= I2C_M_RD,
		.len	= len,
		.buf	= buf,
	};
	int err = i2c_transfer(state->i2c, &msg, 1);

	if (err != 1) {
		pr_warn("error (addr %02x, err == %i)\n", reg, err);
		return -EREMOTEIO;
	}

	return 0;
}
/*
 * Upload the demod microcode.
 *
 * The image is consumed as bytes 0..144 and 145..392 (bootstrap block,
 * interleaved with 192 bytes of EEPROM data read from address 0x50)
 * followed by an 8125-byte payload starting at offset 393.  The size
 * is validated up front so the copies below can never index past
 * fw->data on a truncated firmware file.  Returns 0 on success, -1 on
 * any failure (matching the callers' checks).
 */
static int or51211_load_firmware (struct dvb_frontend* fe,
				  const struct firmware *fw)
{
	struct or51211_state* state = fe->demodulator_priv;
	u8 tudata[585];
	int i;

	dprintk("Firmware is %zd bytes\n",fw->size);

	/* reject firmware shorter than the layout described above */
	if (fw->size < 393 + 8125) {
		pr_warn("firmware too short (%zd bytes)\n", fw->size);
		return -1;
	}

	/* Get eprom data */
	tudata[0] = 17;
	if (i2c_writebytes(state,0x50,tudata,1)) {
		pr_warn("error eprom addr\n");
		return -1;
	}
	if (i2c_readbytes(state,0x50,&tudata[145],192)) {
		pr_warn("error eprom\n");
		return -1;
	}

	/* Create firmware buffer: bootstrap bytes around the EEPROM block */
	for (i = 0; i < 145; i++)
		tudata[i] = fw->data[i];
	for (i = 0; i < 248; i++)
		tudata[i+337] = fw->data[145+i];

	state->config->reset(fe);

	if (i2c_writebytes(state,state->config->demod_address,tudata,585)) {
		pr_warn("error 1\n");
		return -1;
	}
	msleep(1);

	/* stream the main payload */
	if (i2c_writebytes(state,state->config->demod_address,
			   &fw->data[393],8125)) {
		pr_warn("error 2\n");
		return -1;
	}
	msleep(1);

	/* start the microcode; the run command is sent twice */
	if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) {
		pr_warn("error 3\n");
		return -1;
	}

	/* Wait at least 5 msec */
	msleep(10);
	if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) {
		pr_warn("error 4\n");
		return -1;
	}
	msleep(10);

	pr_info("Done.\n");
	return 0;
}
/*
 * Put the demod into the requested operating mode.
 *
 * Sequence: board-specific setmode hook, restart the microcode (run
 * command sent twice with >5 ms gaps), program the Receiver 1 mode
 * register, then read back two status bytes as a sanity check.
 * Returns 0 on success, -1 on I2C failure.
 */
static int or51211_setmode(struct dvb_frontend* fe, int mode)
{
struct or51211_state* state = fe->demodulator_priv;
u8 rec_buf[14];
state->config->setmode(fe, mode);
if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) {
pr_warn("error 1\n");
return -1;
}
/* Wait at least 5 msec */
msleep(10);
if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) {
pr_warn("error 2\n");
return -1;
}
msleep(10);
/* Set operation mode in Receiver 1 register;
 * type 1:
 * data 0x50h Automatic sets receiver channel conditions
 * Automatic NTSC rejection filter
 * Enable MPEG serial data output
 * MPEG2tr
 * High tuner phase noise
 * normal +/-150kHz Carrier acquisition range
 */
if (i2c_writebytes(state,state->config->demod_address,cmd_buf,3)) {
pr_warn("error 3\n");
return -1;
}
/* query receiver status (0x04 0x00 0x03) and read the reply back */
rec_buf[0] = 0x04;
rec_buf[1] = 0x00;
rec_buf[2] = 0x03;
rec_buf[3] = 0x00;
msleep(20);
/* note: a failure here is logged but deliberately not fatal */
if (i2c_writebytes(state,state->config->demod_address,rec_buf,3)) {
pr_warn("error 5\n");
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,&rec_buf[10],2)) {
pr_warn("error 6\n");
return -1;
}
dprintk("rec status %02x %02x\n", rec_buf[10], rec_buf[11]);
return 0;
}
/*
 * Tune to the frequency held in the DTV property cache.  A no-op when
 * the requested frequency matches the one already tuned.
 */
static int or51211_set_parameters(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct or51211_state* state = fe->demodulator_priv;

	/* nothing to do if the channel is unchanged */
	if (state->current_frequency == p->frequency)
		return 0;

	if (fe->ops.tuner_ops.set_params) {
		fe->ops.tuner_ops.set_params(fe);
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 0);
	}

	/* force ATSC mode and remember the new frequency */
	or51211_setmode(fe, 0);
	state->current_frequency = p->frequency;

	return 0;
}
/*
 * Read the receiver status word; bit 0 of the first byte reports lock,
 * in which case all FE_HAS_* status flags are set at once.
 */
static int or51211_read_status(struct dvb_frontend* fe, fe_status_t* status)
{
	struct or51211_state* state = fe->demodulator_priv;
	unsigned char snd_buf[] = {0x04,0x00,0x03,0x00};
	unsigned char rec_buf[2];

	*status = 0;

	/* Receiver Status */
	if (i2c_writebytes(state,state->config->demod_address,snd_buf,3)) {
		pr_warn("write error\n");
		return -1;
	}
	msleep(3);
	if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) {
		pr_warn("read error\n");
		return -1;
	}
	dprintk("%x %x\n", rec_buf[0], rec_buf[1]);

	if (rec_buf[0] & 0x01) {	/* Receiver Lock */
		*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
			   FE_HAS_SYNC | FE_HAS_LOCK;
	}

	return 0;
}
/*
 * Calculate an SNR estimate, scaled by 2^24.
 *
 * 8-VSB SNR equation from the Oren datasheets:
 *   SNR[dB] = 10 * log10(219037.9454 / MSE^2)
 * rewritten as:
 *   SNR * 2^24 = 10 * (c - 2 * intlog10(MSE))
 * where, for 8-VSB, c = log10(219037.9454) * 2^24.
 */
static u32 calculate_snr(u32 mse, u32 c)
{
	u32 mse_log;

	if (!mse)	/* no signal at all */
		return 0;

	mse_log = 2 * intlog10(mse);
	if (mse_log > c) {
		/*
		 * Negative SNR is possible in theory, but the demod loses
		 * lock well before the signal gets that bad; the API only
		 * allows unsigned values, so clamp to zero.
		 */
		return 0;
	}

	return 10 * (c - mse_log);
}
/*
 * Read the post-equalizer noise figure and convert it to an SNR.
 * The full 8.24 fixed-point value is cached in state->snr; *snr gets
 * the truncated 8.8 value.  89599047 is log10(219037.9454) * 2^24
 * (the 8-VSB constant 'c' for calculate_snr() above).
 */
static int or51211_read_snr(struct dvb_frontend* fe, u16* snr)
{
struct or51211_state* state = fe->demodulator_priv;
u8 rec_buf[2];
u8 snd_buf[3];
/* SNR after Equalizer */
snd_buf[0] = 0x04;
snd_buf[1] = 0x00;
snd_buf[2] = 0x04;
if (i2c_writebytes(state,state->config->demod_address,snd_buf,3)) {
pr_warn("error writing snr reg\n");
return -1;
}
if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) {
pr_warn("read_status read error\n");
return -1;
}
state->snr = calculate_snr(rec_buf[0], 89599047);
*snr = (state->snr) >> 16;
dprintk("noise = 0x%02x, snr = %d.%02d dB\n", rec_buf[0],
state->snr >> 24, (((state->snr>>8) & 0xffff) * 100) >> 16);
return 0;
}
/*
 * Map SNR onto the 0..0xffff signal-strength scale, saturating at
 * 35 dB so a comfortable range of strong signals reports 100%.
 */
static int or51211_read_signal_strength(struct dvb_frontend* fe, u16* strength)
{
	struct or51211_state* state = (struct or51211_state*)fe->demodulator_priv;
	u16 snr;
	int ret;

	ret = fe->ops.read_snr(fe, &snr);
	if (ret)
		return ret;

	/*
	 * Use the full-precision 8.24 value in state->snr rather than the
	 * 8.8 value returned above; scale 0..35*2^24 into 0..65535
	 * (35 * 2^24 / 65536 == 8960).
	 */
	if (state->snr >= 8960 * 0x10000)
		*strength = 0xffff;
	else
		*strength = state->snr / 8960;

	return 0;
}
/* BER readout is not implemented for this demod. */
static int or51211_read_ber(struct dvb_frontend* fe, u32* ber)
{
*ber = -ENOSYS;
return 0;
}
/* Uncorrected-block readout is not implemented either. */
static int or51211_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
{
*ucblocks = -ENOSYS;
return 0;
}
/* No runtime low-power mode; the hardware sleep hook is only invoked
 * from or51211_release(). */
static int or51211_sleep(struct dvb_frontend* fe)
{
return 0;
}
/*
 * One-time initialisation: upload the firmware, program the default
 * receiver mode, and read back the microcode version/status to verify
 * the demod is actually running.  Idempotent via state->initialized.
 */
static int or51211_init(struct dvb_frontend* fe)
{
struct or51211_state* state = fe->demodulator_priv;
const struct or51211_config* config = state->config;
const struct firmware* fw;
unsigned char get_ver_buf[] = {0x04,0x00,0x30,0x00,0x00};
unsigned char rec_buf[14];
int ret,i;
if (!state->initialized) {
/* Request the firmware, this will block until it uploads */
pr_info("Waiting for firmware upload (%s)...\n",
OR51211_DEFAULT_FIRMWARE);
ret = config->request_firmware(fe, &fw,
OR51211_DEFAULT_FIRMWARE);
pr_info("Got Hotplug firmware\n");
if (ret) {
pr_warn("No firmware uploaded "
"(timeout or file not found?)\n");
return ret;
}
ret = or51211_load_firmware(fe, fw);
release_firmware(fw);
if (ret) {
pr_warn("Writing firmware to device failed!\n");
return ret;
}
pr_info("Firmware upload complete.\n");
/* Set operation mode in Receiver 1 register;
 * type 1:
 * data 0x50h Automatic sets receiver channel conditions
 * Automatic NTSC rejection filter
 * Enable MPEG serial data output
 * MPEG2tr
 * High tuner phase noise
 * normal +/-150kHz Carrier acquisition range
 */
if (i2c_writebytes(state,state->config->demod_address,
cmd_buf,3)) {
pr_warn("Load DVR Error 5\n");
return -1;
}
/* Read back ucode version to besure we loaded correctly */
/* and are really up and running */
/* status query: 0x04 0x00 0x03 -> reply lands in rec_buf[10..11] */
rec_buf[0] = 0x04;
rec_buf[1] = 0x00;
rec_buf[2] = 0x03;
rec_buf[3] = 0x00;
msleep(30);
if (i2c_writebytes(state,state->config->demod_address,
rec_buf,3)) {
pr_warn("Load DVR Error A\n");
return -1;
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,
&rec_buf[10],2)) {
pr_warn("Load DVR Error B\n");
return -1;
}
/* VSB mode query: 0x04 0x00 0x01 -> reply in rec_buf[12..13] */
rec_buf[0] = 0x04;
rec_buf[1] = 0x00;
rec_buf[2] = 0x01;
rec_buf[3] = 0x00;
msleep(20);
if (i2c_writebytes(state,state->config->demod_address,
rec_buf,3)) {
pr_warn("Load DVR Error C\n");
return -1;
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,
&rec_buf[12],2)) {
pr_warn("Load DVR Error D\n");
return -1;
}
/* poison the version slots so a failed read is visible */
for (i = 0; i < 8; i++)
rec_buf[i]=0xed;
/* fetch the five two-byte version words, retrying any reply that
 * comes back with the wrong index */
for (i = 0; i < 5; i++) {
msleep(30);
get_ver_buf[4] = i+1;
if (i2c_writebytes(state,state->config->demod_address,
get_ver_buf,5)) {
pr_warn("Load DVR Error 6 - %d\n", i);
return -1;
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,
&rec_buf[i*2],2)) {
pr_warn("Load DVR Error 7 - %d\n", i);
return -1;
}
/* If we didn't receive the right index, try again */
if ((int)rec_buf[i*2+1]!=i+1){
i--;
}
}
dprintk("read_fwbits %10ph\n", rec_buf);
pr_info("ver TU%02x%02x%02x VSB mode %02x Status %02x\n",
rec_buf[2], rec_buf[4], rec_buf[6], rec_buf[12],
rec_buf[10]);
/* final status poll before declaring the demod initialised */
rec_buf[0] = 0x04;
rec_buf[1] = 0x00;
rec_buf[2] = 0x03;
rec_buf[3] = 0x00;
msleep(20);
if (i2c_writebytes(state,state->config->demod_address,
rec_buf,3)) {
pr_warn("Load DVR Error 8\n");
return -1;
}
msleep(20);
if (i2c_readbytes(state,state->config->demod_address,
&rec_buf[8],2)) {
pr_warn("Load DVR Error 9\n");
return -1;
}
state->initialized = 1;
}
return 0;
}
/* Tuning advice for the DVB core: allow 500 ms per attempt; a
 * step_size/max_drift of 0 disables frequency zigzag. */
static int or51211_get_tune_settings(struct dvb_frontend* fe,
struct dvb_frontend_tune_settings* fesettings)
{
fesettings->min_delay_ms = 500;
fesettings->step_size = 0;
fesettings->max_drift = 0;
return 0;
}
/* Frontend detach: put the hardware to sleep via the board hook and
 * free the state allocated in or51211_attach(). */
static void or51211_release(struct dvb_frontend* fe)
{
struct or51211_state* state = fe->demodulator_priv;
state->config->sleep(fe);
kfree(state);
}
static struct dvb_frontend_ops or51211_ops;
/*
 * Allocate and wire up a new OR51211 frontend instance.  The hardware
 * itself is untouched until or51211_init(); returns NULL on allocation
 * failure.
 */
struct dvb_frontend* or51211_attach(const struct or51211_config* config,
				    struct i2c_adapter* i2c)
{
	struct or51211_state* state;

	/* Allocate memory for the internal state */
	state = kzalloc(sizeof(struct or51211_state), GFP_KERNEL);
	if (!state)
		return NULL;

	/* Setup the state: stash the board config and I2C bus handle */
	state->config = config;
	state->i2c = i2c;
	state->initialized = 0;
	state->current_frequency = 0;

	/* Create dvb_frontend */
	state->frontend.ops = or51211_ops;
	state->frontend.demodulator_priv = state;

	return &state->frontend;
}
/* Frontend operations and capabilities advertised to the DVB core. */
static struct dvb_frontend_ops or51211_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Oren OR51211 VSB Frontend",
.frequency_min = 44000000,
.frequency_max = 958000000,
.frequency_stepsize = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_8VSB
},
.release = or51211_release,
.init = or51211_init,
.sleep = or51211_sleep,
.set_frontend = or51211_set_parameters,
.get_tune_settings = or51211_get_tune_settings,
.read_status = or51211_read_status,
.read_ber = or51211_read_ber,
.read_signal_strength = or51211_read_signal_strength,
.read_snr = or51211_read_snr,
.read_ucblocks = or51211_read_ucblocks,
};
/* Module metadata; 'debug' gates the dprintk() output above. */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("Oren OR51211 VSB [pcHDTV HD-2000] Demodulator Driver");
MODULE_AUTHOR("Kirk Lapray");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(or51211_attach);
| gpl-2.0 |
AndroidDeveloperAlliance/ZenSERIES-N7100 | drivers/usb/serial/kl5kusb105.c | 3086 | 19631 | /*
* KLSI KL5KUSB105 chip RS232 converter driver
*
* Copyright (C) 2010 Johan Hovold <jhovold@gmail.com>
* Copyright (C) 2001 Utz-Uwe Haus <haus@uuhaus.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* All information about the device was acquired using SniffUSB ans snoopUSB
* on Windows98.
* It was written out of frustration with the PalmConnect USB Serial adapter
* sold by Palm Inc.
* Neither Palm, nor their contractor (MCCI) or their supplier (KLSI) provided
* information that was not already available.
*
* It seems that KLSI bought some silicon-design information from ScanLogic,
* whose SL11R processor is at the core of the KL5KUSB chipset from KLSI.
* KLSI has firmware available for their devices; it is probable that the
* firmware differs from that used by KLSI in their products. If you have an
* original KLSI device and can provide some information on it, I would be
* most interested in adding support for it here. If you have any information
* on the protocol used (or find errors in my reverse-engineered stuff), please
* let me know.
*
* The code was only tested with a PalmConnect USB adapter; if you
* are adventurous, try it with any KLSI-based device and let me know how it
* breaks so that I can fix it!
*/
/* TODO:
* check modem line signals
* implement handshaking or decide that we do not support it
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "kl5kusb105.h"
static int debug;
/*
* Version Information
*/
#define DRIVER_VERSION "v0.4"
#define DRIVER_AUTHOR "Utz-Uwe Haus <haus@uuhaus.de>, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "KLSI KL5KUSB105 chipset USB->Serial Converter driver"
/*
* Function prototypes
*/
static int klsi_105_startup(struct usb_serial *serial);
static void klsi_105_release(struct usb_serial *serial);
static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port);
static void klsi_105_close(struct usb_serial_port *port);
static void klsi_105_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
static int klsi_105_tiocmget(struct tty_struct *tty);
static int klsi_105_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static void klsi_105_process_read_urb(struct urb *urb);
static int klsi_105_prepare_write_buffer(struct usb_serial_port *port,
void *dest, size_t size);
/*
* All of the device info needed for the KLSI converters.
*/
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PALMCONNECT_VID, PALMCONNECT_PID) },
{ USB_DEVICE(KLSI_VID, KLSI_KL5KUSB105D_PID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
/* USB-level driver: delegates probing to the usb-serial core */
static struct usb_driver kl5kusb105d_driver = {
.name = "kl5kusb105d",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
.no_dynamic_id = 1,
};
/* usb-serial driver: one port per device, 64-byte bulk-out blocks
 * (2-byte length header + up to 62 bytes of payload, see below) */
static struct usb_serial_driver kl5kusb105d_device = {
.driver = {
.owner = THIS_MODULE,
.name = "kl5kusb105d",
},
.description = "KL5KUSB105D / PalmConnect",
.usb_driver = &kl5kusb105d_driver,
.id_table = id_table,
.num_ports = 1,
.bulk_out_size = 64,
.open = klsi_105_open,
.close = klsi_105_close,
.set_termios = klsi_105_set_termios,
/*.break_ctl = klsi_105_break_ctl,*/
.tiocmget = klsi_105_tiocmget,
.tiocmset = klsi_105_tiocmset,
.attach = klsi_105_startup,
.release = klsi_105_release,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.process_read_urb = klsi_105_process_read_urb,
.prepare_write_buffer = klsi_105_prepare_write_buffer,
};
/* On-the-wire settings block for the SET_DATA vendor request */
struct klsi_105_port_settings {
__u8 pktlen; /* always 5, it seems */
__u8 baudrate;
__u8 databits;
__u8 unknown1;
__u8 unknown2;
} __attribute__ ((packed));
/* Per-port private state; 'lock' guards cfg/termios/line_state */
struct klsi_105_private {
struct klsi_105_port_settings cfg; /* last settings sent to device */
struct ktermios termios; /* shadow copy, filled at open time */
unsigned long line_state; /* modem line settings */
spinlock_t lock;
};
/*
* Handle vendor specific USB requests
*/
#define KLSI_TIMEOUT 5000 /* default urb timeout */
/*
 * Send a SET_DATA vendor request carrying the given port settings.
 * Returns the usb_control_msg() result: negative errno on failure,
 * number of bytes transferred on success.  The settings summary is
 * now only logged when the request actually succeeded (the original
 * printed it unconditionally, even after a failure).
 */
static int klsi_105_chg_port_settings(struct usb_serial_port *port,
				      struct klsi_105_port_settings *settings)
{
	int rc;

	rc = usb_control_msg(port->serial->dev,
			usb_sndctrlpipe(port->serial->dev, 0),
			KL5KUSB105A_SIO_SET_DATA,
			USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_INTERFACE,
			0, /* value */
			0, /* index */
			settings,
			sizeof(struct klsi_105_port_settings),
			KLSI_TIMEOUT);
	if (rc < 0) {
		dev_err(&port->dev,
			"Change port settings failed (error = %d)\n", rc);
		return rc;
	}

	dev_info(&port->serial->dev->dev,
		 "%d byte block, baudrate %x, databits %d, u1 %d, u2 %d\n",
		 settings->pktlen, settings->baudrate, settings->databits,
		 settings->unknown1, settings->unknown2);

	return rc;
}
/* Translate a 16-bit device status word into TIOCM_* line-state bits. */
static unsigned long klsi_105_status2linestate(const __u16 status)
{
	unsigned long res = 0;

	if (status & KL5KUSB105A_DSR)
		res |= TIOCM_DSR;
	if (status & KL5KUSB105A_CTS)
		res |= TIOCM_CTS;

	return res;
}
/*
* Read line control via vendor command and return result through
* *line_state_p
*/
/* It seems that the status buffer has always only 2 bytes length */
#define KLSI_STATUSBUF_LEN 2
static int klsi_105_get_line_state(struct usb_serial_port *port,
unsigned long *line_state_p)
{
int rc;
u8 *status_buf;
__u16 status;
dev_info(&port->serial->dev->dev, "sending SIO Poll request\n");
status_buf = kmalloc(KLSI_STATUSBUF_LEN, GFP_KERNEL);
if (!status_buf) {
dev_err(&port->dev, "%s - out of memory for status buffer.\n",
__func__);
return -ENOMEM;
}
status_buf[0] = 0xff;
status_buf[1] = 0xff;
rc = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
KL5KUSB105A_SIO_POLL,
USB_TYPE_VENDOR | USB_DIR_IN,
0, /* value */
0, /* index */
status_buf, KLSI_STATUSBUF_LEN,
10000
);
if (rc < 0)
dev_err(&port->dev, "Reading line status failed (error = %d)\n",
rc);
else {
status = get_unaligned_le16(status_buf);
dev_info(&port->serial->dev->dev, "read status %x %x",
status_buf[0], status_buf[1]);
*line_state_p = klsi_105_status2linestate(status);
}
kfree(status_buf);
return rc;
}
/*
* Driver's tty interface functions
*/
/*
 * attach() callback: allocate and initialise private data for every
 * port.  On allocation failure, everything set up so far is torn down
 * again and -ENOMEM is returned.
 */
static int klsi_105_startup(struct usb_serial *serial)
{
struct klsi_105_private *priv;
int i;
/* check if we support the product id (see keyspan.c)
 * FIXME
 */
/* allocate the private data structure */
for (i = 0; i < serial->num_ports; i++) {
priv = kmalloc(sizeof(struct klsi_105_private),
GFP_KERNEL);
if (!priv) {
dbg("%skmalloc for klsi_105_private failed.", __func__);
/* step back to the last successfully set-up port */
i--;
goto err_cleanup;
}
/* set initial values for control structures */
priv->cfg.pktlen = 5;
priv->cfg.baudrate = kl5kusb105a_sio_b9600;
priv->cfg.databits = kl5kusb105a_dtb_8;
priv->cfg.unknown1 = 0;
priv->cfg.unknown2 = 1;
priv->line_state = 0;
usb_set_serial_port_data(serial->port[i], priv);
spin_lock_init(&priv->lock);
/* priv->termios is left uninitialized until port opening */
init_waitqueue_head(&serial->port[i]->write_wait);
}
return 0;
err_cleanup:
/* free private data of all ports initialised before the failure */
for (; i >= 0; i--) {
priv = usb_get_serial_port_data(serial->port[i]);
kfree(priv);
usb_set_serial_port_data(serial->port[i], NULL);
}
return -ENOMEM;
}
/* Free the per-port private data allocated in klsi_105_startup(). */
static void klsi_105_release(struct usb_serial *serial)
{
	int port_num;

	dbg("%s", __func__);

	for (port_num = 0; port_num < serial->num_ports; ++port_num)
		kfree(usb_get_serial_port_data(serial->port[port_num]));
}
/*
 * Open the port: push default 9600/8N1 settings to the device, start
 * the generic read/write machinery, enable reading via the CONFIGURE
 * vendor request, and take an initial modem line-state snapshot.
 */
static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct klsi_105_private *priv = usb_get_serial_port_data(port);
int retval = 0;
int rc;
int i;
unsigned long line_state;
struct klsi_105_port_settings *cfg;
unsigned long flags;
dbg("%s port %d", __func__, port->number);
/* Do a defined restart:
 * Set up sane default baud rate and send the 'READ_ON'
 * vendor command.
 * FIXME: set modem line control (how?)
 * Then read the modem line control and store values in
 * priv->line_state.
 */
/* heap buffer: handed to usb_control_msg() (presumably to satisfy
 * USB transfer-buffer requirements — stack buffers are not allowed) */
cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
dev_err(&port->dev, "%s - out of memory for config buffer.\n",
__func__);
return -ENOMEM;
}
cfg->pktlen = 5;
cfg->baudrate = kl5kusb105a_sio_b9600;
cfg->databits = kl5kusb105a_dtb_8;
cfg->unknown1 = 0;
cfg->unknown2 = 1;
klsi_105_chg_port_settings(port, cfg);
/* set up termios structure: shadow the tty settings and remember
 * what was sent to the device */
spin_lock_irqsave(&priv->lock, flags);
priv->termios.c_iflag = tty->termios->c_iflag;
priv->termios.c_oflag = tty->termios->c_oflag;
priv->termios.c_cflag = tty->termios->c_cflag;
priv->termios.c_lflag = tty->termios->c_lflag;
for (i = 0; i < NCCS; i++)
priv->termios.c_cc[i] = tty->termios->c_cc[i];
priv->cfg.pktlen = cfg->pktlen;
priv->cfg.baudrate = cfg->baudrate;
priv->cfg.databits = cfg->databits;
priv->cfg.unknown1 = cfg->unknown1;
priv->cfg.unknown2 = cfg->unknown2;
spin_unlock_irqrestore(&priv->lock, flags);
/* READ_ON and urb submission */
rc = usb_serial_generic_open(tty, port);
if (rc) {
retval = rc;
goto exit;
}
rc = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
KL5KUSB105A_SIO_CONFIGURE,
USB_TYPE_VENDOR|USB_DIR_OUT|USB_RECIP_INTERFACE,
KL5KUSB105A_SIO_CONFIGURE_READ_ON,
0, /* index */
NULL,
0,
KLSI_TIMEOUT);
if (rc < 0) {
dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc);
retval = rc;
} else
dbg("%s - enabled reading", __func__);
/* NOTE(review): if READ_ON or the line-state read fails, the port is
 * left open with the generic read URBs still running — confirm this
 * is intended, or tear down on the error path. */
rc = klsi_105_get_line_state(port, &line_state);
if (rc >= 0) {
spin_lock_irqsave(&priv->lock, flags);
priv->line_state = line_state;
spin_unlock_irqrestore(&priv->lock, flags);
dbg("%s - read line state 0x%lx", __func__, line_state);
retval = 0;
} else
retval = rc;
exit:
kfree(cfg);
return retval;
}
/*
 * Close the port: tell the device to stop reading (unless it is
 * already disconnected), then shut down the generic URB machinery.
 */
static void klsi_105_close(struct usb_serial_port *port)
{
int rc;
dbg("%s port %d", __func__, port->number);
/* disc_mutex guards against sending the request to a device that
 * is being disconnected */
mutex_lock(&port->serial->disc_mutex);
if (!port->serial->disconnected) {
/* send READ_OFF */
rc = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
KL5KUSB105A_SIO_CONFIGURE,
USB_TYPE_VENDOR | USB_DIR_OUT,
KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
0, /* index */
NULL, 0,
KLSI_TIMEOUT);
if (rc < 0)
dev_err(&port->dev,
"Disabling read failed (error = %d)\n", rc);
}
mutex_unlock(&port->serial->disc_mutex);
/* shutdown our bulk reads and writes */
usb_serial_generic_close(port);
/* wgg - do I need this? I think so. */
usb_kill_urb(port->interrupt_in_urb);
}
/* We need to write a complete 64-byte data block and encode the
* number actually sent in the first double-byte, LSB-order. That
* leaves at most 62 bytes of payload.
*/
#define KLSI_HDR_LEN 2
/*
 * Fill a bulk-out buffer: payload bytes from the write fifo go after a
 * 2-byte little-endian length header.  Returns header + payload length.
 */
static int klsi_105_prepare_write_buffer(struct usb_serial_port *port,
					 void *dest, size_t size)
{
	unsigned char *out = dest;
	int payload;

	payload = kfifo_out_locked(&port->write_fifo, out + KLSI_HDR_LEN,
				   size, &port->lock);
	put_unaligned_le16(payload, out);

	return payload + KLSI_HDR_LEN;
}
/* The data received is preceded by a length double-byte in LSB-first order.
*/
/*
 * Read completion: strip the 2-byte little-endian length header and
 * push the payload to the tty layer.  Malformed or empty packets are
 * dropped.
 */
static void klsi_105_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
struct tty_struct *tty;
unsigned len;
/* empty urbs seem to happen, we ignore them */
if (!urb->actual_length)
return;
/* need at least one payload byte beyond the header */
if (urb->actual_length <= KLSI_HDR_LEN) {
dbg("%s - malformed packet", __func__);
return;
}
tty = tty_port_tty_get(&port->port);
if (!tty)
return;
/* trust the header length only up to what actually arrived */
len = get_unaligned_le16(data);
if (len > urb->actual_length - KLSI_HDR_LEN) {
dbg("%s - packet length mismatch", __func__);
len = urb->actual_length - KLSI_HDR_LEN;
}
tty_insert_flip_string(tty, data + KLSI_HDR_LEN, len);
tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
/*
 * Apply termios changes: translate the requested baud rate and data
 * bits into the device's settings block and push it with a SET_DATA
 * request.  Parity, stop bits and hardware flow control are not
 * supported and are masked out of the termios; the #if 0 fragments are
 * non-functional templates inherited from the mct_u232 driver.
 */
static void klsi_105_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
struct ktermios *old_termios)
{
struct klsi_105_private *priv = usb_get_serial_port_data(port);
unsigned int iflag = tty->termios->c_iflag;
unsigned int old_iflag = old_termios->c_iflag;
unsigned int cflag = tty->termios->c_cflag;
unsigned int old_cflag = old_termios->c_cflag;
struct klsi_105_port_settings *cfg;
unsigned long flags;
speed_t baud;
/* heap copy of the settings, sent to the device after the lock is
 * dropped (the control message may sleep) */
cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
dev_err(&port->dev, "%s - out of memory for config buffer.\n",
__func__);
return;
}
/* lock while we are modifying the settings */
spin_lock_irqsave(&priv->lock, flags);
/*
 * Update baud rate
 */
baud = tty_get_baud_rate(tty);
if ((cflag & CBAUD) != (old_cflag & CBAUD)) {
/* reassert DTR and (maybe) RTS on transition from B0 */
if ((old_cflag & CBAUD) == B0) {
dbg("%s: baud was B0", __func__);
#if 0
priv->control_state |= TIOCM_DTR;
/* don't set RTS if using hardware flow control */
if (!(old_cflag & CRTSCTS))
priv->control_state |= TIOCM_RTS;
mct_u232_set_modem_ctrl(serial, priv->control_state);
#endif
}
}
/* map the requested rate onto the device's discrete rate codes */
switch (baud) {
case 0: /* handled below */
break;
case 1200:
priv->cfg.baudrate = kl5kusb105a_sio_b1200;
break;
case 2400:
priv->cfg.baudrate = kl5kusb105a_sio_b2400;
break;
case 4800:
priv->cfg.baudrate = kl5kusb105a_sio_b4800;
break;
case 9600:
priv->cfg.baudrate = kl5kusb105a_sio_b9600;
break;
case 19200:
priv->cfg.baudrate = kl5kusb105a_sio_b19200;
break;
case 38400:
priv->cfg.baudrate = kl5kusb105a_sio_b38400;
break;
case 57600:
priv->cfg.baudrate = kl5kusb105a_sio_b57600;
break;
case 115200:
priv->cfg.baudrate = kl5kusb105a_sio_b115200;
break;
default:
dbg("KLSI USB->Serial converter:"
" unsupported baudrate request, using default of 9600");
priv->cfg.baudrate = kl5kusb105a_sio_b9600;
baud = 9600;
break;
}
if ((cflag & CBAUD) == B0) {
dbg("%s: baud is B0", __func__);
/* Drop RTS and DTR */
/* maybe this should be simulated by sending read
 * disable and read enable messages?
 */
;
#if 0
priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
mct_u232_set_modem_ctrl(serial, priv->control_state);
#endif
}
/* report back the rate actually programmed */
tty_encode_baud_rate(tty, baud, baud);
if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
/* set the number of data bits */
switch (cflag & CSIZE) {
case CS5:
dbg("%s - 5 bits/byte not supported", __func__);
spin_unlock_irqrestore(&priv->lock, flags);
goto err;
case CS6:
dbg("%s - 6 bits/byte not supported", __func__);
spin_unlock_irqrestore(&priv->lock, flags);
goto err;
case CS7:
priv->cfg.databits = kl5kusb105a_dtb_7;
break;
case CS8:
priv->cfg.databits = kl5kusb105a_dtb_8;
break;
default:
dev_err(&port->dev,
"CSIZE was not CS5-CS8, using default of 8\n");
priv->cfg.databits = kl5kusb105a_dtb_8;
break;
}
}
/*
 * Update line control register (LCR)
 */
if ((cflag & (PARENB|PARODD)) != (old_cflag & (PARENB|PARODD))
|| (cflag & CSTOPB) != (old_cflag & CSTOPB)) {
/* Not currently supported */
tty->termios->c_cflag &= ~(PARENB|PARODD|CSTOPB);
#if 0
priv->last_lcr = 0;
/* set the parity */
if (cflag & PARENB)
priv->last_lcr |= (cflag & PARODD) ?
MCT_U232_PARITY_ODD : MCT_U232_PARITY_EVEN;
else
priv->last_lcr |= MCT_U232_PARITY_NONE;
/* set the number of stop bits */
priv->last_lcr |= (cflag & CSTOPB) ?
MCT_U232_STOP_BITS_2 : MCT_U232_STOP_BITS_1;
mct_u232_set_line_ctrl(serial, priv->last_lcr);
#endif
;
}
/*
 * Set flow control: well, I do not really now how to handle DTR/RTS.
 * Just do what we have seen with SniffUSB on Win98.
 */
if ((iflag & IXOFF) != (old_iflag & IXOFF)
|| (iflag & IXON) != (old_iflag & IXON)
|| (cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
/* Not currently supported */
tty->termios->c_cflag &= ~CRTSCTS;
/* Drop DTR/RTS if no flow control otherwise assert */
#if 0
if ((iflag & IXOFF) || (iflag & IXON) || (cflag & CRTSCTS))
priv->control_state |= TIOCM_DTR | TIOCM_RTS;
else
priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
mct_u232_set_modem_ctrl(serial, priv->control_state);
#endif
;
}
/* snapshot the settings under the lock, then send them */
memcpy(cfg, &priv->cfg, sizeof(*cfg));
spin_unlock_irqrestore(&priv->lock, flags);
/* now commit changes to device */
klsi_105_chg_port_settings(port, cfg);
err:
kfree(cfg);
}
/* NOTE(review): disabled template code borrowed from the mct_u232
 * driver, kept as a starting point should break support ever be
 * implemented (see the commented-out .break_ctl hook above). */
#if 0
static void mct_u232_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
struct mct_u232_private *priv =
(struct mct_u232_private *)port->private;
unsigned char lcr = priv->last_lcr;
dbg("%sstate=%d", __func__, break_state);
/* LOCKING */
if (break_state)
lcr |= MCT_U232_SET_BREAK;
mct_u232_set_line_ctrl(serial, lcr);
}
#endif
/*
 * TIOCMGET: poll the device for its current line state, cache it in
 * priv->line_state, and return the TIOCM_* bits (or a negative errno).
 */
static int klsi_105_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct klsi_105_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int rc;
unsigned long line_state;
dbg("%s - request, just guessing", __func__);
rc = klsi_105_get_line_state(port, &line_state);
if (rc < 0) {
dev_err(&port->dev,
"Reading line control failed (error = %d)\n", rc);
/* better return value? EAGAIN? */
return rc;
}
/* refresh the cached state under the lock */
spin_lock_irqsave(&priv->lock, flags);
priv->line_state = line_state;
spin_unlock_irqrestore(&priv->lock, flags);
dbg("%s - read line state 0x%lx", __func__, line_state);
return (int)line_state;
}
/*
 * Set/clear modem control lines (TIOCMSET).  Not implemented for this
 * hardware; always returns -EINVAL.
 *
 * A future implementation would update priv->control_state under
 * priv->lock according to the TIOCM_RTS/TIOCM_DTR bits in @set and
 * @clear, then push the new state to the device with
 * mct_u232_set_modem_ctrl(serial, control_state).
 */
static int klsi_105_tiocmset(struct tty_struct *tty,
			     unsigned int set, unsigned int clear)
{
	dbg("%s", __func__);

	return -EINVAL;
}
/*
 * Module entry point: register the usb-serial driver first, then the
 * USB interface driver.  On failure of the second step the first
 * registration is rolled back before returning the error.
 */
static int __init klsi_105_init(void)
{
	int err;

	err = usb_serial_register(&kl5kusb105d_device);
	if (err)
		return err;

	err = usb_register(&kl5kusb105d_driver);
	if (err) {
		usb_serial_deregister(&kl5kusb105d_device);
		return err;
	}

	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
	       DRIVER_DESC "\n");
	return 0;
}
/* Module exit point: unregister in reverse order of klsi_105_init(). */
static void __exit klsi_105_exit(void)
{
	usb_deregister(&kl5kusb105d_driver);
	usb_serial_deregister(&kl5kusb105d_device);
}
module_init(klsi_105_init);
module_exit(klsi_105_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

/* 'debug' is defined earlier in this file; readable/writable via sysfs */
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "enable extensive debugging messages");
| gpl-2.0 |
hyuh/m7-wls | drivers/video/fsl-diu-fb.c | 3342 | 45870 | /*
* Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
*
* Freescale DIU Frame Buffer device driver
*
* Authors: Hongjun Chen <hong-jun.chen@freescale.com>
* Paul Widmer <paul.widmer@freescale.com>
* Srikanth Srinivasan <srikanth.srinivasan@freescale.com>
* York Sun <yorksun@freescale.com>
*
* Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <sysdev/fsl_soc.h>
#include <linux/fsl-diu-fb.h>
#include "edid.h"
#define NUM_AOIS 5 /* 1 for plane 0, 2 for planes 1 & 2 each */
/* HW cursor parameters */
#define MAX_CURS 32
/* INT_STATUS/INT_MASK field descriptions */
#define INT_VSYNC 0x01 /* Vsync interrupt */
#define INT_VSYNC_WB 0x02 /* Vsync interrupt for write back operation */
#define INT_UNDRUN 0x04 /* Under run exception interrupt */
#define INT_PARERR 0x08 /* Display parameters error interrupt */
#define INT_LS_BF_VS 0x10 /* Lines before vsync. interrupt */
/*
* List of supported video modes
*
* The first entry is the default video mode. The remain entries are in
* order if increasing resolution and frequency. The 320x240-60 mode is
* the initial AOI for the second and third planes.
*/
static struct fb_videomode __devinitdata fsl_diu_mode_db[] = {
	{	/* 1024x768 @ 60 Hz — default mode (first entry) */
		.refresh	= 60,
		.xres		= 1024,
		.yres		= 768,
		.pixclock	= 15385,
		.left_margin	= 160,
		.right_margin	= 24,
		.upper_margin	= 29,
		.lower_margin	= 3,
		.hsync_len	= 136,
		.vsync_len	= 6,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 320x240 @ 60 Hz — initial AOI mode for planes 1 and 2 */
		.refresh	= 60,
		.xres		= 320,
		.yres		= 240,
		.pixclock	= 79440,
		.left_margin	= 16,
		.right_margin	= 16,
		.upper_margin	= 16,
		.lower_margin	= 5,
		.hsync_len	= 48,
		.vsync_len	= 1,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 640x480 @ 60 Hz */
		.refresh	= 60,
		.xres		= 640,
		.yres		= 480,
		.pixclock	= 39722,
		.left_margin	= 48,
		.right_margin	= 16,
		.upper_margin	= 33,
		.lower_margin	= 10,
		.hsync_len	= 96,
		.vsync_len	= 2,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 640x480 @ 72 Hz */
		.refresh	= 72,
		.xres		= 640,
		.yres		= 480,
		.pixclock	= 32052,
		.left_margin	= 128,
		.right_margin	= 24,
		.upper_margin	= 28,
		.lower_margin	= 9,
		.hsync_len	= 40,
		.vsync_len	= 3,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 640x480 @ 75 Hz */
		.refresh	= 75,
		.xres		= 640,
		.yres		= 480,
		.pixclock	= 31747,
		.left_margin	= 120,
		.right_margin	= 16,
		.upper_margin	= 16,
		.lower_margin	= 1,
		.hsync_len	= 64,
		.vsync_len	= 3,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 640x480 @ 90 Hz */
		.refresh	= 90,
		.xres		= 640,
		.yres		= 480,
		.pixclock	= 25057,
		.left_margin	= 120,
		.right_margin	= 32,
		.upper_margin	= 14,
		.lower_margin	= 25,
		.hsync_len	= 40,
		.vsync_len	= 14,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 640x480 @ 100 Hz */
		.refresh	= 100,
		.xres		= 640,
		.yres		= 480,
		.pixclock	= 22272,
		.left_margin	= 48,
		.right_margin	= 32,
		.upper_margin	= 17,
		.lower_margin	= 22,
		.hsync_len	= 128,
		.vsync_len	= 12,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 800x480 @ 60 Hz */
		.refresh	= 60,
		.xres		= 800,
		.yres		= 480,
		.pixclock	= 33805,
		.left_margin	= 96,
		.right_margin	= 24,
		.upper_margin	= 10,
		.lower_margin	= 3,
		.hsync_len	= 72,
		.vsync_len	= 7,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 800x600 @ 60 Hz */
		.refresh	= 60,
		.xres		= 800,
		.yres		= 600,
		.pixclock	= 25000,
		.left_margin	= 88,
		.right_margin	= 40,
		.upper_margin	= 23,
		.lower_margin	= 1,
		.hsync_len	= 128,
		.vsync_len	= 4,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 854x480 @ 60 Hz */
		.refresh	= 60,
		.xres		= 854,
		.yres		= 480,
		.pixclock	= 31518,
		.left_margin	= 104,
		.right_margin	= 16,
		.upper_margin	= 13,
		.lower_margin	= 1,
		.hsync_len	= 88,
		.vsync_len	= 3,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 1024x768 @ 70 Hz */
		.refresh	= 70,
		.xres		= 1024,
		.yres		= 768,
		.pixclock	= 16886,
		.left_margin	= 3,
		.right_margin	= 3,
		.upper_margin	= 2,
		.lower_margin	= 2,
		.hsync_len	= 40,
		.vsync_len	= 18,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 1024x768 @ 75 Hz */
		.refresh	= 75,
		.xres		= 1024,
		.yres		= 768,
		.pixclock	= 15009,
		.left_margin	= 3,
		.right_margin	= 3,
		.upper_margin	= 2,
		.lower_margin	= 2,
		.hsync_len	= 80,
		.vsync_len	= 32,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 1280x480 @ 60 Hz */
		.refresh	= 60,
		.xres		= 1280,
		.yres		= 480,
		.pixclock	= 18939,
		.left_margin	= 353,
		.right_margin	= 47,
		.upper_margin	= 39,
		.lower_margin	= 4,
		.hsync_len	= 8,
		.vsync_len	= 2,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 1280x720 @ 60 Hz */
		.refresh	= 60,
		.xres		= 1280,
		.yres		= 720,
		.pixclock	= 13426,
		.left_margin	= 192,
		.right_margin	= 64,
		.upper_margin	= 22,
		.lower_margin	= 1,
		.hsync_len	= 136,
		.vsync_len	= 3,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 1280x1024 @ 60 Hz */
		.refresh	= 60,
		.xres		= 1280,
		.yres		= 1024,
		.pixclock	= 9375,
		.left_margin	= 38,
		.right_margin	= 128,
		.upper_margin	= 2,
		.lower_margin	= 7,
		.hsync_len	= 216,
		.vsync_len	= 37,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 1280x1024 @ 70 Hz */
		.refresh	= 70,
		.xres		= 1280,
		.yres		= 1024,
		.pixclock	= 9380,
		.left_margin	= 6,
		.right_margin	= 6,
		.upper_margin	= 4,
		.lower_margin	= 4,
		.hsync_len	= 60,
		.vsync_len	= 94,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 1280x1024 @ 75 Hz */
		.refresh	= 75,
		.xres		= 1280,
		.yres		= 1024,
		.pixclock	= 9380,
		.left_margin	= 6,
		.right_margin	= 6,
		.upper_margin	= 4,
		.lower_margin	= 4,
		.hsync_len	= 60,
		.vsync_len	= 15,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
	{	/* 1920x1080 @ 60 Hz */
		.refresh	= 60,
		.xres		= 1920,
		.yres		= 1080,
		.pixclock	= 5787,
		.left_margin	= 328,
		.right_margin	= 120,
		.upper_margin	= 34,
		.lower_margin	= 1,
		.hsync_len	= 208,
		.vsync_len	= 3,
		.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode		= FB_VMODE_NONINTERLACED
	},
};
/* Driver configuration variables (presumably module/boot parameters —
 * their module_param declarations are not visible in this chunk). */
static char *fb_mode;				/* requested video mode string */
static unsigned long default_bpp = 32;		/* fallback bits-per-pixel */
static enum fsl_diu_monitor_port monitor_port;	/* parsed from monitor_string */
static char *monitor_string;			/* raw monitor port name */

#if defined(CONFIG_NOT_COHERENT_CACHE)
/* Scratch state for manual cache coherence on non-coherent platforms */
static u8 *coherence_data;
static size_t coherence_data_size;
static unsigned int d_cache_line_size;
#endif

/* Serializes the per-AOI open/close bookkeeping (see fsl_diu_open/release) */
static DEFINE_SPINLOCK(diu_lock);
/* Index of each framebuffer (one per AOI) within a DIU instance */
enum mfb_index {
	PLANE0 = 0,	/* Plane 0, only one AOI that fills the screen */
	PLANE1_AOI0,	/* Plane 1, first AOI */
	PLANE1_AOI1,	/* Plane 1, second AOI */
	PLANE2_AOI0,	/* Plane 2, first AOI */
	PLANE2_AOI1,	/* Plane 2, second AOI */
};
/* Per-AOI private data, stored in fb_info->par */
struct mfb_info {
	enum mfb_index index;		/* which plane/AOI this is */
	char *id;			/* human-readable name ("Panel1 AOI0"...) */
	int registered;			/* set once the fb device is registered */
	unsigned long pseudo_palette[16];	/* truecolor pseudo palette */
	struct diu_ad *ad;		/* this AOI's area descriptor */
	int cursor_reset;
	unsigned char g_alpha;		/* global alpha for this AOI */
	unsigned int count;		/* open count, guarded by diu_lock */
	int x_aoi_d;	/* aoi display x offset to physical screen */
	int y_aoi_d;	/* aoi display y offset to physical screen */
	struct fsl_diu_data *parent;	/* back-pointer to the owning DIU */
	u8 *edid_data;			/* EDID blob for mode probing, or NULL */
};
/**
 * struct fsl_diu_data - per-DIU data structure
 * @dma_addr: DMA address of this structure
 * @fsl_diu_info: fb_info objects, one per AOI
 * @mfb: per-AOI private data, referenced from @fsl_diu_info
 * @dev_attr: sysfs structure
 * @irq: IRQ
 * @monitor_port: the monitor port this DIU is connected to
 * @diu_reg: pointer to the DIU hardware registers
 * @reg_lock: spinlock for register access
 * @dummy_aoi: video buffer for the 4x4 32-bit dummy AOI
 * @dummy_ad: DIU Area Descriptor for the dummy AOI
 * @ad: Area Descriptors for each real AOI
 * @gamma: gamma color table
 * @cursor: hardware cursor data
 *
 * This data structure must be allocated with 32-byte alignment, so that the
 * internal fields can be aligned properly.
 */
struct fsl_diu_data {
	dma_addr_t dma_addr;
	struct fb_info fsl_diu_info[NUM_AOIS];
	struct mfb_info mfb[NUM_AOIS];
	struct device_attribute dev_attr;
	unsigned int irq;
	enum fsl_diu_monitor_port monitor_port;
	struct diu __iomem *diu_reg;
	spinlock_t reg_lock;
	u8 dummy_aoi[4 * 4 * 4];
	struct diu_ad dummy_ad __aligned(8);
	struct diu_ad ad[NUM_AOIS] __aligned(8);
	u8 gamma[256 * 3] __aligned(32);
	u8 cursor[MAX_CURS * MAX_CURS * 2] __aligned(32);
} __aligned(32);

/* Determine the DMA address of a member of the fsl_diu_data structure */
#define DMA_ADDR(p, f) ((p)->dma_addr + offsetof(struct fsl_diu_data, f))
/*
 * Initial per-AOI settings, copied into fsl_diu_data.mfb[] at probe time.
 * The default AOI offsets place the four overlay AOIs in the four
 * quadrants of a 1280x960 area (640/480 steps).
 */
static struct mfb_info mfb_template[] = {
	{	/* full-screen base plane */
		.index = PLANE0,
		.id = "Panel0",
		.registered = 0,
		.count = 0,
		.x_aoi_d = 0,
		.y_aoi_d = 0,
	},
	{	/* plane 1, upper AOI, fully opaque by default */
		.index = PLANE1_AOI0,
		.id = "Panel1 AOI0",
		.registered = 0,
		.g_alpha = 0xff,
		.count = 0,
		.x_aoi_d = 0,
		.y_aoi_d = 0,
	},
	{	/* plane 1, lower AOI */
		.index = PLANE1_AOI1,
		.id = "Panel1 AOI1",
		.registered = 0,
		.g_alpha = 0xff,
		.count = 0,
		.x_aoi_d = 0,
		.y_aoi_d = 480,
	},
	{	/* plane 2, upper AOI */
		.index = PLANE2_AOI0,
		.id = "Panel2 AOI0",
		.registered = 0,
		.g_alpha = 0xff,
		.count = 0,
		.x_aoi_d = 640,
		.y_aoi_d = 0,
	},
	{	/* plane 2, lower AOI */
		.index = PLANE2_AOI1,
		.id = "Panel2 AOI1",
		.registered = 0,
		.g_alpha = 0xff,
		.count = 0,
		.x_aoi_d = 640,
		.y_aoi_d = 480,
	},
};
/**
 * fsl_diu_name_to_port - convert a port name to a monitor port enum
 *
 * Takes the name of a monitor port ("dvi", "lvds", or "dlvds") and returns
 * the enum fsl_diu_monitor_port that corresponds to that string.
 *
 * For compatibility with older versions, a number ("0", "1", or "2") is also
 * supported.
 *
 * If the string is unknown, DVI is assumed.
 *
 * If the particular port is not supported by the platform, another port
 * (platform-specific) is chosen instead.
 */
static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
{
	unsigned long index;
	enum fsl_diu_monitor_port port = FSL_DIU_PORT_DVI;

	if (s) {
		/* legacy numeric form first, then the named ports */
		if (!strict_strtoul(s, 10, &index) && (index <= 2))
			port = (enum fsl_diu_monitor_port)index;
		else if (!strncmp(s, "lvds", 4))
			port = FSL_DIU_PORT_LVDS;
		else if (!strncmp(s, "dlvds", 5))
			port = FSL_DIU_PORT_DLVDS;
	}

	/* let the platform veto/replace unsupported ports */
	return diu_ops.valid_monitor_port(port);
}
/*
 * Workaround for failed writing desc register of planes.
 * Needed with MPC5121 DIU rev 2.0 silicon.
 *
 * Keep rewriting the register until a read-back confirms the value
 * actually stuck.
 */
void wr_reg_wa(u32 *reg, u32 val)
{
	for (;;) {
		out_be32(reg, val);
		if (in_be32(reg) == val)
			break;
	}
}
/*
 * Make an AOI visible by linking its area descriptor into the DIU's
 * descriptor chain for the owning plane.  Each plane register (hw->desc[n])
 * points at the first AOI descriptor; a second AOI is chained via the
 * first descriptor's next_ad field.  The exact linking depends on which
 * sibling AOIs are currently open.
 */
static void fsl_diu_enable_panel(struct fb_info *info)
{
	struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
	struct diu_ad *ad = mfbi->ad;
	struct fsl_diu_data *data = mfbi->parent;
	struct diu __iomem *hw = data->diu_reg;

	switch (mfbi->index) {
	case PLANE0:
		/* base plane: just point the plane register at our AD */
		if (hw->desc[0] != ad->paddr)
			wr_reg_wa(&hw->desc[0], ad->paddr);
		break;
	case PLANE1_AOI0:
		cmfbi = &data->mfb[2];
		if (hw->desc[1] != ad->paddr) {	/* AOI0 closed */
			if (cmfbi->count > 0)	/* AOI1 open */
				ad->next_ad =
					cpu_to_le32(cmfbi->ad->paddr);
			else
				ad->next_ad = 0;
			wr_reg_wa(&hw->desc[1], ad->paddr);
		}
		break;
	case PLANE2_AOI0:
		cmfbi = &data->mfb[4];
		if (hw->desc[2] != ad->paddr) {	/* AOI0 closed */
			if (cmfbi->count > 0)	/* AOI1 open */
				ad->next_ad =
					cpu_to_le32(cmfbi->ad->paddr);
			else
				ad->next_ad = 0;
			wr_reg_wa(&hw->desc[2], ad->paddr);
		}
		break;
	case PLANE1_AOI1:
		pmfbi = &data->mfb[1];
		ad->next_ad = 0;	/* second AOI is always the tail */
		if (hw->desc[1] == data->dummy_ad.paddr)
			wr_reg_wa(&hw->desc[1], ad->paddr);
		else			/* AOI0 open */
			pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
		break;
	case PLANE2_AOI1:
		pmfbi = &data->mfb[3];
		ad->next_ad = 0;	/* second AOI is always the tail */
		if (hw->desc[2] == data->dummy_ad.paddr)
			wr_reg_wa(&hw->desc[2], ad->paddr);
		else			/* AOI0 was open */
			pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
		break;
	}
}
/*
 * Hide an AOI by unlinking its area descriptor from the plane's
 * descriptor chain.  When the last AOI of a plane is closed, the plane
 * register is pointed at the 4x4 dummy descriptor so the hardware always
 * has something valid to fetch.  Inverse of fsl_diu_enable_panel().
 */
static void fsl_diu_disable_panel(struct fb_info *info)
{
	struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
	struct diu_ad *ad = mfbi->ad;
	struct fsl_diu_data *data = mfbi->parent;
	struct diu __iomem *hw = data->diu_reg;

	switch (mfbi->index) {
	case PLANE0:
		/* base plane: replace our AD with the dummy AD */
		if (hw->desc[0] != data->dummy_ad.paddr)
			wr_reg_wa(&hw->desc[0], data->dummy_ad.paddr);
		break;
	case PLANE1_AOI0:
		cmfbi = &data->mfb[2];
		if (cmfbi->count > 0)	/* AOI1 is open */
			wr_reg_wa(&hw->desc[1], cmfbi->ad->paddr);
					/* move AOI1 to the first */
		else			/* AOI1 was closed */
			wr_reg_wa(&hw->desc[1], data->dummy_ad.paddr);
					/* close AOI 0 */
		break;
	case PLANE2_AOI0:
		cmfbi = &data->mfb[4];
		if (cmfbi->count > 0)	/* AOI1 is open */
			wr_reg_wa(&hw->desc[2], cmfbi->ad->paddr);
					/* move AOI1 to the first */
		else			/* AOI1 was closed */
			wr_reg_wa(&hw->desc[2], data->dummy_ad.paddr);
					/* close AOI 0 */
		break;
	case PLANE1_AOI1:
		pmfbi = &data->mfb[1];
		if (hw->desc[1] != ad->paddr) {
			/* AOI1 is not the first in the chain */
			if (pmfbi->count > 0)
				/* AOI0 is open, must be the first */
				pmfbi->ad->next_ad = 0;
		} else	/* AOI1 is the first in the chain */
			wr_reg_wa(&hw->desc[1], data->dummy_ad.paddr);
			/* close AOI 1 */
		break;
	case PLANE2_AOI1:
		pmfbi = &data->mfb[3];
		if (hw->desc[2] != ad->paddr) {
			/* AOI1 is not the first in the chain */
			if (pmfbi->count > 0)
				/* AOI0 is open, must be the first */
				pmfbi->ad->next_ad = 0;
		} else	/* AOI1 is the first in the chain */
			wr_reg_wa(&hw->desc[2], data->dummy_ad.paddr);
			/* close AOI 1 */
		break;
	}
}
/* Turn the DIU on by programming normal plane mode into DIU_MODE */
static void enable_lcdc(struct fb_info *info)
{
	struct mfb_info *mfbi = info->par;
	struct diu __iomem *hw = mfbi->parent->diu_reg;

	out_be32(&hw->diu_mode, MFB_MODE1);
}
/* Turn the DIU off by writing mode 0 (disabled) into DIU_MODE */
static void disable_lcdc(struct fb_info *info)
{
	struct mfb_info *mfbi = info->par;
	struct diu __iomem *hw = mfbi->parent->diu_reg;

	out_be32(&hw->diu_mode, 0);
}
/*
 * Clamp an AOI's size and display offset so it stays inside the base
 * plane and does not overlap the sibling AOI on the same plane: an
 * upper AOI (AOI0) may not extend below an open lower AOI (AOI1), and a
 * lower AOI may not start above the bottom of an open upper AOI.
 * Modifies both @var (xres/yres) and mfbi->{x,y}_aoi_d in place.
 */
static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
				     struct fb_info *info)
{
	struct mfb_info *lower_aoi_mfbi, *upper_aoi_mfbi, *mfbi = info->par;
	struct fsl_diu_data *data = mfbi->parent;
	int available_height, upper_aoi_bottom;
	enum mfb_index index = mfbi->index;
	int lower_aoi_is_open, upper_aoi_is_open;
	__u32 base_plane_width, base_plane_height, upper_aoi_height;

	base_plane_width = data->fsl_diu_info[0].var.xres;
	base_plane_height = data->fsl_diu_info[0].var.yres;

	/* offsets are ints and may legitimately be negative on entry */
	if (mfbi->x_aoi_d < 0)
		mfbi->x_aoi_d = 0;
	if (mfbi->y_aoi_d < 0)
		mfbi->y_aoi_d = 0;
	switch (index) {
	case PLANE0:
		/* the base plane always fills the screen at (0, 0) */
		if (mfbi->x_aoi_d != 0)
			mfbi->x_aoi_d = 0;
		if (mfbi->y_aoi_d != 0)
			mfbi->y_aoi_d = 0;
		break;
	case PLANE1_AOI0:
	case PLANE2_AOI0:
		/* upper AOI: limited below by an open lower AOI */
		lower_aoi_mfbi = data->fsl_diu_info[index+1].par;
		lower_aoi_is_open = lower_aoi_mfbi->count > 0 ? 1 : 0;
		if (var->xres > base_plane_width)
			var->xres = base_plane_width;
		if ((mfbi->x_aoi_d + var->xres) > base_plane_width)
			mfbi->x_aoi_d = base_plane_width - var->xres;
		if (lower_aoi_is_open)
			available_height = lower_aoi_mfbi->y_aoi_d;
		else
			available_height = base_plane_height;
		if (var->yres > available_height)
			var->yres = available_height;
		if ((mfbi->y_aoi_d + var->yres) > available_height)
			mfbi->y_aoi_d = available_height - var->yres;
		break;
	case PLANE1_AOI1:
	case PLANE2_AOI1:
		/* lower AOI: limited above by an open upper AOI */
		upper_aoi_mfbi = data->fsl_diu_info[index-1].par;
		upper_aoi_height = data->fsl_diu_info[index-1].var.yres;
		upper_aoi_bottom = upper_aoi_mfbi->y_aoi_d + upper_aoi_height;
		upper_aoi_is_open = upper_aoi_mfbi->count > 0 ? 1 : 0;
		if (var->xres > base_plane_width)
			var->xres = base_plane_width;
		if ((mfbi->x_aoi_d + var->xres) > base_plane_width)
			mfbi->x_aoi_d = base_plane_width - var->xres;
		if (mfbi->y_aoi_d < 0)
			mfbi->y_aoi_d = 0;
		if (upper_aoi_is_open) {
			if (mfbi->y_aoi_d < upper_aoi_bottom)
				mfbi->y_aoi_d = upper_aoi_bottom;
			available_height = base_plane_height
						- upper_aoi_bottom;
		} else
			available_height = base_plane_height;
		if (var->yres > available_height)
			var->yres = available_height;
		if ((mfbi->y_aoi_d + var->yres) > base_plane_height)
			mfbi->y_aoi_d = base_plane_height - var->yres;
		break;
	}
}
/*
 * Checks to see if the hardware supports the state requested by var passed
 * in. This function does not alter the hardware state! If the var passed in
 * is slightly off by what the hardware can support then we alter the var
 * PASSED in to what we can do. If the hardware doesn't support mode change
 * a -EINVAL will be returned by the upper layers.
 */
static int fsl_diu_check_var(struct fb_var_screeninfo *var,
			     struct fb_info *info)
{
	if (var->xres_virtual < var->xres)
		var->xres_virtual = var->xres;
	if (var->yres_virtual < var->yres)
		var->yres_virtual = var->yres;

	/*
	 * xoffset/yoffset are __u32 and can never be negative, so the
	 * former "< 0" checks were always false and have been removed.
	 * Clamp the requested offsets against the current virtual size.
	 */
	if (var->xoffset + info->var.xres > info->var.xres_virtual)
		var->xoffset = info->var.xres_virtual - info->var.xres;

	if (var->yoffset + info->var.yres > info->var.yres_virtual)
		var->yoffset = info->var.yres_virtual - info->var.yres;

	/* only 16, 24 and 32 bpp are supported; fall back to the default */
	if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) &&
	    (var->bits_per_pixel != 16))
		var->bits_per_pixel = default_bpp;

	/* fill in the RGBA bitfield layout for the chosen depth */
	switch (var->bits_per_pixel) {
	case 16:		/* RGB565 */
		var->red.length = 5;
		var->red.offset = 11;
		var->red.msb_right = 0;

		var->green.length = 6;
		var->green.offset = 5;
		var->green.msb_right = 0;

		var->blue.length = 5;
		var->blue.offset = 0;
		var->blue.msb_right = 0;

		var->transp.length = 0;
		var->transp.offset = 0;
		var->transp.msb_right = 0;
		break;
	case 24:		/* BGR888 */
		var->red.length = 8;
		var->red.offset = 0;
		var->red.msb_right = 0;

		var->green.length = 8;
		var->green.offset = 8;
		var->green.msb_right = 0;

		var->blue.length = 8;
		var->blue.offset = 16;
		var->blue.msb_right = 0;

		var->transp.length = 0;
		var->transp.offset = 0;
		var->transp.msb_right = 0;
		break;
	case 32:		/* ARGB8888 */
		var->red.length = 8;
		var->red.offset = 16;
		var->red.msb_right = 0;

		var->green.length = 8;
		var->green.offset = 8;
		var->green.msb_right = 0;

		var->blue.length = 8;
		var->blue.offset = 0;
		var->blue.msb_right = 0;

		var->transp.length = 8;
		var->transp.offset = 24;
		var->transp.msb_right = 0;
		break;
	}

	var->height = -1;
	var->width = -1;
	var->grayscale = 0;

	/* Copy nonstd field to/from sync for fbset usage */
	var->sync |= var->nonstd;
	var->nonstd |= var->sync;

	adjust_aoi_size_position(var, info);
	return 0;
}
/*
 * Populate the fixed screen information (fb_fix_screeninfo) from the
 * current variable settings and the AOI's identification string.
 */
static void set_fix(struct fb_info *info)
{
	struct fb_fix_screeninfo *fix = &info->fix;
	struct fb_var_screeninfo *var = &info->var;
	struct mfb_info *mfbi = info->par;

	/*
	 * snprintf (unlike the former strncpy) guarantees that fix->id is
	 * NUL-terminated even if mfbi->id is longer than the field.
	 */
	snprintf(fix->id, sizeof(fix->id), "%s", mfbi->id);
	fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
	fix->type = FB_TYPE_PACKED_PIXELS;
	fix->accel = FB_ACCEL_NONE;
	fix->visual = FB_VISUAL_TRUECOLOR;
	fix->xpanstep = 1;
	fix->ypanstep = 1;
}
/*
 * Reprogram the DIU controller for the current mode of the base plane:
 * gamma/cursor tables, display size, sync timing and pixel clock.  The
 * controller is disabled around the register writes and re-enabled at
 * the end.  Only called for PLANE0 (see fsl_diu_set_par).
 */
static void update_lcdc(struct fb_info *info)
{
	struct fb_var_screeninfo *var = &info->var;
	struct mfb_info *mfbi = info->par;
	struct fsl_diu_data *data = mfbi->parent;
	struct diu __iomem *hw;
	int i, j;
	u8 *gamma_table_base;
	u32 temp;

	hw = data->diu_reg;

	diu_ops.set_monitor_port(data->monitor_port);
	gamma_table_base = data->gamma;

	/* Prep for DIU init - gamma table, cursor table */
	/* identity gamma ramp: three 256-entry channels, value == index */
	for (i = 0; i <= 2; i++)
		for (j = 0; j <= 255; j++)
			*gamma_table_base++ = j;

	/* optional platform hook to post-process the gamma table */
	if (diu_ops.set_gamma_table)
		diu_ops.set_gamma_table(data->monitor_port, data->gamma);

	disable_lcdc(info);

	/* Program DIU registers */
	out_be32(&hw->gamma, DMA_ADDR(data, gamma));
	out_be32(&hw->cursor, DMA_ADDR(data, cursor));

	out_be32(&hw->bgnd, 0x007F7F7F);	/* BGND */
	out_be32(&hw->bgnd_wb, 0);		/* BGND_WB */
	out_be32(&hw->disp_size, (var->yres << 16 | var->xres));
						/* DISP SIZE */
	out_be32(&hw->wb_size, 0);		/* WB SIZE */
	out_be32(&hw->wb_mem_addr, 0);		/* WB MEM ADDR */

	/* Horizontal and vertical configuration register */
	temp = var->left_margin << 22 |		/* BP_H */
	       var->hsync_len << 11 |		/* PW_H */
	       var->right_margin;		/* FP_H */
	out_be32(&hw->hsyn_para, temp);

	temp = var->upper_margin << 22 |	/* BP_V */
	       var->vsync_len << 11 |		/* PW_V */
	       var->lower_margin;		/* FP_V */
	out_be32(&hw->vsyn_para, temp);

	diu_ops.set_pixel_clock(var->pixclock);

	out_be32(&hw->syn_pol, 0);		/* SYNC SIGNALS POLARITY */
	out_be32(&hw->thresholds, 0x00037800);	/* The Thresholds */
	out_be32(&hw->int_status, 0);		/* INTERRUPT STATUS */
	out_be32(&hw->plut, 0x01F5F666);

	/* Enable the DIU */
	enable_lcdc(info);
}
/*
 * Allocate the framebuffer memory for this AOI (zeroed, DMA-capable)
 * and publish it in info->fix/screen_base under mm_lock.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int map_video_memory(struct fb_info *info)
{
	u32 len = info->fix.line_length * info->var.yres_virtual;
	void *vaddr;

	vaddr = alloc_pages_exact(len, GFP_DMA | __GFP_ZERO);
	if (!vaddr) {
		dev_err(info->dev, "unable to allocate fb memory\n");
		return -ENOMEM;
	}

	mutex_lock(&info->mm_lock);
	info->screen_base = vaddr;
	info->fix.smem_start = virt_to_phys(vaddr);
	info->fix.smem_len = len;
	mutex_unlock(&info->mm_lock);

	info->screen_size = info->fix.smem_len;
	return 0;
}
/*
 * Release the framebuffer memory allocated by map_video_memory() and
 * clear the published addresses under mm_lock.  Safe to call when no
 * memory is currently mapped.
 */
static void unmap_video_memory(struct fb_info *info)
{
	void *vaddr = info->screen_base;
	size_t len = info->fix.smem_len;

	mutex_lock(&info->mm_lock);
	info->screen_base = NULL;
	info->fix.smem_start = 0;
	info->fix.smem_len = 0;
	mutex_unlock(&info->mm_lock);

	if (vaddr)
		free_pages_exact(vaddr, len);
}
/*
* Using the fb_var_screeninfo in fb_info we set the aoi of this
* particular framebuffer. It is a light version of fsl_diu_set_par.
*/
static int fsl_diu_set_aoi(struct fb_info *info)
{
struct fb_var_screeninfo *var = &info->var;
struct mfb_info *mfbi = info->par;
struct diu_ad *ad = mfbi->ad;
/* AOI should not be greater than display size */
ad->offset_xyi = cpu_to_le32((var->yoffset << 16) | var->xoffset);
ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d);
return 0;
}
/**
 * fsl_diu_get_pixel_format: return the pixel format for a given color depth
 *
 * The pixel format is a 32-bit value that determine which bits in each
 * pixel are to be used for each color. This is the default function used
 * if the platform does not define its own version.
 *
 * Returns 0 (and logs an error) for unsupported depths.
 */
static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
{
/* Bit-field layout of the DIU pixel-format register value */
#define PF_BYTE_F		0x10000000
#define PF_ALPHA_C_MASK		0x0E000000
#define PF_ALPHA_C_SHIFT	25
#define PF_BLUE_C_MASK		0x01800000
#define PF_BLUE_C_SHIFT		23
#define PF_GREEN_C_MASK		0x00600000
#define PF_GREEN_C_SHIFT	21
#define PF_RED_C_MASK		0x00180000
#define PF_RED_C_SHIFT		19
#define PF_PALETTE		0x00040000
#define PF_PIXEL_S_MASK		0x00030000
#define PF_PIXEL_S_SHIFT	16
#define PF_COMP_3_MASK		0x0000F000
#define PF_COMP_3_SHIFT		12
#define PF_COMP_2_MASK		0x00000F00
#define PF_COMP_2_SHIFT		8
#define PF_COMP_1_MASK		0x000000F0
#define PF_COMP_1_SHIFT		4
#define PF_COMP_0_MASK		0x0000000F
#define PF_COMP_0_SHIFT		0

/* Assemble a little-endian pixel-format word from component positions
 * (alpha/red/blue/green), pixel size code and the four component widths. */
#define MAKE_PF(alpha, red, blue, green, size, c0, c1, c2, c3) \
	cpu_to_le32(PF_BYTE_F | (alpha << PF_ALPHA_C_SHIFT) | \
	(blue << PF_BLUE_C_SHIFT) | (green << PF_GREEN_C_SHIFT) | \
	(red << PF_RED_C_SHIFT) | (c3 << PF_COMP_3_SHIFT) | \
	(c2 << PF_COMP_2_SHIFT) | (c1 << PF_COMP_1_SHIFT) | \
	(c0 << PF_COMP_0_SHIFT) | (size << PF_PIXEL_S_SHIFT))

	switch (bits_per_pixel) {
	case 32:
		/* 0x88883316 */
		return MAKE_PF(3, 2, 0, 1, 3, 8, 8, 8, 8);
	case 24:
		/* 0x88082219 */
		return MAKE_PF(4, 0, 1, 2, 2, 0, 8, 8, 8);
	case 16:
		/* 0x65053118 */
		return MAKE_PF(4, 2, 1, 0, 1, 5, 6, 5, 0);
	default:
		pr_err("fsl-diu: unsupported color depth %u\n", bits_per_pixel);
		return 0;
	}
}
/*
 * Using the fb_var_screeninfo in fb_info we set the resolution of this
 * particular framebuffer. This function alters the fb_fix_screeninfo stored
 * in fb_info. It does not alter var in fb_info since we are using that
 * data. This means we depend on the data in var inside fb_info to be
 * supported by the hardware. fsl_diu_check_var is always called before
 * fsl_diu_set_par to ensure this.
 */
static int fsl_diu_set_par(struct fb_info *info)
{
	unsigned long len;
	struct fb_var_screeninfo *var = &info->var;
	struct mfb_info *mfbi = info->par;
	struct fsl_diu_data *data = mfbi->parent;
	struct diu_ad *ad = mfbi->ad;
	struct diu __iomem *hw;

	hw = data->diu_reg;

	set_fix(info);
	mfbi->cursor_reset = 1;

	len = info->var.yres_virtual * info->fix.line_length;
	/* Alloc & dealloc each time resolution/bpp change */
	if (len != info->fix.smem_len) {
		if (info->fix.smem_start)
			unmap_video_memory(info);

		/* Memory allocation for framebuffer */
		if (map_video_memory(info)) {
			dev_err(info->dev, "unable to allocate fb memory 1\n");
			return -ENOMEM;
		}
	}

	/* platform hook takes precedence over the generic format builder */
	if (diu_ops.get_pixel_format)
		ad->pix_fmt = diu_ops.get_pixel_format(data->monitor_port,
						       var->bits_per_pixel);
	else
		ad->pix_fmt = fsl_diu_get_pixel_format(var->bits_per_pixel);

	/* refresh this AOI's area descriptor for the new geometry */
	ad->addr = cpu_to_le32(info->fix.smem_start);
	ad->src_size_g_alpha = cpu_to_le32((var->yres_virtual << 12) |
				var->xres_virtual) | mfbi->g_alpha;

	/* AOI should not be greater than display size */
	ad->aoi_size = cpu_to_le32((var->yres << 16) | var->xres);
	ad->offset_xyi = cpu_to_le32((var->yoffset << 16) | var->xoffset);
	ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d);

	/* Disable chroma keying function */
	ad->ckmax_r = 0;
	ad->ckmax_g = 0;
	ad->ckmax_b = 0;

	ad->ckmin_r = 255;
	ad->ckmin_g = 255;
	ad->ckmin_b = 255;

	/* only the base plane drives the controller-wide timing */
	if (mfbi->index == PLANE0)
		update_lcdc(info);
	return 0;
}
/* Scale a 16-bit color component down to a hardware field of 'width'
 * bits, rounding up (the +0x7FFF-val term implements the rounding). */
static inline __u32 CNVT_TOHW(__u32 val, __u32 width)
{
	return ((val << width) + 0x7FFF - val) >> 16;
}
/*
 * Set a single color register. The values supplied have a 16 bit magnitude
 * which needs to be scaled in this function for the hardware. Only
 * truecolor visuals are supported: the scaled RGBA value is packed
 * according to the var bitfield layout and stored in the pseudo palette
 * (entries 0..15).  Returns 0 on success, 1 for unsupported visuals or
 * out-of-range register numbers.
 */
static int fsl_diu_setcolreg(unsigned int regno, unsigned int red,
			     unsigned int green, unsigned int blue,
			     unsigned int transp, struct fb_info *info)
{
	u32 *palette;
	u32 packed;

	/*
	 * If greyscale is true, then we convert the RGB value
	 * to greyscale no matter what visual we are using.
	 */
	if (info->var.grayscale)
		red = green = blue = (19595 * red + 38470 * green +
				      7471 * blue) >> 16;

	/* only the 16-entry truecolor pseudo palette is supported */
	if (info->fix.visual != FB_VISUAL_TRUECOLOR || regno >= 16)
		return 1;

	palette = info->pseudo_palette;

	red = CNVT_TOHW(red, info->var.red.length);
	green = CNVT_TOHW(green, info->var.green.length);
	blue = CNVT_TOHW(blue, info->var.blue.length);
	transp = CNVT_TOHW(transp, info->var.transp.length);

	packed = (red << info->var.red.offset) |
		 (green << info->var.green.offset) |
		 (blue << info->var.blue.offset) |
		 (transp << info->var.transp.offset);

	palette[regno] = packed;
	return 0;
}
/*
 * Pan (or wrap, depending on the `vmode' field) the display using the
 * 'xoffset' and 'yoffset' fields of the 'var' structure. If the values
 * don't fit, return -EINVAL.
 */
static int fsl_diu_pan_display(struct fb_var_screeninfo *var,
			       struct fb_info *info)
{
	if ((info->var.xoffset == var->xoffset) &&
	    (info->var.yoffset == var->yoffset))
		return 0;	/* No change, do nothing */

	/*
	 * xoffset/yoffset are __u32 and can never be negative; the former
	 * "< 0" tests were dead code and have been removed.  Reject pans
	 * that would read past the virtual framebuffer.
	 */
	if (var->xoffset + info->var.xres > info->var.xres_virtual
	    || var->yoffset + info->var.yres > info->var.yres_virtual)
		return -EINVAL;

	info->var.xoffset = var->xoffset;
	info->var.yoffset = var->yoffset;

	if (var->vmode & FB_VMODE_YWRAP)
		info->var.vmode |= FB_VMODE_YWRAP;
	else
		info->var.vmode &= ~FB_VMODE_YWRAP;

	/* push the new offsets into the area descriptor */
	fsl_diu_set_aoi(info);

	return 0;
}
/*
 * Driver-specific ioctls: get/set the AOI pixel format, display offset,
 * global alpha and chroma-key range.  The *_OLD command values are
 * deprecated aliases that warn and then fall through to the current
 * handlers.  Returns 0 on success or a negative errno.
 */
static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
			 unsigned long arg)
{
	struct mfb_info *mfbi = info->par;
	struct diu_ad *ad = mfbi->ad;
	struct mfb_chroma_key ck;
	unsigned char global_alpha;
	struct aoi_display_offset aoi_d;
	__u32 pix_fmt;
	void __user *buf = (void __user *)arg;

	if (!arg)
		return -EINVAL;
	switch (cmd) {
	case MFB_SET_PIXFMT_OLD:
		dev_warn(info->dev,
			 "MFB_SET_PIXFMT value of 0x%08x is deprecated.\n",
			 MFB_SET_PIXFMT_OLD);
		/* fall through */
	case MFB_SET_PIXFMT:
		if (copy_from_user(&pix_fmt, buf, sizeof(pix_fmt)))
			return -EFAULT;
		ad->pix_fmt = pix_fmt;
		break;
	case MFB_GET_PIXFMT_OLD:
		dev_warn(info->dev,
			 "MFB_GET_PIXFMT value of 0x%08x is deprecated.\n",
			 MFB_GET_PIXFMT_OLD);
		/* fall through */
	case MFB_GET_PIXFMT:
		pix_fmt = ad->pix_fmt;
		if (copy_to_user(buf, &pix_fmt, sizeof(pix_fmt)))
			return -EFAULT;
		break;
	case MFB_SET_AOID:
		if (copy_from_user(&aoi_d, buf, sizeof(aoi_d)))
			return -EFAULT;
		mfbi->x_aoi_d = aoi_d.x_aoi_d;
		mfbi->y_aoi_d = aoi_d.y_aoi_d;
		/* re-clamp the offsets, then program the descriptor */
		fsl_diu_check_var(&info->var, info);
		fsl_diu_set_aoi(info);
		break;
	case MFB_GET_AOID:
		aoi_d.x_aoi_d = mfbi->x_aoi_d;
		aoi_d.y_aoi_d = mfbi->y_aoi_d;
		if (copy_to_user(buf, &aoi_d, sizeof(aoi_d)))
			return -EFAULT;
		break;
	case MFB_GET_ALPHA:
		global_alpha = mfbi->g_alpha;
		if (copy_to_user(buf, &global_alpha, sizeof(global_alpha)))
			return -EFAULT;
		break;
	case MFB_SET_ALPHA:
		/* set panel information */
		if (copy_from_user(&global_alpha, buf, sizeof(global_alpha)))
			return -EFAULT;
		/* alpha lives in the low byte of src_size_g_alpha */
		ad->src_size_g_alpha = (ad->src_size_g_alpha & (~0xff)) |
				       (global_alpha & 0xff);
		mfbi->g_alpha = global_alpha;
		break;
	case MFB_SET_CHROMA_KEY:
		/* set panel winformation */
		if (copy_from_user(&ck, buf, sizeof(ck)))
			return -EFAULT;

		/* each enabled channel range must satisfy min <= max */
		if (ck.enable &&
		    (ck.red_max < ck.red_min ||
		     ck.green_max < ck.green_min ||
		     ck.blue_max < ck.blue_min))
			return -EINVAL;

		if (!ck.enable) {
			/* max < min disables keying in hardware */
			ad->ckmax_r = 0;
			ad->ckmax_g = 0;
			ad->ckmax_b = 0;
			ad->ckmin_r = 255;
			ad->ckmin_g = 255;
			ad->ckmin_b = 255;
		} else {
			ad->ckmax_r = ck.red_max;
			ad->ckmax_g = ck.green_max;
			ad->ckmax_b = ck.blue_max;
			ad->ckmin_r = ck.red_min;
			ad->ckmin_g = ck.green_min;
			ad->ckmin_b = ck.blue_min;
		}
		break;
	default:
		dev_err(info->dev, "unknown ioctl command (0x%08X)\n", cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}
/*
 * Framebuffer open hook: bump the per-AOI open count under diu_lock and,
 * on the first open, configure the mode and link the AOI into the DIU's
 * descriptor chain (i.e. turn the fb on when count goes 0 -> 1).
 */
static int fsl_diu_open(struct fb_info *info, int user)
{
	struct mfb_info *mfbi = info->par;
	int res = 0;

	/* free boot splash memory on first /dev/fb0 open */
	if ((mfbi->index == PLANE0) && diu_ops.release_bootmem)
		diu_ops.release_bootmem();

	spin_lock(&diu_lock);
	mfbi->count++;
	if (mfbi->count == 1) {
		fsl_diu_check_var(&info->var, info);
		res = fsl_diu_set_par(info);
		if (res < 0)
			mfbi->count--;	/* roll back on failure */
		else
			fsl_diu_enable_panel(info);
	}

	spin_unlock(&diu_lock);
	return res;
}
/*
 * Framebuffer release hook: drop the per-AOI open count under diu_lock
 * and unlink the AOI from the descriptor chain when the last user goes
 * away (i.e. turn the fb off when count reaches 0).  Always succeeds.
 */
static int fsl_diu_release(struct fb_info *info, int user)
{
	struct mfb_info *mfbi = info->par;

	spin_lock(&diu_lock);
	if (--mfbi->count == 0)
		fsl_diu_disable_panel(info);
	spin_unlock(&diu_lock);

	return 0;
}
/* Framebuffer operations; drawing ops fall back to the generic cfb_* helpers */
static struct fb_ops fsl_diu_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = fsl_diu_check_var,
	.fb_set_par = fsl_diu_set_par,
	.fb_setcolreg = fsl_diu_setcolreg,
	.fb_pan_display = fsl_diu_pan_display,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_ioctl = fsl_diu_ioctl,
	.fb_open = fsl_diu_open,
	.fb_release = fsl_diu_release,
};
/*
 * Set up and register one framebuffer (one AOI).
 *
 * Mode selection order for plane 0 with EDID data: the user/boot-supplied
 * mode against the EDID modedb, then the driver's internal modedb, then the
 * monitor's preferred (or first) EDID timing.  Other planes always use the
 * fixed 320x240 AOI mode.  Returns 0 on success or a negative errno.
 */
static int __devinit install_fb(struct fb_info *info)
{
	int rc;
	struct mfb_info *mfbi = info->par;
	const char *aoi_mode, *init_aoi_mode = "320x240";
	struct fb_videomode *db = fsl_diu_mode_db;
	unsigned int dbsize = ARRAY_SIZE(fsl_diu_mode_db);
	int has_default_mode = 1;

	info->var.activate = FB_ACTIVATE_NOW;
	info->fbops = &fsl_diu_ops;
	info->flags = FBINFO_DEFAULT | FBINFO_VIRTFB | FBINFO_PARTIAL_PAN_OK |
		FBINFO_READS_FAST;
	info->pseudo_palette = mfbi->pseudo_palette;

	rc = fb_alloc_cmap(&info->cmap, 16, 0);
	if (rc)
		return rc;

	if (mfbi->index == PLANE0) {
		if (mfbi->edid_data) {
			/* Now build modedb from EDID */
			fb_edid_to_monspecs(mfbi->edid_data, &info->monspecs);
			fb_videomode_to_modelist(info->monspecs.modedb,
						 info->monspecs.modedb_len,
						 &info->modelist);
			db = info->monspecs.modedb;
			dbsize = info->monspecs.modedb_len;
		}
		aoi_mode = fb_mode;
	} else {
		aoi_mode = init_aoi_mode;
	}
	/* fb_find_mode() returns 0 on failure, non-zero on success */
	rc = fb_find_mode(&info->var, info, aoi_mode, db, dbsize, NULL,
			  default_bpp);
	if (!rc) {
		/*
		 * For plane 0 we continue and look into
		 * driver's internal modedb.
		 */
		if ((mfbi->index == PLANE0) && mfbi->edid_data)
			has_default_mode = 0;
		else
			return -EINVAL;
	}
	if (!has_default_mode) {
		/* Second attempt: the driver's built-in mode database */
		rc = fb_find_mode(&info->var, info, aoi_mode, fsl_diu_mode_db,
			ARRAY_SIZE(fsl_diu_mode_db), NULL, default_bpp);
		if (rc)
			has_default_mode = 1;
	}
	/* Still not found, use preferred mode from database if any */
	if (!has_default_mode && info->monspecs.modedb) {
		struct fb_monspecs *specs = &info->monspecs;
		struct fb_videomode *modedb = &specs->modedb[0];
		/*
		 * Get preferred timing. If not found,
		 * first mode in database will be used.
		 */
		if (specs->misc & FB_MISC_1ST_DETAIL) {
			int i;
			for (i = 0; i < specs->modedb_len; i++) {
				if (specs->modedb[i].flag & FB_MODE_IS_FIRST) {
					modedb = &specs->modedb[i];
					break;
				}
			}
		}
		info->var.bits_per_pixel = default_bpp;
		fb_videomode_to_var(&info->var, modedb);
	}
	if (fsl_diu_check_var(&info->var, info)) {
		dev_err(info->dev, "fsl_diu_check_var failed\n");
		unmap_video_memory(info);
		fb_dealloc_cmap(&info->cmap);
		return -EINVAL;
	}
	if (register_framebuffer(info) < 0) {
		dev_err(info->dev, "register_framebuffer failed\n");
		unmap_video_memory(info);
		fb_dealloc_cmap(&info->cmap);
		return -EINVAL;
	}
	mfbi->registered = 1;
	dev_info(info->dev, "%s registered successfully\n", mfbi->id);
	return 0;
}
/*
 * Tear down one framebuffer previously set up by install_fb().
 * Safe to call on an fb_info that was never registered (no-op).
 */
static void uninstall_fb(struct fb_info *info)
{
	struct mfb_info *mfbi = info->par;

	if (!mfbi->registered)
		return;

	/* EDID data is only ever allocated for plane 0 (see fsl_diu_probe) */
	if (mfbi->index == PLANE0)
		kfree(mfbi->edid_data);

	unregister_framebuffer(info);
	unmap_video_memory(info);
	/*
	 * Bug fix: the original guarded this call with "if (&info->cmap)",
	 * which takes the address of an embedded struct member and is
	 * therefore always true.  fb_dealloc_cmap() handles its argument
	 * safely, so call it unconditionally.
	 */
	fb_dealloc_cmap(&info->cmap);

	mfbi->registered = 0;
}
/*
 * DIU interrupt handler.  dev_id is the ioremapped register block
 * (passed as such by request_irq_local()).
 */
static irqreturn_t fsl_diu_isr(int irq, void *dev_id)
{
	struct diu __iomem *hw = dev_id;
	/* Reading int_status acknowledges the pending interrupts */
	unsigned int status = in_be32(&hw->int_status);

	if (status) {
		/* This is the workaround for underrun */
		if (status & INT_UNDRUN) {
			/* Toggle the DIU off and back on to recover */
			out_be32(&hw->diu_mode, 0);
			udelay(1);
			out_be32(&hw->diu_mode, 1);
		}
#if defined(CONFIG_NOT_COHERENT_CACHE)
		else if (status & INT_VSYNC) {
			/*
			 * On non-coherent platforms, displace the data cache
			 * every vsync by zeroing (dcbz) a buffer sized for a
			 * full PLRU displacement flush (see fsl_diu_init()).
			 */
			unsigned int i;
			for (i = 0; i < coherence_data_size;
				i += d_cache_line_size)
				__asm__ __volatile__ (
					"dcbz 0, %[input]"
				::[input]"r"(&coherence_data[i]));
		}
#endif
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
/*
 * Claim the DIU interrupt and unmask the interrupt sources we handle.
 * The register block pointer is used as the dev_id cookie.
 */
static int request_irq_local(struct fsl_diu_data *data)
{
	struct diu __iomem *hw = data->diu_reg;
	u32 ints;
	int ret;

	/* Read to clear the status */
	in_be32(&hw->int_status);

	ret = request_irq(data->irq, fsl_diu_isr, 0, "fsl-diu-fb", hw);
	if (ret)
		return ret;

	ints = INT_PARERR | INT_LS_BF_VS;
#if !defined(CONFIG_NOT_COHERENT_CACHE)
	ints |= INT_VSYNC;
#endif
	/* Read to clear the status */
	in_be32(&hw->int_status);
	out_be32(&hw->int_mask, ints);

	return 0;
}
/*
 * Mask the DIU interrupt sources and release the irq line.
 */
static void free_irq_local(struct fsl_diu_data *data)
{
	struct diu __iomem *hw = data->diu_reg;

	/* Disable all LCDC interrupt */
	out_be32(&hw->int_mask, 0x1f);

	/*
	 * Bug fix: request_irq_local() registered the handler with 'hw'
	 * (the register block) as the dev_id cookie.  free_irq() must be
	 * given the same cookie to find the irqaction; the original passed
	 * NULL, which triggers "Trying to free already-free IRQ" and leaves
	 * the handler installed.
	 */
	free_irq(data->irq, hw);
}
#ifdef CONFIG_PM
/*
* Power management hooks. Note that we won't be called from IRQ context,
* unlike the blank functions above, so we may sleep.
*/
static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state)
{
	/* Simply stop the controller; the AD chain stays intact for resume. */
	struct fsl_diu_data *data = dev_get_drvdata(&ofdev->dev);

	disable_lcdc(data->fsl_diu_info);

	return 0;
}
static int fsl_diu_resume(struct platform_device *ofdev)
{
	/* Restart the controller that fsl_diu_suspend() stopped. */
	struct fsl_diu_data *data = dev_get_drvdata(&ofdev->dev);

	enable_lcdc(data->fsl_diu_info);

	return 0;
}
#else
#define fsl_diu_suspend NULL
#define fsl_diu_resume NULL
#endif /* CONFIG_PM */
/*
 * sysfs "monitor" store: switch the monitor port by name and reprogram
 * every AOI's pixel format for the new port.
 */
static ssize_t store_monitor(struct device *device,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct fsl_diu_data *data =
		container_of(attr, struct fsl_diu_data, dev_attr);
	enum fsl_diu_monitor_port old_port = data->monitor_port;

	data->monitor_port = fsl_diu_name_to_port(buf);

	if (data->monitor_port != old_port) {
		unsigned int i;

		/* All AOIs need adjust pixel format
		 * fsl_diu_set_par only change the pixsel format here
		 * unlikely to fail. */
		for (i = 0; i < NUM_AOIS; i++)
			fsl_diu_set_par(&data->fsl_diu_info[i]);
	}

	return count;
}
/*
 * sysfs "monitor" show: report the currently selected monitor port.
 * Returns 0 (empty read) for an unrecognized port value.
 */
static ssize_t show_monitor(struct device *device,
	struct device_attribute *attr, char *buf)
{
	struct fsl_diu_data *data =
		container_of(attr, struct fsl_diu_data, dev_attr);
	const char *name;

	switch (data->monitor_port) {
	case FSL_DIU_PORT_DVI:
		name = "DVI\n";
		break;
	case FSL_DIU_PORT_LVDS:
		name = "Single-link LVDS\n";
		break;
	case FSL_DIU_PORT_DLVDS:
		name = "Dual-link LVDS\n";
		break;
	default:
		return 0;
	}

	return sprintf(buf, "%s", name);
}
/*
 * Probe the DIU: allocate the DMA-coherent state block, map registers,
 * set up the dummy area descriptor, register one framebuffer per AOI,
 * claim the irq and create the "monitor" sysfs attribute.
 * Returns 0 on success or a negative errno (everything is unwound on
 * the error path).
 */
static int __devinit fsl_diu_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mfb_info *mfbi;
	struct fsl_diu_data *data;
	int diu_mode;
	dma_addr_t dma_addr; /* DMA addr of fsl_diu_data struct */
	unsigned int i;
	int ret;

	data = dma_alloc_coherent(&pdev->dev, sizeof(struct fsl_diu_data),
				  &dma_addr, GFP_DMA | __GFP_ZERO);
	if (!data)
		return -ENOMEM;
	data->dma_addr = dma_addr;

	/*
	 * dma_alloc_coherent() uses a page allocator, so the address is
	 * always page-aligned. We need the memory to be 32-byte aligned,
	 * so that's good. However, if one day the allocator changes, we
	 * need to catch that. It's not worth the effort to handle unaligned
	 * alloctions now because it's highly unlikely to ever be a problem.
	 */
	if ((unsigned long)data & 31) {
		dev_err(&pdev->dev, "misaligned allocation");
		ret = -ENOMEM;
		goto error;
	}

	spin_lock_init(&data->reg_lock);

	for (i = 0; i < NUM_AOIS; i++) {
		struct fb_info *info = &data->fsl_diu_info[i];

		info->device = &pdev->dev;
		info->par = &data->mfb[i];

		/*
		 * We store the physical address of the AD in the reserved
		 * 'paddr' field of the AD itself.
		 */
		data->ad[i].paddr = DMA_ADDR(data, ad[i]);

		info->fix.smem_start = 0;

		/* Initialize the AOI data structure */
		mfbi = info->par;
		memcpy(mfbi, &mfb_template[i], sizeof(struct mfb_info));
		mfbi->parent = data;
		mfbi->ad = &data->ad[i];

		if (mfbi->index == PLANE0) {
			const u8 *prop;
			int len;

			/* Get EDID */
			prop = of_get_property(np, "edid", &len);
			if (prop && len == EDID_LENGTH)
				mfbi->edid_data = kmemdup(prop, EDID_LENGTH,
							  GFP_KERNEL);
		}
	}

	data->diu_reg = of_iomap(np, 0);
	if (!data->diu_reg) {
		dev_err(&pdev->dev, "cannot map DIU registers\n");
		ret = -EFAULT;
		goto error;
	}

	diu_mode = in_be32(&data->diu_reg->diu_mode);
	if (diu_mode == MFB_MODE0)
		out_be32(&data->diu_reg->diu_mode, 0); /* disable DIU */

	/* Get the IRQ of the DIU */
	data->irq = irq_of_parse_and_map(np, 0);
	if (!data->irq) {
		dev_err(&pdev->dev, "could not get DIU IRQ\n");
		ret = -EINVAL;
		goto error;
	}
	data->monitor_port = monitor_port;

	/* Initialize the dummy Area Descriptor */
	data->dummy_ad.addr = cpu_to_le32(DMA_ADDR(data, dummy_aoi));
	data->dummy_ad.pix_fmt = 0x88882317;
	data->dummy_ad.src_size_g_alpha = cpu_to_le32((4 << 12) | 4);
	data->dummy_ad.aoi_size = cpu_to_le32((4 << 16) | 2);
	data->dummy_ad.offset_xyi = 0;
	data->dummy_ad.offset_xyd = 0;
	data->dummy_ad.next_ad = 0;
	data->dummy_ad.paddr = DMA_ADDR(data, dummy_ad);

	/*
	 * Let DIU display splash screen if it was pre-initialized
	 * by the bootloader, set dummy area descriptor otherwise.
	 */
	if (diu_mode == MFB_MODE0)
		out_be32(&data->diu_reg->desc[0], data->dummy_ad.paddr);

	out_be32(&data->diu_reg->desc[1], data->dummy_ad.paddr);
	out_be32(&data->diu_reg->desc[2], data->dummy_ad.paddr);

	for (i = 0; i < NUM_AOIS; i++) {
		ret = install_fb(&data->fsl_diu_info[i]);
		if (ret) {
			dev_err(&pdev->dev, "could not register fb %d\n", i);
			goto error;
		}
	}

	if (request_irq_local(data)) {
		dev_err(&pdev->dev, "could not claim irq\n");
		/*
		 * Bug fix: 'ret' was still 0 (from the last successful
		 * install_fb()) here, so the original returned success
		 * after freeing everything below -- leaving the core with
		 * a registered-looking device backed by freed memory.
		 */
		ret = -EINVAL;
		goto error;
	}

	sysfs_attr_init(&data->dev_attr.attr);
	data->dev_attr.attr.name = "monitor";
	data->dev_attr.attr.mode = S_IRUGO|S_IWUSR;
	data->dev_attr.show = show_monitor;
	data->dev_attr.store = store_monitor;
	ret = device_create_file(&pdev->dev, &data->dev_attr);
	if (ret) {
		/* Not fatal: the driver works without the sysfs file */
		dev_err(&pdev->dev, "could not create sysfs file %s\n",
			data->dev_attr.attr.name);
	}

	dev_set_drvdata(&pdev->dev, data);
	return 0;

error:
	for (i = 0; i < NUM_AOIS; i++)
		uninstall_fb(&data->fsl_diu_info[i]);

	iounmap(data->diu_reg);

	dma_free_coherent(&pdev->dev, sizeof(struct fsl_diu_data), data,
			  data->dma_addr);

	return ret;
}
/*
 * Undo fsl_diu_probe(): stop the controller, release the irq,
 * unregister every AOI framebuffer and free the coherent state block.
 */
static int fsl_diu_remove(struct platform_device *pdev)
{
	struct fsl_diu_data *data = dev_get_drvdata(&pdev->dev);
	int i;

	disable_lcdc(&data->fsl_diu_info[0]);
	free_irq_local(data);

	for (i = 0; i < NUM_AOIS; i++)
		uninstall_fb(&data->fsl_diu_info[i]);

	iounmap(data->diu_reg);
	dma_free_coherent(&pdev->dev, sizeof(*data), data, data->dma_addr);

	return 0;
}
#ifndef MODULE
/*
 * Parse the "video=fslfb:<opt>,<opt>,..." boot options.
 * Recognized options: "monitor=<name>", "bpp=<n>"; anything else is
 * taken to be the mode string.
 */
static int __init fsl_diu_setup(char *options)
{
	char *opt;
	unsigned long val;

	if (!options || !*options)
		return 0;

	while ((opt = strsep(&options, ",")) != NULL) {
		if (!*opt)
			continue;

		if (strncmp(opt, "monitor=", 8) == 0) {
			monitor_port = fsl_diu_name_to_port(opt + 8);
		} else if (strncmp(opt, "bpp=", 4) == 0) {
			if (strict_strtoul(opt + 4, 10, &val) == 0)
				default_bpp = val;
		} else {
			fb_mode = opt;
		}
	}

	return 0;
}
#endif
/* Device-tree compatible strings this driver binds to */
static struct of_device_id fsl_diu_match[] = {
#ifdef CONFIG_PPC_MPC512x
	{
		.compatible = "fsl,mpc5121-diu",
	},
#endif
	{
		.compatible = "fsl,diu",
	},
	{}
};
MODULE_DEVICE_TABLE(of, fsl_diu_match);
/* Platform driver glue; suspend/resume are NULL when !CONFIG_PM */
static struct platform_driver fsl_diu_driver = {
	.driver = {
		.name = "fsl-diu-fb",
		.owner = THIS_MODULE,
		.of_match_table = fsl_diu_match,
	},
	.probe = fsl_diu_probe,
	.remove = fsl_diu_remove,
	.suspend = fsl_diu_suspend,
	.resume = fsl_diu_resume,
};
/*
 * Module init: parse boot/module options, size the cache-displacement
 * buffer on non-coherent platforms, and register the platform driver.
 */
static int __init fsl_diu_init(void)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	struct device_node *np;
	const u32 *prop;
#endif
	int ret;
#ifndef MODULE
	char *option;

	/*
	 * For kernel boot options (in 'video=xxxfb:<options>' format)
	 */
	if (fb_get_options("fslfb", &option))
		return -ENODEV;
	fsl_diu_setup(option);
#else
	monitor_port = fsl_diu_name_to_port(monitor_string);
#endif
	pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n");

#ifdef CONFIG_NOT_COHERENT_CACHE
	/* The displacement-flush buffer is sized from the CPU's d-cache */
	np = of_find_node_by_type(NULL, "cpu");
	if (!np) {
		pr_err("fsl-diu-fb: can't find 'cpu' device node\n");
		return -ENODEV;
	}

	prop = of_get_property(np, "d-cache-size", NULL);
	if (prop == NULL) {
		pr_err("fsl-diu-fb: missing 'd-cache-size' property' "
		       "in 'cpu' node\n");
		of_node_put(np);
		return -ENODEV;
	}

	/*
	 * Freescale PLRU requires 13/8 times the cache size to do a proper
	 * displacement flush
	 */
	coherence_data_size = be32_to_cpup(prop) * 13;
	coherence_data_size /= 8;

	prop = of_get_property(np, "d-cache-line-size", NULL);
	if (prop == NULL) {
		pr_err("fsl-diu-fb: missing 'd-cache-line-size' property' "
		       "in 'cpu' node\n");
		of_node_put(np);
		return -ENODEV;
	}
	d_cache_line_size = be32_to_cpup(prop);

	of_node_put(np);
	coherence_data = vmalloc(coherence_data_size);
	if (!coherence_data)
		return -ENOMEM;
#endif

	ret = platform_driver_register(&fsl_diu_driver);
	if (ret) {
		pr_err("fsl-diu-fb: failed to register platform driver\n");
#if defined(CONFIG_NOT_COHERENT_CACHE)
		vfree(coherence_data);
#endif
	}
	return ret;
}
/* Module exit: unregister the driver and free the flush buffer, if any */
static void __exit fsl_diu_exit(void)
{
	platform_driver_unregister(&fsl_diu_driver);
#if defined(CONFIG_NOT_COHERENT_CACHE)
	vfree(coherence_data);
#endif
}
module_init(fsl_diu_init);
module_exit(fsl_diu_exit);

MODULE_AUTHOR("York Sun <yorksun@freescale.com>");
MODULE_DESCRIPTION("Freescale DIU framebuffer driver");
MODULE_LICENSE("GPL");

/* Module parameters mirror the "video=fslfb:..." boot options */
module_param_named(mode, fb_mode, charp, 0);
MODULE_PARM_DESC(mode,
	"Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" ");
module_param_named(bpp, default_bpp, ulong, 0);
MODULE_PARM_DESC(bpp, "Specify bit-per-pixel if not specified in 'mode'");
module_param_named(monitor, monitor_string, charp, 0);
MODULE_PARM_DESC(monitor, "Specify the monitor port "
	"(\"dvi\", \"lvds\", or \"dlvds\") if supported by the platform");
| gpl-2.0 |
kecinzer/kernel_opo_kecinzer | drivers/staging/iio/accel/lis3l02dq_ring.c | 4878 | 10892 | #include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "../iio.h"
#include "../ring_sw.h"
#include "../kfifo_buf.h"
#include "../trigger.h"
#include "../trigger_consumer.h"
#include "lis3l02dq.h"
/**
 * combine_8_to_16() - assemble a 16-bit value from a low/high byte pair
 * @lower: least significant byte
 * @upper: most significant byte
 **/
static inline u16 combine_8_to_16(u8 lower, u8 upper)
{
	return (u16)(((u16)upper << 8) | lower);
}
/**
 * lis3l02dq_data_rdy_trig_poll() - event handler for the data-ready trigger
 *
 * When the trigger is active, poll it; otherwise defer to the threaded
 * handler (event path).
 **/
irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	if (!st->trigger_on)
		return IRQ_WAKE_THREAD;

	iio_trigger_poll(st->trig, iio_get_time_ns());
	return IRQ_HANDLED;
}
/*
 * SPI tx bytes to read all six output registers (X/Y/Z, low then high
 * byte); every command byte is followed by a dummy 0 for the response.
 */
static const u8 read_all_tx_array[] = {
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
};
/**
 * lis3l02dq_read_all() Reads all channels currently selected
 * @indio_dev: the IIO device whose active_scan_mask selects the channels
 * @rx_array: (dma capable) receive array, must be at least
 * 4*number of channels; may be NULL to perform the reads and
 * discard the data (used to clear the data-ready interrupt)
 *
 * Builds two 2-byte SPI transfers (low byte, then high byte) per enabled
 * axis and runs them as one message.  Returns 0 or a negative errno.
 **/
static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	struct spi_transfer *xfers;
	struct spi_message msg;
	int ret, i, j = 0;

	/* Two transfers per enabled channel */
	xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask,
				      indio_dev->masklength) * 2,
			sizeof(*xfers), GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;
	mutex_lock(&st->buf_lock);

	for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
		if (test_bit(i, indio_dev->active_scan_mask)) {
			/* lower byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;

			/* upper byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4 + 2];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;
		}

	/* After these are transmitted, the rx_buff should have
	 * values in alternate bytes
	 */
	spi_message_init(&msg);
	for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask,
				      indio_dev->masklength) * 2; j++)
		spi_message_add_tail(&xfers[j], &msg);

	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);
	kfree(xfers);

	return ret;
}
/*
 * Read all enabled channels and pack them as s16 values into buf.
 * Returns the number of bytes written to buf, or a negative errno.
 */
static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
				u8 *buf)
{
	int ret, i;
	u8 *rx_array;
	s16 *data = (s16 *)buf;
	int scan_count = bitmap_weight(indio_dev->active_scan_mask,
				       indio_dev->masklength);

	rx_array = kzalloc(4 * scan_count, GFP_KERNEL);
	if (rx_array == NULL)
		return -ENOMEM;
	ret = lis3l02dq_read_all(indio_dev, rx_array);
	if (ret < 0) {
		/* Bug fix: the original leaked rx_array on this error path */
		kfree(rx_array);
		return ret;
	}
	/* rx_array holds data in alternate bytes (see lis3l02dq_read_all) */
	for (i = 0; i < scan_count; i++)
		data[i] = combine_8_to_16(rx_array[i*4+1],
					  rx_array[i*4+3]);
	kfree(rx_array);

	return i*sizeof(data[0]);
}
/*
 * Triggered-buffer bottom half: grab one scan, append an aligned
 * timestamp when enabled, and push the record into the buffer.
 */
static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct iio_buffer *buffer = indio_dev->buffer;
	int len = 0;
	size_t datasize = buffer->access->get_bytes_per_datum(buffer);
	char *data = kmalloc(datasize, GFP_KERNEL);

	if (data == NULL) {
		dev_err(indio_dev->dev.parent,
			"memory alloc failed in buffer bh");
		/* NOTE(review): -ENOMEM is not a valid irqreturn_t value;
		 * IRQ_HANDLED after notify_done would be cleaner -- confirm
		 * against the IIO core before changing. */
		return -ENOMEM;
	}

	if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
		len = lis3l02dq_get_buffer_element(indio_dev, data);

	/* Guaranteed to be aligned with 8 byte boundary */
	if (buffer->scan_timestamp)
		/* Round up past the sample data to the next 8-byte slot */
		*(s64 *)(((phys_addr_t)data + len
				+ sizeof(s64) - 1) & ~(sizeof(s64) - 1))
			= pf->timestamp;
	buffer->access->store_to(buffer, (u8 *)data, pf->timestamp);

	iio_trigger_notify_done(indio_dev->trig);
	kfree(data);

	return IRQ_HANDLED;
}
/*
 * Enable or disable data-ready interrupt generation in CTRL_2.
 * Caller responsible for locking as necessary.  No-op when the register
 * already matches the requested state.  Returns 0 or a negative errno.
 */
static int
__lis3l02dq_write_data_ready_config(struct device *dev, bool state)
{
	int ret;
	u8 valold;
	bool currentlyset;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	/* Get the current event mask register */
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_2_ADDR,
				       &valold);
	if (ret)
		goto error_ret;
	/* Find out if data ready is already on */
	currentlyset
		= valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

	/* Disable requested */
	if (!state && currentlyset) {
		/* disable the data ready signal */
		valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

		/* The double write is to overcome a hardware bug?*/
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
		st->trigger_on = false;
	/* Enable requested */
	} else if (state && !currentlyset) {
		/* if not set, enable requested */
		/* first disable all events */
		ret = lis3l02dq_disable_all_events(indio_dev);
		if (ret < 0)
			goto error_ret;

		/* disable_all_events returns the new register value */
		valold = ret |
			LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

		st->trigger_on = true;
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
	}

	return 0;
error_ret:
	return ret;
}
/**
 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
 * @trig: the trigger being enabled or disabled
 * @state: true to enable data-ready generation, false to disable
 *
 * If disabling the interrupt also does a final read to ensure it is clear.
 * This is only important in some cases where the scan enable elements are
 * switched before the buffer is reenabled.
 **/
static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
						bool state)
{
	struct iio_dev *indio_dev = trig->private_data;
	int ret = 0;
	u8 t;

	/* NOTE(review): return value deliberately ignored here? confirm */
	__lis3l02dq_write_data_ready_config(&indio_dev->dev, state);
	if (state == false) {
		/*
		 * A possible quirk with the handler is currently worked around
		 * by ensuring outstanding read events are cleared.
		 */
		ret = lis3l02dq_read_all(indio_dev, NULL);
	}
	/* Read wake-up source register to clear any latched event */
	lis3l02dq_spi_read_reg_8(indio_dev,
				 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
				 &t);
	return ret;
}
/**
 * lis3l02dq_trig_try_reen() - try re-enabling irq for the data-ready trigger
 * @trig: the datardy trigger
 *
 * Reads the device repeatedly while its interrupt gpio is still asserted;
 * in theory several reads may be required to clear it.
 */
static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
{
	struct iio_dev *indio_dev = trig->private_data;
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	int tries = 0;

	while (tries < 5 && gpio_get_value(irq_to_gpio(st->us->irq))) {
		lis3l02dq_read_all(indio_dev, NULL);
		tries++;
	}
	if (tries == 5)
		printk(KERN_INFO
		       "Failed to clear the interrupt for lis3l02dq\n");

	/* irq reenabled so success! */
	return 0;
}
/* Trigger callbacks for the device's hardware data-ready line */
static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
	.try_reenable = &lis3l02dq_trig_try_reen,
};
/*
 * Allocate and register the data-ready trigger for this device.
 * Returns 0 on success or a negative errno.
 */
int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	int ret;

	st->trig = iio_allocate_trigger("lis3l02dq-dev%d", indio_dev->id);
	if (!st->trig)
		return -ENOMEM;

	st->trig->dev.parent = &st->us->dev;
	st->trig->ops = &lis3l02dq_trigger_ops;
	st->trig->private_data = indio_dev;

	ret = iio_trigger_register(st->trig);
	if (ret) {
		iio_free_trigger(st->trig);
		return ret;
	}

	return 0;
}
/* Unregister and free the trigger allocated by lis3l02dq_probe_trigger() */
void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	iio_trigger_unregister(st->trig);
	iio_free_trigger(st->trig);
}
/* Release the poll function and buffer set up by lis3l02dq_configure_buffer() */
void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	lis3l02dq_free_buf(indio_dev->buffer);
}
static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
{
	/* Disable unwanted channels otherwise the interrupt will not clear */
	/* CTRL_1 enable bit per axis, indexed by scan bit (0=X, 1=Y, 2=Z) */
	static const u8 axis_enable[] = {
		LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE,
		LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE,
		LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE,
	};
	u8 t;
	int i, ret;
	bool oneenabled = false;

	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_1_ADDR,
				       &t);
	if (ret)
		goto error_ret;

	/*
	 * Consistency: the original open-coded this identically for each
	 * of the three axes; a table-driven loop keeps the bit handling
	 * in one place.
	 */
	for (i = 0; i < ARRAY_SIZE(axis_enable); i++) {
		if (test_bit(i, indio_dev->active_scan_mask)) {
			t |= axis_enable[i];
			oneenabled = true;
		} else {
			t &= ~axis_enable[i];
		}
	}

	if (!oneenabled) /* what happens in this case is unknown */
		return -EINVAL;
	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_1_ADDR,
					t);
	if (ret)
		goto error_ret;

	return iio_triggered_buffer_postenable(indio_dev);
error_ret:
	return ret;
}
/* Turn all channels on again */
static int lis3l02dq_buffer_predisable(struct iio_dev *indio_dev)
{
	u8 ctrl;
	int ret;

	ret = iio_triggered_buffer_predisable(indio_dev);
	if (ret)
		return ret;

	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_1_ADDR,
				       &ctrl);
	if (ret)
		return ret;

	ctrl |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE |
		LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE |
		LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;

	return lis3l02dq_spi_write_reg_8(indio_dev,
					 LIS3L02DQ_REG_CTRL_1_ADDR,
					 ctrl);
}
/* Buffer enable/disable hooks; preenable is the generic sw-buffer helper */
static const struct iio_buffer_setup_ops lis3l02dq_buffer_setup_ops = {
	.preenable = &iio_sw_buffer_preenable,
	.postenable = &lis3l02dq_buffer_postenable,
	.predisable = &lis3l02dq_buffer_predisable,
};
/*
 * Allocate the device buffer and the triggered poll function.
 * Returns 0 on success or a negative errno.
 */
int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
{
	int ret;
	struct iio_buffer *buffer;

	buffer = lis3l02dq_alloc_buf(indio_dev);
	if (!buffer)
		return -ENOMEM;

	indio_dev->buffer = buffer;
	buffer->scan_timestamp = true;

	indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;

	/* Functions are NULL as we set handler below */
	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &lis3l02dq_trigger_handler,
						 0,
						 indio_dev,
						 "lis3l02dq_consumer%d",
						 indio_dev->id);

	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_iio_sw_rb_free;
	}

	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_iio_sw_rb_free:
	lis3l02dq_free_buf(indio_dev->buffer);
	return ret;
}
| gpl-2.0 |
GalaxyTab4/android_kernel_samsung_degas | arch/alpha/kernel/sys_dp264.c | 6670 | 17720 | /*
* linux/arch/alpha/kernel/sys_dp264.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996, 1999 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Modified by Christopher C. Chimelis, 2001 to
* add support for the addition of Shark to the
* Tsunami family.
*
* Code supporting the DP264 (EV6+TSUNAMI).
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
/* Note mask bit is true for ENABLED irqs. */
static unsigned long cached_irq_mask;
/* dp264 boards handle at max four CPUs */
static unsigned long cpu_irq_affinity[4] = { 0UL, 0UL, 0UL, 0UL };
/* Serializes updates to cached_irq_mask/cpu_irq_affinity and hw writes */
DEFINE_SPINLOCK(dp264_irq_lock);
/*
 * Push the cached interrupt mask to the Tsunami DIMx (per-CPU interrupt
 * mask) registers.  The ISA summary bit (55) is always routed to the
 * boot CPU.  Caller holds dp264_irq_lock.
 */
static void
tsunami_update_irq_hw(unsigned long mask)
{
	register tsunami_cchip *cchip = TSUNAMI_cchip;
	unsigned long isa_enable = 1UL << 55;
	register int bcpu = boot_cpuid;

#ifdef CONFIG_SMP
	volatile unsigned long *dim0, *dim1, *dim2, *dim3;
	unsigned long mask0, mask1, mask2, mask3, dummy;

	/* Apply per-CPU affinity, then add ISA routing on the boot CPU */
	mask &= ~isa_enable;
	mask0 = mask & cpu_irq_affinity[0];
	mask1 = mask & cpu_irq_affinity[1];
	mask2 = mask & cpu_irq_affinity[2];
	mask3 = mask & cpu_irq_affinity[3];

	if (bcpu == 0) mask0 |= isa_enable;
	else if (bcpu == 1) mask1 |= isa_enable;
	else if (bcpu == 2) mask2 |= isa_enable;
	else mask3 |= isa_enable;

	/* Redirect writes for absent CPUs to a dummy location */
	dim0 = &cchip->dim0.csr;
	dim1 = &cchip->dim1.csr;
	dim2 = &cchip->dim2.csr;
	dim3 = &cchip->dim3.csr;
	if (!cpu_possible(0)) dim0 = &dummy;
	if (!cpu_possible(1)) dim1 = &dummy;
	if (!cpu_possible(2)) dim2 = &dummy;
	if (!cpu_possible(3)) dim3 = &dummy;

	*dim0 = mask0;
	*dim1 = mask1;
	*dim2 = mask2;
	*dim3 = mask3;
	mb();
	/* Read back to make sure the writes have reached the chip */
	*dim0;
	*dim1;
	*dim2;
	*dim3;
#else
	volatile unsigned long *dimB;
	/* UP: everything, including ISA, goes to the boot CPU's DIM */
	if (bcpu == 0) dimB = &cchip->dim0.csr;
	else if (bcpu == 1) dimB = &cchip->dim1.csr;
	else if (bcpu == 2) dimB = &cchip->dim2.csr;
	else dimB = &cchip->dim3.csr;

	*dimB = mask | isa_enable;
	mb();
	*dimB;
#endif
}
/* Unmask an irq: DP264 maps irq numbers directly onto DRIR bits. */
static void
dp264_enable_irq(struct irq_data *d)
{
	unsigned long bit = 1UL << d->irq;

	spin_lock(&dp264_irq_lock);
	cached_irq_mask |= bit;
	tsunami_update_irq_hw(cached_irq_mask);
	spin_unlock(&dp264_irq_lock);
}
/* Mask an irq: clear its bit in the cached mask and update the hw. */
static void
dp264_disable_irq(struct irq_data *d)
{
	unsigned long bit = 1UL << d->irq;

	spin_lock(&dp264_irq_lock);
	cached_irq_mask &= ~bit;
	tsunami_update_irq_hw(cached_irq_mask);
	spin_unlock(&dp264_irq_lock);
}
/* Unmask an irq: CLIPPER irq numbers are offset 16 from the DRIR bits. */
static void
clipper_enable_irq(struct irq_data *d)
{
	unsigned long bit = 1UL << (d->irq - 16);

	spin_lock(&dp264_irq_lock);
	cached_irq_mask |= bit;
	tsunami_update_irq_hw(cached_irq_mask);
	spin_unlock(&dp264_irq_lock);
}
/* Mask an irq (CLIPPER numbering, see clipper_enable_irq). */
static void
clipper_disable_irq(struct irq_data *d)
{
	unsigned long bit = 1UL << (d->irq - 16);

	spin_lock(&dp264_irq_lock);
	cached_irq_mask &= ~bit;
	tsunami_update_irq_hw(cached_irq_mask);
	spin_unlock(&dp264_irq_lock);
}
/* Record, per CPU, whether the given irq may be delivered there. */
static void
cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	unsigned long bit = 1UL << irq;
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		if (cpumask_test_cpu(cpu, &affinity))
			cpu_irq_affinity[cpu] |= bit;
		else
			cpu_irq_affinity[cpu] &= ~bit;
	}
}
/* irq_set_affinity hook for DP264 (direct irq-to-bit mapping). */
static int
dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
		   bool force)
{
	spin_lock(&dp264_irq_lock);

	cpu_set_irq_affinity(d->irq, *affinity);
	tsunami_update_irq_hw(cached_irq_mask);

	spin_unlock(&dp264_irq_lock);
	return 0;
}
/* irq_set_affinity hook for CLIPPER (irq numbers offset by 16). */
static int
clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
		     bool force)
{
	spin_lock(&dp264_irq_lock);

	cpu_set_irq_affinity(d->irq - 16, *affinity);
	tsunami_update_irq_hw(cached_irq_mask);

	spin_unlock(&dp264_irq_lock);
	return 0;
}
/* irq_chip for DP264-family boards (irq == DRIR bit) */
static struct irq_chip dp264_irq_type = {
	.name = "DP264",
	.irq_unmask = dp264_enable_irq,
	.irq_mask = dp264_disable_irq,
	.irq_mask_ack = dp264_disable_irq,
	.irq_set_affinity = dp264_set_affinity,
};

/* irq_chip for CLIPPER boards (irq == DRIR bit + 16) */
static struct irq_chip clipper_irq_type = {
	.name = "CLIPPER",
	.irq_unmask = clipper_enable_irq,
	.irq_mask = clipper_disable_irq,
	.irq_mask_ack = clipper_disable_irq,
	.irq_set_affinity = clipper_set_affinity,
};
/*
 * Non-SRM device interrupt dispatch.  The real implementation (walking
 * the Tsunami DIR0 summary register) is compiled out; only the stub
 * printk path is active.
 */
static void
dp264_device_interrupt(unsigned long vector)
{
#if 1
	printk("dp264_device_interrupt: NOT IMPLEMENTED YET!!\n");
#else
	unsigned long pld;
	unsigned int i;

	/* Read the interrupt summary register of TSUNAMI */
	pld = TSUNAMI_cchip->dir0.csr;

	/*
	 * Now for every possible bit set, work through them and call
	 * the appropriate interrupt handler.
	 */
	while (pld) {
		i = ffz(~pld);
		pld &= pld - 1; /* clear least bit set */
		if (i == 55)
			isa_device_interrupt(vector);
		else
			handle_irq(16 + i);
#if 0
		TSUNAMI_cchip->dir0.csr = 1UL << i; mb();
		tmp = TSUNAMI_cchip->dir0.csr;
#endif
	}
#endif
}
static void
dp264_srm_device_interrupt(unsigned long vector)
{
	/*
	 * The SRM console reports PCI interrupts with a vector calculated by:
	 *
	 *	0x900 + (0x10 * DRIR-bit)
	 *
	 * So bit 16 shows up as IRQ 32, etc.
	 *
	 * On DP264/BRICK/MONET, we adjust it down by 16 because at least
	 * that many of the low order bits of the DRIR are not used, and
	 * so we don't count them.
	 */
	int irq = (vector - 0x800) >> 4;

	if (irq >= 32)
		irq -= 16;

	handle_irq(irq);
}
static void
clipper_srm_device_interrupt(unsigned long vector)
{
	/*
	 * The SRM console reports PCI interrupts with a vector calculated by:
	 *
	 *	0x900 + (0x10 * DRIR-bit)
	 *
	 * So bit 16 shows up as IRQ 32, etc.
	 *
	 * CLIPPER uses bits 8-47 for PCI interrupts, so we do not need
	 * to scale down the vector reported, we just use it.
	 *
	 * Eg IRQ 24 is DRIR bit 8, etc, etc
	 */
	handle_irq((vector - 0x800) >> 4);
}
/* Register the given irq_chip as a level-triggered handler for [imin, imax]. */
static void __init
init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
{
	long irq;

	for (irq = imin; irq <= imax; ++irq) {
		irq_set_chip_and_handler(irq, ops, handle_level_irq);
		irq_set_status_flags(irq, IRQ_LEVEL);
	}
}
/*
 * Board irq init for DP264: reset the ISA DMA controllers, mask all
 * Tsunami interrupts, then set up i8259 (ISA) and Tsunami (16-47) irqs.
 */
static void __init
dp264_init_irq(void)
{
	outb(0, DMA1_RESET_REG);
	outb(0, DMA2_RESET_REG);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
	outb(0, DMA2_MASK_REG);

	/* Under SRM, device vectors come pre-cooked (see the srm handler) */
	if (alpha_using_srm)
		alpha_mv.device_interrupt = dp264_srm_device_interrupt;

	tsunami_update_irq_hw(0);

	init_i8259a_irqs();
	init_tsunami_irqs(&dp264_irq_type, 16, 47);
}
/*
 * Board irq init for CLIPPER: like dp264_init_irq(), but PCI interrupts
 * occupy irqs 24-63 (DRIR bits 8-47).
 */
static void __init
clipper_init_irq(void)
{
	outb(0, DMA1_RESET_REG);
	outb(0, DMA2_RESET_REG);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
	outb(0, DMA2_MASK_REG);

	if (alpha_using_srm)
		alpha_mv.device_interrupt = clipper_srm_device_interrupt;

	tsunami_update_irq_hw(0);

	init_i8259a_irqs();
	init_tsunami_irqs(&clipper_irq_type, 24, 63);
}
/*
* PCI Fixup configuration.
*
* Summary @ TSUNAMI_CSR_DIM0:
* Bit Meaning
* 0-17 Unused
*18 Interrupt SCSI B (Adaptec 7895 builtin)
*19 Interrupt SCSI A (Adaptec 7895 builtin)
*20 Interrupt Line D from slot 2 PCI0
*21 Interrupt Line C from slot 2 PCI0
*22 Interrupt Line B from slot 2 PCI0
*23 Interrupt Line A from slot 2 PCI0
*24 Interrupt Line D from slot 1 PCI0
*25 Interrupt Line C from slot 1 PCI0
*26 Interrupt Line B from slot 1 PCI0
*27 Interrupt Line A from slot 1 PCI0
*28 Interrupt Line D from slot 0 PCI0
*29 Interrupt Line C from slot 0 PCI0
*30 Interrupt Line B from slot 0 PCI0
*31 Interrupt Line A from slot 0 PCI0
*
*32 Interrupt Line D from slot 3 PCI1
*33 Interrupt Line C from slot 3 PCI1
*34 Interrupt Line B from slot 3 PCI1
*35 Interrupt Line A from slot 3 PCI1
*36 Interrupt Line D from slot 2 PCI1
*37 Interrupt Line C from slot 2 PCI1
*38 Interrupt Line B from slot 2 PCI1
*39 Interrupt Line A from slot 2 PCI1
*40 Interrupt Line D from slot 1 PCI1
*41 Interrupt Line C from slot 1 PCI1
*42 Interrupt Line B from slot 1 PCI1
*43 Interrupt Line A from slot 1 PCI1
*44 Interrupt Line D from slot 0 PCI1
*45 Interrupt Line C from slot 0 PCI1
*46 Interrupt Line B from slot 0 PCI1
*47 Interrupt Line A from slot 0 PCI1
*48-52 Unused
*53 PCI0 NMI (from Cypress)
*54 PCI0 SMI INT (from Cypress)
*55 PCI0 ISA Interrupt (from Cypress)
*56-60 Unused
*61 PCI1 Bus Error
*62 PCI0 Bus Error
*63 Reserved
*
* IdSel
* 5 Cypress Bridge I/O
* 6 SCSI Adaptec builtin
* 7 64 bit PCI option slot 0 (all busses)
* 8 64 bit PCI option slot 1 (all busses)
* 9 64 bit PCI option slot 2 (all busses)
* 10 64 bit PCI option slot 3 (not bus 0)
*/
/*
 * Fall back to the console-assigned interrupt line for devices whose
 * irq was not resolved by the board's routing table.
 */
static int __init
isa_irq_fixup(const struct pci_dev *dev, int irq)
{
	u8 line;

	if (irq > 0)
		return irq;

	/* This interrupt is routed via ISA bridge, so we'll
	   just have to trust whatever value the console might
	   have assigned.  */
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line);

	return line & 0xf;
}
/*
 * DP264 pci_map_irq: look up the irq by IdSel/pin, then shift by 16 per
 * hose so each PCI bus gets a distinct irq range.
 */
static int __init
dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[6][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		{    -1,    -1,    -1,    -1,    -1}, /* IdSel 5 ISA Bridge */
		{ 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/
		{ 16+15, 16+15, 16+14, 16+13, 16+12}, /* IdSel 7 slot 0 */
		{ 16+11, 16+11, 16+10, 16+ 9, 16+ 8}, /* IdSel 8 slot 1 */
		{ 16+ 7, 16+ 7, 16+ 6, 16+ 5, 16+ 4}, /* IdSel 9 slot 2 */
		{ 16+ 3, 16+ 3, 16+ 2, 16+ 1, 16+ 0}  /* IdSel 10 slot 3 */
	};
	const long min_idsel = 5, max_idsel = 10, irqs_per_slot = 5;

	struct pci_controller *hose = dev->sysdata;
	int irq = COMMON_TABLE_LOOKUP;

	if (irq > 0)
		irq += 16 * hose->index;

	return isa_irq_fixup(dev, irq);
}
/*
 * Monet slot/pin-to-IRQ table lookup (no per-hose bias here).
 * NOTE(review): the "#if 1" branch substitutes the PCI2 slot 4/5 rows
 * (28.. and 24..) in place of the unused IdSel 9/10 entries; the same
 * rows appear again at IdSel 14/15 at the end of the table — confirm
 * this duplication is intentional for the routing on this board.
 */
static int __init
monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
    static char irq_tab[13][5] __initdata = {
        /*INT    INTA   INTB   INTC   INTD */
        {    45,    45,    45,    45,    45}, /* IdSel 3 21143 PCI1 */
        {    -1,    -1,    -1,    -1,    -1}, /* IdSel 4 unused */
        {    -1,    -1,    -1,    -1,    -1}, /* IdSel 5 unused */
        {    47,    47,    47,    47,    47}, /* IdSel 6 SCSI PCI1 */
        {    -1,    -1,    -1,    -1,    -1}, /* IdSel 7 ISA Bridge */
        {    -1,    -1,    -1,    -1,    -1}, /* IdSel 8 P2P PCI1 */
#if 1
        {    28,    28,    29,    30,    31}, /* IdSel 14 slot 4 PCI2*/
        {    24,    24,    25,    26,    27}, /* IdSel 15 slot 5 PCI2*/
#else
        {    -1,    -1,    -1,    -1,    -1}, /* IdSel 9 unused */
        {    -1,    -1,    -1,    -1,    -1}, /* IdSel 10 unused */
#endif
        {    40,    40,    41,    42,    43}, /* IdSel 11 slot 1 PCI0*/
        {    36,    36,    37,    38,    39}, /* IdSel 12 slot 2 PCI0*/
        {    32,    32,    33,    34,    35}, /* IdSel 13 slot 3 PCI0*/
        {    28,    28,    29,    30,    31}, /* IdSel 14 slot 4 PCI2*/
        {    24,    24,    25,    26,    27}  /* IdSel 15 slot 5 PCI2*/
    };
    const long min_idsel = 3, max_idsel = 15, irqs_per_slot = 5;

    return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
}
/*
 * Slot/pin swizzle for Monet.  Devices directly on a root bus, or
 * behind the built-in bridge at IdSel 8 on hose 1, keep their own
 * slot number.  For card-based bridges, walk up toward the root,
 * swizzling the interrupt pin at each hop; the swizzle must happen
 * before 'dev' is advanced, so the statement order below matters.
 */
static u8 __init
monet_swizzle(struct pci_dev *dev, u8 *pinp)
{
    struct pci_controller *hose = dev->sysdata;
    int slot, pin = *pinp;

    if (!dev->bus->parent) {
        slot = PCI_SLOT(dev->devfn);
    }
    /* Check for the built-in bridge on hose 1. */
    else if (hose->index == 1 && PCI_SLOT(dev->bus->self->devfn) == 8) {
        slot = PCI_SLOT(dev->devfn);
    } else {
        /* Must be a card-based bridge. */
        do {
            /* Check for built-in bridge on hose 1. */
            if (hose->index == 1 &&
                PCI_SLOT(dev->bus->self->devfn) == 8) {
                slot = PCI_SLOT(dev->devfn);
                break;
            }
            pin = pci_swizzle_interrupt_pin(dev, pin);

            /* Move up the chain of bridges. */
            dev = dev->bus->self;
            /* Slot of the next bridge. */
            slot = PCI_SLOT(dev->devfn);
        } while (dev->bus->self);
    }
    *pinp = pin;
    return slot;
}
/*
 * Webbrick (DS10) slot/pin-to-IRQ lookup.
 * NOTE(review): the table is declared [13][5] but only 11 rows are
 * initialized; IdSel range 7..17 needs exactly 11, so the trailing
 * zero rows are never indexed — presumably harmless, but confirm.
 */
static int __init
webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
    static char irq_tab[13][5] __initdata = {
        /*INT    INTA   INTB   INTC   INTD */
        {    -1,    -1,    -1,    -1,    -1}, /* IdSel 7 ISA Bridge */
        {    -1,    -1,    -1,    -1,    -1}, /* IdSel 8 unused */
        {    29,    29,    29,    29,    29}, /* IdSel 9 21143 #1 */
        {    -1,    -1,    -1,    -1,    -1}, /* IdSel 10 unused */
        {    30,    30,    30,    30,    30}, /* IdSel 11 21143 #2 */
        {    -1,    -1,    -1,    -1,    -1}, /* IdSel 12 unused */
        {    -1,    -1,    -1,    -1,    -1}, /* IdSel 13 unused */
        {    35,    35,    34,    33,    32}, /* IdSel 14 slot 0 */
        {    39,    39,    38,    37,    36}, /* IdSel 15 slot 1 */
        {    43,    43,    42,    41,    40}, /* IdSel 16 slot 2 */
        {    47,    47,    46,    45,    44}, /* IdSel 17 slot 3 */
    };
    const long min_idsel = 7, max_idsel = 17, irqs_per_slot = 5;

    return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
}
/*
 * Clipper slot/pin-to-IRQ lookup, biased by 16 per hose like DP264.
 */
static int __init
clipper_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
    static char irq_tab[7][5] __initdata = {
        /*INT    INTA   INTB   INTC   INTD */
        { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */
        { 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */
        { 16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 3 slot 3 */
        { 16+20, 16+20, 16+21, 16+22, 16+23}, /* IdSel 4 slot 4 */
        { 16+24, 16+24, 16+25, 16+26, 16+27}, /* IdSel 5 slot 5 */
        { 16+28, 16+28, 16+29, 16+30, 16+31}, /* IdSel 6 slot 6 */
        {    -1,    -1,    -1,    -1,    -1}  /* IdSel 7 ISA Bridge */
    };
    const long min_idsel = 1, max_idsel = 7, irqs_per_slot = 5;

    struct pci_controller *hose = dev->sysdata;
    int irq = COMMON_TABLE_LOOKUP;

    if (irq > 0)
        irq += 16 * hose->index;

    return isa_irq_fixup(dev, irq);
}
/* PCI bring-up for DP264: common init, SMC FDC37C669 superio, VGA. */
static void __init
dp264_init_pci(void)
{
    common_init_pci();
    SMC669_Init(0);
    locate_and_init_vga(NULL);
}
/* PCI bring-up for Monet: adds ES1888 audio init on top of DP264's. */
static void __init
monet_init_pci(void)
{
    common_init_pci();
    SMC669_Init(1);
    es1888_init();
    locate_and_init_vga(NULL);
}
/* PCI bring-up for Clipper: no superio/audio extras, just VGA. */
static void __init
clipper_init_pci(void)
{
    common_init_pci();
    locate_and_init_vga(NULL);
}
/* Arch init for Webbrick: standard Tsunami init plus SG alignment. */
static void __init
webbrick_init_arch(void)
{
    tsunami_init_arch();

    /* Tsunami caches 4 PTEs at a time; DS10 has only 1 hose. */
    hose_head->sg_isa->align_entry = 4;
    hose_head->sg_pci->align_entry = 4;
}
/*
* The System Vectors
*/
/* Machine vector for DP264: EV6 MMU, Tsunami core logic, 64 IRQs. */
struct alpha_machine_vector dp264_mv __initmv = {
    .vector_name        = "DP264",
    DO_EV6_MMU,
    DO_DEFAULT_RTC,
    DO_TSUNAMI_IO,
    .machine_check      = tsunami_machine_check,
    .max_isa_dma_address    = ALPHA_MAX_ISA_DMA_ADDRESS,
    .min_io_address     = DEFAULT_IO_BASE,
    .min_mem_address    = DEFAULT_MEM_BASE,
    .pci_dac_offset     = TSUNAMI_DAC_OFFSET,

    .nr_irqs        = 64,
    .device_interrupt   = dp264_device_interrupt,

    .init_arch      = tsunami_init_arch,
    .init_irq       = dp264_init_irq,
    .init_rtc       = common_init_rtc,
    .init_pci       = dp264_init_pci,
    .kill_arch      = tsunami_kill_arch,
    .pci_map_irq        = dp264_map_irq,
    .pci_swizzle        = common_swizzle,
};
ALIAS_MV(dp264)
/* Monet machine vector: DP264 base with Monet PCI init and swizzle. */
struct alpha_machine_vector monet_mv __initmv = {
    .vector_name        = "Monet",
    DO_EV6_MMU,
    DO_DEFAULT_RTC,
    DO_TSUNAMI_IO,
    .machine_check      = tsunami_machine_check,
    .max_isa_dma_address    = ALPHA_MAX_ISA_DMA_ADDRESS,
    .min_io_address     = DEFAULT_IO_BASE,
    .min_mem_address    = DEFAULT_MEM_BASE,
    .pci_dac_offset     = TSUNAMI_DAC_OFFSET,

    .nr_irqs        = 64,
    .device_interrupt   = dp264_device_interrupt,

    .init_arch      = tsunami_init_arch,
    .init_irq       = dp264_init_irq,
    .init_rtc       = common_init_rtc,
    .init_pci       = monet_init_pci,
    .kill_arch      = tsunami_kill_arch,
    .pci_map_irq        = monet_map_irq,
    .pci_swizzle        = monet_swizzle,
};
/* Webbrick (DS10) machine vector: own arch init and IRQ table. */
struct alpha_machine_vector webbrick_mv __initmv = {
    .vector_name        = "Webbrick",
    DO_EV6_MMU,
    DO_DEFAULT_RTC,
    DO_TSUNAMI_IO,
    .machine_check      = tsunami_machine_check,
    .max_isa_dma_address    = ALPHA_MAX_ISA_DMA_ADDRESS,
    .min_io_address     = DEFAULT_IO_BASE,
    .min_mem_address    = DEFAULT_MEM_BASE,
    .pci_dac_offset     = TSUNAMI_DAC_OFFSET,

    .nr_irqs        = 64,
    .device_interrupt   = dp264_device_interrupt,

    .init_arch      = webbrick_init_arch,
    .init_irq       = dp264_init_irq,
    .init_rtc       = common_init_rtc,
    .init_pci       = common_init_pci,
    .kill_arch      = tsunami_kill_arch,
    .pci_map_irq        = webbrick_map_irq,
    .pci_swizzle        = common_swizzle,
};
/* Clipper machine vector: own IRQ init/table, Clipper PCI init. */
struct alpha_machine_vector clipper_mv __initmv = {
    .vector_name        = "Clipper",
    DO_EV6_MMU,
    DO_DEFAULT_RTC,
    DO_TSUNAMI_IO,
    .machine_check      = tsunami_machine_check,
    .max_isa_dma_address    = ALPHA_MAX_ISA_DMA_ADDRESS,
    .min_io_address     = DEFAULT_IO_BASE,
    .min_mem_address    = DEFAULT_MEM_BASE,
    .pci_dac_offset     = TSUNAMI_DAC_OFFSET,

    .nr_irqs        = 64,
    .device_interrupt   = dp264_device_interrupt,

    .init_arch      = tsunami_init_arch,
    .init_irq       = clipper_init_irq,
    .init_rtc       = common_init_rtc,
    .init_pci       = clipper_init_pci,
    .kill_arch      = tsunami_kill_arch,
    .pci_map_irq        = clipper_map_irq,
    .pci_swizzle        = common_swizzle,
};
/* Sharks strongly resemble Clipper, at least as far
 * as interrupt routing, etc, so we're using the
 * same functions as Clipper does (only init_pci differs:
 * Shark uses the plain common_init_pci).
 */
struct alpha_machine_vector shark_mv __initmv = {
    .vector_name        = "Shark",
    DO_EV6_MMU,
    DO_DEFAULT_RTC,
    DO_TSUNAMI_IO,
    .machine_check      = tsunami_machine_check,
    .max_isa_dma_address    = ALPHA_MAX_ISA_DMA_ADDRESS,
    .min_io_address     = DEFAULT_IO_BASE,
    .min_mem_address    = DEFAULT_MEM_BASE,
    .pci_dac_offset     = TSUNAMI_DAC_OFFSET,

    .nr_irqs        = 64,
    .device_interrupt   = dp264_device_interrupt,

    .init_arch      = tsunami_init_arch,
    .init_irq       = clipper_init_irq,
    .init_rtc       = common_init_rtc,
    .init_pci       = common_init_pci,
    .kill_arch      = tsunami_kill_arch,
    .pci_map_irq        = clipper_map_irq,
    .pci_swizzle        = common_swizzle,
};
/* No alpha_mv alias for webbrick/monet/clipper, since we compile them
in unconditionally with DP264; setup_arch knows how to cope. */
| gpl-2.0 |
stev47/linux | arch/powerpc/sysdev/bestcomm/fec.c | 9486 | 7075 | /*
* Bestcomm FEC tasks driver
*
*
* Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
* Copyright (C) 2003-2004 MontaVista, Software, Inc.
* ( by Dale Farnsworth <dfarnsworth@mvista.com> )
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/io.h>
#include "bestcomm.h"
#include "bestcomm_priv.h"
#include "fec.h"
/* ======================================================================== */
/* Task image/var/inc */
/* ======================================================================== */
/* fec tasks images */
extern u32 bcom_fec_rx_task[];
extern u32 bcom_fec_tx_task[];
/* rx task vars that need to be set before enabling the task.
 * Fields hold 32-bit addresses written into the task's SRAM variable
 * area (presumably physical/bus addresses — confirm against uCode). */
struct bcom_fec_rx_var {
    u32 enable;     /* (u16*) address of task's control register */
    u32 fifo;       /* (u32*) address of fec's fifo */
    u32 bd_base;    /* (struct bcom_bd*) beginning of ring buffer */
    u32 bd_last;    /* (struct bcom_bd*) end of ring buffer */
    u32 bd_start;   /* (struct bcom_bd*) current bd */
    u32 buffer_size;    /* size of receive buffer */
};
/* rx task incs that need to be set before enabling the task.
 * Each increment is a padded s16, matching the task image layout. */
struct bcom_fec_rx_inc {
    u16 pad0;
    s16 incr_bytes;
    u16 pad1;
    s16 incr_dst;
    u16 pad2;
    s16 incr_dst_ma;
};
/* tx task vars that need to be set before enabling the task.
 * DRD points at the self-modified descriptor located by
 * self_modified_drd() below. */
struct bcom_fec_tx_var {
    u32 DRD;        /* (u32*) address of self-modified DRD */
    u32 fifo;       /* (u32*) address of fec's fifo */
    u32 enable;     /* (u16*) address of task's control register */
    u32 bd_base;    /* (struct bcom_bd*) beginning of ring buffer */
    u32 bd_last;    /* (struct bcom_bd*) end of ring buffer */
    u32 bd_start;   /* (struct bcom_bd*) current bd */
    u32 buffer_size;    /* set by uCode for each packet */
};
/* tx task incs that need to be set before enabling the task.
 * Same padded-s16 layout as the rx increments. */
struct bcom_fec_tx_inc {
    u16 pad0;
    s16 incr_bytes;
    u16 pad1;
    s16 incr_src;
    u16 pad2;
    s16 incr_src_ma;
};
/* private structure in the task: FEC FIFO address and, for rx, the
 * maximum receive buffer size programmed into the task vars. */
struct bcom_fec_priv {
    phys_addr_t fifo;
    int maxbufsize;
};
/* ======================================================================== */
/* Task support code */
/* ======================================================================== */
/*
 * Allocate and initialize a BestComm FEC receive task.
 * Returns the task on success, NULL on allocation or reset failure.
 */
struct bcom_task *
bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize)
{
    struct bcom_fec_priv *priv;
    struct bcom_task *tsk;

    tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
            sizeof(struct bcom_fec_priv));
    if (tsk == NULL)
        return NULL;

    tsk->flags = BCOM_FLAGS_NONE;

    priv = tsk->priv;
    priv->fifo = fifo;
    priv->maxbufsize = maxbufsize;

    if (bcom_fec_rx_reset(tsk) != 0)
        goto err_free;

    return tsk;

err_free:
    bcom_task_free(tsk);
    return NULL;
}
EXPORT_SYMBOL_GPL(bcom_fec_rx_init);
/*
 * (Re)load the rx microcode and reprogram the task variables,
 * increments and buffer descriptor ring.  The task must be disabled
 * before the image/vars are touched, so the statement order below
 * is significant.  Returns 0 on success, -1 if the image load fails.
 */
int
bcom_fec_rx_reset(struct bcom_task *tsk)
{
    struct bcom_fec_priv *priv = tsk->priv;
    struct bcom_fec_rx_var *var;
    struct bcom_fec_rx_inc *inc;

    /* Shutdown the task */
    bcom_disable_task(tsk->tasknum);

    /* Reset the microcode */
    var = (struct bcom_fec_rx_var *) bcom_task_var(tsk->tasknum);
    inc = (struct bcom_fec_rx_inc *) bcom_task_inc(tsk->tasknum);

    if (bcom_load_image(tsk->tasknum, bcom_fec_rx_task))
        return -1;

    /* Point the task at its own control register and the FEC FIFO. */
    var->enable = bcom_eng->regs_base +
            offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
    var->fifo   = (u32) priv->fifo;
    var->bd_base    = tsk->bd_pa;
    var->bd_last    = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
    var->bd_start   = tsk->bd_pa;
    var->buffer_size = priv->maxbufsize;

    inc->incr_bytes = -(s16)sizeof(u32);    /* These should be in the   */
    inc->incr_dst   = sizeof(u32);      /* task image, but we stick */
    inc->incr_dst_ma= sizeof(u8);       /* to the official ones     */

    /* Reset the BDs */
    tsk->index = 0;
    tsk->outdex = 0;

    memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);

    /* Configure some stuff */
    bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA);
    bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);

    out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_RX], BCOM_IPR_FEC_RX);

    out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum);    /* Clear ints */

    return 0;
}
EXPORT_SYMBOL_GPL(bcom_fec_rx_reset);
/* Release an rx task; the generic free is all that is needed. */
void
bcom_fec_rx_release(struct bcom_task *tsk)
{
    /* Nothing special for the FEC tasks */
    bcom_task_free(tsk);
}
EXPORT_SYMBOL_GPL(bcom_fec_rx_release);
/*
 * Locate the self-modified DRD in the task's descriptor list by
 * scanning backwards from the last descriptor.
 * NOTE(review): the old comment said "2nd to last DRD", but the loop
 * breaks on drd_count == 3, i.e. the third DRD counted from the end
 * — confirm against the TX task image which one is correct.
 *
 * This is an ugly hack, but at least it's only done
 * once at initialization.
 */
static u32 *self_modified_drd(int tasknum)
{
    u32 *desc;
    int num_descs;
    int drd_count;
    int i;

    num_descs = bcom_task_num_descs(tasknum);
    desc = bcom_task_desc(tasknum) + num_descs - 1;
    drd_count = 0;
    for (i=0; i<num_descs; i++, desc--)
        if (bcom_desc_is_drd(*desc) && ++drd_count == 3)
            break;
    return desc;
}
/*
 * Allocate and initialize a BestComm FEC transmit task.
 * Returns the task on success, NULL on allocation or reset failure.
 */
struct bcom_task *
bcom_fec_tx_init(int queue_len, phys_addr_t fifo)
{
    struct bcom_fec_priv *priv;
    struct bcom_task *tsk;

    tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
            sizeof(struct bcom_fec_priv));
    if (tsk == NULL)
        return NULL;

    tsk->flags = BCOM_FLAGS_ENABLE_TASK;

    priv = tsk->priv;
    priv->fifo = fifo;

    if (bcom_fec_tx_reset(tsk) != 0)
        goto err_free;

    return tsk;

err_free:
    bcom_task_free(tsk);
    return NULL;
}
EXPORT_SYMBOL_GPL(bcom_fec_tx_init);
/*
 * (Re)load the tx microcode and reprogram the task variables,
 * increments and buffer descriptor ring, including the address of
 * the task's self-modified DRD.  The task must be disabled before
 * the image/vars are touched.  Returns 0 on success, -1 if the
 * image load fails.
 */
int
bcom_fec_tx_reset(struct bcom_task *tsk)
{
    struct bcom_fec_priv *priv = tsk->priv;
    struct bcom_fec_tx_var *var;
    struct bcom_fec_tx_inc *inc;

    /* Shutdown the task */
    bcom_disable_task(tsk->tasknum);

    /* Reset the microcode */
    var = (struct bcom_fec_tx_var *) bcom_task_var(tsk->tasknum);
    inc = (struct bcom_fec_tx_inc *) bcom_task_inc(tsk->tasknum);

    if (bcom_load_image(tsk->tasknum, bcom_fec_tx_task))
        return -1;

    var->enable = bcom_eng->regs_base +
            offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
    var->fifo   = (u32) priv->fifo;
    var->DRD    = bcom_sram_va2pa(self_modified_drd(tsk->tasknum));
    var->bd_base    = tsk->bd_pa;
    var->bd_last    = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
    var->bd_start   = tsk->bd_pa;

    inc->incr_bytes = -(s16)sizeof(u32);    /* These should be in the   */
    inc->incr_src   = sizeof(u32);      /* task image, but we stick */
    inc->incr_src_ma= sizeof(u8);       /* to the official ones     */

    /* Reset the BDs */
    tsk->index = 0;
    tsk->outdex = 0;

    memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);

    /* Configure some stuff */
    bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA);
    bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);

    out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_TX], BCOM_IPR_FEC_TX);

    out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum);    /* Clear ints */

    return 0;
}
EXPORT_SYMBOL_GPL(bcom_fec_tx_reset);
/* Release a tx task; the generic free is all that is needed. */
void
bcom_fec_tx_release(struct bcom_task *tsk)
{
    /* Nothing special for the FEC tasks */
    bcom_task_free(tsk);
}
EXPORT_SYMBOL_GPL(bcom_fec_tx_release);
MODULE_DESCRIPTION("BestComm FEC tasks driver");
MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
felipesanches/linux-sunxi | drivers/pcmcia/pxa2xx_mainstone.c | 9742 | 4364 | /*
* linux/drivers/pcmcia/pxa2xx_mainstone.c
*
* Mainstone PCMCIA specific routines.
*
* Created: May 12, 2004
* Author: Nicolas Pitre
* Copyright: MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <pcmcia/ss.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <mach/pxa2xx-regs.h>
#include <mach/mainstone.h>
#include "soc_common.h"
/*
 * Per-socket hardware setup: assign the card IRQ plus the card-detect
 * and status-change IRQ numbers/names for socket 0 or 1.
 */
static int mst_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
    int is_s0 = (skt->nr == 0);

    skt->socket.pci_irq = is_s0 ? MAINSTONE_S0_IRQ
                    : MAINSTONE_S1_IRQ;
    skt->stat[SOC_STAT_CD].irq = is_s0 ? MAINSTONE_S0_CD_IRQ
                       : MAINSTONE_S1_CD_IRQ;
    skt->stat[SOC_STAT_CD].name = is_s0 ? "PCMCIA0 CD" : "PCMCIA1 CD";
    skt->stat[SOC_STAT_BVD1].irq = is_s0 ? MAINSTONE_S0_STSCHG_IRQ
                     : MAINSTONE_S1_STSCHG_IRQ;
    skt->stat[SOC_STAT_BVD1].name = is_s0 ? "PCMCIA0 STSCHG"
                      : "PCMCIA1 STSCHG";
    return 0;
}
/* Last raw status word per socket, used to detect STSCHG/BVD1 edges. */
static unsigned long mst_pcmcia_status[2];

/*
 * Read the board's MST_PCMCIA0/1 status register and translate its
 * (mostly active-low) bits into the generic pcmcia_state fields.
 */
static void mst_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
                    struct pcmcia_state *state)
{
    unsigned long status, flip;

    status = (skt->nr == 0) ? MST_PCMCIA0 : MST_PCMCIA1;
    flip = (status ^ mst_pcmcia_status[skt->nr]) & MST_PCMCIA_nSTSCHG_BVD1;

    /*
     * Workaround for STSCHG which can't be deasserted:
     * We therefore disable/enable corresponding IRQs
     * as needed to avoid IRQ locks.
     */
    if (flip) {
        mst_pcmcia_status[skt->nr] = status;
        if (status & MST_PCMCIA_nSTSCHG_BVD1)
            enable_irq( (skt->nr == 0) ? MAINSTONE_S0_STSCHG_IRQ
                           : MAINSTONE_S1_STSCHG_IRQ );
        else
            disable_irq( (skt->nr == 0) ? MAINSTONE_S0_STSCHG_IRQ
                        : MAINSTONE_S1_STSCHG_IRQ );
    }

    state->detect = (status & MST_PCMCIA_nCD) ? 0 : 1;
    state->ready  = (status & MST_PCMCIA_nIRQ) ? 1 : 0;
    state->bvd1   = (status & MST_PCMCIA_nSTSCHG_BVD1) ? 1 : 0;
    state->bvd2   = (status & MST_PCMCIA_nSPKR_BVD2) ? 1 : 0;
    state->vs_3v  = (status & MST_PCMCIA_nVS1) ? 0 : 1;
    state->vs_Xv  = (status & MST_PCMCIA_nVS2) ? 0 : 1;
}
/*
 * Apply Vcc/Vpp and reset settings by composing a power-control word
 * and writing it to the socket's MST_PCMCIA register.
 * Returns 0 on success, -1 for unsupported voltages or socket number.
 */
static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
                       const socket_state_t *state)
{
    unsigned long power = 0;
    int ret = 0;

    /* Main supply voltage. */
    if (state->Vcc == 0)
        power |= MST_PCMCIA_PWR_VCC_0;
    else if (state->Vcc == 33)
        power |= MST_PCMCIA_PWR_VCC_33;
    else if (state->Vcc == 50)
        power |= MST_PCMCIA_PWR_VCC_50;
    else {
        printk(KERN_ERR "%s(): bad Vcc %u\n",
                __func__, state->Vcc);
        ret = -1;
    }

    /* Programming voltage: 0, 12V, or tied to Vcc. */
    if (state->Vpp == 0)
        power |= MST_PCMCIA_PWR_VPP_0;
    else if (state->Vpp == 120)
        power |= MST_PCMCIA_PWR_VPP_120;
    else if (state->Vpp == state->Vcc)
        power |= MST_PCMCIA_PWR_VPP_VCC;
    else {
        printk(KERN_ERR "%s(): bad Vpp %u\n",
                __func__, state->Vpp);
        ret = -1;
    }

    if (state->flags & SS_RESET)
        power |= MST_PCMCIA_RESET;

    if (skt->nr == 0)
        MST_PCMCIA0 = power;
    else if (skt->nr == 1)
        MST_PCMCIA1 = power;
    else
        ret = -1;

    return ret;
}
/* Board callbacks handed to the pxa2xx-pcmcia driver; two sockets. */
static struct pcmcia_low_level mst_pcmcia_ops __initdata = {
    .owner          = THIS_MODULE,
    .hw_init        = mst_pcmcia_hw_init,
    .socket_state       = mst_pcmcia_socket_state,
    .configure_socket   = mst_pcmcia_configure_socket,
    .nr         = 2,
};
static struct platform_device *mst_pcmcia_device;
/*
 * Register the pxa2xx-pcmcia platform device carrying the Mainstone
 * socket callbacks.  Bails out with -ENODEV on other machines; frees
 * the device on any registration failure.
 */
static int __init mst_pcmcia_init(void)
{
    int err;

    if (!machine_is_mainstone())
        return -ENODEV;

    mst_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
    if (!mst_pcmcia_device)
        return -ENOMEM;

    err = platform_device_add_data(mst_pcmcia_device, &mst_pcmcia_ops,
                       sizeof(mst_pcmcia_ops));
    if (!err)
        err = platform_device_add(mst_pcmcia_device);

    if (err)
        platform_device_put(mst_pcmcia_device);

    return err;
}
/* Unregister (and thereby free) the platform device on module exit. */
static void __exit mst_pcmcia_exit(void)
{
    platform_device_unregister(mst_pcmcia_device);
}
fs_initcall(mst_pcmcia_init);
module_exit(mst_pcmcia_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-pcmcia");
| gpl-2.0 |
thoemy/enru-3.1.10-g517147e | drivers/net/wireless/compat-wireless_R5.SP2.03/drivers/net/wireless/libertas/ethtool.c | 9742 | 3371 | #include <linux/hardirq.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include "decl.h"
#include "cmd.h"
#include "mesh.h"
static void lbs_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct lbs_private *priv = dev->ml_priv;
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.p%u",
priv->fwrelease >> 24 & 0xff,
priv->fwrelease >> 16 & 0xff,
priv->fwrelease >> 8 & 0xff,
priv->fwrelease & 0xff);
strlcpy(info->driver, "libertas", sizeof(info->driver));
strlcpy(info->version, lbs_driver_version, sizeof(info->version));
}
/*
 * All 8388 parts have 16KiB EEPROM size at the time of writing.
 * In case that changes this needs fixing.
 */
#define LBS_EEPROM_LEN 16384

/* Report the fixed EEPROM size to ethtool. */
static int lbs_ethtool_get_eeprom_len(struct net_device *dev)
{
    return LBS_EEPROM_LEN;
}
/*
 * Read a window of the device EEPROM via the 802.11 EEPROM-access
 * firmware command and copy the result to 'bytes'.
 * Returns 0 on success, -EINVAL for an out-of-range request, or the
 * firmware command's error code.
 */
static int lbs_ethtool_get_eeprom(struct net_device *dev,
                                  struct ethtool_eeprom *eeprom, u8 * bytes)
{
    struct lbs_private *priv = dev->ml_priv;
    struct cmd_ds_802_11_eeprom_access cmd;
    int ret;

    lbs_deb_enter(LBS_DEB_ETHTOOL);

    /*
     * Validate the window without computing eeprom->offset +
     * eeprom->len: that sum is 32-bit and could wrap for a huge
     * offset, slipping past the old bounds check.
     */
    if (eeprom->len > LBS_EEPROM_READ_LEN ||
        eeprom->offset > LBS_EEPROM_LEN ||
        eeprom->len > LBS_EEPROM_LEN - eeprom->offset) {
        ret = -EINVAL;
        goto out;
    }

    /* Shrink the command to cover only the bytes requested. */
    cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_ds_802_11_eeprom_access) -
                   LBS_EEPROM_READ_LEN + eeprom->len);
    cmd.action = cpu_to_le16(CMD_ACT_GET);
    cmd.offset = cpu_to_le16(eeprom->offset);
    cmd.len    = cpu_to_le16(eeprom->len);
    ret = lbs_cmd_with_response(priv, CMD_802_11_EEPROM_ACCESS, &cmd);
    if (!ret)
        memcpy(bytes, cmd.value, eeprom->len);

out:
    lbs_deb_leave_args(LBS_DEB_ETHTOOL, "ret %d", ret);
    return ret;
}
static void lbs_ethtool_get_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
struct lbs_private *priv = dev->ml_priv;
wol->supported = WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY;
if (priv->wol_criteria == EHS_REMOVE_WAKEUP)
return;
if (priv->wol_criteria & EHS_WAKE_ON_UNICAST_DATA)
wol->wolopts |= WAKE_UCAST;
if (priv->wol_criteria & EHS_WAKE_ON_MULTICAST_DATA)
wol->wolopts |= WAKE_MCAST;
if (priv->wol_criteria & EHS_WAKE_ON_BROADCAST_DATA)
wol->wolopts |= WAKE_BCAST;
if (priv->wol_criteria & EHS_WAKE_ON_MAC_EVENT)
wol->wolopts |= WAKE_PHY;
}
static int lbs_ethtool_set_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
struct lbs_private *priv = dev->ml_priv;
if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY))
return -EOPNOTSUPP;
priv->wol_criteria = 0;
if (wol->wolopts & WAKE_UCAST)
priv->wol_criteria |= EHS_WAKE_ON_UNICAST_DATA;
if (wol->wolopts & WAKE_MCAST)
priv->wol_criteria |= EHS_WAKE_ON_MULTICAST_DATA;
if (wol->wolopts & WAKE_BCAST)
priv->wol_criteria |= EHS_WAKE_ON_BROADCAST_DATA;
if (wol->wolopts & WAKE_PHY)
priv->wol_criteria |= EHS_WAKE_ON_MAC_EVENT;
if (wol->wolopts == 0)
priv->wol_criteria |= EHS_REMOVE_WAKEUP;
return 0;
}
/* ethtool entry points; mesh statistics only when mesh is built in. */
const struct ethtool_ops lbs_ethtool_ops = {
    .get_drvinfo = lbs_ethtool_get_drvinfo,
    .get_eeprom =  lbs_ethtool_get_eeprom,
    .get_eeprom_len = lbs_ethtool_get_eeprom_len,
#ifdef CONFIG_LIBERTAS_MESH
    .get_sset_count = lbs_mesh_ethtool_get_sset_count,
    .get_ethtool_stats = lbs_mesh_ethtool_get_stats,
    .get_strings = lbs_mesh_ethtool_get_strings,
#endif
    .get_wol = lbs_ethtool_get_wol,
    .set_wol = lbs_ethtool_set_wol,
};
| gpl-2.0 |
Infusion-OS/android_kernel_lge_gee | drivers/media/dvb/b2c2/flexcop-fe-tuner.c | 9998 | 16141 | /*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop-fe-tuner.c - methods for frontend attachment and DiSEqC controlling
* see flexcop.c for copyright information
*/
#include <media/tuner.h>
#include "flexcop.h"
#include "mt312.h"
#include "stv0299.h"
#include "s5h1420.h"
#include "itd1000.h"
#include "cx24113.h"
#include "cx24123.h"
#include "isl6421.h"
#include "mt352.h"
#include "bcm3510.h"
#include "nxt200x.h"
#include "dvb-pll.h"
#include "lgdt330x.h"
#include "tuner-simple.h"
#include "stv0297.h"
/* Can we use the specified front-end? Remember that if we are compiled
 * into the kernel we can't call code that's in modules. */
/* NOTE(review): using defined() inside a macro expansion is formally
 * undefined behavior (C99 6.10.1p4); GCC accepts it, but this may not
 * survive other preprocessors — confirm before porting/upgrading. */
#define FE_SUPPORTED(fe) (defined(CONFIG_DVB_##fe) || \
    (defined(CONFIG_DVB_##fe##_MODULE) && defined(MODULE)))
/* lnb control */
#if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)
/*
 * Drive the LNB supply through the misc_204 register: ACPI1_sig
 * powers the LNB down, LNB_L_H_sig selects 13V vs 18V.
 */
static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
    struct flexcop_device *fc = fe->dvb->priv;
    flexcop_ibi_value v;

    deb_tuner("polarity/voltage = %u\n", voltage);
    v = fc->read_ibi_reg(fc, misc_204);

    if (voltage == SEC_VOLTAGE_OFF) {
        v.misc_204.ACPI1_sig = 1;
    } else if (voltage == SEC_VOLTAGE_13) {
        v.misc_204.ACPI1_sig = 0;
        v.misc_204.LNB_L_H_sig = 0;
    } else if (voltage == SEC_VOLTAGE_18) {
        v.misc_204.ACPI1_sig = 0;
        v.misc_204.LNB_L_H_sig = 1;
    } else {
        err("unknown SEC_VOLTAGE value");
        return -EINVAL;
    }

    return fc->write_ibi_reg(fc, misc_204, v);
}
#endif
#if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312)
/* Chain to the demod's saved sleep hook, if one was recorded. */
static int flexcop_sleep(struct dvb_frontend* fe)
{
    struct flexcop_device *fc = fe->dvb->priv;

    return fc->fe_sleep ? fc->fe_sleep(fe) : 0;
}
#endif
/* SkyStar2 DVB-S rev 2.3 */
#if FE_SUPPORTED(MT312) && FE_SUPPORTED(PLL)
/*
 * Program the 22kHz continuous tone via the lnb_switch_freq_200
 * register: high/low counts of 0x1ff with the /2 prescaler generate
 * the tone; a zero high count (low forced to 0x1ff) turns it off.
 */
static int flexcop_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
    /* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */
    struct flexcop_device *fc = fe->dvb->priv;
    flexcop_ibi_value v;
    u16 ax;
    v.raw = 0;
    deb_tuner("tone = %u\n",tone);

    switch (tone) {
    case SEC_TONE_ON:
        ax = 0x01ff;
        break;
    case SEC_TONE_OFF:
        ax = 0;
        break;
    default:
        err("unknown SEC_TONE value");
        return -EINVAL;
    }

    v.lnb_switch_freq_200.LNB_CTLPrescaler_sig = 1; /* divide by 2 */

    v.lnb_switch_freq_200.LNB_CTLHighCount_sig = ax;
    v.lnb_switch_freq_200.LNB_CTLLowCount_sig  = ax == 0 ? 0x1ff : ax;

    return fc->write_ibi_reg(fc,lnb_switch_freq_200,v);
}
/*
 * Modulate one DiSEqC bit with the tone generator: total cell time is
 * 1500us, split 500/1000 for a '1' and 1000/500 for a '0'.
 */
static void flexcop_diseqc_send_bit(struct dvb_frontend* fe, int data)
{
    int on_us = data ? 500 : 1000;

    flexcop_set_tone(fe, SEC_TONE_ON);
    udelay(on_us);
    flexcop_set_tone(fe, SEC_TONE_OFF);
    udelay(1500 - on_us);
}
/* Send one byte MSB first, followed by its odd parity bit. */
static void flexcop_diseqc_send_byte(struct dvb_frontend* fe, int data)
{
    int bit, parity = 1;

    for (bit = 7; bit >= 0; bit--) {
        int d = (data >> bit) & 1;

        parity ^= d;
        flexcop_diseqc_send_bit(fe, d);
    }

    flexcop_diseqc_send_bit(fe, parity);
}
/*
 * Bit-bang a DiSEqC message, bracketed by ~16ms quiet gaps.
 * burst semantics: (unsigned long)-1 = no burst; non-zero = send the
 * 0xff byte (mini-DiSEqC "B"); zero = send an unmodulated ~12.5ms
 * tone (mini-DiSEqC "A").
 * NOTE(review): 'burst' is unsigned long compared against -1 — the
 * sentinel relies on -1 converting to ULONG_MAX; callers here only
 * pass 0 or a minicmd value.
 */
static int flexcop_send_diseqc_msg(struct dvb_frontend *fe,
    int len, u8 *msg, unsigned long burst)
{
    int i;

    flexcop_set_tone(fe, SEC_TONE_OFF);
    mdelay(16);

    for (i = 0; i < len; i++)
        flexcop_diseqc_send_byte(fe,msg[i]);
    mdelay(16);

    if (burst != -1) {
        if (burst)
            flexcop_diseqc_send_byte(fe, 0xff);
        else {
            flexcop_set_tone(fe, SEC_TONE_ON);
            mdelay(12);
            udelay(500);
            flexcop_set_tone(fe, SEC_TONE_OFF);
        }
        msleep(20);
    }
    return 0;
}
/* Send a full DiSEqC master command (burst argument fixed at 0). */
static int flexcop_diseqc_send_master_cmd(struct dvb_frontend *fe,
    struct dvb_diseqc_master_cmd *cmd)
{
    return flexcop_send_diseqc_msg(fe, cmd->msg_len, cmd->msg, 0);
}
/* Send only a mini-DiSEqC burst: zero-length message, minicmd as burst. */
static int flexcop_diseqc_send_burst(struct dvb_frontend *fe,
    fe_sec_mini_cmd_t minicmd)
{
    return flexcop_send_diseqc_msg(fe, 0, NULL, minicmd);
}
/* MT312 demod config for the Samsung TBDU18132 NIM (SkyStar2 2.3). */
static struct mt312_config skystar23_samsung_tbdu18132_config = {
    .demod_address = 0x0e,
};
/*
 * Attach the MT312 demod + PLL for SkyStar2 rev 2.3 and install the
 * FlexCop-side DiSEqC/tone/voltage handlers; the demod's sleep hook
 * is saved and wrapped.  Returns 1 on success, 0 on failure.
 */
static int skystar2_rev23_attach(struct flexcop_device *fc,
    struct i2c_adapter *i2c)
{
    struct dvb_frontend_ops *ops;

    fc->fe = dvb_attach(mt312_attach, &skystar23_samsung_tbdu18132_config, i2c);
    if (!fc->fe)
        return 0;

    if (!dvb_attach(dvb_pll_attach, fc->fe, 0x61, i2c,
            DVB_PLL_SAMSUNG_TBDU18132))
        return 0;

    ops = &fc->fe->ops;
    ops->diseqc_send_master_cmd = flexcop_diseqc_send_master_cmd;
    ops->diseqc_send_burst      = flexcop_diseqc_send_burst;
    ops->set_tone               = flexcop_set_tone;
    ops->set_voltage            = flexcop_set_voltage;
    fc->fe_sleep                = ops->sleep;
    ops->sleep                  = flexcop_sleep;
    return 1;
}
#else
#define skystar2_rev23_attach NULL
#endif
/* SkyStar2 DVB-S rev 2.6 */
#if FE_SUPPORTED(STV0299) && FE_SUPPORTED(PLL)
/*
 * Program STV0299 clock registers (0x13/0x14) and the symbol-rate
 * ratio (0x1f-0x21) for the given symbol rate band.
 * NOTE(review): for srate >= 45 MS/s both aclk and bclk stay 0 and
 * are still written — presumably unreachable for this NIM; confirm.
 */
static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend *fe,
    u32 srate, u32 ratio)
{
    u8 aclk = 0;
    u8 bclk = 0;

    if (srate < 1500000) {
        aclk = 0xb7; bclk = 0x47;
    } else if (srate < 3000000) {
        aclk = 0xb7; bclk = 0x4b;
    } else if (srate < 7000000) {
        aclk = 0xb7; bclk = 0x4f;
    } else if (srate < 14000000) {
        aclk = 0xb7; bclk = 0x53;
    } else if (srate < 30000000) {
        aclk = 0xb6; bclk = 0x53;
    } else if (srate < 45000000) {
        aclk = 0xb4; bclk = 0x51;
    }

    stv0299_writereg(fe, 0x13, aclk);
    stv0299_writereg(fe, 0x14, bclk);
    stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
    stv0299_writereg(fe, 0x20, (ratio >>  8) & 0xff);
    stv0299_writereg(fe, 0x21,  ratio        & 0xf0);
    return 0;
}
/* STV0299 init sequence as register/value pairs; 0xff,0xff ends it. */
static u8 samsung_tbmu24112_inittab[] = {
    0x01, 0x15,
    0x02, 0x30,
    0x03, 0x00,
    0x04, 0x7D,
    0x05, 0x35,
    0x06, 0x02,
    0x07, 0x00,
    0x08, 0xC3,
    0x0C, 0x00,
    0x0D, 0x81,
    0x0E, 0x23,
    0x0F, 0x12,
    0x10, 0x7E,
    0x11, 0x84,
    0x12, 0xB9,
    0x13, 0x88,
    0x14, 0x89,
    0x15, 0xC9,
    0x16, 0x00,
    0x17, 0x5C,
    0x18, 0x00,
    0x19, 0x00,
    0x1A, 0x00,
    0x1C, 0x00,
    0x1D, 0x00,
    0x1E, 0x00,
    0x1F, 0x3A,
    0x20, 0x2E,
    0x21, 0x80,
    0x22, 0xFF,
    0x23, 0xC1,
    0x28, 0x00,
    0x29, 0x1E,
    0x2A, 0x14,
    0x2B, 0x0F,
    0x2C, 0x09,
    0x2D, 0x05,
    0x31, 0x1F,
    0x32, 0x19,
    0x33, 0xFE,
    0x34, 0x93,
    0xff, 0xff,
};
/* STV0299 demod config for Samsung TBMU24112 (SkyStar2 rev 2.6). */
static struct stv0299_config samsung_tbmu24112_config = {
    .demod_address = 0x68,
    .inittab = samsung_tbmu24112_inittab,
    .mclk = 88000000UL,
    .invert = 0,
    .skip_reinit = 0,
    .lock_output = STV0299_LOCKOUTPUT_LK,
    .volt13_op0_op1 = STV0299_VOLT13_OP1,
    .min_delay_ms = 100,
    .set_symbol_rate = samsung_tbmu24112_set_symbol_rate,
};
/*
 * Attach STV0299 demod + PLL for SkyStar2 rev 2.6; installs the
 * FlexCop voltage handler and wraps the demod's sleep hook.
 * Returns 1 on success, 0 on failure.
 */
static int skystar2_rev26_attach(struct flexcop_device *fc,
    struct i2c_adapter *i2c)
{
    fc->fe = dvb_attach(stv0299_attach, &samsung_tbmu24112_config, i2c);
    if (!fc->fe)
        return 0;

    if (!dvb_attach(dvb_pll_attach, fc->fe, 0x61, i2c,
            DVB_PLL_SAMSUNG_TBMU24112))
        return 0;

    fc->fe->ops.set_voltage = flexcop_set_voltage;
    fc->fe_sleep = fc->fe->ops.sleep;
    fc->fe->ops.sleep = flexcop_sleep;
    return 1;
}
#else
#define skystar2_rev26_attach NULL
#endif
/* SkyStar2 DVB-S rev 2.7 */
#if FE_SUPPORTED(S5H1420) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_ITD1000)
/* S5H1420 demod config for SkyStar2 rev 2.7 (serial TS output). */
static struct s5h1420_config skystar2_rev2_7_s5h1420_config = {
    .demod_address = 0x53,
    .invert = 1,
    .repeated_start_workaround = 1,
    .serial_mpeg = 1,
};
/* ITD1000 tuner config (behind the S5H1420's tuner i2c gate). */
static struct itd1000_config skystar2_rev2_7_itd1000_config = {
    .i2c_address = 0x61,
};
/*
 * Attach S5H1420 demod, ISL6421 LNB controller and ITD1000 tuner for
 * SkyStar2 rev 2.7.  Temporarily sets no_base_addr on two i2c busses
 * (the parts dislike repeated-start reads) and lowers the i2c clock
 * for the ITD1000; the error paths restore no_base_addr.
 * Returns 1 on success, 0 on failure.
 */
static int skystar2_rev27_attach(struct flexcop_device *fc,
    struct i2c_adapter *i2c)
{
    flexcop_ibi_value r108;
    struct i2c_adapter *i2c_tuner;

    /* enable no_base_addr - no repeated start when reading */
    fc->fc_i2c_adap[0].no_base_addr = 1;
    fc->fe = dvb_attach(s5h1420_attach, &skystar2_rev2_7_s5h1420_config,
                i2c);
    if (!fc->fe)
        goto fail;

    i2c_tuner = s5h1420_get_tuner_i2c_adapter(fc->fe);
    if (!i2c_tuner)
        goto fail;

    fc->fe_sleep = fc->fe->ops.sleep;
    fc->fe->ops.sleep = flexcop_sleep;

    /* enable no_base_addr - no repeated start when reading */
    fc->fc_i2c_adap[2].no_base_addr = 1;
    if (!dvb_attach(isl6421_attach, fc->fe, &fc->fc_i2c_adap[2].i2c_adap,
            0x08, 1, 1)) {
        err("ISL6421 could NOT be attached");
        goto fail_isl;
    }
    info("ISL6421 successfully attached");

    /* the ITD1000 requires a lower i2c clock - is it a problem ? */
    r108.raw = 0x00000506;
    fc->write_ibi_reg(fc, tw_sm_c_108, r108);
    if (!dvb_attach(itd1000_attach, fc->fe, i2c_tuner,
            &skystar2_rev2_7_itd1000_config)) {
        err("ITD1000 could NOT be attached");
        /* Should i2c clock be restored? */
        goto fail_isl;
    }
    info("ITD1000 successfully attached");

    return 1;

fail_isl:
    fc->fc_i2c_adap[2].no_base_addr = 0;
fail:
    /* for the next devices we need it again */
    fc->fc_i2c_adap[0].no_base_addr = 0;
    return 0;
}
#else
#define skystar2_rev27_attach NULL
#endif
/* SkyStar2 rev 2.8 */
#if FE_SUPPORTED(CX24123) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_CX24113)
/* CX24123 demod config for SkyStar2 rev 2.8 (external CX24113 PLL). */
static struct cx24123_config skystar2_rev2_8_cx24123_config = {
    .demod_address = 0x55,
    .dont_use_pll = 1,
    .agc_callback = cx24113_agc_callback,
};
/* CX24113 tuner config (behind the CX24123's tuner i2c adapter). */
static const struct cx24113_config skystar2_rev2_8_cx24113_config = {
    .i2c_addr = 0x54,
    .xtal_khz = 10111,
};
/*
 * Attach CX24123 demod, CX24113 tuner and ISL6421 LNB controller for
 * SkyStar2 rev 2.8.  no_base_addr is set for the ISL6421 bus and
 * restored on failure.  Returns 1 on success, 0 on failure.
 */
static int skystar2_rev28_attach(struct flexcop_device *fc,
    struct i2c_adapter *i2c)
{
    struct i2c_adapter *i2c_tuner;

    fc->fe = dvb_attach(cx24123_attach, &skystar2_rev2_8_cx24123_config,
                i2c);
    if (!fc->fe)
        return 0;

    i2c_tuner = cx24123_get_tuner_i2c_adapter(fc->fe);
    if (!i2c_tuner)
        return 0;

    if (!dvb_attach(cx24113_attach, fc->fe, &skystar2_rev2_8_cx24113_config,
            i2c_tuner)) {
        err("CX24113 could NOT be attached");
        return 0;
    }
    info("CX24113 successfully attached");

    fc->fc_i2c_adap[2].no_base_addr = 1;
    if (!dvb_attach(isl6421_attach, fc->fe, &fc->fc_i2c_adap[2].i2c_adap,
            0x08, 0, 0)) {
        err("ISL6421 could NOT be attached");
        fc->fc_i2c_adap[2].no_base_addr = 0;
        return 0;
    }
    info("ISL6421 successfully attached");

    /* TODO on i2c_adap[1] addr 0x11 (EEPROM) there seems to be an
     * IR-receiver (PIC16F818) - but the card has no input for that ??? */
    return 1;
}
#else
#define skystar2_rev28_attach NULL
#endif
/* AirStar DVB-T */
#if FE_SUPPORTED(MT352) && FE_SUPPORTED(PLL)
/*
 * MT352 init for the Samsung TDTC9251DH0 NIM: program clock config,
 * issue a soft reset, then set ADC control, AGC and capture range.
 * The 2ms delay lets the clock settings settle before the reset.
 */
static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend *fe)
{
    static u8 mt352_clock_config[] = { 0x89, 0x18, 0x2d };
    static u8 mt352_reset[] = { 0x50, 0x80 };
    static u8 mt352_adc_ctl_1_cfg[] = { 0x8E, 0x40 };
    static u8 mt352_agc_cfg[] = { 0x67, 0x28, 0xa1 };
    static u8 mt352_capt_range_cfg[] = { 0x75, 0x32 };

    mt352_write(fe, mt352_clock_config, sizeof(mt352_clock_config));
    udelay(2000);
    mt352_write(fe, mt352_reset, sizeof(mt352_reset));
    mt352_write(fe, mt352_adc_ctl_1_cfg, sizeof(mt352_adc_ctl_1_cfg));
    mt352_write(fe, mt352_agc_cfg, sizeof(mt352_agc_cfg));
    mt352_write(fe, mt352_capt_range_cfg, sizeof(mt352_capt_range_cfg));
    return 0;
}
/* MT352 demod config for the AirStar DVB-T (Samsung TDTC9251DH0). */
static struct mt352_config samsung_tdtc9251dh0_config = {
    .demod_address = 0x0f,
    .demod_init = samsung_tdtc9251dh0_demod_init,
};
/* Attach MT352 demod + PLL for AirStar DVB-T; 1 on success, 0 on fail. */
static int airstar_dvbt_attach(struct flexcop_device *fc,
    struct i2c_adapter *i2c)
{
    fc->fe = dvb_attach(mt352_attach, &samsung_tdtc9251dh0_config, i2c);
    if (!fc->fe)
        return 0;

    return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL,
                DVB_PLL_SAMSUNG_TDTC9251DH0);
}
#else
#define airstar_dvbt_attach NULL
#endif
/* AirStar ATSC 1st generation */
#if FE_SUPPORTED(BCM3510)
/* Firmware-load callback handed to the BCM3510 driver; resolves the
 * owning flexcop device from the frontend's dvb adapter private data. */
static int flexcop_fe_request_firmware(struct dvb_frontend *fe,
    const struct firmware **fw, char* name)
{
    struct flexcop_device *fc = fe->dvb->priv;

    return request_firmware(fw, name, fc->dev);
}

static struct bcm3510_config air2pc_atsc_first_gen_config = {
    .demod_address = 0x0f,
    .request_firmware = flexcop_fe_request_firmware,
};

/* AirStar ATSC 1st generation: BCM3510 only, no separate tuner attach
 * step.  Returns 1 if the demod was found, 0 otherwise. */
static int airstar_atsc1_attach(struct flexcop_device *fc,
    struct i2c_adapter *i2c)
{
    fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c);
    return fc->fe != NULL;
}
#else
#define airstar_atsc1_attach NULL
#endif
/* AirStar ATSC 2nd generation */
#if FE_SUPPORTED(NXT200X) && FE_SUPPORTED(PLL)
static struct nxt200x_config samsung_tbmv_config = {
    .demod_address = 0x0a,
};

/* AirStar ATSC 2nd generation: NXT200x demod with a Samsung TBMV PLL. */
static int airstar_atsc2_attach(struct flexcop_device *fc,
    struct i2c_adapter *i2c)
{
    fc->fe = dvb_attach(nxt200x_attach, &samsung_tbmv_config, i2c);
    if (fc->fe == NULL)
        return 0;

    return dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL,
            DVB_PLL_SAMSUNG_TBMV) != NULL;
}
#else
#define airstar_atsc2_attach NULL
#endif
/* AirStar ATSC 3rd generation */
#if FE_SUPPORTED(LGDT330X)
static struct lgdt330x_config air2pc_atsc_hd5000_config = {
.demod_address = 0x59,
.demod_chip = LGDT3303,
.serial_mpeg = 0x04,
.clock_polarity_flip = 1,
};
static int airstar_atsc3_attach(struct flexcop_device *fc,
struct i2c_adapter *i2c)
{
fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, i2c);
if (!fc->fe)
return 0;
return !!dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61,
TUNER_LG_TDVS_H06XF);
}
#else
#define airstar_atsc3_attach NULL
#endif
/* CableStar2 DVB-C */
#if FE_SUPPORTED(STV0297) && FE_SUPPORTED(PLL)
/* Register init table for the STV0297 on the ALPS TDEE4 NIM: flat list
 * of {reg, val} pairs, ending in 0xff, 0xff — presumably the
 * end-of-table sentinel consumed by the stv0297 driver (see its inittab
 * handling). */
static u8 alps_tdee4_stv0297_inittab[] = {
    0x80, 0x01,
    0x80, 0x00,
    0x81, 0x01,
    0x81, 0x00,
    0x00, 0x48,
    0x01, 0x58,
    0x03, 0x00,
    0x04, 0x00,
    0x07, 0x00,
    0x08, 0x00,
    0x30, 0xff,
    0x31, 0x9d,
    0x32, 0xff,
    0x33, 0x00,
    0x34, 0x29,
    0x35, 0x55,
    0x36, 0x80,
    0x37, 0x6e,
    0x38, 0x9c,
    0x40, 0x1a,
    0x41, 0xfe,
    0x42, 0x33,
    0x43, 0x00,
    0x44, 0xff,
    0x45, 0x00,
    0x46, 0x00,
    0x49, 0x04,
    0x4a, 0x51,
    0x4b, 0xf8,
    0x52, 0x30,
    0x53, 0x06,
    0x59, 0x06,
    0x5a, 0x5e,
    0x5b, 0x04,
    0x61, 0x49,
    0x62, 0x0a,
    0x70, 0xff,
    0x71, 0x04,
    0x72, 0x00,
    0x73, 0x00,
    0x74, 0x0c,
    0x80, 0x20,
    0x81, 0x00,
    0x82, 0x30,
    0x83, 0x00,
    0x84, 0x04,
    0x85, 0x22,
    0x86, 0x08,
    0x87, 0x1b,
    0x88, 0x00,
    0x89, 0x00,
    0x90, 0x00,
    0x91, 0x04,
    0xa0, 0x86,
    0xa1, 0x00,
    0xa2, 0x00,
    0xb0, 0x91,
    0xb1, 0x0b,
    0xc0, 0x5b,
    0xc1, 0x10,
    0xc2, 0x12,
    0xd0, 0x02,
    0xd1, 0x00,
    0xd2, 0x00,
    0xd3, 0x00,
    0xd4, 0x02,
    0xd5, 0x00,
    0xde, 0x00,
    0xdf, 0x01,
    0xff, 0xff,
};

static struct stv0297_config alps_tdee4_stv0297_config = {
    .demod_address = 0x1c,
    .inittab = alps_tdee4_stv0297_inittab,
};
/*
 * CableStar2 DVB-C: STV0297 demod; the tuner is reached through flexcop
 * I2C adapter 2 instead of the demod's I2C gate.  Returns 1 on success,
 * 0 on failure; the no_base_addr workaround on adapter 0 is undone on
 * every failure path so the next probe starts from a clean state.
 */
static int cablestar2_attach(struct flexcop_device *fc,
    struct i2c_adapter *i2c)
{
    fc->fc_i2c_adap[0].no_base_addr = 1;
    fc->fe = dvb_attach(stv0297_attach, &alps_tdee4_stv0297_config, i2c);
    if (!fc->fe)
        goto fail;

    /* This tuner doesn't use the stv0297's I2C gate, but instead the
     * tuner is connected to a different flexcop I2C adapter. */
    if (fc->fe->ops.i2c_gate_ctrl)
        fc->fe->ops.i2c_gate_ctrl(fc->fe, 0);
    /* Disable the gate callback entirely so the DVB core never re-opens
     * it for tuner traffic. */
    fc->fe->ops.i2c_gate_ctrl = NULL;

    if (!dvb_attach(dvb_pll_attach, fc->fe, 0x61,
            &fc->fc_i2c_adap[2].i2c_adap, DVB_PLL_TDEE4))
        goto fail;
    return 1;

fail:
    /* Reset for next frontend to try */
    fc->fc_i2c_adap[0].no_base_addr = 0;
    return 0;
}
#else
#define cablestar2_attach NULL
#endif
/* Probe table walked in array order by flexcop_frontend_init().
 * NOTE(review): rev2.3 is deliberately last in the list — presumably
 * because its detection is the least specific; confirm before
 * reordering entries. */
static struct {
    flexcop_device_type_t type;
    int (*attach)(struct flexcop_device *, struct i2c_adapter *);
} flexcop_frontends[] = {
    { FC_SKY_REV27, skystar2_rev27_attach },
    { FC_SKY_REV28, skystar2_rev28_attach },
    { FC_SKY_REV26, skystar2_rev26_attach },
    { FC_AIR_DVBT, airstar_dvbt_attach },
    { FC_AIR_ATSC2, airstar_atsc2_attach },
    { FC_AIR_ATSC3, airstar_atsc3_attach },
    { FC_AIR_ATSC1, airstar_atsc1_attach },
    { FC_CABLE, cablestar2_attach },
    { FC_SKY_REV23, skystar2_rev23_attach },
};
/* try to figure out the frontend */
/*
 * Try each known card type in table order until one attach succeeds,
 * then register the resulting frontend with the DVB core.
 * Returns 0 on success, -ENODEV if no frontend matched, -EINVAL if
 * registration failed (the frontend is detached again in that case).
 */
int flexcop_frontend_init(struct flexcop_device *fc)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flexcop_frontends); i++) {
        if (!flexcop_frontends[i].attach)
            continue;
        /* type needs to be set before, because of some workarounds
         * done based on the probed card type */
        fc->dev_type = flexcop_frontends[i].type;
        if (flexcop_frontends[i].attach(fc, &fc->fc_i2c_adap[0].i2c_adap))
            goto fe_found;
        /* Clean up partially attached frontend */
        if (fc->fe) {
            dvb_frontend_detach(fc->fe);
            fc->fe = NULL;
        }
    }
    fc->dev_type = FC_UNK;
    err("no frontend driver found for this B2C2/FlexCop adapter");
    return -ENODEV;

fe_found:
    info("found '%s' .", fc->fe->ops.info.name);
    if (dvb_register_frontend(&fc->dvb_adapter, fc->fe)) {
        err("frontend registration failed!");
        dvb_frontend_detach(fc->fe);
        fc->fe = NULL;
        return -EINVAL;
    }
    fc->init_state |= FC_STATE_FE_INIT;
    return 0;
}
/* Unregister and detach the frontend if it was successfully initialised;
 * the FC_STATE_FE_INIT flag is cleared unconditionally. */
void flexcop_frontend_exit(struct flexcop_device *fc)
{
    int fe_was_initialized = (fc->init_state & FC_STATE_FE_INIT) != 0;

    if (fe_was_initialized) {
        dvb_unregister_frontend(fc->fe);
        dvb_frontend_detach(fc->fe);
    }

    fc->init_state &= ~FC_STATE_FE_INIT;
}
| gpl-2.0 |
slz/samsung-kernel-msm7x30 | drivers/media/dvb/mantis/hopper_vp3028.c | 11278 | 2297 | /*
Hopper VP-3028 driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "zl10353.h"
#include "mantis_common.h"
#include "mantis_ioc.h"
#include "mantis_dvb.h"
#include "hopper_vp3028.h"
struct zl10353_config hopper_vp3028_config = {
    .demod_address = 0x0f,
};

#define MANTIS_MODEL_NAME "VP-3028"
#define MANTIS_DEV_TYPE "DVB-T"

/*
 * Power up and reset the VP-3028 frontend, then probe for the ZL10353
 * DVB-T demodulator.  Returns 0 on success, -1 if the demod is not
 * found, -EIO if the (second) power-on fails.
 */
static int vp3028_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
{
    struct i2c_adapter *adapter = &mantis->adapter;
    struct mantis_hwconfig *config = mantis->hwconfig;
    int err = 0;

    /* Hold the frontend in reset while the first power-on happens. */
    mantis_gpio_set_bits(mantis, config->reset, 0);
    msleep(100);
    /* NOTE(review): the result of this first POWER_ON call is overwritten
     * by the second call below and never checked — confirm whether the
     * double power-on around the reset toggle is intentional. */
    err = mantis_frontend_power(mantis, POWER_ON);
    msleep(100);
    mantis_gpio_set_bits(mantis, config->reset, 1);

    err = mantis_frontend_power(mantis, POWER_ON);
    if (err == 0) {
        msleep(250);
        dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)");
        /* NOTE(review): 'fe' is a by-value parameter, so this assignment
         * never reaches the caller — verify how mantis_dvb picks up the
         * attached frontend. */
        fe = dvb_attach(zl10353_attach, &hopper_vp3028_config, adapter);
        if (!fe)
            return -1;
    } else {
        dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
            adapter->name,
            err);
        return -EIO;
    }

    dprintk(MANTIS_ERROR, 1, "Done!");
    return 0;
}

/* Board description consumed by the mantis core driver. */
struct mantis_hwconfig vp3028_config = {
    .model_name = MANTIS_MODEL_NAME,
    .dev_type = MANTIS_DEV_TYPE,
    .ts_size = MANTIS_TS_188,
    .baud_rate = MANTIS_BAUD_9600,
    .parity = MANTIS_PARITY_NONE,
    .bytes = 0,
    .frontend_init = vp3028_frontend_init,
    .power = GPIF_A00,
    .reset = GPIF_A03,
};
| gpl-2.0 |
jds2001/qemu | qemu-coroutine-lock.c | 15 | 4530 | /*
* coroutine queues and locks
*
* Copyright (c) 2011 Kevin Wolf <kwolf@redhat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu-common.h"
#include "qemu-coroutine.h"
#include "qemu-coroutine-int.h"
#include "qemu-queue.h"
#include "qemu-aio.h"
#include "trace.h"
static QTAILQ_HEAD(, Coroutine) unlock_bh_queue =
QTAILQ_HEAD_INITIALIZER(unlock_bh_queue);
static QEMUBH* unlock_bh;
/* Bottom half (scheduled via unlock_bh) that re-enters every coroutine
 * currently queued on the global wakeup list. */
static void qemu_co_queue_next_bh(void *opaque)
{
    Coroutine *co;

    trace_qemu_co_queue_next_bh();
    for (;;) {
        co = QTAILQ_FIRST(&unlock_bh_queue);
        if (co == NULL) {
            break;
        }
        QTAILQ_REMOVE(&unlock_bh_queue, co, co_queue_next);
        qemu_coroutine_enter(co, NULL);
    }
}

void qemu_co_queue_init(CoQueue *queue)
{
    QTAILQ_INIT(&queue->entries);

    /* One wakeup bottom half is shared by all queues; create it lazily
     * on the first queue initialisation. */
    if (unlock_bh == NULL) {
        unlock_bh = qemu_bh_new(qemu_co_queue_next_bh, NULL);
    }
}

/* Suspend the calling coroutine at the tail of @queue until woken. */
void coroutine_fn qemu_co_queue_wait(CoQueue *queue)
{
    Coroutine *co = qemu_coroutine_self();

    QTAILQ_INSERT_TAIL(&queue->entries, co, co_queue_next);
    qemu_coroutine_yield();
    assert(qemu_in_coroutine());
}

/* Like qemu_co_queue_wait(), but wait at the head of @queue so this
 * coroutine is woken before existing waiters. */
void coroutine_fn qemu_co_queue_wait_insert_head(CoQueue *queue)
{
    Coroutine *co = qemu_coroutine_self();

    QTAILQ_INSERT_HEAD(&queue->entries, co, co_queue_next);
    qemu_coroutine_yield();
    assert(qemu_in_coroutine());
}
/* Move the first waiter of @queue onto the global wakeup list and
 * schedule the wakeup bottom half.  Returns true iff a waiter existed. */
bool qemu_co_queue_next(CoQueue *queue)
{
    Coroutine *co = QTAILQ_FIRST(&queue->entries);

    if (co == NULL) {
        return false;
    }

    QTAILQ_REMOVE(&queue->entries, co, co_queue_next);
    QTAILQ_INSERT_TAIL(&unlock_bh_queue, co, co_queue_next);
    trace_qemu_co_queue_next(co);
    qemu_bh_schedule(unlock_bh);
    return true;
}

/* Schedule every waiter of @queue for wakeup. */
void qemu_co_queue_restart_all(CoQueue *queue)
{
    while (qemu_co_queue_next(queue)) {
        /* Do nothing */
    }
}

/* True iff no coroutine is waiting on @queue. */
bool qemu_co_queue_empty(CoQueue *queue)
{
    return QTAILQ_FIRST(&queue->entries) == NULL;
}
/* Initialise a coroutine mutex to the unlocked state. */
void qemu_co_mutex_init(CoMutex *mutex)
{
    memset(mutex, 0, sizeof(*mutex));
    qemu_co_queue_init(&mutex->queue);
}

/* Acquire @mutex, yielding the calling coroutine while it is held by
 * someone else. */
void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();

    trace_qemu_co_mutex_lock_entry(mutex, self);

    /* Re-check after every wakeup: another woken coroutine may have
     * taken the mutex before we resumed. */
    while (mutex->locked) {
        qemu_co_queue_wait(&mutex->queue);
    }

    mutex->locked = true;

    trace_qemu_co_mutex_lock_return(mutex, self);
}

/* Release @mutex (which must be held) and wake at most one waiter; the
 * waiter re-tests 'locked' in its own lock loop. */
void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();

    trace_qemu_co_mutex_unlock_entry(mutex, self);

    assert(mutex->locked == true);
    assert(qemu_in_coroutine());

    mutex->locked = false;
    qemu_co_queue_next(&mutex->queue);

    trace_qemu_co_mutex_unlock_return(mutex, self);
}
/* Initialise a coroutine read/write lock: no writer, zero readers. */
void qemu_co_rwlock_init(CoRwlock *lock)
{
    memset(lock, 0, sizeof(*lock));
    qemu_co_queue_init(&lock->queue);
}

/* Take a shared (read) lock; multiple readers may hold it at once.
 * NOTE(review): this can yield via qemu_co_queue_wait() but, unlike the
 * queue helpers above, is not marked coroutine_fn — confirm whether the
 * annotation was omitted intentionally. */
void qemu_co_rwlock_rdlock(CoRwlock *lock)
{
    while (lock->writer) {
        qemu_co_queue_wait(&lock->queue);
    }
    lock->reader++;
}

/* Drop either side of the lock.  A departing writer wakes everyone
 * (readers and writers alike); the last departing reader wakes a single
 * waiter, which can only be a writer since readers never wait while
 * only readers hold the lock. */
void qemu_co_rwlock_unlock(CoRwlock *lock)
{
    assert(qemu_in_coroutine());
    if (lock->writer) {
        lock->writer = false;
        qemu_co_queue_restart_all(&lock->queue);
    } else {
        lock->reader--;
        assert(lock->reader >= 0);
        /* Wakeup only one waiting writer */
        if (!lock->reader) {
            qemu_co_queue_next(&lock->queue);
        }
    }
}

/* Take the exclusive (write) lock: wait until there is neither a writer
 * nor any reader.  Same missing-coroutine_fn note as rdlock above. */
void qemu_co_rwlock_wrlock(CoRwlock *lock)
{
    while (lock->writer || lock->reader) {
        qemu_co_queue_wait(&lock->queue);
    }
    lock->writer = true;
}
| gpl-2.0 |
superatmos/android_kernel_samsung_t1 | arch/powerpc/mm/gup.c | 15 | 4974 | /*
* Lockless get_user_pages_fast for powerpc
*
* Copyright (C) 2008 Nick Piggin
* Copyright (C) 2008 Novell Inc.
*/
#undef DEBUG
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>
#ifdef __HAVE_ARCH_PTE_SPECIAL
/* Pin a compound-page tail by incrementing its _mapcount; the VM_BUG_ONs
 * assert the expected tail-page state (non-negative mapcount, zero
 * _count) before the increment. */
static inline void get_huge_page_tail(struct page *page)
{
    /*
     * __split_huge_page_refcount() cannot run
     * from under us.
     */
    VM_BUG_ON(page_mapcount(page) < 0);
    VM_BUG_ON(atomic_read(&page->_count) != 0);
    atomic_inc(&page->_mapcount);
}
/*
* The performance critical leaf functions are made noinline otherwise gcc
* inlines everything into a single function which results in too much
* register pressure.
*/
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
/* Walk the PTEs covering [addr, end) under @pmd, speculatively grabbing
 * a reference on each mapped page.  Returns 1 on success, 0 to make the
 * caller fall back to the slow path.  The mask/result compare requires
 * PRESENT and USER (plus RW for writes) and rejects _PAGE_SPECIAL ptes,
 * since result never has that bit while mask tests it. */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
        unsigned long end, int write, struct page **pages, int *nr)
{
    unsigned long mask, result;
    pte_t *ptep;

    result = _PAGE_PRESENT|_PAGE_USER;
    if (write)
        result |= _PAGE_RW;
    mask = result | _PAGE_SPECIAL;

    ptep = pte_offset_kernel(&pmd, addr);
    do {
        pte_t pte = *ptep;
        struct page *page;

        if ((pte_val(pte) & mask) != result)
            return 0;
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
        page = pte_page(pte);
        if (!page_cache_get_speculative(page))
            return 0;
        /* Re-check the pte after taking the reference: if it changed
         * under us, drop the page and fall back to the slow path. */
        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
            put_page(page);
            return 0;
        }
        if (PageTail(page))
            get_huge_page_tail(page);
        pages[*nr] = page;
        (*nr)++;

    } while (ptep++, addr += PAGE_SIZE, addr != end);

    return 1;
}
/* Walk the PMDs under @pud for [addr, end); huge-page directories are
 * handed to gup_hugepd(), normal ones descend into gup_pte_range().
 * Returns 1 on success, 0 to fall back to the slow path. */
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
        int write, struct page **pages, int *nr)
{
    unsigned long next;
    pmd_t *pmdp;

    pmdp = pmd_offset(&pud, addr);
    do {
        pmd_t pmd = *pmdp;

        next = pmd_addr_end(addr, end);
        if (pmd_none(pmd))
            return 0;
        if (is_hugepd(pmdp)) {
            if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
                    addr, next, write, pages, nr))
                return 0;
        } else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
            return 0;
    } while (pmdp++, addr = next, addr != end);

    return 1;
}

/* Walk the PUDs under @pgd for [addr, end); same dispatch pattern as
 * gup_pmd_range() one level up. */
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
        int write, struct page **pages, int *nr)
{
    unsigned long next;
    pud_t *pudp;

    pudp = pud_offset(&pgd, addr);
    do {
        pud_t pud = *pudp;

        next = pud_addr_end(addr, end);
        if (pud_none(pud))
            return 0;
        if (is_hugepd(pudp)) {
            if (!gup_hugepd((hugepd_t *)pudp, PUD_SHIFT,
                    addr, next, write, pages, nr))
                return 0;
        } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
            return 0;
    } while (pudp++, addr = next, addr != end);

    return 1;
}
/*
 * Lockless get_user_pages: pin nr_pages of user memory starting at
 * @start without taking mmap_sem, walking the page tables with IRQs
 * disabled.  Any miss (unmapped, protection mismatch, racing update)
 * falls back to the regular get_user_pages() slow path for the
 * remaining range.  Returns the number of pages pinned, or a negative
 * error if nothing was pinned and the slow path failed.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
            struct page **pages)
{
    struct mm_struct *mm = current->mm;
    unsigned long addr, len, end;
    unsigned long next;
    pgd_t *pgdp;
    int nr = 0;

    pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");

    start &= PAGE_MASK;
    addr = start;
    len = (unsigned long) nr_pages << PAGE_SHIFT;
    end = start + len;

    if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
                    start, len)))
        goto slow_irqon;

    pr_devel("  aligned: %lx .. %lx\n", start, end);

    /*
     * XXX: batch / limit 'nr', to avoid large irq off latency
     * needs some instrumenting to determine the common sizes used by
     * important workloads (eg. DB2), and whether limiting the batch size
     * will decrease performance.
     *
     * It seems like we're in the clear for the moment. Direct-IO is
     * the main guy that batches up lots of get_user_pages, and even
     * they are limited to 64-at-a-time which is not so many.
     */
    /*
     * This doesn't prevent pagetable teardown, but does prevent
     * the pagetables from being freed on powerpc.
     *
     * So long as we atomically load page table pointers versus teardown,
     * we can follow the address down to the the page and take a ref on it.
     */
    local_irq_disable();

    pgdp = pgd_offset(mm, addr);
    do {
        pgd_t pgd = *pgdp;

        pr_devel("  %016lx: normal pgd %p\n", addr,
             (void *)pgd_val(pgd));
        next = pgd_addr_end(addr, end);
        if (pgd_none(pgd))
            goto slow;
        if (is_hugepd(pgdp)) {
            if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
                    addr, next, write, pages, &nr))
                goto slow;
        } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
            goto slow;
    } while (pgdp++, addr = next, addr != end);

    local_irq_enable();

    VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
    return nr;

    /* Slow path: re-enable IRQs, then hand whatever remains of the range
     * to the regular, mmap_sem-taking get_user_pages(). */
    {
        int ret;

slow:
        local_irq_enable();
slow_irqon:
        pr_devel("  slow path ! nr = %d\n", nr);

        /* Try to get the remaining pages with get_user_pages */
        start += nr << PAGE_SHIFT;
        pages += nr;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(current, mm, start,
            (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
        up_read(&mm->mmap_sem);

        /* Have to be a bit careful with return values */
        if (nr > 0) {
            if (ret < 0)
                ret = nr;
            else
                ret += nr;
        }
        return ret;
    }
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */
| gpl-2.0 |
vigsterkr/openwrt-beagleboard | target/linux/adm5120/files/arch/mips/adm5120/zyxel/p-334wt.c | 15 | 1049 | /*
* ZyXEL Prestige P-334WT support
*
* Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
*/
#include "p-33x.h"
/* LED table for the P-334WT; all entries use GPIO_LED_INV — presumably
 * active-low, confirm against the board GPIO header. */
static struct gpio_led p334wt_gpio_leds[] __initdata = {
    GPIO_LED_INV(ADM5120_GPIO_PIN2, "power", NULL),
    GPIO_LED_INV(ADM5120_GPIO_P3L0, "lan1", NULL),
    GPIO_LED_INV(ADM5120_GPIO_P2L0, "lan2", NULL),
    GPIO_LED_INV(ADM5120_GPIO_P1L0, "lan3", NULL),
    GPIO_LED_INV(ADM5120_GPIO_P0L0, "lan4", NULL),
    GPIO_LED_INV(ADM5120_GPIO_P4L0, "wan", NULL),
    GPIO_LED_INV(ADM5120_GPIO_P4L2, "wlan", NULL),
    GPIO_LED_INV(ADM5120_GPIO_P2L2, "otist", NULL),
    GPIO_LED_INV(ADM5120_GPIO_P1L2, "hidden", NULL),
};

/* Board setup: shared P-33x initialisation plus the P-334WT LED set. */
static void __init p334wt_setup(void)
{
    p33x_generic_setup();
    adm5120_add_device_gpio_leds(ARRAY_SIZE(p334wt_gpio_leds),
                    p334wt_gpio_leds);
}

ADM5120_BOARD(MACH_ADM5120_P334WT, "ZyXEL Prestige 334WT", p334wt_setup);
| gpl-2.0 |
miiicmueller/TerraZoo | contiki-2.7/platform/z1/dev/temperature-sensor.c | 15 | 2743 | /*
* Copyright (c) 2010, Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the Institute nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
/**
* \file
* Sensor driver for reading the built-in temperature sensor in the CPU.
* \author
* Adam Dunkels <adam@sics.se>
* Joakim Eriksson <joakime@sics.se>
* Niclas Finne <nfi@sics.se>
*/
#include "dev/temperature-sensor.h"
#include "dev/sky-sensors.h"
#include "contiki.h"
#define INPUT_CHANNEL (1 << INCH_10)
#define INPUT_REFERENCE SREF_1
#define TEMPERATURE_MEM ADC12MEM10
const struct sensors_sensor temperature_sensor;
/*---------------------------------------------------------------------------*/
/* Return the raw ADC12 conversion result for the built-in temperature
 * channel; 'type' is ignored and no conversion to degrees is done. */
static int
value(int type)
{
  return TEMPERATURE_MEM;
}
/*---------------------------------------------------------------------------*/
/* Delegate configuration (activation etc.) to the shared sky-sensors ADC
 * helper for this channel/reference combination. */
static int
configure(int type, int c)
{
  return sky_sensors_configure(INPUT_CHANNEL, INPUT_REFERENCE, type, c);
}
/*---------------------------------------------------------------------------*/
/* Report sensor status via the shared sky-sensors helper. */
static int
status(int type)
{
  return sky_sensors_status(INPUT_CHANNEL, type);
}
/*---------------------------------------------------------------------------*/
SENSORS_SENSOR(temperature_sensor, TEMPERATURE_SENSOR,
               value, configure, status);
| gpl-2.0 |
HinTak/linux | tools/testing/selftests/kvm/hardware_disable_test.c | 15 | 4577 | // SPDX-License-Identifier: GPL-2.0-only
/*
* This test is intended to reproduce a crash that happens when
* kvm_arch_hardware_disable is called and it attempts to unregister the user
* return notifiers.
*/
#define _GNU_SOURCE
#include <fcntl.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <sys/wait.h>

#include <test_util.h>

#include "kvm_util.h"
#define VCPU_NUM 4
#define SLEEPING_THREAD_NUM (1 << 4)
#define FORK_NUM (1ULL << 9)
#define DELAY_US_MAX 2000
#define GUEST_CODE_PIO_PORT 4
sem_t *sem;
/* Arguments for the pthreads */
struct payload {
struct kvm_vm *vm;
uint32_t index;
};
/* Guest busy-loops forever; vCPUs are only ever torn down from the host
 * side, never by guest exit. */
static void guest_code(void)
{
    for (;;)
        ;  /* Some busy work */
    printf("Should not be reached.\n");
}

/* vCPU worker thread: run the guest and assert if it ever exits. */
static void *run_vcpu(void *arg)
{
    struct payload *payload = (struct payload *)arg;
    struct kvm_run *state = vcpu_state(payload->vm, payload->index);

    vcpu_run(payload->vm, payload->index);
    TEST_ASSERT(false, "%s: exited with reason %d: %s\n",
            __func__, state->exit_reason,
            exit_reason_str(state->exit_reason));
    pthread_exit(NULL);
}

/* Noise thread: open/close /dev/null in a tight loop alongside the vCPU
 * threads.  The return values are deliberately ignored — this is
 * best-effort churn, not functional I/O. */
static void *sleeping_thread(void *arg)
{
    int fd;

    while (true) {
        fd = open("/dev/null", O_RDWR);
        close(fd);
    }
    TEST_ASSERT(false, "%s: exited\n", __func__);
    pthread_exit(NULL);
}
/* pthread_create() wrapper that asserts success. */
static inline void check_create_thread(pthread_t *thread, pthread_attr_t *attr,
                    void *(*f)(void *), void *arg)
{
    int ret;

    ret = pthread_create(thread, attr, f, arg);
    TEST_ASSERT(ret == 0, "%s: failed to create thread", __func__);
}

/* pthread_setaffinity_np() wrapper that asserts success. */
static inline void check_set_affinity(pthread_t thread, cpu_set_t *cpu_set)
{
    int ret;

    ret = pthread_setaffinity_np(thread, sizeof(cpu_set_t), cpu_set);
    TEST_ASSERT(ret == 0, "%s: failed set affinity", __func__);
}

/* pthread_join() wrapper that asserts success. */
static inline void check_join(pthread_t thread, void **retval)
{
    int ret;

    ret = pthread_join(thread, retval);
    TEST_ASSERT(ret == 0, "%s: failed to join thread", __func__);
}
/*
 * Child-process body: create a VM with VCPU_NUM vCPUs (plus a crowd of
 * sleeping threads per vCPU), all pinned to the same small CPU set to
 * maximise contention, then signal the parent via the semaphore and
 * join the vCPU threads.  The guest never exits, so this function only
 * returns when the parent SIGKILLs the child.
 */
static void run_test(uint32_t run)
{
    struct kvm_vm *vm;
    cpu_set_t cpu_set;
    pthread_t threads[VCPU_NUM];
    pthread_t throw_away;
    struct payload payloads[VCPU_NUM];
    void *b;
    uint32_t i, j;

    CPU_ZERO(&cpu_set);
    for (i = 0; i < VCPU_NUM; i++)
        CPU_SET(i, &cpu_set);

    vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
    kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
    vm_create_irqchip(vm);

    pr_debug("%s: [%d] start vcpus\n", __func__, run);
    for (i = 0; i < VCPU_NUM; ++i) {
        vm_vcpu_add_default(vm, i, guest_code);
        payloads[i].vm = vm;
        payloads[i].index = i;

        check_create_thread(&threads[i], NULL, run_vcpu,
                    (void *)&payloads[i]);
        check_set_affinity(threads[i], &cpu_set);

        /* 'throw_away' handles are intentionally never joined; the
         * sleeping threads die with the process. */
        for (j = 0; j < SLEEPING_THREAD_NUM; ++j) {
            check_create_thread(&throw_away, NULL, sleeping_thread,
                        (void *)NULL);
            check_set_affinity(throw_away, &cpu_set);
        }
    }
    pr_debug("%s: [%d] all threads launched\n", __func__, run);

    /* Tell the parent that setup is complete. */
    sem_post(sem);
    for (i = 0; i < VCPU_NUM; ++i)
        check_join(threads[i], &b);
    /* Should not be reached */
    TEST_ASSERT(false, "%s: [%d] child escaped the ninja\n", __func__, run);
}
void wait_for_child_setup(pid_t pid)
{
/*
* Wait for the child to post to the semaphore, but wake up periodically
* to check if the child exited prematurely.
*/
for (;;) {
const struct timespec wait_period = { .tv_sec = 1 };
int status;
if (!sem_timedwait(sem, &wait_period))
return;
/* Child is still running, keep waiting. */
if (pid != waitpid(pid, &status, WNOHANG))
continue;
/*
* Child is no longer running, which is not expected.
*
* If it exited with a non-zero status, we explicitly forward
* the child's status in case it exited with KSFT_SKIP.
*/
if (WIFEXITED(status))
exit(WEXITSTATUS(status));
else
TEST_ASSERT(false, "Child exited unexpectedly");
}
}
/*
 * Fork FORK_NUM children; each sets up a loaded VM, then gets killed
 * after a random delay to stress kvm_arch_hardware_disable().
 */
int main(int argc, char **argv)
{
    uint32_t i;
    int s, r;
    pid_t pid;

    sem = sem_open("vm_sem", O_CREAT | O_EXCL, 0644, 0);
    /* sem_open() failure returns SEM_FAILED, not NULL — previously
     * unchecked, so a failure (e.g. stale "vm_sem") crashed later. */
    TEST_ASSERT(sem != SEM_FAILED, "%s: failed to create semaphore", __func__);
    /* Unlink immediately: the semaphore stays usable via the handle but
     * leaves no name behind if we crash. */
    sem_unlink("vm_sem");

    for (i = 0; i < FORK_NUM; ++i) {
        pid = fork();
        TEST_ASSERT(pid >= 0, "%s: unable to fork", __func__);
        if (pid == 0)
            run_test(i); /* This function always exits */

        pr_debug("%s: [%d] waiting semaphore\n", __func__, i);
        wait_for_child_setup(pid);
        r = (rand() % DELAY_US_MAX) + 1;
        pr_debug("%s: [%d] waiting %dus\n", __func__, i, r);
        usleep(r);
        r = waitpid(pid, &s, WNOHANG);
        TEST_ASSERT(r != pid,
                "%s: [%d] child exited unexpectedly status: [%d]",
                __func__, i, s);
        pr_debug("%s: [%d] killing child\n", __func__, i);
        kill(pid, SIGKILL);
    }

    /* The semaphore came from sem_open(), so release it with
     * sem_close(); sem_destroy() is only defined for sem_init()
     * semaphores (POSIX leaves it undefined here). */
    sem_close(sem);
    exit(0);
}
| gpl-2.0 |
virtuallysafe/kodi | xbmc/pvr/recordings/PVRRecordings.cpp | 15 | 15649 | /*
* Copyright (C) 2012-2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "FileItem.h"
#include "epg/EpgContainer.h"
#include "URL.h"
#include "utils/log.h"
#include "threads/SingleLock.h"
#include "video/VideoDatabase.h"
#include "utils/URIUtils.h"
#include "utils/StringUtils.h"
#include "pvr/PVRManager.h"
#include "pvr/addons/PVRClients.h"
#include "PVRRecordings.h"
using namespace PVR;
/* Construct an empty recordings container and open the video database
 * (used later for play counts / resume bookmarks). */
CPVRRecordings::CPVRRecordings(void) :
    m_bIsUpdating(false),
    m_iLastId(0),
    m_bGroupItems(true),
    m_bHasDeleted(false)
{
  m_database.Open();
}

CPVRRecordings::~CPVRRecordings()
{
  Clear();
  if (m_database.IsOpen())
    m_database.Close();
}

/* Drop the cached list and re-fetch recordings from all PVR clients.
 * NOTE(review): the bool argument presumably selects normal vs. deleted
 * ("trash") recordings — confirm against CPVRClients::GetRecordings. */
void CPVRRecordings::UpdateFromClients(void)
{
  CSingleLock lock(m_critSection);
  Clear();
  g_PVRClients->GetRecordings(this, false);
  g_PVRClients->GetRecordings(this, true);
}
/* Return @strOrig with all leading and trailing '/' characters removed. */
std::string CPVRRecordings::TrimSlashes(const std::string &strOrig) const
{
  std::string strReturn(strOrig);

  /* Strip the whole run of leading slashes with a single erase: the old
   * loop erased one character at a time (quadratic in the number of
   * slashes, and it relied on operator[](0) returning '\0' for an empty
   * string).  find_first_not_of returning npos means the string is all
   * slashes (or empty) and erase(0, npos) clears it — same result. */
  strReturn.erase(0, strReturn.find_first_not_of('/'));

  URIUtils::RemoveSlashAtEnd(strReturn);
  return strReturn;
}
/* Return the first path component of @strPath after stripping the
 * @strBase prefix, or an empty string when @strPath is not inside
 * @strBase.  Both inputs are normalised via TrimSlashes first. */
const std::string CPVRRecordings::GetDirectoryFromPath(const std::string &strPath, const std::string &strBase) const
{
  std::string strReturn;
  std::string strUsePath = TrimSlashes(strPath);
  std::string strUseBase = TrimSlashes(strBase);

  /* strip the base or return an empty value if it doesn't fit or match */
  if (!strUseBase.empty())
  {
    /* adding "/" to make sure that base matches the complete folder name and not only parts of it */
    if (strUsePath.size() <= strUseBase.size() || !StringUtils::StartsWith(strUsePath, strUseBase + "/"))
      return strReturn;
    strUsePath.erase(0, strUseBase.size());
  }

  /* check for more occurences */
  size_t iDelimiter = strUsePath.find('/');
  if (iDelimiter != std::string::npos && iDelimiter > 0)
    strReturn = strUsePath.substr(0, iDelimiter);
  else
    strReturn = strUsePath;

  return TrimSlashes(strReturn);
}
/* Decide whether an entry stored under @strEntryDirectory belongs to the
 * listing of @strDirectory: exact match when grouping is on, prefix
 * match (flattened view) when it is off. */
bool CPVRRecordings::IsDirectoryMember(const std::string &strDirectory, const std::string &strEntryDirectory) const
{
  const std::string dir = TrimSlashes(strDirectory);
  const std::string entryDir = TrimSlashes(strEntryDirectory);

  /* Case-insensitive comparison since sub folders are created with case-insensitive matching (GetSubDirectories) */
  if (m_bGroupItems)
    return StringUtils::EqualsNoCase(dir, entryDir);
  return StringUtils::StartsWithNoCase(entryDir, dir);
}
/* Build one folder CFileItem per distinct first-level sub directory of
 * @strBase, using each folder's newest recording time as its date and
 * clearing the "watched" overlay on folders that contain any unwatched
 * recording. */
void CPVRRecordings::GetSubDirectories(const std::string &strBase, CFileItemList *results)
{
  // Only active recordings are fetched to provide sub directories.
  // Not applicable for deleted view which is supposed to be flattened.
  std::string strUseBase = TrimSlashes(strBase);
  std::set<CFileItemPtr> unwatchedFolders;

  for (PVR_RECORDINGMAP_CITR it = m_recordings.begin(); it != m_recordings.end(); it++)
  {
    CPVRRecordingPtr current = it->second;
    if (current->IsDeleted())
      continue;
    const std::string strCurrent = GetDirectoryFromPath(current->m_strDirectory, strUseBase);
    if (strCurrent.empty())
      continue;

    std::string strFilePath;
    if(strUseBase.empty())
      strFilePath = StringUtils::Format("pvr://" PVR_RECORDING_BASE_PATH "/" PVR_RECORDING_ACTIVE_PATH "/%s/", strCurrent.c_str());
    else
      strFilePath = StringUtils::Format("pvr://" PVR_RECORDING_BASE_PATH "/" PVR_RECORDING_ACTIVE_PATH "/%s/%s/", strUseBase.c_str(), strCurrent.c_str());

    CFileItemPtr pFileItem;
    if (m_database.IsOpen())
      current->UpdateMetadata(m_database);

    if (!results->Contains(strFilePath))
    {
      pFileItem.reset(new CFileItem(strCurrent, true));
      pFileItem->SetPath(strFilePath);
      pFileItem->SetLabel(strCurrent);
      pFileItem->SetLabelPreformated(true);
      pFileItem->m_dateTime = current->RecordingTimeAsLocalTime();

      // Assume all folders are watched, we'll change the overlay later
      pFileItem->SetOverlayImage(CGUIListItem::ICON_OVERLAY_WATCHED, false);
      results->Add(pFileItem);
    }
    else
    {
      pFileItem=results->Get(strFilePath);
      /* Keep the newest recording time as the folder's date. */
      if (pFileItem->m_dateTime<current->RecordingTimeAsLocalTime())
        pFileItem->m_dateTime = current->RecordingTimeAsLocalTime();
    }

    if (current->m_playCount == 0)
      unwatchedFolders.insert(pFileItem);
  }

  // Remove the watched overlay from folders containing unwatched entries
  for (std::set<CFileItemPtr>::iterator it = unwatchedFolders.begin(); it != unwatchedFolders.end(); ++it)
    (*it)->SetOverlayImage(CGUIListItem::ICON_OVERLAY_WATCHED, true);
}
/* Initial load: synchronously fetch from clients and return the count. */
int CPVRRecordings::Load(void)
{
  Update();

  return m_recordings.size();
}

void CPVRRecordings::Unload()
{
  Clear();
}

/* Refresh the recordings list from the clients.  m_bIsUpdating makes
 * concurrent calls no-ops; the lock is deliberately dropped around the
 * (slow) client fetch and around observer notification. */
void CPVRRecordings::Update(void)
{
  CSingleLock lock(m_critSection);
  if (m_bIsUpdating)
    return;
  m_bIsUpdating = true;
  lock.Leave();

  CLog::Log(LOGDEBUG, "CPVRRecordings - %s - updating recordings", __FUNCTION__);
  UpdateFromClients();

  lock.Enter();
  m_bIsUpdating = false;
  SetChanged();
  lock.Leave();

  NotifyObservers(ObservableMessageRecordings);
}
/* Number of recordings currently cached (active and deleted alike). */
int CPVRRecordings::GetNumRecordings()
{
  CSingleLock lock(m_critSection);
  return static_cast<int>(m_recordings.size());
}

/* True when at least one deleted ("trash") recording was reported. */
bool CPVRRecordings::HasDeletedRecordings()
{
  CSingleLock lock(m_critSection);
  return m_bHasDeleted;
}
/* Append either the deleted or the active recordings to @results,
 * depending on @bDeleted; returns how many items were added. */
int CPVRRecordings::GetRecordings(CFileItemList* results, bool bDeleted)
{
  CSingleLock lock(m_critSection);

  int iAdded = 0;
  for (PVR_RECORDINGMAP_CITR it = m_recordings.begin(); it != m_recordings.end(); ++it)
  {
    if (it->second->IsDeleted() == bDeleted)
    {
      results->Add(CFileItemPtr(new CFileItem(it->second)));
      ++iAdded;
    }
  }

  return iAdded;
}
/* Dispatch: folders are deleted recursively, single items directly. */
bool CPVRRecordings::Delete(const CFileItem& item)
{
  if (item.m_bIsFolder)
    return DeleteDirectory(item);
  return DeleteRecording(item);
}
/* Recursively delete everything below @directory; returns true only if
 * every contained item was deleted successfully. */
bool CPVRRecordings::DeleteDirectory(const CFileItem& directory)
{
  CFileItemList items;
  XFILE::CDirectory::GetDirectory(directory.GetPath(), items);

  bool allDeleted = true;

  /* Note: GetList() returns the vector by value, as in the original;
   * the previously declared-but-unused local 'CFileItem item' has been
   * removed. */
  VECFILEITEMS itemList = items.GetList();
  for (VECFILEITEMS::const_iterator it = itemList.begin(); it != itemList.end(); ++it)
    allDeleted &= Delete(*(it->get()));

  return allDeleted;
}
/* Delete one recording; fails (with a log entry) for items that carry
 * no recording tag. */
bool CPVRRecordings::DeleteRecording(const CFileItem &item)
{
  if (!item.IsPVRRecording())
  {
    CLog::Log(LOGERROR, "CPVRRecordings - %s - cannot delete file: no valid recording tag", __FUNCTION__);
    return false;
  }

  CPVRRecordingPtr tag = item.GetPVRRecordingInfoTag();
  return tag->Delete();
}

/* Restore a recording from the client-side trash. */
bool CPVRRecordings::Undelete(const CFileItem &item)
{
  if (!item.IsDeletedPVRRecording())
  {
    CLog::Log(LOGERROR, "CPVRRecordings - %s - cannot undelete file: no valid recording tag", __FUNCTION__);
    return false;
  }

  CPVRRecordingPtr tag = item.GetPVRRecordingInfoTag();
  return tag->Undelete();
}

/* Rename a (usable, i.e. non-deleted) recording on its client. */
bool CPVRRecordings::RenameRecording(CFileItem &item, std::string &strNewName)
{
  if (!item.IsUsablePVRRecording())
  {
    CLog::Log(LOGERROR, "CPVRRecordings - %s - cannot rename file: no valid recording tag", __FUNCTION__);
    return false;
  }

  CPVRRecordingPtr tag = item.GetPVRRecordingInfoTag();
  return tag->Rename(strNewName);
}

/* Empty the trash on every client that supports it. */
bool CPVRRecordings::DeleteAllRecordingsFromTrash()
{
  return g_PVRClients->DeleteAllRecordingsFromTrash() == PVR_ERROR_NO_ERROR;
}
// Set the play count of a recording, or of every recording below a folder
// item (recursing into sub-folders). A positive count additionally clears
// the resume bookmark and resets the last-played position.
// Returns true when the video database could be opened; the individual
// per-item updates are not error-checked.
bool CPVRRecordings::SetRecordingsPlayCount(const CFileItemPtr &item, int count)
{
bool bResult = false;
if (m_database.IsOpen())
{
bResult = true;
CLog::Log(LOGDEBUG, "CPVRRecordings - %s - item path %s", __FUNCTION__, item->GetPath().c_str());
CFileItemList items;
if (item->m_bIsFolder)
{
XFILE::CDirectory::GetDirectory(item->GetPath(), items);
}
else
items.Add(item);
CLog::Log(LOGDEBUG, "CPVRRecordings - %s - will set watched for %d items", __FUNCTION__, items.Size());
for (int i=0;i<items.Size();++i)
{
CLog::Log(LOGDEBUG, "CPVRRecordings - %s - setting watched for item %d", __FUNCTION__, i);
CFileItemPtr pItem=items[i];
if (pItem->m_bIsFolder)
{
CLog::Log(LOGDEBUG, "CPVRRecordings - %s - path %s is a folder, will call recursively", __FUNCTION__, pItem->GetPath().c_str());
// Skip the ".." parent entry to avoid recursing back upwards.
if (pItem->GetLabel() != "..")
{
SetRecordingsPlayCount(pItem, count);
}
continue;
}
if (!pItem->HasPVRRecordingInfoTag())
continue;
const CPVRRecordingPtr recording = pItem->GetPVRRecordingInfoTag();
if (recording)
{
recording->SetPlayCount(count);
// Clear resume bookmark
if (count > 0)
{
m_database.ClearBookMarksOfFile(pItem->GetPath(), CBookmark::RESUME);
recording->SetLastPlayedPosition(0);
}
m_database.SetPlayCount(*pItem, count);
}
}
}
return bResult;
}
// Build the file listing for a virtual recordings path. Returns true when
// strPath lies below PVR_RECORDING_BASE_PATH, false otherwise.
// The deleted-recordings view is always flat; the active view is
// hierarchical unless grouping (m_bGroupItems) is off.
bool CPVRRecordings::GetDirectory(const std::string& strPath, CFileItemList &items)
{
CSingleLock lock(m_critSection);
CURL url(strPath);
std::string strDirectoryPath = url.GetFileName();
URIUtils::RemoveSlashAtEnd(strDirectoryPath);
if (StringUtils::StartsWith(strDirectoryPath, PVR_RECORDING_BASE_PATH))
{
// sizeof(literal) - 1 == strlen(literal): strip the base prefix.
strDirectoryPath.erase(0, sizeof(PVR_RECORDING_BASE_PATH) - 1);
// Check directory name is for deleted recordings
bool bDeleted = StringUtils::StartsWith(strDirectoryPath, "/" PVR_RECORDING_DELETED_PATH);
// sizeof(literal) == strlen(literal) + 1: also strips the leading '/'.
strDirectoryPath.erase(0, bDeleted ? sizeof(PVR_RECORDING_DELETED_PATH) : sizeof(PVR_RECORDING_ACTIVE_PATH));
// Get the directory structure if in non-flatten mode
// Deleted view is always flatten. So only for an active view
if (!bDeleted && m_bGroupItems)
GetSubDirectories(strDirectoryPath, &items);
// get all files of the currrent directory or recursively all files starting at the current directory if in flatten mode
for (PVR_RECORDINGMAP_CITR it = m_recordings.begin(); it != m_recordings.end(); it++)
{
CPVRRecordingPtr current = it->second;
// skip items that are not members of the target directory
if (!IsDirectoryMember(strDirectoryPath, current->m_strDirectory) || current->IsDeleted() != bDeleted)
continue;
if (m_database.IsOpen())
current->UpdateMetadata(m_database);
CFileItemPtr pFileItem(new CFileItem(current));
pFileItem->SetLabel2(current->RecordingTimeAsLocalTime().GetAsLocalizedDateTime(true, false));
pFileItem->m_dateTime = current->RecordingTimeAsLocalTime();
pFileItem->SetPath(current->m_strFileNameAndPath);
// Set art
if (!current->m_strIconPath.empty())
{
pFileItem->SetIconImage(current->m_strIconPath);
pFileItem->SetArt("icon", current->m_strIconPath);
}
if (!current->m_strThumbnailPath.empty())
pFileItem->SetArt("thumb", current->m_strThumbnailPath);
if (!current->m_strFanartPath.empty())
pFileItem->SetArt("fanart", current->m_strFanartPath);
// Use the channel icon as a fallback when a thumbnail is not available
pFileItem->SetArtFallback("thumb", "icon");
pFileItem->SetOverlayImage(CGUIListItem::ICON_OVERLAY_UNWATCHED, pFileItem->GetPVRRecordingInfoTag()->m_playCount > 0);
items.Add(pFileItem);
}
return true;
}
return false;
}
// Append a CFileItem for every recording matching the requested
// deleted/active state, refreshing each recording's metadata from the
// video database when it is open.
void CPVRRecordings::GetAll(CFileItemList &items, bool bDeleted)
{
CSingleLock lock(m_critSection);
for (PVR_RECORDINGMAP_CITR it = m_recordings.begin(); it != m_recordings.end(); it++)
{
CPVRRecordingPtr current = it->second;
if (current->IsDeleted() != bDeleted)
continue;
if (m_database.IsOpen())
current->UpdateMetadata(m_database);
CFileItemPtr pFileItem(new CFileItem(current));
pFileItem->SetLabel2(current->RecordingTimeAsLocalTime().GetAsLocalizedDateTime(true, false));
pFileItem->m_dateTime = current->RecordingTimeAsLocalTime();
pFileItem->SetPath(current->m_strFileNameAndPath);
// Mark watched recordings with the "unwatched" overlay cleared.
pFileItem->SetOverlayImage(CGUIListItem::ICON_OVERLAY_UNWATCHED, pFileItem->GetPVRRecordingInfoTag()->m_playCount > 0);
items.Add(pFileItem);
}
}
// Look up a recording by its local recording id and wrap it in a
// CFileItem. Returns an empty pointer when the id is unknown.
// Fix: stop scanning once the id is found — ids are unique (assigned from
// ++m_iLastId in UpdateFromClient()), so the original full-map walk after
// a match did no useful work.
CFileItemPtr CPVRRecordings::GetById(unsigned int iId) const
{
  CFileItemPtr item;
  CSingleLock lock(m_critSection);
  for (PVR_RECORDINGMAP_CITR it = m_recordings.begin(); it != m_recordings.end(); ++it)
  {
    if (iId == it->second->m_iRecordingId)
    {
      item = CFileItemPtr(new CFileItem(it->second));
      break;
    }
  }
  return item;
}
// Look up a recording by its full virtual path, matching both the file
// path and the deleted/active state implied by the path prefix. Always
// returns a valid CFileItemPtr; an empty CFileItem signals "not found".
CFileItemPtr CPVRRecordings::GetByPath(const std::string &path)
{
CURL url(path);
std::string fileName = url.GetFileName();
URIUtils::RemoveSlashAtEnd(fileName);
CSingleLock lock(m_critSection);
if (StringUtils::StartsWith(fileName, PVR_RECORDING_BASE_PATH "/"))
{
// Check directory name is for deleted recordings
// sizeof(literal) == strlen(literal) + 1: also strips the '/' separator.
fileName.erase(0, sizeof(PVR_RECORDING_BASE_PATH));
bool bDeleted = StringUtils::StartsWith(fileName, PVR_RECORDING_DELETED_PATH "/");
for (PVR_RECORDINGMAP_CITR it = m_recordings.begin(); it != m_recordings.end(); it++)
{
CPVRRecordingPtr current = it->second;
if (!URIUtils::PathEquals(path, current->m_strFileNameAndPath) || bDeleted != current->IsDeleted())
continue;
CFileItemPtr fileItem(new CFileItem(current));
return fileItem;
}
}
// Not found (or path outside the recordings tree): empty item.
CFileItemPtr fileItem(new CFileItem);
return fileItem;
}
// Look up a recording by its owning client id and client-side recording
// id. Returns an empty pointer when no such recording is known.
CPVRRecordingPtr CPVRRecordings::GetById(int iClientId, const std::string &strRecordingId) const
{
  CSingleLock lock(m_critSection);
  const PVR_RECORDINGMAP_CITR found = m_recordings.find(CPVRRecordingUid(iClientId, strRecordingId));
  if (found == m_recordings.end())
    return CPVRRecordingPtr();
  return found->second;
}
// Drop all cached recordings and reset the deleted-recordings flag.
void CPVRRecordings::Clear()
{
CSingleLock lock(m_critSection);
m_bHasDeleted = false;
m_recordings.clear();
}
// Merge one recording reported by a client into the cache: update the
// existing entry when the (client id, recording id) pair is known,
// otherwise create a new entry, attach it to its EPG event (if any) and
// assign it a fresh local recording id.
void CPVRRecordings::UpdateFromClient(const CPVRRecordingPtr &tag)
{
CSingleLock lock(m_critSection);
if (tag->IsDeleted())
m_bHasDeleted = true;
CPVRRecordingPtr newTag = GetById(tag->m_iClientId, tag->m_strRecordingId);
if (newTag)
{
newTag->Update(*tag);
}
else
{
newTag = CPVRRecordingPtr(new CPVRRecording);
newTag->Update(*tag);
if (newTag->EpgEvent() > 0)
{
EPG::CEpgInfoTagPtr epgTag = EPG::CEpgContainer::GetInstance().GetTagById(newTag->EpgEvent());
if (epgTag)
epgTag->SetRecording(newTag);
}
newTag->m_iRecordingId = ++m_iLastId;
m_recordings.insert(std::make_pair(CPVRRecordingUid(newTag->m_iClientId, newTag->m_strRecordingId), newTag));
}
}
// Re-attach every active (non-deleted) recording to its EPG event, e.g.
// after the EPG database has been reloaded.
void CPVRRecordings::UpdateEpgTags(void)
{
CSingleLock lock(m_critSection);
int iEpgEvent;
for (PVR_RECORDINGMAP_ITR it = m_recordings.begin(); it != m_recordings.end(); ++it)
{
iEpgEvent = it->second->EpgEvent();
if (iEpgEvent > 0 && !it->second->IsDeleted())
{
EPG::CEpgInfoTagPtr epgTag = EPG::CEpgContainer::GetInstance().GetTagById(iEpgEvent);
if (epgTag)
epgTag->SetRecording(it->second);
}
}
}
| gpl-2.0 |
xtypeluki/mop548 | dep/acelite/ace/Base_Thread_Adapter.cpp | 271 | 3890 | // $Id: Base_Thread_Adapter.cpp 95595 2012-03-07 13:33:25Z johnnyw $
#include "ace/Base_Thread_Adapter.h"
#if !defined (ACE_HAS_INLINED_OSCALLS)
# include "ace/Base_Thread_Adapter.inl"
#endif /* ACE_HAS_INLINED_OSCALLS */
#if defined (ACE_HAS_TSS_EMULATION)
# include "ace/OS_NS_Thread.h"
#endif /* ACE_HAS_TSS_EMULATION */
#include "ace/Service_Config.h"
ACE_BEGIN_VERSIONED_NAMESPACE_DECL
// Static hook function pointers, all initially unset (0). When installed
// (presumably by the logging subsystem — confirm against ACE_Log_Msg),
// they are invoked by the adapter methods below at thread setup/teardown.
ACE_INIT_LOG_MSG_HOOK ACE_Base_Thread_Adapter::init_log_msg_hook_ = 0;
ACE_INHERIT_LOG_MSG_HOOK ACE_Base_Thread_Adapter::inherit_log_msg_hook_ = 0;
ACE_CLOSE_LOG_MSG_HOOK ACE_Base_Thread_Adapter::close_log_msg_hook_ = 0;
ACE_SYNC_LOG_MSG_HOOK ACE_Base_Thread_Adapter::sync_log_msg_hook_ = 0;
ACE_THR_DESC_LOG_MSG_HOOK ACE_Base_Thread_Adapter::thr_desc_log_msg_hook_ = 0;
// Construct the adapter that carries the user's thread function, its
// argument and per-thread bookkeeping from the spawning thread into the
// newly created thread. Captures the current service configuration
// context and, if installed, snapshots log-message attributes via the
// init hook. Under gprof builds the spawner's profiling timer is saved
// so the child can inherit it.
ACE_Base_Thread_Adapter::ACE_Base_Thread_Adapter (
ACE_THR_FUNC user_func,
void *arg,
ACE_THR_C_FUNC entry_point,
ACE_OS_Thread_Descriptor *td
#if defined (ACE_HAS_WIN32_STRUCTURAL_EXCEPTIONS)
, ACE_SEH_EXCEPT_HANDLER selector
, ACE_SEH_EXCEPT_HANDLER handler
#endif /* ACE_HAS_WIN32_STRUCTURAL_EXCEPTIONS */
, long cancel_flags
)
: user_func_ (user_func)
, arg_ (arg)
, entry_point_ (entry_point)
, thr_desc_ (td)
, ctx_ (ACE_Service_Config::current())
, flags_ (cancel_flags)
{
ACE_OS_TRACE ("ACE_Base_Thread_Adapter::ACE_Base_Thread_Adapter");
if (ACE_Base_Thread_Adapter::init_log_msg_hook_ != 0)
(*ACE_Base_Thread_Adapter::init_log_msg_hook_) (
this->log_msg_attributes_
# if defined (ACE_HAS_WIN32_STRUCTURAL_EXCEPTIONS)
, selector
, handler
# endif /* ACE_HAS_WIN32_STRUCTURAL_EXCEPTIONS */
);
#ifdef ACE_USES_GPROF
getitimer (ITIMER_PROF, &itimer_);
#endif // ACE_USES_GPROF
}
// No-op destructor; the adapter owns no resources of its own.
ACE_Base_Thread_Adapter::~ACE_Base_Thread_Adapter (void)
{
}
// Propagate the spawning thread's log-message attributes into the new
// thread (via the inherit hook, if installed) and make the captured
// service configuration context current for this thread.
void
ACE_Base_Thread_Adapter::inherit_log_msg (void)
{
if (ACE_Base_Thread_Adapter::inherit_log_msg_hook_ != 0)
(*ACE_Base_Thread_Adapter::inherit_log_msg_hook_)(
this->thr_desc_,
this->log_msg_attributes_);
// Initialize the proper configuration context for the new thread
// Placed here since inherit_log_msg() gets called from any of our
// descendants (before self-destructing)
ACE_Service_Config::current (this->ctx_);
}
// Invoke the close hook (if installed), e.g. at thread exit.
void
ACE_Base_Thread_Adapter::close_log_msg (void)
{
if (ACE_Base_Thread_Adapter::close_log_msg_hook_ != 0)
(*ACE_Base_Thread_Adapter::close_log_msg_hook_) ();
}
// Invoke the sync hook (if installed) with the given program name.
void
ACE_Base_Thread_Adapter::sync_log_msg (const ACE_TCHAR *prg)
{
if (ACE_Base_Thread_Adapter::sync_log_msg_hook_ != 0)
(*ACE_Base_Thread_Adapter::sync_log_msg_hook_) (prg);
}
// No-op destructor for the abstract thread-descriptor base class.
ACE_OS_Thread_Descriptor::~ACE_OS_Thread_Descriptor (void)
{
}
// Return the current thread's descriptor via the hook, or 0 when no hook
// is installed.
ACE_OS_Thread_Descriptor *
ACE_Base_Thread_Adapter::thr_desc_log_msg (void)
{
if (ACE_Base_Thread_Adapter::thr_desc_log_msg_hook_ != 0)
return (*ACE_Base_Thread_Adapter::thr_desc_log_msg_hook_) ();
return 0;
}
ACE_END_VERSIONED_NAMESPACE_DECL
// Run the thread entry point for the <ACE_Thread_Adapter>. This must
// be an extern "C" to make certain compilers happy...
//
// This is the raw OS-level entry point handed to the thread-creation
// call: it sets up emulated TSS (if configured) and gprof timing, then
// delegates to the adapter's invoke() and returns its status.
extern "C" ACE_THR_FUNC_RETURN
ACE_THREAD_ADAPTER_NAME (void *args)
{
ACE_OS_TRACE ("ACE_THREAD_ADAPTER_NAME");
#if defined (ACE_HAS_TSS_EMULATION)
// As early as we can in the execution of the new thread, allocate
// its local TS storage. Allocate it on the stack, to save dynamic
// allocation/dealloction.
void *ts_storage[ACE_TSS_Emulation::ACE_TSS_THREAD_KEYS_MAX];
ACE_TSS_Emulation::tss_open (ts_storage);
#endif /* ACE_HAS_TSS_EMULATION */
ACE_Base_Thread_Adapter * const thread_args =
static_cast<ACE_Base_Thread_Adapter *> (args);
#ifdef ACE_USES_GPROF
// Inherit the spawner's profiling timer saved in the constructor.
setitimer (ITIMER_PROF, thread_args->timerval (), 0);
#endif // ACE_USES_GPROF
// Invoke the user-supplied function with the args.
ACE_THR_FUNC_RETURN status = thread_args->invoke ();
return status;
}
| gpl-2.0 |
samnazarko/linux-imx6 | arch/arm/plat-orion/gpio.c | 527 | 16252 | /*
* arch/arm/plat-orion/gpio.c
*
* Marvell Orion SoC GPIO handling.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#define DEBUG
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/leds.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <plat/orion-gpio.h>
/*
* GPIO unit register offsets.
*/
#define GPIO_OUT_OFF 0x0000
#define GPIO_IO_CONF_OFF 0x0004
#define GPIO_BLINK_EN_OFF 0x0008
#define GPIO_IN_POL_OFF 0x000c
#define GPIO_DATA_IN_OFF 0x0010
#define GPIO_EDGE_CAUSE_OFF 0x0014
#define GPIO_EDGE_MASK_OFF 0x0018
#define GPIO_LEVEL_MASK_OFF 0x001c
/* Per-bank GPIO controller state. */
struct orion_gpio_chip {
struct gpio_chip chip; /* gpiolib view of this bank */
spinlock_t lock; /* protects read-modify-write of the registers */
void __iomem *base; /* mapped base of this bank's register block */
unsigned long valid_input; /* bitmap: pins usable as inputs */
unsigned long valid_output; /* bitmap: pins usable as outputs */
int mask_offset; /* extra offset applied to the mask registers */
int secondary_irq_base; /* first irq number used by this bank's pins */
struct irq_domain *domain; /* irq domain covering the bank's pins */
};
/* Helpers returning the mapped address of each per-bank register. */
static void __iomem *GPIO_OUT(struct orion_gpio_chip *ochip)
{
return ochip->base + GPIO_OUT_OFF;
}
static void __iomem *GPIO_IO_CONF(struct orion_gpio_chip *ochip)
{
return ochip->base + GPIO_IO_CONF_OFF;
}
static void __iomem *GPIO_BLINK_EN(struct orion_gpio_chip *ochip)
{
return ochip->base + GPIO_BLINK_EN_OFF;
}
static void __iomem *GPIO_IN_POL(struct orion_gpio_chip *ochip)
{
return ochip->base + GPIO_IN_POL_OFF;
}
static void __iomem *GPIO_DATA_IN(struct orion_gpio_chip *ochip)
{
return ochip->base + GPIO_DATA_IN_OFF;
}
static void __iomem *GPIO_EDGE_CAUSE(struct orion_gpio_chip *ochip)
{
return ochip->base + GPIO_EDGE_CAUSE_OFF;
}
/* The two mask registers additionally honour the per-bank mask_offset. */
static void __iomem *GPIO_EDGE_MASK(struct orion_gpio_chip *ochip)
{
return ochip->base + ochip->mask_offset + GPIO_EDGE_MASK_OFF;
}
static void __iomem *GPIO_LEVEL_MASK(struct orion_gpio_chip *ochip)
{
return ochip->base + ochip->mask_offset + GPIO_LEVEL_MASK_OFF;
}
/* Fixed-size bank table; orion_gpio_chip_count banks are in use. */
static struct orion_gpio_chip orion_gpio_chips[2];
static int orion_gpio_chip_count;
/*
 * Register read-modify-write helpers. Callers must hold ochip->lock
 * (or run before interrupts are live) to make the RMW atomic.
 */
/* Set a pin's direction: input != 0 selects input, else output. */
static inline void
__set_direction(struct orion_gpio_chip *ochip, unsigned pin, int input)
{
u32 u;
u = readl(GPIO_IO_CONF(ochip));
if (input)
u |= 1 << pin;
else
u &= ~(1 << pin);
writel(u, GPIO_IO_CONF(ochip));
}
/* Drive a pin's output latch high or low. */
static void __set_level(struct orion_gpio_chip *ochip, unsigned pin, int high)
{
u32 u;
u = readl(GPIO_OUT(ochip));
if (high)
u |= 1 << pin;
else
u &= ~(1 << pin);
writel(u, GPIO_OUT(ochip));
}
/* Enable or disable hardware blinking on a pin. */
static inline void
__set_blinking(struct orion_gpio_chip *ochip, unsigned pin, int blink)
{
u32 u;
u = readl(GPIO_BLINK_EN(ochip));
if (blink)
u |= 1 << pin;
else
u &= ~(1 << pin);
writel(u, GPIO_BLINK_EN(ochip));
}
/*
 * Check that @pin exists on this bank and supports the requested
 * mode(s) (GPIO_INPUT_OK and/or GPIO_OUTPUT_OK). Returns 1 when valid,
 * 0 otherwise. Fix: the failure path returned the bool constant `false`
 * from an int-returning function while the success path returned 1; use
 * plain 0 for consistency.
 */
static inline int
orion_gpio_is_valid(struct orion_gpio_chip *ochip, unsigned pin, int mode)
{
	if (pin >= ochip->chip.ngpio)
		goto err_out;

	if ((mode & GPIO_INPUT_OK) && !test_bit(pin, &ochip->valid_input))
		goto err_out;

	if ((mode & GPIO_OUTPUT_OK) && !test_bit(pin, &ochip->valid_output))
		goto err_out;

	return 1;

err_out:
	pr_debug("%s: invalid GPIO %d\n", __func__, pin);
	return 0;
}
/*
 * GPIO primitives.
 */
/* gpiolib .request: succeed if the pin is valid for input or output. */
static int orion_gpio_request(struct gpio_chip *chip, unsigned pin)
{
struct orion_gpio_chip *ochip =
container_of(chip, struct orion_gpio_chip, chip);
if (orion_gpio_is_valid(ochip, pin, GPIO_INPUT_OK) ||
orion_gpio_is_valid(ochip, pin, GPIO_OUTPUT_OK))
return 0;
return -EINVAL;
}
/* gpiolib .direction_input: switch the pin to input under the bank lock. */
static int orion_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
{
struct orion_gpio_chip *ochip =
container_of(chip, struct orion_gpio_chip, chip);
unsigned long flags;
if (!orion_gpio_is_valid(ochip, pin, GPIO_INPUT_OK))
return -EINVAL;
spin_lock_irqsave(&ochip->lock, flags);
__set_direction(ochip, pin, 1);
spin_unlock_irqrestore(&ochip->lock, flags);
return 0;
}
/* gpiolib .get: inputs read DATA_IN xor polarity; outputs read the latch. */
static int orion_gpio_get(struct gpio_chip *chip, unsigned pin)
{
struct orion_gpio_chip *ochip =
container_of(chip, struct orion_gpio_chip, chip);
int val;
if (readl(GPIO_IO_CONF(ochip)) & (1 << pin)) {
val = readl(GPIO_DATA_IN(ochip)) ^ readl(GPIO_IN_POL(ochip));
} else {
val = readl(GPIO_OUT(ochip));
}
return (val >> pin) & 1;
}
/* gpiolib .direction_output: disable blink, set level, then flip to output. */
static int
orion_gpio_direction_output(struct gpio_chip *chip, unsigned pin, int value)
{
struct orion_gpio_chip *ochip =
container_of(chip, struct orion_gpio_chip, chip);
unsigned long flags;
if (!orion_gpio_is_valid(ochip, pin, GPIO_OUTPUT_OK))
return -EINVAL;
spin_lock_irqsave(&ochip->lock, flags);
__set_blinking(ochip, pin, 0);
__set_level(ochip, pin, value);
__set_direction(ochip, pin, 0);
spin_unlock_irqrestore(&ochip->lock, flags);
return 0;
}
/* gpiolib .set: update the output latch under the bank lock. */
static void orion_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
{
struct orion_gpio_chip *ochip =
container_of(chip, struct orion_gpio_chip, chip);
unsigned long flags;
spin_lock_irqsave(&ochip->lock, flags);
__set_level(ochip, pin, value);
spin_unlock_irqrestore(&ochip->lock, flags);
}
/* gpiolib .to_irq: map the pin into this bank's irq domain. */
static int orion_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
{
struct orion_gpio_chip *ochip =
container_of(chip, struct orion_gpio_chip, chip);
return irq_create_mapping(ochip->domain,
ochip->secondary_irq_base + pin);
}
/*
 * Orion-specific GPIO API extensions.
 */
/* Find the bank whose gpiolib number range contains @pin, or NULL. */
static struct orion_gpio_chip *orion_gpio_chip_find(int pin)
{
int i;
for (i = 0; i < orion_gpio_chip_count; i++) {
struct orion_gpio_chip *ochip = orion_gpio_chips + i;
struct gpio_chip *chip = &ochip->chip;
if (pin >= chip->base && pin < chip->base + chip->ngpio)
return ochip;
}
return NULL;
}
/* Park an unused pin: output, driven low. */
void __init orion_gpio_set_unused(unsigned pin)
{
struct orion_gpio_chip *ochip = orion_gpio_chip_find(pin);
if (ochip == NULL)
return;
pin -= ochip->chip.base;
/* Configure as output, drive low. */
__set_level(ochip, pin, 0);
__set_direction(ochip, pin, 0);
}
/* Record which directions a pin supports; mode==1 means both. */
void __init orion_gpio_set_valid(unsigned pin, int mode)
{
struct orion_gpio_chip *ochip = orion_gpio_chip_find(pin);
if (ochip == NULL)
return;
pin -= ochip->chip.base;
if (mode == 1)
mode = GPIO_INPUT_OK | GPIO_OUTPUT_OK;
if (mode & GPIO_INPUT_OK)
__set_bit(pin, &ochip->valid_input);
else
__clear_bit(pin, &ochip->valid_input);
if (mode & GPIO_OUTPUT_OK)
__set_bit(pin, &ochip->valid_output);
else
__clear_bit(pin, &ochip->valid_output);
}
/* Enable/disable hardware blink on a pin (level forced low first). */
void orion_gpio_set_blink(unsigned pin, int blink)
{
struct orion_gpio_chip *ochip = orion_gpio_chip_find(pin);
unsigned long flags;
if (ochip == NULL)
return;
spin_lock_irqsave(&ochip->lock, flags);
__set_level(ochip, pin & 31, 0);
__set_blinking(ochip, pin & 31, blink);
spin_unlock_irqrestore(&ochip->lock, flags);
}
EXPORT_SYMBOL(orion_gpio_set_blink);
#define ORION_BLINK_HALF_PERIOD 100 /* ms */
/*
 * leds-gpio blink_set callback: hardware blink has a fixed half period,
 * so only default (0/0) delays are honoured; otherwise blink is simply
 * enabled or the static level set.
 */
int orion_gpio_led_blink_set(unsigned gpio, int state,
unsigned long *delay_on, unsigned long *delay_off)
{
if (delay_on && delay_off && !*delay_on && !*delay_off)
*delay_on = *delay_off = ORION_BLINK_HALF_PERIOD;
switch (state) {
case GPIO_LED_NO_BLINK_LOW:
case GPIO_LED_NO_BLINK_HIGH:
orion_gpio_set_blink(gpio, 0);
gpio_set_value(gpio, state);
break;
case GPIO_LED_BLINK:
orion_gpio_set_blink(gpio, 1);
}
return 0;
}
EXPORT_SYMBOL_GPL(orion_gpio_led_blink_set);
/*****************************************************************************
* Orion GPIO IRQ
*
* GPIO_IN_POL register controls whether GPIO_DATA_IN will hold the same
* value of the line or the opposite value.
*
* Level IRQ handlers: DATA_IN is used directly as cause register.
* Interrupt are masked by LEVEL_MASK registers.
* Edge IRQ handlers: Change in DATA_IN are latched in EDGE_CAUSE.
* Interrupt are masked by EDGE_MASK registers.
* Both-edge handlers: Similar to regular Edge handlers, but also swaps
* the polarity to catch the next line transaction.
* This is a race condition that might not perfectly
* work on some use cases.
*
* Every eight GPIO lines are grouped (OR'ed) before going up to main
* cause register.
*
* EDGE cause mask
* data-in /--------| |-----| |----\
* -----| |----- ---- to main cause reg
* X \----------------| |----/
* polarity LEVEL mask
*
****************************************************************************/
/*
 * irq_chip .irq_set_type: program edge/level sense for a GPIO irq.
 * Fails with -EINVAL if the pin is not configured as an input, if no
 * sense bits are given, or if the required alternate chip type cannot
 * be selected. Polarity is expressed through GPIO_IN_POL; for
 * EDGE_BOTH the initial polarity is chosen from the current line level
 * so the first transition in either direction is caught.
 */
static int gpio_irq_set_type(struct irq_data *d, u32 type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct orion_gpio_chip *ochip = gc->private;
int pin;
u32 u;
pin = d->hwirq - ochip->secondary_irq_base;
u = readl(GPIO_IO_CONF(ochip)) & (1 << pin);
if (!u) {
return -EINVAL;
}
type &= IRQ_TYPE_SENSE_MASK;
if (type == IRQ_TYPE_NONE)
return -EINVAL;
/* Check if we need to change chip and handler */
if (!(ct->type & type))
if (irq_setup_alt_chip(d, type))
return -EINVAL;
/*
 * Configure interrupt polarity.
 */
if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH) {
u = readl(GPIO_IN_POL(ochip));
u &= ~(1 << pin);
writel(u, GPIO_IN_POL(ochip));
} else if (type == IRQ_TYPE_EDGE_FALLING || type == IRQ_TYPE_LEVEL_LOW) {
u = readl(GPIO_IN_POL(ochip));
u |= 1 << pin;
writel(u, GPIO_IN_POL(ochip));
} else if (type == IRQ_TYPE_EDGE_BOTH) {
u32 v;
v = readl(GPIO_IN_POL(ochip)) ^ readl(GPIO_DATA_IN(ochip));
/*
 * set initial polarity based on current input level
 */
u = readl(GPIO_IN_POL(ochip));
if (v & (1 << pin))
u |= 1 << pin; /* falling */
else
u &= ~(1 << pin); /* rising */
writel(u, GPIO_IN_POL(ochip));
}
return 0;
}
/*
 * Chained handler for a bank's summary interrupt: compute the pending
 * set (level causes masked by LEVEL_MASK plus latched edge causes
 * masked by EDGE_MASK) and dispatch each pending pin's irq. For
 * both-edge pins the polarity is flipped first to catch the next
 * transition (inherently racy with the line, as noted above).
 *
 * Fix: the inner loop declared a second variable named `irq`, shadowing
 * the function parameter (-Wshadow); renamed it to pin_irq.
 */
static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct orion_gpio_chip *ochip = irq_get_handler_data(irq);
	u32 cause, type;
	int i;

	if (ochip == NULL)
		return;

	cause = readl(GPIO_DATA_IN(ochip)) & readl(GPIO_LEVEL_MASK(ochip));
	cause |= readl(GPIO_EDGE_CAUSE(ochip)) & readl(GPIO_EDGE_MASK(ochip));

	for (i = 0; i < ochip->chip.ngpio; i++) {
		int pin_irq;

		if (!(cause & (1 << i)))
			continue;

		pin_irq = ochip->secondary_irq_base + i;
		type = irq_get_trigger_type(pin_irq);
		if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
			/* Swap polarity (race with GPIO line) */
			u32 polarity;

			polarity = readl(GPIO_IN_POL(ochip));
			polarity ^= 1 << i;
			writel(polarity, GPIO_IN_POL(ochip));
		}
		generic_handle_irq(pin_irq);
	}
}
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
/*
 * debugfs .dbg_show: dump, for every requested pin, its direction,
 * level/blink state for outputs, or level, polarity and interrupt
 * status for inputs. Registers are snapshotted once up front.
 */
static void orion_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct orion_gpio_chip *ochip =
container_of(chip, struct orion_gpio_chip, chip);
u32 out, io_conf, blink, in_pol, data_in, cause, edg_msk, lvl_msk;
int i;
out = readl_relaxed(GPIO_OUT(ochip));
io_conf = readl_relaxed(GPIO_IO_CONF(ochip));
blink = readl_relaxed(GPIO_BLINK_EN(ochip));
in_pol = readl_relaxed(GPIO_IN_POL(ochip));
data_in = readl_relaxed(GPIO_DATA_IN(ochip));
cause = readl_relaxed(GPIO_EDGE_CAUSE(ochip));
edg_msk = readl_relaxed(GPIO_EDGE_MASK(ochip));
lvl_msk = readl_relaxed(GPIO_LEVEL_MASK(ochip));
for (i = 0; i < chip->ngpio; i++) {
const char *label;
u32 msk;
bool is_out;
/* Only pins that have been requested are shown. */
label = gpiochip_is_requested(chip, i);
if (!label)
continue;
msk = 1 << i;
is_out = !(io_conf & msk);
seq_printf(s, " gpio-%-3d (%-20.20s)", chip->base + i, label);
if (is_out) {
seq_printf(s, " out %s %s\n",
out & msk ? "hi" : "lo",
blink & msk ? "(blink )" : "");
continue;
}
seq_printf(s, " in %s (act %s) - IRQ",
(data_in ^ in_pol) & msk ? "hi" : "lo",
in_pol & msk ? "lo" : "hi");
if (!((edg_msk | lvl_msk) & msk)) {
seq_printf(s, " disabled\n");
continue;
}
if (edg_msk & msk)
seq_printf(s, " edge ");
if (lvl_msk & msk)
seq_printf(s, " level");
seq_printf(s, " (%s)\n", cause & msk ? "pending" : "clear ");
}
}
#else
#define orion_gpio_dbg_show NULL
#endif
/*
 * Register one GPIO bank: fill in the gpiolib chip, mask/clear all GPIO
 * interrupts, install the chained summary handlers, build a two-type
 * (level + edge) generic irq chip and put a legacy irq domain on top.
 * Silently does nothing once both slots in orion_gpio_chips are used.
 */
void __init orion_gpio_init(struct device_node *np,
int gpio_base, int ngpio,
void __iomem *base, int mask_offset,
int secondary_irq_base,
int irqs[4])
{
struct orion_gpio_chip *ochip;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
char gc_label[16];
int i;
if (orion_gpio_chip_count == ARRAY_SIZE(orion_gpio_chips))
return;
snprintf(gc_label, sizeof(gc_label), "orion_gpio%d",
orion_gpio_chip_count);
ochip = orion_gpio_chips + orion_gpio_chip_count;
ochip->chip.label = kstrdup(gc_label, GFP_KERNEL);
ochip->chip.request = orion_gpio_request;
ochip->chip.direction_input = orion_gpio_direction_input;
ochip->chip.get = orion_gpio_get;
ochip->chip.direction_output = orion_gpio_direction_output;
ochip->chip.set = orion_gpio_set;
ochip->chip.to_irq = orion_gpio_to_irq;
ochip->chip.base = gpio_base;
ochip->chip.ngpio = ngpio;
ochip->chip.can_sleep = 0;
#ifdef CONFIG_OF
ochip->chip.of_node = np;
#endif
ochip->chip.dbg_show = orion_gpio_dbg_show;
spin_lock_init(&ochip->lock);
ochip->base = (void __iomem *)base;
ochip->valid_input = 0;
ochip->valid_output = 0;
ochip->mask_offset = mask_offset;
ochip->secondary_irq_base = secondary_irq_base;
gpiochip_add(&ochip->chip);
/*
 * Mask and clear GPIO interrupts.
 */
writel(0, GPIO_EDGE_CAUSE(ochip));
writel(0, GPIO_EDGE_MASK(ochip));
writel(0, GPIO_LEVEL_MASK(ochip));
/* Setup the interrupt handlers. Each chip can have up to 4
 * interrupt handlers, with each handler dealing with 8 GPIO
 * pins. */
for (i = 0; i < 4; i++) {
if (irqs[i]) {
irq_set_handler_data(irqs[i], ochip);
irq_set_chained_handler(irqs[i], gpio_irq_handler);
}
}
/* Two chip types: level-triggered (type 0) and edge-triggered (type 1). */
gc = irq_alloc_generic_chip("orion_gpio_irq", 2,
secondary_irq_base,
ochip->base, handle_level_irq);
gc->private = ochip;
ct = gc->chip_types;
ct->regs.mask = ochip->mask_offset + GPIO_LEVEL_MASK_OFF;
ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_set_type = gpio_irq_set_type;
ct->chip.name = ochip->chip.label;
ct++;
ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF;
ct->regs.ack = GPIO_EDGE_CAUSE_OFF;
ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
ct->chip.irq_ack = irq_gc_ack_clr_bit;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_set_type = gpio_irq_set_type;
ct->handler = handle_edge_irq;
ct->chip.name = ochip->chip.label;
irq_setup_generic_chip(gc, IRQ_MSK(ngpio), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
/* Setup irq domain on top of the generic chip. */
ochip->domain = irq_domain_add_legacy(np,
ochip->chip.ngpio,
ochip->secondary_irq_base,
ochip->secondary_irq_base,
&irq_domain_simple_ops,
ochip);
if (!ochip->domain)
panic("%s: couldn't allocate irq domain (DT).\n",
ochip->chip.label);
orion_gpio_chip_count++;
}
#ifdef CONFIG_OF
/*
 * Probe one "marvell,orion-gpio" DT node and register it through
 * orion_gpio_init(). "ngpio" is mandatory; "mask-offset" is optional.
 *
 * Fix: the optional-property check was inverted. of_property_read_u32()
 * returns 0 on success and -EINVAL when the property is absent, but the
 * original code did `if (ret == -EINVAL) mask_offset = 0; else goto out;`
 * — i.e. it bailed out whenever the property WAS present (ret == 0) and
 * for every node that did supply "mask-offset" the bank was never
 * registered. Treat -EINVAL as "absent, use 0", success as "use the
 * value", and only fail on other errors.
 */
static void __init orion_gpio_of_init_one(struct device_node *np,
					  int irq_gpio_base)
{
	int ngpio, gpio_base, mask_offset;
	void __iomem *base;
	int ret, i;
	int irqs[4];
	int secondary_irq_base;

	ret = of_property_read_u32(np, "ngpio", &ngpio);
	if (ret)
		goto out;

	ret = of_property_read_u32(np, "mask-offset", &mask_offset);
	if (ret == -EINVAL)
		mask_offset = 0;	/* property absent: no extra offset */
	else if (ret)
		goto out;

	base = of_iomap(np, 0);
	if (!base)
		goto out;

	secondary_irq_base = irq_gpio_base + (32 * orion_gpio_chip_count);
	gpio_base = 32 * orion_gpio_chip_count;

	/* Get the interrupt numbers. Each chip can have up to 4
	 * interrupt handlers, with each handler dealing with 8 GPIO
	 * pins. */
	for (i = 0; i < 4; i++)
		irqs[i] = irq_of_parse_and_map(np, i);

	orion_gpio_init(np, gpio_base, ngpio, base, mask_offset,
			secondary_irq_base, irqs);
	return;
out:
	pr_err("%s: %s: missing mandatory property\n", __func__, np->name);
}
/* Probe every "marvell,orion-gpio" node in the device tree. */
void __init orion_gpio_of_init(int irq_gpio_base)
{
struct device_node *np;
for_each_compatible_node(np, NULL, "marvell,orion-gpio")
orion_gpio_of_init_one(np, irq_gpio_base);
}
#endif
| gpl-2.0 |
kamarush/Xperia-2011-KRsH-Kernel-2.6.32.9-ICS | security/keys/proc.c | 527 | 8196 | /* proc.c: proc files for key database enumeration
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/errno.h>
#include "internal.h"
#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
/* Forward declarations and op tables for /proc/keys (debug-only). */
static int proc_keys_open(struct inode *inode, struct file *file);
static void *proc_keys_start(struct seq_file *p, loff_t *_pos);
static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos);
static void proc_keys_stop(struct seq_file *p, void *v);
static int proc_keys_show(struct seq_file *m, void *v);
static const struct seq_operations proc_keys_ops = {
.start = proc_keys_start,
.next = proc_keys_next,
.stop = proc_keys_stop,
.show = proc_keys_show,
};
static const struct file_operations proc_keys_fops = {
.open = proc_keys_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif
/* Forward declarations and op tables for /proc/key-users. */
static int proc_key_users_open(struct inode *inode, struct file *file);
static void *proc_key_users_start(struct seq_file *p, loff_t *_pos);
static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos);
static void proc_key_users_stop(struct seq_file *p, void *v);
static int proc_key_users_show(struct seq_file *m, void *v);
static const struct seq_operations proc_key_users_ops = {
.start = proc_key_users_start,
.next = proc_key_users_next,
.stop = proc_key_users_stop,
.show = proc_key_users_show,
};
static const struct file_operations proc_key_users_fops = {
.open = proc_key_users_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*****************************************************************************/
/*
* declare the /proc files
*/
/* Create /proc/keys (when debug-enabled) and /proc/key-users at boot;
 * a failure to create either is fatal. */
static int __init key_proc_init(void)
{
struct proc_dir_entry *p;
#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
p = proc_create("keys", 0, NULL, &proc_keys_fops);
if (!p)
panic("Cannot create /proc/keys\n");
#endif
p = proc_create("key-users", 0, NULL, &proc_key_users_fops);
if (!p)
panic("Cannot create /proc/key-users\n");
return 0;
} /* end key_proc_init() */
__initcall(key_proc_init);
/*****************************************************************************/
/*
* implement "/proc/keys" to provides a list of the keys on the system
*/
#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
/*
 * Advance to the next node of the key serial tree, skipping keys that
 * belong to a different user namespace than the caller's. Returns NULL
 * when the tree is exhausted.
 */
static struct rb_node *key_serial_next(struct rb_node *n)
{
	struct user_namespace *user_ns = current_user_ns();

	for (n = rb_next(n); n; n = rb_next(n)) {
		struct key *key = rb_entry(n, struct key, serial_node);

		if (key->user->user_ns == user_ns)
			break;
	}
	return n;
}
/* seq_file open for /proc/keys. */
static int proc_keys_open(struct inode *inode, struct file *file)
{
return seq_open(file, &proc_keys_ops);
}
/*
 * Find the key with the smallest serial >= id that is visible in the
 * caller's user namespace, or NULL. First walks the rb-tree for the
 * smallest serial >= id, then steps forward until the namespace matches.
 */
static struct key *find_ge_key(key_serial_t id)
{
struct user_namespace *user_ns = current_user_ns();
struct rb_node *n = key_serial_tree.rb_node;
struct key *minkey = NULL;
while (n) {
struct key *key = rb_entry(n, struct key, serial_node);
if (id < key->serial) {
/* candidate; a smaller qualifying serial may exist on the left */
if (!minkey || minkey->serial > key->serial)
minkey = key;
n = n->rb_left;
} else if (id > key->serial) {
n = n->rb_right;
} else {
/* exact match */
minkey = key;
break;
}
key = NULL;
}
if (!minkey)
return NULL;
for (;;) {
if (minkey->user->user_ns == user_ns)
return minkey;
n = rb_next(&minkey->serial_node);
if (!n)
return NULL;
minkey = rb_entry(n, struct key, serial_node);
}
}
/*
 * seq_file start: take key_serial_lock (released in proc_keys_stop,
 * which the seq_file core always calls after start) and position at the
 * first visible key with serial >= *_pos. Positions beyond INT_MAX
 * cannot be valid serials, so they terminate the walk.
 */
static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
__acquires(key_serial_lock)
{
key_serial_t pos = *_pos;
struct key *key;
spin_lock(&key_serial_lock);
if (*_pos > INT_MAX)
return NULL;
key = find_ge_key(pos);
if (!key)
return NULL;
*_pos = key->serial;
return &key->serial_node;
}
/* Extract the serial number from an rb-tree node. */
static inline key_serial_t key_node_serial(struct rb_node *n)
{
struct key *key = rb_entry(n, struct key, serial_node);
return key->serial;
}
/* seq_file next: advance to the next visible key and update *_pos. */
static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
{
struct rb_node *n;
n = key_serial_next(v);
if (n)
*_pos = key_node_serial(n);
return n;
}
/* seq_file stop: drop the lock taken in proc_keys_start. */
static void proc_keys_stop(struct seq_file *p, void *v)
__releases(key_serial_lock)
{
spin_unlock(&key_serial_lock);
}
/*
 * seq_file show: print one /proc/keys line (serial, flags, usage,
 * expiry, permissions, owner, type, description). Keys the caller may
 * not view are silently skipped.
 */
static int proc_keys_show(struct seq_file *m, void *v)
{
struct rb_node *_p = v;
struct key *key = rb_entry(_p, struct key, serial_node);
struct timespec now;
unsigned long timo;
char xbuf[12];
int rc;
/* check whether the current task is allowed to view the key (assuming
 * non-possession)
 * - the caller holds a spinlock, and thus the RCU read lock, making our
 * access to __current_cred() safe
 */
rc = key_task_permission(make_key_ref(key, 0), current_cred(),
KEY_VIEW);
if (rc < 0)
return 0;
now = current_kernel_time();
rcu_read_lock();
/* come up with a suitable timeout value */
if (key->expiry == 0) {
memcpy(xbuf, "perm", 5);
} else if (now.tv_sec >= key->expiry) {
memcpy(xbuf, "expd", 5);
} else {
timo = key->expiry - now.tv_sec;
if (timo < 60)
sprintf(xbuf, "%lus", timo);
else if (timo < 60*60)
sprintf(xbuf, "%lum", timo / 60);
else if (timo < 60*60*24)
sprintf(xbuf, "%luh", timo / (60*60));
else if (timo < 60*60*24*7)
sprintf(xbuf, "%lud", timo / (60*60*24));
else
sprintf(xbuf, "%luw", timo / (60*60*24*7));
}
#define showflag(KEY, LETTER, FLAG) \
(test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')
seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
key->serial,
showflag(key, 'I', KEY_FLAG_INSTANTIATED),
showflag(key, 'R', KEY_FLAG_REVOKED),
showflag(key, 'D', KEY_FLAG_DEAD),
showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
showflag(key, 'N', KEY_FLAG_NEGATIVE),
atomic_read(&key->usage),
xbuf,
key->perm,
key->uid,
key->gid,
key->type->name);
#undef showflag
/* let the key type append its own description, if it has one */
if (key->type->describe)
key->type->describe(key, m);
seq_putc(m, '\n');
rcu_read_unlock();
return 0;
}
#endif /* CONFIG_KEYS_DEBUG_PROC_KEYS */
/*
 * Skip forward from @n to the first key_user node belonging to the current
 * user namespace (possibly @n itself); NULL if none remain.
 */
static struct rb_node *__key_user_next(struct rb_node *n)
{
	for (; n; n = rb_next(n)) {
		struct key_user *user = rb_entry(n, struct key_user, node);

		if (user->user_ns == current_user_ns())
			break;
	}
	return n;
}
/* Next key_user node in the current user namespace after @n. */
static struct rb_node *key_user_next(struct rb_node *n)
{
	struct rb_node *succ = rb_next(n);

	return __key_user_next(succ);
}
/* First key_user node in @r that belongs to the current user namespace. */
static struct rb_node *key_user_first(struct rb_root *r)
{
	return __key_user_next(rb_first(r));
}
/*****************************************************************************/
/*
* implement "/proc/key-users" to provides a list of the key users
*/
/* Open /proc/key-users: attach the seq_file iterator operations. */
static int proc_key_users_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &proc_key_users_ops);
}
/*
 * Begin a /proc/key-users iteration pass: take the tree lock (held until
 * _stop) and walk forward to the record at position *_pos.
 */
static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
	__acquires(key_user_lock)
{
	struct rb_node *node;
	loff_t remaining = *_pos;

	spin_lock(&key_user_lock);

	node = key_user_first(&key_user_tree);
	while (remaining > 0 && node) {
		node = key_user_next(node);
		remaining--;
	}
	return node;
}
/* Advance the /proc/key-users iterator by one record. */
static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
{
	++*_pos;
	return key_user_next(v);
}
/* End of a /proc/key-users iteration pass: release the tree lock. */
static void proc_key_users_stop(struct seq_file *p, void *v)
	__releases(key_user_lock)
{
	spin_unlock(&key_user_lock);
}
/*
 * Emit one /proc/key-users line: per-user key usage counters against the
 * applicable quota limits (root has its own limits).
 */
static int proc_key_users_show(struct seq_file *m, void *v)
{
	struct key_user *user = rb_entry((struct rb_node *)v,
					 struct key_user, node);
	int is_root = (user->uid == 0);
	unsigned maxkeys = is_root ? key_quota_root_maxkeys : key_quota_maxkeys;
	unsigned maxbytes = is_root ? key_quota_root_maxbytes : key_quota_maxbytes;

	/* uid: usage nkeys/nikeys qnkeys/maxkeys qnbytes/maxbytes */
	seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n",
		   user->uid,
		   atomic_read(&user->usage),
		   atomic_read(&user->nkeys),
		   atomic_read(&user->nikeys),
		   user->qnkeys,
		   maxkeys,
		   user->qnbytes,
		   maxbytes);
	return 0;
}
| gpl-2.0 |
zhmz90/linux | arch/arm/mach-davinci/da8xx-dt.c | 527 | 2442 | /*
* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
*
* Modified from mach-omap/omap2/board-generic.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/irqdomain.h>
#include <asm/mach/arch.h>
#include <mach/common.h>
#include <mach/cp_intc.h>
#include <mach/da8xx.h>
#define DA8XX_NUM_UARTS 3
/* Interrupt controllers probed from the device tree at IRQ-init time. */
static const struct of_device_id da8xx_irq_match[] __initconst = {
	{ .compatible = "ti,cp-intc", .data = cp_intc_of_init, },
	{ }
};

/* Machine init_irq hook: instantiate the CP-INTC from the device tree. */
static void __init da8xx_init_irq(void)
{
	of_irq_init(da8xx_irq_match);
}
/*
 * Auxiliary data mapping DT compatibles/addresses to the legacy platform
 * device names that existing clock lookups and drivers expect.
 */
static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
	OF_DEV_AUXDATA("ti,davinci-i2c", 0x01c22000, "i2c_davinci.1", NULL),
	OF_DEV_AUXDATA("ti,davinci-wdt", 0x01c21000, "davinci-wdt", NULL),
	OF_DEV_AUXDATA("ti,da830-mmc", 0x01c40000, "da830-mmc.0", NULL),
	OF_DEV_AUXDATA("ti,da850-ehrpwm", 0x01f00000, "ehrpwm", NULL),
	OF_DEV_AUXDATA("ti,da850-ehrpwm", 0x01f02000, "ehrpwm", NULL),
	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f06000, "ecap", NULL),
	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f07000, "ecap", NULL),
	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f08000, "ecap", NULL),
	OF_DEV_AUXDATA("ti,da830-spi", 0x01f0e000, "spi_davinci.1", NULL),
	OF_DEV_AUXDATA("ns16550a", 0x01c42000, "serial8250.0", NULL),
	OF_DEV_AUXDATA("ns16550a", 0x01d0c000, "serial8250.1", NULL),
	OF_DEV_AUXDATA("ns16550a", 0x01d0d000, "serial8250.2", NULL),
	OF_DEV_AUXDATA("ti,davinci_mdio", 0x01e24000, "davinci_mdio.0", NULL),
	OF_DEV_AUXDATA("ti,davinci-dm6467-emac", 0x01e20000, "davinci_emac.1",
		       NULL),
	OF_DEV_AUXDATA("ti,da830-mcasp-audio", 0x01d00000, "davinci-mcasp.0", NULL),
	{}
};
#ifdef CONFIG_ARCH_DAVINCI_DA850
/* Populate platform devices from the flattened device tree. */
static void __init da850_init_machine(void)
{
	of_platform_populate(NULL, of_default_bus_match_table,
			     da850_auxdata_lookup, NULL);
}

/* Board compatibles this generic machine descriptor claims. */
static const char *da850_boards_compat[] __initdata = {
	"enbw,cmc",
	"ti,da850-evm",
	"ti,da850",
	NULL,
};

DT_MACHINE_START(DA850_DT, "Generic DA850/OMAP-L138/AM18x")
	.map_io		= da850_init,
	.init_irq	= da8xx_init_irq,
	.init_time	= davinci_timer_init,
	.init_machine	= da850_init_machine,
	.dt_compat	= da850_boards_compat,
	.init_late	= davinci_init_late,
	.restart	= da8xx_restart,
MACHINE_END
#endif
| gpl-2.0 |
krzk/tizen-tv-rpi-linux | drivers/media/dvb-frontends/mt312.c | 783 | 18701 | /*
Driver for Zarlink VP310/MT312/ZL10313 Satellite Channel Decoder
Copyright (C) 2003 Andreas Oberritter <obi@linuxtv.org>
Copyright (C) 2008 Matthias Schwarzott <zzam@gentoo.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
References:
http://products.zarlink.com/product_profiles/MT312.htm
http://products.zarlink.com/product_profiles/SL1935.htm
*/
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "dvb_frontend.h"
#include "mt312_priv.h"
#include "mt312.h"
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
/* Per-demodulator driver state, allocated in mt312_attach(). */
struct mt312_state {
	struct i2c_adapter *i2c;	/* bus the demod is attached to */
	/* configuration settings */
	const struct mt312_config *config;
	struct dvb_frontend frontend;
	u8 id;			/* chip ID read from the ID register */
	unsigned long xtal;	/* PLL reference clock in Hz */
	u8 freq_mult;		/* clock multiplier: 6 => 60 MHz, 9 => 90 MHz */
};
static int debug;
#define dprintk(args...) \
do { \
if (debug) \
printk(KERN_DEBUG "mt312: " args); \
} while (0)
#define MT312_PLL_CLK 10000000UL /* 10 MHz */
#define MT312_PLL_CLK_10_111 10111000UL /* 10.111 MHz */
/*
 * Burst-read @count registers starting at @reg into @buf.
 *
 * Performs one combined I2C transaction: a one-byte register-address
 * write followed by a repeated-start read.  Returns 0 on success or
 * -EREMOTEIO if the adapter did not complete both messages.
 */
static int mt312_read(struct mt312_state *state, const enum mt312_reg_addr reg,
		      u8 *buf, const size_t count)
{
	int ret;
	struct i2c_msg msg[2];
	u8 regbuf[1] = { reg };

	/* message 0: select the start register (no stop condition) */
	msg[0].addr = state->config->demod_address;
	msg[0].flags = 0;
	msg[0].buf = regbuf;
	msg[0].len = 1;
	/* message 1: read @count bytes back */
	msg[1].addr = state->config->demod_address;
	msg[1].flags = I2C_M_RD;
	msg[1].buf = buf;
	msg[1].len = count;

	ret = i2c_transfer(state->i2c, msg, 2);

	if (ret != 2) {
		printk(KERN_DEBUG "%s: ret == %d\n", __func__, ret);
		return -EREMOTEIO;
	}

	if (debug) {
		int i;
		dprintk("R(%d):", reg & 0x7f);
		for (i = 0; i < count; i++)
			printk(KERN_CONT " %02x", buf[i]);
		printk("\n");
	}

	return 0;
}
/*
 * Burst-write @count bytes from @src to consecutive registers starting
 * at @reg.  The register address and payload are combined into a single
 * I2C write, bounded by the MAX_XFER_SIZE stack buffer.  Returns 0 on
 * success, -EINVAL if the payload would overflow the buffer, or
 * -EREMOTEIO on transfer failure.
 */
static int mt312_write(struct mt312_state *state, const enum mt312_reg_addr reg,
		       const u8 *src, const size_t count)
{
	int ret;
	u8 buf[MAX_XFER_SIZE];
	struct i2c_msg msg;

	/* 1 address byte + payload must fit the transfer buffer */
	if (1 + count > sizeof(buf)) {
		printk(KERN_WARNING
		       "mt312: write: len=%zu is too big!\n", count);
		return -EINVAL;
	}

	if (debug) {
		int i;
		dprintk("W(%d):", reg & 0x7f);
		for (i = 0; i < count; i++)
			printk(KERN_CONT " %02x", src[i]);
		printk("\n");
	}

	buf[0] = reg;
	memcpy(&buf[1], src, count);

	msg.addr = state->config->demod_address;
	msg.flags = 0;
	msg.buf = buf;
	msg.len = count + 1;

	ret = i2c_transfer(state->i2c, &msg, 1);

	if (ret != 1) {
		dprintk("%s: ret == %d\n", __func__, ret);
		return -EREMOTEIO;
	}

	return 0;
}
/* Read a single register. */
static inline int mt312_readreg(struct mt312_state *state,
				const enum mt312_reg_addr reg, u8 *val)
{
	return mt312_read(state, reg, val, 1);
}

/* Write a single register. */
static inline int mt312_writereg(struct mt312_state *state,
				 const enum mt312_reg_addr reg, const u8 val)
{
	return mt312_write(state, reg, &val, 1);
}

/* Unsigned division rounded to the nearest integer. */
static inline u32 mt312_div(u32 a, u32 b)
{
	return (a + (b / 2)) / b;
}

/* Reset the demod: 0x80 requests a full reset, 0x40 otherwise. */
static int mt312_reset(struct mt312_state *state, const u8 full)
{
	return mt312_writereg(state, RESET, full ? 0x80 : 0x40);
}
/*
 * Report the spectral inversion currently detected by the Viterbi block.
 * *i is only updated when the chip ran automatic inversion detection
 * (VIT_MODE bit 7 set); otherwise it is left untouched.
 */
static int mt312_get_inversion(struct mt312_state *state,
			       fe_spectral_inversion_t *i)
{
	u8 vit_mode;
	int ret = mt312_readreg(state, VIT_MODE, &vit_mode);

	if (ret < 0)
		return ret;

	if (vit_mode & 0x80) {
		/* auto inversion was used; bit 6 gives the result */
		*i = (vit_mode & 0x40) ? INVERSION_ON : INVERSION_OFF;
	}

	return 0;
}
/*
 * Read back the symbol rate the demod locked to.
 *
 * NOTE(review): this function only dprintk()s the computed rates and
 * never stores into *sr - presumably a long-standing limitation; the
 * read sequence (MON_CTRL select then MONITOR_H read) is kept intact.
 */
static int mt312_get_symbol_rate(struct mt312_state *state, u32 *sr)
{
	int ret;
	u8 sym_rate_h;
	u8 dec_ratio;
	u16 sym_rat_op;
	u16 monitor;
	u8 buf[2];

	ret = mt312_readreg(state, SYM_RATE_H, &sym_rate_h);
	if (ret < 0)
		return ret;

	if (sym_rate_h & 0x80) {
		/* symbol rate search was used */
		ret = mt312_writereg(state, MON_CTRL, 0x03);
		if (ret < 0)
			return ret;

		ret = mt312_read(state, MONITOR_H, buf, sizeof(buf));
		if (ret < 0)
			return ret;

		monitor = (buf[0] << 8) | buf[1];

		dprintk("sr(auto) = %u\n",
			mt312_div(monitor * 15625, 4));
	} else {
		/* manual rate: derive it from the decimation ratio and
		 * the symbol-rate operating point */
		ret = mt312_writereg(state, MON_CTRL, 0x05);
		if (ret < 0)
			return ret;

		ret = mt312_read(state, MONITOR_H, buf, sizeof(buf));
		if (ret < 0)
			return ret;

		dec_ratio = ((buf[0] >> 5) & 0x07) * 32;

		ret = mt312_read(state, SYM_RAT_OP_H, buf, sizeof(buf));
		if (ret < 0)
			return ret;

		sym_rat_op = (buf[0] << 8) | buf[1];

		dprintk("sym_rat_op=%d dec_ratio=%d\n",
			sym_rat_op, dec_ratio);
		dprintk("*sr(manual) = %lu\n",
			(((state->xtal * 8192) / (sym_rat_op + 8192)) *
			 2) - dec_ratio);
	}

	return 0;
}
/* Translate FEC_STATUS bits [6:4] into the current convolutional rate. */
static int mt312_get_code_rate(struct mt312_state *state, fe_code_rate_t *cr)
{
	static const fe_code_rate_t fec_tab[8] = {
		FEC_1_2, FEC_2_3, FEC_3_4, FEC_5_6, FEC_6_7, FEC_7_8,
		FEC_AUTO, FEC_AUTO
	};
	u8 fec_status;
	int ret = mt312_readreg(state, FEC_STATUS, &fec_status);

	if (ret < 0)
		return ret;

	*cr = fec_tab[(fec_status >> 4) & 0x07];

	return 0;
}
/*
 * Bring the frontend out of standby and program the power-up register
 * defaults.  The sequence (wake, settle, full reset, Viterbi setup,
 * chip-specific ADC config, clock/limit registers) follows the datasheet
 * and must be preserved in order.
 */
static int mt312_initfe(struct dvb_frontend *fe)
{
	struct mt312_state *state = fe->demodulator_priv;
	int ret;
	u8 buf[2];

	/* wake up */
	ret = mt312_writereg(state, CONFIG,
			     (state->freq_mult == 6 ? 0x88 : 0x8c));
	if (ret < 0)
		return ret;

	/* wait at least 150 usec */
	udelay(150);

	/* full reset */
	ret = mt312_reset(state, 1);
	if (ret < 0)
		return ret;

	/* Per datasheet, write correct values. 09/28/03 ACCJr.
	 * If we don't do this, we won't get FE_HAS_VITERBI in the VP310. */
	{
		u8 buf_def[8] = { 0x14, 0x12, 0x03, 0x02,
				  0x01, 0x00, 0x00, 0x00 };

		ret = mt312_write(state, VIT_SETUP, buf_def, sizeof(buf_def));
		if (ret < 0)
			return ret;
	}

	switch (state->id) {
	case ID_ZL10313:
		/* enable ADC */
		ret = mt312_writereg(state, GPP_CTRL, 0x80);
		if (ret < 0)
			return ret;

		/* configure ZL10313 for optimal ADC performance */
		buf[0] = 0x80;
		buf[1] = 0xB0;
		ret = mt312_write(state, HW_CTRL, buf, 2);
		if (ret < 0)
			return ret;

		/* enable MPEG output and ADCs */
		ret = mt312_writereg(state, HW_CTRL, 0x00);
		if (ret < 0)
			return ret;

		ret = mt312_writereg(state, MPEG_CTRL, 0x00);
		if (ret < 0)
			return ret;

		break;
	}

	/* SYS_CLK */
	buf[0] = mt312_div(state->xtal * state->freq_mult * 2, 1000000);

	/* DISEQC_RATIO */
	buf[1] = mt312_div(state->xtal, 22000 * 4);

	ret = mt312_write(state, SYS_CLK, buf, sizeof(buf));
	if (ret < 0)
		return ret;

	ret = mt312_writereg(state, SNR_THS_HIGH, 0x32);
	if (ret < 0)
		return ret;

	/* different MOCLK polarity */
	switch (state->id) {
	case ID_ZL10313:
		buf[0] = 0x33;
		break;
	default:
		buf[0] = 0x53;
		break;
	}

	ret = mt312_writereg(state, OP_CTRL, buf[0]);
	if (ret < 0)
		return ret;

	/* TS_SW_LIM */
	buf[0] = 0x8c;
	buf[1] = 0x98;

	ret = mt312_write(state, TS_SW_LIM_L, buf, sizeof(buf));
	if (ret < 0)
		return ret;

	ret = mt312_writereg(state, CS_SW_LIM, 0x69);
	if (ret < 0)
		return ret;

	return 0;
}
/*
 * Send a DiSEqC master command.  The message bytes are loaded into the
 * DISEQC_INSTR window, then DISEQC_MODE is written with the length and
 * the "send" command while preserving the tone bit (0x40).
 */
static int mt312_send_master_cmd(struct dvb_frontend *fe,
				 struct dvb_diseqc_master_cmd *c)
{
	struct mt312_state *state = fe->demodulator_priv;
	int ret;
	u8 diseqc_mode;

	if ((c->msg_len == 0) || (c->msg_len > sizeof(c->msg)))
		return -EINVAL;

	ret = mt312_readreg(state, DISEQC_MODE, &diseqc_mode);
	if (ret < 0)
		return ret;

	ret = mt312_write(state, (0x80 | DISEQC_INSTR), c->msg, c->msg_len);
	if (ret < 0)
		return ret;

	ret = mt312_writereg(state, DISEQC_MODE,
			     (diseqc_mode & 0x40) | ((c->msg_len - 1) << 3)
			     | 0x04);
	if (ret < 0)
		return ret;

	/* is there a better way to wait for message to be transmitted */
	msleep(100);

	/* set DISEQC_MODE[2:0] to zero if a return message is expected */
	if (c->msg[0] & 0x02) {
		ret = mt312_writereg(state, DISEQC_MODE, (diseqc_mode & 0x40));
		if (ret < 0)
			return ret;
	}

	return 0;
}
/*
 * Send a DiSEqC mini (tone-burst) command: SEC_MINI_A -> 0x02,
 * SEC_MINI_B -> 0x03.  The tone bit (0x40) of DISEQC_MODE is preserved.
 */
static int mt312_send_burst(struct dvb_frontend *fe, const fe_sec_mini_cmd_t c)
{
	static const u8 mini_tab[2] = { 0x02, 0x03 };
	struct mt312_state *state = fe->demodulator_priv;
	u8 diseqc_mode;
	int ret;

	if (c > SEC_MINI_B)
		return -EINVAL;

	ret = mt312_readreg(state, DISEQC_MODE, &diseqc_mode);
	if (ret >= 0)
		ret = mt312_writereg(state, DISEQC_MODE,
				     (diseqc_mode & 0x40) | mini_tab[c]);

	return ret < 0 ? ret : 0;
}
/*
 * Switch the 22 kHz continuous tone: SEC_TONE_ON -> 0x01,
 * SEC_TONE_OFF -> 0x00, keeping the voltage bit (0x40) untouched.
 */
static int mt312_set_tone(struct dvb_frontend *fe, const fe_sec_tone_mode_t t)
{
	static const u8 tone_tab[2] = { 0x01, 0x00 };
	struct mt312_state *state = fe->demodulator_priv;
	u8 diseqc_mode;
	int ret;

	if (t > SEC_TONE_OFF)
		return -EINVAL;

	ret = mt312_readreg(state, DISEQC_MODE, &diseqc_mode);
	if (ret >= 0)
		ret = mt312_writereg(state, DISEQC_MODE,
				     (diseqc_mode & 0x40) | tone_tab[t]);

	return ret < 0 ? ret : 0;
}
/*
 * Select the LNB supply voltage via DISEQC_MODE bit 6; boards with an
 * inverted voltage sense flip the bit via config->voltage_inverted.
 */
static int mt312_set_voltage(struct dvb_frontend *fe, const fe_sec_voltage_t v)
{
	static const u8 volt_tab[3] = { 0x00, 0x40, 0x00 };
	struct mt312_state *state = fe->demodulator_priv;
	u8 val;

	if (v > SEC_VOLTAGE_OFF)
		return -EINVAL;

	val = volt_tab[v];
	if (state->config->voltage_inverted)
		val ^= 0x40;

	return mt312_writereg(state, DISEQC_MODE, val);
}
/*
 * Assemble the frontend status word from the three QPSK/FEC status
 * registers (read as one burst starting at QPSK_STAT_H).
 */
static int mt312_read_status(struct dvb_frontend *fe, fe_status_t *s)
{
	struct mt312_state *state = fe->demodulator_priv;
	int ret;
	u8 status[3];

	*s = 0;

	ret = mt312_read(state, QPSK_STAT_H, status, sizeof(status));
	if (ret < 0)
		return ret;

	dprintk("QPSK_STAT_H: 0x%02x, QPSK_STAT_L: 0x%02x,"
		" FEC_STATUS: 0x%02x\n", status[0], status[1], status[2]);

	if (status[0] & 0xc0)
		*s |= FE_HAS_SIGNAL;	/* signal noise ratio */
	if (status[0] & 0x04)
		*s |= FE_HAS_CARRIER;	/* qpsk carrier lock */
	if (status[2] & 0x02)
		*s |= FE_HAS_VITERBI;	/* viterbi lock */
	if (status[2] & 0x04)
		*s |= FE_HAS_SYNC;	/* byte align lock */
	if (status[0] & 0x01)
		*s |= FE_HAS_LOCK;	/* qpsk lock */

	return 0;
}
/* Read the 24-bit Reed-Solomon bit-error counter, scaled by 64. */
static int mt312_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	struct mt312_state *state = fe->demodulator_priv;
	u8 cnt[3];
	int ret;

	ret = mt312_read(state, RS_BERCNT_H, cnt, 3);
	if (ret < 0)
		return ret;

	*ber = (((u32)cnt[0] << 16) | (cnt[1] << 8) | cnt[2]) * 64;

	return 0;
}
/*
 * Report signal strength from the AGC registers: a 14-bit AGC value
 * (reported directly) plus a signed 10-bit error term, sign-extended
 * via the shift pair, that is only logged.
 */
static int mt312_read_signal_strength(struct dvb_frontend *fe,
				      u16 *signal_strength)
{
	struct mt312_state *state = fe->demodulator_priv;
	int ret;
	u8 buf[3];
	u16 agc;
	s16 err_db;

	ret = mt312_read(state, AGC_H, buf, sizeof(buf));
	if (ret < 0)
		return ret;

	agc = (buf[0] << 6) | (buf[1] >> 2);
	/* sign-extend the 10-bit error value */
	err_db = (s16) (((buf[1] & 0x03) << 14) | buf[2] << 6) >> 6;

	*signal_strength = agc;

	dprintk("agc=%08x err_db=%hd\n", agc, err_db);

	return 0;
}
/* Read the 15-bit SNR register and invert it so larger means better. */
static int mt312_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	struct mt312_state *state = fe->demodulator_priv;
	u8 raw[2];
	int ret;

	ret = mt312_read(state, M_SNR_H, raw, sizeof(raw));
	if (ret < 0)
		return ret;

	*snr = 0xFFFF - ((((raw[0] & 0x7f) << 8) | raw[1]) << 1);

	return 0;
}
/* Read the 16-bit uncorrected-blocks counter. */
static int mt312_read_ucblocks(struct dvb_frontend *fe, u32 *ubc)
{
	struct mt312_state *state = fe->demodulator_priv;
	u8 raw[2];
	int ret;

	ret = mt312_read(state, RS_UBC_H, raw, sizeof(raw));
	if (ret < 0)
		return ret;

	*ubc = ((u32)raw[0] << 8) | raw[1];

	return 0;
}
/*
 * Tune: validate the requested parameters, adapt the VP310 system clock
 * for high symbol rates, drive the tuner, then program symbol rate,
 * inversion/FEC and QPSK control in one burst and kick the demod with a
 * partial reset.
 */
static int mt312_set_frontend(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct mt312_state *state = fe->demodulator_priv;
	int ret;
	u8 buf[5], config_val;
	u16 sr;

	/* VIT_MODE code-rate encodings; 0x3f marks "auto/unsupported" */
	const u8 fec_tab[10] =
	    { 0x00, 0x01, 0x02, 0x04, 0x3f, 0x08, 0x10, 0x20, 0x3f, 0x3f };
	const u8 inv_tab[3] = { 0x00, 0x40, 0x80 };

	dprintk("%s: Freq %d\n", __func__, p->frequency);

	/* reject anything outside the advertised capabilities */
	if ((p->frequency < fe->ops.info.frequency_min)
	    || (p->frequency > fe->ops.info.frequency_max))
		return -EINVAL;

	if (((int)p->inversion < INVERSION_OFF)
	    || (p->inversion > INVERSION_ON))
		return -EINVAL;

	if ((p->symbol_rate < fe->ops.info.symbol_rate_min)
	    || (p->symbol_rate > fe->ops.info.symbol_rate_max))
		return -EINVAL;

	if (((int)p->fec_inner < FEC_NONE)
	    || (p->fec_inner > FEC_AUTO))
		return -EINVAL;

	/* rates with no hardware encoding */
	if ((p->fec_inner == FEC_4_5)
	    || (p->fec_inner == FEC_8_9))
		return -EINVAL;

	switch (state->id) {
	case ID_VP310:
	/* For now we will do this only for the VP310.
	 * It should be better for the mt312 as well,
	 * but tuning will be slower. ACCJr 09/29/03
	 */
		ret = mt312_readreg(state, CONFIG, &config_val);
		if (ret < 0)
			return ret;
		if (p->symbol_rate >= 30000000) {
			/* Note that 30MS/s should use 90MHz */
			if (state->freq_mult == 6) {
				/* We are running 60MHz */
				state->freq_mult = 9;
				ret = mt312_initfe(fe);
				if (ret < 0)
					return ret;
			}
		} else {
			if (state->freq_mult == 9) {
				/* We are running 90MHz */
				state->freq_mult = 6;
				ret = mt312_initfe(fe);
				if (ret < 0)
					return ret;
			}
		}
		break;

	case ID_MT312:
	case ID_ZL10313:
		break;

	default:
		return -EINVAL;
	}

	/* let the tuner set the RF frequency, then close the I2C gate */
	if (fe->ops.tuner_ops.set_params) {
		fe->ops.tuner_ops.set_params(fe);
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 0);
	}

	/* sr = (u16)(sr * 256.0 / 1000000.0) */
	sr = mt312_div(p->symbol_rate * 4, 15625);

	/* SYM_RATE */
	buf[0] = (sr >> 8) & 0x3f;
	buf[1] = (sr >> 0) & 0xff;

	/* VIT_MODE */
	buf[2] = inv_tab[p->inversion] | fec_tab[p->fec_inner];

	/* QPSK_CTRL */
	buf[3] = 0x40;		/* swap I and Q before QPSK demodulation */

	if (p->symbol_rate < 10000000)
		buf[3] |= 0x04;	/* use afc mode */

	/* GO */
	buf[4] = 0x01;

	ret = mt312_write(state, SYM_RATE_H, buf, sizeof(buf));
	if (ret < 0)
		return ret;

	mt312_reset(state, 0);

	return 0;
}
/*
 * Fill the property cache with the parameters the demod currently
 * reports: inversion, symbol rate and code rate.
 */
static int mt312_get_frontend(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct mt312_state *state = fe->demodulator_priv;
	int ret;

	ret = mt312_get_inversion(state, &p->inversion);
	if (ret < 0)
		return ret;

	ret = mt312_get_symbol_rate(state, &p->symbol_rate);
	if (ret < 0)
		return ret;

	return mt312_get_code_rate(state, &p->fec_inner);
}
/*
 * Open/close the I2C repeater gate to the tuner via GPP_CTRL bit 6.
 * On the ZL10313 the current register is read first so the ADC-enable
 * bit (0x80) survives the write.
 */
static int mt312_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
	struct mt312_state *state = fe->demodulator_priv;
	u8 val = 0x00;
	int ret;

	switch (state->id) {
	case ID_ZL10313:
		ret = mt312_readreg(state, GPP_CTRL, &val);
		if (ret < 0)
			goto error;

		/* preserve this bit to not accidentally shutdown ADC */
		val &= 0x80;
		break;
	}

	if (enable)
		val |= 0x40;
	else
		val &= ~0x40;

	ret = mt312_writereg(state, GPP_CTRL, val);

error:
	return ret;
}
/*
 * Put the demod into standby: reset all registers to defaults, shut
 * down the ZL10313's ADC and MPEG bus, then clear the CONFIG power bit.
 */
static int mt312_sleep(struct dvb_frontend *fe)
{
	struct mt312_state *state = fe->demodulator_priv;
	int ret;
	u8 config;

	/* reset all registers to defaults */
	ret = mt312_reset(state, 1);
	if (ret < 0)
		return ret;

	if (state->id == ID_ZL10313) {
		/* reset ADC */
		ret = mt312_writereg(state, GPP_CTRL, 0x00);
		if (ret < 0)
			return ret;

		/* full shutdown of ADCs, mpeg bus tristated */
		ret = mt312_writereg(state, HW_CTRL, 0x0d);
		if (ret < 0)
			return ret;
	}

	ret = mt312_readreg(state, CONFIG, &config);
	if (ret < 0)
		return ret;

	/* enter standby */
	ret = mt312_writereg(state, CONFIG, config & 0x7f);
	if (ret < 0)
		return ret;

	return 0;
}
/* Tuning hints for the DVB core: 50 ms settling delay, no zig-zag. */
static int mt312_get_tune_settings(struct dvb_frontend *fe,
		struct dvb_frontend_tune_settings *fesettings)
{
	fesettings->min_delay_ms = 50;
	fesettings->step_size = 0;
	fesettings->max_drift = 0;
	return 0;
}
/* Free the demod state allocated by mt312_attach(). */
static void mt312_release(struct dvb_frontend *fe)
{
	kfree(fe->demodulator_priv);
}
#define MT312_SYS_CLK 90000000UL /* 90 MHz */
/*
 * Frontend operations table.  The name and frequency limits are
 * placeholders patched per-chip in mt312_attach().
 */
static struct dvb_frontend_ops mt312_ops = {
	.delsys = { SYS_DVBS },
	.info = {
		.name = "Zarlink ???? DVB-S",
		.frequency_min = 950000,
		.frequency_max = 2150000,
		/* FIXME: adjust freq to real used xtal */
		.frequency_stepsize = (MT312_PLL_CLK / 1000) / 128,
		.symbol_rate_min = MT312_SYS_CLK / 128, /* FIXME as above */
		.symbol_rate_max = MT312_SYS_CLK / 2,
		.caps =
		    FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
		    FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
		    FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_MUTE_TS |
		    FE_CAN_RECOVER
	},

	.release = mt312_release,

	.init = mt312_initfe,
	.sleep = mt312_sleep,
	.i2c_gate_ctrl = mt312_i2c_gate_ctrl,

	.set_frontend = mt312_set_frontend,
	.get_frontend = mt312_get_frontend,
	.get_tune_settings = mt312_get_tune_settings,

	.read_status = mt312_read_status,
	.read_ber = mt312_read_ber,
	.read_signal_strength = mt312_read_signal_strength,
	.read_snr = mt312_read_snr,
	.read_ucblocks = mt312_read_ucblocks,

	.diseqc_send_master_cmd = mt312_send_master_cmd,
	.diseqc_send_burst = mt312_send_burst,
	.set_tone = mt312_set_tone,
	.set_voltage = mt312_set_voltage,
};
/*
 * Probe for a VP310/MT312/ZL10313 on @i2c and, if found, allocate and
 * return an initialised dvb_frontend; NULL on any failure.  Ownership
 * of the state passes to the frontend (freed in mt312_release()).
 */
struct dvb_frontend *mt312_attach(const struct mt312_config *config,
					struct i2c_adapter *i2c)
{
	struct mt312_state *state = NULL;

	/* allocate memory for the internal state */
	state = kzalloc(sizeof(struct mt312_state), GFP_KERNEL);
	if (state == NULL)
		goto error;

	/* setup the state */
	state->config = config;
	state->i2c = i2c;

	/* check if the demod is there */
	if (mt312_readreg(state, ID, &state->id) < 0)
		goto error;

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &mt312_ops,
	       sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;

	/* patch in the chip-specific name, xtal and clock multiplier */
	switch (state->id) {
	case ID_VP310:
		strcpy(state->frontend.ops.info.name, "Zarlink VP310 DVB-S");
		state->xtal = MT312_PLL_CLK;
		state->freq_mult = 9;
		break;
	case ID_MT312:
		strcpy(state->frontend.ops.info.name, "Zarlink MT312 DVB-S");
		state->xtal = MT312_PLL_CLK;
		state->freq_mult = 6;
		break;
	case ID_ZL10313:
		strcpy(state->frontend.ops.info.name, "Zarlink ZL10313 DVB-S");
		state->xtal = MT312_PLL_CLK_10_111;
		state->freq_mult = 9;
		break;
	default:
		printk(KERN_WARNING "Only Zarlink VP310/MT312/ZL10313"
			" are supported chips.\n");
		goto error;
	}

	return &state->frontend;

error:
	kfree(state);
	return NULL;
}
EXPORT_SYMBOL(mt312_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("Zarlink VP310/MT312/ZL10313 DVB-S Demodulator driver");
MODULE_AUTHOR("Andreas Oberritter <obi@linuxtv.org>");
MODULE_AUTHOR("Matthias Schwarzott <zzam@gentoo.org>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mtmichaelson/LGE_Connect_Kernel | arch/arm/plat-iop/pci.c | 1295 | 9890 | /*
* arch/arm/plat-iop/pci.c
*
* PCI support for the Intel IOP32X and IOP33X processors
*
* Author: Rory Bolt <rorybolt@pacbell.net>
* Copyright (C) 2002 Rory Bolt
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <mach/hardware.h>
#include <asm/mach/pci.h>
#include <asm/hardware/iop3xx.h>
// #define DEBUG
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...) do { } while (0)
#endif
/*
* This routine builds either a type0 or type1 configuration command. If the
* bus is on the 803xx then a type0 made, else a type1 is created.
*/
/*
 * This routine builds either a type0 or type1 configuration command.
 * Type 0 (IDSEL bit + slot/function) is used for devices on the root
 * bus; type 1 (bus/slot/function + 1) for devices behind a bridge.
 */
static u32 iop3xx_cfg_address(struct pci_bus *bus, int devfn, int where)
{
	struct pci_sys_data *sys = bus->sysdata;
	u32 addr;

	if (sys->busnr == bus->number)
		addr = 1 << (PCI_SLOT(devfn) + 16) | (PCI_SLOT(devfn) << 11);
	else
		addr = bus->number << 16 | PCI_SLOT(devfn) << 11 | 1;

	addr |=	PCI_FUNC(devfn) << 8 | (where & ~3);

	return addr;
}
/*
* This routine checks the status of the last configuration cycle. If an error
* was detected it returns a 1, else it returns a 0. The errors being checked
* are parity, master abort, target abort (master and target). These types of
* errors occur during a config cycle where there is no device, like during
* the discovery stage.
*/
/*
 * Check and clear the error bits latched by the last configuration
 * cycle (parity, master abort, target abort).  Returns 1 if any error
 * was seen, 0 otherwise.  Such errors are normal during bus discovery
 * when a probed device does not exist.
 */
static int iop3xx_pci_status(void)
{
	unsigned int atusr, atuisr;
	int err = 0;

	atusr = *IOP3XX_ATUSR;
	if (atusr & 0xf900) {
		DBG("\t\t\tPCI: P0 - status = 0x%08x\n", atusr);
		*IOP3XX_ATUSR = atusr & 0xf900;	/* write-1-to-clear */
		err = 1;
	}

	atuisr = *IOP3XX_ATUISR;
	if (atuisr & 0x679f) {
		DBG("\t\t\tPCI: P1 - status = 0x%08x\n", atuisr);
		*IOP3XX_ATUISR = atuisr & 0x679f;
		err = 1;
	}

	return err;
}
/*
* Simply write the address register and read the configuration
* data. Note that the 4 nops ensure that we are able to handle
* a delayed abort (in theory.)
*/
/*
 * Simply write the address register and read the configuration
 * data. Note that the 4 nops ensure that we are able to handle
 * a delayed abort (in theory) before the caller checks the status
 * registers; do not alter the instruction sequence.
 */
static u32 iop3xx_read(unsigned long addr)
{
	u32 val;

	__asm__ __volatile__(
		"str	%1, [%2]\n\t"
		"ldr	%0, [%3]\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n\t"
		: "=r" (val)
		: "r" (addr), "r" (IOP3XX_OCCAR), "r" (IOP3XX_OCCDR));

	return val;
}
/*
* The read routines must check the error status of the last configuration
* cycle. If there was an error, the routine returns all hex f's.
*/
/*
 * The read routines must check the error status of the last configuration
 * cycle.  If there was an error, the routine returns all hex f's (the
 * PCI convention for "no device"), shifted down to the requested byte
 * offset.
 */
static int
iop3xx_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		int size, u32 *value)
{
	unsigned long addr = iop3xx_cfg_address(bus, devfn, where);
	u32 val = iop3xx_read(addr) >> ((where & 3) * 8);

	if (iop3xx_pci_status())
		val = 0xffffffff;

	*value = val;

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write a configuration register.  Sub-word writes are implemented as
 * read-modify-write of the 32-bit register; full-word writes go
 * straight through (the inline asm mirrors iop3xx_read's sequence to
 * tolerate delayed aborts).
 */
static int
iop3xx_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		int size, u32 value)
{
	unsigned long addr = iop3xx_cfg_address(bus, devfn, where);
	u32 val;

	if (size != 4) {
		val = iop3xx_read(addr);
		if (iop3xx_pci_status())
			return PCIBIOS_SUCCESSFUL;

		where = (where & 3) * 8;

		if (size == 1)
			val &= ~(0xff << where);
		else
			val &= ~(0xffff << where);

		*IOP3XX_OCCDR = val | value << where;
	} else {
		asm volatile(
			"str	%1, [%2]\n\t"
			"str	%0, [%3]\n\t"
			"nop\n\t"
			"nop\n\t"
			"nop\n\t"
			"nop\n\t"
			:
			: "r" (value), "r" (addr),
			  "r" (IOP3XX_OCCAR), "r" (IOP3XX_OCCDR));
	}

	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors handed to the ARM PCI core. */
static struct pci_ops iop3xx_ops = {
	.read	= iop3xx_read_config,
	.write	= iop3xx_write_config,
};
/*
* When a PCI device does not exist during config cycles, the 80200 gets a
* bus error instead of returning 0xffffffff. This handler simply returns.
*/
/*
 * When a PCI device does not exist during config cycles, the 80200 gets a
 * bus error instead of returning 0xffffffff. This handler simply returns,
 * letting the config accessors detect the failure via the ATU status.
 */
static int
iop3xx_pci_abort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	DBG("PCI abort: address = 0x%08lx fsr = 0x%03x PC = 0x%08lx LR = 0x%08lx\n",
		addr, fsr, regs->ARM_pc, regs->ARM_lr);

	/*
	 * If it was an imprecise abort, then we need to correct the
	 * return address to be _after_ the instruction.
	 */
	if (fsr & (1 << 10))
		regs->ARM_pc += 4;

	return 0;
}
/*
 * Register the IOP3xx I/O and memory windows with the resource tree and
 * record the bus/CPU address offsets for controller 0.  Returns 1 to
 * tell the ARM PCI core the controller exists, 0 for any other nr.
 * Panics on allocation failure (boot-time only).
 */
int iop3xx_pci_setup(int nr, struct pci_sys_data *sys)
{
	struct resource *res;

	if (nr != 0)
		return 0;

	res = kzalloc(2 * sizeof(struct resource), GFP_KERNEL);
	if (!res)
		panic("PCI: unable to alloc resources");

	res[0].start = IOP3XX_PCI_LOWER_IO_PA;
	res[0].end   = IOP3XX_PCI_LOWER_IO_PA + IOP3XX_PCI_IO_WINDOW_SIZE - 1;
	res[0].name  = "IOP3XX PCI I/O Space";
	res[0].flags = IORESOURCE_IO;
	request_resource(&ioport_resource, &res[0]);

	res[1].start = IOP3XX_PCI_LOWER_MEM_PA;
	res[1].end   = IOP3XX_PCI_LOWER_MEM_PA + IOP3XX_PCI_MEM_WINDOW_SIZE - 1;
	res[1].name  = "IOP3XX PCI Memory Space";
	res[1].flags = IORESOURCE_MEM;
	request_resource(&iomem_resource, &res[1]);

	/*
	 * Use whatever translation is already setup.
	 */
	sys->mem_offset = IOP3XX_PCI_LOWER_MEM_PA - *IOP3XX_OMWTVR0;
	sys->io_offset  = IOP3XX_PCI_LOWER_IO_PA - *IOP3XX_OIOWTVR;

	sys->resource[0] = &res[0];
	sys->resource[1] = &res[1];
	sys->resource[2] = NULL;

	return 1;
}
/* Enumerate the PCI bus using the IOP3xx config-space accessors. */
struct pci_bus *iop3xx_pci_scan_bus(int nr, struct pci_sys_data *sys)
{
	return pci_scan_bus(sys->busnr, &iop3xx_ops, sys);
}
/*
 * Program the Address Translation Unit: disable inbound BARs 0/1/3, map
 * BAR 2 1:1 over physical RAM, set up the two outbound memory windows
 * and the I/O window, then enable bus mastering and outbound cycles.
 */
void __init iop3xx_atu_setup(void)
{
	/* BAR 0 ( Disabled ) */
	*IOP3XX_IAUBAR0 = 0x0;
	*IOP3XX_IABAR0  = 0x0;
	*IOP3XX_IATVR0  = 0x0;
	*IOP3XX_IALR0   = 0x0;

	/* BAR 1 ( Disabled ) */
	*IOP3XX_IAUBAR1 = 0x0;
	*IOP3XX_IABAR1  = 0x0;
	*IOP3XX_IALR1   = 0x0;

	/* BAR 2 (1:1 mapping with Physical RAM) */
	/* Set limit and enable */
	*IOP3XX_IALR2 = ~((u32)IOP3XX_MAX_RAM_SIZE - 1) & ~0x1;
	*IOP3XX_IAUBAR2 = 0x0;

	/* Align the inbound bar with the base of memory */
	*IOP3XX_IABAR2 = PHYS_OFFSET |
			       PCI_BASE_ADDRESS_MEM_TYPE_64 |
			       PCI_BASE_ADDRESS_MEM_PREFETCH;

	*IOP3XX_IATVR2 = PHYS_OFFSET;

	/* Outbound window 0 */
	*IOP3XX_OMWTVR0 = IOP3XX_PCI_LOWER_MEM_BA;
	*IOP3XX_OUMWTVR0 = 0;

	/* Outbound window 1 */
	*IOP3XX_OMWTVR1 = IOP3XX_PCI_LOWER_MEM_BA +
			  IOP3XX_PCI_MEM_WINDOW_SIZE / 2;
	*IOP3XX_OUMWTVR1 = 0;

	/* BAR 3 ( Disabled ) */
	*IOP3XX_IAUBAR3 = 0x0;
	*IOP3XX_IABAR3  = 0x0;
	*IOP3XX_IATVR3  = 0x0;
	*IOP3XX_IALR3   = 0x0;

	/* Setup the I/O Bar
	 */
	*IOP3XX_OIOWTVR = IOP3XX_PCI_LOWER_IO_BA;

	/* Enable inbound and outbound cycles
	 */
	*IOP3XX_ATUCMD |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
			       PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
	*IOP3XX_ATUCR |= IOP3XX_ATUCR_OUT_EN;
}
/*
 * Quiesce the ATU: stop inbound/outbound cycles, wait for the queues to
 * drain, then zero every inbound BAR and outbound window so the unit is
 * in a clean state before iop3xx_atu_setup() reprograms it.
 */
void __init iop3xx_atu_disable(void)
{
	*IOP3XX_ATUCMD = 0;
	*IOP3XX_ATUCR = 0;

	/* wait for cycles to quiesce */
	while (*IOP3XX_PCSR & (IOP3XX_PCSR_OUT_Q_BUSY |
			       IOP3XX_PCSR_IN_Q_BUSY))
		cpu_relax();

	/* BAR 0 ( Disabled ) */
	*IOP3XX_IAUBAR0 = 0x0;
	*IOP3XX_IABAR0  = 0x0;
	*IOP3XX_IATVR0  = 0x0;
	*IOP3XX_IALR0   = 0x0;

	/* BAR 1 ( Disabled ) */
	*IOP3XX_IAUBAR1 = 0x0;
	*IOP3XX_IABAR1  = 0x0;
	*IOP3XX_IALR1   = 0x0;

	/* BAR 2 ( Disabled ) */
	*IOP3XX_IAUBAR2 = 0x0;
	*IOP3XX_IABAR2  = 0x0;
	*IOP3XX_IATVR2  = 0x0;
	*IOP3XX_IALR2   = 0x0;

	/* BAR 3 ( Disabled ) */
	*IOP3XX_IAUBAR3 = 0x0;
	*IOP3XX_IABAR3  = 0x0;
	*IOP3XX_IATVR3  = 0x0;
	*IOP3XX_IALR3   = 0x0;

	/* Clear the outbound windows */
	*IOP3XX_OIOWTVR  = 0;

	/* Outbound window 0 */
	*IOP3XX_OMWTVR0 = 0;
	*IOP3XX_OUMWTVR0 = 0;

	/* Outbound window 1 */
	*IOP3XX_OMWTVR1 = 0;
	*IOP3XX_OUMWTVR1 = 0;
}
/* Flag to determine whether the ATU is initialized and the PCI bus scanned */
int init_atu;
/*
 * Return the requested ATU init mode: a command-line override via
 * "iop3xx_init_atu=" wins, otherwise default to leaving the ATU off.
 */
int iop3xx_get_init_atu(void)
{
	if (init_atu != IOP3XX_INIT_ATU_DEFAULT)
		return init_atu;

	return IOP3XX_INIT_ATU_DISABLE;
}
/*
 * Dump the programmed ATU windows (DBG builds only) and install the
 * imprecise-abort fault handler used by the config-space accessors.
 */
static void __init iop3xx_atu_debug(void)
{
	DBG("PCI: Intel IOP3xx PCI init.\n");
	DBG("PCI: Outbound memory window 0: PCI 0x%08x%08x\n",
		*IOP3XX_OUMWTVR0, *IOP3XX_OMWTVR0);
	DBG("PCI: Outbound memory window 1: PCI 0x%08x%08x\n",
		*IOP3XX_OUMWTVR1, *IOP3XX_OMWTVR1);
	DBG("PCI: Outbound IO window: PCI 0x%08x\n",
		*IOP3XX_OIOWTVR);

	DBG("PCI: Inbound memory window 0: PCI 0x%08x%08x 0x%08x -> 0x%08x\n",
		*IOP3XX_IAUBAR0, *IOP3XX_IABAR0, *IOP3XX_IALR0, *IOP3XX_IATVR0);
	DBG("PCI: Inbound memory window 1: PCI 0x%08x%08x 0x%08x\n",
		*IOP3XX_IAUBAR1, *IOP3XX_IABAR1, *IOP3XX_IALR1);
	DBG("PCI: Inbound memory window 2: PCI 0x%08x%08x 0x%08x -> 0x%08x\n",
		*IOP3XX_IAUBAR2, *IOP3XX_IABAR2, *IOP3XX_IALR2, *IOP3XX_IATVR2);
	DBG("PCI: Inbound memory window 3: PCI 0x%08x%08x 0x%08x -> 0x%08x\n",
		*IOP3XX_IAUBAR3, *IOP3XX_IABAR3, *IOP3XX_IALR3, *IOP3XX_IATVR3);

	DBG("PCI: Expansion ROM window: PCI 0x%08x%08x 0x%08x -> 0x%08x\n",
		0, *IOP3XX_ERBAR, *IOP3XX_ERLR, *IOP3XX_ERTVR);

	DBG("ATU: IOP3XX_ATUCMD=0x%04x\n", *IOP3XX_ATUCMD);
	DBG("ATU: IOP3XX_ATUCR=0x%08x\n", *IOP3XX_ATUCR);

	hook_fault_code(16+6, iop3xx_pci_abort, SIGBUS, "imprecise external abort");
}
/* for platforms that might be host-bus-adapters */
/* For platforms that might be host-bus-adapters: only touch the ATU
 * when host-mode initialisation was explicitly enabled. */
void __init iop3xx_pci_preinit_cond(void)
{
	if (iop3xx_get_init_atu() != IOP3XX_INIT_ATU_ENABLE)
		return;

	iop3xx_atu_disable();
	iop3xx_atu_setup();
	iop3xx_atu_debug();
}
/* Unconditional host-mode pre-init: reset and reprogram the ATU. */
void __init iop3xx_pci_preinit(void)
{
	iop3xx_atu_disable();
	iop3xx_atu_setup();
	iop3xx_atu_debug();
}
/* allow init_atu to be user overridden */
/*
 * Parse the "iop3xx_init_atu=" command-line option: 'y'/'Y' enables ATU
 * init, 'n'/'N' disables it; ','/'=' are ignored.  On any other
 * character the option string is truncated and a diagnostic is logged.
 * Allows init_atu to be user overridden.
 */
static int __init iop3xx_init_atu_setup(char *str)
{
	init_atu = IOP3XX_INIT_ATU_DEFAULT;
	if (str) {
		while (*str != '\0') {
			switch (*str) {
			case 'y':
			case 'Y':
				init_atu = IOP3XX_INIT_ATU_ENABLE;
				break;
			case 'n':
			case 'N':
				init_atu = IOP3XX_INIT_ATU_DISABLE;
				break;
			case ',':
			case '=':
				break;
			default:
				/* fix: terminate the log line with '\n' so it
				 * is not merged with the next printk */
				printk(KERN_DEBUG "\"%s\" malformed at "
					    "character: \'%c\'\n",
					    __func__,
					    *str);
				*(str + 1) = '\0';
			}
			str++;
		}
	}
	return 1;
}
__setup("iop3xx_init_atu", iop3xx_init_atu_setup);
| gpl-2.0 |
StefanescuCristian/ubuntu-bfsq | sound/oss/uart401.c | 1295 | 10610 | /*
* sound/oss/uart401.c
*
* MPU-401 UART driver (formerly uart401_midi.c)
*
*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*
* Changes:
* Alan Cox Reformatted, removed sound_mem usage, use normal Linux
* interrupt allocation. Protect against bogus unload
* Fixed to allow IRQ > 15
* Christoph Hellwig Adapted to module_init/module_exit
* Arnaldo C. de Melo got rid of check_region
*
* Status:
* Untested
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "sound_config.h"
#include "mpu401.h"
/* Per-device state for one MPU-401 UART instance. */
struct uart401_devc
{
	int base;		/* base I/O port: data at base, status/cmd at base+1 */
	int irq;
	int *osp;
	void (*midi_input_intr) (int dev, unsigned char data);	/* upper-layer RX callback */
	int opened, disabled;	/* open mode flags; disabled set after an output timeout */
	volatile unsigned char input_byte;	/* MPU_ACK latch, filled from IRQ context */
	int my_dev;		/* slot returned by sound_alloc_mididev() */
	int share_irq;		/* nonzero => we did not request (and must not free) the IRQ */
	spinlock_t lock;
};
/*
 * Port map: the MPU-401 exposes a data port at the base address and a
 * combined command(write)/status(read) port at base+1.  These macros
 * expect a local variable "devc" to be in scope.
 */
#define DATAPORT (devc->base)
#define COMDPORT (devc->base+1)
#define STATPORT (devc->base+1)

/* Read the raw status register. */
static int uart401_status(struct uart401_devc *devc)
{
	return inb(STATPORT);
}

/* Status bits are active low: 0 means "data available" / "tx ready". */
#define input_avail(devc) (!(uart401_status(devc)&INPUT_AVAIL))
#define output_ready(devc) (!(uart401_status(devc)&OUTPUT_READY))

/* Write a command byte to the command port. */
static void uart401_cmd(struct uart401_devc *devc, unsigned char cmd)
{
	outb((cmd), COMDPORT);
}

/* Read one byte from the data port. */
static int uart401_read(struct uart401_devc *devc)
{
	return inb(DATAPORT);
}

/* Write one byte to the data port. */
static void uart401_write(struct uart401_devc *devc, unsigned char byte)
{
	outb((byte), DATAPORT);
}

/* Status register bits. */
#define OUTPUT_READY 0x40
#define INPUT_AVAIL 0x80
/* MPU-401 protocol bytes. */
#define MPU_ACK 0xFE
#define MPU_RESET 0xFF
#define UART_MODE_ON 0x3F

static int reset_uart401(struct uart401_devc *devc);
static void enter_uart_mode(struct uart401_devc *devc);
/*
 * Drain the receive FIFO.  MPU_ACK bytes are latched into
 * devc->input_byte (enter_uart_mode()/reset_uart401() poll for them);
 * anything else is passed to the upper layer when the device is open
 * for reading.  The work limit guards against a jabbering UART keeping
 * us in interrupt context forever.
 */
static void uart401_input_loop(struct uart401_devc *devc)
{
	int work_limit=30000;

	while (input_avail(devc) && --work_limit)
	{
		unsigned char c = uart401_read(devc);

		if (c == MPU_ACK)
			devc->input_byte = c;
		else if (devc->opened & OPEN_READ && devc->midi_input_intr)
			devc->midi_input_intr(devc->my_dev, c);
	}
	if(work_limit==0)
		printk(KERN_WARNING "Too much work in interrupt on uart401 (0x%X). UART jabbering ??\n", devc->base);
}
/*
 * Interrupt handler: if the UART has pending input, drain it through
 * uart401_input_loop().  A NULL cookie means the registration is bogus.
 */
irqreturn_t uart401intr(int irq, void *dev_id)
{
	struct uart401_devc *devc = dev_id;

	if (!devc) {
		printk(KERN_ERR "uart401: bad devc\n");
		return IRQ_NONE;
	}

	if (input_avail(devc))
		uart401_input_loop(devc);

	return IRQ_HANDLED;
}
/*
 * Open the MIDI device: flush stale input, register the caller's input
 * callback and switch the hardware into UART mode.  Returns -EBUSY when
 * already open, 0 on success.  The "output" callback is unused here.
 */
static int
uart401_open(int dev, int mode,
	void (*input) (int dev, unsigned char data),
	void (*output) (int dev)
)
{
	struct uart401_devc *devc = (struct uart401_devc *)
		midi_devs[dev]->devc;

	if (devc->opened)
		return -EBUSY;

	/* Flush the UART */
	while (input_avail(devc))
		uart401_read(devc);

	devc->midi_input_intr = input;
	devc->opened = mode;
	enter_uart_mode(devc);
	devc->disabled = 0;

	return 0;
}
/* Close the MIDI device: reset the UART and mark the device free. */
static void uart401_close(int dev)
{
	struct uart401_devc *devc = midi_devs[dev]->devc;

	reset_uart401(devc);
	devc->opened = 0;
}
/*
 * Transmit one MIDI byte.  Pending input is drained first (pending input
 * seems to block output), then we busy-wait for the transmitter; on
 * timeout the device is marked disabled and re-initialised.  Always
 * returns 1 so the caller treats the byte as consumed.
 */
static int uart401_out(int dev, unsigned char midi_byte)
{
	int timeout;
	unsigned long flags;
	struct uart401_devc *devc = (struct uart401_devc *)
		midi_devs[dev]->devc;

	if (devc->disabled)
		return 1;
	/*
	 * Test for input since pending input seems to block the output.
	 */
	spin_lock_irqsave(&devc->lock,flags);
	if (input_avail(devc))
		uart401_input_loop(devc);
	spin_unlock_irqrestore(&devc->lock,flags);

	/*
	 * Sometimes it takes about 13000 loops before the output becomes ready
	 * (After reset). Normally it takes just about 10 loops.
	 */

	for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--);

	if (!output_ready(devc))
	{
		printk(KERN_WARNING "uart401: Timeout - Device not responding\n");
		devc->disabled = 1;
		reset_uart401(devc);
		enter_uart_mode(devc);
		return 1;
	}
	uart401_write(devc, midi_byte);
	return 1;
}
/*
 * The plain UART needs no special handling for the remaining
 * midi_operations hooks; provide no-op implementations.
 */
static inline int uart401_start_read(int dev)
{
	return 0;
}

static inline int uart401_end_read(int dev)
{
	return 0;
}

static inline void uart401_kick(int dev)
{
}

static inline int uart401_buffer_status(int dev)
{
	return 0;		/* No data in buffers */
}
#define MIDI_SYNTH_NAME "MPU-401 UART"
#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
#include "midi_synth.h"

/* OSS midi_operations table; a per-device copy is kmemdup()ed in
 * probe_uart401(). */
static const struct midi_operations uart401_operations =
{
	.owner		= THIS_MODULE,
	.info		= {"MPU-401 (UART) MIDI", 0, 0, SNDCARD_MPU401},
	.converter	= &std_midi_synth,
	.in_info	= {0},
	.open		= uart401_open,
	.close		= uart401_close,
	.outputc	= uart401_out,
	.start_read	= uart401_start_read,
	.end_read	= uart401_end_read,
	.kick		= uart401_kick,
	.buffer_status	= uart401_buffer_status,
};
/*
 * Switch the MPU-401 into dumb UART mode and busy-wait (up to 50000
 * polls) for the MPU_ACK reply, which arrives either via the interrupt
 * handler (devc->input_byte) or directly from the data port.
 */
static void enter_uart_mode(struct uart401_devc *devc)
{
	int ok, timeout;
	unsigned long flags;

	spin_lock_irqsave(&devc->lock,flags);
	for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--);

	devc->input_byte = 0;
	uart401_cmd(devc, UART_MODE_ON);

	ok = 0;
	for (timeout = 50000; timeout > 0 && !ok; timeout--)
		if (devc->input_byte == MPU_ACK)
			ok = 1;
		else if (input_avail(devc))
			if (uart401_read(devc) == MPU_ACK)
				ok = 1;

	spin_unlock_irqrestore(&devc->lock,flags);
}
/*
 * Reset the MPU-401.  The MPU_RESET command is tried twice; after each
 * attempt we busy-wait for MPU_ACK (cannot sleep - this runs during
 * boot).  Returns nonzero on success, 0 if no hardware answered.
 */
static int reset_uart401(struct uart401_devc *devc)
{
	int ok, timeout, n;

	/*
	 * Send the RESET command. Try again if no success at the first time.
	 */

	ok = 0;

	for (n = 0; n < 2 && !ok; n++)
	{
		for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--);

		devc->input_byte = 0;
		uart401_cmd(devc, MPU_RESET);

		/*
		 * Wait at least 25 msec. This method is not accurate so let's make the
		 * loop bit longer. Cannot sleep since this is called during boot.
		 */

		for (timeout = 50000; timeout > 0 && !ok; timeout--)
		{
			if (devc->input_byte == MPU_ACK)	/* Interrupt */
				ok = 1;
			else if (input_avail(devc))
			{
				if (uart401_read(devc) == MPU_ACK)
					ok = 1;
			}
		}
	}

	/* Flush input before enabling interrupts */
	if (ok)
		uart401_input_loop(devc);
	else
		DDB(printk("Reset UART401 failed - No hardware detected.\n"));
	return ok;
}
/*
 * Probe and register an MPU-401 UART described by hw_config.
 *
 * On success, hw_config->slots[4] holds the allocated MIDI device
 * number and 1 is returned; on failure 0 is returned and everything
 * acquired so far is rolled back via the goto-cleanup chain below
 * (resources are released in reverse order of acquisition).
 *
 * A negative hw_config->irq means "IRQ is shared with another driver":
 * the IRQ is not requested here and must not be freed on unload.
 */
int probe_uart401(struct address_info *hw_config, struct module *owner)
{
	struct uart401_devc *devc;
	char *name = "MPU-401 (UART) MIDI";
	int ok = 0;
	unsigned long flags;

	DDB(printk("Entered probe_uart401()\n"));

	/* Default to "not found" */
	hw_config->slots[4] = -1;

	if (!request_region(hw_config->io_base, 4, "MPU-401 UART")) {
		printk(KERN_INFO "uart401: could not request_region(%d, 4)\n", hw_config->io_base);
		return 0;
	}

	devc = kmalloc(sizeof(struct uart401_devc), GFP_KERNEL);
	if (!devc) {
		printk(KERN_WARNING "uart401: Can't allocate memory\n");
		goto cleanup_region;
	}

	devc->base = hw_config->io_base;
	devc->irq = hw_config->irq;
	devc->osp = hw_config->osp;
	devc->midi_input_intr = NULL;
	devc->opened = 0;
	devc->input_byte = 0;
	devc->my_dev = 0;
	devc->share_irq = 0;
	spin_lock_init(&devc->lock);

	spin_lock_irqsave(&devc->lock,flags);
	ok = reset_uart401(devc);
	spin_unlock_irqrestore(&devc->lock,flags);

	if (!ok)
		goto cleanup_devc;

	if (hw_config->name)
		name = hw_config->name;

	/* Negative IRQ encodes a shared IRQ owned by someone else. */
	if (devc->irq < 0) {
		devc->share_irq = 1;
		devc->irq *= -1;
	} else
		devc->share_irq = 0;

	if (!devc->share_irq)
		if (request_irq(devc->irq, uart401intr, 0, "MPU-401 UART", devc) < 0) {
			printk(KERN_WARNING "uart401: Failed to allocate IRQ%d\n", devc->irq);
			/* fall back to "shared" so unload won't free it */
			devc->share_irq = 1;
		}
	devc->my_dev = sound_alloc_mididev();
	enter_uart_mode(devc);

	if (devc->my_dev == -1) {
		printk(KERN_INFO "uart401: Too many midi devices detected\n");
		goto cleanup_irq;
	}

	conf_printf(name, hw_config);

	/* Each device gets its own copy of the operations table. */
	midi_devs[devc->my_dev] = kmemdup(&uart401_operations,
					  sizeof(struct midi_operations),
					  GFP_KERNEL);
	if (!midi_devs[devc->my_dev]) {
		printk(KERN_ERR "uart401: Failed to allocate memory\n");
		goto cleanup_unload_mididev;
	}

	if (owner)
		midi_devs[devc->my_dev]->owner = owner;

	midi_devs[devc->my_dev]->devc = devc;
	midi_devs[devc->my_dev]->converter = kmemdup(&std_midi_synth,
						     sizeof(struct synth_operations),
						     GFP_KERNEL);
	if (!midi_devs[devc->my_dev]->converter) {
		printk(KERN_WARNING "uart401: Failed to allocate memory\n");
		goto cleanup_midi_devs;
	}

	strcpy(midi_devs[devc->my_dev]->info.name, name);
	midi_devs[devc->my_dev]->converter->id = "UART401";
	midi_devs[devc->my_dev]->converter->midi_dev = devc->my_dev;

	if (owner)
		midi_devs[devc->my_dev]->converter->owner = owner;

	hw_config->slots[4] = devc->my_dev;
	sequencer_init();
	devc->opened = 0;
	return 1;

cleanup_midi_devs:
	kfree(midi_devs[devc->my_dev]);

cleanup_unload_mididev:
	sound_unload_mididev(devc->my_dev);

cleanup_irq:
	if (!devc->share_irq)
		free_irq(devc->irq, devc);

cleanup_devc:
	kfree(devc);

cleanup_region:
	release_region(hw_config->io_base, 4);
	return 0;
}
/*
 * Tear down a device previously set up by probe_uart401().  Safe to
 * call when the probe failed (slots[4] == -1) or was never run.
 * Frees the I/O region, the IRQ (unless shared), the duplicated
 * operations tables and the device state, then unregisters the slot.
 */
void unload_uart401(struct address_info *hw_config)
{
	struct uart401_devc *devc;
	int n=hw_config->slots[4];

	/* Not set up */
	if(n==-1 || midi_devs[n]==NULL)
		return;

	/* Not allocated (erm ??) */

	devc = midi_devs[hw_config->slots[4]]->devc;
	if (devc == NULL)
		return;

	reset_uart401(devc);
	release_region(hw_config->io_base, 4);

	if (!devc->share_irq)
		free_irq(devc->irq, devc);

	kfree(midi_devs[devc->my_dev]->converter);
	kfree(midi_devs[devc->my_dev]);
	kfree(devc);

	/* This kills midi_devs[x] */
	sound_unload_mididev(hw_config->slots[4]);
}
EXPORT_SYMBOL(probe_uart401);
EXPORT_SYMBOL(unload_uart401);
EXPORT_SYMBOL(uart401intr);
static struct address_info cfg_mpu;
static int io = -1;
static int irq = -1;
module_param(io, int, 0444);
module_param(irq, int, 0444);
/*
 * Module init.  The driver can be loaded either to drive a configured
 * MPU-401 (both io= and irq= given) or merely to export its functions
 * to other OSS drivers, in which case probing is skipped and 0 is
 * returned.
 */
static int __init init_uart401(void)
{
	cfg_mpu.irq = irq;
	cfg_mpu.io_base = io;

	/* Can be loaded either for module use or to provide functions
	   to others */
	if (cfg_mpu.io_base != -1 && cfg_mpu.irq != -1) {
		/* Fix: the banner lacked a trailing newline, so the next
		 * kernel log line was appended to it. */
		printk(KERN_INFO "MPU-401 UART driver Copyright (C) Hannu Savolainen 1993-1997\n");
		if (!probe_uart401(&cfg_mpu, THIS_MODULE))
			return -ENODEV;
	}
	return 0;
}
/* Module exit: undo the probe, but only if init_uart401() ran one. */
static void __exit cleanup_uart401(void)
{
	if (cfg_mpu.io_base == -1 || cfg_mpu.irq == -1)
		return;		/* nothing was probed at load time */
	unload_uart401(&cfg_mpu);
}
module_init(init_uart401);
module_exit(cleanup_uart401);
#ifndef MODULE
/*
 * Parse the "uart401=io,irq" kernel command line option.
 *
 * get_options() stores the number of integers it actually parsed in
 * ints[0]; the original code read ints[1] and ints[2] unconditionally,
 * which is an uninitialized read when fewer than two values were given
 * (e.g. a bare "uart401=").  Only consume values that are present.
 */
static int __init setup_uart401(char *str)
{
	/* io, irq */
	int ints[3];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	if (ints[0] >= 1)
		io = ints[1];
	if (ints[0] >= 2)
		irq = ints[2];

	return 1;
}

__setup("uart401=", setup_uart401);
#endif
MODULE_LICENSE("GPL");
| gpl-2.0 |
markyzq/kernel-drm-rockchip | sound/pci/au88x0/au88x0_eq.c | 1295 | 23037 | /***************************************************************************
* au88x0_eq.c
* Aureal Vortex Hardware EQ control/access.
*
* Sun Jun 8 18:19:19 2003
* 2003 Manuel Jander (mjander@users.sourceforge.net)
*
* 02 July 2003: First time something works :)
* November 2003: A3D Bypass code completed but untested.
*
* TODO:
* - Debug (testing)
* - Test peak visualization support.
*
****************************************************************************/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Library General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
The Aureal Hardware EQ is found on AU8810 and AU8830 chips only.
it has 4 inputs (2 for general mix, 2 for A3D) and 2 outputs (supposed
to be routed to the codec).
*/
#include "au88x0.h"
#include "au88x0_eq.h"
#include "au88x0_eqdata.c"
/* EQ register block base and derived I/O routing/control registers. */
#define VORTEX_EQ_BASE	 0x2b000
#define VORTEX_EQ_DEST   (VORTEX_EQ_BASE + 0x410)
#define VORTEX_EQ_SOURCE (VORTEX_EQ_BASE + 0x430)
#define VORTEX_EQ_CTRL   (VORTEX_EQ_BASE + 0x440)

/* Per-band register stride: each biquad's registers occupy 0x30 bytes. */
#define VORTEX_BAND_COEFF_SIZE	0x30

/* CEqHw.s */
/* Program the EQ smoothing time constants (gain ramp and level). */
static void vortex_EqHw_SetTimeConsts(vortex_t * vortex, u16 gain, u16 level)
{
	hwwrite(vortex->mmio, 0x2b3c4, gain);
	hwwrite(vortex->mmio, 0x2b3c8, level);
}
/*
 * Two's-complement negation with the most negative value clamped:
 * -(-32768) cannot be represented in 16 bits, so it maps to 32767,
 * keeping the result the positive-magnitude counterpart.
 */
static inline u16 sign_invert(u16 a)
{
	return (a == (u16)-32768) ? 32767 : -a;
}
/*
 * Load the left-channel biquad coefficients (5 per band, bands at
 * 0x30-byte stride from 0x2b000).  When eqhw->this08 is set, the last
 * three coefficients of each band are written sign-inverted.
 */
static void vortex_EqHw_SetLeftCoefs(vortex_t * vortex, u16 coefs[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int i = 0, n /*esp2c */;

	for (n = 0; n < eqhw->this04; n++) {
		hwwrite(vortex->mmio, 0x2b000 + n * 0x30, coefs[i + 0]);
		hwwrite(vortex->mmio, 0x2b004 + n * 0x30, coefs[i + 1]);

		if (eqhw->this08 == 0) {
			hwwrite(vortex->mmio, 0x2b008 + n * 0x30, coefs[i + 2]);
			hwwrite(vortex->mmio, 0x2b00c + n * 0x30, coefs[i + 3]);
			hwwrite(vortex->mmio, 0x2b010 + n * 0x30, coefs[i + 4]);
		} else {
			hwwrite(vortex->mmio, 0x2b008 + n * 0x30, sign_invert(coefs[2 + i]));
			hwwrite(vortex->mmio, 0x2b00c + n * 0x30, sign_invert(coefs[3 + i]));
			hwwrite(vortex->mmio, 0x2b010 + n * 0x30, sign_invert(coefs[4 + i]));
		}
		i += 5;
	}
}

/* Right-channel counterpart of vortex_EqHw_SetLeftCoefs() (bands start
 * at 0x2b1e0). */
static void vortex_EqHw_SetRightCoefs(vortex_t * vortex, u16 coefs[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int i = 0, n /*esp2c */;

	for (n = 0; n < eqhw->this04; n++) {
		hwwrite(vortex->mmio, 0x2b1e0 + n * 0x30, coefs[0 + i]);
		hwwrite(vortex->mmio, 0x2b1e4 + n * 0x30, coefs[1 + i]);

		if (eqhw->this08 == 0) {
			hwwrite(vortex->mmio, 0x2b1e8 + n * 0x30, coefs[2 + i]);
			hwwrite(vortex->mmio, 0x2b1ec + n * 0x30, coefs[3 + i]);
			hwwrite(vortex->mmio, 0x2b1f0 + n * 0x30, coefs[4 + i]);
		} else {
			hwwrite(vortex->mmio, 0x2b1e8 + n * 0x30, sign_invert(coefs[2 + i]));
			hwwrite(vortex->mmio, 0x2b1ec + n * 0x30, sign_invert(coefs[3 + i]));
			hwwrite(vortex->mmio, 0x2b1f0 + n * 0x30, sign_invert(coefs[4 + i]));
		}
		i += 5;
	}
}
/*
 * Load the left-channel filter state: two global words (a[]) plus four
 * state words per band (b[], 0xc-byte stride per group of writes).
 */
static void vortex_EqHw_SetLeftStates(vortex_t * vortex, u16 a[], u16 b[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int i = 0, ebx;

	hwwrite(vortex->mmio, 0x2b3fc, a[0]);
	hwwrite(vortex->mmio, 0x2b400, a[1]);

	for (ebx = 0; ebx < eqhw->this04; ebx++) {
		hwwrite(vortex->mmio, 0x2b014 + (i * 0xc), b[i]);
		hwwrite(vortex->mmio, 0x2b018 + (i * 0xc), b[1 + i]);
		hwwrite(vortex->mmio, 0x2b01c + (i * 0xc), b[2 + i]);
		hwwrite(vortex->mmio, 0x2b020 + (i * 0xc), b[3 + i]);
		i += 4;
	}
}

/* Right-channel counterpart of vortex_EqHw_SetLeftStates(). */
static void vortex_EqHw_SetRightStates(vortex_t * vortex, u16 a[], u16 b[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int i = 0, ebx;

	hwwrite(vortex->mmio, 0x2b404, a[0]);
	hwwrite(vortex->mmio, 0x2b408, a[1]);

	for (ebx = 0; ebx < eqhw->this04; ebx++) {
		hwwrite(vortex->mmio, 0x2b1f4 + (i * 0xc), b[i]);
		hwwrite(vortex->mmio, 0x2b1f8 + (i * 0xc), b[1 + i]);
		hwwrite(vortex->mmio, 0x2b1fc + (i * 0xc), b[2 + i]);
		hwwrite(vortex->mmio, 0x2b200 + (i * 0xc), b[3 + i]);
		i += 4;
	}
}
#if 0
/* Unused read-back helpers kept for reference; the coefficient and
 * state getters were never implemented. */
static void vortex_EqHw_GetTimeConsts(vortex_t * vortex, u16 * a, u16 * b)
{
	*a = hwread(vortex->mmio, 0x2b3c4);
	*b = hwread(vortex->mmio, 0x2b3c8);
}

static void vortex_EqHw_GetLeftCoefs(vortex_t * vortex, u16 a[])
{
}

static void vortex_EqHw_GetRightCoefs(vortex_t * vortex, u16 a[])
{
}

static void vortex_EqHw_GetLeftStates(vortex_t * vortex, u16 * a, u16 b[])
{
}

static void vortex_EqHw_GetRightStates(vortex_t * vortex, u16 * a, u16 b[])
{
}
#endif
/* Mix Gains */
/* Program the target bypass gain pair (left/right); sign-inverted when
 * eqhw->this08 is set, mirroring the coefficient writers. */
static void vortex_EqHw_SetBypassGain(vortex_t * vortex, u16 a, u16 b)
{
	eqhw_t *eqhw = &(vortex->eq.this04);

	if (eqhw->this08 == 0) {
		hwwrite(vortex->mmio, 0x2b3d4, a);
		hwwrite(vortex->mmio, 0x2b3ec, b);
	} else {
		hwwrite(vortex->mmio, 0x2b3d4, sign_invert(a));
		hwwrite(vortex->mmio, 0x2b3ec, sign_invert(b));
	}
}

/* Program the target A3D bypass gain pair. */
static void vortex_EqHw_SetA3DBypassGain(vortex_t * vortex, u16 a, u16 b)
{
	hwwrite(vortex->mmio, 0x2b3e0, a);
	hwwrite(vortex->mmio, 0x2b3f8, b);
}

#if 0
/* Unused "current value" variants of the two setters above. */
static void vortex_EqHw_SetCurrBypassGain(vortex_t * vortex, u16 a, u16 b)
{
	hwwrite(vortex->mmio, 0x2b3d0, a);
	hwwrite(vortex->mmio, 0x2b3e8, b);
}

static void vortex_EqHw_SetCurrA3DBypassGain(vortex_t * vortex, u16 a, u16 b)
{
	hwwrite(vortex->mmio, 0x2b3dc, a);
	hwwrite(vortex->mmio, 0x2b3f4, b);
}
#endif
/* Set the target gain of one left-channel band. */
static void
vortex_EqHw_SetLeftGainsSingleTarget(vortex_t * vortex, u16 index, u16 b)
{
	hwwrite(vortex->mmio, 0x2b02c + (index * 0x30), b);
}

/* Set the target gain of one right-channel band. */
static void
vortex_EqHw_SetRightGainsSingleTarget(vortex_t * vortex, u16 index, u16 b)
{
	hwwrite(vortex->mmio, 0x2b20c + (index * 0x30), b);
}

/* Set the target gains of all left-channel bands. */
static void vortex_EqHw_SetLeftGainsTarget(vortex_t * vortex, u16 a[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int ebx;

	for (ebx = 0; ebx < eqhw->this04; ebx++) {
		hwwrite(vortex->mmio, 0x2b02c + ebx * 0x30, a[ebx]);
	}
}

/* Set the target gains of all right-channel bands. */
static void vortex_EqHw_SetRightGainsTarget(vortex_t * vortex, u16 a[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int ebx;

	for (ebx = 0; ebx < eqhw->this04; ebx++) {
		hwwrite(vortex->mmio, 0x2b20c + ebx * 0x30, a[ebx]);
	}
}

/* Set the current (instantaneous) gains of all left-channel bands. */
static void vortex_EqHw_SetLeftGainsCurrent(vortex_t * vortex, u16 a[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int ebx;

	for (ebx = 0; ebx < eqhw->this04; ebx++) {
		hwwrite(vortex->mmio, 0x2b028 + ebx * 0x30, a[ebx]);
	}
}

/* Set the current (instantaneous) gains of all right-channel bands. */
static void vortex_EqHw_SetRightGainsCurrent(vortex_t * vortex, u16 a[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int ebx;

	for (ebx = 0; ebx < eqhw->this04; ebx++) {
		hwwrite(vortex->mmio, 0x2b208 + ebx * 0x30, a[ebx]);
	}
}
#if 0
/* Unused gain read-back helpers, mirrors of the setters above. */
static void vortex_EqHw_GetLeftGainsTarget(vortex_t * vortex, u16 a[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int ebx = 0;

	if (eqhw->this04 < 0)
		return;

	do {
		a[ebx] = hwread(vortex->mmio, 0x2b02c + ebx * 0x30);
		ebx++;
	}
	while (ebx < eqhw->this04);
}

static void vortex_EqHw_GetRightGainsTarget(vortex_t * vortex, u16 a[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int ebx = 0;

	if (eqhw->this04 < 0)
		return;

	do {
		a[ebx] = hwread(vortex->mmio, 0x2b20c + ebx * 0x30);
		ebx++;
	}
	while (ebx < eqhw->this04);
}

static void vortex_EqHw_GetLeftGainsCurrent(vortex_t * vortex, u16 a[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int ebx = 0;

	if (eqhw->this04 < 0)
		return;

	do {
		a[ebx] = hwread(vortex->mmio, 0x2b028 + ebx * 0x30);
		ebx++;
	}
	while (ebx < eqhw->this04);
}

static void vortex_EqHw_GetRightGainsCurrent(vortex_t * vortex, u16 a[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int ebx = 0;

	if (eqhw->this04 < 0)
		return;

	do {
		a[ebx] = hwread(vortex->mmio, 0x2b208 + ebx * 0x30);
		ebx++;
	}
	while (ebx < eqhw->this04);
}
#endif
/* EQ band levels settings */
/*
 * Write the peak/level registers.  Layout of peaks[]: this04 left band
 * levels, 2 extra left words, this04 right band levels, 2 extra right
 * words.
 */
static void vortex_EqHw_SetLevels(vortex_t * vortex, u16 peaks[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int i;

	/* set left peaks */
	for (i = 0; i < eqhw->this04; i++) {
		hwwrite(vortex->mmio, 0x2b024 + i * VORTEX_BAND_COEFF_SIZE, peaks[i]);
	}

	hwwrite(vortex->mmio, 0x2b3cc, peaks[eqhw->this04]);
	hwwrite(vortex->mmio, 0x2b3d8, peaks[eqhw->this04 + 1]);

	/* set right peaks */
	for (i = 0; i < eqhw->this04; i++) {
		hwwrite(vortex->mmio, 0x2b204 + i * VORTEX_BAND_COEFF_SIZE,
			peaks[i + (eqhw->this04 + 2)]);
	}

	hwwrite(vortex->mmio, 0x2b3e4, peaks[2 + (eqhw->this04 * 2)]);
	hwwrite(vortex->mmio, 0x2b3f0, peaks[3 + (eqhw->this04 * 2)]);
}

#if 0
/* Unused read-back counterpart of vortex_EqHw_SetLevels(). */
static void vortex_EqHw_GetLevels(vortex_t * vortex, u16 a[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int ebx;

	if (eqhw->this04 < 0)
		return;

	ebx = 0;
	do {
		a[ebx] = hwread(vortex->mmio, 0x2b024 + ebx * 0x30);
		ebx++;
	}
	while (ebx < eqhw->this04);

	a[eqhw->this04] = hwread(vortex->mmio, 0x2b3cc);
	a[eqhw->this04 + 1] = hwread(vortex->mmio, 0x2b3d8);

	ebx = 0;
	do {
		a[ebx + (eqhw->this04 + 2)] =
		    hwread(vortex->mmio, 0x2b204 + ebx * 0x30);
		ebx++;
	}
	while (ebx < eqhw->this04);

	a[2 + (eqhw->this04 * 2)] = hwread(vortex->mmio, 0x2b3e4);
	a[3 + (eqhw->this04 * 2)] = hwread(vortex->mmio, 0x2b3f0);
}
#endif
/* Global Control */
/* Write the EQ global control register verbatim. */
static void vortex_EqHw_SetControlReg(vortex_t * vortex, u32 reg)
{
	hwwrite(vortex->mmio, 0x2b440, reg);
}

/* Encode a 5-bit sample-rate code into the control register (the
 * remaining 0xb800 bits keep the EQ running). */
static void vortex_EqHw_SetSampleRate(vortex_t * vortex, u32 sr)
{
	hwwrite(vortex->mmio, 0x2b440, ((sr & 0x1f) << 3) | 0xb800);
}

#if 0
/* Unused read-back counterparts. */
static void vortex_EqHw_GetControlReg(vortex_t * vortex, u32 *reg)
{
	*reg = hwread(vortex->mmio, 0x2b440);
}

static void vortex_EqHw_GetSampleRate(vortex_t * vortex, u32 *sr)
{
	*sr = (hwread(vortex->mmio, 0x2b440) >> 3) & 0x1f;
}
#endif

/* Turn the EQ engine on. */
static void vortex_EqHw_Enable(vortex_t * vortex)
{
	hwwrite(vortex->mmio, VORTEX_EQ_CTRL, 0xf001);
}

/* Turn the EQ engine off. */
static void vortex_EqHw_Disable(vortex_t * vortex)
{
	hwwrite(vortex->mmio, VORTEX_EQ_CTRL, 0xf000);
}
/* Reset (zero) buffers */
/* Clear all 8 destination and 4 source routing registers. */
static void vortex_EqHw_ZeroIO(vortex_t * vortex)
{
	int i;

	for (i = 0; i < 0x8; i++)
		hwwrite(vortex->mmio, VORTEX_EQ_DEST + (i << 2), 0x0);

	for (i = 0; i < 0x4; i++)
		hwwrite(vortex->mmio, VORTEX_EQ_SOURCE + (i << 2), 0x0);
}

/* Clear only the first 4 destination registers (the A3D routes). */
static void vortex_EqHw_ZeroA3DIO(vortex_t * vortex)
{
	int i;

	for (i = 0; i < 0x4; i++)
		hwwrite(vortex->mmio, VORTEX_EQ_DEST + (i << 2), 0x0);
}

/* Bring the whole EQ block to a known all-zero state: control register,
 * I/O routing, coefficients, gains, bypass gains, filter states and
 * level registers. */
static void vortex_EqHw_ZeroState(vortex_t * vortex)
{
	vortex_EqHw_SetControlReg(vortex, 0);
	vortex_EqHw_ZeroIO(vortex);
	hwwrite(vortex->mmio, 0x2b3c0, 0);
	vortex_EqHw_SetTimeConsts(vortex, 0, 0);
	vortex_EqHw_SetLeftCoefs(vortex, asEqCoefsZeros);
	vortex_EqHw_SetRightCoefs(vortex, asEqCoefsZeros);
	vortex_EqHw_SetLeftGainsCurrent(vortex, eq_gains_zero);
	vortex_EqHw_SetRightGainsCurrent(vortex, eq_gains_zero);
	vortex_EqHw_SetLeftGainsTarget(vortex, eq_gains_zero);
	vortex_EqHw_SetRightGainsTarget(vortex, eq_gains_zero);
	vortex_EqHw_SetBypassGain(vortex, 0, 0);
	//vortex_EqHw_SetCurrBypassGain(vortex, 0, 0);
	vortex_EqHw_SetA3DBypassGain(vortex, 0, 0);
	//vortex_EqHw_SetCurrA3DBypassGain(vortex, 0, 0);
	vortex_EqHw_SetLeftStates(vortex, eq_states_zero, asEqOutStateZeros);
	vortex_EqHw_SetRightStates(vortex, eq_states_zero, asEqOutStateZeros);
	vortex_EqHw_SetLevels(vortex, (u16 *) eq_levels);
}
/* Program coeficients as pass through */
/* Load identity (pass-through) coefficients and unity gains. */
static void vortex_EqHw_ProgramPipe(vortex_t * vortex)
{
	vortex_EqHw_SetTimeConsts(vortex, 0, 0);
	vortex_EqHw_SetLeftCoefs(vortex, asEqCoefsPipes);
	vortex_EqHw_SetRightCoefs(vortex, asEqCoefsPipes);
	vortex_EqHw_SetLeftGainsCurrent(vortex, eq_gains_current);
	vortex_EqHw_SetRightGainsCurrent(vortex, eq_gains_current);
	vortex_EqHw_SetLeftGainsTarget(vortex, eq_gains_current);
	vortex_EqHw_SetRightGainsTarget(vortex, eq_gains_current);
}

/* Program EQ block as 10 band Equalizer */
/* Load a full 10-band coefficient/gain set.
 * NOTE(review): the call order mixes left-current/right-target before
 * left-target/right-current; the end state is the same either way, but
 * confirm against the original vendor code before reordering. */
static void
vortex_EqHw_Program10Band(vortex_t * vortex, auxxEqCoeffSet_t * coefset)
{
	vortex_EqHw_SetTimeConsts(vortex, 0xc, 0x7fe0);
	vortex_EqHw_SetLeftCoefs(vortex, coefset->LeftCoefs);
	vortex_EqHw_SetRightCoefs(vortex, coefset->RightCoefs);
	vortex_EqHw_SetLeftGainsCurrent(vortex, coefset->LeftGains);
	vortex_EqHw_SetRightGainsTarget(vortex, coefset->RightGains);
	vortex_EqHw_SetLeftGainsTarget(vortex, coefset->LeftGains);
	vortex_EqHw_SetRightGainsCurrent(vortex, coefset->RightGains);
}
/* Read all EQ peaks. (think VU meter) */
/* Fill peaks[] with this04 left levels followed by this04 right levels. */
static void vortex_EqHw_GetTenBandLevels(vortex_t * vortex, u16 peaks[])
{
	eqhw_t *eqhw = &(vortex->eq.this04);
	int i;

	if (eqhw->this04 <= 0)
		return;

	for (i = 0; i < eqhw->this04; i++)
		peaks[i] = hwread(vortex->mmio, 0x2B024 + i * 0x30);
	for (i = 0; i < eqhw->this04; i++)
		peaks[i + eqhw->this04] =
		    hwread(vortex->mmio, 0x2B204 + i * 0x30);
}
/* CEqlzr.s */
/* Read the cached left gain of one band.  Returns 0 on success, 1 when
 * the gain cache is not readable (this28 == 0). */
static int vortex_Eqlzr_GetLeftGain(vortex_t * vortex, u16 index, u16 * gain)
{
	eqlzr_t *eq = &(vortex->eq);

	if (eq->this28) {
		*gain = eq->this130[index];
		return 0;
	}
	return 1;
}

/* Update the cached left gain and, unless hardware access is blocked
 * (this54), push it to the hardware target register. */
static void vortex_Eqlzr_SetLeftGain(vortex_t * vortex, u16 index, u16 gain)
{
	eqlzr_t *eq = &(vortex->eq);

	if (eq->this28 == 0)
		return;

	eq->this130[index] = gain;
	if (eq->this54)
		return;

	vortex_EqHw_SetLeftGainsSingleTarget(vortex, index, gain);
}

/* Right-channel counterpart of vortex_Eqlzr_GetLeftGain(); right gains
 * live at offset this10 in the this130 cache. */
static int vortex_Eqlzr_GetRightGain(vortex_t * vortex, u16 index, u16 * gain)
{
	eqlzr_t *eq = &(vortex->eq);

	if (eq->this28) {
		*gain = eq->this130[index + eq->this10];
		return 0;
	}
	return 1;
}

/* Right-channel counterpart of vortex_Eqlzr_SetLeftGain(). */
static void vortex_Eqlzr_SetRightGain(vortex_t * vortex, u16 index, u16 gain)
{
	eqlzr_t *eq = &(vortex->eq);

	if (eq->this28 == 0)
		return;

	eq->this130[index + eq->this10] = gain;
	if (eq->this54)
		return;

	vortex_EqHw_SetRightGainsSingleTarget(vortex, index, gain);
}
#if 0
/*
 * Read the left and right gain of every band into gains[] and store the
 * total count in *cnt.  Returns 0 on success, 1 on error.
 *
 * Fix: the original had the do/while keywords detached from the loop
 * body, so the body ran exactly once and the trailing
 * "while (eq->this10 > si) ;" became an empty spin loop that would hang
 * whenever more than one band exists.  Restore the intended do/while.
 * (Currently compiled out via #if 0.)
 */
static int
vortex_Eqlzr_GetAllBands(vortex_t * vortex, u16 * gains, s32 *cnt)
{
	eqlzr_t *eq = &(vortex->eq);
	int si = 0;

	if (eq->this10 == 0)
		return 1;

	do {
		if (vortex_Eqlzr_GetLeftGain(vortex, si, &gains[si]))
			return 1;
		if (vortex_Eqlzr_GetRightGain
		    (vortex, si, &gains[si + eq->this10]))
			return 1;
		si++;
	} while (eq->this10 > si);

	*cnt = si * 2;
	return 0;
}
#endif
/* Push all cached gains (left then right halves of this130) to the
 * hardware target registers. */
static int vortex_Eqlzr_SetAllBandsFromActiveCoeffSet(vortex_t * vortex)
{
	eqlzr_t *eq = &(vortex->eq);

	vortex_EqHw_SetLeftGainsTarget(vortex, eq->this130);
	vortex_EqHw_SetRightGainsTarget(vortex, &(eq->this130[eq->this10]));

	return 0;
}

/* Replace the gain cache with gains[] (count must be exactly 2*bands)
 * and write it to hardware unless access is blocked (this54).
 * Returns 0 on success, 1 on bad count or unreadable cache. */
static int
vortex_Eqlzr_SetAllBands(vortex_t * vortex, u16 gains[], s32 count)
{
	eqlzr_t *eq = &(vortex->eq);
	int i;

	if (((eq->this10) * 2 != count) || (eq->this28 == 0))
		return 1;

	for (i = 0; i < count; i++) {
		eq->this130[i] = gains[i];
	}

	if (eq->this54)
		return 0;

	return vortex_Eqlzr_SetAllBandsFromActiveCoeffSet(vortex);
}
/* Store the A3D bypass gain pair and program the hardware with the
 * values scaled (16.16 multiply) by the active base gain (this0e when
 * hardware access is blocked, this0a otherwise). */
static void
vortex_Eqlzr_SetA3dBypassGain(vortex_t * vortex, u32 a, u32 b)
{
	eqlzr_t *eq = &(vortex->eq);
	u32 eax, ebx;

	eq->this58 = a;
	eq->this5c = b;

	if (eq->this54)
		eax = eq->this0e;
	else
		eax = eq->this0a;

	ebx = (eax * eq->this58) >> 0x10;
	eax = (eax * eq->this5c) >> 0x10;
	vortex_EqHw_SetA3DBypassGain(vortex, ebx, eax);
}

/* Re-program the A3D bypass gains from the stored this58/this5c pair
 * (same scaling as vortex_Eqlzr_SetA3dBypassGain()). */
static void vortex_Eqlzr_ProgramA3dBypassGain(vortex_t * vortex)
{
	eqlzr_t *eq = &(vortex->eq);
	u32 eax, ebx;

	if (eq->this54)
		eax = eq->this0e;
	else
		eax = eq->this0a;

	ebx = (eax * eq->this58) >> 0x10;
	eax = (eax * eq->this5c) >> 0x10;
	vortex_EqHw_SetA3DBypassGain(vortex, ebx, eax);
}

/* Silence the A3D inputs by zeroing their destination routes. */
static void vortex_Eqlzr_ShutDownA3d(vortex_t * vortex)
{
	if (vortex != NULL)
		vortex_EqHw_ZeroA3DIO(vortex);
}
/* Enable (bp == 0) or bypass (bp != 0) the equalizer: either reload the
 * cached band gains and the in-use bypass gain, or force flat gains and
 * the disabled-mode bypass gain.  A3D bypass gains are reprogrammed in
 * both cases. */
static void vortex_Eqlzr_SetBypass(vortex_t * vortex, u32 bp)
{
	eqlzr_t *eq = &(vortex->eq);

	if ((eq->this28) && (bp == 0)) {
		/* EQ enabled */
		vortex_Eqlzr_SetAllBandsFromActiveCoeffSet(vortex);
		vortex_EqHw_SetBypassGain(vortex, eq->this08, eq->this08);
	} else {
		/* EQ disabled. */
		vortex_EqHw_SetLeftGainsTarget(vortex, eq->this14_array);
		vortex_EqHw_SetRightGainsTarget(vortex, eq->this14_array);
		vortex_EqHw_SetBypassGain(vortex, eq->this0c, eq->this0c);
	}
	vortex_Eqlzr_ProgramA3dBypassGain(vortex);
}

/* Install the default coefficient set and band gains. */
static void vortex_Eqlzr_ReadAndSetActiveCoefSet(vortex_t * vortex)
{
	eqlzr_t *eq = &(vortex->eq);

	/* Set EQ BiQuad filter coeficients */
	memcpy(&(eq->coefset), &asEqCoefsNormal, sizeof(auxxEqCoeffSet_t));
	/* Set EQ Band gain levels and dump into hardware registers. */
	vortex_Eqlzr_SetAllBands(vortex, eq_gains_normal, eq->this10 * 2);
}
/* Read all band peak levels into peaks[] and report how many were read
 * via *count (2 per band).  Returns 0 on success, 1 when the EQ has no
 * bands configured. */
static int vortex_Eqlzr_GetAllPeaks(vortex_t * vortex, u16 * peaks, int *count)
{
	eqlzr_t *eq = &(vortex->eq);

	if (eq->this10 == 0)
		return 1;

	*count = eq->this10 * 2;
	vortex_EqHw_GetTenBandLevels(vortex, peaks);
	return 0;
}

#if 0
/* Unused accessor for the active coefficient set. */
static auxxEqCoeffSet_t *vortex_Eqlzr_GetActiveCoefSet(vortex_t * vortex)
{
	eqlzr_t *eq = &(vortex->eq);

	return (&(eq->coefset));
}
#endif
/* Initialise the software EQ object and bring the hardware up: zero
 * state, set the sample-rate code, load the default 10-band coefficient
 * set and enable the engine. */
static void vortex_Eqlzr_init(vortex_t * vortex)
{
	eqlzr_t *eq = &(vortex->eq);

	/* Object constructor */
	//eq->this04 = 0;
	eq->this08 = 0;		/* Bypass gain with EQ in use. */
	eq->this0a = 0x5999;
	eq->this0c = 0x5999;	/* Bypass gain with EQ disabled. */
	eq->this0e = 0x5999;

	eq->this10 = 0xa;	/* 10 eq frequency bands. */
	eq->this04.this04 = eq->this10;
	eq->this28 = 0x1;	/* if 1 => Allow read access to this130 (gains) */
	eq->this54 = 0x0;	/* if 1 => Dont Allow access to hardware (gains) */
	eq->this58 = 0xffff;
	eq->this5c = 0xffff;

	/* Set gains. */
	memset(eq->this14_array, 0, sizeof(eq->this14_array));

	/* Actual init. */
	vortex_EqHw_ZeroState(vortex);
	vortex_EqHw_SetSampleRate(vortex, 0x11);
	vortex_Eqlzr_ReadAndSetActiveCoefSet(vortex);

	vortex_EqHw_Program10Band(vortex, &(eq->coefset));
	vortex_Eqlzr_SetBypass(vortex, eq->this54);
	vortex_Eqlzr_SetA3dBypassGain(vortex, 0, 0);
	vortex_EqHw_Enable(vortex);
}

/* Shut the EQ down: silence A3D routes, load pass-through coefficients
 * and disable the engine. */
static void vortex_Eqlzr_shutdown(vortex_t * vortex)
{
	vortex_Eqlzr_ShutDownA3d(vortex);
	vortex_EqHw_ProgramPipe(vortex);
	vortex_EqHw_Disable(vortex);
}
/* ALSA interface */
/* Control interface */
/* "EQ Enable" switch: a boolean control inverting eq->this54 (this54
 * set means bypass, so the control reads 1 when the EQ is active). */
#define snd_vortex_eqtoggle_info	snd_ctl_boolean_mono_info

static int
snd_vortex_eqtoggle_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	vortex_t *vortex = snd_kcontrol_chip(kcontrol);
	eqlzr_t *eq = &(vortex->eq);
	//int i = kcontrol->private_value;

	ucontrol->value.integer.value[0] = eq->this54 ? 0 : 1;

	return 0;
}

static int
snd_vortex_eqtoggle_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	vortex_t *vortex = snd_kcontrol_chip(kcontrol);
	eqlzr_t *eq = &(vortex->eq);
	//int i = kcontrol->private_value;

	eq->this54 = ucontrol->value.integer.value[0] ? 0 : 1;
	vortex_Eqlzr_SetBypass(vortex, eq->this54);

	return 1;		/* Allways changes */
}

static struct snd_kcontrol_new vortex_eqtoggle_kcontrol = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "EQ Enable",
	.index = 0,
	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.private_value = 0,
	.info = snd_vortex_eqtoggle_info,
	.get = snd_vortex_eqtoggle_get,
	.put = snd_vortex_eqtoggle_put
};
/* Per-band stereo gain control; the band index is carried in
 * kcontrol->private_value and the name is filled in at registration. */
static int
snd_vortex_eq_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;	/* left + right */
	uinfo->value.integer.min = 0x0000;
	uinfo->value.integer.max = 0x7fff;
	return 0;
}

static int
snd_vortex_eq_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	vortex_t *vortex = snd_kcontrol_chip(kcontrol);
	int i = kcontrol->private_value;
	u16 gainL = 0, gainR = 0;

	vortex_Eqlzr_GetLeftGain(vortex, i, &gainL);
	vortex_Eqlzr_GetRightGain(vortex, i, &gainR);
	ucontrol->value.integer.value[0] = gainL;
	ucontrol->value.integer.value[1] = gainR;
	return 0;
}

static int
snd_vortex_eq_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	vortex_t *vortex = snd_kcontrol_chip(kcontrol);
	int changed = 0, i = kcontrol->private_value;
	u16 gainL = 0, gainR = 0;

	vortex_Eqlzr_GetLeftGain(vortex, i, &gainL);
	vortex_Eqlzr_GetRightGain(vortex, i, &gainR);

	/* Only touch the hardware (and report a change) for channels
	 * whose value actually differs. */
	if (gainL != ucontrol->value.integer.value[0]) {
		vortex_Eqlzr_SetLeftGain(vortex, i,
					 ucontrol->value.integer.value[0]);
		changed = 1;
	}
	if (gainR != ucontrol->value.integer.value[1]) {
		vortex_Eqlzr_SetRightGain(vortex, i,
					  ucontrol->value.integer.value[1]);
		changed = 1;
	}
	return changed;
}

static struct snd_kcontrol_new vortex_eq_kcontrol = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = " .",		/* placeholder, overwritten per band */
	.index = 0,
	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.private_value = 0,
	.info = snd_vortex_eq_info,
	.get = snd_vortex_eq_get,
	.put = snd_vortex_eq_put
};
/* Read-only "EQ Peaks" control: 20 values = 10 left + 10 right band
 * peak levels (VU-meter style). */
static int
snd_vortex_peaks_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 20;
	uinfo->value.integer.min = 0x0000;
	uinfo->value.integer.max = 0x7fff;
	return 0;
}

static int
snd_vortex_peaks_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	vortex_t *vortex = snd_kcontrol_chip(kcontrol);
	int i, count = 0;
	u16 peaks[20];

	vortex_Eqlzr_GetAllPeaks(vortex, peaks, &count);
	if (count != 20) {
		dev_err(vortex->card->dev,
			"peak count error 20 != %d\n", count);
		/* NOTE(review): -1 reaches userspace as -EPERM; -EINVAL
		 * would describe this better - confirm before changing. */
		return -1;
	}
	for (i = 0; i < 20; i++)
		ucontrol->value.integer.value[i] = peaks[i];

	return 0;
}

static struct snd_kcontrol_new vortex_levels_kcontrol = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "EQ Peaks",
	.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info = snd_vortex_peaks_info,
	.get = snd_vortex_peaks_get,
};
/* EQ band gain labels. */
/*
 * Fixes: the array held non-const pointers to string literals (writing
 * through them would be undefined behavior), and each literal carried a
 * redundant explicit "\0" - string literals are already NUL-terminated.
 * The entries are only ever read (snprintf "%s"), so const-qualifying
 * both the characters and the pointers is safe.
 */
static const char *const EqBandLabels[10] = {
	"EQ0 31Hz",
	"EQ1 63Hz",
	"EQ2 125Hz",
	"EQ3 250Hz",
	"EQ4 500Hz",
	"EQ5 1KHz",
	"EQ6 2KHz",
	"EQ7 4KHz",
	"EQ8 8KHz",
	"EQ9 16KHz",
};
/* ALSA driver entry points. Init and exit. */
/* Initialise the EQ hardware and register the ALSA controls: the
 * enable switch, one stereo gain control per band and the read-only
 * peaks control.  Returns 0 or a negative error code. */
static int vortex_eq_init(vortex_t *vortex)
{
	struct snd_kcontrol *kcontrol;
	int err, i;

	vortex_Eqlzr_init(vortex);

	if ((kcontrol =
	     snd_ctl_new1(&vortex_eqtoggle_kcontrol, vortex)) == NULL)
		return -ENOMEM;
	kcontrol->private_value = 0;
	if ((err = snd_ctl_add(vortex->card, kcontrol)) < 0)
		return err;

	/* EQ gain controls */
	for (i = 0; i < 10; i++) {
		if ((kcontrol =
		     snd_ctl_new1(&vortex_eq_kcontrol, vortex)) == NULL)
			return -ENOMEM;
		/* overwrite the placeholder name with the band label */
		snprintf(kcontrol->id.name, sizeof(kcontrol->id.name),
			 "%s Playback Volume", EqBandLabels[i]);
		kcontrol->private_value = i;
		if ((err = snd_ctl_add(vortex->card, kcontrol)) < 0)
			return err;
		//vortex->eqctrl[i] = kcontrol;
	}
	/* EQ band levels */
	if ((kcontrol = snd_ctl_new1(&vortex_levels_kcontrol, vortex)) == NULL)
		return -ENOMEM;
	if ((err = snd_ctl_add(vortex->card, kcontrol)) < 0)
		return err;

	return 0;
}
/* Shut the EQ hardware down.  The controls themselves are freed by the
 * card core; the explicit snd_ctl_remove() loop below was disabled
 * because of a crash (see FIXME). */
static int vortex_eq_free(vortex_t * vortex)
{
	/*
	//FIXME: segfault because vortex->eqctrl[i] == 4
	int i;
	for (i=0; i<10; i++) {
		if (vortex->eqctrl[i])
			snd_ctl_remove(vortex->card, vortex->eqctrl[i]);
	}
	*/
	vortex_Eqlzr_shutdown(vortex);
	return 0;
}
/* End */
| gpl-2.0 |
CyanHacker-Lollipop/kernel_moto_shamu | drivers/net/wireless/orinoco/orinoco_cs.c | 2319 | 14044 | /* orinoco_cs.c (formerly known as dldwd_cs.c)
*
* A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
* as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
* EnteraSys RoamAbout 802.11, ELSA Airlancer, Melco Buffalo and others).
* It should also be usable on various Prism II based cards such as the
* Linksys, D-Link and Farallon Skyline. It should also work on Symbol
* cards such as the 3Com AirConnect and Ericsson WLAN.
*
* Copyright notice & release notes in file main.c
*/
#define DRIVER_NAME "orinoco_cs"
#define PFX DRIVER_NAME ": "
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
#include "orinoco.h"
/********************************************************************/
/* Module stuff */
/********************************************************************/
MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
MODULE_DESCRIPTION("Driver for PCMCIA Lucent Orinoco,"
" Prism II based and similar wireless cards");
MODULE_LICENSE("Dual MPL/GPL");
/* Module parameters */
/* Some D-Link cards have buggy CIS. They do work at 5v properly, but
* don't have any CIS entry for it. This workaround it... */
static int ignore_cis_vcc; /* = 0 */
module_param(ignore_cis_vcc, int, 0);
MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket");
/********************************************************************/
/* Data structures */
/********************************************************************/
/* PCMCIA specific device information (goes in the card field of
* struct orinoco_private */
struct orinoco_pccard {
	struct pcmcia_device *p_dev;	/* back-pointer to the PCMCIA device */

	/* Used to handle hard reset: bit 0 is set while a hard reset is
	 * in progress so the suspend/resume callbacks that the reset
	 * itself triggers can recognise it and skip bringing the
	 * interface down/up.  Yuck - we need this hack to work around
	 * the insanity of the PCMCIA layer. */
	unsigned long hard_reset_in_progress;
};
/********************************************************************/
/* Function prototypes */
/********************************************************************/
static int orinoco_cs_config(struct pcmcia_device *link);
static void orinoco_cs_release(struct pcmcia_device *link);
static void orinoco_cs_detach(struct pcmcia_device *p_dev);
/********************************************************************/
/* Device methods */
/********************************************************************/
/* Perform a PCMCIA bus reset of the card.  The hard_reset_in_progress
 * bit tells our own suspend/resume handlers to ignore the events that
 * this reset generates. */
static int
orinoco_cs_hard_reset(struct orinoco_private *priv)
{
	struct orinoco_pccard *card = priv->card;
	int err;

	/* Atomic bit ops: we are not holding the driver lock here. */
	set_bit(0, &card->hard_reset_in_progress);

	err = pcmcia_reset_card(card->p_dev->socket);
	if (err != 0)
		return err;
	msleep(100);

	clear_bit(0, &card->hard_reset_in_progress);
	return 0;
}
/********************************************************************/
/* PCMCIA stuff */
/********************************************************************/
/* Allocate driver state for a newly inserted card and start its
 * configuration.  Called by the PCMCIA core on device insertion. */
static int
orinoco_cs_probe(struct pcmcia_device *link)
{
	struct orinoco_pccard *card;
	struct orinoco_private *priv =
		alloc_orinocodev(sizeof(*card), &link->dev,
				 orinoco_cs_hard_reset, NULL);

	if (priv == NULL)
		return -ENOMEM;

	/* Cross-link the generic and the PCMCIA-specific structures. */
	card = priv->card;
	card->p_dev = link;
	link->priv = priv;

	return orinoco_cs_config(link);
}
/* Tear a device down: unregister the network interface, release the
 * PCMCIA resources and free the driver state allocated in probe. */
static void orinoco_cs_detach(struct pcmcia_device *link)
{
	struct orinoco_private *priv = link->priv;

	orinoco_if_del(priv);
	orinoco_cs_release(link);
	free_orinocodev(priv);	/* frees the embedded orinoco_pccard too */
}
/*
 * Try one CIS configuration entry on behalf of pcmcia_loop_config():
 * reject index 0 (it describes no usable I/O window) and attempt to
 * claim the I/O resources of the entry.  Returns 0 when the entry is
 * usable, a negative errno otherwise.
 *
 * Fix: removed the stray ';' that followed the function's closing brace.
 */
static int orinoco_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
{
	if (p_dev->config_index == 0)
		return -EINVAL;

	return pcmcia_request_io(p_dev);
}
/*
 * Configure the PCMCIA socket for the card: pick a matching CIS entry,
 * map the I/O window, install the interrupt handler and register the
 * network interface.  Returns 0 on success; on any failure everything
 * acquired so far is released and -ENODEV is returned.
 */
static int
orinoco_cs_config(struct pcmcia_device *link)
{
	struct orinoco_private *priv = link->priv;
	struct hermes *hw = &priv->hw;
	int ret;
	void __iomem *mem;

	/* Let the PCMCIA core choose Vpp/Vcc and the I/O window from the
	 * CIS; the Vcc sanity check can be skipped for cards with buggy
	 * CIS tables (module parameter ignore_cis_vcc). */
	link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC |
		CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
	if (ignore_cis_vcc)
		link->config_flags &= ~CONF_AUTO_CHECK_VCC;
	ret = pcmcia_loop_config(link, orinoco_cs_config_check, NULL);
	if (ret) {
		if (!ignore_cis_vcc)
			printk(KERN_ERR PFX "GetNextTuple(): No matching "
			       "CIS configuration. Maybe you need the "
			       "ignore_cis_vcc=1 parameter.\n");
		goto failed;
	}

	mem = ioport_map(link->resource[0]->start,
			 resource_size(link->resource[0]));
	if (!mem)
		goto failed;

	/* We initialize the hermes structure before completing PCMCIA
	 * configuration just in case the interrupt handler gets
	 * called. */
	hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);

	ret = pcmcia_request_irq(link, orinoco_interrupt);
	if (ret)
		goto failed;

	ret = pcmcia_enable_device(link);
	if (ret)
		goto failed;

	/* Initialise the main driver */
	if (orinoco_init(priv) != 0) {
		printk(KERN_ERR PFX "orinoco_init() failed\n");
		goto failed;
	}

	/* Register an interface with the stack */
	if (orinoco_if_add(priv, link->resource[0]->start,
			   link->irq, NULL) != 0) {
		printk(KERN_ERR PFX "orinoco_if_add() failed\n");
		goto failed;
	}

	return 0;

failed:
	orinoco_cs_release(link);
	return -ENODEV;
}
/*
 * Undo orinoco_cs_config(): mark the hardware unavailable (under the
 * driver lock, so concurrent operations notice), disable the PCMCIA
 * device and unmap the I/O window if it was mapped.  Safe to call
 * after a partially failed configuration.
 */
static void
orinoco_cs_release(struct pcmcia_device *link)
{
	struct orinoco_private *priv = link->priv;
	unsigned long flags;

	/* We're committed to taking the device away now, so mark the
	 * hardware as unavailable */
	priv->hw.ops->lock_irqsave(&priv->lock, &flags);
	priv->hw_unavailable++;
	priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);

	pcmcia_disable_device(link);
	if (priv->hw.iobase)
		ioport_unmap(priv->hw.iobase);
}
/* PM suspend hook: bring the interface down, unless this suspend was
 * triggered by our own hard reset. */
static int orinoco_cs_suspend(struct pcmcia_device *link)
{
	struct orinoco_private *priv = link->priv;
	struct orinoco_pccard *card = priv->card;
	int resetting = test_bit(0, &card->hard_reset_in_progress);

	/* This is probably racy, but short of rewriting the PCMCIA
	 * layer to not suck there is no better way to tell a real
	 * suspend apart from a reset-induced one. */
	if (!resetting)
		orinoco_down(priv);

	return 0;
}
/* PM resume hook: bring the interface back up, unless this resume was
 * caused by our own hard reset rather than a power transition. */
static int orinoco_cs_resume(struct pcmcia_device *link)
{
	struct orinoco_private *priv = link->priv;
	struct orinoco_pccard *card = priv->card;

	if (test_bit(0, &card->hard_reset_in_progress))
		return 0;

	return orinoco_up(priv);
}
/********************************************************************/
/* Module initialization */
/********************************************************************/
/* PCMCIA match table.  Hermes (Agere/Lucent) based cards are matched
 * unconditionally; entries that certainly identify a Prism chipset are
 * only compiled in with CONFIG_HERMES_PRISM. */
static const struct pcmcia_device_id orinoco_cs_ids[] = {
	PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
	PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
	PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
	PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
	PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */
	PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0003), /* ARtem Onair Comcard 11 */
	PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */
	PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */
	PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */
	PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */
	PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */
	PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
	PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
	PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
	PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
	PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
	PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
	PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
	PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
	PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
	PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
	PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
	PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
	PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
	PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
	PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
	PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
	PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
	PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
	PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
	PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
	PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
	PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
	PCMCIA_DEVICE_MANF_CARD_PROD_ID3(0x0156, 0x0002, "Version 01.01", 0xd27deb1a), /* Lucent Orinoco */
#ifdef CONFIG_HERMES_PRISM
	/* Only entries that certainly identify Prism chipset */
	PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
	PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
	PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
	PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
	PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
	PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
	PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
	PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
	PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
	PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
	PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
	PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */
	PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */
	PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0008), /* CONTEC FLEXSCAN/FX-DDS110-PCC */
	PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), /* Conceptronic CON11Cpro, EMTAC A2424i */
	PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), /* Safeway 802.11b, ZCOMAX AirRunner/XI-300 */
	PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), /* D-Link DCF660, Sandisk Connect SDWCFB-000 */
	PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
	PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5),
	PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2),
	PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18),
	PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
	PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b),
	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
	PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
	PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
	PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac),
	PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab),
	PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
	PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18),
	PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77),
	PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf),
	PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395),
	PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01),
	PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1),
	PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
	PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6),
	PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264),
	PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178),
	PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757),
	PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a),
	PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee),
	PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
	PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
	PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
	PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
	/* This may be Agere or Intersil Firmware */
	PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002),
#endif
	PCMCIA_DEVICE_NULL,	/* required terminator */
};
MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids);
/* Glue between the PCMCIA core and this driver's entry points. */
static struct pcmcia_driver orinoco_driver = {
	.owner		= THIS_MODULE,
	.name		= DRIVER_NAME,
	.probe		= orinoco_cs_probe,
	.remove		= orinoco_cs_detach,
	.id_table	= orinoco_cs_ids,
	.suspend	= orinoco_cs_suspend,
	.resume		= orinoco_cs_resume,
};
module_pcmcia_driver(orinoco_driver);
| gpl-2.0 |
ShinySide/kernel_T230NU_NE2 | drivers/net/wireless/zd1211rw/zd_mac.c | 2319 | 41414 | /* ZD1211 USB-WLAN driver for Linux
*
* Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
* Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
* Copyright (C) 2006-2007 Michael Wu <flamingice@sourmilk.net>
* Copyright (C) 2007-2008 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/jiffies.h>
#include <net/ieee80211_radiotap.h>
#include "zd_def.h"
#include "zd_chip.h"
#include "zd_mac.h"
#include "zd_rf.h"
/* Maps a ZD1211 EEPROM regulatory-domain code to a two-character
 * country code (not NUL-terminated). */
struct zd_reg_alpha2_map {
	u32 reg;	/* ZD_REGDOMAIN_* value */
	char alpha2[2];	/* ISO alpha2 country code */
};
/* Regdomain -> country translation table used by zd_reg2alpha2(). */
static struct zd_reg_alpha2_map reg_alpha2_map[] = {
	{ ZD_REGDOMAIN_FCC, "US" },
	{ ZD_REGDOMAIN_IC, "CA" },
	{ ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */
	{ ZD_REGDOMAIN_JAPAN, "JP" },
	{ ZD_REGDOMAIN_JAPAN_2, "JP" },
	{ ZD_REGDOMAIN_JAPAN_3, "JP" },
	{ ZD_REGDOMAIN_SPAIN, "ES" },
	{ ZD_REGDOMAIN_FRANCE, "FR" },
};
/* This table contains the hardware specific values for the modulation rates.
 * .bitrate is in units of 100 kbit/s (mac80211 convention).  CCK rates
 * above 1M also carry a short-preamble hw value; OFDM rates have none. */
static const struct ieee80211_rate zd_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ZD_CCK_RATE_1M, },
	{ .bitrate = 20,
	  .hw_value = ZD_CCK_RATE_2M,
	  .hw_value_short = ZD_CCK_RATE_2M | ZD_CCK_PREA_SHORT,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ZD_CCK_RATE_5_5M,
	  .hw_value_short = ZD_CCK_RATE_5_5M | ZD_CCK_PREA_SHORT,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ZD_CCK_RATE_11M,
	  .hw_value_short = ZD_CCK_RATE_11M | ZD_CCK_PREA_SHORT,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ZD_OFDM_RATE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ZD_OFDM_RATE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ZD_OFDM_RATE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ZD_OFDM_RATE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ZD_OFDM_RATE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ZD_OFDM_RATE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ZD_OFDM_RATE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ZD_OFDM_RATE_54M,
	  .flags = 0 },
};
/*
 * Zydas retry rates table. Each line is listed in the same order as
 * in zd_rates[] and contains all the rates used when a packet is sent
 * starting with a given rate. Let's consider an example :
 *
 * "11 Mbits : 4, 3, 2, 1, 0" means :
 * - packet is sent using 4 different rates
 * - 1st rate is index 3 (ie 11 Mbits)
 * - 2nd rate is index 2 (ie 5.5 Mbits)
 * - 3rd rate is index 1 (ie 2 Mbits)
 * - 4th rate is index 0 (ie 1 Mbits)
 *
 * The first struct member is the number of rates in the fallback
 * sequence; the array holds the zd_rates[] indices in descending order.
 */
static const struct tx_retry_rate zd_retry_rates[] = {
	{ /*  1 Mbits */	1, { 0 }},
	{ /*  2 Mbits */	2, { 1,  0 }},
	{ /* 5.5 Mbits */	3, { 2,  1, 0 }},
	{ /* 11 Mbits */	4, { 3,  2, 1, 0 }},
	{ /*  6 Mbits */	5, { 4,  3, 2, 1, 0 }},
	{ /*  9 Mbits */	6, { 5,  4, 3, 2, 1, 0}},
	{ /* 12 Mbits */	5, { 6,  3, 2, 1, 0 }},
	{ /* 18 Mbits */	6, { 7,  6, 3, 2, 1, 0 }},
	{ /* 24 Mbits */	6, { 8,  6, 3, 2, 1, 0 }},
	{ /* 36 Mbits */	7, { 9,  8, 6, 3, 2, 1, 0 }},
	{ /* 48 Mbits */	8, {10,  9, 8, 6, 3, 2, 1, 0 }},
	{ /* 54 Mbits */	9, {11, 10, 9, 8, 6, 3, 2, 1, 0 }}
};
/* 2.4 GHz channel list; hw_value is the 802.11b/g channel number
 * programmed into the device. */
static const struct ieee80211_channel zd_channels[] = {
	{ .center_freq = 2412, .hw_value = 1 },
	{ .center_freq = 2417, .hw_value = 2 },
	{ .center_freq = 2422, .hw_value = 3 },
	{ .center_freq = 2427, .hw_value = 4 },
	{ .center_freq = 2432, .hw_value = 5 },
	{ .center_freq = 2437, .hw_value = 6 },
	{ .center_freq = 2442, .hw_value = 7 },
	{ .center_freq = 2447, .hw_value = 8 },
	{ .center_freq = 2452, .hw_value = 9 },
	{ .center_freq = 2457, .hw_value = 10 },
	{ .center_freq = 2462, .hw_value = 11 },
	{ .center_freq = 2467, .hw_value = 12 },
	{ .center_freq = 2472, .hw_value = 13 },
	{ .center_freq = 2484, .hw_value = 14 },
};
static void housekeeping_init(struct zd_mac *mac);
static void housekeeping_enable(struct zd_mac *mac);
static void housekeeping_disable(struct zd_mac *mac);
static void beacon_init(struct zd_mac *mac);
static void beacon_enable(struct zd_mac *mac);
static void beacon_disable(struct zd_mac *mac);
static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble);
static int zd_mac_config_beacon(struct ieee80211_hw *hw,
struct sk_buff *beacon, bool in_intr);
/*
 * zd_reg2alpha2 - translate a device regdomain code to a country code
 * @regdomain: regulatory-domain byte read from the device
 * @alpha2: output buffer receiving exactly two country-code characters
 *          (no NUL terminator is written)
 *
 * Returns 0 if a mapping exists in reg_alpha2_map[], 1 otherwise.
 *
 * Fix: the lookup read "®_alpha2_map[i]" - a mangled encoding of
 * "&reg_alpha2_map[i]" where "&reg" had been turned into the HTML
 * entity for the registered-trademark sign - which does not compile.
 */
static int zd_reg2alpha2(u8 regdomain, char *alpha2)
{
	unsigned int i;
	struct zd_reg_alpha2_map *reg_map;

	for (i = 0; i < ARRAY_SIZE(reg_alpha2_map); i++) {
		reg_map = &reg_alpha2_map[i];
		if (regdomain == reg_map->reg) {
			alpha2[0] = reg_map->alpha2[0];
			alpha2[1] = reg_map->alpha2[1];
			return 0;
		}
	}
	return 1;
}
/* Clamp a signal value reported by the device to the 0..100 range,
 * logging a debug message when it was out of range. */
static int zd_check_signal(struct ieee80211_hw *hw, int signal)
{
	struct zd_mac *mac = zd_hw_mac(hw);

	dev_dbg_f_cond(zd_mac_dev(mac), signal < 0 || signal > 100,
		       "%s: signal value from device not in range 0..100, "
		       "but %d.\n", __func__, signal);

	if (signal < 0)
		return 0;
	if (signal > 100)
		return 100;
	return signal;
}
/* Read the permanent MAC address from the device firmware and register
 * it with mac80211.  Returns 0 or a negative error from the chip layer. */
int zd_mac_preinit_hw(struct ieee80211_hw *hw)
{
	struct zd_mac *mac = zd_hw_mac(hw);
	u8 addr[ETH_ALEN];
	int r = zd_chip_read_mac_addr_fw(&mac->chip, addr);

	if (r)
		return r;

	SET_IEEE80211_PERM_ADDR(hw, addr);
	return 0;
}
/*
 * One-time hardware initialization: bring the chip up, read the
 * regulatory domain, force software encryption handling and pass the
 * derived country code to the regulatory core.  Interrupts are enabled
 * only for the duration of this function.
 */
int zd_mac_init_hw(struct ieee80211_hw *hw)
{
	int r;
	struct zd_mac *mac = zd_hw_mac(hw);
	struct zd_chip *chip = &mac->chip;
	char alpha2[2];
	u8 default_regdomain;

	r = zd_chip_enable_int(chip);
	if (r)
		goto out;
	r = zd_chip_init_hw(chip);
	if (r)
		goto disable_int;

	ZD_ASSERT(!irqs_disabled());

	r = zd_read_regdomain(chip, &default_regdomain);
	if (r)
		goto disable_int;
	spin_lock_irq(&mac->lock);
	mac->regdomain = mac->default_regdomain = default_regdomain;
	spin_unlock_irq(&mac->lock);

	/* We must inform the device that we are doing encryption/decryption in
	 * software at the moment. */
	r = zd_set_encryption_type(chip, ENC_SNIFFER);
	if (r)
		goto disable_int;

	r = zd_reg2alpha2(mac->regdomain, alpha2);
	if (r)
		goto disable_int;

	r = regulatory_hint(hw->wiphy, alpha2);
disable_int:
	zd_chip_disable_int(chip);
out:
	return r;
}
/* Final teardown of the MAC state: drain pending work, release chip
 * resources and poison the structure to catch use-after-free. */
void zd_mac_clear(struct zd_mac *mac)
{
	flush_workqueue(zd_workqueue);
	zd_chip_clear(&mac->chip);
	ZD_ASSERT(!spin_is_locked(&mac->lock));
	ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
}
/* Program the RX filter register: station defaults, plus control
 * frames when mac->pass_ctrl requests them. */
static int set_rx_filter(struct zd_mac *mac)
{
	unsigned long flags;
	u32 filter = STA_RX_FILTER;

	/* pass_ctrl is protected by mac->lock. */
	spin_lock_irqsave(&mac->lock, flags);
	if (mac->pass_ctrl)
		filter |= RX_FILTER_CTRL;
	spin_unlock_irqrestore(&mac->lock, flags);

	return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter);
}
/* Write the interface address to the hardware, then program either the
 * BSSID register (AP mode) or the RX filter (all other modes).
 * Vendor driver after setting MAC either sets BSSID for AP or
 * filter for other modes. */
static int set_mac_and_bssid(struct zd_mac *mac)
{
	int r;

	if (!mac->vif)
		return -1;

	r = zd_write_mac_addr(&mac->chip, mac->vif->addr);
	if (r)
		return r;

	if (mac->type == NL80211_IFTYPE_AP)
		return zd_write_bssid(&mac->chip, mac->vif->addr);

	return set_rx_filter(mac);
}
/* Clear the hardware multicast hash filter (no multicast groups). */
static int set_mc_hash(struct zd_mac *mac)
{
	struct zd_mc_hash hash;

	zd_mc_clear(&hash);
	return zd_chip_set_multicast_hash(&mac->chip, &hash);
}
/*
 * mac80211 start callback: initialize the USB side on first use, then
 * enable interrupts, program basic rates, RX filter and multicast
 * hash, switch the radio on and enable RX/TX plus hardware interrupts.
 * On error the steps completed so far are unwound in reverse order.
 */
int zd_op_start(struct ieee80211_hw *hw)
{
	struct zd_mac *mac = zd_hw_mac(hw);
	struct zd_chip *chip = &mac->chip;
	struct zd_usb *usb = &chip->usb;
	int r;

	if (!usb->initialized) {
		r = zd_usb_init_hw(usb);
		if (r)
			goto out;
	}

	r = zd_chip_enable_int(chip);
	if (r < 0)
		goto out;

	r = zd_chip_set_basic_rates(chip, CR_RATES_80211B | CR_RATES_80211G);
	if (r < 0)
		goto disable_int;
	r = set_rx_filter(mac);
	if (r)
		goto disable_int;
	r = set_mc_hash(mac);
	if (r)
		goto disable_int;

	/* Wait after setting the multicast hash table and powering on
	 * the radio otherwise interface bring up will fail. This matches
	 * what the vendor driver did.
	 */
	msleep(10);

	r = zd_chip_switch_radio_on(chip);
	if (r < 0) {
		dev_err(zd_chip_dev(chip),
			"%s: failed to set radio on\n", __func__);
		goto disable_int;
	}
	r = zd_chip_enable_rxtx(chip);
	if (r < 0)
		goto disable_radio;
	r = zd_chip_enable_hwint(chip);
	if (r < 0)
		goto disable_rxtx;

	housekeeping_enable(mac);
	beacon_enable(mac);
	set_bit(ZD_DEVICE_RUNNING, &mac->flags);
	return 0;
disable_rxtx:
	zd_chip_disable_rxtx(chip);
disable_radio:
	zd_chip_switch_radio_off(chip);
disable_int:
	zd_chip_disable_int(chip);
out:
	return r;
}
/*
 * mac80211 stop callback.  RX/TX is disabled first so no received
 * frames can reach mac80211 after it has been stopped; the remaining
 * hardware teardown follows and the ACK wait queue is drained.
 */
void zd_op_stop(struct ieee80211_hw *hw)
{
	struct zd_mac *mac = zd_hw_mac(hw);
	struct zd_chip *chip = &mac->chip;
	struct sk_buff *skb;
	struct sk_buff_head *ack_wait_queue = &mac->ack_wait_queue;

	clear_bit(ZD_DEVICE_RUNNING, &mac->flags);

	/* The order here deliberately is a little different from the open()
	 * method, since we need to make sure there is no opportunity for RX
	 * frames to be processed by mac80211 after we have stopped it.
	 */
	zd_chip_disable_rxtx(chip);
	beacon_disable(mac);
	housekeeping_disable(mac);
	flush_workqueue(zd_workqueue);

	zd_chip_disable_hwint(chip);
	zd_chip_switch_radio_off(chip);
	zd_chip_disable_int(chip);

	/* Drop frames still waiting for a hardware ACK report. */
	while ((skb = skb_dequeue(ack_wait_queue)))
		dev_kfree_skb_any(skb);
}
/*
 * Re-program the device with the currently cached driver settings
 * (address/BSSID, channel, preamble, multicast hash and - for
 * beaconing interface types - the beacon itself and its interval).
 * Used after events that reset hardware state.  Returns 0 or the
 * first error encountered.
 */
int zd_restore_settings(struct zd_mac *mac)
{
	struct sk_buff *beacon;
	struct zd_mc_hash multicast_hash;
	unsigned int short_preamble;
	int r, beacon_interval, beacon_period;
	u8 channel;

	dev_dbg_f(zd_mac_dev(mac), "\n");

	/* Snapshot the cached settings under the lock. */
	spin_lock_irq(&mac->lock);
	multicast_hash = mac->multicast_hash;
	short_preamble = mac->short_preamble;
	beacon_interval = mac->beacon.interval;
	beacon_period = mac->beacon.period;
	channel = mac->channel;
	spin_unlock_irq(&mac->lock);

	r = set_mac_and_bssid(mac);
	if (r < 0) {
		dev_dbg_f(zd_mac_dev(mac), "set_mac_and_bssid failed, %d\n", r);
		return r;
	}

	r = zd_chip_set_channel(&mac->chip, channel);
	if (r < 0) {
		dev_dbg_f(zd_mac_dev(mac), "zd_chip_set_channel failed, %d\n",
			  r);
		return r;
	}

	set_rts_cts(mac, short_preamble);

	r = zd_chip_set_multicast_hash(&mac->chip, &multicast_hash);
	if (r < 0) {
		dev_dbg_f(zd_mac_dev(mac),
			  "zd_chip_set_multicast_hash failed, %d\n", r);
		return r;
	}

	/* Beaconing interface types also need the beacon restored. */
	if (mac->type == NL80211_IFTYPE_MESH_POINT ||
	    mac->type == NL80211_IFTYPE_ADHOC ||
	    mac->type == NL80211_IFTYPE_AP) {
		if (mac->vif != NULL) {
			beacon = ieee80211_beacon_get(mac->hw, mac->vif);
			if (beacon)
				zd_mac_config_beacon(mac->hw, beacon, false);
		}

		zd_set_beacon_interval(&mac->chip, beacon_interval,
				       beacon_period, mac->type);

		spin_lock_irq(&mac->lock);
		mac->beacon.last_update = jiffies;
		spin_unlock_irq(&mac->lock);
	}

	return 0;
}
/**
 * zd_mac_tx_status - reports tx status of a packet if required
 * @hw: a &struct ieee80211_hw pointer
 * @skb: a sk-buffer
 * @ackssi: ACK signal strength
 * @tx_status: hardware TX status report, or NULL when none is available
 *	(a NULL report is treated as success with a single transmission)
 *
 * This information calls ieee80211_tx_status_irqsafe() if required by the
 * control information. It copies the control information into the status
 * information.
 *
 * If no status information has been requested, the skb is freed.
 */
static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int ackssi, struct tx_status *tx_status)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int i;
	int success = 1, retry = 1;
	int first_idx;
	const struct tx_retry_rate *retries;

	ieee80211_tx_info_clear_status(info);

	if (tx_status) {
		success = !tx_status->failure;
		retry = tx_status->retry + success;
	}

	if (success) {
		/* success */
		info->flags |= IEEE80211_TX_STAT_ACK;
	} else {
		/* failure */
		info->flags &= ~IEEE80211_TX_STAT_ACK;
	}

	/* Reconstruct the per-attempt rate list from the fixed
	 * hardware fallback sequence of the first rate. */
	first_idx = info->status.rates[0].idx;
	ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
	retries = &zd_retry_rates[first_idx];
	ZD_ASSERT(1 <= retry && retry <= retries->count);

	info->status.rates[0].idx = retries->rate[0];
	info->status.rates[0].count = 1; // (retry > 1 ? 2 : 1);

	for (i=1; i<IEEE80211_TX_MAX_RATES-1 && i<retry; i++) {
		info->status.rates[i].idx = retries->rate[i];
		info->status.rates[i].count = 1; // ((i==retry-1) && success ? 1:2);
	}
	for (; i<IEEE80211_TX_MAX_RATES && i<retry; i++) {
		info->status.rates[i].idx = retries->rate[retry - 1];
		info->status.rates[i].count = 1; // (success ? 1:2);
	}
	if (i<IEEE80211_TX_MAX_RATES)
		info->status.rates[i].idx = -1; /* terminate */

	info->status.ack_signal = zd_check_signal(hw, ackssi);
	ieee80211_tx_status_irqsafe(hw, skb);
}
/**
 * zd_mac_tx_failed - callback for failed frames
 * @urb: USB request block carrying the hardware &struct tx_status report
 *
 * This function is called if a frame couldn't be successfully
 * transferred. The first matching frame from the tx queue will be
 * selected and reported as error to the upper layers.
 */
void zd_mac_tx_failed(struct urb *urb)
{
	struct ieee80211_hw * hw = zd_usb_to_hw(urb->context);
	struct zd_mac *mac = zd_hw_mac(hw);
	struct sk_buff_head *q = &mac->ack_wait_queue;
	struct sk_buff *skb;
	struct tx_status *tx_status = (struct tx_status *)urb->transfer_buffer;
	unsigned long flags;
	int success = !tx_status->failure;
	int retry = tx_status->retry + success;
	int found = 0;
	int i, position = 0;

	q = &mac->ack_wait_queue;
	spin_lock_irqsave(&q->lock, flags);

	/* Walk the queue looking for the frame this report belongs to,
	 * matching on destination address and final fallback rate. */
	skb_queue_walk(q, skb) {
		struct ieee80211_hdr *tx_hdr;
		struct ieee80211_tx_info *info;
		int first_idx, final_idx;
		const struct tx_retry_rate *retries;
		u8 final_rate;

		position ++;

		/* if the hardware reports a failure and we had a 802.11 ACK
		 * pending, then we skip the first skb when searching for a
		 * matching frame */
		if (tx_status->failure && mac->ack_pending &&
		    skb_queue_is_first(q, skb)) {
			continue;
		}

		tx_hdr = (struct ieee80211_hdr *)skb->data;

		/* we skip all frames not matching the reported destination */
		if (unlikely(memcmp(tx_hdr->addr1, tx_status->mac, ETH_ALEN))) {
			continue;
		}

		/* we skip all frames not matching the reported final rate */

		info = IEEE80211_SKB_CB(skb);
		first_idx = info->status.rates[0].idx;
		ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
		retries = &zd_retry_rates[first_idx];
		if (retry <= 0 || retry > retries->count)
			continue;

		final_idx = retries->rate[retry - 1];
		final_rate = zd_rates[final_idx].hw_value;

		if (final_rate != tx_status->rate) {
			continue;
		}

		found = 1;
		break;
	}

	if (found) {
		/* Flush every frame up to and including the match; only
		 * the matching frame gets the real status report. */
		for (i=1; i<=position; i++) {
			skb = __skb_dequeue(q);
			zd_mac_tx_status(hw, skb,
					 mac->ack_pending ? mac->ack_signal : 0,
					 i == position ? tx_status : NULL);
			mac->ack_pending = 0;
		}
	}

	spin_unlock_irqrestore(&q->lock, flags);
}
/**
 * zd_mac_tx_to_dev - callback for USB layer
 * @skb: a &sk_buff pointer
 * @error: error value, 0 if transmission successful
 *
 * Informs the MAC layer that the frame has successfully transferred to the
 * device. If an ACK is required and the transfer to the device has been
 * successful, the packets are put on the @ack_wait_queue with
 * the control set removed.
 */
void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = info->rate_driver_data[0];
	struct zd_mac *mac = zd_hw_mac(hw);

	ieee80211_tx_info_clear_status(info);

	skb_pull(skb, sizeof(struct zd_ctrlset));
	if (unlikely(error ||
	    (info->flags & IEEE80211_TX_CTL_NO_ACK))) {
		/*
		 * FIXME : do we need to fill in anything ?
		 */
		ieee80211_tx_status_irqsafe(hw, skb);
	} else {
		struct sk_buff_head *q = &mac->ack_wait_queue;

		skb_queue_tail(q, skb);
		/* Bound the queue: age out the oldest waiters, reporting
		 * them with whatever ACK signal is currently pending. */
		while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) {
			zd_mac_tx_status(hw, skb_dequeue(q),
					 mac->ack_pending ? mac->ack_signal : 0,
					 NULL);
			mac->ack_pending = 0;
		}
	}
}
/*
 * Compute the transmission time in microseconds of a @tx_length byte
 * frame at rate @zd_rate.  For the 11M CCK rate, the PLCP service
 * field length-extension bit is set via @service when needed (may be
 * NULL).  Returns the duration, or -EINVAL for an unknown rate.
 */
static int zd_calc_tx_length_us(u8 *service, u8 zd_rate, u16 tx_length)
{
	/* ZD_PURE_RATE() must be used to remove the modulation type flag of
	 * the zd-rate values.
	 */
	static const u8 rate_divisor[] = {
		[ZD_PURE_RATE(ZD_CCK_RATE_1M)]   =  1,
		[ZD_PURE_RATE(ZD_CCK_RATE_2M)]   =  2,

		/* bits must be doubled */
		[ZD_PURE_RATE(ZD_CCK_RATE_5_5M)] = 11,

		[ZD_PURE_RATE(ZD_CCK_RATE_11M)]  = 11,
		[ZD_PURE_RATE(ZD_OFDM_RATE_6M)]  =  6,
		[ZD_PURE_RATE(ZD_OFDM_RATE_9M)]  =  9,
		[ZD_PURE_RATE(ZD_OFDM_RATE_12M)] = 12,
		[ZD_PURE_RATE(ZD_OFDM_RATE_18M)] = 18,
		[ZD_PURE_RATE(ZD_OFDM_RATE_24M)] = 24,
		[ZD_PURE_RATE(ZD_OFDM_RATE_36M)] = 36,
		[ZD_PURE_RATE(ZD_OFDM_RATE_48M)] = 48,
		[ZD_PURE_RATE(ZD_OFDM_RATE_54M)] = 54,
	};

	u32 bits = (u32)tx_length * 8;
	u32 divisor;

	divisor = rate_divisor[ZD_PURE_RATE(zd_rate)];
	if (divisor == 0)
		return -EINVAL;	/* gap in the table: unknown rate */

	switch (zd_rate) {
	case ZD_CCK_RATE_5_5M:
		bits = (2*bits) + 10; /* round up to the next integer */
		break;
	case ZD_CCK_RATE_11M:
		if (service) {
			u32 t = bits % 11;
			*service &= ~ZD_PLCP_SERVICE_LENGTH_EXTENSION;
			if (0 < t && t <= 3) {
				*service |= ZD_PLCP_SERVICE_LENGTH_EXTENSION;
			}
		}
		bits += 10; /* round up to the next integer */
		break;
	}

	return bits/divisor;
}
/* Derive the control byte of the hardware control set from the 802.11
 * header and the mac80211 TX info.
 *
 * CONTROL TODO:
 * - if backoff needed, enable bit 0
 * - if burst (backoff not needed) disable bit 0
 */
static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
			   struct ieee80211_hdr *header,
			   struct ieee80211_tx_info *info)
{
	unsigned int control = 0;

	/* First fragment */
	if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
		control |= ZD_CS_NEED_RANDOM_BACKOFF;

	/* No ACK expected (multicast, etc.) */
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		control |= ZD_CS_NO_ACK;

	/* PS-POLL */
	if (ieee80211_is_pspoll(header->frame_control))
		control |= ZD_CS_PS_POLL_FRAME;

	/* Protection: RTS/CTS or CTS-to-self. */
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		control |= ZD_CS_RTS;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		control |= ZD_CS_SELF_CTS;

	/* FIXME: Management frame? */

	cs->control = control;
}
/* Return true when @beacon is byte-identical to the beacon we last
 * uploaded to the hardware (cached in mac->beacon.cur_beacon). */
static bool zd_mac_match_cur_beacon(struct zd_mac *mac, struct sk_buff *beacon)
{
	struct sk_buff *cur = mac->beacon.cur_beacon;

	return cur && cur->len == beacon->len &&
	       memcmp(beacon->data, cur->data, beacon->len) == 0;
}
/* Drop the cached beacon copy.  Caller must hold chip.mutex. */
static void zd_mac_free_cur_beacon_locked(struct zd_mac *mac)
{
	ZD_ASSERT(mutex_is_locked(&mac->chip.mutex));

	kfree_skb(mac->beacon.cur_beacon);	/* kfree_skb() is NULL-safe */
	mac->beacon.cur_beacon = NULL;
}
/* Locking wrapper around zd_mac_free_cur_beacon_locked(). */
static void zd_mac_free_cur_beacon(struct zd_mac *mac)
{
	mutex_lock(&mac->chip.mutex);
	zd_mac_free_cur_beacon_locked(mac);
	mutex_unlock(&mac->chip.mutex);
}
/*
 * zd_mac_config_beacon - upload a beacon template into the device FIFO
 * @hw: mac80211 hardware
 * @beacon: beacon frame; ownership is taken in all cases (either cached
 *          as mac->beacon.cur_beacon or freed before returning)
 * @in_intr: true when called from the beacon-done interrupt path, in
 *           which case we must not block waiting for the semaphore
 *
 * Writes the beacon plus 4 trailing zero bytes (reserved for the tail
 * CRC) into CR_BCN_FIFO, one byte per write, under the device-side
 * CR_BCN_FIFO_SEMAPHORE, then programs CR_BCN_PLCP_CFG. On a stuck
 * semaphore the device is scheduled for a USB reset.
 *
 * Returns 0 on success or a negative error code.
 */
static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon,
                                bool in_intr)
{
    struct zd_mac *mac = zd_hw_mac(hw);
    int r, ret, num_cmds, req_pos = 0;
    u32 tmp, j = 0;
    /* 4 more bytes for tail CRC */
    u32 full_len = beacon->len + 4;
    unsigned long end_jiffies, message_jiffies;
    struct zd_ioreq32 *ioreqs;

    mutex_lock(&mac->chip.mutex);

    /* Check if hw already has this beacon. */
    if (zd_mac_match_cur_beacon(mac, beacon)) {
        r = 0;
        goto out_nofree;
    }

    /* Alloc memory for full beacon write at once.
     * One length command, an extra CR_BCN_LENGTH command on zd1211b,
     * plus one command per payload/CRC byte. */
    num_cmds = 1 + zd_chip_is_zd1211b(&mac->chip) + full_len;
    ioreqs = kmalloc(num_cmds * sizeof(struct zd_ioreq32), GFP_KERNEL);
    if (!ioreqs) {
        r = -ENOMEM;
        goto out_nofree;
    }

    /* Request the device-side beacon FIFO semaphore (write 0 = claim). */
    r = zd_iowrite32_locked(&mac->chip, 0, CR_BCN_FIFO_SEMAPHORE);
    if (r < 0)
        goto out;
    r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
    if (r < 0)
        goto release_sema;
    /* From interrupt context we cannot sleep-poll; bail out if busy. */
    if (in_intr && tmp & 0x2) {
        r = -EBUSY;
        goto release_sema;
    }

    end_jiffies = jiffies + HZ / 2; /*~500ms*/
    message_jiffies = jiffies + HZ / 10; /*~100ms*/
    /* Poll bit 1 until the device grants the semaphore; complain every
     * ~100ms, give up (and reset the device) after ~500ms. */
    while (tmp & 0x2) {
        r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
        if (r < 0)
            goto release_sema;
        if (time_is_before_eq_jiffies(message_jiffies)) {
            message_jiffies = jiffies + HZ / 10;
            dev_err(zd_mac_dev(mac),
                    "CR_BCN_FIFO_SEMAPHORE not ready\n");
            if (time_is_before_eq_jiffies(end_jiffies)) {
                dev_err(zd_mac_dev(mac),
                        "Giving up beacon config.\n");
                r = -ETIMEDOUT;
                goto reset_device;
            }
        }
        msleep(20);
    }

    /* First command: total FIFO length (hardware expects len - 1). */
    ioreqs[req_pos].addr = CR_BCN_FIFO;
    ioreqs[req_pos].value = full_len - 1;
    req_pos++;
    if (zd_chip_is_zd1211b(&mac->chip)) {
        ioreqs[req_pos].addr = CR_BCN_LENGTH;
        ioreqs[req_pos].value = full_len - 1;
        req_pos++;
    }

    /* Beacon payload, one byte per write. */
    for (j = 0 ; j < beacon->len; j++) {
        ioreqs[req_pos].addr = CR_BCN_FIFO;
        ioreqs[req_pos].value = *((u8 *)(beacon->data + j));
        req_pos++;
    }

    /* 4 zero bytes where the hardware places the tail CRC. */
    for (j = 0; j < 4; j++) {
        ioreqs[req_pos].addr = CR_BCN_FIFO;
        ioreqs[req_pos].value = 0x0;
        req_pos++;
    }

    BUG_ON(req_pos != num_cmds);

    r = zd_iowrite32a_locked(&mac->chip, ioreqs, num_cmds);

release_sema:
    /*
     * Try very hard to release device beacon semaphore, as otherwise
     * device/driver can be left in unusable state.
     */
    end_jiffies = jiffies + HZ / 2; /*~500ms*/
    ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
    while (ret < 0) {
        if (in_intr || time_is_before_eq_jiffies(end_jiffies)) {
            ret = -ETIMEDOUT;
            break;
        }

        msleep(20);
        ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
    }

    if (ret < 0)
        dev_err(zd_mac_dev(mac), "Could not release "
                "CR_BCN_FIFO_SEMAPHORE!\n");
    if (r < 0 || ret < 0) {
        if (r >= 0)
            r = ret;

        /* We don't know if beacon was written successfully or not,
         * so clear current. */
        zd_mac_free_cur_beacon_locked(mac);

        goto out;
    }

    /* Beacon has now been written successfully, update current. */
    zd_mac_free_cur_beacon_locked(mac);
    mac->beacon.cur_beacon = beacon;
    beacon = NULL; /* ownership moved to the cache; don't free below */

    /* 802.11b/g 2.4G CCK 1Mb
     * 802.11a, not yet implemented, uses different values (see GPL vendor
     * driver)
     */
    r = zd_iowrite32_locked(&mac->chip, 0x00000400 | (full_len << 19),
                            CR_BCN_PLCP_CFG);
out:
    kfree(ioreqs);
out_nofree:
    kfree_skb(beacon);
    mutex_unlock(&mac->chip.mutex);

    return r;

reset_device:
    zd_mac_free_cur_beacon_locked(mac);
    kfree_skb(beacon);

    mutex_unlock(&mac->chip.mutex);
    kfree(ioreqs);

    /* semaphore stuck, reset device to avoid fw freeze later */
    dev_warn(zd_mac_dev(mac), "CR_BCN_FIFO_SEMAPHORE stuck, "
                              "resetting device...");
    usb_queue_reset_device(mac->chip.usb.intf);

    return r;
}
/*
 * fill_ctrlset - prepend and fill the zd_ctrlset header on a tx frame
 * @mac: zd_mac instance
 * @skb: frame to transmit; sizeof(struct zd_ctrlset) bytes are pushed
 *       in front of the 802.11 header
 *
 * Returns 0 on success, or the negative error from
 * zd_calc_tx_length_us() on failure (in which case the ctrlset has
 * already been pushed onto the skb).
 */
static int fill_ctrlset(struct zd_mac *mac,
                        struct sk_buff *skb)
{
    int r;
    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
    unsigned int frag_len = skb->len + FCS_LEN;
    unsigned int packet_length;
    struct ieee80211_rate *txrate;
    struct zd_ctrlset *cs = (struct zd_ctrlset *)
        skb_push(skb, sizeof(struct zd_ctrlset));
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

    ZD_ASSERT(frag_len <= 0xffff);

    /*
     * Firmware computes the duration itself (for all frames except PSPoll)
     * and needs the field set to 0 at input, otherwise firmware messes up
     * duration_id and sets bits 14 and 15 on.
     */
    if (!ieee80211_is_pspoll(hdr->frame_control))
        hdr->duration_id = 0;

    txrate = ieee80211_get_tx_rate(mac->hw, info);

    cs->modulation = txrate->hw_value;
    /* Short preamble uses the alternate hw rate value. */
    if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
        cs->modulation = txrate->hw_value_short;

    cs->tx_length = cpu_to_le16(frag_len);

    cs_set_control(mac, cs, hdr, info);

    packet_length = frag_len + sizeof(struct zd_ctrlset) + 10;
    ZD_ASSERT(packet_length <= 0xffff);
    /* ZD1211B: Computing the length difference this way, gives us
     * flexibility to compute the packet length.
     */
    cs->packet_length = cpu_to_le16(zd_chip_is_zd1211b(&mac->chip) ?
            packet_length - frag_len : packet_length);

    /*
     * CURRENT LENGTH:
     * - transmit frame length in microseconds
     * - seems to be derived from frame length
     * - see Cal_Us_Service() in zdinlinef.h
     * - if macp->bTxBurstEnable is enabled, then multiply by 4
     *  - bTxBurstEnable is never set in the vendor driver
     *
     * SERVICE:
     * - "for PLCP configuration"
     * - always 0 except in some situations at 802.11b 11M
     * - see line 53 of zdinlinef.h
     */
    cs->service = 0;
    r = zd_calc_tx_length_us(&cs->service, ZD_RATE(cs->modulation),
                             le16_to_cpu(cs->tx_length));
    if (r < 0)
        return r;
    cs->current_length = cpu_to_le16(r);
    cs->next_frame_length = 0;

    return 0;
}
/**
 * zd_op_tx - transmits a network frame to the device
 *
 * @hw: mac80211 hardware device
 * @control: the control structure (unused here; may be NULL)
 * @skb: socket buffer
 *
 * This function transmit an IEEE 802.11 network frame to the device. The
 * control block of the skbuff will be initialized. If necessary the incoming
 * mac80211 queues will be stopped.
 *
 * On any failure the skb is freed here; mac80211 is not notified of a
 * tx status for it.
 */
static void zd_op_tx(struct ieee80211_hw *hw,
                     struct ieee80211_tx_control *control,
                     struct sk_buff *skb)
{
    struct zd_mac *mac = zd_hw_mac(hw);
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
    int r;

    r = fill_ctrlset(mac, skb);
    if (r)
        goto fail;

    /* Stash hw so the completion path can recover it from the skb. */
    info->rate_driver_data[0] = hw;

    r = zd_usb_tx(&mac->chip.usb, skb);
    if (r)
        goto fail;
    return;

fail:
    dev_kfree_skb(skb);
}
/**
 * filter_ack - filters incoming packets for acknowledgements
 * @hw: the mac80211 device
 * @rx_hdr: received header
 * @stats: the status for the received packet
 *
 * This functions looks for ACK packets and tries to match them with the
 * frames in the tx queue. If a match is found the frame will be dequeued and
 * the upper layers is informed about the successful transmission. If
 * mac80211 queues have been stopped and the number of frames still to be
 * transmitted is low the queues will be opened again.
 *
 * Returns 1 if the frame was an ACK, 0 if it was ignored.
 */
static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
                      struct ieee80211_rx_status *stats)
{
    struct zd_mac *mac = zd_hw_mac(hw);
    struct sk_buff *skb;
    struct sk_buff_head *q;
    unsigned long flags;
    int found = 0;
    int i, position = 0;

    if (!ieee80211_is_ack(rx_hdr->frame_control))
        return 0;

    q = &mac->ack_wait_queue;
    spin_lock_irqsave(&q->lock, flags);
    /* Find the first queued frame whose transmitter address matches the
     * ACK's receiver address. position counts how deep the match is. */
    skb_queue_walk(q, skb) {
        struct ieee80211_hdr *tx_hdr;

        position++;

        /* The head entry was already acknowledged (ack_pending);
         * don't match it again. */
        if (mac->ack_pending && skb_queue_is_first(q, skb))
            continue;

        tx_hdr = (struct ieee80211_hdr *)skb->data;
        if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
        {
            found = 1;
            break;
        }
    }

    if (found) {
        /* Report status for all frames ahead of the match; the first
         * may carry the previously recorded ack signal. */
        for (i = 1; i < position; i++) {
            skb = __skb_dequeue(q);
            zd_mac_tx_status(hw, skb,
                             mac->ack_pending ? mac->ack_signal : 0,
                             NULL);
            mac->ack_pending = 0;
        }

        /* The matched frame stays queued; remember its ack until the
         * next pass (or dequeue it right away in AP mode below). */
        mac->ack_pending = 1;
        mac->ack_signal = stats->signal;

        /* Prevent pending tx-packet on AP-mode */
        if (mac->type == NL80211_IFTYPE_AP) {
            skb = __skb_dequeue(q);
            zd_mac_tx_status(hw, skb, mac->ack_signal, NULL);
            mac->ack_pending = 0;
        }
    }

    spin_unlock_irqrestore(&q->lock, flags);
    return 1;
}
/*
 * zd_mac_rx - process one received frame from the USB layer
 * @hw: mac80211 hardware
 * @buffer: raw receive buffer: PLCP header, 802.11 frame, FCS, then a
 *          trailing struct rx_status appended by the device
 * @length: total length of @buffer in bytes
 *
 * Validates the frame, fills in ieee80211_rx_status, filters ACKs for
 * internal tx tracking, copies the payload into a fresh skb and hands
 * it to mac80211 via ieee80211_rx_irqsafe().
 *
 * Returns 0 on success (including silently consumed ACKs), -EINVAL for
 * short or broken frames, -ENOMEM if no skb could be allocated.
 */
int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
{
    struct zd_mac *mac = zd_hw_mac(hw);
    struct ieee80211_rx_status stats;
    const struct rx_status *status;
    struct sk_buff *skb;
    int bad_frame = 0;
    __le16 fc;
    int need_padding;
    int i;
    u8 rate;

    /* Minimum: PLCP header + smallest 802.11 header + FCS + rx_status. */
    if (length < ZD_PLCP_HEADER_SIZE + 10 /* IEEE80211_1ADDR_LEN */ +
                 FCS_LEN + sizeof(struct rx_status))
        return -EINVAL;

    memset(&stats, 0, sizeof(stats));

    /* Note about pass_failed_fcs and pass_ctrl access below:
     * mac locking intentionally omitted here, as this is the only unlocked
     * reader and the only writer is configure_filter. Plus, if there were
     * any races accessing these variables, it wouldn't really matter.
     * If mac80211 ever provides a way for us to access filter flags
     * from outside configure_filter, we could improve on this. Also, this
     * situation may change once we implement some kind of DMA-into-skb
     * RX path. */

    /* Caller has to ensure that length >= sizeof(struct rx_status). */
    status = (struct rx_status *)
        (buffer + (length - sizeof(struct rx_status)));
    if (status->frame_status & ZD_RX_ERROR) {
        /* Pass CRC-failed frames up only if monitoring asked for them. */
        if (mac->pass_failed_fcs &&
            (status->frame_status & ZD_RX_CRC32_ERROR)) {
            stats.flag |= RX_FLAG_FAILED_FCS_CRC;
            bad_frame = 1;
        } else {
            return -EINVAL;
        }
    }

    stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
    stats.band = IEEE80211_BAND_2GHZ;
    stats.signal = zd_check_signal(hw, status->signal_strength);

    rate = zd_rx_rate(buffer, status);

    /* todo: return index in the big switches in zd_rx_rate instead */
    for (i = 0; i < mac->band.n_bitrates; i++)
        if (rate == mac->band.bitrates[i].hw_value)
            stats.rate_idx = i;

    /* Strip the PLCP header in front and the rx_status behind. */
    length -= ZD_PLCP_HEADER_SIZE + sizeof(struct rx_status);
    buffer += ZD_PLCP_HEADER_SIZE;

    /* Except for bad frames, filter each frame to see if it is an ACK, in
     * which case our internal TX tracking is updated. Normally we then
     * bail here as there's no need to pass ACKs on up to the stack, but
     * there is also the case where the stack has requested us to pass
     * control frames on up (pass_ctrl) which we must consider. */
    if (!bad_frame &&
            filter_ack(hw, (struct ieee80211_hdr *)buffer, &stats)
            && !mac->pass_ctrl)
        return 0;

    fc = get_unaligned((__le16*)buffer);
    /* QoS-data without a 4th address has a 26-byte header; pad by two
     * so the payload ends up 4-byte aligned. */
    need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc);

    skb = dev_alloc_skb(length + (need_padding ? 2 : 0));
    if (skb == NULL)
        return -ENOMEM;
    if (need_padding) {
        /* Make sure the payload data is 4 byte aligned. */
        skb_reserve(skb, 2);
    }

    /* FIXME : could we avoid this big memcpy ? */
    memcpy(skb_put(skb, length), buffer, length);

    memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
    ieee80211_rx_irqsafe(hw, skb);
    return 0;
}
/*
 * zd_op_add_interface - mac80211 add_interface callback
 *
 * The device supports a single virtual interface; a second add is
 * rejected. Only monitor, mesh-point, station, ad-hoc and AP interface
 * types are accepted.
 *
 * Returns 0 on success or -EOPNOTSUPP, or the result of
 * set_mac_and_bssid().
 */
static int zd_op_add_interface(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif)
{
    struct zd_mac *mac = zd_hw_mac(hw);

    /* using NL80211_IFTYPE_UNSPECIFIED to indicate no mode selected */
    if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
        return -EOPNOTSUPP;

    if (vif->type != NL80211_IFTYPE_MONITOR &&
        vif->type != NL80211_IFTYPE_MESH_POINT &&
        vif->type != NL80211_IFTYPE_STATION &&
        vif->type != NL80211_IFTYPE_ADHOC &&
        vif->type != NL80211_IFTYPE_AP)
        return -EOPNOTSUPP;

    mac->type = vif->type;
    mac->vif = vif;

    return set_mac_and_bssid(mac);
}
/*
 * zd_op_remove_interface - mac80211 remove_interface callback
 *
 * Resets the driver's interface bookkeeping, stops beaconing, clears
 * the hardware MAC address and drops the cached beacon.
 */
static void zd_op_remove_interface(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif)
{
    struct zd_mac *mac = zd_hw_mac(hw);
    mac->type = NL80211_IFTYPE_UNSPECIFIED;
    mac->vif = NULL;
    zd_set_beacon_interval(&mac->chip, 0, 0, NL80211_IFTYPE_UNSPECIFIED);
    zd_write_mac_addr(&mac->chip, NULL);

    zd_mac_free_cur_beacon(mac);
}
/*
 * zd_op_config - mac80211 config callback
 *
 * Records the new channel under the mac lock and programs it into the
 * chip. Returns the result of zd_chip_set_channel().
 */
static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
{
    struct zd_mac *mac = zd_hw_mac(hw);
    int channel = hw->conf.chandef.chan->hw_value;

    spin_lock_irq(&mac->lock);
    mac->channel = channel;
    spin_unlock_irq(&mac->lock);

    return zd_chip_set_channel(&mac->chip, channel);
}
/*
 * zd_beacon_done - beacon-interrupt bottom half (AP mode only)
 *
 * Called after the device signalled that a beacon was sent: flushes
 * buffered broadcast/multicast frames, uploads the next beacon (so the
 * TIM count advances) and records the time of this update for the
 * beacon watchdog.
 */
static void zd_beacon_done(struct zd_mac *mac)
{
    struct sk_buff *skb, *beacon;

    if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
        return;
    if (!mac->vif || mac->vif->type != NL80211_IFTYPE_AP)
        return;

    /*
     * Send out buffered broad- and multicast frames.
     */
    while (!ieee80211_queue_stopped(mac->hw, 0)) {
        skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
        if (!skb)
            break;
        zd_op_tx(mac->hw, NULL, skb);
    }

    /*
     * Fetch next beacon so that tim_count is updated.
     */
    beacon = ieee80211_beacon_get(mac->hw, mac->vif);
    if (beacon)
        zd_mac_config_beacon(mac->hw, beacon, true); /* in_intr=true: no sleeping */

    spin_lock_irq(&mac->lock);
    mac->beacon.last_update = jiffies;
    spin_unlock_irq(&mac->lock);
}
/*
 * zd_process_intr - workqueue handler for device interrupts
 *
 * Reads the interrupt status word previously stored in mac->intr_buffer
 * (under the mac lock), dispatches the beacon-done event, and finally
 * re-enables hardware interrupts.
 */
static void zd_process_intr(struct work_struct *work)
{
    u16 int_status;
    unsigned long flags;
    struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);

    spin_lock_irqsave(&mac->lock, flags);
    int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer + 4));
    spin_unlock_irqrestore(&mac->lock, flags);

    if (int_status & INT_CFG_NEXT_BCN) {
        /*dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");*/
        zd_beacon_done(mac);
    } else {
        dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
    }

    zd_chip_enable_hwint(&mac->chip);
}
/*
 * zd_op_prepare_multicast - mac80211 prepare_multicast callback
 *
 * Folds every address in @mc_list into the device's multicast hash and
 * packs the two 32-bit hash halves into the u64 that mac80211 later
 * passes to zd_op_configure_filter().
 */
static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
                                   struct netdev_hw_addr_list *mc_list)
{
    struct netdev_hw_addr *entry;
    struct zd_mc_hash hash;
    struct zd_mac *mac = zd_hw_mac(hw);

    zd_mc_clear(&hash);

    netdev_hw_addr_list_for_each(entry, mc_list) {
        dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", entry->addr);
        zd_mc_add_addr(&hash, entry->addr);
    }

    return ((u64)hash.high << 32) | hash.low;
}
#define SUPPORTED_FIF_FLAGS \
(FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \
FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)
/*
 * zd_op_configure_filter - mac80211 configure_filter callback
 * @hw: mac80211 hardware
 * @changed_flags: FIF_* flags that changed since the last call
 * @new_flags: requested FIF_* flags; trimmed to what we support
 * @multicast: packed hash from zd_op_prepare_multicast()
 *
 * Updates the software filter state (pass_failed_fcs, pass_ctrl,
 * multicast hash) under the mac lock and pushes the hash / rx filter
 * to the chip.
 */
static void zd_op_configure_filter(struct ieee80211_hw *hw,
                        unsigned int changed_flags,
                        unsigned int *new_flags,
                        u64 multicast)
{
    struct zd_mc_hash hash = {
        .low = multicast,
        .high = multicast >> 32,
    };
    struct zd_mac *mac = zd_hw_mac(hw);
    unsigned long flags;
    int r;

    /* Only deal with supported flags */
    changed_flags &= SUPPORTED_FIF_FLAGS;
    *new_flags &= SUPPORTED_FIF_FLAGS;

    /*
     * If multicast parameter (as returned by zd_op_prepare_multicast)
     * has changed, no bit in changed_flags is set. To handle this
     * situation, we do not return if changed_flags is 0. If we do so,
     * we will have some issue with IPv6 which uses multicast for link
     * layer address resolution.
     */
    if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI))
        zd_mc_add_all(&hash);

    spin_lock_irqsave(&mac->lock, flags);
    mac->pass_failed_fcs = !!(*new_flags & FIF_FCSFAIL);
    mac->pass_ctrl = !!(*new_flags & FIF_CONTROL);
    mac->multicast_hash = hash;
    spin_unlock_irqrestore(&mac->lock, flags);
    zd_chip_set_multicast_hash(&mac->chip, &hash);

    if (changed_flags & FIF_CONTROL) {
        r = set_rx_filter(mac);
        if (r)
            dev_err(zd_mac_dev(mac), "set_rx_filter error %d\n", r);
    }

    /* no handling required for FIF_OTHER_BSS as we don't currently
     * do BSSID filtering */
    /* FIXME: in future it would be nice to enable the probe response
     * filter (so that the driver doesn't see them) until
     * FIF_BCN_PRBRESP_PROMISC is set. however due to atomicity here, we'd
     * have to schedule work to enable prbresp reception, which might
     * happen too late. For now we'll just listen and forward them all the
     * time. */
}
/* Program the chip's RTS/CTS rate for the given preamble mode, holding
 * the chip mutex for the locked register access. */
static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble)
{
    mutex_lock(&mac->chip.mutex);
    zd_chip_set_rts_cts_rate_locked(&mac->chip, short_preamble);
    mutex_unlock(&mac->chip.mutex);
}
/*
 * zd_op_bss_info_changed - mac80211 bss_info_changed callback
 *
 * For beaconing interface types (mesh, ad-hoc, AP) this uploads a new
 * beacon and/or (re)programs the beacon interval and DTIM period; such
 * interfaces are always treated as "associated". For station mode,
 * association is derived from the BSSID being a valid ethernet address.
 * Also tracks short-preamble changes and pushes them to the chip.
 */
static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_bss_conf *bss_conf,
                                   u32 changes)
{
    struct zd_mac *mac = zd_hw_mac(hw);
    int associated;

    dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);

    if (mac->type == NL80211_IFTYPE_MESH_POINT ||
        mac->type == NL80211_IFTYPE_ADHOC ||
        mac->type == NL80211_IFTYPE_AP) {
        associated = true;
        if (changes & BSS_CHANGED_BEACON) {
            struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);

            if (beacon) {
                /* Keep the beacon interrupt from racing the
                 * FIFO upload. */
                zd_chip_disable_hwint(&mac->chip);
                zd_mac_config_beacon(hw, beacon, false);
                zd_chip_enable_hwint(&mac->chip);
            }
        }

        if (changes & BSS_CHANGED_BEACON_ENABLED) {
            u16 interval = 0;
            u8 period = 0;

            /* interval/period of 0 disables beaconing. */
            if (bss_conf->enable_beacon) {
                period = bss_conf->dtim_period;
                interval = bss_conf->beacon_int;
            }

            spin_lock_irq(&mac->lock);
            mac->beacon.period = period;
            mac->beacon.interval = interval;
            mac->beacon.last_update = jiffies;
            spin_unlock_irq(&mac->lock);

            zd_set_beacon_interval(&mac->chip, interval, period,
                                   mac->type);
        }
    } else
        associated = is_valid_ether_addr(bss_conf->bssid);

    spin_lock_irq(&mac->lock);
    mac->associated = associated;
    spin_unlock_irq(&mac->lock);

    /* TODO: do hardware bssid filtering */

    if (changes & BSS_CHANGED_ERP_PREAMBLE) {
        spin_lock_irq(&mac->lock);
        mac->short_preamble = bss_conf->use_short_preamble;
        spin_unlock_irq(&mac->lock);

        set_rts_cts(mac, bss_conf->use_short_preamble);
    }
}
static u64 zd_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct zd_mac *mac = zd_hw_mac(hw);
return zd_chip_get_tsf(&mac->chip);
}
/* mac80211 callback table for this driver. */
static const struct ieee80211_ops zd_ops = {
    .tx = zd_op_tx,
    .start = zd_op_start,
    .stop = zd_op_stop,
    .add_interface = zd_op_add_interface,
    .remove_interface = zd_op_remove_interface,
    .config = zd_op_config,
    .prepare_multicast = zd_op_prepare_multicast,
    .configure_filter = zd_op_configure_filter,
    .bss_info_changed = zd_op_bss_info_changed,
    .get_tsf = zd_op_get_tsf,
};
/*
 * zd_mac_alloc_hw - allocate and initialize the mac80211 hw structure
 * @intf: the USB interface this device was probed on
 *
 * Allocates an ieee80211_hw with an embedded zd_mac as driver-private
 * data, fills in the band/rate tables, capability flags and queue
 * setup, and initializes the chip, housekeeping and beacon state.
 *
 * Returns the hw on success, NULL on allocation failure.
 */
struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
{
    struct zd_mac *mac;
    struct ieee80211_hw *hw;

    hw = ieee80211_alloc_hw(sizeof(struct zd_mac), &zd_ops);
    if (!hw) {
        dev_dbg_f(&intf->dev, "out of memory\n");
        return NULL;
    }

    mac = zd_hw_mac(hw);

    memset(mac, 0, sizeof(*mac));
    spin_lock_init(&mac->lock);
    mac->hw = hw;

    mac->type = NL80211_IFTYPE_UNSPECIFIED;

    /* Private copies of the channel and rate tables, referenced by the
     * band structure below. */
    memcpy(mac->channels, zd_channels, sizeof(zd_channels));
    memcpy(mac->rates, zd_rates, sizeof(zd_rates));
    mac->band.n_bitrates = ARRAY_SIZE(zd_rates);
    mac->band.bitrates = mac->rates;
    mac->band.n_channels = ARRAY_SIZE(zd_channels);
    mac->band.channels = mac->channels;

    hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
    hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
            IEEE80211_HW_SIGNAL_UNSPEC |
            IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
            IEEE80211_HW_MFP_CAPABLE;

    hw->wiphy->interface_modes =
        BIT(NL80211_IFTYPE_MESH_POINT) |
        BIT(NL80211_IFTYPE_STATION) |
        BIT(NL80211_IFTYPE_ADHOC) |
        BIT(NL80211_IFTYPE_AP);

    hw->max_signal = 100;
    hw->queues = 1;
    hw->extra_tx_headroom = sizeof(struct zd_ctrlset);

    /*
     * Tell mac80211 that we support multi rate retries
     */
    hw->max_rates = IEEE80211_TX_MAX_RATES;
    hw->max_rate_tries = 18;    /* 9 rates * 2 retries/rate */

    skb_queue_head_init(&mac->ack_wait_queue);
    mac->ack_pending = 0;

    zd_chip_init(&mac->chip, hw, intf);
    housekeeping_init(mac);
    beacon_init(mac);
    INIT_WORK(&mac->process_intr, zd_process_intr);

    SET_IEEE80211_DEV(hw, &intf->dev);
    return hw;
}
#define BEACON_WATCHDOG_DELAY round_jiffies_relative(HZ)

/*
 * beacon_watchdog_handler - detect and recover from stalled beaconing
 *
 * Runs once per second while enabled (AP mode only). If no beacon-done
 * interrupt has updated beacon.last_update within roughly three beacon
 * intervals, the beacon template and interval are reprogrammed to kick
 * the hardware back into beaconing. Always rearms itself.
 */
static void beacon_watchdog_handler(struct work_struct *work)
{
    struct zd_mac *mac =
        container_of(work, struct zd_mac, beacon.watchdog_work.work);
    struct sk_buff *beacon;
    unsigned long timeout;
    int interval, period;

    if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
        goto rearm;
    if (mac->type != NL80211_IFTYPE_AP || !mac->vif)
        goto rearm;

    spin_lock_irq(&mac->lock);
    interval = mac->beacon.interval;
    period = mac->beacon.period;
    /* Three beacon intervals (interval is in TU = 1024 us). */
    timeout = mac->beacon.last_update +
            msecs_to_jiffies(interval * 1024 / 1000) * 3;
    spin_unlock_irq(&mac->lock);

    if (interval > 0 && time_is_before_jiffies(timeout)) {
        dev_dbg_f(zd_mac_dev(mac), "beacon interrupt stalled, "
                      "restarting. "
                      "(interval: %d, dtim: %d)\n",
                      interval, period);

        zd_chip_disable_hwint(&mac->chip);

        beacon = ieee80211_beacon_get(mac->hw, mac->vif);
        if (beacon) {
            /* Drop the cache so config_beacon re-uploads even an
             * identical template. */
            zd_mac_free_cur_beacon(mac);

            zd_mac_config_beacon(mac->hw, beacon, false);
        }

        zd_set_beacon_interval(&mac->chip, interval, period, mac->type);

        zd_chip_enable_hwint(&mac->chip);

        spin_lock_irq(&mac->lock);
        mac->beacon.last_update = jiffies;
        spin_unlock_irq(&mac->lock);
    }

rearm:
    queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
                       BEACON_WATCHDOG_DELAY);
}
/* Set up the beacon watchdog delayed work (not yet scheduled). */
static void beacon_init(struct zd_mac *mac)
{
    INIT_DELAYED_WORK(&mac->beacon.watchdog_work, beacon_watchdog_handler);
}

/* Start the beacon watchdog; stamps last_update so the first check
 * doesn't immediately trigger a restart. */
static void beacon_enable(struct zd_mac *mac)
{
    dev_dbg_f(zd_mac_dev(mac), "\n");

    mac->beacon.last_update = jiffies;
    queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
                       BEACON_WATCHDOG_DELAY);
}

/* Stop the beacon watchdog (waiting for a running instance) and drop
 * the cached beacon. */
static void beacon_disable(struct zd_mac *mac)
{
    dev_dbg_f(zd_mac_dev(mac), "\n");
    cancel_delayed_work_sync(&mac->beacon.watchdog_work);

    zd_mac_free_cur_beacon(mac);
}
#define LINK_LED_WORK_DELAY HZ

/*
 * link_led_handler - periodic LED update
 *
 * Once per second, reflects the association state on the link LED
 * (associated vs. scanning pattern). Always requeues itself.
 */
static void link_led_handler(struct work_struct *work)
{
    struct zd_mac *mac =
        container_of(work, struct zd_mac, housekeeping.link_led_work.work);
    struct zd_chip *chip = &mac->chip;
    int is_associated;
    int r;

    if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
        goto requeue;

    spin_lock_irq(&mac->lock);
    is_associated = mac->associated;
    spin_unlock_irq(&mac->lock);

    r = zd_chip_control_leds(chip,
                             is_associated ? ZD_LED_ASSOCIATED : ZD_LED_SCANNING);
    if (r)
        dev_dbg_f(zd_mac_dev(mac), "zd_chip_control_leds error %d\n", r);

requeue:
    queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work,
                       LINK_LED_WORK_DELAY);
}

/* Set up the LED housekeeping delayed work (not yet scheduled). */
static void housekeeping_init(struct zd_mac *mac)
{
    INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler);
}

/* Kick off LED housekeeping immediately. */
static void housekeeping_enable(struct zd_mac *mac)
{
    dev_dbg_f(zd_mac_dev(mac), "\n");
    queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work,
                       0);
}

/* Stop LED housekeeping and switch the LEDs off. */
static void housekeeping_disable(struct zd_mac *mac)
{
    dev_dbg_f(zd_mac_dev(mac), "\n");
    cancel_delayed_work_sync(&mac->housekeeping.link_led_work);
    zd_chip_control_leds(&mac->chip, ZD_LED_OFF);
}
| gpl-2.0 |
aquarism5-dev/android_kernel_bq_piccolo | drivers/staging/vt6655/power.c | 2319 | 9959 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: power.c
*
* Purpose: Handles 802.11 power management functions
*
* Author: Lyndon Chen
*
* Date: July 17, 2002
*
* Functions:
* PSvEnablePowerSaving - Enable Power Saving Mode
* PSvDiasblePowerSaving - Disable Power Saving Mode
* PSbConsiderPowerDown - Decide if we can Power Down
* PSvSendPSPOLL - Send PS-POLL packet
* PSbSendNullPacket - Send Null packet
* PSbIsNextTBTTWakeUp - Decide if we need to wake up at next Beacon
*
* Revision History:
*
*/
#include "ttype.h"
#include "mac.h"
#include "device.h"
#include "wmgr.h"
#include "power.h"
#include "wcmd.h"
#include "rxtx.h"
#include "card.h"
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
static int msglevel = MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*+
 *
 * Routine Description:
 * Enable hw power saving functions
 *
 * Parameters:
 * hDeviceContext - device context (PSDevice)
 * wListenInterval - listen interval in beacon periods; >= 2 enables
 * "listen next beacon" counting, otherwise every
 * beacon is received
 *
 * Return Value:
 * None.
 *
 -*/
void
PSvEnablePowerSaving(
    void *hDeviceContext,
    unsigned short wListenInterval
)
{
    PSDevice pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject pMgmt = pDevice->pMgmt;
    /* AID with bits 14/15 set, as used in the PS-POLL duration/ID field */
    unsigned short wAID = pMgmt->wCurrAID | BIT14 | BIT15;

    // set period of power up before TBTT
    VNSvOutPortW(pDevice->PortOffset + MAC_REG_PWBT, C_PWBT);
    if (pDevice->eOPMode != OP_MODE_ADHOC) {
        // set AID
        VNSvOutPortW(pDevice->PortOffset + MAC_REG_AIDATIM, wAID);
    } else {
        // set ATIM Window
        MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow);
    }
    // Set AutoSleep
    MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
    // Set HWUTSF
    MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);

    if (wListenInterval >= 2) {
        // clear always listen beacon
        MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
        //pDevice->wCFG &= ~CFG_ALB;
        // first time set listen next beacon
        MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
        pMgmt->wCountToWakeUp = wListenInterval;
    } else {
        // always listen beacon
        MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
        //pDevice->wCFG |= CFG_ALB;
        pMgmt->wCountToWakeUp = 0;
    }

    // enable power saving hw function
    MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
    pDevice->bEnablePSMode = true;

    if (pDevice->eOPMode == OP_MODE_ADHOC) {
//  bMgrPrepareBeaconToSend((void *)pDevice, pMgmt);
    }
    // We don't send null pkt in ad hoc mode since beacon will handle this.
    else if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) {
        // tell the AP we entered power-save mode (PWRMGT bit set)
        PSbSendNullPacket(pDevice);
    }
    pDevice->bPWBitOn = true;
    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "PS:Power Saving Mode Enable... \n");
    return;
}
/*+
 *
 * Routine Description:
 * Disable hw power saving functions
 *
 * Wakes the MAC, clears AutoSleep and HWUTSF, forces "always listen
 * beacon" mode, then (in infrastructure mode) notifies the AP that we
 * are awake with a null data frame.
 *
 * Return Value:
 * None.
 *
 -*/
void
PSvDisablePowerSaving(
    void *hDeviceContext
)
{
    PSDevice pDevice = (PSDevice)hDeviceContext;

    // disable power saving hw function
    MACbPSWakeup(pDevice->PortOffset);
    //clear AutoSleep
    MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
    //clear HWUTSF
    MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
    // set always listen beacon
    MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);

    pDevice->bEnablePSMode = false;

    /* NOTE(review): PSbSendNullPacket() returns early when
     * bEnablePSMode is false (see its guard), and bEnablePSMode was
     * just cleared above, so this call is likely a no-op unless
     * fTxDataInSleep is set (TxInSleep build) — verify intent. */
    if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)
        PSbSendNullPacket(pDevice);

    pDevice->bPWBitOn = false;
}
/*+
 *
 * Routine Description:
 * Consider to power down when no more packets to tx or rx.
 *
 * Parameters:
 * hDeviceContext - device context (PSDevice)
 * bCheckRxDMA - also require the RX DMA interrupt bits to be clear
 * bCheckCountToWakeUp - refuse when the wakeup countdown is about to
 * expire (0 or 1)
 *
 * Return Value:
 * true, if power down success
 * false, if fail
 -*/
bool
PSbConsiderPowerDown(
    void *hDeviceContext,
    bool bCheckRxDMA,
    bool bCheckCountToWakeUp
)
{
    PSDevice pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject pMgmt = pDevice->pMgmt;
    unsigned int uIdx;

    // check if already in Doze mode
    if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS))
        return true;

    if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
        // check if in TIM wake period
        if (pMgmt->bInTIMWake)
            return false;
    }

    // check scan state
    if (pDevice->bCmdRunning)
        return false;

    // Force PSEN on
    MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);

    // check if all TD are empty,
    for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx++) {
        if (pDevice->iTDUsed[uIdx] != 0)
            return false;
    }

    // check if rx isr is clear
    /* NOTE(review): with &&, power-down is refused only when BOTH
     * RXDMA0 and RXDMA1 interrupts are pending; the comment suggests
     * "any pending rx" should block doze, which would need || — verify
     * against hardware behavior before changing. */
    if (bCheckRxDMA &&
        ((pDevice->dwIsr & ISR_RXDMA0) != 0) &&
        ((pDevice->dwIsr & ISR_RXDMA1) != 0)) {
        return false;
    }

    if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
        if (bCheckCountToWakeUp &&
            (pMgmt->wCountToWakeUp == 0 || pMgmt->wCountToWakeUp == 1)) {
            return false;
        }
    }

    // no Tx, no Rx isr, now go to Doze
    MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_GO2DOZE);
    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Go to Doze ZZZZZZZZZZZZZZZ\n");
    return true;
}
/*+
 *
 * Routine Description:
 * Send PS-POLL packet
 *
 * Builds a PS-POLL control frame in the management packet pool. The
 * AID goes into the duration/ID field with bits 14 and 15 set, as
 * 802.11 requires for PS-POLL. A transmit failure is only logged.
 *
 * Return Value:
 * None.
 *
 -*/
void
PSvSendPSPOLL(
    void *hDeviceContext
)
{
    PSDevice pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject pMgmt = pDevice->pMgmt;
    PSTxMgmtPacket pTxPacket = NULL;

    memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_HDR_ADDR2_LEN);
    pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool;
    /* 802.11 header lives directly behind the packet descriptor */
    pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
    pTxPacket->p80211Header->sA2.wFrameCtl = cpu_to_le16(
        (
            WLAN_SET_FC_FTYPE(WLAN_TYPE_CTL) |
            WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PSPOLL) |
            WLAN_SET_FC_PWRMGT(0)
));
    pTxPacket->p80211Header->sA2.wDurationID = pMgmt->wCurrAID | BIT14 | BIT15;
    memcpy(pTxPacket->p80211Header->sA2.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
    memcpy(pTxPacket->p80211Header->sA2.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
    pTxPacket->cbMPDULen = WLAN_HDR_ADDR2_LEN;
    pTxPacket->cbPayloadLen = 0;

    // send the frame; CMD_STATUS_PENDING means it was queued successfully
    if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send PS-Poll packet failed..\n");
}
/*+
 *
 * Routine Description:
 * Send NULL packet to AP for notification power state of STA
 *
 * The PWRMGT bit in the frame control mirrors bEnablePSMode, so the AP
 * learns whether we are entering or leaving power save.
 *
 * Return Value:
 * true if the frame was queued, false otherwise.
 *
 -*/
bool
PSbSendNullPacket(
    void *hDeviceContext
)
{
    PSDevice pDevice = (PSDevice)hDeviceContext;
    PSTxMgmtPacket pTxPacket = NULL;
    PSMgmtObject pMgmt = pDevice->pMgmt;
    unsigned int uIdx;

    if (pDevice->bLinkPass == false) {
        return false;
    }
#ifdef TxInSleep
    if ((pDevice->bEnablePSMode == false) &&
        (pDevice->fTxDataInSleep == false)) {
        return false;
    }
#else
    if (pDevice->bEnablePSMode == false) {
        return false;
    }
#endif
    /* When power saving, only send the null frame once all TX
     * descriptors have drained. */
    if (pDevice->bEnablePSMode) {
        for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx++) {
            if (pDevice->iTDUsed[uIdx] != 0)
                return false;
        }
    }

    memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_NULLDATA_FR_MAXLEN);
    pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool;
    /* 802.11 header lives directly behind the packet descriptor */
    pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));

    if (pDevice->bEnablePSMode) {
        pTxPacket->p80211Header->sA3.wFrameCtl = cpu_to_le16(
            (
                WLAN_SET_FC_FTYPE(WLAN_TYPE_DATA) |
                WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_NULL) |
                WLAN_SET_FC_PWRMGT(1)
));
    } else {
        pTxPacket->p80211Header->sA3.wFrameCtl = cpu_to_le16(
            (
                WLAN_SET_FC_FTYPE(WLAN_TYPE_DATA) |
                WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_NULL) |
                WLAN_SET_FC_PWRMGT(0)
));
    }

    /* To-DS is set for frames addressed to the AP (non-IBSS). */
    if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
        pTxPacket->p80211Header->sA3.wFrameCtl |= cpu_to_le16((unsigned short)WLAN_SET_FC_TODS(1));
    }

    memcpy(pTxPacket->p80211Header->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
    memcpy(pTxPacket->p80211Header->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
    memcpy(pTxPacket->p80211Header->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
    pTxPacket->cbMPDULen = WLAN_HDR_ADDR3_LEN;
    pTxPacket->cbPayloadLen = 0;
    // send the frame
    if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send Null Packet failed !\n");
        return false;
    } else {
//  DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send Null Packet success....\n");
    }

    return true;
}
/*+
 *
 * Routine Description:
 * Check if Next TBTT must wake up
 *
 * Decrements the listen-interval countdown; when it hits 1, arms the
 * "listen next beacon" bit so the hardware wakes for the following TBTT.
 *
 * Return Value:
 * true when the next beacon will be listened to, false otherwise.
 *
 -*/
bool
PSbIsNextTBTTWakeUp(
    void *hDeviceContext
)
{
    PSDevice pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject pMgmt = pDevice->pMgmt;

    /* Listen intervals below 2 mean every beacon is received anyway. */
    if (pMgmt->wListenInterval < 2)
        return false;

    if (pMgmt->wCountToWakeUp == 0)
        pMgmt->wCountToWakeUp = pMgmt->wListenInterval;

    pMgmt->wCountToWakeUp--;
    if (pMgmt->wCountToWakeUp != 1)
        return false;

    // Turn on wake up to listen next beacon
    MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
    return true;
}
| gpl-2.0 |
sayeed99/flareM_old | drivers/media/radio/radio-cadet.c | 2575 | 16911 | /* radio-cadet.c - A video4linux driver for the ADS Cadet AM/FM Radio Card
*
* by Fred Gleason <fredg@wava.com>
* Version 0.3.3
*
* (Loosely) based on code for the Aztech radio card by
*
* Russell Kroll (rkroll@exploits.org)
* Quay Ly
* Donald Song
* Jason Lewis (jlewis@twilight.vtc.vsc.edu)
* Scott McGrath (smcgrath@twilight.vtc.vsc.edu)
* William McGrath (wmcgrath@twilight.vtc.vsc.edu)
*
* History:
* 2000-04-29 Russell Kroll <rkroll@exploits.org>
* Added ISAPnP detection for Linux 2.3/2.4
*
* 2001-01-10 Russell Kroll <rkroll@exploits.org>
* Removed dead CONFIG_RADIO_CADET_PORT code
* PnP detection on load is now default (no args necessary)
*
* 2002-01-17 Adam Belay <ambx1@neo.rr.com>
* Updated to latest pnp code
*
* 2003-01-31 Alan Cox <alan@lxorguk.ukuu.org.uk>
* Cleaned up locking, delay code, general odds and ends
*
* 2006-07-30 Hans J. Koch <koch@hjk-az.de>
* Changed API to V4L2
*/
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay */
#include <linux/videodev2.h> /* V4L2 API defs */
#include <linux/param.h>
#include <linux/pnp.h>
#include <linux/sched.h>
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
MODULE_AUTHOR("Fred Gleason, Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
MODULE_DESCRIPTION("A driver for the ADS Cadet AM/FM/RDS radio card.");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.3.4");
static int io = -1; /* default to isapnp activation */
static int radio_nr = -1;
module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of Cadet card (0x330,0x332,0x334,0x336,0x338,0x33a,0x33c,0x33e)");
module_param(radio_nr, int, 0);
#define RDS_BUFFER 256
#define RDS_RX_FLAG 1
#define MBS_RX_FLAG 2
/* Per-card state; the driver supports one statically allocated card. */
struct cadet {
	struct v4l2_device v4l2_dev;
	struct video_device vdev;
	struct v4l2_ctrl_handler ctrl_handler;
	int io;				/* base I/O port; -1 when no card found */
	bool is_fm_band;		/* true = FM band, false = AM band */
	u32 curfreq;			/* current frequency, 1/16 kHz units (V4L2_TUNER_CAP_LOW) */
	int tunestat;			/* AND-accumulated tuner status bits from last shift-register read */
	int sigstrength;		/* value from sigtable[]; 0 means not tuned */
	wait_queue_head_t read_queue;	/* readers blocked waiting for RDS data */
	struct timer_list readtimer;	/* 50 ms poll timer that drains the RDS fifo */
	u8 rdsin, rdsout, rdsstat;	/* ring indices (wrap at 256 via u8) and RDS-running flag */
	unsigned char rdsbuf[RDS_BUFFER];	/* RDS byte ring buffer */
	struct mutex lock;
	int reading;
};
static struct cadet cadet_card;
/*
* Signal Strength Threshold Values
* The V4L API spec does not define any particular unit for the signal
* strength value. These values are in microvolts of RF at the tuner's input.
*/
static u16 sigtable[2][4] = {
{ 1835, 2621, 4128, 65535 },
{ 2185, 4369, 13107, 65535 },
};
static const struct v4l2_frequency_band bands[] = {
{
.index = 0,
.type = V4L2_TUNER_RADIO,
.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
.rangelow = 8320, /* 520 kHz */
.rangehigh = 26400, /* 1650 kHz */
.modulation = V4L2_BAND_MODULATION_AM,
}, {
.index = 1,
.type = V4L2_TUNER_RADIO,
.capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS |
V4L2_TUNER_CAP_RDS_BLOCK_IO | V4L2_TUNER_CAP_LOW |
V4L2_TUNER_CAP_FREQ_BANDS,
.rangelow = 1400000, /* 87.5 MHz */
.rangehigh = 1728000, /* 108.0 MHz */
.modulation = V4L2_BAND_MODULATION_FM,
},
};
/* Report V4L2_TUNER_SUB_STEREO when the FM stereo pilot is detected. */
static int cadet_getstereo(struct cadet *dev)
{
	/* Stereo indication only exists on the FM band. */
	if (!dev->is_fm_band)
		return V4L2_TUNER_SUB_MONO;

	outb(7, dev->io);	/* Select tuner control */
	/* Status bit 6 is low while a stereo pilot is present. */
	return (inb(dev->io + 1) & 0x40) ? V4L2_TUNER_SUB_MONO
					 : V4L2_TUNER_SUB_STEREO;
}
/*
 * Clock the tuner's 25-bit shift register out of the card and return it.
 * Also AND-accumulates the per-bit status into dev->tunestat (bit 0x40 of
 * the result ends up clear when the tuner reports lock — see cadet_setfreq()).
 * The current volume/mute register is saved and restored around the read.
 * The exact outb/inb ordering is a hardware protocol; do not reorder.
 */
static unsigned cadet_gettune(struct cadet *dev)
{
	int curvol, i;
	unsigned fifo = 0;

	/*
	 * Prepare for read
	 */
	outb(7, dev->io);		/* Select tuner control */
	curvol = inb(dev->io + 1);	/* Save current volume/mute setting */
	outb(0x00, dev->io + 1);	/* Ensure WRITE-ENABLE is LOW */
	dev->tunestat = 0xffff;

	/*
	 * Read the shift register, MSB first, toggling the clock line
	 * between bits and sampling the status on each clock-high phase.
	 */
	for (i = 0; i < 25; i++) {
		fifo = (fifo << 1) | ((inb(dev->io + 1) >> 7) & 0x01);
		if (i < 24) {
			outb(0x01, dev->io + 1);
			dev->tunestat &= inb(dev->io + 1);
			outb(0x00, dev->io + 1);
		}
	}

	/*
	 * Restore volume/mute setting
	 */
	outb(curvol, dev->io + 1);
	return fifo;
}
/*
 * Read back the currently tuned frequency from the hardware and convert
 * it to V4L2 1/16 kHz units for the active band.
 */
static unsigned cadet_getfreq(struct cadet *dev)
{
	unsigned freq = 0, weight, fifo;
	int i;

	/* Fetch the tuner's raw shift-register contents. */
	fifo = cadet_gettune(dev);

	if (!dev->is_fm_band)	/* AM: low 15 bits hold (kHz + 450) */
		return ((fifo & 0x7fff) - 450) * 16;

	/* FM: 14 bits, LSB first, each step worth 12.5 kHz. */
	for (i = 0, weight = 12500; i < 14; i++, weight <<= 1, fifo >>= 1) {
		if (fifo & 0x01)
			freq += weight;
	}
	freq -= 10700000;		/* remove the 10.7 MHz IF offset */
	freq = (freq * 16) / 1000;	/* Hz -> 1/16 kHz units */
	return freq;
}
/*
 * Clock a 25-bit tuning word into the tuner's shift register, MSB first.
 * Control bits: 0x1c = SDM=1, SWE=1, SEN=1 with SCK held low; bit 0 is
 * the serial clock and bit 1 carries the data bit.  The write sequence
 * is a hardware protocol; do not reorder the outb() calls.
 */
static void cadet_settune(struct cadet *dev, unsigned fifo)
{
	int i;
	unsigned test;

	outb(7, dev->io);	/* Select tuner control */
	/*
	 * Write the shift register
	 */
	test = 0;
	test = (fifo >> 23) & 0x02;	/* Align data for SDO */
	test |= 0x1c;			/* SDM=1, SWE=1, SEN=1, SCK=0 */
	outb(7, dev->io);		/* Select tuner control */
	outb(test, dev->io + 1);	/* Initialize for write */
	for (i = 0; i < 25; i++) {
		test |= 0x01;		/* Toggle SCK High */
		outb(test, dev->io + 1);
		test &= 0xfe;		/* Toggle SCK Low */
		outb(test, dev->io + 1);
		fifo = fifo << 1;	/* Prepare the next bit */
		test = 0x1c | ((fifo >> 23) & 0x02);
		outb(test, dev->io + 1);
	}
}
/*
 * Tune the card to @freq (1/16 kHz units), clamped to the active band.
 * Builds the 14-bit FM (or AM) tuning word, then retries with
 * successively lower signal-strength thresholds (j = 3..0) until the
 * tuner reports lock (tunestat bit 0x40 clear), recording the matching
 * strength from sigtable[].  Finally resets the RDS decoder.
 */
static void cadet_setfreq(struct cadet *dev, unsigned freq)
{
	unsigned fifo;
	int i, j, test;
	int curvol;

	freq = clamp(freq, bands[dev->is_fm_band].rangelow,
			   bands[dev->is_fm_band].rangehigh);
	dev->curfreq = freq;
	/*
	 * Formulate a fifo command
	 */
	fifo = 0;
	if (dev->is_fm_band) {	/* FM */
		test = 102400;
		freq = freq / 16;	/* Make it kHz */
		freq += 10700;		/* IF is 10700 kHz */
		/* Successive approximation: 14 bits, 12.5 kHz resolution. */
		for (i = 0; i < 14; i++) {
			fifo = fifo << 1;
			if (freq >= test) {
				fifo |= 0x01;
				freq -= test;
			}
			test = test >> 1;
		}
	} else {	/* AM */
		fifo = (freq / 16) + 450;	/* Make it kHz */
		fifo |= 0x100000;		/* Select AM Band */
	}

	/*
	 * Save current volume/mute setting
	 */
	outb(7, dev->io);	/* Select tuner control */
	curvol = inb(dev->io + 1);

	/*
	 * Tune the card, trying the highest strength threshold first.
	 */
	for (j = 3; j > -1; j--) {
		cadet_settune(dev, fifo | (j << 16));
		outb(7, dev->io);	/* Select tuner control */
		outb(curvol, dev->io + 1);
		msleep(100);		/* give the PLL time to settle */
		cadet_gettune(dev);
		if ((dev->tunestat & 0x40) == 0) {	/* Tuned */
			dev->sigstrength = sigtable[dev->is_fm_band][j];
			goto reset_rds;
		}
	}
	dev->sigstrength = 0;	/* no lock at any threshold */
reset_rds:
	/* Reset the RDS decoder so stale data is not delivered. */
	outb(3, dev->io);
	outb(inb(dev->io + 1) & 0x7f, dev->io + 1);
}
static void cadet_handler(unsigned long data)
{
struct cadet *dev = (void *)data;
/* Service the RDS fifo */
if (mutex_trylock(&dev->lock)) {
outb(0x3, dev->io); /* Select RDS Decoder Control */
if ((inb(dev->io + 1) & 0x20) != 0)
printk(KERN_CRIT "cadet: RDS fifo overflow\n");
outb(0x80, dev->io); /* Select RDS fifo */
while ((inb(dev->io) & 0x80) != 0) {
dev->rdsbuf[dev->rdsin] = inb(dev->io + 1);
if (dev->rdsin + 1 == dev->rdsout)
printk(KERN_WARNING "cadet: RDS buffer overflow\n");
else
dev->rdsin++;
}
mutex_unlock(&dev->lock);
}
/*
* Service pending read
*/
if (dev->rdsin != dev->rdsout)
wake_up_interruptible(&dev->read_queue);
/*
* Clean up and exit
*/
init_timer(&dev->readtimer);
dev->readtimer.function = cadet_handler;
dev->readtimer.data = data;
dev->readtimer.expires = jiffies + msecs_to_jiffies(50);
add_timer(&dev->readtimer);
}
/*
 * Enable RDS capture: select the hardware RDS fifo and start the 50 ms
 * polling timer (cadet_handler keeps re-arming it).  Caller must hold
 * dev->lock; cadet_release() stops the timer on last close.
 */
static void cadet_start_rds(struct cadet *dev)
{
	dev->rdsstat = 1;
	outb(0x80, dev->io);	/* Select RDS fifo */
	init_timer(&dev->readtimer);
	dev->readtimer.function = cadet_handler;
	dev->readtimer.data = (unsigned long)dev;
	dev->readtimer.expires = jiffies + msecs_to_jiffies(50);
	add_timer(&dev->readtimer);
}
/*
 * read() for the radio node: deliver buffered RDS bytes, starting RDS
 * capture on first use.  Blocks until data is available unless
 * O_NONBLOCK is set.
 *
 * Fix: the previous version used interruptible_sleep_on(), which is
 * inherently racy — data arriving (and the wakeup firing) between the
 * emptiness test and the sleep was missed, stalling the reader until
 * the next wakeup.  wait_event_interruptible() re-checks the condition
 * under the waitqueue's own locking, and we loop in case another reader
 * consumed the data before we could retake dev->lock.
 */
static ssize_t cadet_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
	struct cadet *dev = video_drvdata(file);
	unsigned char readbuf[RDS_BUFFER];
	int i = 0;

	mutex_lock(&dev->lock);
	if (dev->rdsstat == 0)
		cadet_start_rds(dev);
	while (dev->rdsin == dev->rdsout) {
		if (file->f_flags & O_NONBLOCK) {
			i = -EWOULDBLOCK;
			goto unlock;
		}
		mutex_unlock(&dev->lock);
		if (wait_event_interruptible(dev->read_queue,
					     dev->rdsin != dev->rdsout))
			return -ERESTARTSYS;	/* interrupted by a signal */
		mutex_lock(&dev->lock);
	}
	/* Copy out as much as requested; u8 index wraps at RDS_BUFFER. */
	while (i < count && dev->rdsin != dev->rdsout)
		readbuf[i++] = dev->rdsbuf[dev->rdsout++];

	if (i && copy_to_user(data, readbuf, i))
		i = -EFAULT;
unlock:
	mutex_unlock(&dev->lock);
	return i;
}
/* VIDIOC_QUERYCAP: static card identification, no hardware access. */
static int vidioc_querycap(struct file *file, void *priv,
				struct v4l2_capability *v)
{
	v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
			 V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE;
	v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
	strlcpy(v->driver, "ADS Cadet", sizeof(v->driver));
	strlcpy(v->card, "ADS Cadet", sizeof(v->card));
	strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
	return 0;
}
/*
 * VIDIOC_G_TUNER: report the combined AM+FM tuner.  The advertised
 * range spans both bands; on FM the stereo and RDS subchannel flags are
 * probed from the hardware (the RDS decoder is reset, then re-checked
 * after 100 ms to see whether it re-synchronized).
 */
static int vidioc_g_tuner(struct file *file, void *priv,
				struct v4l2_tuner *v)
{
	struct cadet *dev = video_drvdata(file);

	if (v->index)
		return -EINVAL;
	v->type = V4L2_TUNER_RADIO;
	strlcpy(v->name, "Radio", sizeof(v->name));
	v->capability = bands[0].capability | bands[1].capability;
	v->rangelow = bands[0].rangelow;	/* 520 kHz (start of AM band) */
	v->rangehigh = bands[1].rangehigh;	/* 108.0 MHz (end of FM band) */
	if (dev->is_fm_band) {
		v->rxsubchans = cadet_getstereo(dev);
		/* Reset the RDS decoder, wait, then test the sync bit. */
		outb(3, dev->io);
		outb(inb(dev->io + 1) & 0x7f, dev->io + 1);
		mdelay(100);
		outb(3, dev->io);
		if (inb(dev->io + 1) & 0x80)
			v->rxsubchans |= V4L2_TUNER_SUB_RDS;
	} else {
		/* AM band: restrict the reported range, mono only. */
		v->rangelow = 8320;	/* 520 kHz */
		v->rangehigh = 26400;	/* 1650 kHz */
		v->rxsubchans = V4L2_TUNER_SUB_MONO;
	}
	v->audmode = V4L2_TUNER_MODE_STEREO;
	v->signal = dev->sigstrength;	/* We might need to modify scaling of this */
	return 0;
}
/* VIDIOC_S_TUNER: only tuner 0 exists and nothing is configurable. */
static int vidioc_s_tuner(struct file *file, void *priv,
				const struct v4l2_tuner *v)
{
	if (v->index != 0)
		return -EINVAL;
	return 0;
}
/* VIDIOC_ENUM_FREQ_BANDS: expose the two fixed AM/FM band descriptors. */
static int vidioc_enum_freq_bands(struct file *file, void *priv,
				struct v4l2_frequency_band *band)
{
	if (band->tuner != 0 || band->index >= ARRAY_SIZE(bands))
		return -EINVAL;
	*band = bands[band->index];
	return 0;
}
/* VIDIOC_G_FREQUENCY: return the cached current frequency (1/16 kHz). */
static int vidioc_g_frequency(struct file *file, void *priv,
				struct v4l2_frequency *f)
{
	struct cadet *dev = video_drvdata(file);

	if (f->tuner != 0)
		return -EINVAL;
	f->type = V4L2_TUNER_RADIO;
	f->frequency = dev->curfreq;
	return 0;
}
static int vidioc_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
struct cadet *dev = video_drvdata(file);
if (f->tuner)
return -EINVAL;
dev->is_fm_band =
f->frequency >= (bands[0].rangehigh + bands[1].rangelow) / 2;
cadet_setfreq(dev, f->frequency);
return 0;
}
/* Control handler: only V4L2_CID_AUDIO_MUTE is implemented. */
static int cadet_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct cadet *dev = container_of(ctrl->handler, struct cadet, ctrl_handler);

	if (ctrl->id != V4L2_CID_AUDIO_MUTE)
		return -EINVAL;

	outb(7, dev->io);	/* Select tuner control */
	/* 0x20 enables audio output, 0x00 mutes it. */
	outb(ctrl->val ? 0x00 : 0x20, dev->io + 1);
	return 0;
}
/* open(): create the v4l2 file handle; first opener (re)inits the RDS wait queue. */
static int cadet_open(struct file *file)
{
	struct cadet *dev = video_drvdata(file);
	int err;

	mutex_lock(&dev->lock);
	err = v4l2_fh_open(file);
	if (!err && v4l2_fh_is_singular_file(file))
		init_waitqueue_head(&dev->read_queue);
	mutex_unlock(&dev->lock);
	return err;
}
/*
 * release(): on last close, stop the RDS polling timer (if running)
 * before tearing down the file handle, so cadet_handler() cannot fire
 * against a closed device.
 */
static int cadet_release(struct file *file)
{
	struct cadet *dev = video_drvdata(file);

	mutex_lock(&dev->lock);
	if (v4l2_fh_is_singular_file(file) && dev->rdsstat) {
		del_timer_sync(&dev->readtimer);
		dev->rdsstat = 0;
	}
	v4l2_fh_release(file);
	mutex_unlock(&dev->lock);
	return 0;
}
/*
 * poll(): lazily start RDS capture the first time a reader polls for
 * input (double-checked under dev->lock), then report readability when
 * the ring buffer is non-empty.  Control events are merged in via
 * v4l2_ctrl_poll().
 */
static unsigned int cadet_poll(struct file *file, struct poll_table_struct *wait)
{
	struct cadet *dev = video_drvdata(file);
	unsigned long req_events = poll_requested_events(wait);
	unsigned int res = v4l2_ctrl_poll(file, wait);

	poll_wait(file, &dev->read_queue, wait);
	if (dev->rdsstat == 0 && (req_events & (POLLIN | POLLRDNORM))) {
		mutex_lock(&dev->lock);
		/* re-check under the lock: another poller may have won */
		if (dev->rdsstat == 0)
			cadet_start_rds(dev);
		mutex_unlock(&dev->lock);
	}
	if (dev->rdsin != dev->rdsout)
		res |= POLLIN | POLLRDNORM;
	return res;
}
static const struct v4l2_file_operations cadet_fops = {
.owner = THIS_MODULE,
.open = cadet_open,
.release = cadet_release,
.read = cadet_read,
.unlocked_ioctl = video_ioctl2,
.poll = cadet_poll,
};
static const struct v4l2_ioctl_ops cadet_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
.vidioc_enum_freq_bands = vidioc_enum_freq_bands,
.vidioc_log_status = v4l2_ctrl_log_status,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static const struct v4l2_ctrl_ops cadet_ctrl_ops = {
.s_ctrl = cadet_s_ctrl,
};
#ifdef CONFIG_PNP
static struct pnp_device_id cadet_pnp_devices[] = {
/* ADS Cadet AM/FM Radio Card */
{.id = "MSM0c24", .driver_data = 0},
{.id = ""}
};
MODULE_DEVICE_TABLE(pnp, cadet_pnp_devices);
/*
 * PnP probe: record the card's first I/O port in the module-level "io"
 * variable for cadet_init() to pick up.  Note the deliberate quirk of
 * returning the (positive) port number rather than 0 on success; only
 * one device is supported, so a second probe fails with -EBUSY.
 */
static int cadet_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
	if (!dev)
		return -ENODEV;
	/* only support one device */
	if (io > 0)
		return -EBUSY;
	if (!pnp_port_valid(dev, 0))
		return -ENODEV;

	io = pnp_port_start(dev, 0);

	printk(KERN_INFO "radio-cadet: PnP reports device at %#x\n", io);

	return io;
}
static struct pnp_driver cadet_pnp_driver = {
.name = "radio-cadet",
.id_table = cadet_pnp_devices,
.probe = cadet_pnp_probe,
.remove = NULL,
};
#else
static struct pnp_driver cadet_pnp_driver;
#endif
/*
 * Blind ISA probe: for each of the eight possible port locations, tune
 * to the bottom of the FM band and check whether the card reads the
 * same frequency back.  Leaves dev->io set on success, -1 on failure.
 */
static void cadet_probe(struct cadet *dev)
{
	static int iovals[8] = { 0x330, 0x332, 0x334, 0x336,
				 0x338, 0x33a, 0x33c, 0x33e };
	int i;

	for (i = 0; i < 8; i++) {
		int found;

		dev->io = iovals[i];
		if (!request_region(dev->io, 2, "cadet-probe"))
			continue;
		cadet_setfreq(dev, bands[1].rangelow);
		found = (cadet_getfreq(dev) == bands[1].rangelow);
		release_region(dev->io, 2);
		if (found)
			return;
	}
	dev->io = -1;
}
/*
* io should only be set if the user has used something like
* isapnp (the userspace program) to initialize this card for us
*/
/*
 * Module init: locate the card (module parameter, ISAPnP, then blind
 * probe), claim its I/O region, register the v4l2 device, the mute
 * control and the radio video device, and tune to the bottom of the FM
 * band.  Error paths unwind in reverse order of acquisition.
 */
static int __init cadet_init(void)
{
	struct cadet *dev = &cadet_card;
	struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
	struct v4l2_ctrl_handler *hdl;
	int res = -ENODEV;

	strlcpy(v4l2_dev->name, "cadet", sizeof(v4l2_dev->name));
	mutex_init(&dev->lock);

	/* If a probe was requested then probe ISAPnP first (safest) */
	if (io < 0)
		pnp_register_driver(&cadet_pnp_driver);
	dev->io = io;

	/* If that fails then probe unsafely if probe is requested */
	if (dev->io < 0)
		cadet_probe(dev);

	/* Else we bail out */
	if (dev->io < 0) {
#ifdef MODULE
		v4l2_err(v4l2_dev, "you must set an I/O address with io=0x330, 0x332, 0x334,\n");
		v4l2_err(v4l2_dev, "0x336, 0x338, 0x33a, 0x33c or 0x33e\n");
#endif
		goto fail;
	}
	if (!request_region(dev->io, 2, "cadet"))
		goto fail;
	res = v4l2_device_register(NULL, v4l2_dev);
	if (res < 0) {
		release_region(dev->io, 2);
		v4l2_err(v4l2_dev, "could not register v4l2_device\n");
		goto fail;
	}

	hdl = &dev->ctrl_handler;
	v4l2_ctrl_handler_init(hdl, 2);
	v4l2_ctrl_new_std(hdl, &cadet_ctrl_ops,
			V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
	v4l2_dev->ctrl_handler = hdl;
	if (hdl->error) {
		res = hdl->error;
		v4l2_err(v4l2_dev, "Could not register controls\n");
		goto err_hdl;
	}

	/* Start out tuned to the bottom of the FM band. */
	dev->is_fm_band = true;
	dev->curfreq = bands[dev->is_fm_band].rangelow;
	cadet_setfreq(dev, dev->curfreq);
	strlcpy(dev->vdev.name, v4l2_dev->name, sizeof(dev->vdev.name));
	dev->vdev.v4l2_dev = v4l2_dev;
	dev->vdev.fops = &cadet_fops;
	dev->vdev.ioctl_ops = &cadet_ioctl_ops;
	dev->vdev.release = video_device_release_empty;
	dev->vdev.lock = &dev->lock;
	set_bit(V4L2_FL_USE_FH_PRIO, &dev->vdev.flags);
	video_set_drvdata(&dev->vdev, dev);

	res = video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr);
	if (res < 0)
		goto err_hdl;
	v4l2_info(v4l2_dev, "ADS Cadet Radio Card at 0x%x\n", dev->io);
	return 0;

err_hdl:
	v4l2_ctrl_handler_free(hdl);
	v4l2_device_unregister(v4l2_dev);
	release_region(dev->io, 2);
fail:
	pnp_unregister_driver(&cadet_pnp_driver);
	return res;
}
/*
 * Module exit: unregister in reverse order of cadet_init(), mute the
 * card's audio output, then release the I/O region and the PnP driver.
 */
static void __exit cadet_exit(void)
{
	struct cadet *dev = &cadet_card;

	video_unregister_device(&dev->vdev);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(&dev->v4l2_dev);
	outb(7, dev->io);	/* Mute */
	outb(0x00, dev->io + 1);
	release_region(dev->io, 2);
	pnp_unregister_driver(&cadet_pnp_driver);
}
module_init(cadet_init);
module_exit(cadet_exit);
| gpl-2.0 |
MaxiCM-Private/android_kernel_lge_jagnm | sound/pci/hda/patch_conexant.c | 2831 | 137621 | /*
* HD audio interface patch for Conexant HDA audio codec
*
* Copyright (c) 2006 Pototskiy Akex <alex.pototskiy@gmail.com>
* Takashi Iwai <tiwai@suse.de>
* Tobin Davis <tdavis@dsl-only.net>
*
* This driver is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This driver is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/jack.h>
#include "hda_codec.h"
#include "hda_local.h"
#include "hda_beep.h"
#include "hda_jack.h"
#define CXT_PIN_DIR_IN 0x00
#define CXT_PIN_DIR_OUT 0x01
#define CXT_PIN_DIR_INOUT 0x02
#define CXT_PIN_DIR_IN_NOMICBIAS 0x03
#define CXT_PIN_DIR_INOUT_NOMICBIAS 0x04
#define CONEXANT_HP_EVENT 0x37
#define CONEXANT_MIC_EVENT 0x38
#define CONEXANT_LINE_EVENT 0x39
/* Conexant 5051 specific */
#define CXT5051_SPDIF_OUT 0x12
#define CXT5051_PORTB_EVENT 0x38
#define CXT5051_PORTC_EVENT 0x39
#define AUTO_MIC_PORTB (1 << 1)
#define AUTO_MIC_PORTC (1 << 2)
struct pin_dac_pair {
hda_nid_t pin;
hda_nid_t dac;
int type;
};
struct imux_info {
hda_nid_t pin; /* input pin NID */
hda_nid_t adc; /* connected ADC NID */
hda_nid_t boost; /* optional boost volume NID */
int index; /* corresponding to autocfg.input */
};
struct conexant_spec {
const struct snd_kcontrol_new *mixers[5];
int num_mixers;
hda_nid_t vmaster_nid;
struct hda_vmaster_mute_hook vmaster_mute;
bool vmaster_mute_led;
const struct hda_verb *init_verbs[5]; /* initialization verbs
* don't forget NULL
* termination!
*/
unsigned int num_init_verbs;
/* playback */
struct hda_multi_out multiout; /* playback set-up
* max_channels, dacs must be set
* dig_out_nid and hp_nid are optional
*/
unsigned int cur_eapd;
unsigned int hp_present;
unsigned int line_present;
unsigned int auto_mic;
int auto_mic_ext; /* imux_pins[] index for ext mic */
int auto_mic_dock; /* imux_pins[] index for dock mic */
int auto_mic_int; /* imux_pins[] index for int mic */
unsigned int need_dac_fix;
hda_nid_t slave_dig_outs[2];
/* capture */
unsigned int num_adc_nids;
const hda_nid_t *adc_nids;
hda_nid_t dig_in_nid; /* digital-in NID; optional */
unsigned int cur_adc_idx;
hda_nid_t cur_adc;
unsigned int cur_adc_stream_tag;
unsigned int cur_adc_format;
const struct hda_pcm_stream *capture_stream;
/* capture source */
const struct hda_input_mux *input_mux;
const hda_nid_t *capsrc_nids;
unsigned int cur_mux[3];
/* channel model */
const struct hda_channel_mode *channel_mode;
int num_channel_mode;
/* PCM information */
struct hda_pcm pcm_rec[2]; /* used in build_pcms() */
unsigned int spdif_route;
/* dynamic controls, init_verbs and input_mux */
struct auto_pin_cfg autocfg;
struct hda_input_mux private_imux;
struct imux_info imux_info[HDA_MAX_NUM_INPUTS];
hda_nid_t private_adc_nids[HDA_MAX_NUM_INPUTS];
hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
struct pin_dac_pair dac_info[8];
int dac_info_filled;
unsigned int port_d_mode;
unsigned int auto_mute:1; /* used in auto-parser */
unsigned int detect_line:1; /* Line-out detection enabled */
unsigned int automute_lines:1; /* automute line-out as well */
unsigned int automute_hp_lo:1; /* both HP and LO available */
unsigned int dell_automute:1;
unsigned int dell_vostro:1;
unsigned int ideapad:1;
unsigned int thinkpad:1;
unsigned int hp_laptop:1;
unsigned int asus:1;
unsigned int pin_eapd_ctrls:1;
unsigned int adc_switching:1;
unsigned int ext_mic_present;
unsigned int recording;
void (*capture_prepare)(struct hda_codec *codec);
void (*capture_cleanup)(struct hda_codec *codec);
/* OLPC XO-1.5 supports DC input mode (e.g. for use with analog sensors)
* through the microphone jack.
* When the user enables this through a mixer switch, both internal and
* external microphones are disabled. Gain is fixed at 0dB. In this mode,
* we also allow the bias to be configured through a separate mixer
* control. */
unsigned int dc_enable;
unsigned int dc_input_bias; /* offset into cxt5066_olpc_dc_bias */
unsigned int mic_boost; /* offset into cxt5066_analog_mic_boost */
unsigned int beep_amp;
/* extra EAPD pins */
unsigned int num_eapds;
hda_nid_t eapds[4];
};
/* Analog playback open: delegate to the generic multi-out helper. */
static int conexant_playback_pcm_open(struct hda_pcm_stream *hinfo,
				      struct hda_codec *codec,
				      struct snd_pcm_substream *substream)
{
	struct conexant_spec *spec = codec->spec;
	return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream,
					     hinfo);
}
/* Analog playback prepare: set up the multi-out DACs for the stream. */
static int conexant_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
					 struct hda_codec *codec,
					 unsigned int stream_tag,
					 unsigned int format,
					 struct snd_pcm_substream *substream)
{
	struct conexant_spec *spec = codec->spec;
	return snd_hda_multi_out_analog_prepare(codec, &spec->multiout,
						stream_tag,
						format, substream);
}
/* Analog playback cleanup: tear down the multi-out stream state. */
static int conexant_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
					 struct hda_codec *codec,
					 struct snd_pcm_substream *substream)
{
	struct conexant_spec *spec = codec->spec;
	return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
}
/*
* Digital out
*/
/* Digital (S/PDIF) playback open: claim the shared digital out. */
static int conexant_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
					  struct hda_codec *codec,
					  struct snd_pcm_substream *substream)
{
	struct conexant_spec *spec = codec->spec;
	return snd_hda_multi_out_dig_open(codec, &spec->multiout);
}
/* Digital (S/PDIF) playback close: release the shared digital out. */
static int conexant_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
					   struct hda_codec *codec,
					   struct snd_pcm_substream *substream)
{
	struct conexant_spec *spec = codec->spec;
	return snd_hda_multi_out_dig_close(codec, &spec->multiout);
}
/* Digital (S/PDIF) playback prepare: program the digital out stream. */
static int conexant_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
					     struct hda_codec *codec,
					     unsigned int stream_tag,
					     unsigned int format,
					     struct snd_pcm_substream *substream)
{
	struct conexant_spec *spec = codec->spec;
	return snd_hda_multi_out_dig_prepare(codec, &spec->multiout,
					     stream_tag,
					     format, substream);
}
/*
* Analog capture
*/
/*
 * Analog capture prepare: run the model-specific hook (if any), then
 * bind the ADC for this substream to the stream tag/format.
 */
static int conexant_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
					struct hda_codec *codec,
					unsigned int stream_tag,
					unsigned int format,
					struct snd_pcm_substream *substream)
{
	struct conexant_spec *spec = codec->spec;
	if (spec->capture_prepare)
		spec->capture_prepare(codec);
	snd_hda_codec_setup_stream(codec, spec->adc_nids[substream->number],
				   stream_tag, 0, format);
	return 0;
}
/*
 * Analog capture cleanup: unbind the ADC stream, then run the
 * model-specific cleanup hook (if any).
 */
static int conexant_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
					struct hda_codec *codec,
					struct snd_pcm_substream *substream)
{
	struct conexant_spec *spec = codec->spec;
	snd_hda_codec_cleanup_stream(codec, spec->adc_nids[substream->number]);
	if (spec->capture_cleanup)
		spec->capture_cleanup(codec);
	return 0;
}
static const struct hda_pcm_stream conexant_pcm_analog_playback = {
.substreams = 1,
.channels_min = 2,
.channels_max = 2,
.nid = 0, /* fill later */
.ops = {
.open = conexant_playback_pcm_open,
.prepare = conexant_playback_pcm_prepare,
.cleanup = conexant_playback_pcm_cleanup
},
};
static const struct hda_pcm_stream conexant_pcm_analog_capture = {
.substreams = 1,
.channels_min = 2,
.channels_max = 2,
.nid = 0, /* fill later */
.ops = {
.prepare = conexant_capture_pcm_prepare,
.cleanup = conexant_capture_pcm_cleanup
},
};
static const struct hda_pcm_stream conexant_pcm_digital_playback = {
.substreams = 1,
.channels_min = 2,
.channels_max = 2,
.nid = 0, /* fill later */
.ops = {
.open = conexant_dig_playback_pcm_open,
.close = conexant_dig_playback_pcm_close,
.prepare = conexant_dig_playback_pcm_prepare
},
};
static const struct hda_pcm_stream conexant_pcm_digital_capture = {
.substreams = 1,
.channels_min = 2,
.channels_max = 2,
/* NID is set in alc_build_pcms */
};
/*
 * CX5051 capture prepare: this codec switches ADCs at runtime, so cache
 * the chosen ADC plus stream tag/format in the spec (for re-programming
 * on ADC switch) before setting up the stream.
 */
static int cx5051_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
				      struct hda_codec *codec,
				      unsigned int stream_tag,
				      unsigned int format,
				      struct snd_pcm_substream *substream)
{
	struct conexant_spec *spec = codec->spec;
	spec->cur_adc = spec->adc_nids[spec->cur_adc_idx];
	spec->cur_adc_stream_tag = stream_tag;
	spec->cur_adc_format = format;
	snd_hda_codec_setup_stream(codec, spec->cur_adc, stream_tag, 0, format);
	return 0;
}
/* CX5051 capture cleanup: stop the active ADC and clear the cache. */
static int cx5051_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
				      struct hda_codec *codec,
				      struct snd_pcm_substream *substream)
{
	struct conexant_spec *spec = codec->spec;
	snd_hda_codec_cleanup_stream(codec, spec->cur_adc);
	spec->cur_adc = 0;
	return 0;
}
static const struct hda_pcm_stream cx5051_pcm_analog_capture = {
.substreams = 1,
.channels_min = 2,
.channels_max = 2,
.nid = 0, /* fill later */
.ops = {
.prepare = cx5051_capture_pcm_prepare,
.cleanup = cx5051_capture_pcm_cleanup
},
};
/*
 * Build the PCM stream table: always one analog PCM (playback +
 * capture), plus an optional digital PCM when a digital-out NID exists.
 * The capture template is chosen per model: an explicit override in
 * spec->capture_stream, the CX5051-specific variant for vendor id
 * 0x14f15051, or the generic template with one substream per ADC.
 */
static int conexant_build_pcms(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	struct hda_pcm *info = spec->pcm_rec;

	codec->num_pcms = 1;
	codec->pcm_info = info;

	info->name = "CONEXANT Analog";
	info->stream[SNDRV_PCM_STREAM_PLAYBACK] = conexant_pcm_analog_playback;
	info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max =
		spec->multiout.max_channels;
	info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid =
		spec->multiout.dac_nids[0];
	if (spec->capture_stream)
		info->stream[SNDRV_PCM_STREAM_CAPTURE] = *spec->capture_stream;
	else {
		if (codec->vendor_id == 0x14f15051)
			info->stream[SNDRV_PCM_STREAM_CAPTURE] =
				cx5051_pcm_analog_capture;
		else {
			info->stream[SNDRV_PCM_STREAM_CAPTURE] =
				conexant_pcm_analog_capture;
			info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams =
				spec->num_adc_nids;
		}
	}
	info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adc_nids[0];

	if (spec->multiout.dig_out_nid) {
		/* Second PCM device for S/PDIF out (and optional dig in). */
		info++;
		codec->num_pcms++;
		info->name = "Conexant Digital";
		info->pcm_type = HDA_PCM_TYPE_SPDIF;
		info->stream[SNDRV_PCM_STREAM_PLAYBACK] =
			conexant_pcm_digital_playback;
		info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid =
			spec->multiout.dig_out_nid;
		if (spec->dig_in_nid) {
			info->stream[SNDRV_PCM_STREAM_CAPTURE] =
				conexant_pcm_digital_capture;
			info->stream[SNDRV_PCM_STREAM_CAPTURE].nid =
				spec->dig_in_nid;
		}
		if (spec->slave_dig_outs[0])
			codec->slave_dig_outs = spec->slave_dig_outs;
	}

	return 0;
}
/* "Capture Source" control: enumerate the input-mux items. */
static int conexant_mux_enum_info(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	return snd_hda_input_mux_info(spec->input_mux, uinfo);
}
/* "Capture Source" control: report the cached selection for this ADC. */
static int conexant_mux_enum_get(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);

	ucontrol->value.enumerated.item[0] = spec->cur_mux[idx];
	return 0;
}
/*
 * "Capture Source" control: route the selected input to this ADC's
 * capture-source NID and update the cached selection.
 */
static int conexant_mux_enum_put(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol,
				     spec->capsrc_nids[adc_idx],
				     &spec->cur_mux[adc_idx]);
}
/*
 * Power-state hook: set the function group's power state and propagate
 * it to all widgets.  The delays work around codecs that respond too
 * slowly around D3/D0 transitions (see the timeout note below).
 */
static void conexant_set_power(struct hda_codec *codec, hda_nid_t fg,
			       unsigned int power_state)
{
	/* let the stream settle before entering D3 */
	if (power_state == AC_PWRST_D3)
		msleep(100);
	snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE,
			   power_state);
	/* partial workaround for "azx_get_response timeout" */
	if (power_state == AC_PWRST_D0)
		msleep(10);
	snd_hda_codec_set_power_to_all(codec, fg, power_state, true);
}
/* Codec init: send every registered init-verb sequence to the codec. */
static int conexant_init(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	int i;

	for (i = 0; i < spec->num_init_verbs; i++)
		snd_hda_sequence_write(codec, spec->init_verbs[i]);
	return 0;
}
/* Codec free: detach the beep device and release the spec. */
static void conexant_free(struct hda_codec *codec)
{
	snd_hda_detach_beep_device(codec);
	kfree(codec->spec);
}
static const struct snd_kcontrol_new cxt_capture_mixers[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Source",
.info = conexant_mux_enum_info,
.get = conexant_mux_enum_get,
.put = conexant_mux_enum_put
},
{}
};
#ifdef CONFIG_SND_HDA_INPUT_BEEP
/* additional beep mixers; the actual parameters are overwritten at build */
static const struct snd_kcontrol_new cxt_beep_mixer[] = {
HDA_CODEC_VOLUME_MONO("Beep Playback Volume", 0, 1, 0, HDA_OUTPUT),
HDA_CODEC_MUTE_BEEP_MONO("Beep Playback Switch", 0, 1, 0, HDA_OUTPUT),
{ } /* end */
};
#endif
static const char * const slave_pfxs[] = {
"Headphone", "Speaker", "Front", "Surround", "CLFE",
NULL
};
/*
 * Build all mixer controls: the model-specific mixer arrays, S/PDIF
 * out/in controls, virtual-master volume/switch (when a vmaster NID is
 * set and no master control exists yet), the capture-source mux, and
 * optional beep controls.  Returns 0 or the first error encountered.
 */
static int conexant_build_controls(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	unsigned int i;
	int err;

	for (i = 0; i < spec->num_mixers; i++) {
		err = snd_hda_add_new_ctls(codec, spec->mixers[i]);
		if (err < 0)
			return err;
	}
	if (spec->multiout.dig_out_nid) {
		err = snd_hda_create_spdif_out_ctls(codec,
						    spec->multiout.dig_out_nid,
						    spec->multiout.dig_out_nid);
		if (err < 0)
			return err;
		err = snd_hda_create_spdif_share_sw(codec,
						    &spec->multiout);
		if (err < 0)
			return err;
		spec->multiout.share_spdif = 1;
	}
	if (spec->dig_in_nid) {
		err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in_nid);
		if (err < 0)
			return err;
	}

	/* if we have no master control, let's create it */
	if (spec->vmaster_nid &&
	    !snd_hda_find_mixer_ctl(codec, "Master Playback Volume")) {
		unsigned int vmaster_tlv[4];
		snd_hda_set_vmaster_tlv(codec, spec->vmaster_nid,
					HDA_OUTPUT, vmaster_tlv);
		err = snd_hda_add_vmaster(codec, "Master Playback Volume",
					  vmaster_tlv, slave_pfxs,
					  "Playback Volume");
		if (err < 0)
			return err;
	}
	if (spec->vmaster_nid &&
	    !snd_hda_find_mixer_ctl(codec, "Master Playback Switch")) {
		err = __snd_hda_add_vmaster(codec, "Master Playback Switch",
					    NULL, slave_pfxs,
					    "Playback Switch", true,
					    &spec->vmaster_mute.sw_kctl);
		if (err < 0)
			return err;
	}

	if (spec->input_mux) {
		err = snd_hda_add_new_ctls(codec, cxt_capture_mixers);
		if (err < 0)
			return err;
	}

#ifdef CONFIG_SND_HDA_INPUT_BEEP
	/* create beep controls if needed */
	if (spec->beep_amp) {
		const struct snd_kcontrol_new *knew;
		for (knew = cxt_beep_mixer; knew->name; knew++) {
			struct snd_kcontrol *kctl;
			kctl = snd_ctl_new1(knew, codec);
			if (!kctl)
				return -ENOMEM;
			/* the amp NID/index is patched in at build time */
			kctl->private_value = spec->beep_amp;
			err = snd_hda_ctl_add(codec, 0, kctl);
			if (err < 0)
				return err;
		}
	}
#endif

	return 0;
}
#ifdef CONFIG_SND_HDA_POWER_SAVE
static int conexant_suspend(struct hda_codec *codec, pm_message_t state)
{
snd_hda_shutup_pins(codec);
return 0;
}
#endif
static const struct hda_codec_ops conexant_patch_ops = {
.build_controls = conexant_build_controls,
.build_pcms = conexant_build_pcms,
.init = conexant_init,
.free = conexant_free,
.set_power_state = conexant_set_power,
#ifdef CONFIG_SND_HDA_POWER_SAVE
.suspend = conexant_suspend,
#endif
.reboot_notify = snd_hda_shutup_pins,
};
#ifdef CONFIG_SND_HDA_INPUT_BEEP
#define set_beep_amp(spec, nid, idx, dir) \
((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 1, idx, dir))
#else
#define set_beep_amp(spec, nid, idx, dir) /* NOP */
#endif
static int patch_conexant_auto(struct hda_codec *codec);
/*
* EAPD control
* the private value = nid | (invert << 8)
*/
#define cxt_eapd_info snd_ctl_boolean_mono_info
/* EAPD switch get: report the cached state, honoring the invert flag
 * encoded in bit 8 of the control's private value. */
static int cxt_eapd_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	int invert = (kcontrol->private_value >> 8) & 1;

	ucontrol->value.integer.value[0] =
		invert ? !spec->cur_eapd : spec->cur_eapd;
	return 0;
}
/*
 * EAPD switch put: private value = nid | (invert << 8).  Writes the
 * EAPD enable bit to the codec only when the state actually changes;
 * returns 1 on change, 0 when the value was already current (standard
 * ALSA put semantics).
 */
static int cxt_eapd_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	int invert = (kcontrol->private_value >> 8) & 1;
	hda_nid_t nid = kcontrol->private_value & 0xff;
	unsigned int eapd;

	eapd = !!ucontrol->value.integer.value[0];
	if (invert)
		eapd = !eapd;
	if (eapd == spec->cur_eapd)
		return 0;

	spec->cur_eapd = eapd;
	snd_hda_codec_write_cache(codec, nid,
				  0, AC_VERB_SET_EAPD_BTLENABLE,
				  eapd ? 0x02 : 0x00);
	return 1;
}
/* controls for test mode */
#ifdef CONFIG_SND_DEBUG
#define CXT_EAPD_SWITCH(xname, nid, mask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = 0, \
.info = cxt_eapd_info, \
.get = cxt_eapd_get, \
.put = cxt_eapd_put, \
.private_value = nid | (mask<<16) }
/* Channel-mode control (test mode): enumerate the available modes. */
static int conexant_ch_mode_info(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_info *uinfo)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	return snd_hda_ch_mode_info(codec, uinfo, spec->channel_mode,
				    spec->num_channel_mode);
}
/* Channel-mode control (test mode): report the current mode. */
static int conexant_ch_mode_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	return snd_hda_ch_mode_get(codec, ucontrol, spec->channel_mode,
				   spec->num_channel_mode,
				   spec->multiout.max_channels);
}
/*
 * Channel-mode control (test mode): switch the mode and, when the
 * model requires it, keep the DAC count in lock-step with the channel
 * count (two channels per DAC).
 */
static int conexant_ch_mode_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	int err = snd_hda_ch_mode_put(codec, ucontrol, spec->channel_mode,
				      spec->num_channel_mode,
				      &spec->multiout.max_channels);
	if (err >= 0 && spec->need_dac_fix)
		spec->multiout.num_dacs = spec->multiout.max_channels / 2;
	return err;
}
/* "Pin mode" control macro, used only by the CONFIG_SND_DEBUG test mixers.
 * NOTE(review): despite the name, this binds the conexant_ch_mode_*
 * channel-mode callbacks defined above; "dir" lands in bits 16+ of
 * private_value, which those callbacks never read — confirm intent.
 */
#define CXT_PIN_MODE(xname, nid, dir) \
	{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = 0, \
	  .info = conexant_ch_mode_info, \
	  .get = conexant_ch_mode_get, \
	  .put = conexant_ch_mode_put, \
	  .private_value = nid | (dir<<16) }
#endif /* CONFIG_SND_DEBUG */
/* Conexant 5045 specific */

/* Widget NIDs used by the CXT5045 code below: one DAC, one ADC that is
 * also the capture-source selector, and an SPDIF-out converter.
 */
static const hda_nid_t cxt5045_dac_nids[1] = { 0x19 };
static const hda_nid_t cxt5045_adc_nids[1] = { 0x1a };
static const hda_nid_t cxt5045_capsrc_nids[1] = { 0x1a };
#define CXT5045_SPDIF_OUT 0x18

/* stereo only; no alternative channel setups */
static const struct hda_channel_mode cxt5045_modes[1] = {
	{ 2, NULL },
};

/* Capture sources; the second field is the connection index on NID 0x1a. */
static const struct hda_input_mux cxt5045_capture_source = {
	.num_items = 2,
	.items = {
		{ "Internal Mic", 0x1 },
		{ "Mic",          0x2 },
	}
};

/* Benq variant adds Line and the loopback mixer as capture sources. */
static const struct hda_input_mux cxt5045_capture_source_benq = {
	.num_items = 4,
	.items = {
		{ "Internal Mic", 0x1 },
		{ "Mic",          0x2 },
		{ "Line",         0x3 },
		{ "Mixer",        0x0 },
	}
};

/* HP 530: mic indices are swapped relative to the default table. */
static const struct hda_input_mux cxt5045_capture_source_hp530 = {
	.num_items = 2,
	.items = {
		{ "Mic",          0x1 },
		{ "Internal Mic", 0x2 },
	}
};
/* turn on/off EAPD (+ mute HP) as a master switch */
static int cxt5045_hp_master_sw_put(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	unsigned int mute;

	if (!cxt_eapd_put(kcontrol, ucontrol))
		return 0;	/* EAPD state unchanged, nothing to do */

	/* Speaker pin 0x10: audible only when EAPD is on and no headphone
	 * is plugged (hp_present is maintained by cxt5045_hp_automute).
	 */
	if (spec->cur_eapd && !spec->hp_present)
		mute = 0;
	else
		mute = HDA_AMP_MUTE;
	snd_hda_codec_amp_stereo(codec, 0x10, HDA_OUTPUT, 0,
				 HDA_AMP_MUTE, mute);

	/* Headphone pin 0x11 follows the EAPD state alone. */
	mute = spec->cur_eapd ? 0 : HDA_AMP_MUTE;
	snd_hda_codec_amp_stereo(codec, 0x11, HDA_OUTPUT, 0,
				 HDA_AMP_MUTE, mute);
	return 1;
}
/* bind volumes of both NID 0x10 and 0x11 */
/* Single "Master Playback Volume" control driving speaker (0x10) and
 * headphone (0x11) output amps together; the list is zero-terminated.
 */
static const struct hda_bind_ctls cxt5045_hp_bind_master_vol = {
	.ops = &snd_hda_bind_vol,
	.values = {
		HDA_COMPOSE_AMP_VAL(0x10, 3, 0, HDA_OUTPUT),
		HDA_COMPOSE_AMP_VAL(0x11, 3, 0, HDA_OUTPUT),
		0
	},
};
/* toggle input of built-in and mic jack appropriately */
static void cxt5045_hp_automic(struct hda_codec *codec)
{
	/* jack present: mute pin 0x14, unmute pin 0x12 */
	static const struct hda_verb mic_jack_on[] = {
		{0x14, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
		{0x12, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000},
		{}
	};
	/* jack absent: the opposite pair */
	static const struct hda_verb mic_jack_off[] = {
		{0x12, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
		{0x14, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000},
		{}
	};

	snd_hda_sequence_write(codec,
			       snd_hda_jack_detect(codec, 0x12) ?
			       mic_jack_on : mic_jack_off);
}
/* mute internal speaker if HP is plugged */
static void cxt5045_hp_automute(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	unsigned int mute = HDA_AMP_MUTE;

	spec->hp_present = snd_hda_jack_detect(codec, 0x11);
	/* unmute the speaker only when no HP is plugged and EAPD is on */
	if (!spec->hp_present && spec->cur_eapd)
		mute = 0;
	snd_hda_codec_amp_stereo(codec, 0x10, HDA_OUTPUT, 0,
				 HDA_AMP_MUTE, mute);
}
/* unsolicited event for HP jack sensing */
static void cxt5045_hp_unsol_event(struct hda_codec *codec,
				   unsigned int res)
{
	unsigned int tag = res >> 26;	/* event tag lives in bits 31:26 */

	if (tag == CONEXANT_HP_EVENT)
		cxt5045_hp_automute(codec);
	else if (tag == CONEXANT_MIC_EVENT)
		cxt5045_hp_automic(codec);
}
/* Default CXT5045 mixer: capture on ADC 0x1a, playback loopback on the
 * input mixer 0x17 (index = source: 0 PCM, 1 internal mic, 2 mic), and a
 * bound master volume plus an EAPD-backed master switch on pin 0x10.
 */
static const struct snd_kcontrol_new cxt5045_mixers[] = {
	HDA_CODEC_VOLUME("Capture Volume", 0x1a, 0x00, HDA_INPUT),
	HDA_CODEC_MUTE("Capture Switch", 0x1a, 0x0, HDA_INPUT),
	HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
	HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
	HDA_CODEC_VOLUME("Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
	HDA_CODEC_MUTE("Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
	HDA_BIND_VOL("Master Playback Volume", &cxt5045_hp_bind_master_vol),
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Master Playback Switch",
		.info = cxt_eapd_info,
		.get = cxt_eapd_get,
		.put = cxt5045_hp_master_sw_put,
		.private_value = 0x10,
	},
	{}
};

/* Extra controls for the Benq model, appended to cxt5045_mixers. */
static const struct snd_kcontrol_new cxt5045_benq_mixers[] = {
	HDA_CODEC_VOLUME("Line Playback Volume", 0x17, 0x3, HDA_INPUT),
	HDA_CODEC_MUTE("Line Playback Switch", 0x17, 0x3, HDA_INPUT),
	{}
};

/* HP 530 variant: same layout but mic indices 1/2 are swapped. */
static const struct snd_kcontrol_new cxt5045_mixers_hp530[] = {
	HDA_CODEC_VOLUME("Capture Volume", 0x1a, 0x00, HDA_INPUT),
	HDA_CODEC_MUTE("Capture Switch", 0x1a, 0x0, HDA_INPUT),
	HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
	HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
	HDA_CODEC_VOLUME("Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
	HDA_CODEC_MUTE("Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
	HDA_BIND_VOL("Master Playback Volume", &cxt5045_hp_bind_master_vol),
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Master Playback Switch",
		.info = cxt_eapd_info,
		.get = cxt_eapd_get,
		.put = cxt5045_hp_master_sw_put,
		.private_value = 0x10,
	},
	{}
};
/* Power-up verb sequence for the stock CXT5045 configuration.
 * Pin roles follow the mixer tables above: 0x12/0x14 mic inputs,
 * 0x10 speaker, 0x11 headphone, 0x17 input mixer, 0x1a ADC/record
 * selector, 0x13 SPDIF pin.
 */
static const struct hda_verb cxt5045_init_verbs[] = {
	/* Line in, Mic */
	{0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 },
	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 },
	/* HP, Amp */
	{0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x10, AC_VERB_SET_CONNECT_SEL, 0x1},
	{0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
	{0x11, AC_VERB_SET_CONNECT_SEL, 0x1},
	/* mute all inputs of the loopback mixer 0x17 initially */
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
	/* Record selector: Internal mic */
	{0x1a, AC_VERB_SET_CONNECT_SEL,0x1},
	{0x1a, AC_VERB_SET_AMP_GAIN_MUTE,
	 AC_AMP_SET_INPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x17},
	/* SPDIF route: PCM */
	{0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{ 0x13, AC_VERB_SET_CONNECT_SEL, 0x0 },
	/* EAPD */
	{0x10, AC_VERB_SET_EAPD_BTLENABLE, 0x2 }, /* default on */
	{ } /* end */
};

/* Benq variant: identical except pin 0x11 is configured as an input. */
static const struct hda_verb cxt5045_benq_init_verbs[] = {
	/* Internal Mic, Mic */
	{0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 },
	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 },
	/* Line In,HP, Amp */
	{0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x10, AC_VERB_SET_CONNECT_SEL, 0x1},
	{0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
	{0x11, AC_VERB_SET_CONNECT_SEL, 0x1},
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
	/* Record selector: Internal mic */
	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x1},
	{0x1a, AC_VERB_SET_AMP_GAIN_MUTE,
	 AC_AMP_SET_INPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x17},
	/* SPDIF route: PCM */
	{0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x13, AC_VERB_SET_CONNECT_SEL, 0x0},
	/* EAPD */
	{0x10, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
	{ } /* end */
};

/* Enable unsolicited events for HP-jack sensing on pin 0x11. */
static const struct hda_verb cxt5045_hp_sense_init_verbs[] = {
	/* pin sensing on HP jack */
	{0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
	{ } /* end */
};

/* Enable unsolicited events for mic-jack sensing on pin 0x12. */
static const struct hda_verb cxt5045_mic_sense_init_verbs[] = {
	/* pin sensing on HP jack */
	{0x12, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
	{ } /* end */
};
#ifdef CONFIG_SND_DEBUG
/* Test configuration for debugging, modelled after the ALC260 test
 * configuration.
 */
/* All five inputs of ADC mux 0x1a are selectable in test mode. */
static const struct hda_input_mux cxt5045_test_capture_source = {
	.num_items = 5,
	.items = {
		{ "MIXER", 0x0 },
		{ "MIC1 pin", 0x1 },
		{ "LINE1 pin", 0x2 },
		{ "HP-OUT pin", 0x3 },
		{ "CD pin", 0x4 },
	},
};

/* Test-mode mixer: direct output controls per pin, retasking pin-mode
 * controls, an EAPD switch, and per-source loopback controls on 0x17.
 */
static const struct snd_kcontrol_new cxt5045_test_mixer[] = {

	/* Output controls */
	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x10, 0x0, HDA_OUTPUT),
	HDA_CODEC_MUTE("Speaker Playback Switch", 0x10, 0x0, HDA_OUTPUT),
	HDA_CODEC_VOLUME("HP-OUT Playback Volume", 0x11, 0x0, HDA_OUTPUT),
	HDA_CODEC_MUTE("HP-OUT Playback Switch", 0x11, 0x0, HDA_OUTPUT),
	HDA_CODEC_VOLUME("LINE1 Playback Volume", 0x12, 0x0, HDA_OUTPUT),
	HDA_CODEC_MUTE("LINE1 Playback Switch", 0x12, 0x0, HDA_OUTPUT),

	/* Modes for retasking pin widgets */
	CXT_PIN_MODE("HP-OUT pin mode", 0x11, CXT_PIN_DIR_INOUT),
	CXT_PIN_MODE("LINE1 pin mode", 0x12, CXT_PIN_DIR_INOUT),

	/* EAPD Switch Control */
	CXT_EAPD_SWITCH("External Amplifier", 0x10, 0x0),

	/* Loopback mixer controls */
	HDA_CODEC_VOLUME("PCM Volume", 0x17, 0x0, HDA_INPUT),
	HDA_CODEC_MUTE("PCM Switch", 0x17, 0x0, HDA_INPUT),
	HDA_CODEC_VOLUME("MIC1 pin Volume", 0x17, 0x1, HDA_INPUT),
	HDA_CODEC_MUTE("MIC1 pin Switch", 0x17, 0x1, HDA_INPUT),
	HDA_CODEC_VOLUME("LINE1 pin Volume", 0x17, 0x2, HDA_INPUT),
	HDA_CODEC_MUTE("LINE1 pin Switch", 0x17, 0x2, HDA_INPUT),
	HDA_CODEC_VOLUME("HP-OUT pin Volume", 0x17, 0x3, HDA_INPUT),
	HDA_CODEC_MUTE("HP-OUT pin Switch", 0x17, 0x3, HDA_INPUT),
	HDA_CODEC_VOLUME("CD pin Volume", 0x17, 0x4, HDA_INPUT),
	HDA_CODEC_MUTE("CD pin Switch", 0x17, 0x4, HDA_INPUT),
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Input Source",
		.info = conexant_mux_enum_info,
		.get = conexant_mux_enum_get,
		.put = conexant_mux_enum_put,
	},
	/* Audio input controls */
	HDA_CODEC_VOLUME("Capture Volume", 0x1a, 0x0, HDA_INPUT),
	HDA_CODEC_MUTE("Capture Switch", 0x1a, 0x0, HDA_INPUT),
	{ } /* end */
};

/* Init verbs matching the test mixer above. */
static const struct hda_verb cxt5045_test_init_verbs[] = {
	/* Set connections */
	{ 0x10, AC_VERB_SET_CONNECT_SEL, 0x0 },
	{ 0x11, AC_VERB_SET_CONNECT_SEL, 0x0 },
	{ 0x12, AC_VERB_SET_CONNECT_SEL, 0x0 },
	/* Enable retasking pins as output, initially without power amp */
	{0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	/* Disable digital (SPDIF) pins initially, but users can enable
	 * them via a mixer switch.  In the case of SPDIF-out, this initverb
	 * payload also sets the generation to 0, output to be in "consumer"
	 * PCM format, copyright asserted, no pre-emphasis and no validity
	 * control.
	 */
	{0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x18, AC_VERB_SET_DIGI_CONVERT_1, 0},
	/* Unmute retasking pin widget output buffers since the default
	 * state appears to be output.  As the pin mode is changed by the
	 * user the pin mode control will take care of enabling the pin's
	 * input/output buffers as needed.
	 */
	{0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
	{0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
	/* Mute capture amp left and right */
	{0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
	/* Set ADC connection select to match default mixer setting (mic1
	 * pin)
	 */
	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x01},
	{0x17, AC_VERB_SET_CONNECT_SEL, 0x01},
	/* Mute all inputs to mixer widget (even unconnected ones) */
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, /* Mixer */
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, /* Mic1 pin */
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, /* Line pin */
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, /* HP pin */
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)}, /* CD pin */
	{ }
};
#endif
/* initialize jack-sensing, too */
static int cxt5045_init(struct hda_codec *codec)
{
	/* common Conexant init first, then poll the HP jack once so the
	 * speaker mute state is correct from the start
	 */
	conexant_init(codec);
	cxt5045_hp_automute(codec);
	return 0;
}
/* Board-configuration models for the CXT5045; indices into
 * cxt5045_models[] and values stored in cxt5045_cfg_tbl[].
 */
enum {
	CXT5045_LAPTOP_HPSENSE,
	CXT5045_LAPTOP_MICSENSE,
	CXT5045_LAPTOP_HPMICSENSE,
	CXT5045_BENQ,
	CXT5045_LAPTOP_HP530,
#ifdef CONFIG_SND_DEBUG
	CXT5045_TEST,
#endif
	CXT5045_AUTO,
	CXT5045_MODELS
};

/* names accepted by the "model" module option */
static const char * const cxt5045_models[CXT5045_MODELS] = {
	[CXT5045_LAPTOP_HPSENSE]	= "laptop-hpsense",
	[CXT5045_LAPTOP_MICSENSE]	= "laptop-micsense",
	[CXT5045_LAPTOP_HPMICSENSE]	= "laptop-hpmicsense",
	[CXT5045_BENQ]			= "benq",
	[CXT5045_LAPTOP_HP530]		= "laptop-hp530",
#ifdef CONFIG_SND_DEBUG
	[CXT5045_TEST]		= "test",
#endif
	[CXT5045_AUTO]			= "auto",
};

/* PCI subsystem-ID quirk table mapping known machines to models. */
static const struct snd_pci_quirk cxt5045_cfg_tbl[] = {
	SND_PCI_QUIRK(0x103c, 0x30d5, "HP 530", CXT5045_LAPTOP_HP530),
	SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba P105", CXT5045_LAPTOP_MICSENSE),
	SND_PCI_QUIRK(0x152d, 0x0753, "Benq R55E", CXT5045_BENQ),
	SND_PCI_QUIRK(0x1734, 0x10ad, "Fujitsu Si1520", CXT5045_LAPTOP_MICSENSE),
	SND_PCI_QUIRK(0x1734, 0x10cb, "Fujitsu Si3515", CXT5045_LAPTOP_HPMICSENSE),
	SND_PCI_QUIRK(0x1734, 0x110e, "Fujitsu V5505",
		      CXT5045_LAPTOP_HPMICSENSE),
	SND_PCI_QUIRK(0x1509, 0x1e40, "FIC", CXT5045_LAPTOP_HPMICSENSE),
	SND_PCI_QUIRK(0x1509, 0x2f05, "FIC", CXT5045_LAPTOP_HPMICSENSE),
	SND_PCI_QUIRK(0x1509, 0x2f06, "FIC", CXT5045_LAPTOP_HPMICSENSE),
	SND_PCI_QUIRK_MASK(0x1631, 0xff00, 0xc100, "Packard Bell",
			   CXT5045_LAPTOP_HPMICSENSE),
	SND_PCI_QUIRK(0x8086, 0x2111, "Conexant Reference board", CXT5045_LAPTOP_HPSENSE),
	{}
};
/* Codec patch entry for the CXT5045: pick a board model, allocate and
 * fill the spec, then apply per-model mixer/verb/init overrides.
 * Returns 0 on success or a negative error code.
 */
static int patch_cxt5045(struct hda_codec *codec)
{
	struct conexant_spec *spec;
	int board_config;

	board_config = snd_hda_check_board_config(codec, CXT5045_MODELS,
						  cxt5045_models,
						  cxt5045_cfg_tbl);
	if (board_config < 0)
		board_config = CXT5045_AUTO; /* model=auto as default */
	if (board_config == CXT5045_AUTO)
		return patch_conexant_auto(codec);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	codec->spec = spec;
	codec->single_adc_amp = 1;

	/* common defaults; the switch below overrides per model */
	spec->multiout.max_channels = 2;
	spec->multiout.num_dacs = ARRAY_SIZE(cxt5045_dac_nids);
	spec->multiout.dac_nids = cxt5045_dac_nids;
	spec->multiout.dig_out_nid = CXT5045_SPDIF_OUT;
	spec->num_adc_nids = 1;
	spec->adc_nids = cxt5045_adc_nids;
	spec->capsrc_nids = cxt5045_capsrc_nids;
	spec->input_mux = &cxt5045_capture_source;
	spec->num_mixers = 1;
	spec->mixers[0] = cxt5045_mixers;
	spec->num_init_verbs = 1;
	spec->init_verbs[0] = cxt5045_init_verbs;
	spec->spdif_route = 0;
	spec->num_channel_mode = ARRAY_SIZE(cxt5045_modes);
	spec->channel_mode = cxt5045_modes;
	set_beep_amp(spec, 0x16, 0, 1);

	codec->patch_ops = conexant_patch_ops;

	switch (board_config) {
	case CXT5045_LAPTOP_HPSENSE:
		codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
		spec->input_mux = &cxt5045_capture_source;
		spec->num_init_verbs = 2;
		spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
		spec->mixers[0] = cxt5045_mixers;
		codec->patch_ops.init = cxt5045_init;
		break;
	case CXT5045_LAPTOP_MICSENSE:
		codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
		spec->input_mux = &cxt5045_capture_source;
		spec->num_init_verbs = 2;
		spec->init_verbs[1] = cxt5045_mic_sense_init_verbs;
		spec->mixers[0] = cxt5045_mixers;
		codec->patch_ops.init = cxt5045_init;
		break;
	default:	/* unknown models fall back to HP+mic sensing */
	case CXT5045_LAPTOP_HPMICSENSE:
		codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
		spec->input_mux = &cxt5045_capture_source;
		spec->num_init_verbs = 3;
		spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
		spec->init_verbs[2] = cxt5045_mic_sense_init_verbs;
		spec->mixers[0] = cxt5045_mixers;
		codec->patch_ops.init = cxt5045_init;
		break;
	case CXT5045_BENQ:
		codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
		spec->input_mux = &cxt5045_capture_source_benq;
		spec->num_init_verbs = 1;
		spec->init_verbs[0] = cxt5045_benq_init_verbs;
		spec->mixers[0] = cxt5045_mixers;
		spec->mixers[1] = cxt5045_benq_mixers;
		spec->num_mixers = 2;
		codec->patch_ops.init = cxt5045_init;
		break;
	case CXT5045_LAPTOP_HP530:
		codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
		spec->input_mux = &cxt5045_capture_source_hp530;
		spec->num_init_verbs = 2;
		spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
		spec->mixers[0] = cxt5045_mixers_hp530;
		codec->patch_ops.init = cxt5045_init;
		break;
#ifdef CONFIG_SND_DEBUG
	case CXT5045_TEST:
		spec->input_mux = &cxt5045_test_capture_source;
		spec->mixers[0] = cxt5045_test_mixer;
		spec->init_verbs[0] = cxt5045_test_init_verbs;
		break;
#endif
	}

	switch (codec->subsystem_id >> 16) {
	case 0x103c:
	case 0x1631:
	case 0x1734:
	case 0x17aa:
		/* HP, Packard Bell, Fujitsu-Siemens & Lenovo laptops have
		 * really bad sound over 0dB on NID 0x17. Fix max PCM level to
		 * 0 dB (originally it has 0x2b steps with 0dB offset 0x14)
		 */
		snd_hda_override_amp_caps(codec, 0x17, HDA_INPUT,
					  (0x14 << AC_AMPCAP_OFFSET_SHIFT) |
					  (0x14 << AC_AMPCAP_NUM_STEPS_SHIFT) |
					  (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
					  (1 << AC_AMPCAP_MUTE_SHIFT));
		break;
	}

	if (spec->beep_amp)
		snd_hda_attach_beep_device(codec, spec->beep_amp);

	return 0;
}
/* Conexant 5047 specific */

/* Widget NIDs for the CXT5047: DAC 0x10, ADC 0x12, capture-source
 * selector 0x1a, SPDIF-out 0x11.
 */
#define CXT5047_SPDIF_OUT	0x11
static const hda_nid_t cxt5047_dac_nids[1] = { 0x10 }; /* 0x1c */
static const hda_nid_t cxt5047_adc_nids[1] = { 0x12 };
static const hda_nid_t cxt5047_capsrc_nids[1] = { 0x1a };

/* stereo only */
static const struct hda_channel_mode cxt5047_modes[1] = {
	{ 2, NULL },
};

/* capture sources for the Toshiba (laptop-eapd) model */
static const struct hda_input_mux cxt5047_toshiba_capture_source = {
	.num_items = 2,
	.items = {
		{ "ExtMic", 0x2 },
		{ "Line-In", 0x1 },
	}
};
/* turn on/off EAPD (+ mute HP) as a master switch */
static int cxt5047_hp_master_sw_put(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	unsigned int mute;

	if (!cxt_eapd_put(kcontrol, ucontrol))
		return 0;	/* EAPD state unchanged */

	/* Speaker path: audible only with EAPD on and no headphone.
	 * NOTE: Conexant codecs need the index for the *OUTPUT* amp of
	 * pin widgets unlike other codecs; hence index 0x01 here for the
	 * volume coming from mixer amp 0x19.
	 */
	if (spec->cur_eapd && !spec->hp_present)
		mute = 0;
	else
		mute = HDA_AMP_MUTE;
	snd_hda_codec_amp_stereo(codec, 0x1d, HDA_OUTPUT, 0x01,
				 HDA_AMP_MUTE, mute);

	/* Headphone pin 0x13 follows EAPD alone. */
	mute = spec->cur_eapd ? 0 : HDA_AMP_MUTE;
	snd_hda_codec_amp_stereo(codec, 0x13, HDA_OUTPUT, 0,
				 HDA_AMP_MUTE, mute);
	return 1;
}
/* mute internal speaker if HP is plugged */
static void cxt5047_hp_automute(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	unsigned int mute = HDA_AMP_MUTE;

	spec->hp_present = snd_hda_jack_detect(codec, 0x13);
	/* speaker audible only when no HP is plugged and EAPD is on;
	 * see the index note in cxt5047_hp_master_sw_put
	 */
	if (!spec->hp_present && spec->cur_eapd)
		mute = 0;
	snd_hda_codec_amp_stereo(codec, 0x1d, HDA_OUTPUT, 0x01,
				 HDA_AMP_MUTE, mute);
}
/* toggle input of built-in and mic jack appropriately */
static void cxt5047_hp_automic(struct hda_codec *codec)
{
	/* jack present: mute pin 0x15, unmute pin 0x17 */
	static const struct hda_verb mic_jack_on[] = {
		{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
		{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
		{}
	};
	/* jack absent: the opposite pair */
	static const struct hda_verb mic_jack_off[] = {
		{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
		{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
		{}
	};

	snd_hda_sequence_write(codec,
			       snd_hda_jack_detect(codec, 0x15) ?
			       mic_jack_on : mic_jack_off);
}
/* unsolicited event for HP jack sensing */
static void cxt5047_hp_unsol_event(struct hda_codec *codec,
				   unsigned int res)
{
	unsigned int tag = res >> 26;	/* event tag lives in bits 31:26 */

	if (tag == CONEXANT_HP_EVENT)
		cxt5047_hp_automute(codec);
	else if (tag == CONEXANT_MIC_EVENT)
		cxt5047_hp_automic(codec);
}
/* Base CXT5047 mixer shared by all models: mic path on mixer 0x19,
 * capture on ADC 0x12, PCM on DAC 0x10, and an EAPD-backed master
 * switch on HP pin 0x13.
 */
static const struct snd_kcontrol_new cxt5047_base_mixers[] = {
	HDA_CODEC_VOLUME("Mic Playback Volume", 0x19, 0x02, HDA_INPUT),
	HDA_CODEC_MUTE("Mic Playback Switch", 0x19, 0x02, HDA_INPUT),
	HDA_CODEC_VOLUME("Mic Boost Volume", 0x1a, 0x0, HDA_OUTPUT),
	HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x03, HDA_INPUT),
	HDA_CODEC_MUTE("Capture Switch", 0x12, 0x03, HDA_INPUT),
	HDA_CODEC_VOLUME("PCM Volume", 0x10, 0x00, HDA_OUTPUT),
	HDA_CODEC_MUTE("PCM Switch", 0x10, 0x00, HDA_OUTPUT),
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Master Playback Switch",
		.info = cxt_eapd_info,
		.get = cxt_eapd_get,
		.put = cxt5047_hp_master_sw_put,
		.private_value = 0x13,
	},
	{}
};

/* Extra speaker/HP volume controls for models with both outputs. */
static const struct snd_kcontrol_new cxt5047_hp_spk_mixers[] = {
	/* See the note in cxt5047_hp_master_sw_put */
	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x1d, 0x01, HDA_OUTPUT),
	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x13, 0x00, HDA_OUTPUT),
	{}
};

/* HP-only models: a single master volume on the HP pin. */
static const struct snd_kcontrol_new cxt5047_hp_only_mixers[] = {
	HDA_CODEC_VOLUME("Master Playback Volume", 0x13, 0x00, HDA_OUTPUT),
	{ } /* end */
};
/* Power-up verb sequence for the CXT5047: input pins 0x14/0x15/0x17,
 * HP pin 0x13 and speaker path 0x1d off mixer 0x19, record select on
 * ADC 0x12, and unsolicited-event enables for jack sensing.
 */
static const struct hda_verb cxt5047_init_verbs[] = {
	/* Line in, Mic, Built-in Mic */
	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
	{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_50 },
	{0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_50 },
	/* HP, Speaker */
	{0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP },
	{0x13, AC_VERB_SET_CONNECT_SEL, 0x0}, /* mixer(0x19) */
	{0x1d, AC_VERB_SET_CONNECT_SEL, 0x1}, /* mixer(0x19) */
	/* Record selector: Mic */
	{0x12, AC_VERB_SET_CONNECT_SEL,0x03},
	{0x19, AC_VERB_SET_AMP_GAIN_MUTE,
	 AC_AMP_SET_INPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x17},
	{0x1A, AC_VERB_SET_CONNECT_SEL,0x02},
	{0x1A, AC_VERB_SET_AMP_GAIN_MUTE,
	 AC_AMP_SET_OUTPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x00},
	{0x1A, AC_VERB_SET_AMP_GAIN_MUTE,
	 AC_AMP_SET_OUTPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x03},
	/* SPDIF route: PCM */
	{ 0x18, AC_VERB_SET_CONNECT_SEL, 0x0 },
	/* Enable unsolicited events */
	{0x13, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
	{0x15, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
	{ } /* end */
};

/* configuration for Toshiba Laptops */
static const struct hda_verb cxt5047_toshiba_init_verbs[] = {
	{0x13, AC_VERB_SET_EAPD_BTLENABLE, 0x0}, /* default off */
	{}
};
/* Test configuration for debugging, modelled after the ALC260 test
 * configuration.
 */
#ifdef CONFIG_SND_DEBUG
/* four selectable inputs on capture source selector */
static const struct hda_input_mux cxt5047_test_capture_source = {
	.num_items = 4,
	.items = {
		{ "LINE1 pin", 0x0 },
		{ "MIC1 pin", 0x1 },
		{ "MIC2 pin", 0x2 },
		{ "CD pin", 0x3 },
	},
};

/* Test-mode mixer: direct output controls for each output/pin widget,
 * retasking pin-mode controls, EAPD switch, per-source loopback
 * controls on 0x12 and capture controls on 0x19.
 */
static const struct snd_kcontrol_new cxt5047_test_mixer[] = {

	/* Output only controls */
	HDA_CODEC_VOLUME("OutAmp-1 Volume", 0x10, 0x0, HDA_OUTPUT),
	HDA_CODEC_MUTE("OutAmp-1 Switch", 0x10,0x0, HDA_OUTPUT),
	HDA_CODEC_VOLUME("OutAmp-2 Volume", 0x1c, 0x0, HDA_OUTPUT),
	HDA_CODEC_MUTE("OutAmp-2 Switch", 0x1c, 0x0, HDA_OUTPUT),
	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
	HDA_CODEC_MUTE("Speaker Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
	HDA_CODEC_VOLUME("HeadPhone Playback Volume", 0x13, 0x0, HDA_OUTPUT),
	HDA_CODEC_MUTE("HeadPhone Playback Switch", 0x13, 0x0, HDA_OUTPUT),
	HDA_CODEC_VOLUME("Line1-Out Playback Volume", 0x14, 0x0, HDA_OUTPUT),
	HDA_CODEC_MUTE("Line1-Out Playback Switch", 0x14, 0x0, HDA_OUTPUT),
	HDA_CODEC_VOLUME("Line2-Out Playback Volume", 0x15, 0x0, HDA_OUTPUT),
	HDA_CODEC_MUTE("Line2-Out Playback Switch", 0x15, 0x0, HDA_OUTPUT),

	/* Modes for retasking pin widgets */
	CXT_PIN_MODE("LINE1 pin mode", 0x14, CXT_PIN_DIR_INOUT),
	CXT_PIN_MODE("MIC1 pin mode", 0x15, CXT_PIN_DIR_INOUT),

	/* EAPD Switch Control */
	CXT_EAPD_SWITCH("External Amplifier", 0x13, 0x0),

	/* Loopback mixer controls */
	HDA_CODEC_VOLUME("MIC1 Playback Volume", 0x12, 0x01, HDA_INPUT),
	HDA_CODEC_MUTE("MIC1 Playback Switch", 0x12, 0x01, HDA_INPUT),
	HDA_CODEC_VOLUME("MIC2 Playback Volume", 0x12, 0x02, HDA_INPUT),
	HDA_CODEC_MUTE("MIC2 Playback Switch", 0x12, 0x02, HDA_INPUT),
	HDA_CODEC_VOLUME("LINE Playback Volume", 0x12, 0x0, HDA_INPUT),
	HDA_CODEC_MUTE("LINE Playback Switch", 0x12, 0x0, HDA_INPUT),
	HDA_CODEC_VOLUME("CD Playback Volume", 0x12, 0x04, HDA_INPUT),
	HDA_CODEC_MUTE("CD Playback Switch", 0x12, 0x04, HDA_INPUT),

	HDA_CODEC_VOLUME("Capture-1 Volume", 0x19, 0x0, HDA_INPUT),
	HDA_CODEC_MUTE("Capture-1 Switch", 0x19, 0x0, HDA_INPUT),
	HDA_CODEC_VOLUME("Capture-2 Volume", 0x19, 0x1, HDA_INPUT),
	HDA_CODEC_MUTE("Capture-2 Switch", 0x19, 0x1, HDA_INPUT),
	HDA_CODEC_VOLUME("Capture-3 Volume", 0x19, 0x2, HDA_INPUT),
	HDA_CODEC_MUTE("Capture-3 Switch", 0x19, 0x2, HDA_INPUT),
	HDA_CODEC_VOLUME("Capture-4 Volume", 0x19, 0x3, HDA_INPUT),
	HDA_CODEC_MUTE("Capture-4 Switch", 0x19, 0x3, HDA_INPUT),
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Input Source",
		.info = conexant_mux_enum_info,
		.get = conexant_mux_enum_get,
		.put = conexant_mux_enum_put,
	},
	HDA_CODEC_VOLUME("Mic Boost Volume", 0x1a, 0x0, HDA_OUTPUT),
	{ } /* end */
};
/* Init verbs matching the test mixer above. */
static const struct hda_verb cxt5047_test_init_verbs[] = {
	/* Enable retasking pins as output, initially without power amp */
	{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	/* Disable digital (SPDIF) pins initially, but users can enable
	 * them via a mixer switch.  In the case of SPDIF-out, this initverb
	 * payload also sets the generation to 0, output to be in "consumer"
	 * PCM format, copyright asserted, no pre-emphasis and no validity
	 * control.
	 */
	{0x18, AC_VERB_SET_DIGI_CONVERT_1, 0},
	/* Ensure mic1, mic2, line1 pin widgets take input from the
	 * OUT1 sum bus when acting as an output.
	 */
	{0x1a, AC_VERB_SET_CONNECT_SEL, 0},
	{0x1b, AC_VERB_SET_CONNECT_SEL, 0},
	/* Start with output sum widgets muted and their output gains at min */
	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
	/* Unmute retasking pin widget output buffers since the default
	 * state appears to be output.  As the pin mode is changed by the
	 * user the pin mode control will take care of enabling the pin's
	 * input/output buffers as needed.
	 */
	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
	{0x13, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
	/* Mute capture amp left and right */
	{0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
	/* Set ADC connection select to match default mixer setting (mic1
	 * pin)
	 */
	{0x12, AC_VERB_SET_CONNECT_SEL, 0x00},
	/* Mute all inputs to mixer widget (even unconnected ones) */
	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, /* mic1 pin */
	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, /* mic2 pin */
	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, /* line1 pin */
	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, /* line2 pin */
	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)}, /* CD pin */
	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)}, /* Beep-gen pin */
	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(6)}, /* Line-out pin */
	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(7)}, /* HP-pin pin */
	{ }
};
#endif
/* initialize jack-sensing, too */
static int cxt5047_hp_init(struct hda_codec *codec)
{
	/* common Conexant init first, then poll the HP jack once so the
	 * speaker mute state is correct from the start
	 */
	conexant_init(codec);
	cxt5047_hp_automute(codec);
	return 0;
}
/* Board-configuration models for the CXT5047. */
enum {
	CXT5047_LAPTOP,		/* Laptops w/o EAPD support */
	CXT5047_LAPTOP_HP,	/* Some HP laptops */
	CXT5047_LAPTOP_EAPD,	/* Laptops with EAPD support */
#ifdef CONFIG_SND_DEBUG
	CXT5047_TEST,
#endif
	CXT5047_AUTO,
	CXT5047_MODELS
};

/* names accepted by the "model" module option */
static const char * const cxt5047_models[CXT5047_MODELS] = {
	[CXT5047_LAPTOP]	= "laptop",
	[CXT5047_LAPTOP_HP]	= "laptop-hp",
	[CXT5047_LAPTOP_EAPD]	= "laptop-eapd",
#ifdef CONFIG_SND_DEBUG
	[CXT5047_TEST]		= "test",
#endif
	[CXT5047_AUTO]		= "auto",
};

/* PCI subsystem-ID quirk table mapping known machines to models. */
static const struct snd_pci_quirk cxt5047_cfg_tbl[] = {
	SND_PCI_QUIRK(0x103c, 0x30a5, "HP DV5200T/DV8000T", CXT5047_LAPTOP_HP),
	SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3000, "HP DV Series",
			   CXT5047_LAPTOP),
	SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba P100", CXT5047_LAPTOP_EAPD),
	{}
};
/* Codec patch entry for the CXT5047: pick a board model, allocate and
 * fill the spec, then apply per-model mixer/verb overrides.
 * Returns 0 on success or a negative error code.
 */
static int patch_cxt5047(struct hda_codec *codec)
{
	struct conexant_spec *spec;
	int board_config;

	board_config = snd_hda_check_board_config(codec, CXT5047_MODELS,
						  cxt5047_models,
						  cxt5047_cfg_tbl);
	if (board_config < 0)
		board_config = CXT5047_AUTO; /* model=auto as default */
	if (board_config == CXT5047_AUTO)
		return patch_conexant_auto(codec);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	codec->spec = spec;
	codec->pin_amp_workaround = 1;

	/* common defaults; the switch below overrides per model */
	spec->multiout.max_channels = 2;
	spec->multiout.num_dacs = ARRAY_SIZE(cxt5047_dac_nids);
	spec->multiout.dac_nids = cxt5047_dac_nids;
	spec->multiout.dig_out_nid = CXT5047_SPDIF_OUT;
	spec->num_adc_nids = 1;
	spec->adc_nids = cxt5047_adc_nids;
	spec->capsrc_nids = cxt5047_capsrc_nids;
	spec->num_mixers = 1;
	spec->mixers[0] = cxt5047_base_mixers;
	spec->num_init_verbs = 1;
	spec->init_verbs[0] = cxt5047_init_verbs;
	spec->spdif_route = 0;
	/* these two were accidentally written with trailing comma operators;
	 * they are independent assignments and belong in separate statements
	 */
	spec->num_channel_mode = ARRAY_SIZE(cxt5047_modes);
	spec->channel_mode = cxt5047_modes;

	codec->patch_ops = conexant_patch_ops;

	switch (board_config) {
	case CXT5047_LAPTOP:
		spec->num_mixers = 2;
		spec->mixers[1] = cxt5047_hp_spk_mixers;
		codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
		break;
	case CXT5047_LAPTOP_HP:
		spec->num_mixers = 2;
		spec->mixers[1] = cxt5047_hp_only_mixers;
		codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
		codec->patch_ops.init = cxt5047_hp_init;
		break;
	case CXT5047_LAPTOP_EAPD:
		spec->input_mux = &cxt5047_toshiba_capture_source;
		spec->num_mixers = 2;
		spec->mixers[1] = cxt5047_hp_spk_mixers;
		spec->num_init_verbs = 2;
		spec->init_verbs[1] = cxt5047_toshiba_init_verbs;
		codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
		break;
#ifdef CONFIG_SND_DEBUG
	case CXT5047_TEST:
		spec->input_mux = &cxt5047_test_capture_source;
		spec->mixers[0] = cxt5047_test_mixer;
		spec->init_verbs[0] = cxt5047_test_init_verbs;
		codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
		break;	/* was missing; guards against future fall-through */
#endif
	}
	spec->vmaster_nid = 0x13;

	switch (codec->subsystem_id >> 16) {
	case 0x103c:
		/* HP laptops have really bad sound over 0 dB on NID 0x10.
		 * Fix max PCM level to 0 dB (originally it has 0x1e steps
		 * with 0 dB offset 0x17)
		 */
		snd_hda_override_amp_caps(codec, 0x10, HDA_INPUT,
					  (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
					  (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
					  (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
					  (1 << AC_AMPCAP_MUTE_SHIFT));
		break;
	}

	return 0;
}
/* Conexant 5051 specific */

/* One DAC (0x10) and two ADCs (0x14 for internal/external mic on port B,
 * 0x15 used as the alternate when port C is plugged — see
 * cxt5051_portc_automic); stereo only.
 */
static const hda_nid_t cxt5051_dac_nids[1] = { 0x10 };
static const hda_nid_t cxt5051_adc_nids[2] = { 0x14, 0x15 };

static const struct hda_channel_mode cxt5051_modes[1] = {
	{ 2, NULL },
};
/* Re-program the HP (0x16) and speaker (0x1a, plus 0x1b on ideapad)
 * pin controls from the current hp_present/cur_eapd state.
 */
static void cxt5051_update_speaker(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	unsigned int hp_ctl, spk_ctl;

	/* headphone: enabled only when plugged and EAPD is on */
	hp_ctl = (spec->hp_present && spec->cur_eapd) ? PIN_HP : 0;
	snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
			    hp_ctl);

	/* speaker: enabled only when no HP is plugged and EAPD is on */
	spk_ctl = (!spec->hp_present && spec->cur_eapd) ? PIN_OUT : 0;
	snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
			    spk_ctl);
	/* on ideapad there is an additional speaker (subwoofer) to mute */
	if (spec->ideapad)
		snd_hda_codec_write(codec, 0x1b, 0,
				    AC_VERB_SET_PIN_WIDGET_CONTROL,
				    spk_ctl);
}
/* turn on/off EAPD (+ mute HP) as a master switch */
static int cxt5051_hp_master_sw_put(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);

	/* only touch the pins when the EAPD state actually changed */
	if (!cxt_eapd_put(kcontrol, ucontrol))
		return 0;
	cxt5051_update_speaker(codec);
	return 1;
}
/* toggle input of built-in and mic jack appropriately */
static void cxt5051_portb_automic(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;

	if (!(spec->auto_mic & AUTO_MIC_PORTB))
		return;
	/* route ADC 0x14 to input 1 when the port-B jack (0x17) is plugged,
	 * input 0 otherwise
	 */
	snd_hda_codec_write(codec, 0x14, 0,
			    AC_VERB_SET_CONNECT_SEL,
			    snd_hda_jack_detect(codec, 0x17) ? 0x01 : 0x00);
}
/* switch the current ADC according to the jack state */
static void cxt5051_portc_automic(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	hda_nid_t adc;

	if (!(spec->auto_mic & AUTO_MIC_PORTC))
		return;
	/* ADC index 1 when the port-C jack (0x18) is plugged, 0 otherwise */
	spec->cur_adc_idx = snd_hda_jack_detect(codec, 0x18) ? 1 : 0;
	adc = spec->adc_nids[spec->cur_adc_idx];
	if (spec->cur_adc && spec->cur_adc != adc) {
		/* stream is running, let's swap the current ADC:
		 * tear down the old one, then re-setup on the new NID
		 * with the saved stream tag/format
		 */
		__snd_hda_codec_cleanup_stream(codec, spec->cur_adc, 1);
		spec->cur_adc = adc;
		snd_hda_codec_setup_stream(codec, adc,
					   spec->cur_adc_stream_tag, 0,
					   spec->cur_adc_format);
	}
}
/* mute internal speaker if HP is plugged */
static void cxt5051_hp_automute(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;

	/* refresh jack state on pin 0x16, then re-program the pins */
	spec->hp_present = snd_hda_jack_detect(codec, 0x16);
	cxt5051_update_speaker(codec);
}
/* unsolicited event for HP jack sensing */
static void cxt5051_hp_unsol_event(struct hda_codec *codec,
				   unsigned int res)
{
	unsigned int tag = res >> 26;	/* event tag lives in bits 31:26 */

	if (tag == CONEXANT_HP_EVENT)
		cxt5051_hp_automute(codec);
	else if (tag == CXT5051_PORTB_EVENT)
		cxt5051_portb_automic(codec);
	else if (tag == CXT5051_PORTC_EVENT)
		cxt5051_portc_automic(codec);
}
/* Playback controls: volume on DAC 0x10, EAPD-backed master switch on
 * pin 0x1a (see cxt5051_hp_master_sw_put).
 */
static const struct snd_kcontrol_new cxt5051_playback_mixers[] = {
	HDA_CODEC_VOLUME("Master Playback Volume", 0x10, 0x00, HDA_OUTPUT),
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Master Playback Switch",
		.info = cxt_eapd_info,
		.get = cxt_eapd_get,
		.put = cxt5051_hp_master_sw_put,
		.private_value = 0x1a,
	},
	{}
};

/* Default capture controls: both mics on ADC 0x14, docking mic on 0x15. */
static const struct snd_kcontrol_new cxt5051_capture_mixers[] = {
	HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT),
	HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT),
	HDA_CODEC_VOLUME("Mic Volume", 0x14, 0x01, HDA_INPUT),
	HDA_CODEC_MUTE("Mic Switch", 0x14, 0x01, HDA_INPUT),
	HDA_CODEC_VOLUME("Docking Mic Volume", 0x15, 0x00, HDA_INPUT),
	HDA_CODEC_MUTE("Docking Mic Switch", 0x15, 0x00, HDA_INPUT),
	{}
};

/* HP variant: external mic sits on ADC 0x15 instead. */
static const struct snd_kcontrol_new cxt5051_hp_mixers[] = {
	HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT),
	HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT),
	HDA_CODEC_VOLUME("Mic Volume", 0x15, 0x00, HDA_INPUT),
	HDA_CODEC_MUTE("Mic Switch", 0x15, 0x00, HDA_INPUT),
	{}
};

/* HP dv6736: a single capture control on ADC 0x14 input 0. */
static const struct snd_kcontrol_new cxt5051_hp_dv6736_mixers[] = {
	HDA_CODEC_VOLUME("Capture Volume", 0x14, 0x00, HDA_INPUT),
	HDA_CODEC_MUTE("Capture Switch", 0x14, 0x00, HDA_INPUT),
	{}
};

/* F700: a single capture control on ADC 0x14 input 1. */
static const struct snd_kcontrol_new cxt5051_f700_mixers[] = {
	HDA_CODEC_VOLUME("Capture Volume", 0x14, 0x01, HDA_INPUT),
	HDA_CODEC_MUTE("Capture Switch", 0x14, 0x01, HDA_INPUT),
	{}
};

/* Toshiba: like the default set but without the docking-mic controls. */
static const struct snd_kcontrol_new cxt5051_toshiba_mixers[] = {
	HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT),
	HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT),
	HDA_CODEC_VOLUME("Mic Volume", 0x14, 0x01, HDA_INPUT),
	HDA_CODEC_MUTE("Mic Switch", 0x14, 0x01, HDA_INPUT),
	{}
};
/* default CX5051 initialization: set up pins, amps, SPDIF route and
 * enable headphone-jack unsolicited events on pin 0x16 */
static const struct hda_verb cxt5051_init_verbs[] = {
	/* Line in, Mic */
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x03},
	{0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
	{0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x03},
	{0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
	{0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
	{0x1d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x03},
	/* SPK */
	{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x00},
	/* HP, Amp */
	{0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
	{0x16, AC_VERB_SET_CONNECT_SEL, 0x00},
	/* DAC1 */
	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
	/* Record selector: Internal mic */
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
	/* SPDIF route: PCM */
	{0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x1c, AC_VERB_SET_CONNECT_SEL, 0x0},
	/* EAPD */
	{0x1a, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
	{0x16, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN|CONEXANT_HP_EVENT},
	{ } /* end */
};
/* HP dv6736 variant: mic ports 0x18/0x1d disabled, fixed record source */
static const struct hda_verb cxt5051_hp_dv6736_init_verbs[] = {
	/* Line in, Mic */
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x03},
	{0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
	{0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0},
	{0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0},
	/* SPK */
	{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x00},
	/* HP, Amp */
	{0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
	{0x16, AC_VERB_SET_CONNECT_SEL, 0x00},
	/* DAC1 */
	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
	/* Record selector: Internal mic */
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
	{0x14, AC_VERB_SET_CONNECT_SEL, 0x1},
	/* SPDIF route: PCM */
	{0x1c, AC_VERB_SET_CONNECT_SEL, 0x0},
	/* EAPD */
	{0x1a, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
	{0x16, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN|CONEXANT_HP_EVENT},
	{ } /* end */
};
/* Compaq Presario F700 variant: same layout as the dv6736 table */
static const struct hda_verb cxt5051_f700_init_verbs[] = {
	/* Line in, Mic */
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x03},
	{0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
	{0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0},
	{0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0},
	/* SPK */
	{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x00},
	/* HP, Amp */
	{0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
	{0x16, AC_VERB_SET_CONNECT_SEL, 0x00},
	/* DAC1 */
	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
	/* Record selector: Internal mic */
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
	{0x14, AC_VERB_SET_CONNECT_SEL, 0x1},
	/* SPDIF route: PCM */
	{0x1c, AC_VERB_SET_CONNECT_SEL, 0x0},
	/* EAPD */
	{0x1a, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
	{0x16, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN|CONEXANT_HP_EVENT},
	{ } /* end */
};
/* arm unsolicited-response reporting on a mic pin with the given tag */
static void cxt5051_init_mic_port(struct hda_codec *codec, hda_nid_t nid,
				  unsigned int event)
{
	unsigned int payload = AC_USRSP_EN | event;

	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_UNSOLICITED_ENABLE,
			    payload);
}
/* extra verbs for the Lenovo IdeaPad: route the subwoofer pin to DAC1 */
static const struct hda_verb cxt5051_ideapad_init_verbs[] = {
	/* Subwoofer */
	{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x1b, AC_VERB_SET_CONNECT_SEL, 0x00},
	{ } /* end */
};
/* initialize jack-sensing, too */
static int cxt5051_init(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;

	conexant_init(codec);

	/* arm unsolicited events only on the mic ports this model
	 * auto-switches (flags set in patch_cxt5051) */
	if (spec->auto_mic & AUTO_MIC_PORTB)
		cxt5051_init_mic_port(codec, 0x17, CXT5051_PORTB_EVENT);
	if (spec->auto_mic & AUTO_MIC_PORTC)
		cxt5051_init_mic_port(codec, 0x18, CXT5051_PORTC_EVENT);

	/* sync mute and mic routing with the current jack states */
	if (codec->patch_ops.unsol_event) {
		cxt5051_hp_automute(codec);
		cxt5051_portb_automic(codec);
		cxt5051_portc_automic(codec);
	}
	return 0;
}
/* CX5051 board-config identifiers */
enum {
	CXT5051_LAPTOP,	 /* Laptops w/ EAPD support */
	CXT5051_HP,	/* no docking */
	CXT5051_HP_DV6736,	/* HP without mic switch */
	CXT5051_F700,       /* HP Compaq Presario F700 */
	CXT5051_TOSHIBA,	/* Toshiba M300 & co */
	CXT5051_IDEAPAD,	/* Lenovo IdeaPad Y430 */
	CXT5051_AUTO,		/* auto-parser */
	CXT5051_MODELS
};
/* model names accepted via the "model" module option */
static const char *const cxt5051_models[CXT5051_MODELS] = {
	[CXT5051_LAPTOP]	= "laptop",
	[CXT5051_HP]		= "hp",
	[CXT5051_HP_DV6736]	= "hp-dv6736",
	[CXT5051_F700]          = "hp-700",
	[CXT5051_TOSHIBA]	= "toshiba",
	[CXT5051_IDEAPAD]	= "ideapad",
	[CXT5051_AUTO]		= "auto",
};
/* PCI subsystem-ID quirks mapping known machines to board configs */
static const struct snd_pci_quirk cxt5051_cfg_tbl[] = {
	SND_PCI_QUIRK(0x103c, 0x30cf, "HP DV6736", CXT5051_HP_DV6736),
	SND_PCI_QUIRK(0x103c, 0x360b, "Compaq Presario CQ60", CXT5051_HP),
	SND_PCI_QUIRK(0x103c, 0x30ea, "Compaq Presario F700", CXT5051_F700),
	SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba M30x", CXT5051_TOSHIBA),
	SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board",
		      CXT5051_LAPTOP),
	SND_PCI_QUIRK(0x14f1, 0x5051, "HP Spartan 1.1", CXT5051_HP),
	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo IdeaPad", CXT5051_IDEAPAD),
	{}
};
/* probe entry for the CX5051 codec: pick the board config, fill the
 * spec with the default tables, then apply per-model overrides.
 * Returns 0 on success or -ENOMEM. */
static int patch_cxt5051(struct hda_codec *codec)
{
	struct conexant_spec *spec;
	int board_config;

	board_config = snd_hda_check_board_config(codec, CXT5051_MODELS,
						  cxt5051_models,
						  cxt5051_cfg_tbl);
	if (board_config < 0)
		board_config = CXT5051_AUTO; /* model=auto as default */
	if (board_config == CXT5051_AUTO)
		return patch_conexant_auto(codec);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	codec->spec = spec;
	codec->pin_amp_workaround = 1;

	codec->patch_ops = conexant_patch_ops;
	codec->patch_ops.init = cxt5051_init;

	/* default configuration shared by all static models */
	spec->multiout.max_channels = 2;
	spec->multiout.num_dacs = ARRAY_SIZE(cxt5051_dac_nids);
	spec->multiout.dac_nids = cxt5051_dac_nids;
	spec->multiout.dig_out_nid = CXT5051_SPDIF_OUT;
	spec->num_adc_nids = 1; /* not 2; via auto-mic switch */
	spec->adc_nids = cxt5051_adc_nids;
	spec->num_mixers = 2;
	spec->mixers[0] = cxt5051_capture_mixers;
	spec->mixers[1] = cxt5051_playback_mixers;
	spec->num_init_verbs = 1;
	spec->init_verbs[0] = cxt5051_init_verbs;
	spec->spdif_route = 0;
	spec->num_channel_mode = ARRAY_SIZE(cxt5051_modes);
	spec->channel_mode = cxt5051_modes;
	spec->cur_adc = 0;
	spec->cur_adc_idx = 0;

	set_beep_amp(spec, 0x13, 0, HDA_OUTPUT);

	codec->patch_ops.unsol_event = cxt5051_hp_unsol_event;

	/* auto-switch both mic ports unless a model override disables it */
	spec->auto_mic = AUTO_MIC_PORTB | AUTO_MIC_PORTC;
	switch (board_config) {
	case CXT5051_HP:
		spec->mixers[0] = cxt5051_hp_mixers;
		break;
	case CXT5051_HP_DV6736:
		spec->init_verbs[0] = cxt5051_hp_dv6736_init_verbs;
		spec->mixers[0] = cxt5051_hp_dv6736_mixers;
		spec->auto_mic = 0;
		break;
	case CXT5051_F700:
		spec->init_verbs[0] = cxt5051_f700_init_verbs;
		spec->mixers[0] = cxt5051_f700_mixers;
		spec->auto_mic = 0;
		break;
	case CXT5051_TOSHIBA:
		spec->mixers[0] = cxt5051_toshiba_mixers;
		spec->auto_mic = AUTO_MIC_PORTB;
		break;
	case CXT5051_IDEAPAD:
		spec->init_verbs[spec->num_init_verbs++] =
			cxt5051_ideapad_init_verbs;
		spec->ideapad = 1;
		break;
	}

	if (spec->beep_amp)
		snd_hda_attach_beep_device(codec, spec->beep_amp);

	return 0;
}
/* Conexant 5066 specific */

/* fixed widget NIDs of the CX5066 */
static const hda_nid_t cxt5066_dac_nids[1] = { 0x10 };
static const hda_nid_t cxt5066_adc_nids[3] = { 0x14, 0x15, 0x16 };
static const hda_nid_t cxt5066_capsrc_nids[1] = { 0x17 };
static const hda_nid_t cxt5066_digout_pin_nids[2] = { 0x20, 0x22 };

/* OLPC's microphone port is DC coupled for use with external sensors,
 * therefore we use a 50% mic bias in order to center the input signal with
 * the DC input range of the codec. */
#define CXT5066_OLPC_EXT_MIC_BIAS PIN_VREF50

/* stereo only */
static const struct hda_channel_mode cxt5066_modes[1] = {
	{ 2, NULL },
};

/* bits of conexant_spec.hp_present: which HP port has a plug */
#define HP_PRESENT_PORT_A	(1 << 0)
#define HP_PRESENT_PORT_D	(1 << 1)
#define hp_port_a_present(spec)	((spec)->hp_present & HP_PRESENT_PORT_A)
#define hp_port_d_present(spec)	((spec)->hp_present & HP_PRESENT_PORT_D)
/* re-program the output pins according to the cached headphone-presence
 * bits and the EAPD master switch state */
static void cxt5066_update_speaker(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	unsigned int pinctl;

	snd_printdd("CXT5066: update speaker, hp_present=%d, cur_eapd=%d\n",
		    spec->hp_present, spec->cur_eapd);

	/* Port A (HP) */
	pinctl = (hp_port_a_present(spec) && spec->cur_eapd) ? PIN_HP : 0;
	snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
			    pinctl);

	/* Port D (HP/LO) */
	pinctl = spec->cur_eapd ? spec->port_d_mode : 0;
	if (spec->dell_automute || spec->thinkpad) {
		/* Mute if Port A is connected */
		if (hp_port_a_present(spec))
			pinctl = 0;
	} else {
		/* Thinkpad/Dell doesn't give pin-D status */
		if (!hp_port_d_present(spec))
			pinctl = 0;
	}
	snd_hda_codec_write(codec, 0x1c, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
			    pinctl);

	/* CLASS_D AMP: internal speaker active only with no HP anywhere */
	pinctl = (!spec->hp_present && spec->cur_eapd) ? PIN_OUT : 0;
	snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
			    pinctl);
}
/* turn on/off EAPD (+ mute HP) as a master switch */
static int cxt5066_hp_master_sw_put(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	int changed = cxt_eapd_put(kcontrol, ucontrol);

	if (!changed)
		return 0;
	/* EAPD toggled: re-apply the speaker/HP pin routing */
	cxt5066_update_speaker(codec);
	return 1;
}
/* selectable bias levels for the OLPC DC-coupled sensor input; the item
 * .index values are the PIN_* control bits written to the port-B pin */
static const struct hda_input_mux cxt5066_olpc_dc_bias = {
	.num_items = 3,
	.items = {
		{ "Off", PIN_IN },
		{ "50%", PIN_VREF50 },
		{ "80%", PIN_VREF80 },
	},
};
/* apply the currently selected DC bias level; returns the codec-write
 * result. spec->dc_input_bias indexes cxt5066_olpc_dc_bias.items[]. */
static int cxt5066_set_olpc_dc_bias(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	/* Even though port F is the DC input, the bias is controlled on port B.
	 * we also leave that port as an active input (but unselected) in DC mode
	 * just in case that is necessary to make the bias setting take effect. */
	return snd_hda_codec_write_cache(codec, 0x1a, 0,
		AC_VERB_SET_PIN_WIDGET_CONTROL,
		cxt5066_olpc_dc_bias.items[spec->dc_input_bias].index);
}
/* OLPC defers mic widget control until when capture is started because the
 * microphone LED comes on as soon as these settings are put in place. if we
 * did this before recording, it would give the false indication that recording
 * is happening when it is not. */
static void cxt5066_olpc_select_mic(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;

	/* no-op unless a capture stream is active (see capture_prepare) */
	if (!spec->recording)
		return;

	if (spec->dc_enable) {
		/* in DC mode we ignore presence detection and just use the jack
		 * through our special DC port */
		const struct hda_verb enable_dc_mode[] = {
			/* disable internal mic, port C */
			{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
			/* enable DC capture, port F */
			{0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
			{},
		};

		snd_hda_sequence_write(codec, enable_dc_mode);
		/* port B input disabled (and bias set) through the following call */
		cxt5066_set_olpc_dc_bias(codec);
		return;
	}

	/* disable DC (port F) */
	snd_hda_codec_write(codec, 0x1e, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0);

	/* external mic, port B */
	snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
		spec->ext_mic_present ? CXT5066_OLPC_EXT_MIC_BIAS : 0);

	/* internal mic, port C */
	snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
		spec->ext_mic_present ? 0 : PIN_VREF80);
}
/* toggle input of built-in and mic jack appropriately */
static void cxt5066_olpc_automic(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	unsigned int present;

	if (spec->dc_enable) /* don't do presence detection in DC mode */
		return;

	/* use the common jack-detect helper instead of open-coding the
	 * AC_VERB_GET_PIN_SENSE read and presence-bit mask (0x80000000),
	 * for consistency with every other automic handler in this file */
	present = snd_hda_jack_detect(codec, 0x1a);
	if (present)
		snd_printdd("CXT5066: external microphone detected\n");
	else
		snd_printdd("CXT5066: external microphone absent\n");

	/* capture mux 0x17: 0 when the external mic (port B) is plugged,
	 * 1 otherwise — presumably the internal mic input; matches the
	 * routing used by the vostro handler (TODO confirm on hardware) */
	snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
			    present ? 0 : 1);
	spec->ext_mic_present = !!present;

	/* pin power-up is deferred to select_mic (mic-LED handling) */
	cxt5066_olpc_select_mic(codec);
}
/* toggle input of built-in digital mic and mic jack appropriately */
static void cxt5066_vostro_automic(struct hda_codec *codec)
{
	unsigned int present;
	/* static const like ext_mic_absent below: the table is read-only,
	 * so there is no reason to rebuild it on the stack on every
	 * jack event (snd_hda_sequence_write takes a const pointer) */
	static const struct hda_verb ext_mic_present[] = {
		/* enable external mic, port B */
		{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
		/* switch to external mic input */
		{0x17, AC_VERB_SET_CONNECT_SEL, 0},
		{0x14, AC_VERB_SET_CONNECT_SEL, 0},
		/* disable internal digital mic */
		{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
		{}
	};
	static const struct hda_verb ext_mic_absent[] = {
		/* enable internal mic, port C */
		{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
		/* switch to internal mic input */
		{0x14, AC_VERB_SET_CONNECT_SEL, 2},
		/* disable external mic, port B */
		{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
		{}
	};

	/* presence at port B (0x1a) selects the capture source */
	present = snd_hda_jack_detect(codec, 0x1a);
	if (present) {
		snd_printdd("CXT5066: external microphone detected\n");
		snd_hda_sequence_write(codec, ext_mic_present);
	} else {
		snd_printdd("CXT5066: external microphone absent\n");
		snd_hda_sequence_write(codec, ext_mic_absent);
	}
}
/* toggle input of built-in digital mic and mic jack appropriately */
static void cxt5066_ideapad_automic(struct hda_codec *codec)
{
	unsigned int present;
	/* static const like ext_mic_absent below: the table is read-only,
	 * so avoid rebuilding it on the stack on every jack event */
	static const struct hda_verb ext_mic_present[] = {
		{0x14, AC_VERB_SET_CONNECT_SEL, 0},
		{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
		{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
		{}
	};
	static const struct hda_verb ext_mic_absent[] = {
		{0x14, AC_VERB_SET_CONNECT_SEL, 2},
		{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
		{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
		{}
	};

	/* presence at port C (0x1b) selects the capture source */
	present = snd_hda_jack_detect(codec, 0x1b);
	if (present) {
		snd_printdd("CXT5066: external microphone detected\n");
		snd_hda_sequence_write(codec, ext_mic_present);
	} else {
		snd_printdd("CXT5066: external microphone absent\n");
		snd_hda_sequence_write(codec, ext_mic_absent);
	}
}
/* toggle input of built-in digital mic and mic jack appropriately */
static void cxt5066_asus_automic(struct hda_codec *codec)
{
	unsigned int plugged = snd_hda_jack_detect(codec, 0x1b);

	snd_printdd("CXT5066: external microphone present=%d\n", plugged);
	/* mux 0x17: input 1 while the jack is occupied, 0 otherwise */
	snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
			    plugged ? 1 : 0);
}
/* toggle input of built-in digital mic and mic jack appropriately */
static void cxt5066_hp_laptop_automic(struct hda_codec *codec)
{
	unsigned int plugged = snd_hda_jack_detect(codec, 0x1b);

	snd_printdd("CXT5066: external microphone present=%d\n", plugged);
	/* mux 0x17: input 1 while the jack is occupied, 3 otherwise */
	snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
			    plugged ? 1 : 3);
}
/* toggle input of built-in digital mic and mic jack appropriately
   order is: external mic -> dock mic -> internal mic */
static void cxt5066_thinkpad_automic(struct hda_codec *codec)
{
	unsigned int ext_present, dock_present;

	static const struct hda_verb ext_mic_present[] = {
		{0x14, AC_VERB_SET_CONNECT_SEL, 0},
		{0x17, AC_VERB_SET_CONNECT_SEL, 1},
		{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
		{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
		{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
		{}
	};
	static const struct hda_verb dock_mic_present[] = {
		{0x14, AC_VERB_SET_CONNECT_SEL, 0},
		{0x17, AC_VERB_SET_CONNECT_SEL, 0},
		{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
		{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
		{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
		{}
	};
	static const struct hda_verb ext_mic_absent[] = {
		{0x14, AC_VERB_SET_CONNECT_SEL, 2},
		{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
		{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
		{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
		{}
	};

	/* external mic jack = port C (0x1b), dock mic = port B (0x1a) */
	ext_present = snd_hda_jack_detect(codec, 0x1b);
	dock_present = snd_hda_jack_detect(codec, 0x1a);
	if (ext_present) {
		snd_printdd("CXT5066: external microphone detected\n");
		snd_hda_sequence_write(codec, ext_mic_present);
	} else if (dock_present) {
		snd_printdd("CXT5066: dock microphone detected\n");
		snd_hda_sequence_write(codec, dock_mic_present);
	} else {
		snd_printdd("CXT5066: external microphone absent\n");
		snd_hda_sequence_write(codec, ext_mic_absent);
	}
}
/* mute internal speaker if HP is plugged */
static void cxt5066_hp_automute(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	unsigned int port_a = snd_hda_jack_detect(codec, 0x19);
	unsigned int port_d = snd_hda_jack_detect(codec, 0x1c);
	unsigned int mask = 0;

	/* fold both jack states into the presence bitmask */
	if (port_a)
		mask |= HP_PRESENT_PORT_A;
	if (port_d)
		mask |= HP_PRESENT_PORT_D;
	spec->hp_present = mask;

	snd_printdd("CXT5066: hp automute portA=%x portD=%x present=%d\n",
		    port_a, port_d, spec->hp_present);
	cxt5066_update_speaker(codec);
}
/* Dispatch the right mic autoswitch function */
static void cxt5066_automic(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;

	/* NOTE(review): first matching model flag wins; the flags appear
	 * to be set mutually exclusively at probe time — confirm in the
	 * patch/probe function before relying on the ordering here */
	if (spec->dell_vostro)
		cxt5066_vostro_automic(codec);
	else if (spec->ideapad)
		cxt5066_ideapad_automic(codec);
	else if (spec->thinkpad)
		cxt5066_thinkpad_automic(codec);
	else if (spec->hp_laptop)
		cxt5066_hp_laptop_automic(codec);
	else if (spec->asus)
		cxt5066_asus_automic(codec);
}
/* unsolicited event for jack sensing */
static void cxt5066_olpc_unsol_event(struct hda_codec *codec, unsigned int res)
{
	struct conexant_spec *spec = codec->spec;
	unsigned int tag = res >> 26;	/* event tag in bits 31:26 */

	snd_printdd("CXT5066: unsol event %x (%x)\n", res, res >> 26);
	if (tag == CONEXANT_HP_EVENT) {
		cxt5066_hp_automute(codec);
	} else if (tag == CONEXANT_MIC_EVENT) {
		/* ignore mic events in DC mode; we're always using the jack */
		if (!spec->dc_enable)
			cxt5066_olpc_automic(codec);
	}
}
/* unsolicited event for jack sensing */
static void cxt5066_unsol_event(struct hda_codec *codec, unsigned int res)
{
	unsigned int tag = res >> 26;	/* event tag in bits 31:26 */

	snd_printdd("CXT5066: unsol event %x (%x)\n", res, res >> 26);
	if (tag == CONEXANT_HP_EVENT)
		cxt5066_hp_automute(codec);
	else if (tag == CONEXANT_MIC_EVENT)
		cxt5066_automic(codec);
}
/* selectable analog mic boost steps; the item .index values are the raw
 * gain steps written to the amp (see cxt5066_set_mic_boost) */
static const struct hda_input_mux cxt5066_analog_mic_boost = {
	.num_items = 5,
	.items = {
		{ "0dB",  0 },
		{ "10dB", 1 },
		{ "20dB", 2 },
		{ "30dB", 3 },
		{ "40dB", 4 },
	},
};
/* program the currently selected mic boost step into the hardware;
 * spec->mic_boost indexes cxt5066_analog_mic_boost.items[] */
static void cxt5066_set_mic_boost(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;

	/* analog mic path: output amp of the capture mux 0x17 */
	snd_hda_codec_write_cache(codec, 0x17, 0,
		AC_VERB_SET_AMP_GAIN_MUTE,
		AC_AMP_SET_RIGHT | AC_AMP_SET_LEFT | AC_AMP_SET_OUTPUT |
		cxt5066_analog_mic_boost.items[spec->mic_boost].index);
	if (spec->ideapad || spec->thinkpad) {
		/* adjust the internal mic as well...it is not through 0x17 */
		snd_hda_codec_write_cache(codec, 0x23, 0,
			AC_VERB_SET_AMP_GAIN_MUTE,
			AC_AMP_SET_RIGHT | AC_AMP_SET_LEFT | AC_AMP_SET_INPUT |
			cxt5066_analog_mic_boost.
				items[spec->mic_boost].index);
	}
}
/* kcontrol .info: expose the boost table as an enumerated control */
static int cxt5066_mic_boost_mux_enum_info(struct snd_kcontrol *kcontrol,
					   struct snd_ctl_elem_info *uinfo)
{
	return snd_hda_input_mux_info(&cxt5066_analog_mic_boost, uinfo);
}
/* kcontrol .get: report the cached boost selection */
static int cxt5066_mic_boost_mux_enum_get(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;

	ucontrol->value.enumerated.item[0] = spec->mic_boost;
	return 0;
}
/* kcontrol .put: store a new boost selection and apply it (unless the
 * codec is in OLPC DC mode, where the boost amp is repurposed) */
static int cxt5066_mic_boost_mux_enum_put(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	unsigned int sel = ucontrol->value.enumerated.item[0];
	unsigned int last = cxt5066_analog_mic_boost.num_items - 1;

	/* clamp out-of-range selections to the highest boost step */
	spec->mic_boost = (sel > last) ? last : sel;
	if (!spec->dc_enable)
		cxt5066_set_mic_boost(codec);
	return 1;
}
/* switch the OLPC capture path into DC-sensor mode */
static void cxt5066_enable_dc(struct hda_codec *codec)
{
	const struct hda_verb enable_dc_mode[] = {
		/* disable gain */
		{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
		/* switch to DC input */
		{0x17, AC_VERB_SET_CONNECT_SEL, 3},
		{}
	};

	/* configure as input source */
	snd_hda_sequence_write(codec, enable_dc_mode);
	cxt5066_olpc_select_mic(codec); /* also sets configured bias */
}
/* leave DC-sensor mode and restore normal mic capture */
static void cxt5066_disable_dc(struct hda_codec *codec)
{
	/* reconfigure input source */
	cxt5066_set_mic_boost(codec);
	/* automic also selects the right mic if we're recording */
	cxt5066_olpc_automic(codec);
}
/* kcontrol .get: report whether DC mode is currently enabled */
static int cxt5066_olpc_dc_get(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;

	ucontrol->value.integer.value[0] = spec->dc_enable;
	return 0;
}
/* kcontrol .put: toggle DC mode; returns 1 when the state changed */
static int cxt5066_olpc_dc_put(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	int enable = ucontrol->value.integer.value[0] ? 1 : 0;

	if (enable == spec->dc_enable)
		return 0;	/* nothing to do */

	spec->dc_enable = enable;
	if (enable)
		cxt5066_enable_dc(codec);
	else
		cxt5066_disable_dc(codec);
	return 1;
}
/* kcontrol .info: expose the DC-bias table as an enumerated control */
static int cxt5066_olpc_dc_bias_enum_info(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_info *uinfo)
{
	return snd_hda_input_mux_info(&cxt5066_olpc_dc_bias, uinfo);
}
/* kcontrol .get: report the cached DC bias selection */
static int cxt5066_olpc_dc_bias_enum_get(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;

	ucontrol->value.enumerated.item[0] = spec->dc_input_bias;
	return 0;
}
static int cxt5066_olpc_dc_bias_enum_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct conexant_spec *spec = codec->spec;
const struct hda_input_mux *imux = &cxt5066_analog_mic_boost;
unsigned int idx;
idx = ucontrol->value.enumerated.item[0];
if (idx >= imux->num_items)
idx = imux->num_items - 1;
spec->dc_input_bias = idx;
if (spec->dc_enable)
cxt5066_set_olpc_dc_bias(codec);
return 1;
}
/* PCM capture-prepare hook for OLPC */
static void cxt5066_olpc_capture_prepare(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;

	/* mark as recording and configure the microphone widget so that the
	 * recording LED comes on. */
	spec->recording = 1;
	cxt5066_olpc_select_mic(codec);
}
/* PCM capture-cleanup hook for OLPC: power down all mic pins so the
 * recording LED goes off, and clear the recording flag */
static void cxt5066_olpc_capture_cleanup(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	const struct hda_verb disable_mics[] = {
		/* disable external mic, port B */
		{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
		/* disable internal mic, port C */
		{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
		/* disable DC capture, port F */
		{0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
		{},
	};

	snd_hda_sequence_write(codec, disable_mics);
	spec->recording = 0;
}
/* scan the digital-output pins and record usable SPDIF converters:
 * the first hit goes into multiout.dig_out_nid, later hits into the
 * slave_dig_outs[] list */
static void conexant_check_dig_outs(struct hda_codec *codec,
				    const hda_nid_t *dig_pins,
				    int num_pins)
{
	struct conexant_spec *spec = codec->spec;
	/* location where the next discovered converter NID is stored */
	hda_nid_t *nid_loc = &spec->multiout.dig_out_nid;
	int i;

	for (i = 0; i < num_pins; i++, dig_pins++) {
		unsigned int cfg = snd_hda_codec_get_pincfg(codec, *dig_pins);
		/* skip pins whose default config says nothing is attached */
		if (get_defcfg_connect(cfg) == AC_JACK_PORT_NONE)
			continue;
		/* writes the pin's single source converter into *nid_loc;
		 * skip pins that don't have exactly one connection */
		if (snd_hda_get_connections(codec, *dig_pins, nid_loc, 1) != 1)
			continue;
		/* NOTE(review): advance the store slot for the next hit —
		 * step past an already-filled slave slot, otherwise move
		 * from dig_out_nid to the slave list; this appears to rely
		 * on slave_dig_outs[] being zero-initialized (kzalloc'ed
		 * spec) — confirm against the spec allocation */
		if (spec->slave_dig_outs[0])
			nid_loc++;
		else
			nid_loc = spec->slave_dig_outs;
	}
}
/* capture source selector exposed to userspace; indexes match the
 * inputs of the capture mux widget */
static const struct hda_input_mux cxt5066_capture_source = {
	.num_items = 4,
	.items = {
		{ "Mic B", 0 },
		{ "Mic C", 1 },
		{ "Mic E", 2 },
		{ "Mic F", 3 },
	},
};

/* bind the volume of ADC 0x14 inputs 0 and 2 into one control */
static const struct hda_bind_ctls cxt5066_bind_capture_vol_others = {
	.ops = &snd_hda_bind_vol,
	.values = {
		HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_INPUT),
		HDA_COMPOSE_AMP_VAL(0x14, 3, 2, HDA_INPUT),
		0
	},
};

/* bind the mute switches of the same two inputs into one control */
static const struct hda_bind_ctls cxt5066_bind_capture_sw_others = {
	.ops = &snd_hda_bind_sw,
	.values = {
		HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_INPUT),
		HDA_COMPOSE_AMP_VAL(0x14, 3, 2, HDA_INPUT),
		0
	},
};
/* plain master volume on DAC 0x10 */
static const struct snd_kcontrol_new cxt5066_mixer_master[] = {
	HDA_CODEC_VOLUME("Master Playback Volume", 0x10, 0x00, HDA_OUTPUT),
	{}
};

/* OLPC master volume with a raised floor (see .private_value comment) */
static const struct snd_kcontrol_new cxt5066_mixer_master_olpc[] = {
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Master Playback Volume",
		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
				  SNDRV_CTL_ELEM_ACCESS_TLV_READ |
				  SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
		.subdevice = HDA_SUBDEV_AMP_FLAG,
		.info = snd_hda_mixer_amp_volume_info,
		.get = snd_hda_mixer_amp_volume_get,
		.put = snd_hda_mixer_amp_volume_put,
		.tlv = { .c = snd_hda_mixer_amp_tlv },
		/* offset by 28 volume steps to limit minimum gain to -46dB */
		.private_value =
			HDA_COMPOSE_AMP_VAL_OFS(0x10, 3, 0, HDA_OUTPUT, 28),
	},
	{}
};

/* OLPC DC-sensor controls (mode switch + bias selector) */
static const struct snd_kcontrol_new cxt5066_mixer_olpc_dc[] = {
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "DC Mode Enable Switch",
		.info = snd_ctl_boolean_mono_info,
		.get = cxt5066_olpc_dc_get,
		.put = cxt5066_olpc_dc_put,
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "DC Input Bias Enum",
		.info = cxt5066_olpc_dc_bias_enum_info,
		.get = cxt5066_olpc_dc_bias_enum_get,
		.put = cxt5066_olpc_dc_bias_enum_put,
	},
	{}
};

/* common CX5066 controls: EAPD master switch, mic boost and the
 * bound capture volume/switch pairs */
static const struct snd_kcontrol_new cxt5066_mixers[] = {
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Master Playback Switch",
		.info = cxt_eapd_info,
		.get = cxt_eapd_get,
		.put = cxt5066_hp_master_sw_put,
		.private_value = 0x1d,
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Analog Mic Boost Capture Enum",
		.info = cxt5066_mic_boost_mux_enum_info,
		.get = cxt5066_mic_boost_mux_enum_get,
		.put = cxt5066_mic_boost_mux_enum_put,
	},
	HDA_BIND_VOL("Capture Volume", &cxt5066_bind_capture_vol_others),
	HDA_BIND_SW("Capture Switch", &cxt5066_bind_capture_sw_others),
	{}
};

/* Dell Vostro extra control: boost for the internal digital mic (0x23) */
static const struct snd_kcontrol_new cxt5066_vostro_mixers[] = {
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Internal Mic Boost Capture Enum",
		.info = cxt5066_mic_boost_mux_enum_info,
		.get = cxt5066_mic_boost_mux_enum_get,
		.put = cxt5066_mic_boost_mux_enum_put,
		.private_value = 0x23 | 0x100,
	},
	{}
};
/* default CX5066 initialization: pins, amps, SPDIF routing; unsolicited
 * events are left disabled here (model verbs enable what they need) */
static const struct hda_verb cxt5066_init_verbs[] = {
	{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, /* Port B */
	{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, /* Port C */
	{0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Port F */
	{0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Port E */
	/* Speakers */
	{0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x1f, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
	/* HP, Amp */
	{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
	{0x19, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
	{0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
	{0x1c, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
	/* DAC1 */
	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
	/* Node 14 connections: 0x17 0x18 0x23 0x24 0x27 */
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x50},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2) | 0x50},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
	/* no digital microphone support yet */
	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	/* Audio input selector */
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x3},
	/* SPDIF route: PCM */
	{0x20, AC_VERB_SET_CONNECT_SEL, 0x0},
	{0x22, AC_VERB_SET_CONNECT_SEL, 0x0},
	{0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	/* EAPD */
	{0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
	/* not handling these yet */
	{0x19, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
	{0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
	{0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
	{0x1c, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
	{0x1d, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
	{0x1e, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
	{0x20, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
	{0x22, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
	{ } /* end */
};
/* OLPC XO initialization: mic pins start powered down (LED policy, see
 * cxt5066_olpc_select_mic), SPDIF disabled, HP/mic events enabled */
static const struct hda_verb cxt5066_init_verbs_olpc[] = {
	/* Port A: headphones */
	{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
	{0x19, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
	/* Port B: external microphone */
	{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	/* Port C: internal microphone */
	{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	/* Port D: unused */
	{0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	/* Port E: unused, but has primary EAPD */
	{0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	{0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
	/* Port F: external DC input through microphone port */
	{0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	/* Port G: internal speakers */
	{0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x1f, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
	/* DAC1 */
	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
	/* DAC2: unused */
	{0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x50},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
	{0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
	{0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
	{0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
	{0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
	/* Disable digital microphone port */
	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	/* Audio input selectors */
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x3},
	{0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
	/* Disable SPDIF */
	{0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	{0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	/* enable unsolicited events for Port A and B */
	{0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
	{0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
	{ } /* end */
};
/* Dell Vostro initialization: digital mic enabled, all analog capture
 * amps muted, SPDIF disabled, HP/mic events enabled */
static const struct hda_verb cxt5066_init_verbs_vostro[] = {
	/* Port A: headphones */
	{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	{0x19, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
	/* Port B: external microphone */
	{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	/* Port C: unused */
	{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	/* Port D: unused */
	{0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	/* Port E: unused, but has primary EAPD */
	{0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	{0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
	/* Port F: unused */
	{0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	/* Port G: internal speakers */
	{0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
	{0x1f, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
	/* DAC1 */
	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
	/* DAC2: unused */
	{0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
	{0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
	{0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
	{0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
	{0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
	/* Digital microphone port */
	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
	/* Audio input selectors */
	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x3},
	{0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
	/* Disable SPDIF */
	{0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	{0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
	/* enable unsolicited events for Port A and B */
	{0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
	{0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
	{ } /* end */
};
/*
 * One-time init verbs for Lenovo IdeaPad-style machines: DAC1 feeds
 * speaker (0x1f) and both HP pins (0x19/0x1c), capture mixer 0x14
 * defaults to the internal mic input, S/PDIF stays routed to PCM,
 * and jack events are armed on port A (HP) and port C (mic).
 */
static const struct hda_verb cxt5066_init_verbs_ideapad[] = {
{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, /* Port B */
{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, /* Port C */
{0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Port F */
{0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Port E */
/* Speakers */
{0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
{0x1f, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
/* HP, Amp */
{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
{0x19, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
{0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
{0x1c, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
/* DAC1 */
{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
/* Node 14 connections: 0x17 0x18 0x23 0x24 0x27 */
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x50},
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2) | 0x50},
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
{0x14, AC_VERB_SET_CONNECT_SEL, 2}, /* default to internal mic */
/* Audio input selector */
{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x2},
{0x17, AC_VERB_SET_CONNECT_SEL, 1}, /* route ext mic */
/* SPDIF route: PCM */
{0x20, AC_VERB_SET_CONNECT_SEL, 0x0},
{0x22, AC_VERB_SET_CONNECT_SEL, 0x0},
{0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
{0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
/* internal microphone */
{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable internal mic */
/* EAPD */
{0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
/* jack events: port A = headphone, port C = external mic */
{0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
{0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
{ } /* end */
};
/*
 * One-time init verbs for Lenovo ThinkPads: like the ideapad set but
 * with the HP/mic pins (ports A-D) left disabled until jack detection
 * enables them, and unsolicited events armed on all four of them
 * (laptop + dock headphone and mic).
 */
static const struct hda_verb cxt5066_init_verbs_thinkpad[] = {
{0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Port F */
{0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Port E */
/* Port G: internal speakers */
{0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
{0x1f, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
/* Port A: HP, Amp */
{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
{0x19, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
/* Port B: Mic Dock */
{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
/* Port C: Mic */
{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
/* Port D: HP Dock, Amp */
{0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
{0x1c, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
/* DAC1 */
{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
/* Node 14 connections: 0x17 0x18 0x23 0x24 0x27 */
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x50},
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2) | 0x50},
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
{0x14, AC_VERB_SET_CONNECT_SEL, 2}, /* default to internal mic */
/* Audio input selector */
{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x2},
{0x17, AC_VERB_SET_CONNECT_SEL, 1}, /* route ext mic */
/* SPDIF route: PCM */
{0x20, AC_VERB_SET_CONNECT_SEL, 0x0},
{0x22, AC_VERB_SET_CONNECT_SEL, 0x0},
{0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
{0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
/* internal microphone */
{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable internal mic */
/* EAPD */
{0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
/* enable unsolicited events for Port A, B, C and D */
{0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
{0x1c, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
{0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
{0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
{ } /* end */
};
/*
 * Extra verb appended for boards whose port D (0x1c) is a plain
 * line-out rather than a headphone amp (used with port_d_mode = PIN_OUT).
 */
static const struct hda_verb cxt5066_init_verbs_portd_lo[] = {
{0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
{ } /* end */
};
/*
 * Extra verbs for HP laptops (and Asus variants): reset the capture
 * mixer's input selection and arm headphone (port A) and mic (port C)
 * jack events.
 */
static const struct hda_verb cxt5066_init_verbs_hp_laptop[] = {
{0x14, AC_VERB_SET_CONNECT_SEL, 0x0},
{0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
{0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
{ } /* end */
};
/* initialize jack-sensing, too */
static int cxt5066_init(struct hda_codec *codec)
{
snd_printdd("CXT5066: init\n");
conexant_init(codec);
if (codec->patch_ops.unsol_event) {
cxt5066_hp_automute(codec);
cxt5066_automic(codec);
}
cxt5066_set_mic_boost(codec);
return 0;
}
/*
 * OLPC XO-1.5 codec init: after the common initialization and HP mute
 * refresh, either keep the DC-input mode enabled or restore normal
 * mic boost plus automatic mic selection.
 */
static int cxt5066_olpc_init(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;

	snd_printdd("CXT5066: init\n");
	conexant_init(codec);
	cxt5066_hp_automute(codec);
	if (spec->dc_enable) {
		cxt5066_enable_dc(codec);
	} else {
		cxt5066_set_mic_boost(codec);
		cxt5066_olpc_automic(codec);
	}
	return 0;
}
/* board model identifiers for the CXT5066 quirk table below */
enum {
CXT5066_LAPTOP, /* Laptops w/ EAPD support */
CXT5066_DELL_LAPTOP, /* Dell Laptop */
CXT5066_OLPC_XO_1_5, /* OLPC XO 1.5 */
CXT5066_DELL_VOSTRO, /* Dell Vostro 1015i */
CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */
CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */
CXT5066_ASUS, /* Asus K52JU, Lenovo G560 - Int mic at 0x1a and Ext mic at 0x1b */
CXT5066_HP_LAPTOP, /* HP Laptop */
CXT5066_AUTO, /* BIOS auto-parser */
CXT5066_MODELS /* number of models, not a real model */
};
/* model name strings accepted by the "model=" module option */
static const char * const cxt5066_models[CXT5066_MODELS] = {
[CXT5066_LAPTOP] = "laptop",
[CXT5066_DELL_LAPTOP] = "dell-laptop",
[CXT5066_OLPC_XO_1_5] = "olpc-xo-1_5",
[CXT5066_DELL_VOSTRO] = "dell-vostro",
[CXT5066_IDEAPAD] = "ideapad",
[CXT5066_THINKPAD] = "thinkpad",
[CXT5066_ASUS] = "asus",
[CXT5066_HP_LAPTOP] = "hp-laptop",
[CXT5066_AUTO] = "auto",
};
/* PCI subsystem-ID -> model mapping used by snd_hda_check_board_config() */
static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT5066_AUTO),
SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
SND_PCI_QUIRK(0x1043, 0x1993, "Asus U50F", CXT5066_ASUS),
SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board",
CXT5066_LAPTOP),
SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T510", CXT5066_AUTO),
SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
SND_PCI_QUIRK(0x1b0a, 0x2092, "CyberpowerPC Gamer Xplorer N57001", CXT5066_AUTO),
{}
};
/*
 * Probe entry point for CXT5066-family codecs.
 *
 * Resolves the board model from the module option / quirk table
 * (falling back to the BIOS auto-parser), allocates the spec,
 * installs common defaults, then applies per-model overrides
 * (mixers, init verbs, callbacks, mic boost, S/PDIF availability).
 * Returns 0 on success or a negative errno.
 */
static int patch_cxt5066(struct hda_codec *codec)
{
struct conexant_spec *spec;
int board_config;
board_config = snd_hda_check_board_config(codec, CXT5066_MODELS,
cxt5066_models, cxt5066_cfg_tbl);
if (board_config < 0)
board_config = CXT5066_AUTO; /* model=auto as default */
if (board_config == CXT5066_AUTO)
return patch_conexant_auto(codec);
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
codec->spec = spec;
codec->patch_ops = conexant_patch_ops;
codec->patch_ops.init = conexant_init;
/* common defaults; the switch below overrides per model */
spec->dell_automute = 0;
spec->multiout.max_channels = 2;
spec->multiout.num_dacs = ARRAY_SIZE(cxt5066_dac_nids);
spec->multiout.dac_nids = cxt5066_dac_nids;
conexant_check_dig_outs(codec, cxt5066_digout_pin_nids,
ARRAY_SIZE(cxt5066_digout_pin_nids));
spec->num_adc_nids = 1;
spec->adc_nids = cxt5066_adc_nids;
spec->capsrc_nids = cxt5066_capsrc_nids;
spec->input_mux = &cxt5066_capture_source;
spec->port_d_mode = PIN_HP;
spec->num_init_verbs = 1;
spec->init_verbs[0] = cxt5066_init_verbs;
spec->num_channel_mode = ARRAY_SIZE(cxt5066_modes);
spec->channel_mode = cxt5066_modes;
spec->cur_adc = 0;
spec->cur_adc_idx = 0;
set_beep_amp(spec, 0x13, 0, HDA_OUTPUT);
/* per-model configuration */
switch (board_config) {
default:
case CXT5066_LAPTOP:
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
spec->mixers[spec->num_mixers++] = cxt5066_mixers;
break;
case CXT5066_DELL_LAPTOP:
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
spec->mixers[spec->num_mixers++] = cxt5066_mixers;
/* port D is a plain line-out on these boards */
spec->port_d_mode = PIN_OUT;
spec->init_verbs[spec->num_init_verbs] = cxt5066_init_verbs_portd_lo;
spec->num_init_verbs++;
spec->dell_automute = 1;
break;
case CXT5066_ASUS:
case CXT5066_HP_LAPTOP:
codec->patch_ops.init = cxt5066_init;
codec->patch_ops.unsol_event = cxt5066_unsol_event;
spec->init_verbs[spec->num_init_verbs] =
cxt5066_init_verbs_hp_laptop;
spec->num_init_verbs++;
spec->hp_laptop = board_config == CXT5066_HP_LAPTOP;
spec->asus = board_config == CXT5066_ASUS;
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
spec->mixers[spec->num_mixers++] = cxt5066_mixers;
/* no S/PDIF out */
if (board_config == CXT5066_HP_LAPTOP)
spec->multiout.dig_out_nid = 0;
/* input source automatically selected */
spec->input_mux = NULL;
spec->port_d_mode = 0;
spec->mic_boost = 3; /* default 30dB gain */
break;
case CXT5066_OLPC_XO_1_5:
codec->patch_ops.init = cxt5066_olpc_init;
codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event;
spec->init_verbs[0] = cxt5066_init_verbs_olpc;
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
spec->mixers[spec->num_mixers++] = cxt5066_mixer_olpc_dc;
spec->mixers[spec->num_mixers++] = cxt5066_mixers;
spec->port_d_mode = 0;
spec->mic_boost = 3; /* default 30dB gain */
/* no S/PDIF out */
spec->multiout.dig_out_nid = 0;
/* input source automatically selected */
spec->input_mux = NULL;
/* our capture hooks which allow us to turn on the microphone LED
* at the right time */
spec->capture_prepare = cxt5066_olpc_capture_prepare;
spec->capture_cleanup = cxt5066_olpc_capture_cleanup;
break;
case CXT5066_DELL_VOSTRO:
codec->patch_ops.init = cxt5066_init;
codec->patch_ops.unsol_event = cxt5066_unsol_event;
spec->init_verbs[0] = cxt5066_init_verbs_vostro;
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
spec->mixers[spec->num_mixers++] = cxt5066_mixers;
spec->mixers[spec->num_mixers++] = cxt5066_vostro_mixers;
spec->port_d_mode = 0;
spec->dell_vostro = 1;
spec->mic_boost = 3; /* default 30dB gain */
/* no S/PDIF out */
spec->multiout.dig_out_nid = 0;
/* input source automatically selected */
spec->input_mux = NULL;
break;
case CXT5066_IDEAPAD:
codec->patch_ops.init = cxt5066_init;
codec->patch_ops.unsol_event = cxt5066_unsol_event;
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
spec->mixers[spec->num_mixers++] = cxt5066_mixers;
spec->init_verbs[0] = cxt5066_init_verbs_ideapad;
spec->port_d_mode = 0;
spec->ideapad = 1;
spec->mic_boost = 2; /* default 20dB gain */
/* no S/PDIF out */
spec->multiout.dig_out_nid = 0;
/* input source automatically selected */
spec->input_mux = NULL;
break;
case CXT5066_THINKPAD:
codec->patch_ops.init = cxt5066_init;
codec->patch_ops.unsol_event = cxt5066_unsol_event;
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
spec->mixers[spec->num_mixers++] = cxt5066_mixers;
spec->init_verbs[0] = cxt5066_init_verbs_thinkpad;
spec->thinkpad = 1;
spec->port_d_mode = PIN_OUT;
spec->mic_boost = 2; /* default 20dB gain */
/* no S/PDIF out */
spec->multiout.dig_out_nid = 0;
/* input source automatically selected */
spec->input_mux = NULL;
break;
}
if (spec->beep_amp)
snd_hda_attach_beep_device(codec, spec->beep_amp);
return 0;
}
/*
* Automatic parser for CX20641 & co
*/
/*
 * PCM prepare callback for auto-parser capture: start the ADC that
 * belongs to the currently selected input.  When multiple ADCs are in
 * play, remember the stream parameters so the ADC can be swapped on
 * the fly by cx_auto_mux_enum_update().
 */
static int cx_auto_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
				       struct hda_codec *codec,
				       unsigned int stream_tag,
				       unsigned int format,
				       struct snd_pcm_substream *substream)
{
	struct conexant_spec *spec = codec->spec;
	hda_nid_t adc;

	adc = spec->imux_info[spec->cur_mux[0]].adc;
	if (spec->adc_switching) {
		/* stash parameters for a possible live ADC switch */
		spec->cur_adc = adc;
		spec->cur_adc_stream_tag = stream_tag;
		spec->cur_adc_format = format;
	}
	snd_hda_codec_setup_stream(codec, adc, stream_tag, 0, format);
	return 0;
}
static int cx_auto_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
struct hda_codec *codec,
struct snd_pcm_substream *substream)
{
struct conexant_spec *spec = codec->spec;
snd_hda_codec_cleanup_stream(codec, spec->cur_adc);
spec->cur_adc = 0;
return 0;
}
/*
 * Stereo analog capture stream for the auto-parser; .nid is filled in
 * at PCM build time, and prepare/cleanup drive whichever ADC the
 * current input selection uses.
 */
static const struct hda_pcm_stream cx_auto_pcm_analog_capture = {
.substreams = 1,
.channels_min = 2,
.channels_max = 2,
.nid = 0, /* fill later */
.ops = {
.prepare = cx_auto_capture_pcm_prepare,
.cleanup = cx_auto_capture_pcm_cleanup
},
};
/* default capture ADC for the auto-parser (audio-in widget 0x14) */
static const hda_nid_t cx_auto_adc_nids[] = { 0x14 };
/* index of @nid in @mux's connection list; negative when not connected */
#define get_connection_index(codec, mux, nid)\
snd_hda_get_conn_index(codec, mux, nid, 0)
/*
 * Get an unassigned DAC from the given list.
 *
 * Returns the nid of the first DAC in @dacs that is connectable to
 * @pin and removes that entry from the list (shrinking *@num_dacs),
 * or returns 0 when no listed DAC reaches the pin.
 */
static hda_nid_t get_unassigned_dac(struct hda_codec *codec, hda_nid_t pin,
				    hda_nid_t *dacs, int *num_dacs)
{
	int i, nums = *num_dacs;

	for (i = 0; i < nums; i++) {
		hda_nid_t dac = dacs[i];

		if (get_connection_index(codec, pin, dac) < 0)
			continue;
		/* Remove the *matched* entry.  The previous code shifted
		 * the whole list left and thus always dropped dacs[0]:
		 * a DAC matched at i > 0 stayed in the list and could be
		 * handed out twice, while an unassigned DAC was lost.
		 */
		nums--;
		if (i < nums)
			memmove(dacs + i, dacs + i + 1,
				(nums - i) * sizeof(hda_nid_t));
		*num_dacs = nums;
		return dac;
	}
	return 0;
}
/* upper bound on analog DACs collected by fill_cx_auto_dacs() */
#define MAX_AUTO_DACS 5
#define DAC_SLAVE_FLAG 0x8000 /* filled dac is a slave */
/*
 * Fill @dacs with all analog (non-digital) audio-out widgets found in
 * the codec's widget tree, up to MAX_AUTO_DACS entries.  Returns the
 * number of DACs collected.
 */
static int fill_cx_auto_dacs(struct hda_codec *codec, hda_nid_t *dacs)
{
	const hda_nid_t end_nid = codec->start_nid + codec->num_nodes;
	hda_nid_t nid;
	int count = 0;

	for (nid = codec->start_nid;
	     nid < end_nid && count < MAX_AUTO_DACS; nid++) {
		unsigned int wcaps = get_wcaps(codec, nid);

		if (get_wcaps_type(wcaps) != AC_WID_AUD_OUT)
			continue;
		if (wcaps & AC_WCAP_DIGITAL)
			continue;
		dacs[count++] = nid;
	}
	return count;
}
/*
 * Append one pin_dac_pair entry per pin in @pins to @filled, starting
 * at index @nums.  Each pin first tries to grab an unassigned DAC from
 * @dacs; failing that it is marked as a slave (DAC_SLAVE_FLAG) of the
 * group's first DAC, or of the very first DAC overall, whichever the
 * pin can actually reach.  Returns the new fill count.
 */
static int fill_dacs_for_pins(struct hda_codec *codec, hda_nid_t *pins,
			      int num_pins, hda_nid_t *dacs, int *rest,
			      struct pin_dac_pair *filled, int nums,
			      int type)
{
	const int start = nums;
	int i;

	for (i = 0; i < num_pins; i++, nums++) {
		hda_nid_t pin = pins[i];

		filled[nums].pin = pin;
		filled[nums].type = type;
		filled[nums].dac = get_unassigned_dac(codec, pin, dacs, rest);
		if (filled[nums].dac)
			continue;
		/* no free DAC: slave to this group's leading DAC ... */
		if (filled[start].dac &&
		    get_connection_index(codec, pin, filled[start].dac) >= 0) {
			filled[nums].dac = filled[start].dac | DAC_SLAVE_FLAG;
			continue;
		}
		/* ... or to the very first DAC that was assigned */
		if (filled[0].dac &&
		    get_connection_index(codec, pin, filled[0].dac) >= 0) {
			filled[nums].dac = filled[0].dac | DAC_SLAVE_FLAG;
			continue;
		}
		snd_printdd("Failed to find a DAC for pin 0x%x", pin);
	}
	return nums;
}
/* parse analog output paths */
/*
 * Assign DACs to line-out, HP and speaker pins (in that priority
 * order), then populate spec->multiout and decide whether jack-based
 * auto-mute (and separate line-out detection) can be enabled.
 */
static void cx_auto_parse_output(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
struct auto_pin_cfg *cfg = &spec->autocfg;
hda_nid_t dacs[MAX_AUTO_DACS];
int i, j, nums, rest;
rest = fill_cx_auto_dacs(codec, dacs);
/* parse all analog output pins */
nums = fill_dacs_for_pins(codec, cfg->line_out_pins, cfg->line_outs,
dacs, &rest, spec->dac_info, 0,
AUTO_PIN_LINE_OUT);
nums = fill_dacs_for_pins(codec, cfg->hp_pins, cfg->hp_outs,
dacs, &rest, spec->dac_info, nums,
AUTO_PIN_HP_OUT);
nums = fill_dacs_for_pins(codec, cfg->speaker_pins, cfg->speaker_outs,
dacs, &rest, spec->dac_info, nums,
AUTO_PIN_SPEAKER_OUT);
spec->dac_info_filled = nums;
/* fill multiout struct */
/* slave entries (DAC shared with another pin) are skipped here */
for (i = 0; i < nums; i++) {
hda_nid_t dac = spec->dac_info[i].dac;
if (!dac || (dac & DAC_SLAVE_FLAG))
continue;
switch (spec->dac_info[i].type) {
case AUTO_PIN_LINE_OUT:
spec->private_dac_nids[spec->multiout.num_dacs] = dac;
spec->multiout.num_dacs++;
break;
case AUTO_PIN_HP_OUT:
case AUTO_PIN_SPEAKER_OUT:
if (!spec->multiout.hp_nid) {
spec->multiout.hp_nid = dac;
break;
}
for (j = 0; j < ARRAY_SIZE(spec->multiout.extra_out_nid); j++)
if (!spec->multiout.extra_out_nid[j]) {
spec->multiout.extra_out_nid[j] = dac;
break;
}
break;
}
}
spec->multiout.dac_nids = spec->private_dac_nids;
spec->multiout.max_channels = spec->multiout.num_dacs * 2;
/* auto-mute is possible only with at least one detectable HP jack */
for (i = 0; i < cfg->hp_outs; i++) {
if (is_jack_detectable(codec, cfg->hp_pins[i])) {
spec->auto_mute = 1;
break;
}
}
/* enable line-out detection only when the line-out is a real,
* distinct jack (not an alias of HP or speaker pins) */
if (spec->auto_mute &&
cfg->line_out_pins[0] &&
cfg->line_out_type != AUTO_PIN_SPEAKER_OUT &&
cfg->line_out_pins[0] != cfg->hp_pins[0] &&
cfg->line_out_pins[0] != cfg->speaker_pins[0]) {
for (i = 0; i < cfg->line_outs; i++) {
if (is_jack_detectable(codec, cfg->line_out_pins[i])) {
spec->detect_line = 1;
break;
}
}
spec->automute_lines = spec->detect_line;
}
spec->vmaster_nid = spec->private_dac_nids[0];
}
/* forward declaration: defined after the parse helpers below */
static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins,
hda_nid_t *pins, bool on);
/*
 * Enable (PIN_OUT) or disable the given output pins, and mirror the
 * state onto their EAPD bits when the codec supports per-pin EAPD.
 */
static void do_automute(struct hda_codec *codec, int num_pins,
hda_nid_t *pins, bool on)
{
struct conexant_spec *spec = codec->spec;
int i;
for (i = 0; i < num_pins; i++)
snd_hda_codec_write(codec, pins[i], 0,
AC_VERB_SET_PIN_WIDGET_CONTROL,
on ? PIN_OUT : 0);
if (spec->pin_eapd_ctrls)
cx_auto_turn_eapd(codec, num_pins, pins, on);
}
/*
 * Return non-zero when any of the given pins has a jack plugged in.
 * Scanning stops at the first unset or non-detectable pin, matching
 * the layout of the autocfg pin arrays.
 */
static int detect_jacks(struct hda_codec *codec, int num_pins, hda_nid_t *pins)
{
	int present = 0;
	int i;

	for (i = 0; i < num_pins; i++) {
		hda_nid_t nid = pins[i];

		if (!nid || !is_jack_detectable(codec, nid))
			break;
		if (snd_hda_jack_detect(codec, nid))
			present = 1;
	}
	return present;
}
/* auto-mute/unmute speaker and line outs according to headphone jack */
/*
 * Central mute-state resolver: applies the cached hp_present /
 * line_present flags to the speaker and line-out pins (and HP EAPD).
 * Call after any jack state or auto-mute mode change.
 */
static void cx_auto_update_speakers(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
struct auto_pin_cfg *cfg = &spec->autocfg;
int on = 1;
/* turn on HP EAPD when HP jacks are present */
if (spec->pin_eapd_ctrls) {
if (spec->auto_mute)
on = spec->hp_present;
cx_auto_turn_eapd(codec, cfg->hp_outs, cfg->hp_pins, on);
}
/* mute speakers in auto-mode if HP or LO jacks are plugged */
if (spec->auto_mute)
on = !(spec->hp_present ||
(spec->detect_line && spec->line_present));
do_automute(codec, cfg->speaker_outs, cfg->speaker_pins, on);
/* toggle line-out mutes if needed, too */
/* if LO is a copy of either HP or Speaker, don't need to handle it */
if (cfg->line_out_pins[0] == cfg->hp_pins[0] ||
cfg->line_out_pins[0] == cfg->speaker_pins[0])
return;
if (spec->auto_mute) {
/* mute LO in auto-mode when HP jack is present */
if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT ||
spec->automute_lines)
on = !spec->hp_present;
else
on = 1;
}
do_automute(codec, cfg->line_outs, cfg->line_out_pins, on);
}
/* re-check the headphone jacks and re-apply the mute state */
static void cx_auto_hp_automute(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;

	if (spec->auto_mute) {
		spec->hp_present = detect_jacks(codec, cfg->hp_outs,
						cfg->hp_pins);
		cx_auto_update_speakers(codec);
	}
}
/* re-check the line-out jacks and re-apply the mute state */
static void cx_auto_line_automute(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;

	if (spec->auto_mute && spec->detect_line) {
		spec->line_present = detect_jacks(codec, cfg->line_outs,
						  cfg->line_out_pins);
		cx_auto_update_speakers(codec);
	}
}
/*
 * info callback for the "Auto-Mute Mode" enum control: two states
 * normally, three when line-outs can be auto-muted independently.
 */
static int cx_automute_mode_info(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_info *uinfo)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	static const char * const two_state[] = {
		"Disabled", "Enabled"
	};
	static const char * const three_state[] = {
		"Disabled", "Speaker Only", "Line Out+Speaker"
	};
	const char * const *names;
	unsigned int nitems;

	if (spec->automute_hp_lo) {
		names = three_state;
		nitems = 3;
	} else {
		names = two_state;
		nitems = 2;
	}
	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = nitems;
	if (uinfo->value.enumerated.item >= nitems)
		uinfo->value.enumerated.item = nitems - 1;
	strcpy(uinfo->value.enumerated.name,
	       names[uinfo->value.enumerated.item]);
	return 0;
}
/*
 * get callback for "Auto-Mute Mode": 0 = disabled, 1 = speakers only,
 * 2 = line-outs muted as well.
 */
static int cx_automute_mode_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	unsigned int val = 0;

	if (spec->auto_mute)
		val = spec->automute_lines ? 2 : 1;
	ucontrol->value.enumerated.item[0] = val;
	return 0;
}
/*
 * put callback for "Auto-Mute Mode".  Returns 0 when the value was
 * already current, 1 after applying a change, -EINVAL for an invalid
 * selection (mode 2 requires automute_hp_lo support).
 */
static int cx_automute_mode_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct conexant_spec *spec = codec->spec;
switch (ucontrol->value.enumerated.item[0]) {
case 0:
if (!spec->auto_mute)
return 0;
spec->auto_mute = 0;
break;
case 1:
if (spec->auto_mute && !spec->automute_lines)
return 0;
spec->auto_mute = 1;
spec->automute_lines = 0;
break;
case 2:
if (!spec->automute_hp_lo)
return -EINVAL;
if (spec->auto_mute && spec->automute_lines)
return 0;
spec->auto_mute = 1;
spec->automute_lines = 1;
break;
default:
return -EINVAL;
}
/* apply the new mode immediately */
cx_auto_update_speakers(codec);
return 1;
}
/* mixer control exposing the auto-mute behavior to user space */
static const struct snd_kcontrol_new cx_automute_mode_enum[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Auto-Mute Mode",
.info = cx_automute_mode_info,
.get = cx_automute_mode_get,
.put = cx_automute_mode_put,
},
{ }
};
/* info callback for "Capture Source": delegate to the parsed imux */
static int cx_auto_mux_enum_info(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_info *uinfo)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec;

	spec = codec->spec;
	return snd_hda_input_mux_info(&spec->private_imux, uinfo);
}
static int cx_auto_mux_enum_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct conexant_spec *spec = codec->spec;
ucontrol->value.enumerated.item[0] = spec->cur_mux[0];
return 0;
}
/* look for the route the given pin from mux and return the index;
* if do_select is set, actually select the route.
*/
/*
 * Recurses through ADC/selector/mixer widgets (depth limited to two
 * levels) to find @pin among @mux's connections.  Returns the
 * connection index at the top level, or -1 when no route exists.
 * On success *srcp (if given) receives the widget directly feeding
 * the pin.
 */
static int __select_input_connection(struct hda_codec *codec, hda_nid_t mux,
hda_nid_t pin, hda_nid_t *srcp,
bool do_select, int depth)
{
hda_nid_t conn[HDA_MAX_NUM_INPUTS];
int i, nums;
/* only input-path widget types can be traversed */
switch (get_wcaps_type(get_wcaps(codec, mux))) {
case AC_WID_AUD_IN:
case AC_WID_AUD_SEL:
case AC_WID_AUD_MIX:
break;
default:
return -1;
}
nums = snd_hda_get_connections(codec, mux, conn, ARRAY_SIZE(conn));
for (i = 0; i < nums; i++)
if (conn[i] == pin) {
if (do_select)
snd_hda_codec_write(codec, mux, 0,
AC_VERB_SET_CONNECT_SEL, i);
if (srcp)
*srcp = mux;
return i;
}
depth++;
if (depth == 2)
return -1;
/* not directly connected: try one more hop through each input */
for (i = 0; i < nums; i++) {
int ret = __select_input_connection(codec, conn[i], pin, srcp,
do_select, depth);
if (ret >= 0) {
if (do_select)
snd_hda_codec_write(codec, mux, 0,
AC_VERB_SET_CONNECT_SEL, i);
return i;
}
}
return -1;
}
/* select the route from @pin into @mux (no-op when unreachable) */
static void select_input_connection(struct hda_codec *codec, hda_nid_t mux,
hda_nid_t pin)
{
__select_input_connection(codec, mux, pin, NULL, true, 0);
}
/* return @pin's connection index into @mux, or -1; never selects */
static int get_input_connection(struct hda_codec *codec, hda_nid_t mux,
hda_nid_t pin)
{
return __select_input_connection(codec, mux, pin, NULL, false, 0);
}
/*
 * Switch the capture source to imux item @idx: route the pin into its
 * ADC, and if a capture stream is running on a different ADC, migrate
 * the stream to the new ADC on the fly.  Returns 1 when the selection
 * changed, 0 otherwise.
 */
static int cx_auto_mux_enum_update(struct hda_codec *codec,
const struct hda_input_mux *imux,
unsigned int idx)
{
struct conexant_spec *spec = codec->spec;
hda_nid_t adc;
int changed = 1;
if (!imux->num_items)
return 0;
if (idx >= imux->num_items)
idx = imux->num_items - 1;
if (spec->cur_mux[0] == idx)
changed = 0;
adc = spec->imux_info[idx].adc;
select_input_connection(codec, spec->imux_info[idx].adc,
spec->imux_info[idx].pin);
if (spec->cur_adc && spec->cur_adc != adc) {
/* stream is running, let's swap the current ADC */
__snd_hda_codec_cleanup_stream(codec, spec->cur_adc, 1);
spec->cur_adc = adc;
snd_hda_codec_setup_stream(codec, adc,
spec->cur_adc_stream_tag, 0,
spec->cur_adc_format);
}
spec->cur_mux[0] = idx;
return changed;
}
/* put callback for "Capture Source": apply the user's selection */
static int cx_auto_mux_enum_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct conexant_spec *spec = codec->spec;
	unsigned int sel = ucontrol->value.enumerated.item[0];

	return cx_auto_mux_enum_update(codec, &spec->private_imux, sel);
}
/* "Capture Source" mixer control backed by the auto-parsed input mux */
static const struct snd_kcontrol_new cx_auto_capture_mixers[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Source",
.info = cx_auto_mux_enum_info,
.get = cx_auto_mux_enum_get,
.put = cx_auto_mux_enum_put
},
{}
};
/*
 * Try to activate imux item @idx as the capture source.  When @detect
 * is set, the pin's jack must actually be plugged.  Returns true when
 * the item was selected.
 */
static bool select_automic(struct hda_codec *codec, int idx, bool detect)
{
	struct conexant_spec *spec = codec->spec;
	bool usable = idx >= 0;

	if (usable && detect)
		usable = snd_hda_jack_detect(codec,
					     spec->imux_info[idx].pin) != 0;
	if (usable)
		cx_auto_mux_enum_update(codec, &spec->private_imux, idx);
	return usable;
}
/*
 * Automatic mic switching: prefer a plugged external mic, then a
 * plugged dock mic, and fall back to the internal mic unconditionally.
 */
static void cx_auto_automic(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;

	if (!spec->auto_mic)
		return;
	if (select_automic(codec, spec->auto_mic_ext, true))
		return;
	if (select_automic(codec, spec->auto_mic_dock, true))
		return;
	select_automic(codec, spec->auto_mic_int, false);
}
/*
 * Unsolicited-event dispatcher for the auto-parser: route jack events
 * to the matching automute/automic handler and resync jack reports.
 */
static void cx_auto_unsol_event(struct hda_codec *codec, unsigned int res)
{
	unsigned int action = snd_hda_jack_get_action(codec, res >> 26);

	if (action == CONEXANT_HP_EVENT)
		cx_auto_hp_automute(codec);
	else if (action == CONEXANT_LINE_EVENT)
		cx_auto_line_automute(codec);
	else if (action == CONEXANT_MIC_EVENT)
		cx_auto_automic(codec);
	snd_hda_jack_report_sync(codec);
}
/* check whether the pin config is suitable for auto-mic switching;
 * auto-mic is enabled only when one int-mic and one ext- and/or
 * one dock-mic exist
 */
static void cx_auto_check_auto_mic(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	int pset[INPUT_PIN_ATTR_NORMAL + 1];	/* imux index per attr, -1 = none */
	int i;

	for (i = 0; i < ARRAY_SIZE(pset); i++)
		pset[i] = -1;
	for (i = 0; i < spec->private_imux.num_items; i++) {
		hda_nid_t pin = spec->imux_info[i].pin;
		unsigned int def_conf = snd_hda_codec_get_pincfg(codec, pin);
		int type, attr;
		attr = snd_hda_get_input_pin_attr(def_conf);
		if (attr == INPUT_PIN_ATTR_UNUSED)
			return; /* invalid entry */
		if (attr > INPUT_PIN_ATTR_NORMAL)
			attr = INPUT_PIN_ATTR_NORMAL;
		if (attr != INPUT_PIN_ATTR_INT &&
		    !is_jack_detectable(codec, pin))
			return; /* non-detectable pin */
		type = get_defcfg_device(def_conf);
		if (type != AC_JACK_MIC_IN &&
		    (attr != INPUT_PIN_ATTR_DOCK || type != AC_JACK_LINE_IN))
			return; /* no valid input type */
		if (pset[attr] >= 0)
			return; /* already occupied */
		pset[attr] = i;
	}
	/* require an internal mic plus at least one external or dock mic.
	 * BUGFIX: the dock test was missing its "< 0", so a dock mic at a
	 * non-zero imux index (a valid configuration) wrongly disabled
	 * auto-mic switching.
	 */
	if (pset[INPUT_PIN_ATTR_INT] < 0 ||
	    (pset[INPUT_PIN_ATTR_NORMAL] < 0 && pset[INPUT_PIN_ATTR_DOCK] < 0))
		return; /* no input to switch */
	spec->auto_mic = 1;
	spec->auto_mic_ext = pset[INPUT_PIN_ATTR_NORMAL];
	spec->auto_mic_dock = pset[INPUT_PIN_ATTR_DOCK];
	spec->auto_mic_int = pset[INPUT_PIN_ATTR_INT];
}
/*
 * Build the private input mux from autocfg inputs: each input pin is
 * matched to the first ADC that can reach it, recorded in imux_info,
 * and added as an imux item.  Afterwards decide whether auto-mic and
 * per-item ADC switching should be enabled.
 */
static void cx_auto_parse_input(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
struct auto_pin_cfg *cfg = &spec->autocfg;
struct hda_input_mux *imux;
int i, j;
imux = &spec->private_imux;
for (i = 0; i < cfg->num_inputs; i++) {
for (j = 0; j < spec->num_adc_nids; j++) {
hda_nid_t adc = spec->adc_nids[j];
int idx = get_input_connection(codec, adc,
cfg->inputs[i].pin);
if (idx >= 0) {
const char *label;
label = hda_get_autocfg_input_label(codec, cfg, i);
spec->imux_info[imux->num_items].index = i;
spec->imux_info[imux->num_items].boost = 0;
spec->imux_info[imux->num_items].adc = adc;
spec->imux_info[imux->num_items].pin =
cfg->inputs[i].pin;
snd_hda_add_imux_item(imux, label, idx, NULL);
break;
}
}
}
/* auto-mic only makes sense when every input made it into the mux */
if (imux->num_items >= 2 && cfg->num_inputs == imux->num_items)
cx_auto_check_auto_mic(codec);
/* ADC switching is needed when items live on different ADCs */
if (imux->num_items > 1) {
for (i = 1; i < imux->num_items; i++) {
if (spec->imux_info[i].adc != spec->imux_info[0].adc) {
spec->adc_switching = 1;
break;
}
}
}
}
/*
 * Find the digital audio-in widget that is connected to @pin.
 * Returns the widget nid, or 0 when none of the codec's digital
 * capture widgets reaches the pin.
 */
static hda_nid_t cx_auto_get_dig_in(struct hda_codec *codec, hda_nid_t pin)
{
	const hda_nid_t end_nid = codec->start_nid + codec->num_nodes;
	hda_nid_t nid;

	for (nid = codec->start_nid; nid < end_nid; nid++) {
		unsigned int wcaps = get_wcaps(codec, nid);

		if (get_wcaps_type(wcaps) != AC_WID_AUD_IN)
			continue;
		if (!(wcaps & AC_WCAP_DIGITAL))
			continue;
		if (get_connection_index(codec, nid, pin) >= 0)
			return nid;
	}
	return 0;
}
/*
 * Resolve the digital converters: the converter feeding the first
 * S/PDIF-out pin becomes dig_out_nid, and a digital-in pin (if any)
 * is mapped back to its capture widget.
 */
static void cx_auto_parse_digital(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	hda_nid_t nid;

	if (cfg->dig_outs &&
	    snd_hda_get_connections(codec, cfg->dig_out_pins[0],
				    &nid, 1) == 1)
		spec->multiout.dig_out_nid = nid;
	if (cfg->dig_in_pin)
		spec->dig_in_nid = cx_auto_get_dig_in(codec, cfg->dig_in_pin);
}
#ifdef CONFIG_SND_HDA_INPUT_BEEP
/* attach the beep device to the first beep widget found in the tree */
static void cx_auto_parse_beep(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
hda_nid_t nid, end_nid;
end_nid = codec->start_nid + codec->num_nodes;
for (nid = codec->start_nid; nid < end_nid; nid++)
if (get_wcaps_type(get_wcaps(codec, nid)) == AC_WID_BEEP) {
set_beep_amp(spec, nid, 0, HDA_OUTPUT);
break;
}
}
#else
/* beep support compiled out: parsing becomes a no-op */
#define cx_auto_parse_beep(codec)
#endif
/*
 * Collect all pins with EAPD capability into spec->eapds and decide
 * whether EAPD can be controlled per pin.
 */
static void cx_auto_parse_eapd(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	const hda_nid_t end_nid = codec->start_nid + codec->num_nodes;
	hda_nid_t nid;

	for (nid = codec->start_nid; nid < end_nid; nid++) {
		if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN)
			continue;
		if (snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_EAPD) {
			spec->eapds[spec->num_eapds++] = nid;
			if (spec->num_eapds >= ARRAY_SIZE(spec->eapds))
				break;
		}
	}
	/* NOTE: below is a wild guess; if we have more than two EAPDs,
	 * it's a new chip, where EAPDs are supposed to be associated to
	 * pins, and we can control EAPD per pin.
	 * OTOH, if only one or two EAPDs are found, it's an old chip,
	 * thus it might control over all pins.
	 */
	spec->pin_eapd_ctrls = spec->num_eapds > 2;
}
/*
 * Top-level BIOS auto-configuration parser: read the pin defaults,
 * then derive outputs, inputs, digital paths, beep and EAPD setup.
 * Returns 0 on success or the negative error from the pin parser.
 */
static int cx_auto_parse_auto_config(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	int err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, NULL);

	if (err < 0)
		return err;
	cx_auto_parse_output(codec);
	cx_auto_parse_input(codec);
	cx_auto_parse_digital(codec);
	cx_auto_parse_beep(codec);
	cx_auto_parse_eapd(codec);
	return 0;
}
/* set the EAPD bit on every EAPD-capable pin in the given list */
static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins,
			      hda_nid_t *pins, bool on)
{
	const unsigned int val = on ? 0x02 : 0x00;
	int i;

	for (i = 0; i < num_pins; i++) {
		hda_nid_t pin = pins[i];

		if (!(snd_hda_query_pin_caps(codec, pin) & AC_PINCAP_EAPD))
			continue;
		snd_hda_codec_write(codec, pin, 0,
				    AC_VERB_SET_EAPD_BTLENABLE, val);
	}
}
/* point @pin's connection selector at @src if it is connectable */
static void select_connection(struct hda_codec *codec, hda_nid_t pin,
			      hda_nid_t src)
{
	int idx = get_connection_index(codec, pin, src);

	if (idx < 0)
		return;
	snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_CONNECT_SEL, idx);
}
/*
 * Silence the output amps of the given widgets: hard-mute where the
 * amp supports it, otherwise drop the gain to zero.
 */
static void mute_outputs(struct hda_codec *codec, int num_nids,
			 const hda_nid_t *nids)
{
	int i;

	for (i = 0; i < num_nids; i++) {
		hda_nid_t nid = nids[i];
		int val;

		if (!(get_wcaps(codec, nid) & AC_WCAP_OUT_AMP))
			continue;
		if (query_amp_caps(codec, nid, HDA_OUTPUT) & AC_AMPCAP_MUTE)
			val = AMP_OUT_MUTE;
		else
			val = AMP_OUT_ZERO;
		snd_hda_codec_write(codec, nid, 0,
				    AC_VERB_SET_AMP_GAIN_MUTE, val);
	}
}
/* arm jack-detect unsolicited events with @action on each given pin */
static void enable_unsol_pins(struct hda_codec *codec, int num_pins,
			      hda_nid_t *pins, unsigned int action)
{
	hda_nid_t *p = pins;
	hda_nid_t * const end = pins + num_pins;

	while (p < end)
		snd_hda_jack_detect_enable(codec, *p++, action);
}
/* linear search: is @nid one of the first @nums entries of @list? */
static bool found_in_nid_list(hda_nid_t nid, const hda_nid_t *list, int nums)
{
	while (nums-- > 0) {
		if (*list++ == nid)
			return true;
	}
	return false;
}
/* is the given NID found in any of autocfg items? */
static bool found_in_autocfg(struct auto_pin_cfg *cfg, hda_nid_t nid)
{
	int i;

	if (found_in_nid_list(nid, cfg->line_out_pins, cfg->line_outs))
		return true;
	if (found_in_nid_list(nid, cfg->hp_pins, cfg->hp_outs))
		return true;
	if (found_in_nid_list(nid, cfg->speaker_pins, cfg->speaker_outs))
		return true;
	if (found_in_nid_list(nid, cfg->dig_out_pins, cfg->dig_outs))
		return true;
	for (i = 0; i < cfg->num_inputs; i++)
		if (cfg->inputs[i].pin == nid)
			return true;
	return cfg->dig_in_pin == nid;
}
/* clear unsol-event tags on unused pins; Conexant codecs seem to leave
 * invalid unsol tags by some reason
 */
static void clear_unsol_on_unused_pins(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int i;

	for (i = 0; i < codec->init_pins.used; i++) {
		struct hda_pincfg *pin = snd_array_elem(&codec->init_pins, i);

		if (found_in_autocfg(cfg, pin->nid))
			continue;
		snd_hda_codec_write(codec, pin->nid, 0,
				    AC_VERB_SET_UNSOLICITED_ENABLE, 0);
	}
}
/* turn on/off EAPD according to Master switch.
 * With per-pin EAPD controls, unmuting merely re-evaluates the speaker
 * routing; otherwise all EAPDs are toggled globally.
 */
static void cx_auto_vmaster_hook(void *private_data, int enabled)
{
	struct hda_codec *codec = private_data;
	struct conexant_spec *spec = codec->spec;

	if (!enabled || !spec->pin_eapd_ctrls)
		cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, enabled);
	else
		cx_auto_update_speakers(codec);
}
/* Initialize the output path: mute all output amps, enable HP pins,
 * restore the DAC routing chosen by the parser, and (re)arm jack
 * detection for the auto-mute logic.
 */
static void cx_auto_init_output(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	hda_nid_t nid;
	int i;

	mute_outputs(codec, spec->multiout.num_dacs, spec->multiout.dac_nids);
	/* enable each HP pin, with headphone driver if supported */
	for (i = 0; i < cfg->hp_outs; i++) {
		unsigned int val = PIN_OUT;
		if (snd_hda_query_pin_caps(codec, cfg->hp_pins[i]) &
		    AC_PINCAP_HP_DRV)
			val |= AC_PINCTL_HP_EN;
		snd_hda_codec_write(codec, cfg->hp_pins[i], 0,
				    AC_VERB_SET_PIN_WIDGET_CONTROL, val);
	}
	mute_outputs(codec, cfg->hp_outs, cfg->hp_pins);
	mute_outputs(codec, cfg->line_outs, cfg->line_out_pins);
	mute_outputs(codec, cfg->speaker_outs, cfg->speaker_pins);
	/* re-select the DAC feeding each output pin; slave DACs carry a
	 * flag bit that must be stripped before use */
	for (i = 0; i < spec->dac_info_filled; i++) {
		nid = spec->dac_info[i].dac;
		if (!nid)
			nid = spec->multiout.dac_nids[0];
		else if (nid & DAC_SLAVE_FLAG)
			nid &= ~DAC_SLAVE_FLAG;
		select_connection(codec, spec->dac_info[i].pin, nid);
	}
	if (spec->auto_mute) {
		/* arm HP (and optionally line) jack events and take the
		 * initial plugged/unplugged snapshot */
		enable_unsol_pins(codec, cfg->hp_outs, cfg->hp_pins,
				  CONEXANT_HP_EVENT);
		spec->hp_present = detect_jacks(codec, cfg->hp_outs,
						cfg->hp_pins);
		if (spec->detect_line) {
			enable_unsol_pins(codec, cfg->line_outs,
					  cfg->line_out_pins,
					  CONEXANT_LINE_EVENT);
			spec->line_present =
				detect_jacks(codec, cfg->line_outs,
					     cfg->line_out_pins);
		}
	}
	cx_auto_update_speakers(codec);
	/* turn on all EAPDs if no individual EAPD control is available */
	if (!spec->pin_eapd_ctrls)
		cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true);
	clear_unsol_on_unused_pins(codec);
}
/* Initialize the capture path: mute the ADC input amps, set pin
 * directions (mic pins get VREF80 bias), and arm either auto-mic
 * switching or the default input connection.
 */
static void cx_auto_init_input(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int i, val;

	for (i = 0; i < spec->num_adc_nids; i++) {
		hda_nid_t nid = spec->adc_nids[i];
		if (!(get_wcaps(codec, nid) & AC_WCAP_IN_AMP))
			continue;
		/* prefer a real mute; fall back to zero gain */
		if (query_amp_caps(codec, nid, HDA_INPUT) & AC_AMPCAP_MUTE)
			val = AMP_IN_MUTE(0);
		else
			val = AMP_IN_UNMUTE(0);
		snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
				    val);
	}
	for (i = 0; i < cfg->num_inputs; i++) {
		unsigned int type;
		/* mic pins get mic bias, all others plain input */
		if (cfg->inputs[i].type == AUTO_PIN_MIC)
			type = PIN_VREF80;
		else
			type = PIN_IN;
		snd_hda_codec_write(codec, cfg->inputs[i].pin, 0,
				    AC_VERB_SET_PIN_WIDGET_CONTROL, type);
	}
	if (spec->auto_mic) {
		/* jack events on ext/dock mics, then pick the initial
		 * source according to what is plugged */
		if (spec->auto_mic_ext >= 0) {
			snd_hda_jack_detect_enable(codec,
				cfg->inputs[spec->auto_mic_ext].pin,
				CONEXANT_MIC_EVENT);
		}
		if (spec->auto_mic_dock >= 0) {
			snd_hda_jack_detect_enable(codec,
				cfg->inputs[spec->auto_mic_dock].pin,
				CONEXANT_MIC_EVENT);
		}
		cx_auto_automic(codec);
	} else {
		/* static routing: first imux entry */
		select_input_connection(codec, spec->imux_info[0].adc,
					spec->imux_info[0].pin);
	}
}
/* Enable the digital output and input pins when S/PDIF widgets exist. */
static void cx_auto_init_digital(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;

	if (spec->multiout.dig_out_nid)
		snd_hda_codec_write(codec, cfg->dig_out_pins[0], 0,
				    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
	if (spec->dig_in_nid)
		snd_hda_codec_write(codec, cfg->dig_in_pin, 0,
				    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN);
}
/* Codec init callback: bring up outputs, inputs and digital I/O, then
 * sync jack state and the Master-mute hook with the current hardware.
 * Always returns 0.
 */
static int cx_auto_init(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	/*snd_hda_sequence_write(codec, cx_auto_init_verbs);*/
	cx_auto_init_output(codec);
	cx_auto_init_input(codec);
	cx_auto_init_digital(codec);
	snd_hda_jack_report_sync(codec);
	snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
	return 0;
}
/* Create a "<basename><dir> Volume" control and, when the amp supports
 * muting, a matching "... Switch" control bound to amp @amp_idx of @nid.
 *
 * NOTE(review): @name and @knew are static and shared across all calls;
 * snd_ctl_new1() presumably copies the template including the name, so
 * this is safe only because codec probing is single-threaded -- confirm
 * before reusing this pattern elsewhere.
 */
static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
				  const char *dir, int cidx,
				  hda_nid_t nid, int hda_dir, int amp_idx)
{
	static char name[32];
	static struct snd_kcontrol_new knew[] = {
		HDA_CODEC_VOLUME(name, 0, 0, 0),
		HDA_CODEC_MUTE(name, 0, 0, 0),
	};
	static const char * const sfx[2] = { "Volume", "Switch" };
	int i, err;

	for (i = 0; i < 2; i++) {
		struct snd_kcontrol *kctl;
		knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, amp_idx,
							    hda_dir);
		knew[i].subdevice = HDA_SUBDEV_AMP_FLAG;
		knew[i].index = cidx;
		snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]);
		kctl = snd_ctl_new1(&knew[i], codec);
		if (!kctl)
			return -ENOMEM;
		err = snd_hda_ctl_add(codec, nid, kctl);
		if (err < 0)
			return err;
		/* no mute bit in the amp caps: skip the Switch control */
		if (!(query_amp_caps(codec, nid, hda_dir) &
		      (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)))
			break;
	}
	return 0;
}
#define cx_auto_add_volume(codec, str, dir, cidx, nid, hda_dir) \
cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0)
#define cx_auto_add_pb_volume(codec, nid, str, idx) \
cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT)
/* Create a playback volume/mute pair on the DAC when it has a gain amp
 * with real steps; otherwise fall back to the pin widget's amp.  Slave
 * DACs never get their own control.
 */
static int try_add_pb_volume(struct hda_codec *codec, hda_nid_t dac,
			     hda_nid_t pin, const char *name, int idx)
{
	bool dac_usable = dac && !(dac & DAC_SLAVE_FLAG);

	if (dac_usable &&
	    (query_amp_caps(codec, dac, HDA_OUTPUT) & AC_AMPCAP_NUM_STEPS))
		return cx_auto_add_pb_volume(codec, dac, name, idx);
	if (query_amp_caps(codec, pin, HDA_OUTPUT) & AC_AMPCAP_NUM_STEPS)
		return cx_auto_add_pb_volume(codec, pin, name, idx);
	return 0;
}
/* Create playback volume/mute controls for each DAC/pin pair found by
 * the parser.  A single output becomes "Master"; multiple outputs are
 * labelled per output type (Front/Surround/CLFE, Headphone, Speaker).
 * Returns 0 or a negative error code.
 */
static int cx_auto_build_output_controls(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	int i, err;
	int num_line = 0, num_hp = 0, num_spk = 0;
	static const char * const texts[3] = { "Front", "Surround", "CLFE" };

	if (spec->dac_info_filled == 1)
		return try_add_pb_volume(codec, spec->dac_info[0].dac,
					 spec->dac_info[0].pin,
					 "Master", 0);
	for (i = 0; i < spec->dac_info_filled; i++) {
		const char *label;
		int idx, type;
		hda_nid_t dac = spec->dac_info[i].dac;
		type = spec->dac_info[i].type;
		/* plain line-outs inherit the primary output type */
		if (type == AUTO_PIN_LINE_OUT)
			type = spec->autocfg.line_out_type;
		switch (type) {
		case AUTO_PIN_LINE_OUT:
		default:
			label = texts[num_line++];
			idx = 0;
			break;
		case AUTO_PIN_HP_OUT:
			label = "Headphone";
			idx = num_hp++;
			break;
		case AUTO_PIN_SPEAKER_OUT:
			label = "Speaker";
			idx = num_spk++;
			break;
		}
		err = try_add_pb_volume(codec, dac,
					spec->dac_info[i].pin,
					label, idx);
		if (err < 0)
			return err;
	}
	/* expose the auto-mute mode selector when auto-mute is active */
	if (spec->auto_mute) {
		err = snd_hda_add_new_ctls(codec, cx_automute_mode_enum);
		if (err < 0)
			return err;
	}
	return 0;
}
/* Add an input volume/mute pair on the first ADC that has @nid in its
 * connection list.  On single_adc_amp codecs the amp index is always 0.
 * Returns 0 (also when no ADC is connected) or a negative error code.
 */
static int cx_auto_add_capture_volume(struct hda_codec *codec, hda_nid_t nid,
				      const char *label, const char *pfx,
				      int cidx)
{
	struct conexant_spec *spec = codec->spec;
	int i;

	for (i = 0; i < spec->num_adc_nids; i++) {
		hda_nid_t adc_nid = spec->adc_nids[i];
		int idx = get_input_connection(codec, adc_nid, nid);
		if (idx < 0)
			continue;
		if (codec->single_adc_amp)
			idx = 0;
		return cx_auto_add_volume_idx(codec, label, pfx,
					      cidx, adc_nid, HDA_INPUT, idx);
	}
	return 0;
}
/* Add a "... Boost" volume for imux item @idx: on the pin's own input
 * amp when it has one, otherwise on the output amp of the mux widget
 * between pin and ADC.  A mux shared with an earlier item is only
 * added once.
 */
static int cx_auto_add_boost_volume(struct hda_codec *codec, int idx,
				    const char *label, int cidx)
{
	struct conexant_spec *spec = codec->spec;
	hda_nid_t mux, nid;
	int i, con;

	nid = spec->imux_info[idx].pin;
	if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)
		return cx_auto_add_volume(codec, label, " Boost", cidx,
					  nid, HDA_INPUT);
	con = __select_input_connection(codec, spec->imux_info[idx].adc, nid,
					&mux, false, 0);
	if (con < 0)
		return 0;
	for (i = 0; i < idx; i++) {
		if (spec->imux_info[i].boost == mux)
			return 0; /* already present */
	}
	if (get_wcaps(codec, mux) & AC_WCAP_OUT_AMP) {
		spec->imux_info[idx].boost = mux;
		return cx_auto_add_volume(codec, label, " Boost", 0,
					  mux, HDA_OUTPUT);
	}
	return 0;
}
/* Build the capture-side mixer controls: boost volumes per input pin,
 * per-connection capture volume/mute pairs, and - when several inputs
 * exist without auto-mic - the capture source selector.
 * Returns 0 or a negative error code.
 */
static int cx_auto_build_input_controls(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	struct hda_input_mux *imux = &spec->private_imux;
	const char *prev_label;
	int input_conn[HDA_MAX_NUM_INPUTS];
	int i, j, err, cidx;
	int multi_connection;

	if (!imux->num_items)
		return 0;

	/* Record which (ADC, connection-index) pair each imux item uses
	 * so that duplicate connections can be suppressed below. */
	multi_connection = 0;
	for (i = 0; i < imux->num_items; i++) {
		cidx = get_input_connection(codec, spec->imux_info[i].adc,
					    spec->imux_info[i].pin);
		/* Fix: give unconnected slots a defined value; they were
		 * previously left uninitialized but still read in the
		 * dup-check loop and as input_conn[0] below (UB).  0 is a
		 * safe sentinel since a connected entry always contains a
		 * non-zero ADC nid. */
		input_conn[i] = 0;
		if (cidx < 0)
			continue;
		input_conn[i] = spec->imux_info[i].adc;
		if (!codec->single_adc_amp)
			input_conn[i] |= cidx << 8;
		if (i > 0 && input_conn[i] != input_conn[0])
			multi_connection = 1;
	}

	prev_label = NULL;
	cidx = 0;
	for (i = 0; i < imux->num_items; i++) {
		hda_nid_t nid = spec->imux_info[i].pin;
		const char *label;

		label = hda_get_autocfg_input_label(codec, &spec->autocfg,
						    spec->imux_info[i].index);
		/* bump the control index when the same label repeats */
		if (label == prev_label)
			cidx++;
		else
			cidx = 0;
		prev_label = label;

		err = cx_auto_add_boost_volume(codec, i, label, cidx);
		if (err < 0)
			return err;

		if (!multi_connection) {
			/* all inputs share one connection: one single
			 * "Capture" control suffices */
			if (i > 0)
				continue;
			err = cx_auto_add_capture_volume(codec, nid,
							 "Capture", "", cidx);
		} else {
			bool dup_found = false;
			for (j = 0; j < i; j++) {
				if (input_conn[j] == input_conn[i]) {
					dup_found = true;
					break;
				}
			}
			if (dup_found)
				continue;
			err = cx_auto_add_capture_volume(codec, nid,
							 label, " Capture", cidx);
		}
		if (err < 0)
			return err;
	}

	if (spec->private_imux.num_items > 1 && !spec->auto_mic) {
		err = snd_hda_add_new_ctls(codec, cx_auto_capture_mixers);
		if (err < 0)
			return err;
	}

	return 0;
}
/* build_controls callback: output mixers, input mixers, the common
 * Conexant controls, jack kcontrols, and finally the vmaster mute hook
 * (used for the mute LED on HP machines).
 */
static int cx_auto_build_controls(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	int err;

	err = cx_auto_build_output_controls(codec);
	if (err < 0)
		return err;
	err = cx_auto_build_input_controls(codec);
	if (err < 0)
		return err;
	err = conexant_build_controls(codec);
	if (err < 0)
		return err;
	err = snd_hda_jack_add_kctls(codec, &spec->autocfg);
	if (err < 0)
		return err;
	/* hook the Master switch only when a vmaster was created */
	if (spec->vmaster_mute.sw_kctl) {
		spec->vmaster_mute.hook = cx_auto_vmaster_hook;
		err = snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute,
					       spec->vmaster_mute_led);
		if (err < 0)
			return err;
	}
	return 0;
}
/* Collect every analog audio-input (ADC) widget of the codec into
 * spec->private_adc_nids and point spec->adc_nids at it.  Always
 * returns 0.
 */
static int cx_auto_search_adcs(struct hda_codec *codec)
{
	struct conexant_spec *spec = codec->spec;
	hda_nid_t nid;
	hda_nid_t last = codec->start_nid + codec->num_nodes;

	for (nid = codec->start_nid; nid < last; nid++) {
		unsigned int wcaps = get_wcaps(codec, nid);

		/* only analog audio inputs are interesting */
		if (get_wcaps_type(wcaps) != AC_WID_AUD_IN ||
		    (wcaps & AC_WCAP_DIGITAL))
			continue;
		if (snd_BUG_ON(spec->num_adc_nids >=
			       ARRAY_SIZE(spec->private_adc_nids)))
			break;
		spec->private_adc_nids[spec->num_adc_nids++] = nid;
	}
	spec->adc_nids = spec->private_adc_nids;
	return 0;
}
static const struct hda_codec_ops cx_auto_patch_ops = {
.build_controls = cx_auto_build_controls,
.build_pcms = conexant_build_pcms,
.init = cx_auto_init,
.free = conexant_free,
.unsol_event = cx_auto_unsol_event,
#ifdef CONFIG_SND_HDA_POWER_SAVE
.suspend = conexant_suspend,
#endif
.reboot_notify = snd_hda_shutup_pins,
};
/*
* pin fix-up
*/
struct cxt_pincfg {
hda_nid_t nid;
u32 val;
};
/* Apply a zero-terminated array of pin default-config overrides. */
static void apply_pincfg(struct hda_codec *codec, const struct cxt_pincfg *cfg)
{
	while (cfg->nid) {
		snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
		cfg++;
	}
}
static void apply_pin_fixup(struct hda_codec *codec,
const struct snd_pci_quirk *quirk,
const struct cxt_pincfg **table)
{
quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
if (quirk) {
snd_printdd(KERN_INFO "hda_codec: applying pincfg for %s\n",
quirk->name);
apply_pincfg(codec, table[quirk->value]);
}
}
enum {
CXT_PINCFG_LENOVO_X200,
CXT_PINCFG_LENOVO_TP410,
};
/* ThinkPad X200 & co with cxt5051 */
static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = {
{ 0x16, 0x042140ff }, /* HP (seq# overridden) */
{ 0x17, 0x21a11000 }, /* dock-mic */
{ 0x19, 0x2121103f }, /* dock-HP */
{ 0x1c, 0x21440100 }, /* dock SPDIF out */
{}
};
/* ThinkPad 410/420/510/520, X201 & co with cxt5066 */
static const struct cxt_pincfg cxt_pincfg_lenovo_tp410[] = {
{ 0x19, 0x042110ff }, /* HP (seq# overridden) */
{ 0x1a, 0x21a190f0 }, /* dock-mic */
{ 0x1c, 0x212140ff }, /* dock-HP */
{}
};
static const struct cxt_pincfg *cxt_pincfg_tbl[] = {
[CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200,
[CXT_PINCFG_LENOVO_TP410] = cxt_pincfg_lenovo_tp410,
};
static const struct snd_pci_quirk cxt5051_fixups[] = {
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
{}
};
static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
{}
};
/* add "fake" mute amp-caps to DACs on cx5051 so that mixer mute switches
 * can be created (bko#42825)
 */
static void add_cx5051_fake_mutes(struct hda_codec *codec)
{
	static hda_nid_t out_nids[] = {
		0x10, 0x11, 0
	};
	int i;

	for (i = 0; out_nids[i]; i++)
		snd_hda_override_amp_caps(codec, out_nids[i], HDA_OUTPUT,
					  query_amp_caps(codec, out_nids[i],
							 HDA_OUTPUT) |
					  AC_AMPCAP_MIN_MUTE);
}
/* Auto-parser entry point for the newer Conexant codecs: allocate the
 * spec, apply per-model quirks, discover ADCs, run the BIOS auto-config
 * parser and install the patch ops.  Returns 0 or a negative error.
 */
static int patch_conexant_auto(struct hda_codec *codec)
{
	struct conexant_spec *spec;
	int err;

	printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
	       codec->chip_name);
	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	codec->spec = spec;

	switch (codec->vendor_id) {
	case 0x14f15045:
		codec->single_adc_amp = 1;
		break;
	case 0x14f15051:
		add_cx5051_fake_mutes(codec);
		codec->pin_amp_workaround = 1;
		apply_pin_fixup(codec, cxt5051_fixups, cxt_pincfg_tbl);
		break;
	default:
		codec->pin_amp_workaround = 1;
		apply_pin_fixup(codec, cxt5066_fixups, cxt_pincfg_tbl);
	}

	/* Show mute-led control only on HP laptops
	 * This is a sort of white-list: on HP laptops, EAPD corresponds
	 * only to the mute-LED without actualy amp function. Meanwhile,
	 * others may use EAPD really as an amp switch, so it might be
	 * not good to expose it blindly.
	 */
	switch (codec->subsystem_id >> 16) {
	case 0x103c:
		spec->vmaster_mute_led = 1;
		break;
	}

	err = cx_auto_search_adcs(codec);
	if (err < 0)
		goto error;	/* was leaking spec on this path */
	err = cx_auto_parse_auto_config(codec);
	if (err < 0)
		goto error;

	spec->capture_stream = &cx_auto_pcm_analog_capture;
	codec->patch_ops = cx_auto_patch_ops;
	if (spec->beep_amp)
		snd_hda_attach_beep_device(codec, spec->beep_amp);

	/* Some laptops with Conexant chips show stalls in S3 resume,
	 * which falls into the single-cmd mode.
	 * Better to make reset, then.
	 */
	if (!codec->bus->sync_write) {
		snd_printd("hda_codec: "
			   "Enable sync_write for stable communication\n");
		codec->bus->sync_write = 1;
		codec->bus->allow_bus_reset = 1;
	}
	return 0;

 error:
	/* single cleanup path; previously the ADC-search failure returned
	 * without freeing the spec */
	kfree(spec);
	codec->spec = NULL;
	return err;
}
/*
*/
static const struct hda_codec_preset snd_hda_preset_conexant[] = {
{ .id = 0x14f15045, .name = "CX20549 (Venice)",
.patch = patch_cxt5045 },
{ .id = 0x14f15047, .name = "CX20551 (Waikiki)",
.patch = patch_cxt5047 },
{ .id = 0x14f15051, .name = "CX20561 (Hermosa)",
.patch = patch_cxt5051 },
{ .id = 0x14f15066, .name = "CX20582 (Pebble)",
.patch = patch_cxt5066 },
{ .id = 0x14f15067, .name = "CX20583 (Pebble HSF)",
.patch = patch_cxt5066 },
{ .id = 0x14f15068, .name = "CX20584",
.patch = patch_cxt5066 },
{ .id = 0x14f15069, .name = "CX20585",
.patch = patch_cxt5066 },
{ .id = 0x14f1506c, .name = "CX20588",
.patch = patch_cxt5066 },
{ .id = 0x14f1506e, .name = "CX20590",
.patch = patch_cxt5066 },
{ .id = 0x14f15097, .name = "CX20631",
.patch = patch_conexant_auto },
{ .id = 0x14f15098, .name = "CX20632",
.patch = patch_conexant_auto },
{ .id = 0x14f150a1, .name = "CX20641",
.patch = patch_conexant_auto },
{ .id = 0x14f150a2, .name = "CX20642",
.patch = patch_conexant_auto },
{ .id = 0x14f150ab, .name = "CX20651",
.patch = patch_conexant_auto },
{ .id = 0x14f150ac, .name = "CX20652",
.patch = patch_conexant_auto },
{ .id = 0x14f150b8, .name = "CX20664",
.patch = patch_conexant_auto },
{ .id = 0x14f150b9, .name = "CX20665",
.patch = patch_conexant_auto },
{} /* terminator */
};
MODULE_ALIAS("snd-hda-codec-id:14f15045");
MODULE_ALIAS("snd-hda-codec-id:14f15047");
MODULE_ALIAS("snd-hda-codec-id:14f15051");
MODULE_ALIAS("snd-hda-codec-id:14f15066");
MODULE_ALIAS("snd-hda-codec-id:14f15067");
MODULE_ALIAS("snd-hda-codec-id:14f15068");
MODULE_ALIAS("snd-hda-codec-id:14f15069");
MODULE_ALIAS("snd-hda-codec-id:14f1506c");
MODULE_ALIAS("snd-hda-codec-id:14f1506e");
MODULE_ALIAS("snd-hda-codec-id:14f15097");
MODULE_ALIAS("snd-hda-codec-id:14f15098");
MODULE_ALIAS("snd-hda-codec-id:14f150a1");
MODULE_ALIAS("snd-hda-codec-id:14f150a2");
MODULE_ALIAS("snd-hda-codec-id:14f150ab");
MODULE_ALIAS("snd-hda-codec-id:14f150ac");
MODULE_ALIAS("snd-hda-codec-id:14f150b8");
MODULE_ALIAS("snd-hda-codec-id:14f150b9");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Conexant HD-audio codec");
static struct hda_codec_preset_list conexant_list = {
.preset = snd_hda_preset_conexant,
.owner = THIS_MODULE,
};
static int __init patch_conexant_init(void)
{
return snd_hda_add_codec_preset(&conexant_list);
}
static void __exit patch_conexant_exit(void)
{
snd_hda_delete_codec_preset(&conexant_list);
}
module_init(patch_conexant_init)
module_exit(patch_conexant_exit)
| gpl-2.0 |
gao-feng/net | sound/soc/blackfin/bf5xx-i2s.c | 4111 | 7345 | /*
* File: sound/soc/blackfin/bf5xx-i2s.c
* Author: Cliff Cai <Cliff.Cai@analog.com>
*
* Created: Tue June 06 2008
* Description: Blackfin I2S CPU DAI driver
*
* Modified:
* Copyright 2008 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <asm/irq.h>
#include <asm/portmux.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include "bf5xx-sport.h"
struct bf5xx_i2s_port {
u16 tcr1;
u16 rcr1;
u16 tcr2;
u16 rcr2;
int configured;
};
/* DAI format configuration.  The Blackfin SPORT supports only I2S and
 * DSP-A framing with the codec as bit/frame clock master (CBM_CFM);
 * every other combination is rejected with -EINVAL.  The chosen bits
 * are cached in tcr1/rcr1/tcr2/rcr2 and programmed in hw_params.
 */
static int bf5xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
		unsigned int fmt)
{
	struct sport_device *sport_handle = snd_soc_dai_get_drvdata(cpu_dai);
	struct bf5xx_i2s_port *bf5xx_i2s = sport_handle->private_data;
	int ret = 0;

	/* interface format:support I2S,slave mode */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		bf5xx_i2s->tcr1 |= TFSR | TCKFE;
		bf5xx_i2s->rcr1 |= RFSR | RCKFE;
		bf5xx_i2s->tcr2 |= TSFSE;
		bf5xx_i2s->rcr2 |= RSFSE;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		bf5xx_i2s->tcr1 |= TFSR;
		bf5xx_i2s->rcr1 |= RFSR;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		/* left-justified is explicitly unsupported */
		ret = -EINVAL;
		break;
	default:
		printk(KERN_ERR "%s: Unknown DAI format type\n", __func__);
		ret = -EINVAL;
		break;
	}

	/* only codec-master (CBM_CFM) clocking is supported */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
	case SND_SOC_DAIFMT_CBM_CFS:
	case SND_SOC_DAIFMT_CBS_CFM:
		ret = -EINVAL;
		break;
	default:
		printk(KERN_ERR "%s: Unknown DAI master type\n", __func__);
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* hw_params callback: program the SPORT word length from the sample
 * format and, on the first stream, configure both RX and TX (they are
 * not independent on this hardware).  Returns 0, -EBUSY, or -EINVAL.
 */
static int bf5xx_i2s_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params,
				struct snd_soc_dai *dai)
{
	struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai);
	struct bf5xx_i2s_port *bf5xx_i2s = sport_handle->private_data;
	int ret = 0;

	/* clear the previous word-length bits (SLEN field) */
	bf5xx_i2s->tcr2 &= ~0x1f;
	bf5xx_i2s->rcr2 &= ~0x1f;
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S8:
		bf5xx_i2s->tcr2 |= 7;
		bf5xx_i2s->rcr2 |= 7;
		sport_handle->wdsize = 1;
		/* fix: a missing break here fell through into the S16
		 * case, overriding word length and wdsize for S8 */
		break;
	case SNDRV_PCM_FORMAT_S16_LE:
		bf5xx_i2s->tcr2 |= 15;
		bf5xx_i2s->rcr2 |= 15;
		sport_handle->wdsize = 2;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		bf5xx_i2s->tcr2 |= 23;
		bf5xx_i2s->rcr2 |= 23;
		sport_handle->wdsize = 3;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		bf5xx_i2s->tcr2 |= 31;
		bf5xx_i2s->rcr2 |= 31;
		sport_handle->wdsize = 4;
		break;
	}

	if (!bf5xx_i2s->configured) {
		/*
		 * TX and RX are not independent,they are enabled at the
		 * same time, even if only one side is running. So, we
		 * need to configure both of them at the time when the first
		 * stream is opened.
		 *
		 * CPU DAI:slave mode.
		 */
		bf5xx_i2s->configured = 1;
		ret = sport_config_rx(sport_handle, bf5xx_i2s->rcr1,
				      bf5xx_i2s->rcr2, 0, 0);
		if (ret) {
			pr_err("SPORT is busy!\n");
			return -EBUSY;
		}

		ret = sport_config_tx(sport_handle, bf5xx_i2s->tcr1,
				      bf5xx_i2s->tcr2, 0, 0);
		if (ret) {
			pr_err("SPORT is busy!\n");
			return -EBUSY;
		}
	}

	return 0;
}
/* Stream shutdown: once no stream is active the SPORT may be
 * reconfigured by the next hw_params call.
 */
static void bf5xx_i2s_shutdown(struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai)
{
	struct sport_device *sport = snd_soc_dai_get_drvdata(dai);
	struct bf5xx_i2s_port *port = sport->private_data;

	pr_debug("%s enter\n", __func__);
	if (dai->active)
		return;
	/* No active stream, SPORT is allowed to be configured again. */
	port->configured = 0;
}
#ifdef CONFIG_PM
/* Suspend: stop whichever SPORT directions currently have an active
 * stream.  Register state stays cached in the port structure.
 */
static int bf5xx_i2s_suspend(struct snd_soc_dai *dai)
{
	struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai);

	pr_debug("%s : sport %d\n", __func__, dai->id);
	if (dai->capture_active)
		sport_rx_stop(sport_handle);
	if (dai->playback_active)
		sport_tx_stop(sport_handle);
	return 0;
}
/* Resume: reprogram both SPORT directions from the cached tcr/rcr
 * values.  Returns 0 or -EBUSY when the SPORT refuses configuration.
 */
static int bf5xx_i2s_resume(struct snd_soc_dai *dai)
{
	struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai);
	struct bf5xx_i2s_port *bf5xx_i2s = sport_handle->private_data;
	int ret;

	pr_debug("%s : sport %d\n", __func__, dai->id);
	ret = sport_config_rx(sport_handle, bf5xx_i2s->rcr1,
			      bf5xx_i2s->rcr2, 0, 0);
	if (ret) {
		pr_err("SPORT is busy!\n");
		return -EBUSY;
	}

	ret = sport_config_tx(sport_handle, bf5xx_i2s->tcr1,
			      bf5xx_i2s->tcr2, 0, 0);
	if (ret) {
		pr_err("SPORT is busy!\n");
		return -EBUSY;
	}

	return 0;
}
#else
#define bf5xx_i2s_suspend NULL
#define bf5xx_i2s_resume NULL
#endif
#define BF5XX_I2S_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \
SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
SNDRV_PCM_RATE_96000)
#define BF5XX_I2S_FORMATS \
(SNDRV_PCM_FMTBIT_S8 | \
SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_S32_LE)
static const struct snd_soc_dai_ops bf5xx_i2s_dai_ops = {
.shutdown = bf5xx_i2s_shutdown,
.hw_params = bf5xx_i2s_hw_params,
.set_fmt = bf5xx_i2s_set_dai_fmt,
};
static struct snd_soc_dai_driver bf5xx_i2s_dai = {
.suspend = bf5xx_i2s_suspend,
.resume = bf5xx_i2s_resume,
.playback = {
.channels_min = 1,
.channels_max = 2,
.rates = BF5XX_I2S_RATES,
.formats = BF5XX_I2S_FORMATS,},
.capture = {
.channels_min = 1,
.channels_max = 2,
.rates = BF5XX_I2S_RATES,
.formats = BF5XX_I2S_FORMATS,},
.ops = &bf5xx_i2s_dai_ops,
};
/* Platform probe: initialize the SPORT and register the CPU DAI.
 * NOTE(review): the sport_init() arguments (4, 2 * sizeof(u32),
 * private-data size) are presumably pin count / DMA word size per
 * bf5xx-sport.h -- confirm against that header.
 */
static int __devinit bf5xx_i2s_probe(struct platform_device *pdev)
{
	struct sport_device *sport_handle;
	int ret;

	/* configure SPORT for I2S */
	sport_handle = sport_init(pdev, 4, 2 * sizeof(u32),
		sizeof(struct bf5xx_i2s_port));
	if (!sport_handle)
		return -ENODEV;

	/* register with the ASoC layers */
	ret = snd_soc_register_dai(&pdev->dev, &bf5xx_i2s_dai);
	if (ret) {
		pr_err("Failed to register DAI: %d\n", ret);
		/* undo sport_init() on failure */
		sport_done(sport_handle);
		return ret;
	}

	return 0;
}
/* Platform remove: unregister the DAI and tear down the SPORT. */
static int __devexit bf5xx_i2s_remove(struct platform_device *pdev)
{
	struct sport_device *sport = platform_get_drvdata(pdev);

	pr_debug("%s enter\n", __func__);
	snd_soc_unregister_dai(&pdev->dev);
	sport_done(sport);
	return 0;
}
static struct platform_driver bfin_i2s_driver = {
.probe = bf5xx_i2s_probe,
.remove = __devexit_p(bf5xx_i2s_remove),
.driver = {
.name = "bfin-i2s",
.owner = THIS_MODULE,
},
};
module_platform_driver(bfin_i2s_driver);
/* Module information */
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("I2S driver for ADI Blackfin");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ngxson/SXDNickiLolly | arch/alpha/kernel/pci-sysfs.c | 4367 | 9427 | /*
* arch/alpha/kernel/pci-sysfs.c
*
* Copyright (C) 2009 Ivan Kokshaysky
*
* Alpha PCI resource files.
*
* Loosely based on generic HAVE_PCI_MMAP implementation in
* drivers/pci/pci-sysfs.c
*/
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/pci.h>
/* Remap the hose's sparse or dense MMIO/IO window into @vma.  The
 * caller has already validated the range; vm_pgoff is biased by the
 * window's physical base before remapping.
 */
static int hose_mmap_page_range(struct pci_controller *hose,
				struct vm_area_struct *vma,
				enum pci_mmap_state mmap_type, int sparse)
{
	unsigned long base;

	if (mmap_type == pci_mmap_mem)
		base = sparse ? hose->sparse_mem_base : hose->dense_mem_base;
	else
		base = sparse ? hose->sparse_io_base : hose->dense_io_base;

	vma->vm_pgoff += base >> PAGE_SHIFT;
	vma->vm_flags |= (VM_IO | VM_RESERVED);

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
/* Check that @vma fits inside BAR @num of @pdev.  Sparse windows are
 * 32x larger than the BAR (each 32-byte chunk maps one byte of bus
 * space), hence the extra shift.  Warns and returns 0 on overflow,
 * 1 when the mapping fits.
 */
static int __pci_mmap_fits(struct pci_dev *pdev, int num,
			   struct vm_area_struct *vma, int sparse)
{
	unsigned long nr, start, size;
	int shift = sparse ? 5 : 0;

	nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	start = vma->vm_pgoff;
	size = ((pci_resource_len(pdev, num) - 1) >> (PAGE_SHIFT - shift)) + 1;

	if (start < size && size - start >= nr)
		return 1;

	WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on %s BAR %d "
		"(size 0x%08lx)\n",
		current->comm, sparse ? " sparse" : "", start, start + nr,
		pci_name(pdev), num, size);
	return 0;
}
/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @sparse: address space type
 *
 * Use the bus mapping routines to map a PCI resource into userspace.
 * Returns 0 on success, -ENODEV/-EINVAL on bad resource or range.
 */
static int pci_mmap_resource(struct kobject *kobj,
			     struct bin_attribute *attr,
			     struct vm_area_struct *vma, int sparse)
{
	struct pci_dev *pdev = to_pci_dev(container_of(kobj,
						       struct device, kobj));
	struct resource *res = attr->private;
	enum pci_mmap_state mmap_type;
	struct pci_bus_region bar;
	int i;

	/* find which BAR this attribute was created for */
	for (i = 0; i < PCI_ROM_RESOURCE; i++)
		if (res == &pdev->resource[i])
			break;
	if (i >= PCI_ROM_RESOURCE)
		return -ENODEV;

	if (!__pci_mmap_fits(pdev, i, vma, sparse))
		return -EINVAL;

	/* resources claimed exclusively by the kernel may not be mapped */
	if (iomem_is_exclusive(res->start))
		return -EINVAL;

	/* offset into the BAR, in bus-address terms (sparse is 32x) */
	pcibios_resource_to_bus(pdev, &bar, res);
	vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0));
	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse);
}
/* mmap callbacks bound to the sysfs resourceN_sparse/_dense files;
 * they only differ in the sparse flag passed to pci_mmap_resource().
 */
static int pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}

static int pci_mmap_resource_dense(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr,
				   struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}
/**
 * pci_remove_resource_files - cleanup resource files
 * @pdev: dev to cleanup
 *
 * If we created resource files for @pdev, remove them from sysfs and
 * free their resources.  The sparse and dense attributes for one BAR
 * share a single allocation anchored at res_attr[i] (res_attr_wc[i]
 * points into its middle -- see pci_create_attr()), so only the
 * res_attr pointer may be passed to kfree().
 */
void pci_remove_resource_files(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct bin_attribute *res_attr;

		/* remove the dense companion file first, but do NOT free
		 * it: the previous kfree(res_attr_wc[i]) passed a pointer
		 * into the middle of the res_attr[i] allocation */
		res_attr = pdev->res_attr_wc[i];
		if (res_attr)
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);

		res_attr = pdev->res_attr[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}
	}
}
/* Return non-zero when BAR @num lies entirely within the hose's sparse
 * memory window, i.e. a resourceN_sparse file makes sense for it.
 */
static int sparse_mem_mmap_fits(struct pci_dev *pdev, int num)
{
	struct pci_bus_region bar;
	struct pci_controller *hose = pdev->sysdata;
	long dense_offset;
	unsigned long sparse_size;

	pcibios_resource_to_bus(pdev, &bar, &pdev->resource[num]);

	/* All core logic chips have 4G sparse address space, except
	   CIA which has 16G (see xxx_SPARSE_MEM and xxx_DENSE_MEM
	   definitions in asm/core_xxx.h files). This corresponds
	   to 128M or 512M of the bus space. */
	dense_offset = (long)(hose->dense_mem_base - hose->sparse_mem_base);
	sparse_size = dense_offset >= 0x400000000UL ? 0x20000000 : 0x8000000;

	return bar.end < sparse_size;
}
/* Fill in and register one resourceN[_sparse|_dense] bin attribute.
 * A sparse file advertises 32x the BAR size (one byte of bus space per
 * 32-byte chunk).  @name must stay valid for the attribute's lifetime
 * (it lives in the same allocation, see pci_create_attr()).
 */
static int pci_create_one_attr(struct pci_dev *pdev, int num, char *name,
			       char *suffix, struct bin_attribute *res_attr,
			       unsigned long sparse)
{
	size_t size = pci_resource_len(pdev, num);

	sprintf(name, "resource%d%s", num, suffix);
	res_attr->mmap = sparse ? pci_mmap_resource_sparse :
				  pci_mmap_resource_dense;
	res_attr->attr.name = name;
	res_attr->attr.mode = S_IRUSR | S_IWUSR;
	res_attr->size = sparse ? size << 5 : size;
	res_attr->private = &pdev->resource[num];
	return sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
}
/* Create the sysfs resource file(s) for BAR @num: a plain resourceN on
 * bwx machines, resourceN_sparse (plus optionally resourceN_dense) when
 * the hose has a sparse window.  One kzalloc holds the attribute
 * struct(s) followed by their name storage; pci_remove_resource_files()
 * frees it via pdev->res_attr[num].
 * NOTE(review): GFP_ATOMIC here looks inherited; presumably GFP_KERNEL
 * would do -- confirm the calling context before changing.
 */
static int pci_create_attr(struct pci_dev *pdev, int num)
{
	/* allocate attribute structure, piggyback attribute name */
	int retval, nlen1, nlen2 = 0, res_count = 1;
	unsigned long sparse_base, dense_base;
	struct bin_attribute *attr;
	struct pci_controller *hose = pdev->sysdata;
	char *suffix, *attr_name;

	suffix = "";	/* Assume bwx machine, normal resourceN files. */
	nlen1 = 10;

	if (pdev->resource[num].flags & IORESOURCE_MEM) {
		sparse_base = hose->sparse_mem_base;
		dense_base = hose->dense_mem_base;
		if (sparse_base && !sparse_mem_mmap_fits(pdev, num)) {
			/* BAR too large for the sparse window */
			sparse_base = 0;
			suffix = "_dense";
			nlen1 = 16;	/* resourceN_dense */
		}
	} else {
		sparse_base = hose->sparse_io_base;
		dense_base = hose->dense_io_base;
	}

	if (sparse_base) {
		suffix = "_sparse";
		nlen1 = 17;
		if (dense_base) {
			nlen2 = 16;	/* resourceN_dense */
			res_count = 2;
		}
	}

	attr = kzalloc(sizeof(*attr) * res_count + nlen1 + nlen2, GFP_ATOMIC);
	if (!attr)
		return -ENOMEM;

	/* Create bwx, sparse or single dense file */
	attr_name = (char *)(attr + res_count);
	pdev->res_attr[num] = attr;
	retval = pci_create_one_attr(pdev, num, attr_name, suffix, attr,
				     sparse_base);
	if (retval || res_count == 1)
		return retval;

	/* Create dense file */
	attr_name += nlen1;
	attr++;
	pdev->res_attr_wc[num] = attr;
	return pci_create_one_attr(pdev, num, attr_name, "_dense", attr, 0);
}
/**
 * pci_create_resource_files - create resource files in sysfs for @pdev
 * @pdev: dev in question
 *
 * Walk the resources in @pdev creating files for each resource
 * available.  On any failure, all files created so far are removed
 * again.  Returns 0 or the failing pci_create_attr() error.
 */
int pci_create_resource_files(struct pci_dev *pdev)
{
	int i;
	int retval;

	/* Expose the PCI resources from this device as files */
	for (i = 0; i < PCI_ROM_RESOURCE; i++) {

		/* skip empty resources */
		if (!pci_resource_len(pdev, i))
			continue;

		retval = pci_create_attr(pdev, i);
		if (retval) {
			pci_remove_resource_files(pdev);
			return retval;
		}
	}
	return 0;
}
/* Legacy I/O bus mapping stuff. */

/* Validate that @vma fits within the legacy space of size @res_size
 * (the caller has already scaled the size for sparse mappings).  Warns
 * and returns 0 when it does not fit, 1 when it does.
 */
static int __legacy_mmap_fits(struct pci_controller *hose,
			      struct vm_area_struct *vma,
			      unsigned long res_size, int sparse)
{
	unsigned long nr, start, size;

	nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	start = vma->vm_pgoff;
	size = ((res_size - 1) >> PAGE_SHIFT) + 1;

	if (start < size && size - start >= nr)
		return 1;

	WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on hose %d "
		"(size 0x%08lx)\n",
		current->comm, sparse ? " sparse" : "", start, start + nr,
		hose->index, size);
	return 0;
}
/* Does the hose provide a sparse window for this mapping type? */
static inline int has_sparse(struct pci_controller *hose,
			     enum pci_mmap_state mmap_type)
{
	if (mmap_type == pci_mmap_mem)
		return hose->sparse_mem_base != 0;
	return hose->sparse_io_base != 0;
}
/* mmap the legacy memory or I/O space of @bus's hose into @vma, using
 * the sparse window when the hose provides one.  Returns 0 or -EINVAL.
 */
int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_type)
{
	struct pci_controller *hose = bus->sysdata;
	int sparse = has_sparse(hose, mmap_type);
	unsigned long res_size;

	/* attribute size was already scaled by pci_adjust_legacy_attr() */
	res_size = (mmap_type == pci_mmap_mem) ? bus->legacy_mem->size :
						 bus->legacy_io->size;
	if (!__legacy_mmap_fits(hose, vma, res_size, sparse))
		return -EINVAL;

	return hose_mmap_page_range(hose, vma, mmap_type, sparse);
}
/**
* pci_adjust_legacy_attr - adjustment of legacy file attributes
* @b: bus to create files under
* @mmap_type: I/O port or memory
*
* Adjust file name and size for sparse mappings.
*/
void pci_adjust_legacy_attr(struct pci_bus *bus, enum pci_mmap_state mmap_type)
{
struct pci_controller *hose = bus->sysdata;
if (!has_sparse(hose, mmap_type))
return;
if (mmap_type == pci_mmap_mem) {
bus->legacy_mem->attr.name = "legacy_mem_sparse";
bus->legacy_mem->size <<= 5;
} else {
bus->legacy_io->attr.name = "legacy_io_sparse";
bus->legacy_io->size <<= 5;
}
return;
}
/* Legacy I/O bus read/write functions */
/*
 * pci_legacy_read - read from the bus's legacy I/O port space.
 *
 * @port is relative to the bus; it is translated into the hose's I/O
 * space before the access.  Returns the number of bytes read, or
 * -EINVAL for an unsupported size or a misaligned port.
 */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	struct pci_controller *hose = bus->sysdata;

	/* Translate the bus-relative port into this hose's I/O space. */
	port += hose->io_space->start;

	switch (size) {
	case 1:
		*(u8 *)val = inb(port);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;		/* misaligned word access */
		*(u16 *)val = inw(port);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;		/* misaligned dword access */
		*(u32 *)val = inl(port);
		return 4;
	default:
		return -EINVAL;
	}
}
/*
 * pci_legacy_write - write to the bus's legacy I/O port space.
 *
 * @port is relative to the bus; it is translated into the hose's I/O
 * space before the access.  Returns the number of bytes written, or
 * -EINVAL for an unsupported size or a misaligned port.
 *
 * Fix: the Linux out{b,w,l}() accessors take (value, port) — see the
 * matching inb(port)/inw(port)/inl(port) usage in pci_legacy_read().
 * The previous code passed (port, val), which wrote the low bits of the
 * translated port number to whatever port @val happened to name.
 */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	struct pci_controller *hose = bus->sysdata;

	/* Translate the bus-relative port into this hose's I/O space. */
	port += hose->io_space->start;

	switch (size) {
	case 1:
		outb(val, port);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;		/* misaligned word access */
		outw(val, port);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;		/* misaligned dword access */
		outl(val, port);
		return 4;
	}
	return -EINVAL;
}
| gpl-2.0 |
gpillusion/SM-G710K_Illusion_kernel | drivers/net/usb/cdc_subset.c | 4623 | 11029 | /*
* Simple "CDC Subset" USB Networking Links
* Copyright (C) 2000-2005 by David Brownell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
/*
* This supports simple USB network links that don't require any special
* framing or hardware control operations. The protocol used here is a
* strict subset of CDC Ethernet, with three basic differences reflecting
* the goal that almost any hardware should run it:
*
* - Minimal runtime control: one interface, no altsettings, and
* no vendor or class specific control requests. If a device is
* configured, it is allowed to exchange packets with the host.
* Fancier models would mean not working on some hardware.
*
* - Minimal manufacturing control: no IEEE "Organizationally
* Unique ID" required, or an EEPROMs to store one. Each host uses
* one random "locally assigned" Ethernet address instead, which can
* of course be overridden using standard tools like "ifconfig".
* (With 2^46 such addresses, same-net collisions are quite rare.)
*
* - There is no additional framing data for USB. Packets are written
* exactly as in CDC Ethernet, starting with an Ethernet header and
* terminated by a short packet. However, the host will never send a
* zero length packet; some systems can't handle those robustly.
*
* Anything that can transmit and receive USB bulk packets can implement
* this protocol. That includes both smart peripherals and quite a lot
* of "host-to-host" USB cables (which embed two devices back-to-back).
*
* Note that although Linux may use many of those host-to-host links
* with this "cdc_subset" framing, that doesn't mean there may not be a
* better approach. Handling the "other end unplugs/replugs" scenario
* well tends to require chip-specific vendor requests. Also, Windows
* peers at the other end of host-to-host cables may expect their own
* framing to be used rather than this "cdc_subset" model.
*/
#if defined(CONFIG_USB_EPSON2888) || defined(CONFIG_USB_ARMLINUX)
/* PDA style devices are always connected if present */
/*
 * always_connected - check_connect hook for devices with no link detection.
 *
 * Returning 0 reports "link up" unconditionally; these PDA-style devices
 * offer no control request to probe the peer's presence.
 */
static int always_connected (struct usbnet *dev)
{
	return 0;
}
#endif
#ifdef CONFIG_USB_ALI_M5632
#define HAVE_HARDWARE
/*-------------------------------------------------------------------------
*
* ALi M5632 driver ... does high speed
*
* NOTE that the MS-Windows drivers for this chip use some funky and
* (naturally) undocumented 7-byte prefix to each packet, so this is a
* case where we don't currently interoperate. Also, once you unplug
* one end of the cable, you need to replug the other end too ... since
* chip docs are unavailable, there's no way to reset the relevant state
* short of a power cycle.
*
*-------------------------------------------------------------------------*/
/* No reset or check_connect hooks: chip docs are unavailable (see above). */
static const struct driver_info ali_m5632_info = {
	.description =	"ALi M5632",
	.flags       = FLAG_POINTTOPOINT,
};
#endif
#ifdef CONFIG_USB_AN2720
#define HAVE_HARDWARE
/*-------------------------------------------------------------------------
*
* AnchorChips 2720 driver ... http://www.cypress.com
*
* This doesn't seem to have a way to detect whether the peer is
* connected, or need any reset handshaking. It's got pretty big
* internal buffers (handles most of a frame's worth of data).
* Chip data sheets don't describe any vendor control messages.
*
*-------------------------------------------------------------------------*/
static const struct driver_info	an2720_info = {
	.description =	"AnchorChips/Cypress 2720",
	.flags       = FLAG_POINTTOPOINT,
	// no reset available!
	// no check_connect available!

	.in = 2, .out = 2,		// direction distinguishes these
};
#endif /* CONFIG_USB_AN2720 */
#ifdef CONFIG_USB_BELKIN
#define HAVE_HARDWARE
/*-------------------------------------------------------------------------
*
* Belkin F5U104 ... two NetChip 2280 devices + Atmel AVR microcontroller
*
* ... also two eTEK designs, including one sold as "Advance USBNET"
*
*-------------------------------------------------------------------------*/
/* Shared by Belkin F5U104 and the two eTEK designs (see above). */
static const struct driver_info	belkin_info = {
	.description =	"Belkin, eTEK, or compatible",
	.flags       = FLAG_POINTTOPOINT,
};
#endif /* CONFIG_USB_BELKIN */
#ifdef CONFIG_USB_EPSON2888
#define HAVE_HARDWARE
/*-------------------------------------------------------------------------
*
* EPSON USB clients
*
* This is the same idea as Linux PDAs (below) except the firmware in the
* device might not be Tux-powered. Epson provides reference firmware that
* implements this interface. Product developers can reuse or modify that
* code, such as by using their own product and vendor codes.
*
* Support was from Juro Bystricky <bystricky.juro@erd.epson.com>
*
*-------------------------------------------------------------------------*/
static const struct driver_info	epson2888_info = {
	.description =	"Epson USB Device",
	.check_connect = always_connected,	/* firmware has no link probe */
	.flags = FLAG_POINTTOPOINT,

	.in = 4, .out = 3,
};
#endif /* CONFIG_USB_EPSON2888 */
/*-------------------------------------------------------------------------
*
* info from Jonathan McDowell <noodles@earth.li>
*
*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_KC2190
#define HAVE_HARDWARE
static const struct driver_info kc2190_info = {
	.description =  "KC Technology KC-190",
	.flags = FLAG_POINTTOPOINT,
};
#endif /* CONFIG_USB_KC2190 */
#ifdef CONFIG_USB_ARMLINUX
#define HAVE_HARDWARE
/*-------------------------------------------------------------------------
*
* Intel's SA-1100 chip integrates basic USB support, and is used
* in PDAs like some iPaqs, the Yopy, some Zaurus models, and more.
* When they run Linux, arch/arm/mach-sa1100/usb-eth.c may be used to
* network using minimal USB framing data.
*
* This describes the driver currently in standard ARM Linux kernels.
* The Zaurus uses a different driver (see later).
*
* PXA25x and PXA210 use XScale cores (ARM v5TE) with better USB support
* and different USB endpoint numbering than the SA1100 devices. The
* mach-pxa/usb-eth.c driver re-uses the device ids from mach-sa1100
* so we rely on the endpoint descriptors.
*
*-------------------------------------------------------------------------*/
/*
 * Three variants of the Linux-peer profile; all "always connected"
 * since the peer firmware offers no link-detection request.
 */
static const struct driver_info	linuxdev_info = {
	.description =	"Linux Device",
	.check_connect = always_connected,
	.flags = FLAG_POINTTOPOINT,
};

static const struct driver_info	yopy_info = {
	.description =	"Yopy",
	.check_connect = always_connected,
	.flags = FLAG_POINTTOPOINT,
};

static const struct driver_info	blob_info = {
	.description =	"Boot Loader OBject",
	.check_connect = always_connected,
	.flags = FLAG_POINTTOPOINT,
};
#endif /* CONFIG_USB_ARMLINUX */
/*-------------------------------------------------------------------------*/
#ifndef HAVE_HARDWARE
#warning You need to configure some hardware for this driver
#endif
/*
* chip vendor names won't normally be on the cables, and
* may not be on the device.
*/
/*
 * USB ID table.  Each entry is compiled in only when the matching
 * CONFIG_USB_* hardware option is enabled; the table must stay
 * zero-terminated.
 */
static const struct usb_device_id	products [] = {

#ifdef	CONFIG_USB_ALI_M5632
{
	USB_DEVICE (0x0402, 0x5632),	// ALi defaults
	.driver_info =	(unsigned long) &ali_m5632_info,
},
{
	USB_DEVICE (0x182d,0x207c),	// SiteCom CN-124
	.driver_info =	(unsigned long) &ali_m5632_info,
},
#endif

#ifdef	CONFIG_USB_AN2720
{
	USB_DEVICE (0x0547, 0x2720),	// AnchorChips defaults
	.driver_info =	(unsigned long) &an2720_info,
}, {
	USB_DEVICE (0x0547, 0x2727),	// Xircom PGUNET
	.driver_info =	(unsigned long) &an2720_info,
},
#endif

#ifdef	CONFIG_USB_BELKIN
{
	USB_DEVICE (0x050d, 0x0004),	// Belkin
	.driver_info =	(unsigned long) &belkin_info,
}, {
	USB_DEVICE (0x056c, 0x8100),	// eTEK
	.driver_info =	(unsigned long) &belkin_info,
}, {
	USB_DEVICE (0x0525, 0x9901),	// Advance USBNET (eTEK)
	.driver_info =	(unsigned long) &belkin_info,
},
#endif

#ifdef	CONFIG_USB_EPSON2888
{
	USB_DEVICE (0x0525, 0x2888),	// EPSON USB client
	.driver_info =	(unsigned long) &epson2888_info,
},
#endif

#ifdef	CONFIG_USB_KC2190
{
	USB_DEVICE (0x050f, 0x0190),	// KC-190
	.driver_info =	(unsigned long) &kc2190_info,
},
#endif

#ifdef	CONFIG_USB_ARMLINUX
/*
 * SA-1100 using standard ARM Linux kernels, or compatible.
 * Often used when talking to Linux PDAs (iPaq, Yopy, etc).
 * The sa-1100 "usb-eth" driver handles the basic framing.
 *
 * PXA25x or PXA210 ...  these use a "usb-eth" driver much like
 * the sa1100 one, but hardware uses different endpoint numbers.
 *
 * Or the Linux "Ethernet" gadget on hardware that can't talk
 * CDC Ethernet (e.g., no altsettings), in either of two modes:
 *  - acting just like the old "usb-eth" firmware, though
 *    the implementation is different
 *  - supporting RNDIS as the first/default configuration for
 *    MS-Windows interop; Linux needs to use the other config
 */
{
	// 1183 = 0x049F, both used as hex values?
	// Compaq "Itsy" vendor/product id
	USB_DEVICE (0x049F, 0x505A),	// usb-eth, or compatible
	.driver_info =	(unsigned long) &linuxdev_info,
}, {
	USB_DEVICE (0x0E7E, 0x1001),	// G.Mate "Yopy"
	.driver_info =	(unsigned long) &yopy_info,
}, {
	USB_DEVICE (0x8086, 0x07d3),	// "blob" bootloader
	.driver_info =	(unsigned long) &blob_info,
}, {
	USB_DEVICE (0x1286, 0x8001),    // "blob" bootloader
	.driver_info =  (unsigned long) &blob_info,
}, {
	// Linux Ethernet/RNDIS gadget, mostly on PXA, second config
	// e.g. Gumstix, current OpenZaurus, ... or anything else
	// that just enables this gadget option.
	USB_DEVICE (0x0525, 0xa4a2),
	.driver_info =	(unsigned long) &linuxdev_info,
},
#endif

{ },		// END
};
MODULE_DEVICE_TABLE(usb, products);
/*-------------------------------------------------------------------------*/
/* All real work is delegated to the generic usbnet core. */
static struct usb_driver cdc_subset_driver = {
	.name		= "cdc_subset",
	.probe		= usbnet_probe,
	.suspend	= usbnet_suspend,
	.resume		= usbnet_resume,
	.disconnect	= usbnet_disconnect,
	.id_table	= products,
};
module_usb_driver(cdc_subset_driver);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("Simple 'CDC Subset' USB networking links");
MODULE_LICENSE("GPL");
| gpl-2.0 |
nobodyAtall/msm7x30-3.4.x-nAa | drivers/usb/host/ehci-xilinx-of.c | 4879 | 8010 | /*
* EHCI HCD (Host Controller Driver) for USB.
*
* Bus Glue for Xilinx EHCI core on the of_platform bus
*
* Copyright (c) 2009 Xilinx, Inc.
*
* Based on "ehci-ppc-of.c" by Valentine Barshak <vbarshak@ru.mvista.com>
* and "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
* and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/signal.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
/**
* ehci_xilinx_of_setup - Initialize the device for ehci_reset()
* @hcd: Pointer to the usb_hcd device to which the host controller bound
*
* called during probe() after chip reset completes.
*/
/**
 * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
 * @hcd: Pointer to the usb_hcd device to which the host controller bound
 *
 * Called during probe() after chip reset completes: halts the controller,
 * runs generic EHCI init, records the serial bus release number (2.0),
 * and finishes with a controller reset.
 */
static int ehci_xilinx_of_setup(struct usb_hcd *hcd)
{
	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
	int rc;

	rc = ehci_halt(ehci);
	if (!rc)
		rc = ehci_init(hcd);
	if (rc)
		return rc;

	ehci->sbrn = 0x20;	/* USB release 2.0 */

	return ehci_reset(ehci);
}
/**
* ehci_xilinx_port_handed_over - hand the port out if failed to enable it
* @hcd: Pointer to the usb_hcd device to which the host controller bound
* @portnum:Port number to which the device is attached.
*
* This function is used as a place to tell the user that the Xilinx USB host
* controller does support LS devices. And in an HS only configuration, it
* does not support FS devices either. It is hoped that this can help a
* confused user.
*
* There are cases when the host controller fails to enable the port due to,
* for example, insufficient power that can be supplied to the device from
* the USB bus. In those cases, the messages printed here are not helpful.
*/
/**
 * ehci_xilinx_port_handed_over - hand the port out if failed to enable it
 * @hcd: Pointer to the usb_hcd device to which the host controller bound
 * @portnum:Port number to which the device is attached.
 *
 * There is no companion controller to hand slower devices to, so this
 * hook only explains to the user why the port could not be enabled:
 * the core never supports LS devices, and an HS-only build does not
 * support FS devices either.  Always returns 0 (port not handed over).
 */
static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
{
	struct device *ctrl = hcd->self.controller;

	dev_warn(ctrl, "port %d cannot be enabled\n", portnum);
	if (!hcd->has_tt) {
		/* HS-only hardware configuration */
		dev_warn(ctrl,
			"Maybe your device is not a high speed device?\n");
		dev_warn(ctrl,
			"The USB host controller does not support full speed "
			"nor low speed devices\n");
		dev_warn(ctrl,
			"You can reconfigure the host controller to have "
			"full speed support\n");
	} else {
		/* HS/FS configuration: only LS is impossible */
		dev_warn(ctrl,
			"Maybe you have connected a low speed device?\n");
		dev_warn(ctrl,
			"We do not support low speed devices\n");
	}

	return 0;
}
/* hc_driver glue binding the generic EHCI core to this OF platform. */
static const struct hc_driver ehci_xilinx_of_hc_driver = {
	.description		= hcd_name,
	.product_desc		= "OF EHCI",
	.hcd_priv_size		= sizeof(struct ehci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq			= ehci_irq,
	.flags			= HCD_MEMORY | HCD_USB2,

	/*
	 * basic lifecycle operations
	 */
	.reset			= ehci_xilinx_of_setup,
	.start			= ehci_run,
	.stop			= ehci_stop,
	.shutdown		= ehci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue		= ehci_urb_enqueue,
	.urb_dequeue		= ehci_urb_dequeue,
	.endpoint_disable	= ehci_endpoint_disable,
	.endpoint_reset		= ehci_endpoint_reset,

	/*
	 * scheduling support
	 */
	.get_frame_number	= ehci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data	= ehci_hub_status_data,
	.hub_control		= ehci_hub_control,
#ifdef	CONFIG_PM
	.bus_suspend		= ehci_bus_suspend,
	.bus_resume		= ehci_bus_resume,
#endif
	/* no companion controller, so ports are never relinquished */
	.relinquish_port	= NULL,
	.port_handed_over	= ehci_xilinx_port_handed_over,

	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
};
/**
* ehci_hcd_xilinx_of_probe - Probe method for the USB host controller
* @op: pointer to the platform_device bound to the host controller
*
* This function requests resources and sets up appropriate properties for the
* host controller. Because the Xilinx USB host controller can be configured
* as HS only or HS/FS only, it checks the configuration in the device tree
* entry, and sets an appropriate value for hcd->has_tt.
*/
/**
 * ehci_hcd_xilinx_of_probe - Probe method for the USB host controller
 * @op: pointer to the platform_device bound to the host controller
 *
 * This function requests resources and sets up appropriate properties for the
 * host controller. Because the Xilinx USB host controller can be configured
 * as HS only or HS/FS only, it checks the configuration in the device tree
 * entry, and sets an appropriate value for hcd->has_tt.
 *
 * Error handling unwinds in reverse acquisition order via the labels at
 * the bottom.
 */
static int __devinit ehci_hcd_xilinx_of_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct usb_hcd *hcd;
	struct ehci_hcd	*ehci;
	struct resource res;
	int irq;
	int rv;
	int *value;

	if (usb_disabled())
		return -ENODEV;

	dev_dbg(&op->dev, "initializing XILINX-OF USB Controller\n");

	/* Register window comes from the first "reg" entry. */
	rv = of_address_to_resource(dn, 0, &res);
	if (rv)
		return rv;

	hcd = usb_create_hcd(&ehci_xilinx_of_hc_driver, &op->dev,
				"XILINX-OF USB");
	if (!hcd)
		return -ENOMEM;

	hcd->rsrc_start = res.start;
	hcd->rsrc_len = resource_size(&res);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
		rv = -EBUSY;
		goto err_rmr;
	}

	/* NOTE(review): the mapping created here is never undone with
	 * irq_dispose_mapping() on the error paths below — confirm
	 * whether that leak matters on this platform. */
	irq = irq_of_parse_and_map(dn, 0);
	if (!irq) {
		printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
		rv = -EBUSY;
		goto err_irq;
	}

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
		rv = -ENOMEM;
		goto err_ioremap;
	}

	ehci = hcd_to_ehci(hcd);

	/* This core always has big-endian register interface and uses
	 * big-endian memory descriptors.
	 */
	ehci->big_endian_mmio = 1;
	ehci->big_endian_desc = 1;

	/* Check whether the FS support option is selected in the hardware.
	 */
	value = (int *)of_get_property(dn, "xlnx,support-usb-fs", NULL);
	if (value && (*value == 1)) {
		ehci_dbg(ehci, "USB host controller supports FS devices\n");
		hcd->has_tt = 1;	/* transaction translator present */
	} else {
		ehci_dbg(ehci,
			"USB host controller is HS only\n");
		hcd->has_tt = 0;
	}

	/* Debug registers are at the first 0x100 region
	 */
	ehci->caps = hcd->regs + 0x100;
	ehci->regs = hcd->regs + 0x100 +
		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

	rv = usb_add_hcd(hcd, irq, 0);
	if (rv == 0)
		return 0;

	/* usb_add_hcd() failed: tear everything down again. */
	iounmap(hcd->regs);

err_ioremap:
err_irq:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err_rmr:
	usb_put_hcd(hcd);

	return rv;
}
/**
* ehci_hcd_xilinx_of_remove - shutdown hcd and release resources
* @op: pointer to platform_device structure that is to be removed
*
* Remove the hcd structure, and release resources that has been requested
* during probe.
*/
/**
 * ehci_hcd_xilinx_of_remove - shutdown hcd and release resources
 * @op: pointer to platform_device structure that is to be removed
 *
 * Unregister the hcd, then release the mapping and memory region that
 * probe() acquired, and finally drop the hcd reference.
 */
static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
{
	struct usb_hcd *hcd = dev_get_drvdata(&op->dev);

	dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n");

	dev_set_drvdata(&op->dev, NULL);

	usb_remove_hcd(hcd);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);

	return 0;
}
/**
* ehci_hcd_xilinx_of_shutdown - shutdown the hcd
* @op: pointer to platform_device structure that is to be removed
*
* Properly shutdown the hcd, call driver's shutdown routine.
*/
/**
 * ehci_hcd_xilinx_of_shutdown - shutdown the hcd
 * @op: pointer to platform_device structure that is to be removed
 *
 * Delegates to the hc_driver's shutdown routine (ehci_shutdown here),
 * if one is set.  Always returns 0.
 */
static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
{
	struct usb_hcd *hcd = dev_get_drvdata(&op->dev);

	if (hcd->driver->shutdown)
		hcd->driver->shutdown(hcd);

	return 0;
}
/* Device-tree compatible strings this driver binds to. */
static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
		{.compatible = "xlnx,xps-usb-host-1.00.a",},
	{},
};
MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);

static struct platform_driver ehci_hcd_xilinx_of_driver = {
	.probe		= ehci_hcd_xilinx_of_probe,
	.remove		= ehci_hcd_xilinx_of_remove,
	.shutdown	= ehci_hcd_xilinx_of_shutdown,
	.driver = {
		.name = "xilinx-of-ehci",
		.owner = THIS_MODULE,
		.of_match_table = ehci_hcd_xilinx_of_match,
	},
};
| gpl-2.0 |
jawad6233/Lenovo_A820_kernel_kk | kernel/drivers/staging/iio/accel/adis16204_ring.c | 4879 | 3567 | #include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include "../iio.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "adis16204.h"
/**
* adis16204_read_ring_data() read data registers which will be placed into ring
* @dev: device associated with child of actual device (iio_dev or iio_trig)
* @rx: somewhere to pass back the value read
**/
/**
 * adis16204_read_ring_data() read data registers which will be placed into ring
 * @dev: device associated with child of actual device (iio_dev or iio_trig)
 * @rx: somewhere to pass back the value read
 *
 * Builds a burst of 2-byte SPI transfers: the first transfer only clocks
 * out the first register address; each subsequent transfer clocks out the
 * next address while clocking in the previous register's value, so rx
 * receives ADIS16204_OUTPUTS register values.  Returns the spi_sync()
 * status (0 on success).
 *
 * Fix: terminate the dev_err() message with '\n' so it does not run into
 * the next kernel log line.
 **/
static int adis16204_read_ring_data(struct device *dev, u8 *rx)
{
	struct spi_message msg;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct adis16204_state *st = iio_priv(indio_dev);
	struct spi_transfer xfers[ADIS16204_OUTPUTS + 1];
	int ret;
	int i;

	mutex_lock(&st->buf_lock);

	spi_message_init(&msg);

	memset(xfers, 0, sizeof(xfers));
	for (i = 0; i <= ADIS16204_OUTPUTS; i++) {
		xfers[i].bits_per_word = 8;
		xfers[i].cs_change = 1;		/* toggle CS between registers */
		xfers[i].len = 2;
		xfers[i].delay_usecs = 20;	/* per-transfer stall time */
		xfers[i].tx_buf = st->tx + 2 * i;
		st->tx[2 * i]
			= ADIS16204_READ_REG(ADIS16204_SUPPLY_OUT + 2 * i);
		st->tx[2 * i + 1] = 0;
		/* Responses are pipelined one transfer behind the request. */
		if (i >= 1)
			xfers[i].rx_buf = rx + 2 * (i - 1);
		spi_message_add_tail(&xfers[i], &msg);
	}

	ret = spi_sync(st->us, &msg);
	if (ret)
		dev_err(&st->us->dev, "problem when burst reading\n");

	mutex_unlock(&st->buf_lock);

	return ret;
}
/* Whilst this makes a lot of calls to iio_sw_ring functions - it is to device
* specific to be rolled into the core.
*/
/* Whilst this makes a lot of calls to iio_sw_ring functions - it is to device
 * specific to be rolled into the core.
 *
 * Threaded trigger handler: burst-reads the enabled channels, converts
 * them from big-endian, appends the timestamp when requested, and pushes
 * the sample into the software ring buffer.
 *
 * Fixes:
 *  - An IRQ handler must return an irqreturn_t; returning -ENOMEM is
 *    invalid, so the allocation-failure path now returns IRQ_HANDLED.
 *  - That path also skipped iio_trigger_notify_done(), which would leave
 *    the trigger wedged; it now goes through the common exit.
 *  - The dev_err() message gains its terminating '\n'.
 */
static irqreturn_t adis16204_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct adis16204_state *st = iio_priv(indio_dev);
	struct iio_buffer *ring = indio_dev->buffer;
	int i = 0;
	s16 *data;
	size_t datasize = ring->access->get_bytes_per_datum(ring);

	data = kmalloc(datasize, GFP_KERNEL);
	if (data == NULL) {
		dev_err(&st->us->dev, "memory alloc failed in ring bh\n");
		goto done;
	}

	if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
	    adis16204_read_ring_data(&indio_dev->dev, st->rx) >= 0)
		for (; i < bitmap_weight(indio_dev->active_scan_mask,
					 indio_dev->masklength); i++)
			data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));

	/* Guaranteed to be aligned with 8 byte boundary */
	if (ring->scan_timestamp)
		*((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;

	ring->access->store_to(ring, (u8 *)data, pf->timestamp);

	kfree(data);
done:
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}
/* Undo adis16204_configure_ring(): free the poll func, then the ring. */
void adis16204_unconfigure_ring(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_sw_rb_free(indio_dev->buffer);
}

/* Standard triggered-buffer enable/disable hooks; no device specifics. */
static const struct iio_buffer_setup_ops adis16204_ring_setup_ops = {
	.preenable = &iio_sw_buffer_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
};
/*
 * adis16204_configure_ring - allocate the software ring buffer and the
 * trigger poll function, and mark the device as triggered-buffer capable.
 *
 * Returns 0 on success or -ENOMEM; on poll-func allocation failure the
 * already-allocated ring is freed before returning.
 */
int adis16204_configure_ring(struct iio_dev *indio_dev)
{
	int ret = 0;
	struct iio_buffer *ring;

	ring = iio_sw_rb_allocate(indio_dev);
	if (!ring) {
		ret = -ENOMEM;
		return ret;
	}
	indio_dev->buffer = ring;
	/* Timestamps are always captured alongside the scan. */
	ring->scan_timestamp = true;
	indio_dev->setup_ops = &adis16204_ring_setup_ops;

	/* pf->timestamp is recorded at trigger time, handler runs threaded */
	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &adis16204_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_iio_sw_rb_free;
	}

	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_iio_sw_rb_free:
	iio_sw_rb_free(indio_dev->buffer);
	return ret;
}
| gpl-2.0 |
Red680812/android_44_KitKat_kernel_htc_dlxpul-1 | net/irda/irlap_event.c | 11279 | 66551 | /*********************************************************************
*
* Filename: irlap_event.c
* Version: 0.9
* Description: IrLAP state machine implementation
* Status: Experimental.
* Author: Dag Brattli <dag@brattli.net>
* Created at: Sat Aug 16 00:59:29 1997
* Modified at: Sat Dec 25 21:07:57 1999
* Modified by: Dag Brattli <dag@brattli.net>
*
* Copyright (c) 1998-2000 Dag Brattli <dag@brattli.net>,
* Copyright (c) 1998 Thomas Davis <ratbert@radiks.net>
* All Rights Reserved.
* Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* Neither Dag Brattli nor University of Tromsø admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
********************************************************************/
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/irda/irda.h>
#include <net/irda/irlap_event.h>
#include <net/irda/timer.h>
#include <net/irda/irlap.h>
#include <net/irda/irlap_frame.h>
#include <net/irda/qos.h>
#include <net/irda/parameters.h>
#include <net/irda/irlmp.h> /* irlmp_flow_indication(), ... */
#include <net/irda/irda_device.h>
#ifdef CONFIG_IRDA_FAST_RR
int sysctl_fast_poll_increase = 50;
#endif
static int irlap_state_ndm (struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_query (struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_reply (struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_conn (struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_setup (struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_xmit_p (struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_pclose (struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_nrm_p (struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_reset (struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_nrm_s (struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_xmit_s (struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_sclose (struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_reset_check(struct irlap_cb *, IRLAP_EVENT event,
struct sk_buff *, struct irlap_info *);
#ifdef CONFIG_IRDA_DEBUG
/* Event names for debug output.
 * NOTE: entries must stay in the same order as the IRLAP_EVENT enum
 * (see irlap_event.h) — the tables are indexed by the enum value. */
static const char *const irlap_event[] = {
	"DISCOVERY_REQUEST",
	"CONNECT_REQUEST",
	"CONNECT_RESPONSE",
	"DISCONNECT_REQUEST",
	"DATA_REQUEST",
	"RESET_REQUEST",
	"RESET_RESPONSE",
	"SEND_I_CMD",
	"SEND_UI_FRAME",
	"RECV_DISCOVERY_XID_CMD",
	"RECV_DISCOVERY_XID_RSP",
	"RECV_SNRM_CMD",
	"RECV_TEST_CMD",
	"RECV_TEST_RSP",
	"RECV_UA_RSP",
	"RECV_DM_RSP",
	"RECV_RD_RSP",
	"RECV_I_CMD",
	"RECV_I_RSP",
	"RECV_UI_FRAME",
	"RECV_FRMR_RSP",
	"RECV_RR_CMD",
	"RECV_RR_RSP",
	"RECV_RNR_CMD",
	"RECV_RNR_RSP",
	"RECV_REJ_CMD",
	"RECV_REJ_RSP",
	"RECV_SREJ_CMD",
	"RECV_SREJ_RSP",
	"RECV_DISC_CMD",
	"SLOT_TIMER_EXPIRED",
	"QUERY_TIMER_EXPIRED",
	"FINAL_TIMER_EXPIRED",
	"POLL_TIMER_EXPIRED",
	"DISCOVERY_TIMER_EXPIRED",
	"WD_TIMER_EXPIRED",
	"BACKOFF_TIMER_EXPIRED",
	"MEDIA_BUSY_TIMER_EXPIRED",
};
#endif	/* CONFIG_IRDA_DEBUG */

/* State names, indexed by the LAP_* state enum (same ordering caveat). */
const char *const irlap_state[] = {
	"LAP_NDM",
	"LAP_QUERY",
	"LAP_REPLY",
	"LAP_CONN",
	"LAP_SETUP",
	"LAP_OFFLINE",
	"LAP_XMIT_P",
	"LAP_PCLOSE",
	"LAP_NRM_P",
	"LAP_RESET_WAIT",
	"LAP_RESET",
	"LAP_NRM_S",
	"LAP_XMIT_S",
	"LAP_SCLOSE",
	"LAP_RESET_CHECK",
};

/* Per-state event handler dispatch table; indexed by self->state. */
static int (*state[])(struct irlap_cb *self, IRLAP_EVENT event,
		      struct sk_buff *skb, struct irlap_info *info) =
{
	irlap_state_ndm,
	irlap_state_query,
	irlap_state_reply,
	irlap_state_conn,
	irlap_state_setup,
	irlap_state_offline,
	irlap_state_xmit_p,
	irlap_state_pclose,
	irlap_state_nrm_p,
	irlap_state_reset_wait,
	irlap_state_reset,
	irlap_state_nrm_s,
	irlap_state_xmit_s,
	irlap_state_sclose,
	irlap_state_reset_check,
};
/*
* Function irda_poll_timer_expired (data)
*
* Poll timer has expired. Normally we must now send a RR frame to the
* remote device
*/
/*
 * Function irda_poll_timer_expired (data)
 *
 *    Poll timer has expired. Normally we must now send a RR frame to the
 *    remote device
 *
 * Timer callback: @data is the irlap_cb set up by irlap_start_poll_timer().
 */
static void irlap_poll_timer_expired(void *data)
{
	struct irlap_cb *self = (struct irlap_cb *) data;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	irlap_do_event(self, POLL_TIMER_EXPIRED, NULL, NULL);
}
/*
* Calculate and set time before we will have to send back the pf bit
* to the peer. Use in primary.
* Make sure that state is XMIT_P/XMIT_S when calling this function
* (and that nobody messed up with the state). - Jean II
*/
/*
 * Calculate and set time before we will have to send back the pf bit
 * to the peer. Use in primary.
 * Make sure that state is XMIT_P/XMIT_S when calling this function
 * (and that nobody messed up with the state). - Jean II
 *
 * @timeout is the normal poll period (jiffies).  With CONFIG_IRDA_FAST_RR
 * the effective timeout ramps up from 0 while we have nothing to send,
 * so idle links are polled faster.
 */
static void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

#ifdef CONFIG_IRDA_FAST_RR
	/*
	 * Send out the RR frames faster if our own transmit queue is empty, or
	 * if the peer is busy. The effect is a much faster conversation
	 */
	if (skb_queue_empty(&self->txq) || self->remote_busy) {
		if (self->fast_RR == TRUE) {
			/*
			 *  Assert that the fast poll timer has not reached the
			 *  normal poll timer yet
			 */
			if (self->fast_RR_timeout < timeout) {
				/*
				 *  FIXME: this should be a more configurable
				 *         function
				 */
				self->fast_RR_timeout +=
					(sysctl_fast_poll_increase * HZ/1000);

				/* Use this fast(er) timeout instead */
				timeout = self->fast_RR_timeout;
			}
		} else {
			self->fast_RR = TRUE;

			/* Start with just 0 ms */
			self->fast_RR_timeout = 0;
			timeout = 0;
		}
	} else
		self->fast_RR = FALSE;

	IRDA_DEBUG(3, "%s(), timeout=%d (%ld)\n", __func__, timeout, jiffies);
#endif /* CONFIG_IRDA_FAST_RR */

	if (timeout == 0)
		irlap_do_event(self, POLL_TIMER_EXPIRED, NULL, NULL);
	else
		irda_start_timer(&self->poll_timer, timeout, self,
				 irlap_poll_timer_expired);
}
/*
* Function irlap_do_event (event, skb, info)
*
* Rushes through the state machine without any delay. If state == XMIT
* then send queued data frames.
*/
/*
 * Function irlap_do_event (event, skb, info)
 *
 *    Rushes through the state machine without any delay. If state == XMIT
 *    then send queued data frames.
 *
 * Dispatches @event to the current state's handler, then — when the
 * handler left us in a transmit state — drains the tx queue and services
 * any pending disconnect.
 */
void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
		    struct sk_buff *skb, struct irlap_info *info)
{
	int ret;

	if (!self || self->magic != LAP_MAGIC)
		return;

	IRDA_DEBUG(3, "%s(), event = %s, state = %s\n", __func__,
		   irlap_event[event], irlap_state[self->state]);

	/* Dispatch to the handler for the current state. */
	ret = (*state[self->state])(self, event, skb, info);

	/*
	 *  Check if there are any pending events that needs to be executed
	 */
	switch (self->state) {
	case LAP_XMIT_P: /* FALLTHROUGH */
	case LAP_XMIT_S:
		/*
		 * We just received the pf bit and are at the beginning
		 * of a new LAP transmit window.
		 * Check if there are any queued data frames, and do not
		 * try to disconnect link if we send any data frames, since
		 * that will change the state away form XMIT
		 */
		IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
			   skb_queue_len(&self->txq));

		if (!skb_queue_empty(&self->txq)) {
			/* Prevent race conditions with irlap_data_request() */
			self->local_busy = TRUE;

			/* Theory of operation.
			 * We send frames up to when we fill the window or
			 * reach line capacity. Those frames will queue up
			 * in the device queue, and the driver will slowly
			 * send them.
			 * After each frame that we send, we poll the higher
			 * layer for more data. It's the right time to do
			 * that because the link layer need to perform the mtt
			 * and then send the first frame, so we can afford
			 * to send a bit of time in kernel space.
			 * The explicit flow indication allow to minimise
			 * buffers (== lower latency), to avoid higher layer
			 * polling via timers (== less context switches) and
			 * to implement a crude scheduler - Jean II */

			/* Try to send away all queued data frames */
			while ((skb = skb_dequeue(&self->txq)) != NULL) {
				/* Send one frame */
				ret = (*state[self->state])(self, SEND_I_CMD,
							    skb, NULL);
				/* Drop reference count.
				 * It will be increase as needed in
				 * irlap_send_data_xxx() */
				kfree_skb(skb);

				/* Poll the higher layers for one more frame */
				irlmp_flow_indication(self->notify.instance,
						      FLOW_START);

				if (ret == -EPROTO)
					break; /* Try again later! */
			}
			/* Finished transmitting */
			self->local_busy = FALSE;
		} else if (self->disconnect_pending) {
			self->disconnect_pending = FALSE;

			ret = (*state[self->state])(self, DISCONNECT_REQUEST,
						    NULL, NULL);
		}
		break;
/*	case LAP_NDM: */
/*	case LAP_CONN: */
/*	case LAP_RESET_WAIT: */
/*	case LAP_RESET_CHECK: */
	default:
		break;
	}
}
/*
* Function irlap_state_ndm (event, skb, frame)
*
* NDM (Normal Disconnected Mode) state
*
*/
static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
			   struct sk_buff *skb, struct irlap_info *info)
{
	discovery_t *discovery_rsp;
	int ret = 0;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);

	switch (event) {
	case CONNECT_REQUEST:
		IRDA_ASSERT(self->netdev != NULL, return -1;);

		if (self->media_busy) {
			/* Note : this will never happen, because we test
			 * media busy in irlap_connect_request() and
			 * postpone the event... - Jean II */
			IRDA_DEBUG(0, "%s(), CONNECT_REQUEST: media busy!\n",
				   __func__);

			/* Always switch state before calling upper layers */
			irlap_next_state(self, LAP_NDM);

			irlap_disconnect_indication(self, LAP_MEDIA_BUSY);
		} else {
			/* Start the connection handshake: send SNRM and wait
			 * for the peer's reply in SETUP state. */
			irlap_send_snrm_frame(self, &self->qos_rx);

			/* Start Final-bit timer */
			irlap_start_final_timer(self, self->final_timeout);

			self->retry_count = 0;
			irlap_next_state(self, LAP_SETUP);
		}
		break;
	case RECV_SNRM_CMD:
		/* Check if the frame contains and I field */
		if (info) {
			/* Remote side initiated a connection: record its
			 * addresses and let the upper layer accept or refuse
			 * it in CONN state. */
			self->daddr = info->daddr;
			self->caddr = info->caddr;

			irlap_next_state(self, LAP_CONN);

			irlap_connect_indication(self, skb);
		} else {
			IRDA_DEBUG(0, "%s(), SNRM frame does not "
				   "contain an I field!\n", __func__);
		}
		break;
	case DISCOVERY_REQUEST:
		IRDA_ASSERT(info != NULL, return -1;);

		if (self->media_busy) {
			IRDA_DEBUG(1, "%s(), DISCOVERY_REQUEST: media busy!\n",
				   __func__);
			/* irlap->log.condition = MEDIA_BUSY; */

			/* This will make IrLMP try again */
			irlap_discovery_confirm(self, NULL);

			/* Note : the discovery log is not cleaned up here,
			 * it will be done in irlap_discovery_request()
			 * Jean II */
			return 0;
		}

		/* Start an active discovery: send the first XID frame, then
		 * step through the remaining slots in QUERY state. */
		self->S = info->S;
		self->s = info->s;
		irlap_send_discovery_xid_frame(self, info->S, info->s, TRUE,
					       info->discovery);
		self->frame_sent = FALSE;
		self->s++;

		irlap_start_slot_timer(self, self->slot_timeout);

		irlap_next_state(self, LAP_QUERY);
		break;
	case RECV_DISCOVERY_XID_CMD:
		IRDA_ASSERT(info != NULL, return -1;);

		/* Assert that this is not the final slot */
		if (info->s <= info->S) {
			/* Pick a random slot in which to answer the remote
			 * discovery; answer now if that slot is the current
			 * one. */
			self->slot = irlap_generate_rand_time_slot(info->S,
								   info->s);
			if (self->slot == info->s) {
				discovery_rsp = irlmp_get_discovery_response();
				discovery_rsp->data.daddr = info->daddr;

				irlap_send_discovery_xid_frame(self, info->S,
							       self->slot,
							       FALSE,
							       discovery_rsp);
				self->frame_sent = TRUE;
			} else
				self->frame_sent = FALSE;

			/*
			 * Go to reply state until end of discovery to
			 * inhibit our own transmissions. Set the timer
			 * to not stay forever there... Jean II
			 */
			irlap_start_query_timer(self, info->S, info->s);
			irlap_next_state(self, LAP_REPLY);
		} else {
			/* This is the final slot. How is it possible ?
			 * This would happen is both discoveries are just slightly
			 * offset (if they are in sync, all packets are lost).
			 * Most often, all the discovery requests will be received
			 * in QUERY state (see my comment there), except for the
			 * last frame that will come here.
			 * The big trouble when it happen is that active discovery
			 * doesn't happen, because nobody answer the discoveries
			 * frame of the other guy, so the log shows up empty.
			 * What should we do ?
			 * Not much. It's too late to answer those discovery frames,
			 * so we just pass the info to IrLMP who will put it in the
			 * log (and post an event).
			 * Another cause would be devices that do discovery much
			 * slower than us, however the latest fixes should minimise
			 * those cases...
			 * Jean II
			 */
			IRDA_DEBUG(1, "%s(), Receiving final discovery request, missed the discovery slots :-(\n", __func__);

			/* Last discovery request -> in the log */
			irlap_discovery_indication(self, info->discovery);
		}
		break;
	case MEDIA_BUSY_TIMER_EXPIRED:
		/* A bunch of events may be postponed because the media is
		 * busy (usually immediately after we close a connection),
		 * or while we are doing discovery (state query/reply).
		 * In all those cases, the media busy flag will be cleared
		 * when it's OK for us to process those postponed events.
		 * This event is not mentioned in the state machines in the
		 * IrLAP spec. It's because they didn't consider Ultra and
		 * postponing connection request is optional.
		 * Jean II */
#ifdef CONFIG_IRDA_ULTRA
		/* Send any pending Ultra frames if any */
		if (!skb_queue_empty(&self->txq_ultra)) {
			/* We don't send the frame, just post an event.
			 * Also, previously this code was in timer.c...
			 * Jean II */
			ret = (*state[self->state])(self, SEND_UI_FRAME,
						    NULL, NULL);
		}
#endif /* CONFIG_IRDA_ULTRA */
		/* Check if we should try to connect.
		 * This code was previously in irlap_do_event() */
		if (self->connect_pending) {
			self->connect_pending = FALSE;

			/* This one *should* not pend in this state, except
			 * if a socket try to connect and immediately
			 * disconnect. - clear - Jean II */
			if (self->disconnect_pending)
				irlap_disconnect_indication(self, LAP_DISC_INDICATION);
			else
				ret = (*state[self->state])(self,
							    CONNECT_REQUEST,
							    NULL, NULL);
			self->disconnect_pending = FALSE;
		}
		/* Note : one way to test if this code works well (including
		 * media busy and small busy) is to create a user space
		 * application generating an Ultra packet every 3.05 sec (or
		 * 2.95 sec) and to see how it interact with discovery.
		 * It's fairly easy to check that no packet is lost, that the
		 * packets are postponed during discovery and that after
		 * discovery indication you have a 100ms "gap".
		 * As connection request and Ultra are now processed the same
		 * way, this avoid the tedious job of trying IrLAP connection
		 * in all those cases...
		 * Jean II */
		break;
#ifdef CONFIG_IRDA_ULTRA
	case SEND_UI_FRAME:
	{
		int i;

		/* Only allowed to repeat an operation twice */
		for (i=0; ((i<2) && (self->media_busy == FALSE)); i++) {
			skb = skb_dequeue(&self->txq_ultra);
			if (skb)
				irlap_send_ui_frame(self, skb, CBROADCAST,
						    CMD_FRAME);
			else
				break;
			/* irlap_send_ui_frame() won't increase skb reference
			 * count, so no dev_kfree_skb() - Jean II */
		}
		if (i == 2) {
			/* Force us to listen 500 ms again */
			irda_device_set_media_busy(self->netdev, TRUE);
		}
		break;
	}
	case RECV_UI_FRAME:
		/* Only accept broadcast frames in NDM mode */
		if (info->caddr != CBROADCAST) {
			IRDA_DEBUG(0, "%s(), not a broadcast frame!\n",
				   __func__);
		} else
			irlap_unitdata_indication(self, skb);
		break;
#endif /* CONFIG_IRDA_ULTRA */
	case RECV_TEST_CMD:
		/* Remove test frame header */
		skb_pull(skb, sizeof(struct test_frame));
		/*
		 * Send response. This skb will not be sent out again, and
		 * will only be used to send out the same info as the cmd
		 */
		irlap_send_test_frame(self, CBROADCAST, info->daddr, skb);
		break;
	case RECV_TEST_RSP:
		IRDA_DEBUG(0, "%s() not implemented!\n", __func__);
		break;
	default:
		IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__,
			   irlap_event[event]);

		ret = -1;
		break;
	}
	return ret;
}
/*
* Function irlap_state_query (event, skb, info)
*
* QUERY state
*
*/
static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event,
			     struct sk_buff *skb, struct irlap_info *info)
{
	int ret = 0;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);

	switch (event) {
	case RECV_DISCOVERY_XID_RSP:
		IRDA_ASSERT(info != NULL, return -1;);
		IRDA_ASSERT(info->discovery != NULL, return -1;);

		IRDA_DEBUG(4, "%s(), daddr=%08x\n", __func__,
			   info->discovery->data.daddr);

		if (!self->discovery_log) {
			IRDA_WARNING("%s: discovery log is gone! "
				     "maybe the discovery timeout has been set"
				     " too short?\n", __func__);
			break;
		}
		/* A peer answered one of our discovery slots; file the
		 * response in the log, keyed by its device address. */
		hashbin_insert(self->discovery_log,
			       (irda_queue_t *) info->discovery,
			       info->discovery->data.daddr, NULL);

		/* Keep state */
		/* irlap_next_state(self, LAP_QUERY); */
		break;
	case RECV_DISCOVERY_XID_CMD:
		/* Yes, it is possible to receive those frames in this mode.
		 * Note that most often the last discovery request won't
		 * occur here but in NDM state (see my comment there).
		 * What should we do ?
		 * Not much. We are currently performing our own discovery,
		 * therefore we can't answer those frames. We don't want
		 * to change state either. We just pass the info to
		 * IrLMP who will put it in the log (and post an event).
		 * Jean II
		 */
		IRDA_ASSERT(info != NULL, return -1;);

		IRDA_DEBUG(1, "%s(), Receiving discovery request (s = %d) while performing discovery :-(\n", __func__, info->s);

		/* Last discovery request ? */
		if (info->s == 0xff)
			irlap_discovery_indication(self, info->discovery);
		break;
	case SLOT_TIMER_EXPIRED:
		/*
		 * Wait a little longer if we detect an incoming frame. This
		 * is not mentioned in the spec, but is a good thing to do,
		 * since we want to work even with devices that violate the
		 * timing requirements.
		 */
		if (irda_device_is_receiving(self->netdev) && !self->add_wait) {
			IRDA_DEBUG(2, "%s(), device is slow to answer, "
				   "waiting some more!\n", __func__);
			irlap_start_slot_timer(self, msecs_to_jiffies(10));
			self->add_wait = TRUE;
			return ret;
		}
		self->add_wait = FALSE;

		if (self->s < self->S) {
			/* More slots to go: advertise the next slot. */
			irlap_send_discovery_xid_frame(self, self->S,
						       self->s, TRUE,
						       self->discovery_cmd);
			self->s++;
			irlap_start_slot_timer(self, self->slot_timeout);

			/* Keep state */
			irlap_next_state(self, LAP_QUERY);
		} else {
			/* This is the final slot! */
			irlap_send_discovery_xid_frame(self, self->S, 0xff,
						       TRUE,
						       self->discovery_cmd);

			/* Always switch state before calling upper layers */
			irlap_next_state(self, LAP_NDM);

			/*
			 * We are now finished with the discovery procedure,
			 * so now we must return the results
			 */
			irlap_discovery_confirm(self, self->discovery_log);

			/* IrLMP should now have taken care of the log */
			self->discovery_log = NULL;
		}
		break;
	default:
		IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__,
			   irlap_event[event]);

		ret = -1;
		break;
	}
	return ret;
}
/*
* Function irlap_state_reply (self, event, skb, info)
*
* REPLY, we have received a XID discovery frame from a device and we
* are waiting for the right time slot to send a response XID frame
*
*/
static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,
			     struct sk_buff *skb, struct irlap_info *info)
{
	discovery_t *discovery_rsp;
	int ret = 0;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);

	switch (event) {
	case QUERY_TIMER_EXPIRED:
		/* Safety net: the remote's discovery never completed, so
		 * stop inhibiting our own transmissions and go back to NDM. */
		IRDA_DEBUG(0, "%s(), QUERY_TIMER_EXPIRED <%ld>\n",
			   __func__, jiffies);
		irlap_next_state(self, LAP_NDM);
		break;
	case RECV_DISCOVERY_XID_CMD:
		IRDA_ASSERT(info != NULL, return -1;);
		/* Last frame? */
		if (info->s == 0xff) {
			del_timer(&self->query_timer);

			/* info->log.condition = REMOTE; */

			/* Always switch state before calling upper layers */
			irlap_next_state(self, LAP_NDM);

			irlap_discovery_indication(self, info->discovery);
		} else {
			/* If it's our slot, send our reply */
			if ((info->s >= self->slot) && (!self->frame_sent)) {
				discovery_rsp = irlmp_get_discovery_response();
				discovery_rsp->data.daddr = info->daddr;

				irlap_send_discovery_xid_frame(self, info->S,
							       self->slot,
							       FALSE,
							       discovery_rsp);
				self->frame_sent = TRUE;
			}
			/* Readjust our timer to accommodate devices
			 * doing faster or slower discovery than us...
			 * Jean II */
			irlap_start_query_timer(self, info->S, info->s);

			/* Keep state */
			//irlap_next_state(self, LAP_REPLY);
		}
		break;
	default:
		IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__,
			   event, irlap_event[event]);

		ret = -1;
		break;
	}
	return ret;
}
/*
* Function irlap_state_conn (event, skb, info)
*
*    CONN, we have received a SNRM command and are waiting for the upper
* layer to accept or refuse connection
*
*/
static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event,
			    struct sk_buff *skb, struct irlap_info *info)
{
	int ret = 0;

	IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[event]);

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);

	switch (event) {
	case CONNECT_RESPONSE:
		/* Upper layer accepted the incoming connection: strip the
		 * SNRM header, negotiate QoS, answer with UA and move to
		 * NRM_S (we are the secondary). */
		skb_pull(skb, sizeof(struct snrm_frame));

		IRDA_ASSERT(self->netdev != NULL, return -1;);

		irlap_qos_negotiate(self, skb);

		irlap_initiate_connection_state(self);

		/*
		 * Applying the parameters now will make sure we change speed
		 * *after* we have sent the next frame
		 */
		irlap_apply_connection_parameters(self, FALSE);

		/*
		 * Sending this frame will force a speed change after it has
		 * been sent (i.e. the frame will be sent at 9600).
		 */
		irlap_send_ua_response_frame(self, &self->qos_rx);

#if 0
		/*
		 * We are allowed to send two frames, but this may increase
		 * the connect latency, so lets not do it for now.
		 */
		/* This is full of good intentions, but doesn't work in
		 * practice.
		 * After sending the first UA response, we switch the
		 * dongle to the negotiated speed, which is usually
		 * different than 9600 kb/s.
		 * From there, there is two solutions :
		 * 1) The other end has received the first UA response :
		 * it will set up the connection, move to state LAP_NRM_P,
		 * and will ignore and drop the second UA response.
		 * Actually, it's even worse : the other side will almost
		 * immediately send a RR that will likely collide with the
		 * UA response (depending on negotiated turnaround).
		 * 2) The other end has not received the first UA response,
		 * will stay at 9600 and will never see the second UA response.
		 * Jean II */
		irlap_send_ua_response_frame(self, &self->qos_rx);
#endif

		/*
		 * The WD-timer could be set to the duration of the P-timer
		 * for this case, but it is recommended to use twice the
		 * value (note 3 IrLAP p. 60).
		 */
		irlap_start_wd_timer(self, self->wd_timeout);
		irlap_next_state(self, LAP_NRM_S);
		break;
	case RECV_DISCOVERY_XID_CMD:
		/* Peer gave up on the connection attempt and restarted
		 * discovery; fall back to NDM ourselves. */
		IRDA_DEBUG(3, "%s(), event RECV_DISCOVER_XID_CMD!\n",
			   __func__);
		irlap_next_state(self, LAP_NDM);
		break;
	case DISCONNECT_REQUEST:
		/* Upper layer refused the connection: answer with DM. */
		IRDA_DEBUG(0, "%s(), Disconnect request!\n", __func__);
		irlap_send_dm_frame(self);
		irlap_next_state(self, LAP_NDM);
		irlap_disconnect_indication(self, LAP_DISC_INDICATION);
		break;
	default:
		IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__,
			   event, irlap_event[event]);

		ret = -1;
		break;
	}
	return ret;
}
/*
* Function irlap_state_setup (event, skb, frame)
*
* SETUP state, The local layer has transmitted a SNRM command frame to
* a remote peer layer and is awaiting a reply.
*
*/
static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
			     struct sk_buff *skb, struct irlap_info *info)
{
	int ret = 0;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);

	switch (event) {
	case FINAL_TIMER_EXPIRED:
		/* No reply to our SNRM yet; retry up to N3 times before
		 * giving up on the connection attempt. */
		if (self->retry_count < self->N3) {
			/*
			 * Perform random backoff, Wait a random number of time units, minimum
			 * duration half the time taken to transmitt a SNRM frame, maximum duration
			 * 1.5 times the time taken to transmit a SNRM frame. So this time should
			 * between 15 msecs and 45 msecs.
			 */
			irlap_start_backoff_timer(self, msecs_to_jiffies(20 +
								(jiffies % 30)));
		} else {
			/* Always switch state before calling upper layers */
			irlap_next_state(self, LAP_NDM);

			irlap_disconnect_indication(self, LAP_FOUND_NONE);
		}
		break;
	case BACKOFF_TIMER_EXPIRED:
		/* Backoff elapsed: retransmit the SNRM and rearm F-timer. */
		irlap_send_snrm_frame(self, &self->qos_rx);
		irlap_start_final_timer(self, self->final_timeout);
		self->retry_count++;
		break;
	case RECV_SNRM_CMD:
		IRDA_DEBUG(4, "%s(), SNRM battle!\n", __func__);

		IRDA_ASSERT(skb != NULL, return 0;);
		IRDA_ASSERT(info != NULL, return 0;);

		/*
		 * The device with the largest device address wins the battle
		 * (both have sent a SNRM command!)
		 */
		if (info && (info->daddr > self->saddr)) {
			/* Peer wins: accept its SNRM and become secondary. */
			del_timer(&self->final_timer);
			irlap_initiate_connection_state(self);

			IRDA_ASSERT(self->netdev != NULL, return -1;);

			skb_pull(skb, sizeof(struct snrm_frame));

			irlap_qos_negotiate(self, skb);

			/* Send UA frame and then change link settings */
			irlap_apply_connection_parameters(self, FALSE);
			irlap_send_ua_response_frame(self, &self->qos_rx);

			irlap_next_state(self, LAP_NRM_S);
			irlap_connect_confirm(self, skb);

			/*
			 * The WD-timer could be set to the duration of the
			 * P-timer for this case, but it is recommended
			 * to use twice the value (note 3 IrLAP p. 60).
			 */
			irlap_start_wd_timer(self, self->wd_timeout);
		} else {
			/* We just ignore the other device! */
			irlap_next_state(self, LAP_SETUP);
		}
		break;
	case RECV_UA_RSP:
		/* Peer accepted our SNRM: finish connection setup as the
		 * primary station. */
		/* Stop F-timer */
		del_timer(&self->final_timer);

		/* Initiate connection state */
		irlap_initiate_connection_state(self);

		/* Negotiate connection parameters */
		IRDA_ASSERT(skb->len > 10, return -1;);

		skb_pull(skb, sizeof(struct ua_frame));

		IRDA_ASSERT(self->netdev != NULL, return -1;);

		irlap_qos_negotiate(self, skb);

		/* Set the new link setting *now* (before the rr frame) */
		irlap_apply_connection_parameters(self, TRUE);
		self->retry_count = 0;

		/* Wait for turnaround time to give a chance to the other
		 * device to be ready to receive us.
		 * Note : the time to switch speed is typically larger
		 * than the turnaround time, but as we don't have the other
		 * side speed switch time, that's our best guess...
		 * Jean II */
		irlap_wait_min_turn_around(self, &self->qos_tx);

		/* This frame will actually be sent at the new speed */
		irlap_send_rr_frame(self, CMD_FRAME);

		/* The timer is set to half the normal timer to quickly
		 * detect a failure to negotiate the new connection
		 * parameters. IrLAP 6.11.3.2, note 3.
		 * Note that currently we don't process this failure
		 * properly, as we should do a quick disconnect.
		 * Jean II */
		irlap_start_final_timer(self, self->final_timeout/2);
		irlap_next_state(self, LAP_NRM_P);

		irlap_connect_confirm(self, skb);
		break;
	case RECV_DM_RSP: /* FALLTHROUGH */
	case RECV_DISC_CMD:
		/* Peer refused or aborted the connection. */
		del_timer(&self->final_timer);
		irlap_next_state(self, LAP_NDM);

		irlap_disconnect_indication(self, LAP_DISC_INDICATION);
		break;
	default:
		IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__,
			   event, irlap_event[event]);

		ret = -1;
		break;
	}
	return ret;
}
/*
* Function irlap_state_offline (self, event, skb, info)
*
* OFFLINE state, not used for now!
*
*/
static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event,
			       struct sk_buff *skb, struct irlap_info *info)
{
	/* The OFFLINE state is currently unused: no event is valid here,
	 * so just log it and report failure to the caller. */
	IRDA_DEBUG(0, "%s(), Unknown event\n", __func__);

	return -1;
}
/*
* Function irlap_state_xmit_p (self, event, skb, info)
*
* XMIT, Only the primary station has right to transmit, and we
* therefore do not expect to receive any transmissions from other
* stations.
*
*/
static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
			      struct sk_buff *skb, struct irlap_info *info)
{
	int ret = 0;

	switch (event) {
	case SEND_I_CMD:
		/*
		 * Only send frame if send-window > 0.
		 */
		if ((self->window > 0) && (!self->remote_busy)) {
			int nextfit;
#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
			struct sk_buff *skb_next;

			/* With DYNAMIC_WINDOW, we keep the window size
			 * maximum, and adapt on the packets we are sending.
			 * At 115k, we can send only 2 packets of 2048 bytes
			 * in a 500 ms turnaround. Without this option, we
			 * would always limit the window to 2. With this
			 * option, if we send smaller packets, we can send
			 * up to 7 of them (always depending on QoS).
			 * Jean II */

			/* Look at the next skb. This is safe, as we are
			 * the only consumer of the Tx queue (if we are not,
			 * we have other problems) - Jean II */
			skb_next = skb_peek(&self->txq);

			/* Check if a subsequent skb exist and would fit in
			 * the current window (with respect to turnaround
			 * time).
			 * This allow us to properly mark the current packet
			 * with the pf bit, to avoid falling back on the
			 * second test below, and avoid waiting the
			 * end of the window and sending a extra RR.
			 * Note : (skb_next != NULL) <=> (skb_queue_len() > 0)
			 * Jean II */
			nextfit = ((skb_next != NULL) &&
				   ((skb_next->len + skb->len) <=
				    self->bytes_left));

			/*
			 * The current packet may not fit ! Because of test
			 * above, this should not happen any more !!!
			 * Test if we have transmitted more bytes over the
			 * link than its possible to do with the current
			 * speed and turn-around-time.
			 */
			if ((!nextfit) && (skb->len > self->bytes_left)) {
				IRDA_DEBUG(0, "%s(), Not allowed to transmit"
					   " more bytes!\n", __func__);
				/* Requeue the skb */
				skb_queue_head(&self->txq, skb_get(skb));
				/*
				 * We should switch state to LAP_NRM_P, but
				 * that is not possible since we must be sure
				 * that we poll the other side. Since we have
				 * used up our time, the poll timer should
				 * trigger anyway now, so we just wait for it
				 * DB
				 */
				/*
				 * Sorry, but that's not totally true. If
				 * we send 2000B packets, we may wait another
				 * 1000B until our turnaround expire. That's
				 * why we need to be proactive in avoiding
				 * coming here. - Jean II
				 */
				return -EPROTO;
			}

			/* Subtract space used by this skb */
			self->bytes_left -= skb->len;
#else /* CONFIG_IRDA_DYNAMIC_WINDOW */
			/* Window has been adjusted for the max packet
			 * size, so much simpler... - Jean II */
			nextfit = !skb_queue_empty(&self->txq);
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
			/*
			 * Send data with poll bit cleared only if window > 1
			 * and there is more frames after this one to be sent
			 */
			if ((self->window > 1) && (nextfit)) {
				/* More packet to send in current window */
				irlap_send_data_primary(self, skb);
				irlap_next_state(self, LAP_XMIT_P);
			} else {
				/* Final packet of window */
				irlap_send_data_primary_poll(self, skb);

				/*
				 * Make sure state machine does not try to send
				 * any more frames
				 */
				ret = -EPROTO;
			}
#ifdef CONFIG_IRDA_FAST_RR
			/* Peer may want to reply immediately */
			self->fast_RR = FALSE;
#endif /* CONFIG_IRDA_FAST_RR */
		} else {
			IRDA_DEBUG(4, "%s(), Unable to send! remote busy?\n",
				   __func__);
			skb_queue_head(&self->txq, skb_get(skb));

			/*
			 * The next ret is important, because it tells
			 * irlap_next_state _not_ to deliver more frames
			 */
			ret = -EPROTO;
		}
		break;
	case POLL_TIMER_EXPIRED:
		/* Our transmit window is over: hand the permission-to-send
		 * (poll bit) to the secondary with an RR command. */
		IRDA_DEBUG(3, "%s(), POLL_TIMER_EXPIRED <%ld>\n",
			   __func__, jiffies);
		irlap_send_rr_frame(self, CMD_FRAME);
		/* Return to NRM properly - Jean II */
		self->window = self->window_size;
#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
		/* Allowed to transmit a maximum number of bytes again. */
		self->bytes_left = self->line_capacity;
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
		irlap_start_final_timer(self, self->final_timeout);
		irlap_next_state(self, LAP_NRM_P);
		break;
	case DISCONNECT_REQUEST:
		/* Tear down the link: send DISC and wait for the reply in
		 * PCLOSE state. */
		del_timer(&self->poll_timer);
		irlap_wait_min_turn_around(self, &self->qos_tx);
		irlap_send_disc_frame(self);
		irlap_flush_all_queues(self);
		irlap_start_final_timer(self, self->final_timeout);
		self->retry_count = 0;
		irlap_next_state(self, LAP_PCLOSE);
		break;
	case DATA_REQUEST:
		/* Nothing to do, irlap_do_event() will send the packet
		 * when we return... - Jean II */
		break;
	default:
		IRDA_DEBUG(0, "%s(), Unknown event %s\n",
			   __func__, irlap_event[event]);

		ret = -EINVAL;
		break;
	}
	return ret;
}
/*
* Function irlap_state_pclose (event, skb, info)
*
* PCLOSE state
*/
static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event,
			      struct sk_buff *skb, struct irlap_info *info)
{
	int ret = 0;

	IRDA_DEBUG(1, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);

	switch (event) {
	case RECV_UA_RSP: /* FALLTHROUGH */
	case RECV_DM_RSP:
		/* Peer acknowledged our DISC: fall back to the default link
		 * parameters and notify the upper layers. */
		del_timer(&self->final_timer);

		irlap_apply_default_connection_parameters(self);

		/* Always switch state before calling upper layers */
		irlap_next_state(self, LAP_NDM);

		irlap_disconnect_indication(self, LAP_DISC_INDICATION);
		break;
	case FINAL_TIMER_EXPIRED:
		/* No acknowledgement yet; guard clause handles giving up
		 * once all N3 retries have been used. */
		if (self->retry_count >= self->N3) {
			irlap_apply_default_connection_parameters(self);

			/* Always switch state before calling upper layers */
			irlap_next_state(self, LAP_NDM);

			irlap_disconnect_indication(self, LAP_NO_RESPONSE);
			break;
		}
		/* Retransmit the DISC frame and rearm the final timer. */
		irlap_wait_min_turn_around(self, &self->qos_tx);
		irlap_send_disc_frame(self);
		irlap_start_final_timer(self, self->final_timeout);
		self->retry_count++;
		/* Keep state */
		break;
	default:
		IRDA_DEBUG(1, "%s(), Unknown event %d\n", __func__, event);

		ret = -1;
		break;
	}
	return ret;
}
/*
* Function irlap_state_nrm_p (self, event, skb, info)
*
* NRM_P (Normal Response Mode as Primary), The primary station has given
* permissions to a secondary station to transmit IrLAP resonse frames
* (by sending a frame with the P bit set). The primary station will not
* transmit any frames and is expecting to receive frames only from the
* secondary to which transmission permissions has been given.
*/
static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
			     struct sk_buff *skb, struct irlap_info *info)
{
	int ret = 0;
	int ns_status;
	int nr_status;

	switch (event) {
	case RECV_I_RSP: /* Optimize for the common case */
		if (unlikely(skb->len <= LAP_ADDR_HEADER + LAP_CTRL_HEADER)) {
			/*
			 * Input validation check: a stir4200/mcp2150
			 * combination sometimes results in an empty i:rsp.
			 * This makes no sense; we can just ignore the frame
			 * and send an rr:cmd immediately. This happens before
			 * changing nr or ns so triggers a retransmit
			 */
			irlap_wait_min_turn_around(self, &self->qos_tx);
			irlap_send_rr_frame(self, CMD_FRAME);
			/* Keep state */
			break;
		}
		/* FIXME: must check for remote_busy below */
#ifdef CONFIG_IRDA_FAST_RR
		/*
		 * Reset the fast_RR so we can use the fast RR code with
		 * full speed the next time since peer may have more frames
		 * to transmitt
		 */
		self->fast_RR = FALSE;
#endif /* CONFIG_IRDA_FAST_RR */
		IRDA_ASSERT(info != NULL, return -1;);

		/* Classify the frame's sequence numbers; the four
		 * (ns_status, nr_status) combinations below decide whether
		 * we deliver, retransmit, or reset. */
		ns_status = irlap_validate_ns_received(self, info->ns);
		nr_status = irlap_validate_nr_received(self, info->nr);

		/*
		 * Check for expected I(nformation) frame
		 */
		if ((ns_status == NS_EXPECTED) && (nr_status == NR_EXPECTED)) {
			/* Update Vr (next frame for us to receive) */
			self->vr = (self->vr + 1) % 8;

			/* Update Nr received, cleanup our retry queue */
			irlap_update_nr_received(self, info->nr);

			/*
			 * Got expected NR, so reset the
			 * retry_count. This is not done by IrLAP spec,
			 * which is strange!
			 */
			self->retry_count = 0;
			self->ack_required = TRUE;

			/* poll bit cleared? */
			if (!info->pf) {
				/* Keep state, do not move this line */
				irlap_next_state(self, LAP_NRM_P);

				irlap_data_indication(self, skb, FALSE);
			} else {
				/* No longer waiting for pf */
				del_timer(&self->final_timer);

				irlap_wait_min_turn_around(self, &self->qos_tx);

				/* Call higher layer *before* changing state
				 * to give them a chance to send data in the
				 * next LAP frame.
				 * Jean II */
				irlap_data_indication(self, skb, FALSE);

				/* XMIT states are the most dangerous state
				 * to be in, because user requests are
				 * processed directly and may change state.
				 * On the other hand, in NDM_P, those
				 * requests are queued and we will process
				 * them when we return to irlap_do_event().
				 * Jean II
				 */
				irlap_next_state(self, LAP_XMIT_P);

				/* This is the last frame.
				 * Make sure it's always called in XMIT state.
				 * - Jean II */
				irlap_start_poll_timer(self, self->poll_timeout);
			}
			break;
		}
		/* Unexpected next to send (Ns) */
		if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_EXPECTED))
		{
			if (!info->pf) {
				irlap_update_nr_received(self, info->nr);

				/*
				 * Wait until the last frame before doing
				 * anything
				 */

				/* Keep state */
				irlap_next_state(self, LAP_NRM_P);
			} else {
				IRDA_DEBUG(4,
					   "%s(), missing or duplicate frame!\n",
					   __func__);

				/* Update Nr received */
				irlap_update_nr_received(self, info->nr);

				irlap_wait_min_turn_around(self, &self->qos_tx);
				irlap_send_rr_frame(self, CMD_FRAME);

				self->ack_required = FALSE;

				irlap_start_final_timer(self, self->final_timeout);
				irlap_next_state(self, LAP_NRM_P);
			}
			break;
		}
		/*
		 * Unexpected next to receive (Nr)
		 */
		if ((ns_status == NS_EXPECTED) && (nr_status == NR_UNEXPECTED))
		{
			if (info->pf) {
				self->vr = (self->vr + 1) % 8;

				/* Update Nr received */
				irlap_update_nr_received(self, info->nr);

				/* Resend rejected frames */
				irlap_resend_rejected_frames(self, CMD_FRAME);

				self->ack_required = FALSE;

				/* Make sure we account for the time
				 * to transmit our frames. See comemnts
				 * in irlap_send_data_primary_poll().
				 * Jean II */
				irlap_start_final_timer(self, 2 * self->final_timeout);

				/* Keep state, do not move this line */
				irlap_next_state(self, LAP_NRM_P);

				irlap_data_indication(self, skb, FALSE);
			} else {
				/*
				 * Do not resend frames until the last
				 * frame has arrived from the other
				 * device. This is not documented in
				 * IrLAP!!
				 */
				self->vr = (self->vr + 1) % 8;

				/* Update Nr received */
				irlap_update_nr_received(self, info->nr);

				self->ack_required = FALSE;

				/* Keep state, do not move this line!*/
				irlap_next_state(self, LAP_NRM_P);

				irlap_data_indication(self, skb, FALSE);
			}
			break;
		}
		/*
		 * Unexpected next to send (Ns) and next to receive (Nr)
		 * Not documented by IrLAP!
		 */
		if ((ns_status == NS_UNEXPECTED) &&
		    (nr_status == NR_UNEXPECTED))
		{
			IRDA_DEBUG(4, "%s(), unexpected nr and ns!\n",
				   __func__);
			if (info->pf) {
				/* Resend rejected frames */
				irlap_resend_rejected_frames(self, CMD_FRAME);

				/* Give peer some time to retransmit!
				 * But account for our own Tx. */
				irlap_start_final_timer(self, 2 * self->final_timeout);

				/* Keep state, do not move this line */
				irlap_next_state(self, LAP_NRM_P);
			} else {
				/* Update Nr received */
				/* irlap_update_nr_received( info->nr); */
				self->ack_required = FALSE;
			}
			break;
		}
		/*
		 * Invalid NR or NS
		 */
		if ((nr_status == NR_INVALID) || (ns_status == NS_INVALID)) {
			if (info->pf) {
				del_timer(&self->final_timer);

				irlap_next_state(self, LAP_RESET_WAIT);

				irlap_disconnect_indication(self, LAP_RESET_INDICATION);
				self->xmitflag = TRUE;
			} else {
				del_timer(&self->final_timer);

				irlap_disconnect_indication(self, LAP_RESET_INDICATION);

				self->xmitflag = FALSE;
			}
			break;
		}
		IRDA_DEBUG(1, "%s(), Not implemented!\n", __func__);
		IRDA_DEBUG(1, "%s(), event=%s, ns_status=%d, nr_status=%d\n",
			   __func__, irlap_event[event], ns_status, nr_status);
		break;
	case RECV_UI_FRAME:
		/* Poll bit cleared? */
		if (!info->pf) {
			irlap_data_indication(self, skb, TRUE);
			irlap_next_state(self, LAP_NRM_P);
		} else {
			del_timer(&self->final_timer);
			irlap_data_indication(self, skb, TRUE);
			irlap_next_state(self, LAP_XMIT_P);
			IRDA_DEBUG(1, "%s: RECV_UI_FRAME: next state %s\n", __func__, irlap_state[self->state]);
			irlap_start_poll_timer(self, self->poll_timeout);
		}
		break;
	case RECV_RR_RSP:
		/*
		 * If you get a RR, the remote isn't busy anymore,
		 * no matter what the NR
		 */
		self->remote_busy = FALSE;

		/* Stop final timer */
		del_timer(&self->final_timer);

		/*
		 * Nr as expected?
		 */
		ret = irlap_validate_nr_received(self, info->nr);
		if (ret == NR_EXPECTED) {
			/* Update Nr received */
			irlap_update_nr_received(self, info->nr);

			/*
			 * Got expected NR, so reset the retry_count. This
			 * is not done by the IrLAP standard , which is
			 * strange! DB.
			 */
			self->retry_count = 0;
			irlap_wait_min_turn_around(self, &self->qos_tx);

			irlap_next_state(self, LAP_XMIT_P);

			/* Start poll timer */
			irlap_start_poll_timer(self, self->poll_timeout);
		} else if (ret == NR_UNEXPECTED) {
			IRDA_ASSERT(info != NULL, return -1;);
			/*
			 * Unexpected nr!
			 */

			/* Update Nr received */
			irlap_update_nr_received(self, info->nr);

			IRDA_DEBUG(4, "RECV_RR_FRAME: Retrans:%d, nr=%d, va=%d, "
				   "vs=%d, vr=%d\n",
				   self->retry_count, info->nr, self->va,
				   self->vs, self->vr);

			/* Resend rejected frames */
			irlap_resend_rejected_frames(self, CMD_FRAME);
			irlap_start_final_timer(self, self->final_timeout * 2);

			irlap_next_state(self, LAP_NRM_P);
		} else if (ret == NR_INVALID) {
			IRDA_DEBUG(1, "%s(), Received RR with "
				   "invalid nr !\n", __func__);

			irlap_next_state(self, LAP_RESET_WAIT);

			irlap_disconnect_indication(self, LAP_RESET_INDICATION);
			self->xmitflag = TRUE;
		}
		break;
	case RECV_RNR_RSP:
		/* Remote is busy: note it and take back the right to
		 * transmit; we'll poll again when the poll timer expires. */
		IRDA_ASSERT(info != NULL, return -1;);

		/* Stop final timer */
		del_timer(&self->final_timer);
		self->remote_busy = TRUE;

		/* Update Nr received */
		irlap_update_nr_received(self, info->nr);
		irlap_next_state(self, LAP_XMIT_P);

		/* Start poll timer */
		irlap_start_poll_timer(self, self->poll_timeout);
		break;
	case RECV_FRMR_RSP:
		/* Frame-reject from the peer: initiate a link reset. */
		del_timer(&self->final_timer);
		self->xmitflag = TRUE;
		irlap_next_state(self, LAP_RESET_WAIT);
		irlap_reset_indication(self);
		break;
	case FINAL_TIMER_EXPIRED:
		/*
		 * We are allowed to wait for additional 300 ms if
		 * final timer expires when we are in the middle
		 * of receiving a frame (page 45, IrLAP). Check that
		 * we only do this once for each frame.
		 */
		if (irda_device_is_receiving(self->netdev) && !self->add_wait) {
			IRDA_DEBUG(1, "FINAL_TIMER_EXPIRED when receiving a "
				   "frame! Waiting a little bit more!\n");
			irlap_start_final_timer(self, msecs_to_jiffies(300));

			/*
			 * Don't allow this to happen one more time in a row,
			 * or else we can get a pretty tight loop here if
			 * if we only receive half a frame. DB.
			 */
			self->add_wait = TRUE;
			break;
		}
		self->add_wait = FALSE;

		/* N2 is the disconnect timer. Until we reach it, we retry */
		if (self->retry_count < self->N2) {
			if (skb_peek(&self->wx_list) == NULL) {
				/* Retry sending the pf bit to the secondary */
				IRDA_DEBUG(4, "nrm_p: resending rr");
				irlap_wait_min_turn_around(self, &self->qos_tx);
				irlap_send_rr_frame(self, CMD_FRAME);
			} else {
				IRDA_DEBUG(4, "nrm_p: resend frames");
				irlap_resend_rejected_frames(self, CMD_FRAME);
			}

			irlap_start_final_timer(self, self->final_timeout);
			self->retry_count++;
			IRDA_DEBUG(4, "irlap_state_nrm_p: FINAL_TIMER_EXPIRED:"
				   " retry_count=%d\n", self->retry_count);

			/* Early warning event. I'm using a pretty liberal
			 * interpretation of the spec and generate an event
			 * every time the timer is multiple of N1 (and not
			 * only the first time). This allow application
			 * to know precisely if connectivity restart...
			 * Jean II */
			if ((self->retry_count % self->N1) == 0)
				irlap_status_indication(self,
							STATUS_NO_ACTIVITY);

			/* Keep state */
		} else {
			irlap_apply_default_connection_parameters(self);

			/* Always switch state before calling upper layers */
			irlap_next_state(self, LAP_NDM);
			irlap_disconnect_indication(self, LAP_NO_RESPONSE);
		}
		break;
	case RECV_REJ_RSP:
		/* Peer rejected a frame: retransmit from Nr, unless it is
		 * busy, in which case just poll again. */
		irlap_update_nr_received(self, info->nr);
		if (self->remote_busy) {
			irlap_wait_min_turn_around(self, &self->qos_tx);
			irlap_send_rr_frame(self, CMD_FRAME);
		} else
			irlap_resend_rejected_frames(self, CMD_FRAME);
		irlap_start_final_timer(self, 2 * self->final_timeout);
		break;
	case RECV_SREJ_RSP:
		/* Selective reject: retransmit only the requested frame. */
		irlap_update_nr_received(self, info->nr);
		if (self->remote_busy) {
			irlap_wait_min_turn_around(self, &self->qos_tx);
			irlap_send_rr_frame(self, CMD_FRAME);
		} else
			irlap_resend_rejected_frame(self, CMD_FRAME);
		irlap_start_final_timer(self, 2 * self->final_timeout);
		break;
	case RECV_RD_RSP:
		/* Secondary requested disconnection. */
		IRDA_DEBUG(1, "%s(), RECV_RD_RSP\n", __func__);

		irlap_flush_all_queues(self);
		irlap_next_state(self, LAP_XMIT_P);
		/* Call back the LAP state machine to do a proper disconnect */
		irlap_disconnect_request(self);
		break;
	default:
		IRDA_DEBUG(1, "%s(), Unknown event %s\n",
			   __func__, irlap_event[event]);

		ret = -1;
		break;
	}
	return ret;
}
/*
* Function irlap_state_reset_wait (event, skb, info)
*
* We have informed the service user of a reset condition, and are
* awaiting a reset or disconnect request.
*
*/
static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event,
                                  struct sk_buff *skb, struct irlap_info *info)
{
    int ret = 0;

    IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]);

    IRDA_ASSERT(self != NULL, return -1;);
    IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);

    switch (event) {
    case RESET_REQUEST:
        /* Only the station currently holding the transmit token sends
         * the SNRM; either way we arm the final timer and move to the
         * RESET state to wait for the peer's answer. */
        if (self->xmitflag) {
            irlap_wait_min_turn_around(self, &self->qos_tx);
            irlap_send_snrm_frame(self, NULL);
        }
        irlap_start_final_timer(self, self->final_timeout);
        irlap_next_state(self, LAP_RESET);
        break;
    case DISCONNECT_REQUEST:
        /* Give up on the reset and close the link instead. */
        irlap_wait_min_turn_around(self, &self->qos_tx);
        irlap_send_disc_frame(self);
        irlap_flush_all_queues(self);
        irlap_start_final_timer(self, self->final_timeout);
        self->retry_count = 0;
        irlap_next_state(self, LAP_PCLOSE);
        break;
    default:
        IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__,
                   irlap_event[event]);
        ret = -1;
        break;
    }
    return ret;
}
/*
* Function irlap_state_reset (self, event, skb, info)
*
 * We have sent an SNRM reset command to the peer layer, and are awaiting
 * a reply.
*
*/
static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
                             struct sk_buff *skb, struct irlap_info *info)
{
    int ret = 0;

    IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]);

    IRDA_ASSERT(self != NULL, return -1;);
    IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);

    switch (event) {
    case RECV_DISC_CMD:
        /* Peer wants to tear the link down rather than reset it: stop
         * waiting for the UA, drop back to NDM with default parameters
         * and tell the upper layers the connection is gone. */
        del_timer(&self->final_timer);

        irlap_apply_default_connection_parameters(self);

        /* Always switch state before calling upper layers */
        irlap_next_state(self, LAP_NDM);

        irlap_disconnect_indication(self, LAP_NO_RESPONSE);
        break;
    case RECV_UA_RSP:
        /* Peer acknowledged the reset: re-initialise the sequence
         * state and resume transmitting as primary. */
        del_timer(&self->final_timer);

        /* Initiate connection state */
        irlap_initiate_connection_state(self);

        irlap_reset_confirm();

        self->remote_busy = FALSE;

        irlap_next_state(self, LAP_XMIT_P);

        irlap_start_poll_timer(self, self->poll_timeout);
        break;
    case FINAL_TIMER_EXPIRED:
        if (self->retry_count < 3) {
            /* No answer yet: retransmit the SNRM and re-arm the
             * final timer. */
            irlap_wait_min_turn_around(self, &self->qos_tx);

            IRDA_ASSERT(self->netdev != NULL, return -1;);
            irlap_send_snrm_frame(self, self->qos_dev);

            self->retry_count++; /* Experimental!! */

            irlap_start_final_timer(self, self->final_timeout);
            irlap_next_state(self, LAP_RESET);
        } else if (self->retry_count >= self->N3) {
            /* Out of retries: give up, fall back to NDM and report
             * the dead link to the upper layers. */
            irlap_apply_default_connection_parameters(self);

            /* Always switch state before calling upper layers */
            irlap_next_state(self, LAP_NDM);

            irlap_disconnect_indication(self, LAP_NO_RESPONSE);
        }
        /* NOTE(review): when retry_count lands in [3, N3) neither
         * branch runs and the final timer is NOT restarted, so no
         * further FINAL_TIMER_EXPIRED events can occur - confirm this
         * gap is intentional. */
        break;
    case RECV_SNRM_CMD:
        /*
         * SNRM frame is not allowed to contain an I-field in this
         * state
         */
        if (!info) {
            IRDA_DEBUG(3, "%s(), RECV_SNRM_CMD\n", __func__);
            /* Peer initiated a reset of its own: acknowledge it
             * with a UA and confirm the reset locally. */
            irlap_initiate_connection_state(self);
            irlap_wait_min_turn_around(self, &self->qos_tx);
            irlap_send_ua_response_frame(self, &self->qos_rx);
            irlap_reset_confirm();
            irlap_start_wd_timer(self, self->wd_timeout);
            irlap_next_state(self, LAP_NDM);
        } else {
            IRDA_DEBUG(0,
                       "%s(), SNRM frame contained an I field!\n",
                       __func__);
        }
        break;
    default:
        IRDA_DEBUG(1, "%s(), Unknown event %s\n",
                   __func__, irlap_event[event]);

        ret = -1;
        break;
    }
    return ret;
}
/*
* Function irlap_state_xmit_s (event, skb, info)
*
* XMIT_S, The secondary station has been given the right to transmit,
* and we therefore do not expect to receive any transmissions from other
* stations.
*/
static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
                              struct sk_buff *skb, struct irlap_info *info)
{
    int ret = 0;

    IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[event]);

    IRDA_ASSERT(self != NULL, return -ENODEV;);
    IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;);

    switch (event) {
    case SEND_I_CMD:
        /*
         * Send frame only if send window > 0
         */
        if ((self->window > 0) && (!self->remote_busy)) {
            int nextfit;
#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
            struct sk_buff *skb_next;

            /*
             * Same deal as in irlap_state_xmit_p(), so see
             * the comments at that point.
             * We are the secondary, so there are only subtle
             * differences. - Jean II
             */

            /* Check if a subsequent skb exist and would fit in
             * the current window (with respect to turnaround
             * time). - Jean II */
            skb_next = skb_peek(&self->txq);
            nextfit = ((skb_next != NULL) &&
                       ((skb_next->len + skb->len) <=
                        self->bytes_left));

            /*
             * Test if we have transmitted more bytes over the
             * link than its possible to do with the current
             * speed and turn-around-time.
             */
            if((!nextfit) && (skb->len > self->bytes_left)) {
                IRDA_DEBUG(0, "%s(), Not allowed to transmit"
                           " more bytes!\n", __func__);
                /* Requeue the skb */
                skb_queue_head(&self->txq, skb_get(skb));

                /*
                 * Switch to NRM_S, this is only possible
                 * when we are in secondary mode, since we
                 * must be sure that we don't miss any RR
                 * frames
                 */
                self->window = self->window_size;
                self->bytes_left = self->line_capacity;
                irlap_start_wd_timer(self, self->wd_timeout);

                irlap_next_state(self, LAP_NRM_S);
                /* Slight difference with primary :
                 * here we would wait for the other side to
                 * expire the turnaround. - Jean II */

                return -EPROTO; /* Try again later */
            }
            /* Subtract space used by this skb */
            self->bytes_left -= skb->len;
#else /* CONFIG_IRDA_DYNAMIC_WINDOW */
            /* Window has been adjusted for the max packet
             * size, so much simpler... - Jean II */
            nextfit = !skb_queue_empty(&self->txq);
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
            /*
             * Send data with final bit cleared only if window > 1
             * and there is more frames to be sent
             */
            if ((self->window > 1) && (nextfit)) {
                /* More room in the window: keep the token. */
                irlap_send_data_secondary(self, skb);
                irlap_next_state(self, LAP_XMIT_S);
            } else {
                /* Last frame of the burst: set the final bit and
                 * hand the token back to the primary. */
                irlap_send_data_secondary_final(self, skb);
                irlap_next_state(self, LAP_NRM_S);

                /*
                 * Make sure state machine does not try to send
                 * any more frames
                 */
                ret = -EPROTO;
            }
        } else {
            /* Window exhausted or peer busy: requeue for later. */
            IRDA_DEBUG(2, "%s(), Unable to send!\n", __func__);
            skb_queue_head(&self->txq, skb_get(skb));
            ret = -EPROTO;
        }
        break;
    case DISCONNECT_REQUEST:
        /* Request disconnection from the primary (RD frame) and wait
         * for its answer in SCLOSE. */
        irlap_send_rd_frame(self);
        irlap_flush_all_queues(self);
        irlap_start_wd_timer(self, self->wd_timeout);
        irlap_next_state(self, LAP_SCLOSE);
        break;
    case DATA_REQUEST:
        /* Nothing to do, irlap_do_event() will send the packet
         * when we return... - Jean II */
        break;
    default:
        IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__,
                   irlap_event[event]);

        ret = -EINVAL;
        break;
    }
    return ret;
}
/*
* Function irlap_state_nrm_s (event, skb, info)
*
* NRM_S (Normal Response Mode as Secondary) state, in this state we are
* expecting to receive frames from the primary station
*
*/
static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
                             struct sk_buff *skb, struct irlap_info *info)
{
    int ns_status;
    int nr_status;
    int ret = 0;

    IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[event]);

    IRDA_ASSERT(self != NULL, return -1;);
    IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);

    switch (event) {
    case RECV_I_CMD: /* Optimize for the common case */
        /* FIXME: must check for remote_busy below */
        IRDA_DEBUG(4, "%s(), event=%s nr=%d, vs=%d, ns=%d, "
                   "vr=%d, pf=%d\n", __func__,
                   irlap_event[event], info->nr,
                   self->vs, info->ns, self->vr, info->pf);

        self->retry_count = 0;

        ns_status = irlap_validate_ns_received(self, info->ns);
        nr_status = irlap_validate_nr_received(self, info->nr);
        /*
         * Check for expected I(nformation) frame
         */
        if ((ns_status == NS_EXPECTED) && (nr_status == NR_EXPECTED)) {

            /* Update Vr (next frame for us to receive) */
            self->vr = (self->vr + 1) % 8;

            /* Update Nr received */
            irlap_update_nr_received(self, info->nr);

            /*
             * poll bit cleared?
             */
            if (!info->pf) {

                self->ack_required = TRUE;

                /*
                 * Starting WD-timer here is optional, but
                 * not recommended. Note 6 IrLAP p. 83
                 */
#if 0
                irda_start_timer(WD_TIMER, self->wd_timeout);
#endif
                /* Keep state, do not move this line */
                irlap_next_state(self, LAP_NRM_S);

                irlap_data_indication(self, skb, FALSE);
                break;
            } else {
                /*
                 * We should wait before sending RR, and
                 * also before changing to XMIT_S
                 * state. (note 1, IrLAP p. 82)
                 */
                irlap_wait_min_turn_around(self, &self->qos_tx);

                /*
                 * Give higher layers a chance to
                 * immediately reply with some data before
                 * we decide if we should send a RR frame
                 * or not
                 */
                irlap_data_indication(self, skb, FALSE);

                /* Any pending data requests?  */
                if (!skb_queue_empty(&self->txq) &&
                    (self->window > 0))
                {
                    self->ack_required = TRUE;

                    del_timer(&self->wd_timer);

                    irlap_next_state(self, LAP_XMIT_S);
                } else {
                    irlap_send_rr_frame(self, RSP_FRAME);
                    irlap_start_wd_timer(self,
                                         self->wd_timeout);

                    /* Keep the state */
                    irlap_next_state(self, LAP_NRM_S);
                }
                break;
            }
        }
        /*
         * Check for Unexpected next to send (Ns)
         */
        if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_EXPECTED))
        {
            /* Unexpected next to send, with final bit cleared */
            if (!info->pf) {
                irlap_update_nr_received(self, info->nr);

                irlap_start_wd_timer(self, self->wd_timeout);
            } else {
                /* Update Nr received */
                irlap_update_nr_received(self, info->nr);

                irlap_wait_min_turn_around(self, &self->qos_tx);
                irlap_send_rr_frame(self, RSP_FRAME);

                irlap_start_wd_timer(self, self->wd_timeout);
            }
            break;
        }

        /*
         * Unexpected Next to Receive(NR) ?
         */
        if ((ns_status == NS_EXPECTED) && (nr_status == NR_UNEXPECTED))
        {
            if (info->pf) {
                IRDA_DEBUG(4, "RECV_I_RSP: frame(s) lost\n");

                self->vr = (self->vr + 1) % 8;

                /* Update Nr received */
                irlap_update_nr_received(self, info->nr);

                /* Resend rejected frames */
                irlap_resend_rejected_frames(self, RSP_FRAME);

                /* Keep state, do not move this line */
                irlap_next_state(self, LAP_NRM_S);

                irlap_data_indication(self, skb, FALSE);
                irlap_start_wd_timer(self, self->wd_timeout);
                break;
            }
            /*
             * This is not documented in IrLAP!! Unexpected NR
             * with poll bit cleared
             */
            if (!info->pf) {
                self->vr = (self->vr + 1) % 8;

                /* Update Nr received */
                irlap_update_nr_received(self, info->nr);

                /* Keep state, do not move this line */
                irlap_next_state(self, LAP_NRM_S);

                irlap_data_indication(self, skb, FALSE);
                irlap_start_wd_timer(self, self->wd_timeout);
            }
            break;
        }

        /* Fix: these diagnostics used to test 'ret', which is always 0
         * at this point (it is only assigned in the default case
         * below), so they could never fire.  Test the actual
         * validation results instead. */
        if (nr_status == NR_INVALID) {
            IRDA_DEBUG(0, "NRM_S, NR_INVALID not implemented!\n");
        }
        if (ns_status == NS_INVALID) {
            IRDA_DEBUG(0, "NRM_S, NS_INVALID not implemented!\n");
        }

        break;
    case RECV_UI_FRAME:
        /*
         * poll bit cleared?
         */
        if (!info->pf) {
            irlap_data_indication(self, skb, TRUE);
            irlap_next_state(self, LAP_NRM_S); /* Keep state */
        } else {
            /*
             * Any pending data requests?
             */
            if (!skb_queue_empty(&self->txq) &&
                (self->window > 0) && !self->remote_busy)
            {
                irlap_data_indication(self, skb, TRUE);

                del_timer(&self->wd_timer);

                irlap_next_state(self, LAP_XMIT_S);
            } else {
                irlap_data_indication(self, skb, TRUE);

                irlap_wait_min_turn_around(self, &self->qos_tx);

                irlap_send_rr_frame(self, RSP_FRAME);
                self->ack_required = FALSE;

                irlap_start_wd_timer(self, self->wd_timeout);

                /* Keep the state */
                irlap_next_state(self, LAP_NRM_S);
            }
        }
        break;
    case RECV_RR_CMD:
        self->retry_count = 0;

        /*
         * Nr as expected?
         */
        nr_status = irlap_validate_nr_received(self, info->nr);
        if (nr_status == NR_EXPECTED) {
            if (!skb_queue_empty(&self->txq) &&
                (self->window > 0)) {
                self->remote_busy = FALSE;

                /* Update Nr received */
                irlap_update_nr_received(self, info->nr);
                del_timer(&self->wd_timer);

                irlap_wait_min_turn_around(self, &self->qos_tx);
                irlap_next_state(self, LAP_XMIT_S);
            } else {
                self->remote_busy = FALSE;
                /* Update Nr received */
                irlap_update_nr_received(self, info->nr);
                irlap_wait_min_turn_around(self, &self->qos_tx);

                irlap_start_wd_timer(self, self->wd_timeout);

                /* Note : if the link is idle (this case),
                 * we never go in XMIT_S, so we never get a
                 * chance to process any DISCONNECT_REQUEST.
                 * Do it now ! - Jean II */
                if (self->disconnect_pending) {
                    /* Disconnect */
                    irlap_send_rd_frame(self);
                    irlap_flush_all_queues(self);

                    irlap_next_state(self, LAP_SCLOSE);
                } else {
                    /* Just send back pf bit */
                    irlap_send_rr_frame(self, RSP_FRAME);

                    irlap_next_state(self, LAP_NRM_S);
                }
            }
        } else if (nr_status == NR_UNEXPECTED) {
            self->remote_busy = FALSE;
            irlap_update_nr_received(self, info->nr);
            irlap_resend_rejected_frames(self, RSP_FRAME);

            irlap_start_wd_timer(self, self->wd_timeout);

            /* Keep state */
            irlap_next_state(self, LAP_NRM_S);
        } else {
            IRDA_DEBUG(1, "%s(), invalid nr not implemented!\n",
                       __func__);
        }
        break;
    case RECV_SNRM_CMD:
        /* SNRM frame is not allowed to contain an I-field */
        if (!info) {
            del_timer(&self->wd_timer);
            IRDA_DEBUG(1, "%s(), received SNRM cmd\n", __func__);
            irlap_next_state(self, LAP_RESET_CHECK);

            irlap_reset_indication(self);
        } else {
            IRDA_DEBUG(0,
                       "%s(), SNRM frame contained an I-field!\n",
                       __func__);

        }
        break;
    case RECV_REJ_CMD:
        irlap_update_nr_received(self, info->nr);
        if (self->remote_busy) {
            irlap_wait_min_turn_around(self, &self->qos_tx);
            irlap_send_rr_frame(self, RSP_FRAME);
        } else
            irlap_resend_rejected_frames(self, RSP_FRAME);
        irlap_start_wd_timer(self, self->wd_timeout);
        break;
    case RECV_SREJ_CMD:
        irlap_update_nr_received(self, info->nr);
        if (self->remote_busy) {
            irlap_wait_min_turn_around(self, &self->qos_tx);
            irlap_send_rr_frame(self, RSP_FRAME);
        } else
            irlap_resend_rejected_frame(self, RSP_FRAME);
        irlap_start_wd_timer(self, self->wd_timeout);
        break;
    case WD_TIMER_EXPIRED:
        /*
         * Wait until retry_count * n matches negotiated threshold/
         * disconnect time (note 2 in IrLAP p. 82)
         *
         * Similar to irlap_state_nrm_p() -> FINAL_TIMER_EXPIRED
         * Note : self->wd_timeout = (self->final_timeout * 2),
         * which explain why we use (self->N2 / 2) here !!!
         * Jean II
         */
        IRDA_DEBUG(1, "%s(), retry_count = %d\n", __func__,
                   self->retry_count);

        if (self->retry_count < (self->N2 / 2)) {
            /* No retry, just wait for primary */
            irlap_start_wd_timer(self, self->wd_timeout);
            self->retry_count++;

            if((self->retry_count % (self->N1 / 2)) == 0)
                irlap_status_indication(self,
                                        STATUS_NO_ACTIVITY);
        } else {
            irlap_apply_default_connection_parameters(self);

            /* Always switch state before calling upper layers */
            irlap_next_state(self, LAP_NDM);
            irlap_disconnect_indication(self, LAP_NO_RESPONSE);
        }
        break;
    case RECV_DISC_CMD:
        /* Always switch state before calling upper layers */
        irlap_next_state(self, LAP_NDM);

        /* Send disconnect response */
        irlap_wait_min_turn_around(self, &self->qos_tx);
        irlap_send_ua_response_frame(self, NULL);

        del_timer(&self->wd_timer);
        irlap_flush_all_queues(self);
        /* Set default link parameters */
        irlap_apply_default_connection_parameters(self);

        irlap_disconnect_indication(self, LAP_DISC_INDICATION);
        break;
    case RECV_DISCOVERY_XID_CMD:
        irlap_wait_min_turn_around(self, &self->qos_tx);
        irlap_send_rr_frame(self, RSP_FRAME);
        self->ack_required = TRUE;
        irlap_start_wd_timer(self, self->wd_timeout);
        irlap_next_state(self, LAP_NRM_S);

        break;
    case RECV_TEST_CMD:
        /* Remove test frame header (only LAP header in NRM) */
        skb_pull(skb, LAP_ADDR_HEADER + LAP_CTRL_HEADER);

        irlap_wait_min_turn_around(self, &self->qos_tx);
        irlap_start_wd_timer(self, self->wd_timeout);

        /* Send response (info will be copied) */
        irlap_send_test_frame(self, self->caddr, info->daddr, skb);
        break;
    default:
        IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__,
                   event, irlap_event[event]);

        ret = -EINVAL;
        break;
    }
    return ret;
}
/*
* Function irlap_state_sclose (self, event, skb, info)
*/
static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
                              struct sk_buff *skb, struct irlap_info *info)
{
    IRDA_DEBUG(1, "%s()\n", __func__);

    IRDA_ASSERT(self != NULL, return -ENODEV;);
    IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;);

    switch (event) {
    case RECV_DISC_CMD:
        /* Primary confirmed the disconnect: answer with UA and
         * return to NDM with default parameters. */
        /* Always switch state before calling upper layers */
        irlap_next_state(self, LAP_NDM);

        /* Send disconnect response */
        irlap_wait_min_turn_around(self, &self->qos_tx);
        irlap_send_ua_response_frame(self, NULL);

        del_timer(&self->wd_timer);
        /* Set default link parameters */
        irlap_apply_default_connection_parameters(self);

        irlap_disconnect_indication(self, LAP_DISC_INDICATION);
        break;
    case RECV_DM_RSP:
        /* IrLAP-1.1 p.82: in SCLOSE, S and I type RSP frames
         * shall take us down into default NDM state, like DM_RSP
         */
    case RECV_RR_RSP:
    case RECV_RNR_RSP:
    case RECV_REJ_RSP:
    case RECV_SREJ_RSP:
    case RECV_I_RSP:
        /* Always switch state before calling upper layers */
        irlap_next_state(self, LAP_NDM);

        del_timer(&self->wd_timer);
        irlap_apply_default_connection_parameters(self);

        irlap_disconnect_indication(self, LAP_DISC_INDICATION);
        break;
    case WD_TIMER_EXPIRED:
        /* No answer from the primary at all: give up and drop to
         * NDM anyway. */
        /* Always switch state before calling upper layers */
        irlap_next_state(self, LAP_NDM);

        irlap_apply_default_connection_parameters(self);

        irlap_disconnect_indication(self, LAP_DISC_INDICATION);
        break;
    default:
        /* IrLAP-1.1 p.82: in SCLOSE, basically any received frame
         * with pf=1 shall restart the wd-timer and resend the rd:rsp
         */
        if (info != NULL && info->pf) {
            del_timer(&self->wd_timer);
            irlap_wait_min_turn_around(self, &self->qos_tx);
            irlap_send_rd_frame(self);
            irlap_start_wd_timer(self, self->wd_timeout);
            break;      /* stay in SCLOSE */
        }

        IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__,
                   event, irlap_event[event]);

        break;
    }

    /* NOTE(review): this handler unconditionally returns -1, even for
     * handled events - unlike the sibling state handlers which return
     * 0 on success.  Looks deliberate (callers may ignore it), but
     * worth confirming. */
    return -1;
}
static int irlap_state_reset_check(struct irlap_cb *self, IRLAP_EVENT event,
                                   struct sk_buff *skb,
                                   struct irlap_info *info)
{
    int ret = 0;

    IRDA_DEBUG(1, "%s(), event=%s\n", __func__, irlap_event[event]);

    IRDA_ASSERT(self != NULL, return -ENODEV;);
    IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;);

    if (event == RESET_RESPONSE) {
        /* Upper layer accepted the reset: acknowledge the peer,
         * re-initialise the connection state and resume as secondary. */
        irlap_send_ua_response_frame(self, &self->qos_rx);
        irlap_initiate_connection_state(self);
        irlap_start_wd_timer(self, WD_TIMEOUT);
        irlap_flush_all_queues(self);

        irlap_next_state(self, LAP_NRM_S);
    } else if (event == DISCONNECT_REQUEST) {
        /* Upper layer chose to close the link instead. */
        irlap_wait_min_turn_around(self, &self->qos_tx);
        irlap_send_rd_frame(self);

        irlap_start_wd_timer(self, WD_TIMEOUT);
        irlap_next_state(self, LAP_SCLOSE);
    } else {
        IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__,
                   event, irlap_event[event]);

        ret = -EINVAL;
    }
    return ret;
}
| gpl-2.0 |
rdnetto/linux-TF101 | drivers/scsi/sr_vendor.c | 12047 | 8619 | /* -*-linux-c-*-
* vendor-specific code for SCSI CD-ROM's goes here.
*
 * This is needed because most of the new features (multisession and
* the like) are too new to be included into the SCSI-II standard (to
 * be exact: there isn't anything in my draft copy).
*
* Aug 1997: Ha! Got a SCSI-3 cdrom spec across my fingers. SCSI-3 does
* multisession using the READ TOC command (like SONY).
*
 * Rearranged stuff here: SCSI-3 is included always; support
* for NEC/TOSHIBA/HP commands is optional.
*
* Gerd Knorr <kraxel@cs.tu-berlin.de>
*
* --------------------------------------------------------------------------
*
* support for XA/multisession-CD's
*
* - NEC: Detection and support of multisession CD's.
*
* - TOSHIBA: Detection and support of multisession CD's.
* Some XA-Sector tweaking, required for older drives.
*
* - SONY: Detection and support of multisession CD's.
* added by Thomas Quinot <thomas@cuivre.freenix.fr>
*
* - PIONEER, HITACHI, PLEXTOR, MATSHITA, TEAC, PHILIPS: known to
* work with SONY (SCSI3 now) code.
*
* - HP: Much like SONY, but a little different... (Thomas)
* HP-Writers only ??? Maybe other CD-Writers work with this too ?
* HP 6020 writers now supported.
*/
#include <linux/cdrom.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/bcd.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include "sr.h"
#if 0
#define DEBUG
#endif
/* here are some constants to sort the vendors into groups */
#define VENDOR_SCSI3 1 /* default: scsi-3 mmc */
#define VENDOR_NEC 2
#define VENDOR_TOSHIBA 3
#define VENDOR_WRITER 4 /* pre-scsi3 writers */
#define VENDOR_TIMEOUT 30*HZ
/* Classify the drive into one of the VENDOR_* groups above so that
 * sr_cd_check() knows which multisession query protocol to use. */
void sr_vendor_init(Scsi_CD *cd)
{
#ifndef CONFIG_BLK_DEV_SR_VENDOR
    /* Vendor-specific support compiled out: treat everything as
     * SCSI-3/MMC. */
    cd->vendor = VENDOR_SCSI3;
#else
    const char *vendor = cd->device->vendor;
    const char *model = cd->device->model;

    /* default */
    cd->vendor = VENDOR_SCSI3;
    if (cd->readcd_known)
        /* this is true for scsi3/mmc drives - no more checks */
        return;

    if (cd->device->type == TYPE_WORM) {
        /* Pre-SCSI3 CD writers get their own query protocol. */
        cd->vendor = VENDOR_WRITER;

    } else if (!strncmp(vendor, "NEC", 3)) {
        cd->vendor = VENDOR_NEC;
        if (!strncmp(model, "CD-ROM DRIVE:25", 15) ||
            !strncmp(model, "CD-ROM DRIVE:36", 15) ||
            !strncmp(model, "CD-ROM DRIVE:83", 15) ||
            !strncmp(model, "CD-ROM DRIVE:84 ", 16)
#if 0
        /* my NEC 3x returns the read-raw data if a read-raw
           is followed by a read for the same sector - aeb */
            || !strncmp(model, "CD-ROM DRIVE:500", 16)
#endif
            )
            /* these can't handle multisession, may hang */
            cd->cdi.mask |= CDC_MULTI_SESSION;

    } else if (!strncmp(vendor, "TOSHIBA", 7)) {
        cd->vendor = VENDOR_TOSHIBA;
    }
#endif
}
/* small handy function for switching block length using MODE SELECT,
 * used by sr_read_sector().
 *
 * On success the new block length is also recorded in
 * cd->device->sector_size.  Returns 0 on success or the sr_do_ioctl()
 * error code. */
int sr_set_blocklength(Scsi_CD *cd, int blocklength)
{
    unsigned char *buffer;  /* the buffer for the ioctl */
    struct packet_command cgc;
    struct ccs_modesel_head *modesel;
    int rc, density = 0;

#ifdef CONFIG_BLK_DEV_SR_VENDOR
    /* Toshiba drives also need the density code switched along with
     * the block length. */
    if (cd->vendor == VENDOOR_TOSHIBA)
        density = (blocklength > 2048) ? 0x81 : 0x83;
#endif

    /* GFP_DMA: the buffer may be handed to an ISA-DMA capable HBA. */
    buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
    if (!buffer)
        return -ENOMEM;

#ifdef DEBUG
    printk("%s: MODE SELECT 0x%x/%d\n", cd->cdi.name, density, blocklength);
#endif
    memset(&cgc, 0, sizeof(struct packet_command));
    cgc.cmd[0] = MODE_SELECT;
    cgc.cmd[1] = (1 << 4);
    cgc.cmd[4] = 12;
    modesel = (struct ccs_modesel_head *) buffer;
    memset(modesel, 0, sizeof(*modesel));
    modesel->block_desc_length = 0x08;
    modesel->density = density;
    modesel->block_length_med = (blocklength >> 8) & 0xff;
    modesel->block_length_lo = blocklength & 0xff;
    cgc.buffer = buffer;
    cgc.buflen = sizeof(*modesel);
    cgc.data_direction = DMA_TO_DEVICE;
    cgc.timeout = VENDOR_TIMEOUT;
    if (0 == (rc = sr_do_ioctl(cd, &cgc))) {
        cd->device->sector_size = blocklength;
    }
#ifdef DEBUG
    else
        printk("%s: switching blocklength to %d bytes failed\n",
               cd->cdi.name, blocklength);
#endif
    kfree(buffer);
    return rc;
}
/* This function gets called after a media change. Checks if the CD is
   multisession, asks for offset etc.
   Queries the drive using the protocol selected by sr_vendor_init(),
   records the multisession offset in cd->ms_offset and the XA flag in
   cd->xa_flag, and masks CDC_MULTI_SESSION for drives that cannot
   answer.  Returns 0 or a negative error from the drive query. */
int sr_cd_check(struct cdrom_device_info *cdi)
{
    Scsi_CD *cd = cdi->handle;
    unsigned long sector;
    unsigned char *buffer;  /* the buffer for the ioctl */
    struct packet_command cgc;
    int rc, no_multi;

    /* Drive is already known not to handle multisession. */
    if (cd->cdi.mask & CDC_MULTI_SESSION)
        return 0;

    buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
    if (!buffer)
        return -ENOMEM;

    sector = 0;     /* the multisession sector offset goes here  */
    no_multi = 0;       /* flag: the drive can't handle multisession */
    rc = 0;

    memset(&cgc, 0, sizeof(struct packet_command));

    switch (cd->vendor) {

    case VENDOR_SCSI3:
        /* SCSI-3/MMC: READ TOC with format 0x40 returns the start of
         * the last session directly. */
        cgc.cmd[0] = READ_TOC;
        cgc.cmd[8] = 12;
        cgc.cmd[9] = 0x40;
        cgc.buffer = buffer;
        cgc.buflen = 12;
        cgc.quiet = 1;
        cgc.data_direction = DMA_FROM_DEVICE;
        cgc.timeout = VENDOR_TIMEOUT;
        rc = sr_do_ioctl(cd, &cgc);
        if (rc != 0)
            break;
        if ((buffer[0] << 8) + buffer[1] < 0x0a) {
            printk(KERN_INFO "%s: Hmm, seems the drive "
                   "doesn't support multisession CD's\n", cd->cdi.name);
            no_multi = 1;
            break;
        }
        /* Big-endian 32-bit LBA in bytes 8..11. */
        sector = buffer[11] + (buffer[10] << 8) +
            (buffer[9] << 16) + (buffer[8] << 24);
        if (buffer[6] <= 1) {
            /* ignore sector offsets from first track */
            sector = 0;
        }
        break;

#ifdef CONFIG_BLK_DEV_SR_VENDOR
    case VENDOR_NEC:{
            unsigned long min, sec, frame;
            /* NEC vendor command 0xde: last-session info in BCD MSF. */
            cgc.cmd[0] = 0xde;
            cgc.cmd[1] = 0x03;
            cgc.cmd[2] = 0xb0;
            cgc.buffer = buffer;
            cgc.buflen = 0x16;
            cgc.quiet = 1;
            cgc.data_direction = DMA_FROM_DEVICE;
            cgc.timeout = VENDOR_TIMEOUT;
            rc = sr_do_ioctl(cd, &cgc);
            if (rc != 0)
                break;
            if (buffer[14] != 0 && buffer[14] != 0xb0) {
                printk(KERN_INFO "%s: Hmm, seems the cdrom "
                       "doesn't support multisession CD's\n",
                       cd->cdi.name);
                no_multi = 1;
                break;
            }
            min = bcd2bin(buffer[15]);
            sec = bcd2bin(buffer[16]);
            frame = bcd2bin(buffer[17]);
            sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
            break;
        }

    case VENDOR_TOSHIBA:{
            unsigned long min, sec, frame;

            /* we request some disc information (is it a XA-CD ?,
             * where starts the last session ?) */
            cgc.cmd[0] = 0xc7;
            cgc.cmd[1] = 0x03;
            cgc.buffer = buffer;
            cgc.buflen = 4;
            cgc.quiet = 1;
            cgc.data_direction = DMA_FROM_DEVICE;
            cgc.timeout = VENDOR_TIMEOUT;
            rc = sr_do_ioctl(cd, &cgc);
            if (rc == -EINVAL) {
                printk(KERN_INFO "%s: Hmm, seems the drive "
                       "doesn't support multisession CD's\n",
                       cd->cdi.name);
                no_multi = 1;
                break;
            }
            if (rc != 0)
                break;
            min = bcd2bin(buffer[1]);
            sec = bcd2bin(buffer[2]);
            frame = bcd2bin(buffer[3]);
            sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
            if (sector)
                sector -= CD_MSF_OFFSET;
            /* Older Toshiba drives need the block length reset after
             * the vendor query (see sr_set_blocklength above). */
            sr_set_blocklength(cd, 2048);
            break;
        }

    case VENDOR_WRITER:
        /* HP-style writers: first ask for the number of the last
         * finished session, then read that session's TOC entry. */
        cgc.cmd[0] = READ_TOC;
        cgc.cmd[8] = 0x04;
        cgc.cmd[9] = 0x40;
        cgc.buffer = buffer;
        cgc.buflen = 0x04;
        cgc.quiet = 1;
        cgc.data_direction = DMA_FROM_DEVICE;
        cgc.timeout = VENDOR_TIMEOUT;
        rc = sr_do_ioctl(cd, &cgc);
        if (rc != 0) {
            break;
        }
        if ((rc = buffer[2]) == 0) {
            printk(KERN_WARNING
                   "%s: No finished session\n", cd->cdi.name);
            break;
        }
        cgc.cmd[0] = READ_TOC;  /* Read TOC */
        cgc.cmd[6] = rc & 0x7f; /* number of last session */
        cgc.cmd[8] = 0x0c;
        cgc.cmd[9] = 0x40;
        cgc.buffer = buffer;
        cgc.buflen = 12;
        cgc.quiet = 1;
        cgc.data_direction = DMA_FROM_DEVICE;
        cgc.timeout = VENDOR_TIMEOUT;
        rc = sr_do_ioctl(cd, &cgc);
        if (rc != 0) {
            break;
        }
        sector = buffer[11] + (buffer[10] << 8) +
            (buffer[9] << 16) + (buffer[8] << 24);
        break;
#endif              /* CONFIG_BLK_DEV_SR_VENDOR */

    default:
        /* should not happen */
        printk(KERN_WARNING
               "%s: unknown vendor code (%i), not initialized ?\n",
               cd->cdi.name, cd->vendor);
        sector = 0;
        no_multi = 1;
        break;
    }
    cd->ms_offset = sector;
    cd->xa_flag = 0;
    if (CDS_AUDIO != sr_disk_status(cdi) && 1 == sr_is_xa(cd))
        cd->xa_flag = 1;

    /* Make sure we are back at the standard 2048-byte block size. */
    if (2048 != cd->device->sector_size) {
        sr_set_blocklength(cd, 2048);
    }
    if (no_multi)
        cdi->mask |= CDC_MULTI_SESSION;

#ifdef DEBUG
    if (sector)
        printk(KERN_DEBUG "%s: multisession offset=%lu\n",
               cd->cdi.name, sector);
#endif
    kfree(buffer);
    return rc;
}
| gpl-2.0 |
MrApocalypse/Immortality_kernel | lib/bitrev.c | 13071 | 2157 | #include <linux/types.h>
#include <linux/module.h>
#include <linux/bitrev.h>
MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
MODULE_DESCRIPTION("Bit ordering reversal functions");
MODULE_LICENSE("GPL");
/* byte_rev_table[b] is the value of byte b with its eight bits written
 * in reverse order (bit 0 <-> bit 7, bit 1 <-> bit 6, ...).  Backs the
 * bitrev8() inline and the wider bitrev16()/bitrev32() below. */
const u8 byte_rev_table[256] = {
	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
	0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
	0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
	0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
	0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
	0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
	0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
	0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
	0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
	0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
	0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
	0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
	0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
	0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
	0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
	0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
	0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
	0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
	0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
	0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
	0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
	0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
};
EXPORT_SYMBOL_GPL(byte_rev_table);
/**
 * bitrev16 - reverse the order of bits in a u16 value
 * @x: value to be bit-reversed
 */
u16 bitrev16(u16 x)
{
	/* Reverse each byte via the lookup table, then swap the two
	 * bytes; bitrev8() is just a byte_rev_table lookup. */
	u16 lo_reversed = byte_rev_table[x & 0xff];
	u16 hi_reversed = byte_rev_table[(x >> 8) & 0xff];

	return (u16)((lo_reversed << 8) | hi_reversed);
}
EXPORT_SYMBOL(bitrev16);
/**
 * bitrev32 - reverse the order of bits in a u32 value
 * @x: value to be bit-reversed
 */
u32 bitrev32(u32 x)
{
	/* Reverse each 16-bit half, then swap the halves. */
	u32 lo_reversed = bitrev16((u16)(x & 0xffff));
	u32 hi_reversed = bitrev16((u16)(x >> 16));

	return (lo_reversed << 16) | hi_reversed;
}
EXPORT_SYMBOL(bitrev32);
| gpl-2.0 |
javelinanddart/android_kernel_3.10_ville | arch/powerpc/boot/cuboot-sam440ep.c | 14095 | 1254 | /*
* Old U-boot compatibility for Sam440ep based off bamboo.c code
* original copyrights below
*
* Author: Josh Boyer <jwboyer@linux.vnet.ibm.com>
*
* Copyright 2007 IBM Corporation
*
* Based on cuboot-ebony.c
*
* Modified from cuboot-bamboo.c for sam440ep:
* Copyright 2008 Giuseppe Coviello <gicoviello@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "ops.h"
#include "stdio.h"
#include "44x.h"
#include "4xx.h"
#include "cuboot.h"
#define TARGET_4xx
#define TARGET_44x
#include "ppcboot.h"
/* U-Boot board descriptor; filled in by CUBOOT_INIT() in platform_init(). */
static bd_t bd;

/* Patch the device tree with clock rates, SDRAM size and MAC addresses
 * taken from the U-Boot board descriptor, and quiesce the EMACs. */
static void sam440ep_fixups(void)
{
	/* 66666666 Hz system clock input. */
	unsigned long sysclk = 66666666;

	ibm440ep_fixup_clocks(sysclk, 11059200, 25000000);
	ibm4xx_sdram_fixup_memsize();
	/* EMAC0/EMAC1 register bases - stop DMA before the kernel takes over. */
	ibm4xx_quiesce_eth((u32 *)0xef600e00, (u32 *)0xef600f00);
	dt_fixup_mac_addresses(&bd.bi_enetaddr, &bd.bi_enet1addr);
}
/* Boot-wrapper entry point called with the register state left by the
 * old U-Boot; copies the board info, installs the fixup/exit hooks and
 * brings up the flattened device tree and serial console. */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	platform_ops.fixups = sam440ep_fixups;
	platform_ops.exit = ibm44x_dbcr_reset;
	fdt_init(_dtb_start);
	serial_console_init();
}
| gpl-2.0 |
mickael-guene/gcc | libiberty/testsuite/test-strtol.c | 16 | 5235 | /* Test program for strtol family of funtions,
Copyright (C) 2014-2017 Free Software Foundation, Inc.
Written by Yury Gribov <y.gribov@samsung.com>
This file is part of the libiberty library, which is part of GCC.
This file is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "libiberty.h"
#include <stdio.h>
#include <errno.h>
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifndef EXIT_SUCCESS
#define EXIT_SUCCESS 0
#endif
#ifndef EXIT_FAILURE
#define EXIT_FAILURE 1
#endif
/* Test input data. */
/* Which conversion function a test entry exercises. */
enum conversion_fun
{
  STRTOL,
  STRTOLL,
  STRTOUL,
  STRTOULL,
};

/* Widest available unsigned type: expected results for all four
   functions are stored (and compared) in this type.  */
#ifdef HAVE_LONG_LONG
typedef unsigned long long integer_type;
#else
typedef unsigned long integer_type;
#endif

/* One test case: input string, numeric base, expected result and the
   errno value expected after the call.  */
struct test_data_t
{
  enum conversion_fun fun;
  const char *nptr;
  int base;
  integer_type res;
  int errnum;
};

const struct test_data_t test_data[] = {
  /* Base auto-detection: hex, decimal and octal inputs.  */
  { STRTOL, "0x123", 0, 0x123L, 0 },
  { STRTOL, "123", 0, 123L, 0 },
  { STRTOL, "0123", 0, 0123L, 0 },
  { STRTOL, "0x7FFFFFFF", 0, 0x7fffffffL, 0 },
  { STRTOL, "-0x80000000", 0, -0x80000000L, 0 },
  { STRTOUL, "0x123", 0, 0x123UL, 0 },
  { STRTOUL, "123", 0, 123UL, 0 },
  { STRTOUL, "0123", 0, 0123UL, 0 },
  { STRTOUL, "0xFFFFFFFF", 0, 0xffffffffUL, 0 },
#if SIZEOF_LONG == 4
  /* Overflow cases: result clamps to the limit and errno is ERANGE.  */
  { STRTOL, "0x80000000", 0, 0x7fffffffL, ERANGE },
  { STRTOL, "-0x80000001", 0, -0x80000000L, ERANGE },
  { STRTOUL, "0x100000000", 0, 0xffffffffUL, ERANGE },
#endif
#ifdef HAVE_LONG_LONG
  { STRTOLL, "0x123", 0, 0x123LL, 0 },
  { STRTOLL, "123", 0, 123LL, 0 },
  { STRTOLL, "0123", 0, 0123LL, 0 },
  { STRTOLL, "0x7FFFFFFFFFFFFFFF", 0, 0x7fffffffffffffffLL, 0 },
  { STRTOLL, "-0x8000000000000000", 0, -0x8000000000000000LL, 0 },
  { STRTOULL, "0x123", 0, 0x123ULL, 0 },
  { STRTOULL, "123", 0, 123ULL, 0 },
  { STRTOULL, "0123", 0, 0123ULL, 0 },
  { STRTOULL, "0xFFFFFFFFFFFFFFFF", 0, 0xffffffffffffffffULL, 0 },
#if SIZEOF_LONG_LONG == 8
  { STRTOLL, "0x8000000000000000", 0, 0x7fffffffffffffffLL, ERANGE },
  { STRTOLL, "-0x8000000000000001", 0, -0x8000000000000000LL, ERANGE },
  { STRTOULL, "0x10000000000000000", 0, 0xffffffffffffffffULL, ERANGE },
#endif
#endif
};
/* run_tests:
   Run each conversion function on its input string, compare both the
   converted value and the resulting errno against the expected
   values, and print one PASS/FAIL line per test case.
   Returns the number of failed tests.  */
int
run_tests (const struct test_data_t *test_data, size_t ntests)
{
  int fails = 0, failed;
  size_t i;

  for (i = 0; i < ntests; ++i)
    {
      /* Fix: RES was previously uninitialized; if a STRTOLL/STRTOULL
	 entry ever reached the switch on a host without
	 HAVE_LONG_LONG, the comparison below read garbage.  */
      integer_type res = 0;
      int saved_errno;

      /* Reset errno so a stale value is never mistaken for a
	 conversion error reported by strto*.  */
      errno = 0;
      switch (test_data[i].fun)
	{
	case STRTOL:
	  res = (unsigned long) strtol (test_data[i].nptr,
					0, test_data[i].base);
	  break;
	case STRTOUL:
	  res = strtoul (test_data[i].nptr, 0, test_data[i].base);
	  break;
#ifdef HAVE_LONG_LONG
	case STRTOLL:
	  res = strtoll (test_data[i].nptr, 0, test_data[i].base);
	  break;
	case STRTOULL:
	  res = strtoull (test_data[i].nptr, 0, test_data[i].base);
	  break;
#endif
	default:
	  /* Unreachable with a consistent test_data table; counts the
	     entry as failed instead of comparing an unset result.  */
	  printf ("FAIL: test-strtol-%zu. Unsupported conversion.\n", i);
	  fails++;
	  continue;
	}
      saved_errno = errno;
      failed = 0;

      /* Compare result.  (Fix: %zu is the correct conversion for
	 size_t; %zd is for ssize_t and mismatches the argument.)  */
      if (res != test_data[i].res)
	{
	  printf ("FAIL: test-strtol-%zu. Results don't match.\n", i);
	  failed++;
	}

      /* Compare errno */
      if (saved_errno != test_data[i].errnum)
	{
	  printf ("FAIL: test-strtol-%zu. Errnos don't match.\n", i);
	  failed++;
	}

      if (!failed)
	printf ("PASS: test-strtol-%zu.\n", i);
      else
	fails++;
    }
  return fails;
}
/* Entry point: run the whole table and map the failure count onto the
   conventional process exit status.  */
int
main (int argc, char **argv)
{
  int failures;

  (void) argc;
  (void) argv;

  failures = run_tests (test_data,
			sizeof test_data / sizeof test_data[0]);
  return failures ? EXIT_FAILURE : EXIT_SUCCESS;
}
| gpl-2.0 |
hyunokoh/s4_qemu | roms/openhackware/src/libpart/apple.c | 16 | 11523 | /*
* <apple.c>
*
* Open Hack'Ware BIOS Apple partition type management
*
* Copyright (c) 2004-2005 Jocelyn Mayer
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License V2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdlib.h>
#include <stdio.h>
#include "bios.h"
#include "libpart.h"
/* Apple partitions handler */
#define HFS_BLOCSIZE (512)
typedef struct Mac_head_t Mac_head_t;
/* Block 0 of an Apple-labelled disk: the Driver Descriptor Map,
 * identified by the "ER" signature checked in Apple_probe_partitions.
 */
struct Mac_head_t {
    /* 0x000 */
    uint8_t signature[2];   /* "ER" */
    uint16_t bloc_size;
    uint32_t bloc_count;
    /* 0x008 */
    uint16_t dev_type;
    uint16_t dev_ID;
    uint32_t data;
    /* 0x010 */
    uint16_t driver_cnt;    /* number of driver descriptor entries */
    uint8_t pad[428];
    /* 0x01BE */
    uint8_t part_table[0x40];
    /* 0x1FE */
    uint8_t magic[2];       /* 0x55 0xAA here is treated as a PREP/MBR
                             * image by the (disabled) check in
                             * Apple_probe_partitions */
    /* 0x0200 */
} __attribute__ ((packed));
typedef struct Mac_driver_entry_t Mac_driver_entry_t;
/* One driver descriptor entry from block 0 (declared for completeness;
 * not referenced by the partition scanner in this file).  */
struct Mac_driver_entry_t {
    uint32_t start;
    uint16_t size;
    uint16_t type;
} __attribute__ ((packed));
typedef enum Mac_partflags_t Mac_partflags_t;
/* Status bits of the Mac_partmap_t.flags field; a bootable partition
 * carries MAC_BOOTABLE_PART (valid + in-use + bootable), see below.  */
enum Mac_partflags_t {
    MACPART_SPEC2     = 0x0100,
    MACPART_SPEC1     = 0x0080,
    MACPART_PIC       = 0x0040,  /* position-independent code */
    MACPART_WRITABLE  = 0x0020,
    MACPART_READABLE  = 0x0010,
    MACPART_BOOTABLE  = 0x0008,
    MACPART_INUSE     = 0x0004,
    MACPART_ALLOCATED = 0x0002,
    MACPART_VALID     = 0x0001,
};
#define MAC_BOOTABLE_PART (MACPART_VALID | MACPART_INUSE | MACPART_BOOTABLE)
typedef struct Mac_partmap_t Mac_partmap_t;
/* One 512-byte Apple partition map entry ("PM" signature).  The map
 * itself starts at block 1; map_cnt in the first entry gives the total
 * number of entries.  Offset comments are byte offsets in the entry.  */
struct Mac_partmap_t {
    /* 0x000 */
    uint8_t signature[2];   /* "PM" */
    uint8_t res0[2];
    uint32_t map_cnt;       /* total number of map entries */
    /* 0x008 */
    uint32_t start_bloc;    /* partition start, in 512-byte blocks */
    uint32_t bloc_cnt;      /* partition size, in 512-byte blocks */
    /* 0x010 */
    uint8_t name[32];
    /* 0x030 */
    uint8_t type[32];       /* e.g. "Apple_HFS", "Apple_Free", ... */
    /* 0x050 */
    uint32_t data_start;
    uint32_t data_cnt;
    /* 0x058 */
    uint32_t flags;         /* Mac_partflags_t bits */
    uint32_t boot_start;    /* boot code location ... */
    /* 0x060 */
    uint32_t boot_size;     /* ... size in bytes ... */
    uint32_t boot_load;     /* ... load address ... */
    /* 0x068 */
    uint32_t boot_load2;
    uint32_t boot_entry;    /* ... and entry point */
    /* 0x070 */
    uint32_t boot_entry2;
    uint32_t boot_csum;
    /* 0x078 */
    uint8_t CPU[16];
    /* 0x088 */
    uint8_t boot_args[0x80];
    /* 0x108 */
    uint8_t pad0[0xC8];
    /* 0x1D4 */
    uint16_t ntype;
    uint8_t ff[2];
    /* 0x1D8 */
    uint8_t pad1[0x24];
    /* 0x1FC */
    uint8_t mark[4];
    /* 0x200 */
} __attribute__ ((packed));
int fs_raw_set_bootfile (part_t *part,
uint32_t start_bloc, uint32_t start_offset,
uint32_t size_bloc, uint32_t size_offset);
/*
 * Scan the Apple partition map of a block device.
 *
 * Block 0 must carry the "ER" driver-descriptor signature; the map
 * itself starts at 512-byte block 1 ("PM" entries).  Every entry is
 * handed to part_register(); entries of a filesystem type that carry a
 * boot loader are remembered, and the last such partition is returned.
 *
 * Returns the bootable partition, or NULL when the device has no
 * Apple map, on I/O error, or on allocation failure.
 *
 * Fixes vs. the previous version:
 *  - the sector buffer leaked when the per-partition malloc failed;
 *  - the sector buffer malloc itself was never checked;
 *  - unused local 'n' and dead stores to flags/start/count removed.
 */
part_t *Apple_probe_partitions (bloc_device_t *bd)
{
    unsigned char tmp[33], *name;
    Mac_head_t *head;
    Mac_partmap_t *partmap;
    part_t *part, *boot_part;
    unsigned char *type;
    uint8_t *buffer;
    uint32_t pos, bloc;
    uint32_t bloc_size;
    int map_count, i, len;

    part = NULL;
    boot_part = NULL;
    buffer = malloc(HFS_BLOCSIZE);
    if (buffer == NULL) {
        ERROR("%s: can't allocate sector buffer\n", __func__);
        return NULL;
    }
    /* Read first sector */
    bd_seek(bd, 0, 0);
    if (bd_read(bd, buffer, HFS_BLOCSIZE) < 0) {
        ERROR("Unable to read boot sector from boot device. Aborting...\n");
        goto error;
    }
    head = (Mac_head_t *)buffer;
    if (head->signature[0] != 'E' || head->signature[1] != 'R') {
        /* No Apple boot bloc signature: not an Apple-labelled disk */
        goto error;
    }
    MSG("\rFound Apple partition map...\n");
    bloc = 0;
    bloc_size = bd_seclen(bd);
    map_count = 1;
#if 0
    if (head->magic[0] == 0x55 && head->magic[1] == 0xAA) {
        /* PREP boot image ! Must parse it as MS-DOS boot bloc */
        ERROR("%s PREP head magic\n", __func__);
        goto error;
    }
#endif
    /* Partition table starts in sector 1; map_count is refreshed from
     * the first map entry once it has been read. */
    for (i = 1; i < (map_count + 1); i++) {
        bloc = (i * HFS_BLOCSIZE) / bloc_size;
        pos = (i * HFS_BLOCSIZE) % bloc_size;
        DPRINTF("Check part %d of %d (%d %d %d)\n",
                i, map_count, bloc, pos, bloc_size);
        bd_seek(bd, bloc, pos);
        if (bd_read(bd, buffer, HFS_BLOCSIZE) < 0) {
            ERROR("%s sector_read failed (%d)\n", __func__, i);
            goto error;
        }
        partmap = (Mac_partmap_t *)buffer;
        if (partmap->signature[0] != 'P' || partmap->signature[1] != 'M' ) {
            ERROR("%s bad partition signature (%c %c)\n",
                  __func__, partmap->signature[0], partmap->signature[1]);
            goto error;
        }
        /* We found at least one Apple partition map,
         * so we won't have to try to parse with other partition mappings.
         */
        /* Skip leading NUL bytes of the type field; an all-NUL field
         * (len == 0 below) marks a placeholder entry. */
        for (type = partmap->type; (type - partmap->type) < 32; type++) {
            if (*type != '\0')
                break;
        }
        if (partmap->name[0] == '\0') {
            sprintf(tmp, "part%d", i);
            name = tmp;
        } else {
            name = partmap->name;
        }
        /* Regular Apple partition */
        part = malloc(sizeof(part_t));
        if (part == NULL) {
            ERROR("%s: can't allocate partition\n", __func__);
            /* Fix: the sector buffer used to leak on this path. */
            free(buffer);
            return NULL;
        }
        memset(part, 0, sizeof(part_t));
        part->start = partmap->start_bloc;
        part->size = partmap->bloc_cnt;
        part_set_blocsize(bd, part, HFS_BLOCSIZE);
        len = 32 - (type - partmap->type);
        if (len == 0) {
            /* Place holder. Skip it */
            DPRINTF("%s placeholder part\t%d\n", __func__, i);
            part->flags = PART_TYPE_APPLE | PART_FLAG_DUMMY;
            part_register(bd, part, name, i);
        } else if (strncmp("Apple_Void", type, 32) == 0) {
            /* Void partition. Skip it */
            DPRINTF("%s Void part\t%d [%s]\n", __func__, i, type);
            part->flags = PART_TYPE_APPLE | PART_FLAG_DUMMY;
            part_register(bd, part, name, i);
        } else if (strncmp("Apple_Free", type, 32) == 0) {
            /* Free space. Skip it */
            DPRINTF("%s Free part (%d)\n", __func__, i);
            part->flags = PART_TYPE_APPLE | PART_FLAG_DUMMY;
            part_register(bd, part, name, i);
        } else if (strncmp("Apple_partition_map", type, 32) == 0 ||
                   strncmp("Apple_Partition_Map", type, 32) == 0
#if 0 // Is this really used or is it just a mistake ?
                   || strncmp("Apple_patition_map", type, 32) == 0
#endif
                   ) {
            DPRINTF("%s Partition map\t%d [%s]\n", __func__, i, type);
            /* We are in the partition map descriptor */
            if (i == 1) {
                /* Get the real map blocs count */
                map_count = partmap->map_cnt;
                DPRINTF("%s: map_count: %d\n", __func__, map_count);
            } else {
                /* Don't care about secondary partition maps.
                 * Seems to be used, at least on CDROMs, to describe
                 * the same partition map with bloc_size = 2048
                 */
            }
            part->flags = PART_TYPE_APPLE | PART_FLAG_DUMMY;
            part_register(bd, part, name, i);
        } else if (strncmp("Apple_Driver", type, 32) == 0 ||
                   strncmp("Apple_Driver43", type, 32) == 0 ||
                   strncmp("Apple_Driver43_CD", type, 32) == 0 ||
                   strncmp("Apple_Driver_ATA", type, 32) == 0 ||
                   strncmp("Apple_Driver_ATAPI", type, 32) == 0 ||
                   strncmp("Apple_FWDriver", type, 32) == 0 ||
                   strncmp("Apple_Driver_IOKit", type, 32) == 0) {
            /* Drivers. don't care for now */
            DPRINTF("%s Drivers part\t%d [%s]\n", __func__, i, type);
            part->flags = PART_TYPE_APPLE | PART_FLAG_DRIVER;
            part_register(bd, part, name, i);
        } else if (strncmp("Apple_Patches", type, 32) == 0) {
            /* Patches: don't care for now */
            part->flags = PART_TYPE_APPLE | PART_FLAG_PATCH;
            part_register(bd, part, name, i);
            DPRINTF("%s Patches part\t%d [%s]\n", __func__, i, type);
        } else if (strncmp("Apple_HFS", type, 32) == 0 ||
                   strncmp("Apple_MFS", type, 32) == 0 ||
                   strncmp("Apple_UFS", type, 32) == 0 ||
                   strncmp("Apple_PRODOS", type, 32) == 0 ||
                   strncmp("Apple_UNIX_SVR2", type, 32) == 0 ||
                   strncmp("Linux", type, 32) == 0 ||
                   strncmp("NetBSD/macppc", type, 32) == 0 ||
                   strncmp("Apple_boot", type, 32) == 0 ||
                   strncmp("Apple_bootstrap", type, 32) == 0 ||
                   strncmp("Apple_Bootstrap", type, 32) == 0) {
            DPRINTF("%s Fs part\t%d [%s]\n", __func__, i, type);
            /* Filesystems / boot partitions */
            if (partmap->boot_size == 0 || partmap->boot_load == 0) {
                printf("Not a bootable partition %d %d (%p %p)\n",
                       partmap->boot_size, partmap->boot_load,
                       boot_part, part);
                part->flags = PART_TYPE_APPLE | PART_FLAG_FS;
            } else {
                /* Remember the boot loader location so the raw-fs
                 * layer can load it later. */
                part->boot_start.bloc = partmap->boot_start;
                part->boot_start.offset = 0;
                part->boot_size.bloc = partmap->boot_size / HFS_BLOCSIZE;
                part->boot_size.offset = (partmap->boot_size) % HFS_BLOCSIZE;
                part->boot_load = partmap->boot_load;
                part->boot_entry = partmap->boot_entry;
                fs_raw_set_bootfile(part, part->boot_start.bloc,
                                    part->boot_start.offset,
                                    part->boot_size.bloc,
                                    part->boot_size.offset);
                boot_part = part;
                part->flags = PART_TYPE_APPLE | PART_FLAG_FS | PART_FLAG_BOOT;
            }
            printf("Partition: %d '%s' '%s' st %0x size %0x",
                   i, name, type, partmap->start_bloc, partmap->bloc_cnt);
#ifndef DEBUG
            printf("\n");
#endif
            DPRINTF(" - %0x %0x %p %p\n",
                    partmap->boot_start, partmap->boot_size, part, part->fs);
            DPRINTF(" boot %0x %0x load %0x entry %0x\n",
                    part->boot_start.bloc, part->boot_size.bloc,
                    part->boot_load, part->boot_entry);
            DPRINTF(" load %0x entry %0x %0x\n",
                    partmap->boot_load2, partmap->boot_entry2, HFS_BLOCSIZE);
            part_register(bd, part, name, i);
        } else {
            memcpy(tmp, type, 32);
            tmp[32] = '\0';
            ERROR("Unknown partition type [%s]\n", tmp);
            part->flags = PART_TYPE_APPLE | PART_FLAG_DUMMY;
            part_register(bd, part, name, i);
        }
    }
 error:
    free(buffer);
    return boot_part;
}
| gpl-2.0 |
The-Nemesis-Project/hltetmo_kernel | drivers/video/msm/mdss/mdss_mdp_pp.c | 16 | 96392 | /*
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include "mdss_fb.h"
#include "mdss_mdp.h"
#include <linux/uaccess.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#ifdef CONFIG_FB_MSM_CAMERA_CSC
struct mdp_csc_cfg mdp_csc_convert_wideband = {
0,
{
0x0200, 0x0000, 0x02CD,
0x0200, 0xFF4F, 0xFE91,
0x0200, 0x038B, 0x0000,
},
{ 0x0, 0xFF80, 0xFF80,},
{ 0x0, 0x0, 0x0,},
{ 0x0, 0xFF, 0x0, 0xFF, 0x0, 0xFF,},
{ 0x0, 0xFF, 0x0, 0xFF, 0x0, 0xFF,},
};
#endif
struct mdp_csc_cfg mdp_csc_convert[MDSS_MDP_MAX_CSC] = {
[MDSS_MDP_CSC_RGB2RGB] = {
0,
{
0x0200, 0x0000, 0x0000,
0x0000, 0x0200, 0x0000,
0x0000, 0x0000, 0x0200,
},
{ 0x0, 0x0, 0x0,},
{ 0x0, 0x0, 0x0,},
{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
},
[MDSS_MDP_CSC_YUV2RGB] = {
0,
{
0x0254, 0x0000, 0x0331,
0x0254, 0xff37, 0xfe60,
0x0254, 0x0409, 0x0000,
},
{ 0xfff0, 0xff80, 0xff80,},
{ 0x0, 0x0, 0x0,},
{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
},
[MDSS_MDP_CSC_RGB2YUV] = {
0,
{
0x0083, 0x0102, 0x0032,
0x1fb5, 0x1f6c, 0x00e1,
0x00e1, 0x1f45, 0x1fdc
},
{ 0x0, 0x0, 0x0,},
{ 0x0010, 0x0080, 0x0080,},
{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
{ 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,},
},
[MDSS_MDP_CSC_YUV2YUV] = {
0,
{
0x0200, 0x0000, 0x0000,
0x0000, 0x0200, 0x0000,
0x0000, 0x0000, 0x0200,
},
{ 0x0, 0x0, 0x0,},
{ 0x0, 0x0, 0x0,},
{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
},
};
#define CSC_MV_OFF 0x0
#define CSC_BV_OFF 0x2C
#define CSC_LV_OFF 0x14
#define CSC_POST_OFF 0xC
#define MDSS_BLOCK_DISP_NUM (MDP_BLOCK_MAX - MDP_LOGICAL_BLOCK_DISP_0)
#define HIST_WAIT_TIMEOUT(frame) ((75 * HZ * (frame)) / 1000)
/* hist collect state */
enum {
HIST_UNKNOWN,
HIST_IDLE,
HIST_RESET,
HIST_START,
HIST_READY,
};
static u32 dither_matrix[16] = {
15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10};
static u32 dither_depth_map[9] = {
0, 0, 0, 0, 0, 1, 2, 3, 3};
static u32 igc_limited[IGC_LUT_ENTRIES] = {
16777472, 17826064, 18874656, 19923248,
19923248, 20971840, 22020432, 23069024,
24117616, 25166208, 26214800, 26214800,
27263392, 28311984, 29360576, 30409168,
31457760, 32506352, 32506352, 33554944,
34603536, 35652128, 36700720, 37749312,
38797904, 38797904, 39846496, 40895088,
41943680, 42992272, 44040864, 45089456,
45089456, 46138048, 47186640, 48235232,
49283824, 50332416, 51381008, 51381008,
52429600, 53478192, 54526784, 55575376,
56623968, 57672560, 58721152, 58721152,
59769744, 60818336, 61866928, 62915520,
63964112, 65012704, 65012704, 66061296,
67109888, 68158480, 69207072, 70255664,
71304256, 71304256, 72352848, 73401440,
74450032, 75498624, 76547216, 77595808,
77595808, 78644400, 79692992, 80741584,
81790176, 82838768, 83887360, 83887360,
84935952, 85984544, 87033136, 88081728,
89130320, 90178912, 90178912, 91227504,
92276096, 93324688, 94373280, 95421872,
96470464, 96470464, 97519056, 98567648,
99616240, 100664832, 101713424, 102762016,
102762016, 103810608, 104859200, 105907792,
106956384, 108004976, 109053568, 109053568,
110102160, 111150752, 112199344, 113247936,
114296528, 115345120, 115345120, 116393712,
117442304, 118490896, 119539488, 120588080,
121636672, 121636672, 122685264, 123733856,
124782448, 125831040, 126879632, 127928224,
127928224, 128976816, 130025408, 131074000,
132122592, 133171184, 134219776, 135268368,
135268368, 136316960, 137365552, 138414144,
139462736, 140511328, 141559920, 141559920,
142608512, 143657104, 144705696, 145754288,
146802880, 147851472, 147851472, 148900064,
149948656, 150997248, 152045840, 153094432,
154143024, 154143024, 155191616, 156240208,
157288800, 158337392, 159385984, 160434576,
160434576, 161483168, 162531760, 163580352,
164628944, 165677536, 166726128, 166726128,
167774720, 168823312, 169871904, 170920496,
171969088, 173017680, 173017680, 174066272,
175114864, 176163456, 177212048, 178260640,
179309232, 179309232, 180357824, 181406416,
182455008, 183503600, 184552192, 185600784,
185600784, 186649376, 187697968, 188746560,
189795152, 190843744, 191892336, 191892336,
192940928, 193989520, 195038112, 196086704,
197135296, 198183888, 198183888, 199232480,
200281072, 201329664, 202378256, 203426848,
204475440, 204475440, 205524032, 206572624,
207621216, 208669808, 209718400, 210766992,
211815584, 211815584, 212864176, 213912768,
214961360, 216009952, 217058544, 218107136,
218107136, 219155728, 220204320, 221252912,
222301504, 223350096, 224398688, 224398688,
225447280, 226495872, 227544464, 228593056,
229641648, 230690240, 230690240, 231738832,
232787424, 233836016, 234884608, 235933200,
236981792, 236981792, 238030384, 239078976,
240127568, 241176160, 242224752, 243273344,
243273344, 244321936, 245370528, 246419120};
#define GAMUT_T0_SIZE 125
#define GAMUT_T1_SIZE 100
#define GAMUT_T2_SIZE 80
#define GAMUT_T3_SIZE 100
#define GAMUT_T4_SIZE 100
#define GAMUT_T5_SIZE 80
#define GAMUT_T6_SIZE 64
#define GAMUT_T7_SIZE 80
#define GAMUT_TOTAL_TABLE_SIZE (GAMUT_T0_SIZE + GAMUT_T1_SIZE + \
GAMUT_T2_SIZE + GAMUT_T3_SIZE + GAMUT_T4_SIZE + \
GAMUT_T5_SIZE + GAMUT_T6_SIZE + GAMUT_T7_SIZE)
#define PP_FLAGS_DIRTY_PA 0x1
#define PP_FLAGS_DIRTY_PCC 0x2
#define PP_FLAGS_DIRTY_IGC 0x4
#define PP_FLAGS_DIRTY_ARGC 0x8
#define PP_FLAGS_DIRTY_ENHIST 0x10
#define PP_FLAGS_DIRTY_DITHER 0x20
#define PP_FLAGS_DIRTY_GAMUT 0x40
#define PP_FLAGS_DIRTY_HIST_COL 0x80
#define PP_FLAGS_DIRTY_PGC 0x100
#define PP_FLAGS_DIRTY_SHARP 0x200
#define PP_STS_ENABLE 0x1
#define PP_STS_GAMUT_FIRST 0x2
#define PP_AD_STATE_INIT 0x2
#define PP_AD_STATE_CFG 0x4
#define PP_AD_STATE_DATA 0x8
#define PP_AD_STATE_RUN 0x10
#define PP_AD_STATE_VSYNC 0x20
#define PP_AD_STATE_BL_LIN 0x40
#define PP_AD_STATE_IS_INITCFG(st) (((st) & PP_AD_STATE_INIT) &&\
((st) & PP_AD_STATE_CFG))
#define PP_AD_STATE_IS_READY(st) (((st) & PP_AD_STATE_INIT) &&\
((st) & PP_AD_STATE_CFG) &&\
((st) & PP_AD_STATE_DATA))
#define PP_AD_STS_DIRTY_INIT 0x2
#define PP_AD_STS_DIRTY_CFG 0x4
#define PP_AD_STS_DIRTY_DATA 0x8
#define PP_AD_STS_DIRTY_VSYNC 0x10
#define PP_AD_STS_IS_DIRTY(sts) (((sts) & PP_AD_STS_DIRTY_INIT) ||\
((sts) & PP_AD_STS_DIRTY_CFG))
/* Bits 0 and 1 */
#define MDSS_AD_INPUT_AMBIENT (0x03)
/* Bits 3 and 7 */
#define MDSS_AD_INPUT_STRENGTH (0x88)
/*
* Check data by shifting by mode to see if it matches to the
* MDSS_AD_INPUT_* bitfields
*/
#define MDSS_AD_MODE_DATA_MATCH(mode, data) ((1 << (mode)) & (data))
#define MDSS_AD_RUNNING_AUTO_BL(ad) (((ad)->state & PP_AD_STATE_RUN) &&\
((ad)->cfg.mode == MDSS_AD_MODE_AUTO_BL))
#define MDSS_AD_RUNNING_AUTO_STR(ad) (((ad)->state & PP_AD_STATE_RUN) &&\
((ad)->cfg.mode == MDSS_AD_MODE_AUTO_STR))
#define SHARP_STRENGTH_DEFAULT 32
#define SHARP_EDGE_THR_DEFAULT 112
#define SHARP_SMOOTH_THR_DEFAULT 8
#define SHARP_NOISE_THR_DEFAULT 2
struct mdss_pp_res_type {
/* logical info */
u32 pp_disp_flags[MDSS_BLOCK_DISP_NUM];
u32 igc_lut_c0c1[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
u32 igc_lut_c2[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
struct mdp_ar_gc_lut_data
gc_lut_r[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
struct mdp_ar_gc_lut_data
gc_lut_g[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
struct mdp_ar_gc_lut_data
gc_lut_b[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
u32 enhist_lut[MDSS_BLOCK_DISP_NUM][ENHIST_LUT_ENTRIES];
struct mdp_pa_cfg pa_disp_cfg[MDSS_BLOCK_DISP_NUM];
struct mdp_pcc_cfg_data pcc_disp_cfg[MDSS_BLOCK_DISP_NUM];
struct mdp_igc_lut_data igc_disp_cfg[MDSS_BLOCK_DISP_NUM];
struct mdp_pgc_lut_data argc_disp_cfg[MDSS_BLOCK_DISP_NUM];
struct mdp_pgc_lut_data pgc_disp_cfg[MDSS_BLOCK_DISP_NUM];
struct mdp_hist_lut_data enhist_disp_cfg[MDSS_BLOCK_DISP_NUM];
struct mdp_dither_cfg_data dither_disp_cfg[MDSS_BLOCK_DISP_NUM];
struct mdp_gamut_cfg_data gamut_disp_cfg[MDSS_BLOCK_DISP_NUM];
uint16_t gamut_tbl[MDSS_BLOCK_DISP_NUM][GAMUT_TOTAL_TABLE_SIZE];
u32 hist_data[MDSS_BLOCK_DISP_NUM][HIST_V_SIZE];
/* physical info */
struct pp_sts_type pp_disp_sts[MDSS_BLOCK_DISP_NUM];
struct pp_hist_col_info dspp_hist[MDSS_MDP_MAX_DSPP];
};
static DEFINE_MUTEX(mdss_pp_mutex);
static struct mdss_pp_res_type *mdss_pp_res;
static void pp_hist_read(char __iomem *v_base,
struct pp_hist_col_info *hist_info);
static int pp_histogram_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix);
static int pp_histogram_disable(struct pp_hist_col_info *hist_info,
u32 done_bit, char __iomem *ctl_base);
static void pp_update_pcc_regs(u32 offset,
struct mdp_pcc_cfg_data *cfg_ptr);
static void pp_update_igc_lut(struct mdp_igc_lut_data *cfg,
u32 offset, u32 blk_idx);
static void pp_update_gc_one_lut(u32 offset,
struct mdp_ar_gc_lut_data *lut_data);
static void pp_update_argc_lut(u32 offset,
struct mdp_pgc_lut_data *config);
static void pp_update_hist_lut(char __iomem *base,
struct mdp_hist_lut_data *cfg);
static void pp_pa_config(unsigned long flags, u32 base,
struct pp_sts_type *pp_sts,
struct mdp_pa_cfg *pa_config);
static void pp_pcc_config(unsigned long flags, u32 base,
struct pp_sts_type *pp_sts,
struct mdp_pcc_cfg_data *pcc_config);
static void pp_igc_config(unsigned long flags, u32 base,
struct pp_sts_type *pp_sts,
struct mdp_igc_lut_data *igc_config,
u32 pipe_num);
static void pp_enhist_config(unsigned long flags, char __iomem *base,
struct pp_sts_type *pp_sts,
struct mdp_hist_lut_data *enhist_cfg);
static void pp_sharp_config(char __iomem *offset,
struct pp_sts_type *pp_sts,
struct mdp_sharp_cfg *sharp_config);
static int mdss_ad_init_checks(struct msm_fb_data_type *mfd);
static struct mdss_ad_info *mdss_mdp_get_ad(struct msm_fb_data_type *mfd);
static int pp_update_ad_input(struct msm_fb_data_type *mfd);
static void pp_ad_vsync_handler(struct mdss_mdp_ctl *ctl, ktime_t t);
static void pp_ad_cfg_write(struct mdss_ad_info *ad);
static void pp_ad_init_write(struct mdss_ad_info *ad);
static void pp_ad_input_write(struct mdss_ad_info *ad, u32 bl_lvl);
static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd);
static void pp_ad_cfg_lut(char __iomem *offset, u32 *data);
static u32 last_sts, last_state;
/*
 * Program a color space conversion (CSC) matrix into hardware.
 * @block:   MDSS_MDP_BLOCK_SSPP (VIG pipes) or MDSS_MDP_BLOCK_WB.
 * @blk_idx: pipe / writeback-control index within that block type.
 * @tbl_idx: for SSPP only, 1 selects CSC_1, anything else CSC_0.
 * @data:    3x3 matrix, pre/post bias vectors and pre/post clamp limits.
 *
 * Returns 0 on success, -EINVAL for a NULL config or bad block/index.
 */
int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, u32 tbl_idx,
				   struct mdp_csc_cfg *data)
{
	int i, ret = 0;
	char __iomem *base, *off;
	u32 val = 0;
	struct mdss_data_type *mdata;
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_ctl *ctl;

	if (data == NULL) {
		pr_err("no csc matrix specified\n");
		return -EINVAL;
	}

	mdata = mdss_mdp_get_mdata();
	/* Resolve the register base for the requested block. */
	switch (block) {
	case MDSS_MDP_BLOCK_SSPP:
		if (blk_idx < mdata->nvig_pipes) {
			pipe = mdata->vig_pipes + blk_idx;
			base = pipe->base;
			if (tbl_idx == 1)
				base += MDSS_MDP_REG_VIG_CSC_1_BASE;
			else
				base += MDSS_MDP_REG_VIG_CSC_0_BASE;
		} else {
			ret = -EINVAL;
		}
		break;
	case MDSS_MDP_BLOCK_WB:
		if (blk_idx < mdata->nctl) {
			ctl = mdata->ctl_off + blk_idx;
			base = ctl->wb_base + MDSS_MDP_REG_WB_CSC_BASE;
		} else {
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret != 0) {
		pr_err("unsupported block id for csc\n");
		return ret;
	}

	/* Matrix coefficients: two 16-bit values packed per register,
	 * the 9th (COEFF_33) written alone after the loop. */
	off = base + CSC_MV_OFF;
	for (i = 0; i < 9; i++) {
		if (i & 0x1) {
			val |= data->csc_mv[i] << 16;
			writel_relaxed(val, off);
			/* NOTE(review): the stride is sizeof(u32 *) —
			 * 4 on the 32-bit targets this driver ships on,
			 * but 8 on a 64-bit build.  sizeof(u32) looks
			 * intended; confirm before changing. */
			off += sizeof(u32 *);
		} else {
			val = data->csc_mv[i];
		}
	}
	writel_relaxed(val, off); /* COEFF_33 */

	/* Pre- and post-conversion bias vectors. */
	off = base + CSC_BV_OFF;
	for (i = 0; i < 3; i++) {
		writel_relaxed(data->csc_pre_bv[i], off);
		writel_relaxed(data->csc_post_bv[i], off + CSC_POST_OFF);
		off += sizeof(u32 *);
	}

	/* Pre- and post-conversion clamp limits, low/high packed. */
	off = base + CSC_LV_OFF;
	for (i = 0; i < 6; i += 2) {
		val = (data->csc_pre_lv[i] << 8) | data->csc_pre_lv[i+1];
		writel_relaxed(val, off);

		val = (data->csc_post_lv[i] << 8) | data->csc_post_lv[i+1];
		writel_relaxed(val, off + CSC_POST_OFF);
		off += sizeof(u32 *);
	}

	return ret;
}
/*
 * Program one of the predefined CSC matrices (mdp_csc_convert[]) into
 * the given block.  Returns -ERANGE for an out-of-range csc_type,
 * otherwise whatever mdss_mdp_csc_setup_data() returns.
 */
int mdss_mdp_csc_setup(u32 block, u32 blk_idx, u32 tbl_idx, u32 csc_type)
{
	struct mdp_csc_cfg *data;

	if (csc_type >= MDSS_MDP_MAX_CSC) {
		pr_err("invalid csc matrix index %d\n", csc_type);
		return -ERANGE;
	}

	pr_debug("csc type=%d blk=%d idx=%d tbl=%d\n", csc_type,
		 block, blk_idx, tbl_idx);
#ifdef CONFIG_FB_MSM_CAMERA_CSC
	/* csc_update is declared elsewhere (not visible in this chunk);
	 * when it is clear, YUV2RGB uses the wideband camera matrix. */
	if (csc_type == MDSS_MDP_CSC_YUV2RGB && !csc_update)
	{
		data = &mdp_csc_convert_wideband;
		pr_debug("will do mdp_csc_convert_wideband\n");
	}
	else
	{
		data = &mdp_csc_convert[csc_type];
		pr_debug("will do mdp_csc_convert(narrow band)\n");
	}
#else
	data = &mdp_csc_convert[csc_type];
#endif
	return mdss_mdp_csc_setup_data(block, blk_idx, tbl_idx, data);
}
/*
 * Program the DSPP gamut-mapping tables (r, then g, then b; eight
 * tables per channel) and update the gamut status bits in pp_sts.
 *
 * NOTE(review): "offset += 4" advances once per *table*, not per
 * entry — every entry of one table is written to the same register
 * address (presumably a hardware auto-increment port).  Confirm
 * against the MDSS register documentation before restructuring.
 */
static void pp_gamut_config(struct mdp_gamut_cfg_data *gamut_cfg,
				u32 base, struct pp_sts_type *pp_sts)
{
	u32 offset;
	int i, j;

	if (gamut_cfg->flags & MDP_PP_OPS_WRITE) {
		offset = base + MDSS_MDP_REG_DSPP_GAMUT_BASE;
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
				MDSS_MDP_REG_WRITE(offset,
					(u32)gamut_cfg->r_tbl[i][j]);
			offset += 4;
		}
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
				MDSS_MDP_REG_WRITE(offset,
					(u32)gamut_cfg->g_tbl[i][j]);
			offset += 4;
		}
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
				MDSS_MDP_REG_WRITE(offset,
					(u32)gamut_cfg->b_tbl[i][j]);
			offset += 4;
		}
		if (gamut_cfg->gamut_first)
			pp_sts->gamut_sts |= PP_STS_GAMUT_FIRST;
	}

	if (gamut_cfg->flags & MDP_PP_OPS_DISABLE)
		pp_sts->gamut_sts &= ~PP_STS_ENABLE;
	else if (gamut_cfg->flags & MDP_PP_OPS_ENABLE)
		pp_sts->gamut_sts |= PP_STS_ENABLE;
}
/*
 * Program the picture-adjust (hue/saturation/value/contrast) registers
 * and track the PA enable state; a no-op unless the PA dirty flag is
 * set.
 */
static void pp_pa_config(unsigned long flags, u32 base,
			struct pp_sts_type *pp_sts,
			struct mdp_pa_cfg *pa_config)
{
	if (!(flags & PP_FLAGS_DIRTY_PA))
		return;

	if (pa_config->flags & MDP_PP_OPS_WRITE) {
		/* Four consecutive 32-bit registers: hue, sat, val, cont. */
		MDSS_MDP_REG_WRITE(base, pa_config->hue_adj);
		MDSS_MDP_REG_WRITE(base + 4, pa_config->sat_adj);
		MDSS_MDP_REG_WRITE(base + 8, pa_config->val_adj);
		MDSS_MDP_REG_WRITE(base + 12, pa_config->cont_adj);
	}

	if (pa_config->flags & MDP_PP_OPS_DISABLE)
		pp_sts->pa_sts &= ~PP_STS_ENABLE;
	else if (pa_config->flags & MDP_PP_OPS_ENABLE)
		pp_sts->pa_sts |= PP_STS_ENABLE;
}
/*
 * Push polynomial color correction coefficients when dirty and
 * requested, then update the PCC logical enable state.
 */
static void pp_pcc_config(unsigned long flags, u32 base,
			struct pp_sts_type *pp_sts,
			struct mdp_pcc_cfg_data *pcc_config)
{
	if (!(flags & PP_FLAGS_DIRTY_PCC))
		return;

	if (pcc_config->ops & MDP_PP_OPS_WRITE)
		pp_update_pcc_regs(base, pcc_config);

	if (pcc_config->ops & MDP_PP_OPS_DISABLE)
		pp_sts->pcc_sts &= ~PP_STS_ENABLE;
	else if (pcc_config->ops & MDP_PP_OPS_ENABLE)
		pp_sts->pcc_sts |= PP_STS_ENABLE;
}
/*
 * Configure the inverse gamma correction (IGC) LUT for a pipe and
 * record which LUT table index is active in pp_sts->igc_tbl_idx.
 *
 * NOTE(review): the ROM0/ROM1 branches set pp_sts->pcc_sts rather
 * than an IGC status field — this reads like a copy/paste from the
 * PCC path; confirm the intended status bookkeeping before changing.
 */
static void pp_igc_config(unsigned long flags, u32 base,
				struct pp_sts_type *pp_sts,
				struct mdp_igc_lut_data *igc_config,
				u32 pipe_num)
{
	u32 tbl_idx;

	if (flags & PP_FLAGS_DIRTY_IGC) {
		if (igc_config->ops & MDP_PP_OPS_WRITE)
			pp_update_igc_lut(igc_config, base, pipe_num);

		/* Select LUT table: 1/2 for the ROM presets, 0 otherwise. */
		if (igc_config->ops & MDP_PP_IGC_FLAG_ROM0) {
			pp_sts->pcc_sts |= PP_STS_ENABLE;
			tbl_idx = 1;
		} else if (igc_config->ops & MDP_PP_IGC_FLAG_ROM1) {
			pp_sts->pcc_sts |= PP_STS_ENABLE;
			tbl_idx = 2;
		} else {
			tbl_idx = 0;
		}
		pp_sts->igc_tbl_idx = tbl_idx;
		if (igc_config->ops & MDP_PP_OPS_DISABLE)
			pp_sts->igc_sts &= ~PP_STS_ENABLE;
		else if (igc_config->ops & MDP_PP_OPS_ENABLE)
			pp_sts->igc_sts |= PP_STS_ENABLE;
	}
}
/*
 * Program the histogram-enhancement LUT when dirty and requested,
 * then update its logical enable state.
 */
static void pp_enhist_config(unsigned long flags, char __iomem *base,
				struct pp_sts_type *pp_sts,
				struct mdp_hist_lut_data *enhist_cfg)
{
	if (!(flags & PP_FLAGS_DIRTY_ENHIST))
		return;

	if (enhist_cfg->ops & MDP_PP_OPS_WRITE)
		pp_update_hist_lut(base, enhist_cfg);

	if (enhist_cfg->ops & MDP_PP_OPS_DISABLE)
		pp_sts->enhist_sts &= ~PP_STS_ENABLE;
	else if (enhist_cfg->ops & MDP_PP_OPS_ENABLE)
		pp_sts->enhist_sts |= PP_STS_ENABLE;
}
/*
 * Program the QSEED2 sharpening registers and track the sharpening
 * enable state.  Note: performs no error checking on its inputs
 * (callers are trusted, matching the original contract).
 */
static void pp_sharp_config(char __iomem *base,
			struct pp_sts_type *pp_sts,
			struct mdp_sharp_cfg *sharp_config)
{
	if (sharp_config->flags & MDP_PP_OPS_WRITE) {
		/* Four consecutive registers: strength, edge, smooth, noise. */
		writel_relaxed(sharp_config->strength, base);
		writel_relaxed(sharp_config->edge_thr, base + 4);
		writel_relaxed(sharp_config->smooth_thr, base + 8);
		writel_relaxed(sharp_config->noise_thr, base + 12);
	}

	if (sharp_config->flags & MDP_PP_OPS_DISABLE)
		pp_sts->sharp_sts &= ~PP_STS_ENABLE;
	else if (sharp_config->flags & MDP_PP_OPS_ENABLE)
		pp_sts->sharp_sts |= PP_STS_ENABLE;
}
/*
 * Compute the post-processing opmode bits for a VIG pipe (CSC,
 * picture-adjust, histogram LUT) and program the associated blocks.
 * The resulting opmode is written to *op.  Always returns 0.
 */
static int pp_vig_pipe_setup(struct mdss_mdp_pipe *pipe, u32 *op)
{
	u32 opmode = 0, base = 0;
	unsigned long flags = 0;
	char __iomem *offset;

	pr_debug("pnum=%x\n", pipe->num);

	if ((pipe->flags & MDP_OVERLAY_PP_CFG_EN) &&
		(pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_CSC_CFG)) {
		/* User-supplied CSC: map its flag bits onto the opmode
		 * enable / YUV-in / YUV-out bits (17..19). */
		opmode |= !!(pipe->pp_cfg.csc_cfg.flags &
					MDP_CSC_FLAG_ENABLE) << 17;
		opmode |= !!(pipe->pp_cfg.csc_cfg.flags &
					MDP_CSC_FLAG_YUV_IN) << 18;
		opmode |= !!(pipe->pp_cfg.csc_cfg.flags &
					MDP_CSC_FLAG_YUV_OUT) << 19;
		/*
		 * TODO: Allow pipe to be programmed whenever new CSC is
		 * applied (i.e. dirty bit)
		 */
		if (pipe->play_cnt == 0)
			mdss_mdp_csc_setup_data(MDSS_MDP_BLOCK_SSPP,
			  pipe->num, 1, &pipe->pp_cfg.csc_cfg);
	} else {
		if (pipe->src_fmt->is_yuv)
			opmode |= (0 << 19) |	/* DST_DATA=RGB */
				  (1 << 18) |	/* SRC_DATA=YCBCR */
				  (1 << 17);	/* CSC_1_EN */
		/*
		 * TODO: Needs to be part of dirty bit logic: if there is a
		 * previously configured pipe need to re-configure CSC matrix
		 */
		/* pre_csc_update/csc_update are globals declared elsewhere;
		 * reprogram when the requested CSC mode changed. */
		if ((pipe->play_cnt == 0)||(pre_csc_update != csc_update)) {
			mdss_mdp_csc_setup(MDSS_MDP_BLOCK_SSPP, pipe->num, 1,
					   MDSS_MDP_CSC_YUV2RGB);
		}
	}

	pp_histogram_setup(&opmode, MDSS_PP_SSPP_CFG | pipe->num, pipe->mixer);

	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
		if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PA_CFG) {
			flags = PP_FLAGS_DIRTY_PA;
			base = MDSS_MDP_REG_SSPP_OFFSET(pipe->num) +
				MDSS_MDP_REG_VIG_PA_BASE;
			pp_pa_config(flags, base, &pipe->pp_res.pp_sts,
					&pipe->pp_cfg.pa_cfg);

			if (pipe->pp_res.pp_sts.pa_sts & PP_STS_ENABLE)
				opmode |= (1 << 4); /* PA_EN */
		}
		if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_LUT_CFG) {
			pp_enhist_config(PP_FLAGS_DIRTY_ENHIST,
				pipe->base + MDSS_MDP_REG_VIG_HIST_LUT_BASE,
				&pipe->pp_res.pp_sts,
				&pipe->pp_cfg.hist_lut_cfg);
		}
	}

	if (pipe->pp_res.pp_sts.enhist_sts & PP_STS_ENABLE) {
		/* Enable HistLUT and PA */
		opmode |= BIT(10) | BIT(4);
		if (!(pipe->pp_res.pp_sts.pa_sts & PP_STS_ENABLE)) {
			/* PA block feeds the LUT path: program neutral
			 * (zero) adjustments when PA itself is off. */
			offset = pipe->base + MDSS_MDP_REG_VIG_PA_BASE;
			writel_relaxed(0, offset);
			writel_relaxed(0, offset + 4);
			writel_relaxed(0, offset + 8);
			writel_relaxed(0, offset + 12);
		}
	}
	*op = opmode;

	return 0;
}
/*
 * Configure the pipe scaler: decide X/Y scaling enables, filter
 * selection and phase steps from the src/dst rectangles, decimation
 * and chroma subsampling; also programs default sharpening for YUV
 * content that is not being downscaled.
 * Returns 0 on success, -EINVAL when the requested scaling is not
 * possible (DMA pipe asked to scale, or downscale ratio exceeded).
 */
static int mdss_mdp_scale_setup(struct mdss_mdp_pipe *pipe)
{
	u32 scale_config = 0;
	u32 phasex_step = 0, phasey_step = 0;
	u32 chroma_sample;
	u32 filter_mode;
	struct mdss_data_type *mdata;
	u32 src_w, src_h;

	mdata = mdss_mdp_get_mdata();
	/* Content-adaptive filter only on >= rev 1.02 HW with YUV source. */
	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102 && pipe->src_fmt->is_yuv)
		filter_mode = MDSS_MDP_SCALE_FILTER_CA;
	else
		filter_mode = MDSS_MDP_SCALE_FILTER_BIL;

	/* DMA pipes have no scaler: only 1:1 src/dst is accepted. */
	if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA) {
		if (pipe->dst.h != pipe->src.h || pipe->dst.w != pipe->src.w) {
			pr_err("no scaling supported on dma pipe\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Effective source size after horizontal/vertical decimation. */
	src_w = pipe->src.w >> pipe->horz_deci;
	src_h = pipe->src.h >> pipe->vert_deci;

	chroma_sample = pipe->src_fmt->chroma_sample;
	if (pipe->flags & MDP_SOURCE_ROTATED_90) {
		/* A 90-degree rotated source swaps H/V chroma siting. */
		if (chroma_sample == MDSS_MDP_CHROMA_H1V2)
			chroma_sample = MDSS_MDP_CHROMA_H2V1;
		else if (chroma_sample == MDSS_MDP_CHROMA_H2V1)
			chroma_sample = MDSS_MDP_CHROMA_H1V2;
	}

	if (!(pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_SHARP_CFG)) {
		/* No user sharpening config: install driver defaults. */
		pipe->pp_cfg.sharp_cfg.flags = MDP_PP_OPS_ENABLE |
			MDP_PP_OPS_WRITE;
		pipe->pp_cfg.sharp_cfg.strength = SHARP_STRENGTH_DEFAULT;
		pipe->pp_cfg.sharp_cfg.edge_thr = SHARP_EDGE_THR_DEFAULT;
		pipe->pp_cfg.sharp_cfg.smooth_thr = SHARP_SMOOTH_THR_DEFAULT;
		pipe->pp_cfg.sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
	}

	/* Sharpening is applied only to YUV content not being downscaled. */
	if ((pipe->src_fmt->is_yuv) &&
		!((pipe->dst.w < src_w) || (pipe->dst.h < src_h))) {
		pp_sharp_config(pipe->base +
		   MDSS_MDP_REG_VIG_QSEED2_SHARP,
		   &pipe->pp_res.pp_sts,
		   &pipe->pp_cfg.sharp_cfg);
	}

	/* Vertical scaler: needed for a height change, active sharpening,
	 * or vertically-subsampled chroma (which needs 2x upsampling). */
	if ((src_h != pipe->dst.h) ||
	    (pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE) ||
	    (chroma_sample == MDSS_MDP_CHROMA_420) ||
	    (chroma_sample == MDSS_MDP_CHROMA_H1V2)) {
		pr_debug("scale y - src_h=%d dst_h=%d\n", src_h, pipe->dst.h);

		if ((src_h / MAX_DOWNSCALE_RATIO) > pipe->dst.h) {
			pr_err("too much downscaling height=%d->%d",
			       src_h, pipe->dst.h);
			return -EINVAL;
		}

		scale_config |= MDSS_MDP_SCALEY_EN;
		phasey_step = pipe->phase_step_y;

		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
			u32 chroma_shift = 0;
			if (!pipe->vert_deci &&
			    ((chroma_sample == MDSS_MDP_CHROMA_420) ||
			    (chroma_sample == MDSS_MDP_CHROMA_H1V2)))
				chroma_shift = 1; /* 2x upsample chroma */

			if (src_h <= pipe->dst.h) {
				scale_config |= /* G/Y, A */
					(filter_mode << 10) |
					(MDSS_MDP_SCALE_FILTER_NEAREST << 18);
			} else
				scale_config |= /* G/Y, A */
					(MDSS_MDP_SCALE_FILTER_PCMN << 10) |
					(MDSS_MDP_SCALE_FILTER_PCMN << 18);

			if ((src_h >> chroma_shift) <= pipe->dst.h)
				scale_config |= /* CrCb */
					(MDSS_MDP_SCALE_FILTER_BIL << 14);
			else
				scale_config |= /* CrCb */
					(MDSS_MDP_SCALE_FILTER_PCMN << 14);

			writel_relaxed(phasey_step >> chroma_shift, pipe->base +
				MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
		} else {
			if (src_h <= pipe->dst.h)
				scale_config |= /* RGB, A */
					(MDSS_MDP_SCALE_FILTER_BIL << 10) |
					(MDSS_MDP_SCALE_FILTER_NEAREST << 18);
			else
				scale_config |= /* RGB, A */
					(MDSS_MDP_SCALE_FILTER_PCMN << 10) |
					(MDSS_MDP_SCALE_FILTER_PCMN << 18);
		}
	}

	/* Horizontal scaler: mirror image of the vertical case above. */
	if ((src_w != pipe->dst.w) ||
	    (pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE) ||
	    (chroma_sample == MDSS_MDP_CHROMA_420) ||
	    (chroma_sample == MDSS_MDP_CHROMA_H2V1)) {
		pr_debug("scale x - src_w=%d dst_w=%d\n", src_w, pipe->dst.w);

		if ((src_w / MAX_DOWNSCALE_RATIO) > pipe->dst.w) {
			pr_err("too much downscaling width=%d->%d",
			       src_w, pipe->dst.w);
			return -EINVAL;
		}

		scale_config |= MDSS_MDP_SCALEX_EN;
		phasex_step = pipe->phase_step_x;

		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
			u32 chroma_shift = 0;

			if (!pipe->horz_deci &&
			    ((chroma_sample == MDSS_MDP_CHROMA_420) ||
			    (chroma_sample == MDSS_MDP_CHROMA_H2V1)))
				chroma_shift = 1; /* 2x upsample chroma */

			if (src_w <= pipe->dst.w) {
				scale_config |= /* G/Y, A */
					(filter_mode << 8) |
					(MDSS_MDP_SCALE_FILTER_NEAREST << 16);
			} else
				scale_config |= /* G/Y, A */
					(MDSS_MDP_SCALE_FILTER_PCMN << 8) |
					(MDSS_MDP_SCALE_FILTER_PCMN << 16);

			if ((src_w >> chroma_shift) <= pipe->dst.w)
				scale_config |= /* CrCb */
					(MDSS_MDP_SCALE_FILTER_BIL << 12);
			else
				scale_config |= /* CrCb */
					(MDSS_MDP_SCALE_FILTER_PCMN << 12);

			writel_relaxed(phasex_step >> chroma_shift, pipe->base +
				MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
		} else {
			if (src_w <= pipe->dst.w)
				scale_config |= /* RGB, A */
					(MDSS_MDP_SCALE_FILTER_BIL << 8) |
					(MDSS_MDP_SCALE_FILTER_NEAREST << 16);
			else
				scale_config |= /* RGB, A */
					(MDSS_MDP_SCALE_FILTER_PCMN << 8) |
					(MDSS_MDP_SCALE_FILTER_PCMN << 16);
		}
	}

	writel_relaxed(scale_config, pipe->base +
		MDSS_MDP_REG_SCALE_CONFIG);
	writel_relaxed(phasex_step, pipe->base +
		MDSS_MDP_REG_SCALE_PHASE_STEP_X);
	writel_relaxed(phasey_step, pipe->base +
		MDSS_MDP_REG_SCALE_PHASE_STEP_Y);
	return 0;
}
/*
 * mdss_mdp_pipe_pp_setup() - program per-pipe post-processing.
 *
 * Configures the scaler for @pipe and, for VIG pipes only, the VIG
 * color-processing blocks (updating @op with the resulting opmode bits).
 *
 * Returns 0 on success, -ENODEV when @pipe is NULL, -EINVAL when the
 * scaler setup rejects the configuration.
 */
int mdss_mdp_pipe_pp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
{
	int rc;

	if (pipe == NULL)
		return -ENODEV;

	rc = mdss_mdp_scale_setup(pipe);
	if (rc != 0)
		return -EINVAL;

	/* only VIG pipes carry the extra color-processing hardware */
	return (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) ?
		pp_vig_pipe_setup(pipe, op) : 0;
}
/*
 * mdss_mdp_pipe_sspp_term() - tear down per-pipe post-processing state.
 *
 * Stops any in-flight histogram collection on the pipe's VIG histogram
 * block and clears the pipe's PP configuration and resources.
 */
void mdss_mdp_pipe_sspp_term(struct mdss_mdp_pipe *pipe)
{
	u32 done_bit;
	struct pp_hist_col_info *hist_info;
	char __iomem *ctl_base;

	/* guard before any dereference (memsets below touch *pipe) */
	if (!pipe)
		return;

	/*
	 * Fix: the condition used to be "!pipe && pipe->pp_res.hist.col_en",
	 * which both dereferenced a NULL pipe and never disabled an active
	 * histogram for a valid pipe.
	 */
	if (pipe->pp_res.hist.col_en) {
		done_bit = 3 << (pipe->num * 4);
		hist_info = &pipe->pp_res.hist;
		ctl_base = pipe->base +
			MDSS_MDP_REG_VIG_HIST_CTL_BASE;
		pp_histogram_disable(hist_info, done_bit, ctl_base);
	}
	memset(&pipe->pp_cfg, 0, sizeof(struct mdp_overlay_pp_params));
	memset(&pipe->pp_res, 0, sizeof(struct mdss_pipe_pp_res));
}
/*
 * mdss_mdp_pipe_sspp_setup() - program SSPP-level IGC for a pipe.
 *
 * Selects the IGC register base and pipe index for the pipe's type,
 * applies a pending IGC configuration, and sets the IGC_LUT_EN bit
 * in @op when IGC ends up enabled.
 *
 * Returns 0 on success, -EINVAL on NULL pipe or unknown pipe type.
 */
int mdss_mdp_pipe_sspp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
{
	unsigned long dirty = 0;
	u32 igc_base, igc_idx;

	if (!pipe)
		return -EINVAL;

	/*
	 * TODO: should this function be responsible for masking multiple
	 * pipes to be written in dual pipe case?
	 * if so, requires rework of update_igc_lut
	 */
	switch (pipe->type) {
	case MDSS_MDP_PIPE_TYPE_VIG:
		igc_base = MDSS_MDP_REG_IGC_VIG_BASE;
		igc_idx = pipe->num - MDSS_MDP_SSPP_VIG0;
		break;
	case MDSS_MDP_PIPE_TYPE_RGB:
		igc_base = MDSS_MDP_REG_IGC_RGB_BASE;
		igc_idx = pipe->num - MDSS_MDP_SSPP_RGB0;
		break;
	case MDSS_MDP_PIPE_TYPE_DMA:
		igc_base = MDSS_MDP_REG_IGC_DMA_BASE;
		igc_idx = pipe->num - MDSS_MDP_SSPP_DMA0;
		break;
	default:
		return -EINVAL;
	}

	if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_IGC_CFG) {
		dirty |= PP_FLAGS_DIRTY_IGC;
		pp_igc_config(dirty, igc_base, &pipe->pp_res.pp_sts,
				&pipe->pp_cfg.igc_cfg, igc_idx);
	}

	if (pipe->pp_res.pp_sts.igc_sts & PP_STS_ENABLE)
		*op |= (1 << 16); /* IGC_LUT_EN */

	return 0;
}
/*
 * pp_mixer_setup() - program layer-mixer level post-processing (ARGC LUT).
 *
 * Writes a pending ARGC gamma LUT for @disp_num into the mixer's GC block
 * and, when the LUT is enabled and the mixer is being flushed, sets the
 * GC_LUT_EN bit in the mixer opmode register.
 *
 * Returns 0 on success (including "no DSPP backs this mixer"), -EINVAL on
 * NULL mixer/ctl.
 */
static int pp_mixer_setup(u32 disp_num,
			struct mdss_mdp_mixer *mixer)
{
	u32 flags, offset, dspp_num, opmode = 0;
	struct mdp_pgc_lut_data *pgc_config;
	struct pp_sts_type *pp_sts;
	struct mdss_mdp_ctl *ctl;

	/* Fix: mixer was dereferenced (mixer->num) before the NULL check. */
	if (!mixer || !mixer->ctl)
		return -EINVAL;
	ctl = mixer->ctl;
	dspp_num = mixer->num;

	/* no corresponding dspp */
	if ((mixer->type != MDSS_MDP_MIXER_TYPE_INTF) ||
		(dspp_num >= MDSS_MDP_MAX_DSPP))
		return 0;

	if (disp_num < MDSS_BLOCK_DISP_NUM)
		flags = mdss_pp_res->pp_disp_flags[disp_num];
	else
		flags = 0;

	pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];

	/* GC_LUT is in layer mixer */
	if (flags & PP_FLAGS_DIRTY_ARGC) {
		pgc_config = &mdss_pp_res->argc_disp_cfg[disp_num];
		if (pgc_config->flags & MDP_PP_OPS_WRITE) {
			/*
			 * Fix: LM registers are per physical mixer; index by
			 * dspp_num (mixer->num) as the opmode update below
			 * does, not by the logical display number.
			 */
			offset = MDSS_MDP_REG_LM_OFFSET(dspp_num) +
				MDSS_MDP_REG_LM_GC_LUT_BASE;
			pp_update_argc_lut(offset, pgc_config);
		}
		if (pgc_config->flags & MDP_PP_OPS_DISABLE)
			pp_sts->argc_sts &= ~PP_STS_ENABLE;
		else if (pgc_config->flags & MDP_PP_OPS_ENABLE)
			pp_sts->argc_sts |= PP_STS_ENABLE;
		ctl->flush_bits |= BIT(6) << dspp_num; /* LAYER_MIXER */
	}

	/* update LM opmode if LM needs flush */
	if ((pp_sts->argc_sts & PP_STS_ENABLE) &&
		(ctl->flush_bits & (BIT(6) << dspp_num))) {
		offset = MDSS_MDP_REG_LM_OFFSET(dspp_num) +
			MDSS_MDP_REG_LM_OP_MODE;
		opmode = MDSS_MDP_REG_READ(offset);
		opmode |= (1 << 0); /* GC_LUT_EN */
		MDSS_MDP_REG_WRITE(offset, opmode);
	}
	return 0;
}
/*
 * mdss_mdp_get_dspp_addr_off() - resolve a DSPP's mapped register base.
 *
 * Returns the dspp_base of the interface mixer indexed by @dspp_num, or
 * ERR_PTR(-EINVAL) when the index exceeds the number of interface mixers.
 */
static char __iomem *mdss_mdp_get_dspp_addr_off(u32 dspp_num)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (dspp_num >= mdata->nmixers_intf) {
		pr_err("Invalid dspp_num=%d", dspp_num);
		return ERR_PTR(-EINVAL);
	}
	return mdata->mixer_intf[dspp_num].dspp_base;
}
/*
 * pp_histogram_setup() - arm histogram collection for a DSPP or SSPP block.
 *
 * Assumes that function will be called from within clock enabled space.
 *
 * When collection is enabled on the selected histogram block, ORs the
 * block's HIST_EN | AUTO_CLEAR bits into *op and, if the collector is
 * idle (or READY with no pending read), kicks off a new collection by
 * writing the block's kick register.
 *
 * Returns 0 on success, -ENODEV when the SSPP pipe does not exist,
 * -EINVAL for an unrecognized histogram location.
 */
static int pp_histogram_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix)
{
int ret = -EINVAL;
char __iomem *base;
u32 op_flags, kick_base, col_state;
struct mdss_data_type *mdata;
struct mdss_mdp_pipe *pipe;
struct pp_hist_col_info *hist_info;
unsigned long flag;
if (mix && (PP_LOCAT(block) == MDSS_PP_DSPP_CFG)) {
/* HIST_EN & AUTO_CLEAR */
op_flags = BIT(16) | BIT(17);
hist_info = &mdss_pp_res->dspp_hist[mix->num];
base = mdss_mdp_get_dspp_addr_off(PP_BLOCK(block));
kick_base = MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
} else if (PP_LOCAT(block) == MDSS_PP_SSPP_CFG) {
mdata = mdss_mdp_get_mdata();
pipe = mdss_mdp_pipe_get(mdata, BIT(PP_BLOCK(block)));
if (IS_ERR_OR_NULL(pipe)) {
pr_debug("pipe DNE (%d)", (u32) BIT(PP_BLOCK(block)));
ret = -ENODEV;
goto error;
}
/* HIST_EN & AUTO_CLEAR */
op_flags = BIT(8) + BIT(9);
hist_info = &pipe->pp_res.hist;
base = pipe->base;
kick_base = MDSS_MDP_REG_VIG_HIST_CTL_BASE;
/* drop the refcount taken by mdss_mdp_pipe_get */
mdss_mdp_pipe_unmap(pipe);
} else {
pr_warn("invalid histogram location (%d)", block);
goto error;
}
if (hist_info->col_en) {
*op |= op_flags;
/* lock order: hist_mutex first, then hist_lock (irq-safe spinlock) */
mutex_lock(&hist_info->hist_mutex);
spin_lock_irqsave(&hist_info->hist_lock, flag);
col_state = hist_info->col_state;
if (hist_info->is_kick_ready &&
((col_state == HIST_IDLE) ||
((false == hist_info->read_request) &&
col_state == HIST_READY))) {
/* Kick off collection */
writel_relaxed(1, base + kick_base);
hist_info->col_state = HIST_START;
}
spin_unlock_irqrestore(&hist_info->hist_lock, flag);
mutex_unlock(&hist_info->hist_mutex);
}
ret = 0;
error:
return ret;
}
/*
 * pp_dspp_setup() - program all DSPP post-processing blocks for a mixer.
 *
 * Applies pending PA/PCC/IGC/hist-LUT/dither/gamut/PGC configuration for
 * logical display @disp_num to the DSPP backing @mixer, accumulates the
 * DSPP opmode bits, writes the opmode register, and requests a DSPP flush.
 * Must tolerate being called with nothing dirty (cheap early exit).
 *
 * Returns 0 on success, -EINVAL for NULL/non-INTF mixers, or a histogram
 * setup error.
 */
static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_mixer *mixer)
{
	u32 flags, base, offset, dspp_num, opmode = 0;
	struct mdp_dither_cfg_data *dither_cfg;
	struct mdp_pgc_lut_data *pgc_config;
	struct pp_sts_type *pp_sts;
	u32 data;
	char __iomem *basel;
	int i, ret = 0;
	struct mdss_data_type *mdata;
	struct mdss_mdp_ctl *ctl;
	u32 mixer_cnt;
	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];

	if (!mixer || !mixer->ctl || !mixer->ctl->mdata)
		return -EINVAL;
	ctl = mixer->ctl;
	mdata = ctl->mdata;
	dspp_num = mixer->num;

	/* no corresponding dspp */
	if ((mixer->type != MDSS_MDP_MIXER_TYPE_INTF) ||
		(dspp_num >= MDSS_MDP_MAX_DSPP))
		return -EINVAL;

	base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num);
	basel = mdss_mdp_get_dspp_addr_off(dspp_num);
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);

	ret = pp_histogram_setup(&opmode, MDSS_PP_DSPP_CFG | dspp_num, mixer);
	if (ret)
		goto dspp_exit;

	if (disp_num < MDSS_BLOCK_DISP_NUM)
		flags = mdss_pp_res->pp_disp_flags[disp_num];
	else
		flags = 0;

	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
	if (dspp_num < mdata->nad_cfgs && (mixer_cnt != 2) &&
			ctl->mfd->panel_info->type != MIPI_CMD_PANEL) {
		ret = mdss_mdp_ad_setup(ctl->mfd);
		if (ret < 0)
			pr_warn("ad_setup(dspp%d) returns %d", dspp_num, ret);
	}

	/* call calibration specific processing here */
	if (ctl->mfd->calib_mode)
		goto flush_exit;

	/* nothing to update */
	if ((!flags) && (!(opmode)) && (ret <= 0))
		goto dspp_exit;
	ret = 0;

	pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];
	pp_pa_config(flags, base + MDSS_MDP_REG_DSPP_PA_BASE, pp_sts,
			&mdss_pp_res->pa_disp_cfg[disp_num]);
	pp_pcc_config(flags, base + MDSS_MDP_REG_DSPP_PCC_BASE, pp_sts,
			&mdss_pp_res->pcc_disp_cfg[disp_num]);
	pp_igc_config(flags, MDSS_MDP_REG_IGC_DSPP_BASE, pp_sts,
			&mdss_pp_res->igc_disp_cfg[disp_num], dspp_num);
	pp_enhist_config(flags, basel + MDSS_MDP_REG_DSPP_HIST_LUT_BASE,
			pp_sts, &mdss_pp_res->enhist_disp_cfg[disp_num]);

	if (pp_sts->pa_sts & PP_STS_ENABLE)
		opmode |= (1 << 20); /* PA_EN */
	if (pp_sts->pcc_sts & PP_STS_ENABLE)
		opmode |= (1 << 4); /* PCC_EN */
	if (pp_sts->igc_sts & PP_STS_ENABLE) {
		opmode |= (1 << 0) | /* IGC_LUT_EN */
			(pp_sts->igc_tbl_idx << 1);
	}
	if (pp_sts->enhist_sts & PP_STS_ENABLE) {
		opmode |= (1 << 19) | /* HIST_LUT_EN */
			(1 << 20); /* PA_EN */
		if (!(pp_sts->pa_sts & PP_STS_ENABLE)) {
			/* hist LUT requires PA; program neutral PA values */
			offset = base + MDSS_MDP_REG_DSPP_PA_BASE;
			MDSS_MDP_REG_WRITE(offset, 0);
			MDSS_MDP_REG_WRITE(offset + 4, 0);
			MDSS_MDP_REG_WRITE(offset + 8, 0);
			MDSS_MDP_REG_WRITE(offset + 12, 0);
		}
	}

	if (flags & PP_FLAGS_DIRTY_DITHER) {
		dither_cfg = &mdss_pp_res->dither_disp_cfg[disp_num];
		if (dither_cfg->flags & MDP_PP_OPS_WRITE) {
			offset = base + MDSS_MDP_REG_DSPP_DITHER_DEPTH;
			MDSS_MDP_REG_WRITE(offset,
			  dither_depth_map[dither_cfg->g_y_depth] |
			  (dither_depth_map[dither_cfg->b_cb_depth] << 2) |
			  (dither_depth_map[dither_cfg->r_cr_depth] << 4));
			offset += 0x14;
			/*
			 * Fix: loop condition was "i << 16" (always 0 for
			 * i == 0), so the dither matrix was never written.
			 * Pack 16 matrix entries, 4 per register.
			 */
			for (i = 0; i < 16; i += 4) {
				data = dither_matrix[i] |
					(dither_matrix[i + 1] << 4) |
					(dither_matrix[i + 2] << 8) |
					(dither_matrix[i + 3] << 12);
				MDSS_MDP_REG_WRITE(offset, data);
				offset += 4;
			}
		}
		if (dither_cfg->flags & MDP_PP_OPS_DISABLE)
			pp_sts->dither_sts &= ~PP_STS_ENABLE;
		else if (dither_cfg->flags & MDP_PP_OPS_ENABLE)
			pp_sts->dither_sts |= PP_STS_ENABLE;
	}
	if (pp_sts->dither_sts & PP_STS_ENABLE)
		opmode |= (1 << 8); /* DITHER_EN */

	if (flags & PP_FLAGS_DIRTY_GAMUT)
		pp_gamut_config(&mdss_pp_res->gamut_disp_cfg[disp_num], base,
				pp_sts);
	if (pp_sts->gamut_sts & PP_STS_ENABLE) {
		opmode |= (1 << 23); /* GAMUT_EN */
		if (pp_sts->gamut_sts & PP_STS_GAMUT_FIRST)
			opmode |= (1 << 24); /* GAMUT_ORDER */
	}

	if (flags & PP_FLAGS_DIRTY_PGC) {
		pgc_config = &mdss_pp_res->pgc_disp_cfg[disp_num];
		if (pgc_config->flags & MDP_PP_OPS_WRITE) {
			offset = base + MDSS_MDP_REG_DSPP_GC_BASE;
			pp_update_argc_lut(offset, pgc_config);
		}
		if (pgc_config->flags & MDP_PP_OPS_DISABLE)
			pp_sts->pgc_sts &= ~PP_STS_ENABLE;
		else if (pgc_config->flags & MDP_PP_OPS_ENABLE)
			pp_sts->pgc_sts |= PP_STS_ENABLE;
	}
	if (pp_sts->pgc_sts & PP_STS_ENABLE)
		opmode |= (1 << 22); /* PGC_EN */

flush_exit:
	writel_relaxed(opmode, basel + MDSS_MDP_REG_DSPP_OP_MODE);
	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, BIT(13 + dspp_num));
	wmb();
dspp_exit:
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
	return ret;
}
/*
 * mdss_mdp_pp_setup() - apply pending post-processing under the mfd lock.
 *
 * Returns -EINVAL on missing mfd/resources, -EPERM when the panel is off,
 * otherwise the result of mdss_mdp_pp_setup_locked().
 */
int mdss_mdp_pp_setup(struct mdss_mdp_ctl *ctl)
{
	int rc;

	if (!ctl->mfd || !mdss_pp_res)
		return -EINVAL;

	/* TODO: have some sort of reader/writer lock to prevent unclocked
	 * access while display power is toggled */
	if (!ctl->mfd->panel_power_on)
		return -EPERM;

	mutex_lock(&ctl->mfd->lock);
	rc = mdss_mdp_pp_setup_locked(ctl);
	mutex_unlock(&ctl->mfd->lock);
	return rc;
}
/* call only when holding and mfd->lock */
/*
 * mdss_mdp_pp_setup_locked() - run mixer and DSPP PP setup for both
 * mixers of @ctl, then clear the display's dirty flags.
 */
int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl)
{
	struct mdss_mdp_mixer *mixers[2];
	u32 disp;
	int i;

	if (!ctl->mfd || !mdss_pp_res)
		return -EINVAL;

	/* treat fb_num the same as block logical id */
	disp = ctl->mfd->index;
	mixers[0] = ctl->mixer_left;
	mixers[1] = ctl->mixer_right;

	mutex_lock(&mdss_pp_mutex);
	for (i = 0; i < 2; i++) {
		if (!mixers[i])
			continue;
		pp_mixer_setup(disp, mixers[i]);
		pp_dspp_setup(disp, mixers[i]);
	}
	/* clear dirty flag */
	if (disp < MDSS_BLOCK_DISP_NUM)
		mdss_pp_res->pp_disp_flags[disp] = 0;
	mutex_unlock(&mdss_pp_mutex);
	return 0;
}
/*
 * Set dirty and write bits on features that were enabled so they will be
 * reconfigured
 */
/*
 * mdss_mdp_pp_resume() - restore PP and AD state after a suspend/resume.
 *
 * Re-writes assertive-display registers from cached state, then marks every
 * PP feature that was enabled before suspend as dirty + WRITE so the next
 * pp_setup pass reprograms its hardware registers.
 *
 * Returns 0 on success, -EINVAL for an out-of-range dspp_num.
 */
int mdss_mdp_pp_resume(struct mdss_mdp_ctl *ctl, u32 dspp_num)
{
u32 flags = 0, disp_num, bl;
struct pp_sts_type pp_sts;
struct mdss_ad_info *ad;
struct mdss_data_type *mdata = ctl->mdata;
if (dspp_num >= MDSS_MDP_MAX_DSPP) {
pr_warn("invalid dspp_num");
return -EINVAL;
}
disp_num = ctl->mfd->index;
/* re-program assertive display from its cached state, if configured */
if (dspp_num < mdata->nad_cfgs) {
ad = &mdata->ad_cfgs[dspp_num];
if (PP_AD_STATE_CFG & ad->state)
pp_ad_cfg_write(ad);
if (PP_AD_STATE_INIT & ad->state)
pp_ad_init_write(ad);
if (PP_AD_STATE_DATA & ad->state) {
bl = ctl->mfd->bl_level;
ad->last_bl = bl;
/* map backlight through the linearization LUT when enabled */
if (ad->state & PP_AD_STATE_BL_LIN) {
bl = ad->bl_lin[bl >> ad->bl_bright_shift];
bl = bl << ad->bl_bright_shift;
}
pp_ad_input_write(ad, bl);
}
if ((PP_AD_STATE_VSYNC & ad->state) && ad->calc_itr)
ctl->add_vsync_handler(ctl, &ad->handle);
}
/*
 * For each feature that was enabled: set its dirty flag and force
 * MDP_PP_OPS_WRITE in the stored config (unless the config explicitly
 * asks for DISABLE).  NOTE: some configs carry the ops in ".flags",
 * others in ".ops" — per their respective uapi struct layouts.
 */
pp_sts = mdss_pp_res->pp_disp_sts[disp_num];
if (pp_sts.pa_sts & PP_STS_ENABLE) {
flags |= PP_FLAGS_DIRTY_PA;
if (!(mdss_pp_res->pa_disp_cfg[disp_num].flags
& MDP_PP_OPS_DISABLE))
mdss_pp_res->pa_disp_cfg[disp_num].flags |=
MDP_PP_OPS_WRITE;
}
if (pp_sts.pcc_sts & PP_STS_ENABLE) {
flags |= PP_FLAGS_DIRTY_PCC;
if (!(mdss_pp_res->pcc_disp_cfg[disp_num].ops
& MDP_PP_OPS_DISABLE))
mdss_pp_res->pcc_disp_cfg[disp_num].ops |=
MDP_PP_OPS_WRITE;
}
if (pp_sts.igc_sts & PP_STS_ENABLE) {
flags |= PP_FLAGS_DIRTY_IGC;
if (!(mdss_pp_res->igc_disp_cfg[disp_num].ops
& MDP_PP_OPS_DISABLE))
mdss_pp_res->igc_disp_cfg[disp_num].ops |=
MDP_PP_OPS_WRITE;
}
if (pp_sts.argc_sts & PP_STS_ENABLE) {
flags |= PP_FLAGS_DIRTY_ARGC;
if (!(mdss_pp_res->argc_disp_cfg[disp_num].flags
& MDP_PP_OPS_DISABLE))
mdss_pp_res->argc_disp_cfg[disp_num].flags |=
MDP_PP_OPS_WRITE;
}
if (pp_sts.enhist_sts & PP_STS_ENABLE) {
flags |= PP_FLAGS_DIRTY_ENHIST;
if (!(mdss_pp_res->enhist_disp_cfg[disp_num].ops
& MDP_PP_OPS_DISABLE))
mdss_pp_res->enhist_disp_cfg[disp_num].ops |=
MDP_PP_OPS_WRITE;
}
if (pp_sts.dither_sts & PP_STS_ENABLE) {
flags |= PP_FLAGS_DIRTY_DITHER;
if (!(mdss_pp_res->dither_disp_cfg[disp_num].flags
& MDP_PP_OPS_DISABLE))
mdss_pp_res->dither_disp_cfg[disp_num].flags |=
MDP_PP_OPS_WRITE;
}
if (pp_sts.gamut_sts & PP_STS_ENABLE) {
flags |= PP_FLAGS_DIRTY_GAMUT;
if (!(mdss_pp_res->gamut_disp_cfg[disp_num].flags
& MDP_PP_OPS_DISABLE))
mdss_pp_res->gamut_disp_cfg[disp_num].flags |=
MDP_PP_OPS_WRITE;
}
if (pp_sts.pgc_sts & PP_STS_ENABLE) {
flags |= PP_FLAGS_DIRTY_PGC;
if (!(mdss_pp_res->pgc_disp_cfg[disp_num].flags
& MDP_PP_OPS_DISABLE))
mdss_pp_res->pgc_disp_cfg[disp_num].flags |=
MDP_PP_OPS_WRITE;
}
mdss_pp_res->pp_disp_flags[disp_num] |= flags;
return 0;
}
/*
 * mdss_mdp_pp_init() - allocate global PP resources and init histogram locks.
 *
 * Allocates mdss_pp_res (device-managed, freed automatically or via
 * mdss_mdp_pp_term) on first call, and initializes per-DSPP and per-VIG
 * histogram mutexes/spinlocks.
 *
 * Returns 0 on success, -ENOMEM when allocation fails.
 */
int mdss_mdp_pp_init(struct device *dev)
{
	int i, ret = 0;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_pipe *vig;

	mutex_lock(&mdss_pp_mutex);
	if (!mdss_pp_res) {
		mdss_pp_res = devm_kzalloc(dev, sizeof(*mdss_pp_res),
				GFP_KERNEL);
		if (mdss_pp_res == NULL) {
			pr_err("%s mdss_pp_res allocation failed!", __func__);
			ret = -ENOMEM;
			/*
			 * Fix: previously fell through and dereferenced the
			 * NULL mdss_pp_res in the init loop below.
			 */
			goto exit;
		}
		for (i = 0; i < MDSS_MDP_MAX_DSPP; i++) {
			mutex_init(&mdss_pp_res->dspp_hist[i].hist_mutex);
			spin_lock_init(&mdss_pp_res->dspp_hist[i].hist_lock);
		}
	}
	if (mdata) {
		vig = mdata->vig_pipes;
		for (i = 0; i < mdata->nvig_pipes; i++) {
			mutex_init(&vig[i].pp_res.hist.hist_mutex);
			spin_lock_init(&vig[i].pp_res.hist.hist_lock);
		}
	}
exit:
	mutex_unlock(&mdss_pp_mutex);
	return ret;
}
/*
 * mdss_mdp_pp_term() - release the global PP resources allocated by
 * mdss_mdp_pp_init().
 */
void mdss_mdp_pp_term(struct device *dev)
{
	/*
	 * Fix: condition was inverted ("if (!mdss_pp_res)"), so the
	 * allocation was only "freed" when it did not exist.
	 */
	if (mdss_pp_res) {
		mutex_lock(&mdss_pp_mutex);
		devm_kfree(dev, mdss_pp_res);
		mdss_pp_res = NULL;
		mutex_unlock(&mdss_pp_mutex);
	}
}
/*
 * pp_get_dspp_num() - find the DSPP backing a logical display.
 *
 * Writes the first mixer id (< MDSS_MDP_MAX_DSPP) attached to @disp_num
 * into *dspp_num. Returns 0 on success, -EPERM when the display has no
 * mixers or none of them is backed by a DSPP.
 */
static int pp_get_dspp_num(u32 disp_num, u32 *dspp_num)
{
	u32 cnt, ids[MDSS_MDP_INTF_MAX_LAYERMIXER];
	int i;

	cnt = mdss_mdp_get_ctl_mixers(disp_num, ids);
	if (cnt == 0)
		return -EPERM;

	/* only read the first mixer */
	for (i = 0; i < cnt; i++) {
		if (ids[i] < MDSS_MDP_MAX_DSPP) {
			*dspp_num = ids[i];
			return 0;
		}
	}
	return -EPERM;
}
/*
 * mdss_mdp_pa_config() - read or stage a picture-adjustment configuration.
 *
 * READ: fetch hue/sat/val/cont from the display's DSPP registers into
 * @config and set *copyback. Otherwise: cache the config and mark the
 * display's PA block dirty. On success a pp_setup pass is triggered.
 */
int mdss_mdp_pa_config(struct mdss_mdp_ctl *ctl, struct mdp_pa_cfg_data *config,
			u32 *copyback)
{
	int rc = 0;
	u32 addr, disp_num, dspp_num = 0;

	if (!ctl)
		return -EINVAL;
	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
			(config->block >= MDP_BLOCK_MAX))
		return -EINVAL;

	mutex_lock(&mdss_pp_mutex);
	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;

	if (!(config->pa_data.flags & MDP_PP_OPS_READ)) {
		/* write path: stage config, applied on next pp_setup */
		mdss_pp_res->pa_disp_cfg[disp_num] = config->pa_data;
		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_PA;
		goto out;
	}

	rc = pp_get_dspp_num(disp_num, &dspp_num);
	if (rc) {
		pr_err("%s, no dspp connects to disp %d",
			__func__, disp_num);
		goto out;
	}
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
	addr = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
		MDSS_MDP_REG_DSPP_PA_BASE;
	config->pa_data.hue_adj = MDSS_MDP_REG_READ(addr);
	config->pa_data.sat_adj = MDSS_MDP_REG_READ(addr + 4);
	config->pa_data.val_adj = MDSS_MDP_REG_READ(addr + 8);
	config->pa_data.cont_adj = MDSS_MDP_REG_READ(addr + 12);
	*copyback = 1;
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
out:
	mutex_unlock(&mdss_pp_mutex);
	if (!rc)
		mdss_mdp_pp_setup(ctl);
	return rc;
}
/*
 * pp_read_pcc_regs() - read the PCC coefficient registers into @cfg_ptr.
 *
 * Each coefficient group occupies a 0x10-aligned slot with the r/g/b
 * channels at +0/+4/+8 within the slot.
 */
static void pp_read_pcc_regs(u32 offset,
				struct mdp_pcc_cfg_data *cfg_ptr)
{
	cfg_ptr->r.c = MDSS_MDP_REG_READ(offset + 0x00);
	cfg_ptr->g.c = MDSS_MDP_REG_READ(offset + 0x04);
	cfg_ptr->b.c = MDSS_MDP_REG_READ(offset + 0x08);

	cfg_ptr->r.r = MDSS_MDP_REG_READ(offset + 0x10);
	cfg_ptr->g.r = MDSS_MDP_REG_READ(offset + 0x14);
	cfg_ptr->b.r = MDSS_MDP_REG_READ(offset + 0x18);

	cfg_ptr->r.g = MDSS_MDP_REG_READ(offset + 0x20);
	cfg_ptr->g.g = MDSS_MDP_REG_READ(offset + 0x24);
	cfg_ptr->b.g = MDSS_MDP_REG_READ(offset + 0x28);

	cfg_ptr->r.b = MDSS_MDP_REG_READ(offset + 0x30);
	cfg_ptr->g.b = MDSS_MDP_REG_READ(offset + 0x34);
	cfg_ptr->b.b = MDSS_MDP_REG_READ(offset + 0x38);

	cfg_ptr->r.rr = MDSS_MDP_REG_READ(offset + 0x40);
	cfg_ptr->g.rr = MDSS_MDP_REG_READ(offset + 0x44);
	cfg_ptr->b.rr = MDSS_MDP_REG_READ(offset + 0x48);

	cfg_ptr->r.rg = MDSS_MDP_REG_READ(offset + 0x50);
	cfg_ptr->g.rg = MDSS_MDP_REG_READ(offset + 0x54);
	cfg_ptr->b.rg = MDSS_MDP_REG_READ(offset + 0x58);

	cfg_ptr->r.rb = MDSS_MDP_REG_READ(offset + 0x60);
	cfg_ptr->g.rb = MDSS_MDP_REG_READ(offset + 0x64);
	cfg_ptr->b.rb = MDSS_MDP_REG_READ(offset + 0x68);

	cfg_ptr->r.gg = MDSS_MDP_REG_READ(offset + 0x70);
	cfg_ptr->g.gg = MDSS_MDP_REG_READ(offset + 0x74);
	cfg_ptr->b.gg = MDSS_MDP_REG_READ(offset + 0x78);

	cfg_ptr->r.gb = MDSS_MDP_REG_READ(offset + 0x80);
	cfg_ptr->g.gb = MDSS_MDP_REG_READ(offset + 0x84);
	cfg_ptr->b.gb = MDSS_MDP_REG_READ(offset + 0x88);

	cfg_ptr->r.bb = MDSS_MDP_REG_READ(offset + 0x90);
	cfg_ptr->g.bb = MDSS_MDP_REG_READ(offset + 0x94);
	cfg_ptr->b.bb = MDSS_MDP_REG_READ(offset + 0x98);

	cfg_ptr->r.rgb_0 = MDSS_MDP_REG_READ(offset + 0xA0);
	cfg_ptr->g.rgb_0 = MDSS_MDP_REG_READ(offset + 0xA4);
	cfg_ptr->b.rgb_0 = MDSS_MDP_REG_READ(offset + 0xA8);

	cfg_ptr->r.rgb_1 = MDSS_MDP_REG_READ(offset + 0xB0);
	cfg_ptr->g.rgb_1 = MDSS_MDP_REG_READ(offset + 0xB4);
	cfg_ptr->b.rgb_1 = MDSS_MDP_REG_READ(offset + 0xB8);
}
/*
 * pp_update_pcc_regs() - write the PCC coefficient registers from @cfg_ptr.
 *
 * Mirror image of pp_read_pcc_regs(): coefficient groups at 0x10 strides,
 * r/g/b channels at +0/+4/+8 within each group.
 */
static void pp_update_pcc_regs(u32 offset,
				struct mdp_pcc_cfg_data *cfg_ptr)
{
	MDSS_MDP_REG_WRITE(offset + 0x00, cfg_ptr->r.c);
	MDSS_MDP_REG_WRITE(offset + 0x04, cfg_ptr->g.c);
	MDSS_MDP_REG_WRITE(offset + 0x08, cfg_ptr->b.c);

	MDSS_MDP_REG_WRITE(offset + 0x10, cfg_ptr->r.r);
	MDSS_MDP_REG_WRITE(offset + 0x14, cfg_ptr->g.r);
	MDSS_MDP_REG_WRITE(offset + 0x18, cfg_ptr->b.r);

	MDSS_MDP_REG_WRITE(offset + 0x20, cfg_ptr->r.g);
	MDSS_MDP_REG_WRITE(offset + 0x24, cfg_ptr->g.g);
	MDSS_MDP_REG_WRITE(offset + 0x28, cfg_ptr->b.g);

	MDSS_MDP_REG_WRITE(offset + 0x30, cfg_ptr->r.b);
	MDSS_MDP_REG_WRITE(offset + 0x34, cfg_ptr->g.b);
	MDSS_MDP_REG_WRITE(offset + 0x38, cfg_ptr->b.b);

	MDSS_MDP_REG_WRITE(offset + 0x40, cfg_ptr->r.rr);
	MDSS_MDP_REG_WRITE(offset + 0x44, cfg_ptr->g.rr);
	MDSS_MDP_REG_WRITE(offset + 0x48, cfg_ptr->b.rr);

	MDSS_MDP_REG_WRITE(offset + 0x50, cfg_ptr->r.rg);
	MDSS_MDP_REG_WRITE(offset + 0x54, cfg_ptr->g.rg);
	MDSS_MDP_REG_WRITE(offset + 0x58, cfg_ptr->b.rg);

	MDSS_MDP_REG_WRITE(offset + 0x60, cfg_ptr->r.rb);
	MDSS_MDP_REG_WRITE(offset + 0x64, cfg_ptr->g.rb);
	MDSS_MDP_REG_WRITE(offset + 0x68, cfg_ptr->b.rb);

	MDSS_MDP_REG_WRITE(offset + 0x70, cfg_ptr->r.gg);
	MDSS_MDP_REG_WRITE(offset + 0x74, cfg_ptr->g.gg);
	MDSS_MDP_REG_WRITE(offset + 0x78, cfg_ptr->b.gg);

	MDSS_MDP_REG_WRITE(offset + 0x80, cfg_ptr->r.gb);
	MDSS_MDP_REG_WRITE(offset + 0x84, cfg_ptr->g.gb);
	MDSS_MDP_REG_WRITE(offset + 0x88, cfg_ptr->b.gb);

	MDSS_MDP_REG_WRITE(offset + 0x90, cfg_ptr->r.bb);
	MDSS_MDP_REG_WRITE(offset + 0x94, cfg_ptr->g.bb);
	MDSS_MDP_REG_WRITE(offset + 0x98, cfg_ptr->b.bb);

	MDSS_MDP_REG_WRITE(offset + 0xA0, cfg_ptr->r.rgb_0);
	MDSS_MDP_REG_WRITE(offset + 0xA4, cfg_ptr->g.rgb_0);
	MDSS_MDP_REG_WRITE(offset + 0xA8, cfg_ptr->b.rgb_0);

	MDSS_MDP_REG_WRITE(offset + 0xB0, cfg_ptr->r.rgb_1);
	MDSS_MDP_REG_WRITE(offset + 0xB4, cfg_ptr->g.rgb_1);
	MDSS_MDP_REG_WRITE(offset + 0xB8, cfg_ptr->b.rgb_1);
}
/*
 * mdss_mdp_pcc_config() - read or stage a polynomial color-correction config.
 *
 * READ: dump the DSPP PCC registers into @config and set *copyback.
 * Otherwise: cache the config and mark PCC dirty for the display.
 * On success a pp_setup pass is triggered.
 */
int mdss_mdp_pcc_config(struct mdss_mdp_ctl *ctl,
			struct mdp_pcc_cfg_data *config,
			u32 *copyback)
{
	int rc = 0;
	u32 addr, disp_num, dspp_num = 0;

	if (!ctl)
		return -EINVAL;
	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
			(config->block >= MDP_BLOCK_MAX))
		return -EINVAL;

	mutex_lock(&mdss_pp_mutex);
	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;

	if (!(config->ops & MDP_PP_OPS_READ)) {
		mdss_pp_res->pcc_disp_cfg[disp_num] = *config;
		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_PCC;
		goto out;
	}

	rc = pp_get_dspp_num(disp_num, &dspp_num);
	if (rc) {
		pr_err("%s, no dspp connects to disp %d",
			__func__, disp_num);
		goto out;
	}
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
	addr = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
		MDSS_MDP_REG_DSPP_PCC_BASE;
	pp_read_pcc_regs(addr, config);
	*copyback = 1;
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
out:
	mutex_unlock(&mdss_pp_mutex);
	if (!rc)
		mdss_mdp_pp_setup(ctl);
	return rc;
}
/*
 * pp_read_igc_lut() - read the IGC LUT planes for one block.
 *
 * Each plane register is a FIFO: after writing the index-update word,
 * repeated reads at the same address return successive LUT entries.
 * c0 and c1 are packed into c0_c1_data (low/high 12-bit halves); c2 is
 * stored separately.
 */
static void pp_read_igc_lut(struct mdp_igc_lut_data *cfg,
				u32 offset, u32 blk_idx)
{
	int i;
	u32 sel;

	/* INDEX_UPDATE & VALUE_UPDATEN, other blocks masked out */
	sel = (3 << 24) | (((~(1 << blk_idx)) & 0x7) << 28);

	/* plane 0: c0 in the low 12 bits */
	MDSS_MDP_REG_WRITE(offset, sel);
	for (i = 0; i < cfg->len; i++)
		cfg->c0_c1_data[i] = MDSS_MDP_REG_READ(offset) & 0xFFF;

	/* plane 1: c1 packed into bits 16..27 */
	offset += 0x4;
	MDSS_MDP_REG_WRITE(offset, sel);
	for (i = 0; i < cfg->len; i++)
		cfg->c0_c1_data[i] |=
			(MDSS_MDP_REG_READ(offset) & 0xFFF) << 16;

	/* plane 2: c2 */
	offset += 0x4;
	MDSS_MDP_REG_WRITE(offset, sel);
	for (i = 0; i < cfg->len; i++)
		cfg->c2_data[i] = MDSS_MDP_REG_READ(offset) & 0xFFF;
}
/*
 * pp_update_igc_lut() - write the IGC LUT planes for one block.
 *
 * The first write of each plane carries the INDEX_UPDATE bit (1 << 25)
 * to reset the LUT index; subsequent writes stream in the remaining
 * entries at the same address.
 */
static void pp_update_igc_lut(struct mdp_igc_lut_data *cfg,
				u32 offset, u32 blk_idx)
{
	int i;
	u32 blk_sel;

	/* mask out the other IGC consumers of this shared LUT block */
	blk_sel = ((~(1 << blk_idx)) & 0x7) << 28;

	/* plane 0: c0 (low 12 bits of c0_c1_data) */
	MDSS_MDP_REG_WRITE(offset,
		(cfg->c0_c1_data[0] & 0xFFF) | blk_sel | (1 << 25));
	for (i = 1; i < cfg->len; i++)
		MDSS_MDP_REG_WRITE(offset,
			(cfg->c0_c1_data[i] & 0xFFF) | blk_sel);

	/* plane 1: c1 (high 12 bits of c0_c1_data) */
	offset += 0x4;
	MDSS_MDP_REG_WRITE(offset,
		((cfg->c0_c1_data[0] >> 16) & 0xFFF) | blk_sel | (1 << 25));
	for (i = 1; i < cfg->len; i++)
		MDSS_MDP_REG_WRITE(offset,
			((cfg->c0_c1_data[i] >> 16) & 0xFFF) | blk_sel);

	/* plane 2: c2 */
	offset += 0x4;
	MDSS_MDP_REG_WRITE(offset,
		(cfg->c2_data[0] & 0xFFF) | blk_sel | (1 << 25));
	for (i = 1; i < cfg->len; i++)
		MDSS_MDP_REG_WRITE(offset,
			(cfg->c2_data[i] & 0xFFF) | blk_sel);
}
/*
 * mdss_mdp_limited_lut_igc_config() - apply the built-in limited-range
 * IGC LUT to the display driven by @ctl.
 *
 * Returns the result of mdss_mdp_igc_lut_config(), or -EINVAL for a
 * NULL ctl.
 */
int mdss_mdp_limited_lut_igc_config(struct mdss_mdp_ctl *ctl)
{
	int ret = 0;
	u32 copyback = 0;
	u32 copy_from_kernel = 1;
	struct mdp_igc_lut_data config;

	if (!ctl)
		return -EINVAL;

	config.len = IGC_LUT_ENTRIES;
	config.ops = MDP_PP_OPS_WRITE | MDP_PP_OPS_ENABLE;
	config.block = (ctl->mfd->index) + MDP_LOGICAL_BLOCK_DISP_0;
	config.c0_c1_data = igc_limited;
	config.c2_data = igc_limited;

	/*
	 * Fix: "&copyback" had been corrupted to "(c)back" (a stray
	 * copyright-sign mojibake of the "&copy" prefix), which does not
	 * compile; restore the address-of expression.
	 */
	ret = mdss_mdp_igc_lut_config(ctl, &config, &copyback,
					copy_from_kernel);
	return ret;
}
/*
 * mdss_mdp_igc_lut_config() - read or stage an IGC LUT configuration.
 *
 * READ: dump the selected IGC table (main LUT or one of the two ROM
 * tables) into the user's buffers and set *copyback.
 * Otherwise: copy the LUT data in (from kernel or user space depending on
 * @copy_from_kernel), repoint the cached config at the internal tables,
 * and mark IGC dirty. On success a pp_setup pass is triggered.
 *
 * Returns 0 on success, -EINVAL for bad args, -EFAULT on copy failures.
 */
int mdss_mdp_igc_lut_config(struct mdss_mdp_ctl *ctl,
				struct mdp_igc_lut_data *config,
				u32 *copyback, u32 copy_from_kernel)
{
	int ret = 0;
	u32 tbl_idx, igc_offset, disp_num, dspp_num = 0;
	struct mdp_igc_lut_data local_cfg;

	if (!ctl)
		return -EINVAL;
	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
		(config->block >= MDP_BLOCK_MAX))
		return -EINVAL;
	if (config->len != IGC_LUT_ENTRIES)
		return -EINVAL;

	mutex_lock(&mdss_pp_mutex);
	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
	if (config->ops & MDP_PP_OPS_READ) {
		ret = pp_get_dspp_num(disp_num, &dspp_num);
		if (ret) {
			pr_err("%s, no dspp connects to disp %d",
				__func__, disp_num);
			goto igc_config_exit;
		}
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
		if (config->ops & MDP_PP_IGC_FLAG_ROM0)
			tbl_idx = 1;
		else if (config->ops & MDP_PP_IGC_FLAG_ROM1)
			tbl_idx = 2;
		else
			tbl_idx = 0;
		igc_offset = MDSS_MDP_REG_IGC_DSPP_BASE + (0x10 * tbl_idx);
		local_cfg = *config;
		local_cfg.c0_c1_data =
			&mdss_pp_res->igc_lut_c0c1[disp_num][0];
		local_cfg.c2_data =
			&mdss_pp_res->igc_lut_c2[disp_num][0];
		pp_read_igc_lut(&local_cfg, igc_offset, dspp_num);
		/*
		 * Fix: the source buffers were swapped — c2 data was copied
		 * into the user's c0_c1 buffer and vice versa. Each user
		 * buffer now receives its matching plane.
		 */
		if (copy_to_user(config->c0_c1_data, local_cfg.c0_c1_data,
			config->len * sizeof(u32))) {
			ret = -EFAULT;
			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
			goto igc_config_exit;
		}
		if (copy_to_user(config->c2_data, local_cfg.c2_data,
			config->len * sizeof(u32))) {
			ret = -EFAULT;
			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
			goto igc_config_exit;
		}
		*copyback = 1;
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
	} else {
		if (copy_from_kernel) {
			memcpy(&mdss_pp_res->igc_lut_c0c1[disp_num][0],
				config->c0_c1_data,
				config->len * sizeof(u32));
			memcpy(&mdss_pp_res->igc_lut_c2[disp_num][0],
				config->c2_data, config->len * sizeof(u32));
		} else {
			if (copy_from_user(
				&mdss_pp_res->igc_lut_c0c1[disp_num][0],
				config->c0_c1_data,
				config->len * sizeof(u32))) {
				ret = -EFAULT;
				goto igc_config_exit;
			}
			if (copy_from_user(
				&mdss_pp_res->igc_lut_c2[disp_num][0],
				config->c2_data,
				config->len * sizeof(u32))) {
				ret = -EFAULT;
				goto igc_config_exit;
			}
		}
		/* cached config must point at the internal tables */
		mdss_pp_res->igc_disp_cfg[disp_num] = *config;
		mdss_pp_res->igc_disp_cfg[disp_num].c0_c1_data =
			&mdss_pp_res->igc_lut_c0c1[disp_num][0];
		mdss_pp_res->igc_disp_cfg[disp_num].c2_data =
			&mdss_pp_res->igc_lut_c2[disp_num][0];
		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_IGC;
	}
igc_config_exit:
	mutex_unlock(&mdss_pp_mutex);
	if (!ret)
		mdss_mdp_pp_setup(ctl);
	return ret;
}
/*
 * pp_update_gc_one_lut() - stream one GC channel's segment table into a
 * LUT register group (x_start, slope, offset at +0/+4/+8).
 *
 * The register's current index (bits 16..19) says where the hardware
 * pointer sits; entries are written starting there and wrapping, which is
 * the same write order as start..end followed by 0..start-1.
 */
static void pp_update_gc_one_lut(u32 offset,
		struct mdp_ar_gc_lut_data *lut_data)
{
	int i, start;

	start = (MDSS_MDP_REG_READ(offset) >> 16) & 0xF;
	for (i = 0; i < GC_LUT_SEGMENTS; i++)
		MDSS_MDP_REG_WRITE(offset,
			lut_data[(start + i) % GC_LUT_SEGMENTS].x_start);

	offset += 4;
	start = (MDSS_MDP_REG_READ(offset) >> 16) & 0xF;
	for (i = 0; i < GC_LUT_SEGMENTS; i++)
		MDSS_MDP_REG_WRITE(offset,
			lut_data[(start + i) % GC_LUT_SEGMENTS].slope);

	offset += 4;
	start = (MDSS_MDP_REG_READ(offset) >> 16) & 0xF;
	for (i = 0; i < GC_LUT_SEGMENTS; i++)
		MDSS_MDP_REG_WRITE(offset,
			lut_data[(start + i) % GC_LUT_SEGMENTS].offset);
}
/*
 * pp_update_argc_lut() - write all three GC channels; each channel's
 * register group is 0x10 apart (r, g, b in order).
 */
static void pp_update_argc_lut(u32 offset, struct mdp_pgc_lut_data *config)
{
	struct mdp_ar_gc_lut_data *chan[] = {
		config->r_data, config->g_data, config->b_data
	};
	int i;

	for (i = 0; i < 3; i++)
		pp_update_gc_one_lut(offset + i * 0x10, chan[i]);
}
/*
 * pp_read_gc_one_lut() - read back one GC channel's segment table.
 *
 * The first read of each register yields the hardware's current index in
 * bits 16..19 along with that entry's value; subsequent reads return the
 * remaining entries in wrap-around order from that index.
 */
static void pp_read_gc_one_lut(u32 offset,
		struct mdp_ar_gc_lut_data *gc_data)
{
	int i, start, data;

	data = MDSS_MDP_REG_READ(offset);
	start = (data >> 16) & 0xF;
	gc_data[start].x_start = data & 0xFFF;
	for (i = 1; i < GC_LUT_SEGMENTS; i++) {
		data = MDSS_MDP_REG_READ(offset);
		gc_data[(start + i) % GC_LUT_SEGMENTS].x_start = data & 0xFFF;
	}

	offset += 4;
	data = MDSS_MDP_REG_READ(offset);
	start = (data >> 16) & 0xF;
	gc_data[start].slope = data & 0x7FFF;
	for (i = 1; i < GC_LUT_SEGMENTS; i++) {
		data = MDSS_MDP_REG_READ(offset);
		gc_data[(start + i) % GC_LUT_SEGMENTS].slope = data & 0x7FFF;
	}

	offset += 4;
	data = MDSS_MDP_REG_READ(offset);
	start = (data >> 16) & 0xF;
	gc_data[start].offset = data & 0x7FFF;
	for (i = 1; i < GC_LUT_SEGMENTS; i++) {
		data = MDSS_MDP_REG_READ(offset);
		gc_data[(start + i) % GC_LUT_SEGMENTS].offset = data & 0x7FFF;
	}
}
/*
 * pp_read_argc_lut() - read back all three GC channels; register groups
 * are 0x10 apart (r, g, b in order). Always returns 0.
 */
static int pp_read_argc_lut(struct mdp_pgc_lut_data *config, u32 offset)
{
	struct mdp_ar_gc_lut_data *chan[] = {
		config->r_data, config->g_data, config->b_data
	};
	int i;

	for (i = 0; i < 3; i++)
		pp_read_gc_one_lut(offset + i * 0x10, chan[i]);
	return 0;
}
/* Note: Assumes that its inputs have been checked by calling function */
/*
 * pp_update_hist_lut() - stream the enhancement-histogram LUT entries
 * into the block's LUT FIFO register, then trigger the double-buffer
 * swap (the swap register offset differs between DSPP and SSPP blocks).
 */
static void pp_update_hist_lut(char __iomem *offset,
				struct mdp_hist_lut_data *cfg)
{
	int n;
	u32 swap_off;

	for (n = 0; n < ENHIST_LUT_ENTRIES; n++)
		writel_relaxed(cfg->data[n], offset);

	/* swap */
	swap_off = (PP_LOCAT(cfg->block) == MDSS_PP_DSPP_CFG) ? 4 : 16;
	writel_relaxed(1, offset + swap_off);
}
/*
 * mdss_mdp_argc_config() - read or stage a gamma (GC) LUT for either the
 * layer mixer (ARGC) or the DSPP (PGC), selected by PP_LOCAT(block).
 *
 * READ: fetch the LUT from hardware into internal tables, then copy each
 * channel out to the caller's buffers and set *copyback. Otherwise: copy
 * the channel tables in from user space, cache the config pointing at the
 * internal tables, and leave the dirty flag (set below) to trigger a
 * rewrite. On success a pp_setup pass is triggered.
 */
int mdss_mdp_argc_config(struct mdss_mdp_ctl *ctl,
				struct mdp_pgc_lut_data *config,
				u32 *copyback)
{
int ret = 0;
u32 argc_offset = 0, disp_num, dspp_num = 0;
struct mdp_pgc_lut_data local_cfg;
struct mdp_pgc_lut_data *pgc_ptr;
u32 tbl_size;
if (!ctl)
return -EINVAL;
if ((PP_BLOCK(config->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
(PP_BLOCK(config->block) >= MDP_BLOCK_MAX))
return -EINVAL;
mutex_lock(&mdss_pp_mutex);
disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
/*
 * NOTE(review): argc_offset is computed with dspp_num still 0 here;
 * pp_get_dspp_num() only assigns it later on the READ path, after the
 * offset is fixed. Reads therefore always target mixer/DSPP 0 —
 * verify whether this matches the intended single-DSPP assumption.
 */
switch (PP_LOCAT(config->block)) {
case MDSS_PP_LM_CFG:
argc_offset = MDSS_MDP_REG_LM_OFFSET(dspp_num) +
MDSS_MDP_REG_LM_GC_LUT_BASE;
pgc_ptr = &mdss_pp_res->argc_disp_cfg[disp_num];
mdss_pp_res->pp_disp_flags[disp_num] |=
PP_FLAGS_DIRTY_ARGC;
break;
case MDSS_PP_DSPP_CFG:
argc_offset = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
MDSS_MDP_REG_DSPP_GC_BASE;
pgc_ptr = &mdss_pp_res->pgc_disp_cfg[disp_num];
mdss_pp_res->pp_disp_flags[disp_num] |=
PP_FLAGS_DIRTY_PGC;
break;
default:
goto argc_config_exit;
break;
}
tbl_size = GC_LUT_SEGMENTS * sizeof(struct mdp_ar_gc_lut_data);
if (config->flags & MDP_PP_OPS_READ) {
ret = pp_get_dspp_num(disp_num, &dspp_num);
if (ret) {
pr_err("%s, no dspp connects to disp %d",
__func__, disp_num);
goto argc_config_exit;
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
/* read into the internal per-display tables, then copy out */
local_cfg = *config;
local_cfg.r_data =
&mdss_pp_res->gc_lut_r[disp_num][0];
local_cfg.g_data =
&mdss_pp_res->gc_lut_g[disp_num][0];
local_cfg.b_data =
&mdss_pp_res->gc_lut_b[disp_num][0];
pp_read_argc_lut(&local_cfg, argc_offset);
if (copy_to_user(config->r_data,
&mdss_pp_res->gc_lut_r[disp_num][0], tbl_size)) {
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
ret = -EFAULT;
goto argc_config_exit;
}
if (copy_to_user(config->g_data,
&mdss_pp_res->gc_lut_g[disp_num][0], tbl_size)) {
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
ret = -EFAULT;
goto argc_config_exit;
}
if (copy_to_user(config->b_data,
&mdss_pp_res->gc_lut_b[disp_num][0], tbl_size)) {
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
ret = -EFAULT;
goto argc_config_exit;
}
*copyback = 1;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
} else {
if (copy_from_user(&mdss_pp_res->gc_lut_r[disp_num][0],
config->r_data, tbl_size)) {
ret = -EFAULT;
goto argc_config_exit;
}
if (copy_from_user(&mdss_pp_res->gc_lut_g[disp_num][0],
config->g_data, tbl_size)) {
ret = -EFAULT;
goto argc_config_exit;
}
if (copy_from_user(&mdss_pp_res->gc_lut_b[disp_num][0],
config->b_data, tbl_size)) {
ret = -EFAULT;
goto argc_config_exit;
}
/* cached config must point at the internal tables, not user memory */
*pgc_ptr = *config;
pgc_ptr->r_data =
&mdss_pp_res->gc_lut_r[disp_num][0];
pgc_ptr->g_data =
&mdss_pp_res->gc_lut_g[disp_num][0];
pgc_ptr->b_data =
&mdss_pp_res->gc_lut_b[disp_num][0];
}
argc_config_exit:
mutex_unlock(&mdss_pp_mutex);
if (!ret)
mdss_mdp_pp_setup(ctl);
return ret;
}
/*
 * mdss_mdp_hist_lut_config() - read or stage an enhancement-histogram LUT.
 *
 * READ: drain the DSPP HIST LUT FIFO into the internal table and copy it
 * to the caller, setting *copyback. Otherwise: copy the LUT in, cache the
 * config (repointed at the internal table), and mark ENHIST dirty.
 * On success a pp_setup pass is triggered.
 */
int mdss_mdp_hist_lut_config(struct mdss_mdp_ctl *ctl,
			struct mdp_hist_lut_data *config,
			u32 *copyback)
{
	int i, rc = 0;
	u32 addr, disp_num, dspp_num = 0;

	if (!ctl)
		return -EINVAL;
	if ((PP_BLOCK(config->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
		(PP_BLOCK(config->block) >= MDP_BLOCK_MAX))
		return -EINVAL;

	mutex_lock(&mdss_pp_mutex);
	disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;

	if (!(config->ops & MDP_PP_OPS_READ)) {
		if (copy_from_user(&mdss_pp_res->enhist_lut[disp_num][0],
			config->data,
			ENHIST_LUT_ENTRIES * sizeof(u32))) {
			rc = -EFAULT;
			goto out;
		}
		mdss_pp_res->enhist_disp_cfg[disp_num] = *config;
		mdss_pp_res->enhist_disp_cfg[disp_num].data =
			&mdss_pp_res->enhist_lut[disp_num][0];
		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_ENHIST;
		goto out;
	}

	rc = pp_get_dspp_num(disp_num, &dspp_num);
	if (rc) {
		pr_err("%s, no dspp connects to disp %d",
			__func__, disp_num);
		goto out;
	}
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
	addr = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
		MDSS_MDP_REG_DSPP_HIST_LUT_BASE;
	/* LUT register is a FIFO: repeated reads return successive entries */
	for (i = 0; i < ENHIST_LUT_ENTRIES; i++)
		mdss_pp_res->enhist_lut[disp_num][i] =
			MDSS_MDP_REG_READ(addr);
	if (copy_to_user(config->data,
		&mdss_pp_res->enhist_lut[disp_num][0],
		ENHIST_LUT_ENTRIES * sizeof(u32)))
		rc = -EFAULT;
	else
		*copyback = 1;
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
out:
	mutex_unlock(&mdss_pp_mutex);
	if (!rc)
		mdss_mdp_pp_setup(ctl);
	return rc;
}
/*
 * mdss_mdp_dither_config() - stage a dither configuration for a display.
 *
 * Readback is not supported for the dither block (-ENOTSUPP). Otherwise
 * caches the config, marks DITHER dirty, and triggers a pp_setup pass.
 */
int mdss_mdp_dither_config(struct mdss_mdp_ctl *ctl,
				struct mdp_dither_cfg_data *config,
				u32 *copyback)
{
	u32 disp;

	if (!ctl)
		return -EINVAL;
	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
			(config->block >= MDP_BLOCK_MAX))
		return -EINVAL;
	if (config->flags & MDP_PP_OPS_READ)
		return -ENOTSUPP;

	disp = config->block - MDP_LOGICAL_BLOCK_DISP_0;
	mutex_lock(&mdss_pp_mutex);
	mdss_pp_res->dither_disp_cfg[disp] = *config;
	mdss_pp_res->pp_disp_flags[disp] |= PP_FLAGS_DIRTY_DITHER;
	mutex_unlock(&mdss_pp_mutex);
	mdss_mdp_pp_setup(ctl);
	return 0;
}
/*
 * Read back or stage the gamut-mapping tables for the display named by
 * @config->block.
 *
 * READ:  streams the red, green and blue sub-tables out of the DSPP
 *        gamut registers into the caller's buffers and sets *copyback.
 * WRITE: copies the user tables into the driver-owned flat table in
 *        mdss_pp_res and marks the display dirty; hardware programming
 *        happens later in mdss_mdp_pp_setup().
 *
 * Returns 0 on success or a negative errno.
 */
int mdss_mdp_gamut_config(struct mdss_mdp_ctl *ctl,
				struct mdp_gamut_cfg_data *config,
					u32 *copyback)
{
	int i, j, size_total = 0, ret = 0;
	u32 offset, disp_num, dspp_num = 0;
	uint16_t *tbl_off;
	struct mdp_gamut_cfg_data local_cfg;
	if (!ctl)
		return -EINVAL;
	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
		(config->block >= MDP_BLOCK_MAX))
		return -EINVAL;
	/* the per-table sizes must exactly cover the full gamut table */
	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++)
		size_total += config->tbl_size[i];
	if (size_total != GAMUT_TOTAL_TABLE_SIZE)
		return -EINVAL;
	mutex_lock(&mdss_pp_mutex);
	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
	if (config->flags & MDP_PP_OPS_READ) {
		ret = pp_get_dspp_num(disp_num, &dspp_num);
		if (ret) {
			pr_err("%s, no dspp connects to disp %d",
				__func__, disp_num);
			goto gamut_config_exit;
		}
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
		offset = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
			MDSS_MDP_REG_DSPP_GAMUT_BASE;
		/*
		 * Each sub-table is drained by repeated reads of a single
		 * register; the register address advances by 4 bytes per
		 * sub-table. NOTE(review): this assumes the gamut data
		 * register auto-increments on read — confirm against the
		 * DSPP hardware documentation.
		 */
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			for (j = 0; j < config->tbl_size[i]; j++)
				config->r_tbl[i][j] =
					(u16)MDSS_MDP_REG_READ(offset);
			offset += 4;
		}
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			for (j = 0; j < config->tbl_size[i]; j++)
				config->g_tbl[i][j] =
					(u16)MDSS_MDP_REG_READ(offset);
			offset += 4;
		}
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			for (j = 0; j < config->tbl_size[i]; j++)
				config->b_tbl[i][j] =
					(u16)MDSS_MDP_REG_READ(offset);
			offset += 4;
		}
		*copyback = 1;
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
	} else {
		/*
		 * Build a local copy whose table pointers reference the
		 * driver-owned flat buffer; user data is copied in table
		 * by table (r, then g, then b).
		 */
		local_cfg = *config;
		tbl_off = mdss_pp_res->gamut_tbl[disp_num];
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			local_cfg.r_tbl[i] = tbl_off;
			if (copy_from_user(tbl_off, config->r_tbl[i],
				config->tbl_size[i] * sizeof(uint16_t))) {
				ret = -EFAULT;
				goto gamut_config_exit;
			}
			tbl_off += local_cfg.tbl_size[i];
		}
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			local_cfg.g_tbl[i] = tbl_off;
			if (copy_from_user(tbl_off, config->g_tbl[i],
				config->tbl_size[i] * sizeof(uint16_t))) {
				ret = -EFAULT;
				goto gamut_config_exit;
			}
			tbl_off += local_cfg.tbl_size[i];
		}
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			local_cfg.b_tbl[i] = tbl_off;
			if (copy_from_user(tbl_off, config->b_tbl[i],
				config->tbl_size[i] * sizeof(uint16_t))) {
				ret = -EFAULT;
				goto gamut_config_exit;
			}
			tbl_off += local_cfg.tbl_size[i];
		}
		mdss_pp_res->gamut_disp_cfg[disp_num] = local_cfg;
		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_GAMUT;
	}
gamut_config_exit:
	mutex_unlock(&mdss_pp_mutex);
	if (!ret)
		mdss_mdp_pp_setup(ctl);
	return ret;
}
/*
 * Drain one full histogram from the data register at @v_base into
 * hist_info->data.
 *
 * The first read returns the starting bin index in bits [31:24] and
 * that bin's count in bits [23:0]; subsequent reads deliver the
 * remaining bins in order, wrapping from HIST_V_SIZE - 1 back to 0.
 */
static void pp_hist_read(char __iomem *v_base,
				struct pp_hist_col_info *hist_info)
{
	int i, i_start;
	u32 data;

	data = readl_relaxed(v_base);
	i_start = data >> 24;
	hist_info->data[i_start] = data & 0xFFFFFF;
	for (i = i_start + 1; i < HIST_V_SIZE; i++)
		hist_info->data[i] = readl_relaxed(v_base) & 0xFFFFFF;
	/*
	 * Fix: read the wrapped-around bins 0..i_start-1 inclusive. The
	 * previous bound (i < i_start - 1) stopped one bin short, leaving
	 * bin i_start-1 stale from the prior collection.
	 */
	for (i = 0; i < i_start; i++)
		hist_info->data[i] = readl_relaxed(v_base) & 0xFFFFFF;
	hist_info->hist_cnt_read++;
}
/* Caller must ensure the relevant MDP clocks are already enabled. */
static int pp_histogram_enable(struct pp_hist_col_info *hist_info,
					struct mdp_histogram_start_req *req,
					u32 shift_bit, char __iomem *ctl_base)
{
	unsigned long irq_flags;
	int rc = 0;

	mutex_lock(&hist_info->hist_mutex);
	if (hist_info->col_en) {
		/* collection is already running on this block */
		pr_info("%s Hist collection has already been enabled %d",
			__func__, (u32) ctl_base);
		rc = -EINVAL;
		goto out;
	}

	/* reset bookkeeping for a fresh collection cycle */
	hist_info->frame_cnt = req->frame_cnt;
	init_completion(&hist_info->comp);
	hist_info->hist_cnt_read = 0;
	hist_info->hist_cnt_sent = 0;
	hist_info->hist_cnt_time = 0;

	spin_lock_irqsave(&hist_info->hist_lock, irq_flags);
	hist_info->read_request = false;
	hist_info->col_state = HIST_RESET;
	hist_info->col_en = true;
	spin_unlock_irqrestore(&hist_info->hist_lock, irq_flags);

	hist_info->is_kick_ready = false;
	/* enable both the done and reset-done interrupt bits */
	mdss_mdp_hist_irq_enable(3 << shift_bit);
	writel_relaxed(req->frame_cnt, ctl_base + 8);
	/* Kick out reset start */
	writel_relaxed(1, ctl_base + 4);
out:
	mutex_unlock(&hist_info->hist_mutex);
	return rc;
}
/*
 * Start histogram collection on the DSPP(s) or VIG pipe(s) selected by
 * req->block. For DSPP collection the display is marked dirty and a
 * pp_setup pass plus a one-frame delay arm the kick; for SSPP, each
 * requested pipe is armed directly.
 *
 * Returns 0 on success or a negative errno.
 */
int mdss_mdp_histogram_start(struct mdss_mdp_ctl *ctl,
				struct mdp_histogram_start_req *req)
{
	u32 done_shift_bit;
	char __iomem *ctl_base;
	struct pp_hist_col_info *hist_info;
	int i, ret = 0;
	u32 disp_num, dspp_num = 0;
	u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
	struct mdss_mdp_pipe *pipe;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (!ctl)
		return -EINVAL;
	if ((PP_BLOCK(req->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
		(PP_BLOCK(req->block) >= MDP_BLOCK_MAX))
		return -EINVAL;

	disp_num = PP_BLOCK(req->block) - MDP_LOGICAL_BLOCK_DISP_0;
	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
	if (!mixer_cnt) {
		pr_err("%s, no dspp connects to disp %d",
			__func__, disp_num);
		ret = -EPERM;
		goto hist_exit;
	}
	if (mixer_cnt >= MDSS_MDP_MAX_DSPP) {
		pr_err("%s, Too many dspp connects to disp %d",
			__func__, mixer_cnt);
		ret = -EPERM;
		goto hist_exit;
	}

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
	if (PP_LOCAT(req->block) == MDSS_PP_SSPP_CFG) {
		i = MDSS_PP_ARG_MASK & req->block;
		if (!i) {
			ret = -EINVAL;
			pr_warn("Must pass pipe arguments, %d", i);
			/* Fix: balance the clock vote taken above; the
			 * original leaked a POWER_ON reference here. */
			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
			goto hist_exit;
		}
		for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, req->block))
				continue;
			pipe = mdss_mdp_pipe_get(mdata, BIT(i));
			if (IS_ERR_OR_NULL(pipe))
				continue;
			if (pipe->num > MDSS_MDP_SSPP_VIG2) {
				/* Fix: release the pipe mapping acquired by
				 * mdss_mdp_pipe_get() before bailing out. */
				mdss_mdp_pipe_unmap(pipe);
				mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
				ret = -EINVAL;
				pr_warn("Invalid Hist pipe (%d)", i);
				goto hist_exit;
			}
			done_shift_bit = (pipe->num * 4);
			hist_info = &pipe->pp_res.hist;
			ctl_base = pipe->base +
				MDSS_MDP_REG_VIG_HIST_CTL_BASE;
			ret = pp_histogram_enable(hist_info, req,
						done_shift_bit, ctl_base);
			mdss_mdp_pipe_unmap(pipe);
		}
	} else if (PP_LOCAT(req->block) == MDSS_PP_DSPP_CFG) {
		for (i = 0; i < mixer_cnt; i++) {
			dspp_num = mixer_id[i];
			done_shift_bit = (dspp_num * 4) + 12;
			hist_info = &mdss_pp_res->dspp_hist[dspp_num];
			ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num) +
				MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
			ret = pp_histogram_enable(hist_info, req,
						done_shift_bit, ctl_base);
			mdss_pp_res->pp_disp_flags[disp_num] |=
							PP_FLAGS_DIRTY_HIST_COL;
		}
	}
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
hist_exit:
	if (!ret && (PP_LOCAT(req->block) == MDSS_PP_DSPP_CFG)) {
		mdss_mdp_pp_setup(ctl);
		/* wait for a frame to let histrogram enable itself */
		/* TODO add hysteresis value to be able to remove this sleep */
		usleep(41666);
		for (i = 0; i < mixer_cnt; i++) {
			dspp_num = mixer_id[i];
			hist_info = &mdss_pp_res->dspp_hist[dspp_num];
			mutex_lock(&hist_info->hist_mutex);
			hist_info->is_kick_ready = true;
			mutex_unlock(&hist_info->hist_mutex);
		}
	} else if (!ret) {
		for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, req->block))
				continue;
			pr_info("PP_ARG(%d) = %d", i, PP_ARG(i, req->block));
			pipe = mdss_mdp_pipe_get(mdata, BIT(i));
			if (IS_ERR_OR_NULL(pipe))
				continue;
			hist_info = &pipe->pp_res.hist;
			hist_info->is_kick_ready = true;
			mdss_mdp_pipe_unmap(pipe);
		}
	}
	return ret;
}
/*
 * Stop histogram collection on one block: wake any blocked reader,
 * mark the state machine unknown, mask the interrupts and cancel the
 * hardware run. Returns -EINVAL if collection was not enabled.
 */
static int pp_histogram_disable(struct pp_hist_col_info *hist_info,
					u32 done_bit, char __iomem *ctl_base)
{
	unsigned long irq_flags;
	int rc = 0;

	mutex_lock(&hist_info->hist_mutex);
	if (!hist_info->col_en) {
		pr_debug("Histogram already disabled (%d)", (u32) ctl_base);
		rc = -EINVAL;
		goto out;
	}
	/* release any reader blocked in pp_hist_collect() */
	complete_all(&hist_info->comp);
	spin_lock_irqsave(&hist_info->hist_lock, irq_flags);
	hist_info->col_en = false;
	hist_info->col_state = HIST_UNKNOWN;
	spin_unlock_irqrestore(&hist_info->hist_lock, irq_flags);
	hist_info->is_kick_ready = false;
	mdss_mdp_hist_irq_disable(done_bit);
	writel_relaxed(BIT(1), ctl_base);/* cancel */
out:
	mutex_unlock(&hist_info->hist_mutex);
	return rc;
}
/*
 * Stop histogram collection on the DSPP(s) or VIG pipe(s) selected by
 * @block. Mirrors mdss_mdp_histogram_start(); for DSPP collection the
 * display is marked dirty and flushed via mdss_mdp_pp_setup().
 *
 * Returns 0 on success or a negative errno.
 */
int mdss_mdp_histogram_stop(struct mdss_mdp_ctl *ctl, u32 block)
{
	int i, ret = 0;
	char __iomem *ctl_base;
	u32 dspp_num, disp_num, done_bit;
	struct pp_hist_col_info *hist_info;
	u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
	struct mdss_mdp_pipe *pipe;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (!ctl)
		return -EINVAL;
	if ((PP_BLOCK(block) < MDP_LOGICAL_BLOCK_DISP_0) ||
		(PP_BLOCK(block) >= MDP_BLOCK_MAX))
		return -EINVAL;

	disp_num = PP_BLOCK(block) - MDP_LOGICAL_BLOCK_DISP_0;
	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
	if (!mixer_cnt) {
		pr_err("%s, no dspp connects to disp %d",
			__func__, disp_num);
		ret = -EPERM;
		goto hist_stop_exit;
	}
	if (mixer_cnt >= MDSS_MDP_MAX_DSPP) {
		pr_err("%s, Too many dspp connects to disp %d",
			__func__, mixer_cnt);
		ret = -EPERM;
		goto hist_stop_exit;
	}
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
	if (PP_LOCAT(block) == MDSS_PP_SSPP_CFG) {
		i = MDSS_PP_ARG_MASK & block;
		if (!i) {
			/* Fix: report the error; the original fell through
			 * with ret == 0 and reported silent success. */
			ret = -EINVAL;
			pr_warn("Must pass pipe arguments, %d", i);
			goto hist_stop_clk;
		}
		for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, block))
				continue;
			pipe = mdss_mdp_pipe_get(mdata, BIT(i));
			if (IS_ERR_OR_NULL(pipe) ||
					pipe->num > MDSS_MDP_SSPP_VIG2) {
				pr_warn("Invalid Hist pipe (%d)", i);
				continue;
			}
			done_bit = 3 << (pipe->num * 4);
			hist_info = &pipe->pp_res.hist;
			ctl_base = pipe->base +
				MDSS_MDP_REG_VIG_HIST_CTL_BASE;
			ret = pp_histogram_disable(hist_info, done_bit,
							ctl_base);
			mdss_mdp_pipe_unmap(pipe);
			if (ret)
				goto hist_stop_clk;
		}
	} else if (PP_LOCAT(block) == MDSS_PP_DSPP_CFG) {
		for (i = 0; i < mixer_cnt; i++) {
			dspp_num = mixer_id[i];
			done_bit = 3 << ((dspp_num * 4) + 12);
			hist_info = &mdss_pp_res->dspp_hist[dspp_num];
			ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num) +
				MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
			ret = pp_histogram_disable(hist_info, done_bit,
							ctl_base);
			if (ret)
				goto hist_stop_clk;
			mdss_pp_res->pp_disp_flags[disp_num] |=
							PP_FLAGS_DIRTY_HIST_COL;
		}
	}
hist_stop_clk:
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
hist_stop_exit:
	if (!ret && (PP_LOCAT(block) == MDSS_PP_DSPP_CFG))
		mdss_mdp_pp_setup(ctl);
	return ret;
}
/*
 * Wait for a completed histogram on @hist_info and drain it into the
 * driver-side cache (hist_info->data).
 *
 * If the cache already holds data (HIST_READY) it is read immediately;
 * otherwise this blocks (killable, with a frame-count-scaled timeout)
 * for the done interrupt, flushing pending pp/pipe updates first so the
 * hardware actually runs. On timeout the SW state is forced back to
 * READY so the stale data is drained and the state machine recovers.
 *
 * Returns 0 on success, -EINVAL if collection is not active, -ETIMEDOUT
 * on a timed-out wait, -EINTR if interrupted, -ENODATA if no data
 * became ready.
 */
static int pp_hist_collect(struct mdss_mdp_ctl *ctl,
				struct mdp_histogram_data *hist,
				struct pp_hist_col_info *hist_info,
				char __iomem *ctl_base)
{
	int wait_ret, ret = 0;
	u32 timeout;
	char __iomem *v_base;
	unsigned long flag;
	struct mdss_pipe_pp_res *res;
	struct mdss_mdp_pipe *pipe;
	mutex_lock(&hist_info->hist_mutex);
	/* collection must be enabled and in a known state */
	if ((hist_info->col_en == 0) ||
			(hist_info->col_state == HIST_UNKNOWN)) {
		ret = -EINVAL;
		goto hist_collect_exit;
	}
	spin_lock_irqsave(&hist_info->hist_lock, flag);
	/* wait for hist done if cache has no data */
	if (hist_info->col_state != HIST_READY) {
		hist_info->read_request = true;
		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
		timeout = HIST_WAIT_TIMEOUT(hist_info->frame_cnt);
		/* drop the mutex while blocking so the ISR path can run */
		mutex_unlock(&hist_info->hist_mutex);
		/* flush updates before wait*/
		if (PP_LOCAT(hist->block) == MDSS_PP_DSPP_CFG)
			mdss_mdp_pp_setup(ctl);
		if (PP_LOCAT(hist->block) == MDSS_PP_SSPP_CFG) {
			/* bump the owning pipe so its config is reprogrammed */
			res = container_of(hist_info, struct mdss_pipe_pp_res,
					hist);
			pipe = container_of(res, struct mdss_mdp_pipe, pp_res);
			pipe->params_changed++;
		}
		wait_ret = wait_for_completion_killable_timeout(
				&(hist_info->comp), timeout);
		mutex_lock(&hist_info->hist_mutex);
		if (wait_ret == 0) {
			ret = -ETIMEDOUT;
			spin_lock_irqsave(&hist_info->hist_lock, flag);
			pr_debug("bin collection timedout, state %d",
					hist_info->col_state);
			/*
			 * When the histogram has timed out (usually
			 * underrun) change the SW state back to idle
			 * since histogram hardware will have done the
			 * same. Histogram data also needs to be
			 * cleared in this case, which is done by the
			 * histogram being read (triggered by READY
			 * state, which also moves the histogram SW back
			 * to IDLE).
			 */
			hist_info->hist_cnt_time++;
			hist_info->col_state = HIST_READY;
			spin_unlock_irqrestore(&hist_info->hist_lock, flag);
		} else if (wait_ret < 0) {
			ret = -EINTR;
			pr_debug("%s: bin collection interrupted",
					__func__);
			goto hist_collect_exit;
		}
		if (hist_info->col_state != HIST_READY) {
			ret = -ENODATA;
			pr_debug("%s: state is not ready: %d",
					__func__, hist_info->col_state);
			goto hist_collect_exit;
		}
	} else {
		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
	}
	spin_lock_irqsave(&hist_info->hist_lock, flag);
	if (hist_info->col_state == HIST_READY) {
		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
		/* data register sits 0x1C past the control base */
		v_base = ctl_base + 0x1C;
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
		pp_hist_read(v_base, hist_info);
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
		spin_lock_irqsave(&hist_info->hist_lock, flag);
		hist_info->read_request = false;
		hist_info->col_state = HIST_IDLE;
	}
	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
hist_collect_exit:
	mutex_unlock(&hist_info->hist_mutex);
	return ret;
}
/*
 * Collect histogram data from the DSPP(s) or VIG pipe(s) selected by
 * hist->block and copy the result to the user buffer hist->c0.
 *
 * When several mixers back one display their histograms are summed
 * bin-wise into one HIST_V_SIZE result; when several pipes are
 * requested their histograms are packed back-to-back, so the caller
 * must size hist->bin_cnt accordingly.
 *
 * Returns 0 on success or a negative errno.
 */
int mdss_mdp_hist_collect(struct mdss_mdp_ctl *ctl,
					struct mdp_histogram_data *hist)
{
	int i, j, off, ret = 0;
	struct pp_hist_col_info *hist_info;
	u32 dspp_num, disp_num;
	char __iomem *ctl_base;
	u32 hist_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
	u32 *hist_concat = NULL;
	u32 *hist_data_addr;
	u32 pipe_cnt = 0;
	u32 pipe_num = MDSS_MDP_SSPP_VIG0;
	struct mdss_mdp_pipe *pipe;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (!ctl)
		return -EINVAL;
	if ((PP_BLOCK(hist->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
		(PP_BLOCK(hist->block) >= MDP_BLOCK_MAX))
		return -EINVAL;
	disp_num = PP_BLOCK(hist->block) - MDP_LOGICAL_BLOCK_DISP_0;
	hist_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
	if (!hist_cnt) {
		pr_err("%s, no dspp connects to disp %d",
			__func__, disp_num);
		ret = -EPERM;
		goto hist_collect_exit;
	}
	if (hist_cnt >= MDSS_MDP_MAX_DSPP) {
		pr_err("%s, Too many dspp connects to disp %d",
			__func__, hist_cnt);
		ret = -EPERM;
		goto hist_collect_exit;
	}
	if (PP_LOCAT(hist->block) == MDSS_PP_DSPP_CFG) {
		hist_info = &mdss_pp_res->dspp_hist[disp_num];
		for (i = 0; i < hist_cnt; i++) {
			dspp_num = mixer_id[i];
			hist_info = &mdss_pp_res->dspp_hist[dspp_num];
			ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num) +
				MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
			ret = pp_hist_collect(ctl, hist, hist_info, ctl_base);
			if (ret)
				goto hist_collect_exit;
		}
		if (hist_cnt > 1) {
			if (hist->bin_cnt != HIST_V_SIZE) {
				pr_err("User not expecting size %d output",
							HIST_V_SIZE);
				ret = -EINVAL;
				goto hist_collect_exit;
			}
			hist_concat = kzalloc(HIST_V_SIZE * sizeof(u32),
						GFP_KERNEL);
			if (!hist_concat) {
				ret = -ENOMEM;
				goto hist_collect_exit;
			}
			/*
			 * Fix: sum bin j of every mixer into bin j of the
			 * combined result. The original indexed both arrays
			 * with the mixer counter i, accumulating one bin
			 * HIST_V_SIZE times and leaving the rest zero.
			 */
			for (i = 0; i < hist_cnt; i++) {
				dspp_num = mixer_id[i];
				hist_info = &mdss_pp_res->dspp_hist[dspp_num];
				mutex_lock(&hist_info->hist_mutex);
				for (j = 0; j < HIST_V_SIZE; j++)
					hist_concat[j] += hist_info->data[j];
				mutex_unlock(&hist_info->hist_mutex);
			}
			hist_data_addr = hist_concat;
		} else {
			/* Fix: bound the user-controlled copy size against
			 * this fixed-size cache (prevents OOB read below). */
			if (hist->bin_cnt != HIST_V_SIZE) {
				ret = -EINVAL;
				goto hist_collect_exit;
			}
			hist_data_addr = hist_info->data;
		}
		hist_info = &mdss_pp_res->dspp_hist[disp_num];
		hist_info->hist_cnt_sent++;
	} else if (PP_LOCAT(hist->block) == MDSS_PP_SSPP_CFG) {
		hist_cnt = MDSS_PP_ARG_MASK & hist->block;
		if (!hist_cnt) {
			/* Fix: report the error; the original returned 0 */
			ret = -EINVAL;
			pr_warn("Must pass pipe arguments, %d", hist_cnt);
			goto hist_collect_exit;
		}
		/* Find the first pipe requested */
		for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
			if (PP_ARG(i, hist_cnt)) {
				pipe_num = i;
				break;
			}
		}
		pipe = mdss_mdp_pipe_get(mdata, BIT(pipe_num));
		if (IS_ERR_OR_NULL(pipe)) {
			pr_warn("Invalid starting hist pipe, %d", pipe_num);
			ret = -ENODEV;
			goto hist_collect_exit;
		}
		hist_info = &pipe->pp_res.hist;
		mdss_mdp_pipe_unmap(pipe);
		for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, hist->block))
				continue;
			pipe_cnt++;
			pipe = mdss_mdp_pipe_get(mdata, BIT(i));
			if (IS_ERR_OR_NULL(pipe) ||
					pipe->num > MDSS_MDP_SSPP_VIG2) {
				pr_warn("Invalid Hist pipe (%d)", i);
				continue;
			}
			hist_info = &pipe->pp_res.hist;
			ctl_base = pipe->base +
				MDSS_MDP_REG_VIG_HIST_CTL_BASE;
			ret = pp_hist_collect(ctl, hist, hist_info, ctl_base);
			mdss_mdp_pipe_unmap(pipe);
			if (ret)
				goto hist_collect_exit;
		}
		if (pipe_cnt > 1) {
			if (hist->bin_cnt != (HIST_V_SIZE * pipe_cnt)) {
				pr_err("User not expecting size %d output",
							pipe_cnt * HIST_V_SIZE);
				ret = -EINVAL;
				goto hist_collect_exit;
			}
			hist_concat = kzalloc(HIST_V_SIZE * pipe_cnt *
						sizeof(u32), GFP_KERNEL);
			if (!hist_concat) {
				ret = -ENOMEM;
				goto hist_collect_exit;
			}
			/*
			 * Fix: pack results densely by collection order.
			 * The original used off = HIST_V_SIZE * i (the raw
			 * pipe index), which writes past the end of
			 * hist_concat whenever the requested pipe mask is
			 * sparse or does not start at pipe 0.
			 */
			off = 0;
			for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
				if (!PP_ARG(i, hist->block))
					continue;
				pipe = mdss_mdp_pipe_get(mdata, BIT(i));
				/* Fix: don't dereference a failed lookup */
				if (IS_ERR_OR_NULL(pipe)) {
					pr_warn("Invalid Hist pipe (%d)", i);
					continue;
				}
				hist_info = &pipe->pp_res.hist;
				mutex_lock(&hist_info->hist_mutex);
				for (j = 0; j < HIST_V_SIZE; j++)
					hist_concat[off + j] =
						hist_info->data[j];
				hist_info->hist_cnt_sent++;
				mutex_unlock(&hist_info->hist_mutex);
				mdss_mdp_pipe_unmap(pipe);
				off += HIST_V_SIZE;
			}
			hist_data_addr = hist_concat;
		} else {
			/* Fix: bound the copy size for the single-pipe path */
			if (hist->bin_cnt != HIST_V_SIZE) {
				ret = -EINVAL;
				goto hist_collect_exit;
			}
			hist_data_addr = hist_info->data;
		}
	} else {
		pr_info("No Histogram at location %d", PP_LOCAT(hist->block));
		/* Fix: an unknown location is an error, not silent success */
		ret = -EINVAL;
		goto hist_collect_exit;
	}
	/* Fix: report -EFAULT on a failed copy instead of returning the
	 * positive count of bytes left uncopied. */
	if (copy_to_user(hist->c0, hist_data_addr, sizeof(u32) *
			hist->bin_cnt))
		ret = -EFAULT;
hist_collect_exit:
	kfree(hist_concat);
	return ret;
}
/*
 * Histogram interrupt dispatcher. Walks the masked ISR word one block
 * at a time: bits [21:12] belong to DSPP0-2, bits [9:0] to VIG0-2, with
 * a (done, reset-done) bit pair per block. Done moves the block's SW
 * state to HIST_READY (waking any pending reader); reset-done moves it
 * to HIST_IDLE.
 */
void mdss_mdp_hist_intr_done(u32 isr)
{
	u32 isr_blk, blk_idx;
	struct pp_hist_col_info *hist_info = NULL;
	struct mdss_mdp_pipe *pipe;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	/* keep only the done/reset-done pairs for DSPP0-2 and VIG0-2 */
	isr &= 0x333333;
	while (isr != 0) {
		if (isr & 0xFFF000) {
			if (isr & 0x3000) {
				blk_idx = 0;
				isr_blk = (isr >> 12) & 0x3;
				isr &= ~0x3000;
			} else if (isr & 0x30000) {
				blk_idx = 1;
				isr_blk = (isr >> 16) & 0x3;
				isr &= ~0x30000;
			} else {
				blk_idx = 2;
				isr_blk = (isr >> 20) & 0x3;
				isr &= ~0x300000;
			}
			hist_info = &mdss_pp_res->dspp_hist[blk_idx];
		} else {
			if (isr & 0x3) {
				blk_idx = MDSS_MDP_SSPP_VIG0;
				isr_blk = isr & 0x3;
				isr &= ~0x3;
			} else if (isr & 0x30) {
				blk_idx = MDSS_MDP_SSPP_VIG1;
				isr_blk = (isr >> 4) & 0x3;
				isr &= ~0x30;
			} else {
				blk_idx = MDSS_MDP_SSPP_VIG2;
				isr_blk = (isr >> 8) & 0x3;
				isr &= ~0x300;
			}
			pipe = mdss_mdp_pipe_search(mdata, BIT(blk_idx));
			if (IS_ERR_OR_NULL(pipe)) {
				pr_debug("pipe DNE, %d", blk_idx);
				continue;
			}
			hist_info = &pipe->pp_res.hist;
		}
		/* Histogram Done Interrupt */
		if (hist_info && (isr_blk & 0x1) &&
			(hist_info->col_en)) {
			spin_lock(&hist_info->hist_lock);
			hist_info->col_state = HIST_READY;
			spin_unlock(&hist_info->hist_lock);
			if (hist_info->read_request)
				complete(&hist_info->comp);
		}
		/* Histogram Reset Done Interrupt */
		/* Fix: guard hist_info here too, matching the check above */
		if (hist_info && (isr_blk & 0x2) &&
			(hist_info->col_en)) {
			spin_lock(&hist_info->hist_lock);
			hist_info->col_state = HIST_IDLE;
			spin_unlock(&hist_info->hist_lock);
		}
	}
}
#define MDSS_AD_MAX_MIXERS 1
/*
 * Validate that assertive display (AD) can operate on @mfd.
 * Returns the AD config index (>= 0, from the display's first mixer)
 * on success, or a negative errno (-EINVAL, -ENODEV) on failure.
 */
static int mdss_ad_init_checks(struct msm_fb_data_type *mfd)
{
	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
	u32 mixer_num;
	int i;
	struct mdss_data_type *mdata;

	/* Fix: the original called mfd_to_mdata(mfd) before the NULL
	 * check on mfd, dereferencing a possibly-NULL pointer. */
	if (!mfd)
		return -EINVAL;
	mdata = mfd_to_mdata(mfd);
	if (!mdata)
		return -EINVAL;
	if (mdata->nad_cfgs == 0) {
		pr_debug("Assertive Display not supported by device");
		return -ENODEV;
	}
	if (mfd->panel_info->type == MIPI_CMD_PANEL) {
		pr_debug("Command panel not supported");
		return -EINVAL;
	}
	mixer_num = mdss_mdp_get_ctl_mixers(mfd->index, mixer_id);
	if (!mixer_num || mixer_num > MDSS_AD_MAX_MIXERS) {
		pr_err("invalid mixer_num, %d", mixer_num);
		return -EINVAL;
	}
	/* every backing mixer must map onto a provisioned AD block */
	for (i = 0; i < mixer_num; i++) {
		if (mixer_id[i] >= mdata->nad_cfgs) {
			pr_err("invalid mixer input, %d", mixer_id[i]);
			return -EINVAL;
		}
	}
	return mixer_id[0];
}
/*
 * Resolve the AD config backing @mfd, or NULL when AD is unsupported
 * or misconfigured for this display.
 */
static struct mdss_ad_info *mdss_mdp_get_ad(struct msm_fb_data_type *mfd)
{
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	int idx = mdss_ad_init_checks(mfd);

	/* a negative index signals an init-check failure */
	if (idx < 0)
		return NULL;
	return &mdata->ad_cfgs[idx];
}
static int pp_update_ad_input(struct msm_fb_data_type *mfd)
{
struct mdss_ad_info *ad;
struct mdss_ad_input input;
struct mdss_mdp_ctl *ctl;
if (!mfd)
return -EINVAL;
ctl = mfd_to_ctl(mfd);
if (!ctl)
return -EINVAL;
ad = mdss_mdp_get_ad(mfd);
if (!ad || ad->cfg.mode == MDSS_AD_MODE_AUTO_BL)
return -EINVAL;
pr_debug("backlight level changed (%d), trigger update to AD",
mfd->bl_level);
input.mode = ad->cfg.mode;
if (MDSS_AD_MODE_DATA_MATCH(ad->cfg.mode, MDSS_AD_INPUT_AMBIENT))
input.in.amb_light = ad->ad_data;
else
input.in.strength = ad->ad_data;
/* call to ad_input will trigger backlight read */
return mdss_mdp_ad_input(mfd, &input, 0);
}
/*
 * Apply AD init or cfg parameters from userspace and handle the
 * enable/disable ops. On INIT, the optional backlight linearization
 * LUTs are copied in and a shift is derived to map the panel's bl_max
 * range onto the LUT index range. Dirty bits are set here; registers
 * are programmed later via mdss_mdp_pp_setup() -> mdss_mdp_ad_setup().
 *
 * Returns 0 on success or a negative errno.
 */
int mdss_mdp_ad_config(struct msm_fb_data_type *mfd,
			struct mdss_ad_init_cfg *init_cfg)
{
	struct mdss_ad_info *ad;
	struct mdss_mdp_ctl *ctl;
	int lin_ret = -1, inv_ret = -1, ret = 0;
	u32 ratio_temp, shift = 0;

	ad = mdss_mdp_get_ad(mfd);
	if (!ad)
		return -EINVAL;
	mutex_lock(&ad->lock);
	if (init_cfg->ops & MDP_PP_AD_INIT) {
		memcpy(&ad->init, &init_cfg->params.init,
				sizeof(struct mdss_ad_init));
		if (init_cfg->params.init.bl_lin_len == AD_BL_LIN_LEN) {
			lin_ret = copy_from_user(&ad->bl_lin,
				init_cfg->params.init.bl_lin,
				AD_BL_LIN_LEN * sizeof(uint32_t));
			inv_ret = copy_from_user(&ad->bl_lin_inv,
				init_cfg->params.init.bl_lin_inv,
				AD_BL_LIN_LEN * sizeof(uint32_t));
			/* NOTE(review): -EFAULT is the conventional errno
			 * for copy_from_user failure; -ENOMEM kept to
			 * preserve the existing userspace ABI. */
			if (lin_ret || inv_ret)
				ret = -ENOMEM;
			/* shift scales panel bl_max down to LUT index range */
			ratio_temp = mfd->panel_info->bl_max / AD_BL_LIN_LEN;
			while (ratio_temp > 0) {
				ratio_temp = ratio_temp >> 1;
				shift++;
			}
			ad->bl_bright_shift = shift;
		} else if (init_cfg->params.init.bl_lin_len) {
			ret = -EINVAL;
		}
		if (!lin_ret && !inv_ret)
			ad->state |= PP_AD_STATE_BL_LIN;
		else
			/* Fix: bitwise ~, not logical !. The original
			 * "&= !FLAG" cleared every state bit. */
			ad->state &= ~PP_AD_STATE_BL_LIN;
		ad->sts |= PP_AD_STS_DIRTY_INIT;
	} else if (init_cfg->ops & MDP_PP_AD_CFG) {
		memcpy(&ad->cfg, &init_cfg->params.cfg,
				sizeof(struct mdss_ad_cfg));
		/*
		 * TODO: specify panel independent range of input from cfg,
		 * scale input backlight_scale to panel bl_max's range
		 */
		ad->cfg.backlight_scale = mfd->panel_info->bl_max;
		ad->sts |= PP_AD_STS_DIRTY_CFG;
	}
	if (!ret && (init_cfg->ops & MDP_PP_OPS_DISABLE)) {
		ad->sts &= ~PP_STS_ENABLE;
		/* drop the lock so a queued calc worker can drain */
		mutex_unlock(&ad->lock);
		cancel_work_sync(&ad->calc_work);
		mutex_lock(&ad->lock);
		ad->mfd = NULL;
	} else if (!ret && (init_cfg->ops & MDP_PP_OPS_ENABLE)) {
		ad->sts |= PP_STS_ENABLE;
		ad->mfd = mfd;
	}
	mutex_unlock(&ad->lock);
	ctl = mfd_to_ctl(mfd);
	if (!ret)
		mdss_mdp_pp_setup(ctl);
	return ret;
}
/*
 * Feed a new input sample (ambient light, strength target, or a
 * calibration backlight) into the AD block. When @wait is set, blocks
 * until the calc worker publishes a result in input->output.
 *
 * Returns 0 on success, a negative errno on failure, or (when waiting)
 * -ETIMEDOUT if no result arrived in time.
 */
int mdss_mdp_ad_input(struct msm_fb_data_type *mfd,
			struct mdss_ad_input *input, int wait) {
	int ret = 0;
	struct mdss_ad_info *ad;
	struct mdss_mdp_ctl *ctl;
	u32 bl;

	ad = mdss_mdp_get_ad(mfd);
	if (!ad)
		return -EINVAL;
	mutex_lock(&ad->lock);
	/*
	 * Fix: the original wrote "!input->mode == MDSS_AD_MODE_CALIB",
	 * which negates input->mode before comparing, so the guard almost
	 * never fired. The intent is: reject inputs until AD is
	 * initialized/configured, except CALIB which is always allowed.
	 */
	if ((!PP_AD_STATE_IS_INITCFG(ad->state) &&
			!PP_AD_STS_IS_DIRTY(ad->sts)) &&
			(input->mode != MDSS_AD_MODE_CALIB)) {
		pr_warn("AD not initialized or configured.");
		ret = -EPERM;
		goto error;
	}
	switch (input->mode) {
	case MDSS_AD_MODE_AUTO_BL:
	case MDSS_AD_MODE_AUTO_STR:
		/* these modes consume an ambient-light sample */
		if (!MDSS_AD_MODE_DATA_MATCH(ad->cfg.mode,
				MDSS_AD_INPUT_AMBIENT)) {
			ret = -EINVAL;
			goto error;
		}
		ad->ad_data_mode = MDSS_AD_INPUT_AMBIENT;
		ad->ad_data = input->in.amb_light;
		ad->calc_itr = ad->cfg.stab_itr;
		ad->sts |= PP_AD_STS_DIRTY_VSYNC;
		ad->sts |= PP_AD_STS_DIRTY_DATA;
		break;
	case MDSS_AD_MODE_TARG_STR:
	case MDSS_AD_MODE_MAN_STR:
		/* these modes consume a strength sample */
		if (!MDSS_AD_MODE_DATA_MATCH(ad->cfg.mode,
				MDSS_AD_INPUT_STRENGTH)) {
			ret = -EINVAL;
			goto error;
		}
		ad->ad_data_mode = MDSS_AD_INPUT_STRENGTH;
		ad->ad_data = input->in.strength;
		ad->calc_itr = ad->cfg.stab_itr;
		ad->sts |= PP_AD_STS_DIRTY_VSYNC;
		ad->sts |= PP_AD_STS_DIRTY_DATA;
		break;
	case MDSS_AD_MODE_CALIB:
		wait = 0;
		if (mfd->calib_mode) {
			bl = input->in.calib_bl;
			if (bl >= AD_BL_LIN_LEN) {
				pr_warn("calib_bl 255 max!");
				break;
			}
			/* set backlight directly; bl_lock nests inside
			 * ad->lock elsewhere, so drop ad->lock first */
			mutex_unlock(&ad->lock);
			mutex_lock(&mfd->bl_lock);
			MDSS_BRIGHT_TO_BL(bl, bl, mfd->panel_info->bl_max,
					MDSS_MAX_BL_BRIGHTNESS);
			mdss_fb_set_backlight(mfd, bl);
			mutex_unlock(&mfd->bl_lock);
			mutex_lock(&ad->lock);
		} else {
			pr_warn("should be in calib mode");
		}
		break;
	default:
		pr_warn("invalid default %d", input->mode);
		ret = -EINVAL;
		goto error;
	}
error:
	mutex_unlock(&ad->lock);
	if (!ret) {
		if (wait) {
			mutex_lock(&ad->lock);
			init_completion(&ad->comp);
			mutex_unlock(&ad->lock);
		}
		ctl = mfd_to_ctl(mfd);
		mdss_mdp_pp_setup(ctl);
		if (wait) {
			ret = wait_for_completion_interruptible_timeout(
					&ad->comp, HIST_WAIT_TIMEOUT(1));
			if (ret == 0)
				ret = -ETIMEDOUT;
			else if (ret > 0)
				input->output = ad->last_str;
		}
	}
	return ret;
}
/*
 * Latch the current input sample into the AD block's input registers.
 * Every strength-based mode writes the backlight level first, then the
 * mode-specific data register; AUTO_BL only takes ambient light.
 */
static void pp_ad_input_write(struct mdss_ad_info *ad, u32 bl_lvl)
{
	char __iomem *base = ad->base;
	u32 mode = ad->cfg.mode;

	if (mode == MDSS_AD_MODE_AUTO_BL) {
		writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_AL);
		return;
	}
	if (mode != MDSS_AD_MODE_AUTO_STR && mode != MDSS_AD_MODE_TARG_STR &&
			mode != MDSS_AD_MODE_MAN_STR) {
		pr_warn("Invalid mode! %d", ad->cfg.mode);
		return;
	}

	writel_relaxed(bl_lvl, base + MDSS_MDP_REG_AD_BL);
	if (mode == MDSS_AD_MODE_AUTO_STR)
		writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_AL);
	else if (mode == MDSS_AD_MODE_TARG_STR)
		writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_TARG_STR);
	else
		writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_STR_MAN);
}
/*
 * Program the static AD initialization registers from ad->init.
 * Paired 16-bit (or 8-bit) fields are packed high/low into single
 * 32-bit registers before writing. Within SOURCE this is only called
 * from mdss_mdp_ad_setup() with ad->lock held and clocks on.
 */
static void pp_ad_init_write(struct mdss_ad_info *ad)
{
	u32 temp;
	char __iomem *base = ad->base;
	writel_relaxed(ad->init.i_control[0] & 0x1F,
				base + MDSS_MDP_REG_AD_CON_CTRL_0);
	writel_relaxed(ad->init.i_control[1] << 8,
				base + MDSS_MDP_REG_AD_CON_CTRL_1);
	/* white level in the high half-word, black level in the low */
	temp = ad->init.white_lvl << 16;
	temp |= ad->init.black_lvl & 0xFFFF;
	writel_relaxed(temp, base + MDSS_MDP_REG_AD_BW_LVL);
	writel_relaxed(ad->init.var, base + MDSS_MDP_REG_AD_VAR);
	writel_relaxed(ad->init.limit_ampl, base + MDSS_MDP_REG_AD_AMP_LIM);
	writel_relaxed(ad->init.i_dither, base + MDSS_MDP_REG_AD_DITH);
	/* slope max in bits [15:8], slope min in bits [7:0] */
	temp = ad->init.slope_max << 8;
	temp |= ad->init.slope_min & 0xFF;
	writel_relaxed(temp, base + MDSS_MDP_REG_AD_SLOPE);
	writel_relaxed(ad->init.dither_ctl, base + MDSS_MDP_REG_AD_DITH_CTRL);
	writel_relaxed(ad->init.format, base + MDSS_MDP_REG_AD_CTRL_0);
	writel_relaxed(ad->init.auto_size, base + MDSS_MDP_REG_AD_CTRL_1);
	/* frame width in the high half-word, height in the low */
	temp = ad->init.frame_w << 16;
	temp |= ad->init.frame_h & 0xFFFF;
	writel_relaxed(temp, base + MDSS_MDP_REG_AD_FRAME_SIZE);
	/* logo vertical position in bits [15:8], horizontal in [7:0] */
	temp = ad->init.logo_v << 8;
	temp |= ad->init.logo_h & 0xFF;
	writel_relaxed(temp, base + MDSS_MDP_REG_AD_LOGO_POS);
	pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_FI, ad->init.asym_lut);
	pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_CC, ad->init.color_corr_lut);
}
#define MDSS_PP_AD_DEF_CALIB 0x6E
/*
 * Program the mode-dependent AD configuration registers from ad->cfg.
 * The switch cascades intentionally: each richer mode writes its extra
 * registers, then falls through to the registers shared with the
 * simpler modes below it (AUTO_BL > AUTO_STR > TARG_STR > MAN_STR).
 */
static void pp_ad_cfg_write(struct mdss_ad_info *ad)
{
	char __iomem *base = ad->base;
	u32 temp, temp_calib = MDSS_PP_AD_DEF_CALIB;
	switch (ad->cfg.mode) {
	case MDSS_AD_MODE_AUTO_BL:
		/* backlight max in the high half-word, min in the low */
		temp = ad->cfg.backlight_max << 16;
		temp |= ad->cfg.backlight_min & 0xFFFF;
		writel_relaxed(temp, base + MDSS_MDP_REG_AD_BL_MINMAX);
		writel_relaxed(ad->cfg.amb_light_min,
				base + MDSS_MDP_REG_AD_AL_MIN);
		temp = ad->cfg.filter[1] << 16;
		temp |= ad->cfg.filter[0] & 0xFFFF;
		writel_relaxed(temp, base + MDSS_MDP_REG_AD_AL_FILT);
		/* fallthrough */
	case MDSS_AD_MODE_AUTO_STR:
		pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_AL,
				ad->cfg.al_calib_lut);
		writel_relaxed(ad->cfg.strength_limit,
				base + MDSS_MDP_REG_AD_STR_LIM);
		temp = ad->cfg.calib[3] << 16;
		temp |= ad->cfg.calib[2] & 0xFFFF;
		writel_relaxed(temp, base + MDSS_MDP_REG_AD_CALIB_CD);
		writel_relaxed(ad->cfg.t_filter_recursion,
				base + MDSS_MDP_REG_AD_TFILT_CTRL);
		/* ambient modes use calib[0]; others keep the default */
		temp_calib = ad->cfg.calib[0] & 0xFFFF;
		/* fallthrough */
	case MDSS_AD_MODE_TARG_STR:
		temp = ad->cfg.calib[1] << 16;
		temp |= temp_calib;
		writel_relaxed(temp, base + MDSS_MDP_REG_AD_CALIB_AB);
		/* fallthrough */
	case MDSS_AD_MODE_MAN_STR:
		writel_relaxed(ad->cfg.backlight_scale,
				base + MDSS_MDP_REG_AD_BL_MAX);
		writel_relaxed(ad->cfg.mode, base + MDSS_MDP_REG_AD_MODE_SEL);
		pr_debug("stab_itr = %d", ad->cfg.stab_itr);
		break;
	default:
		break;
	}
}
/*
 * Vsync callback: defer the per-frame AD calculation to the AD
 * workqueue for the mixer driving this ctl.
 */
static void pp_ad_vsync_handler(struct mdss_mdp_ctl *ctl, ktime_t t)
{
	struct mdss_data_type *mdata = ctl->mdata;
	struct mdss_ad_info *ad_info;

	if (!ctl->mixer_left || ctl->mixer_left->num >= mdata->nad_cfgs)
		return;
	ad_info = &mdata->ad_cfgs[ctl->mixer_left->num];
	queue_work(mdata->ad_calc_wq, &ad_info->calc_work);
}
#define MDSS_PP_AD_BYPASS_DEF 0x101
/*
 * Flush pending AD dirty state (data/cfg/init) to the hardware and
 * manage the RUN/bypass and vsync-handler state transitions.
 *
 * Returns 1 when a register flush is required by the caller, 0 when
 * nothing changed, or -EINVAL when AD is unavailable for @mfd.
 */
static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd)
{
	int ret = 0;
	struct mdss_ad_info *ad;
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	char __iomem *base;
	u32 bypass = MDSS_PP_AD_BYPASS_DEF, bl;

	ad = mdss_mdp_get_ad(mfd);
	if (!ad)
		return -EINVAL;
	base = ad->base;
	mutex_lock(&ad->lock);
	/* trace sts/state transitions without spamming identical lines */
	if (ad->sts != last_sts || ad->state != last_state) {
		last_sts = ad->sts;
		last_state = ad->state;
		pr_debug("begining: ad->sts = 0x%08x, state = 0x%08x", ad->sts,
			ad->state);
	}
	if (!PP_AD_STS_IS_DIRTY(ad->sts) &&
		(ad->sts & PP_AD_STS_DIRTY_DATA)) {
		/*
		 * Write inputs to regs when the data has been updated or
		 * Assertive Display is up and running as long as there are
		 * no updates to AD init or cfg
		 */
		ad->sts &= ~PP_AD_STS_DIRTY_DATA;
		ad->state |= PP_AD_STATE_DATA;
		bl = 0;
		if (MDSS_AD_RUNNING_AUTO_STR(ad) || ad->last_bl == 0) {
			mutex_lock(&mfd->bl_lock);
			bl = mfd->bl_level;
			if (bl != ad->last_bl) {
				/* backlight moved: restart the iteration
				 * budget and re-arm the vsync handler */
				ad->last_bl = bl;
				ad->calc_itr = ad->cfg.stab_itr;
				ad->sts |= PP_AD_STS_DIRTY_VSYNC;
			}
			if (ad->state & PP_AD_STATE_BL_LIN) {
				bl = ad->bl_lin[bl >> ad->bl_bright_shift];
				bl = bl << ad->bl_bright_shift;
			}
			mutex_unlock(&mfd->bl_lock);
		}
		pp_ad_input_write(ad, bl);
	}
	if (ad->sts & PP_AD_STS_DIRTY_CFG) {
		ad->sts &= ~PP_AD_STS_DIRTY_CFG;
		ad->state |= PP_AD_STATE_CFG;
		pp_ad_cfg_write(ad);
		if (!MDSS_AD_MODE_DATA_MATCH(ad->cfg.mode, ad->ad_data_mode)) {
			ad->sts &= ~PP_AD_STS_DIRTY_DATA;
			ad->state &= ~PP_AD_STATE_DATA;
			pr_debug("Mode switched, data invalidated!");
		}
	}
	if (ad->sts & PP_AD_STS_DIRTY_INIT) {
		ad->sts &= ~PP_AD_STS_DIRTY_INIT;
		ad->state |= PP_AD_STATE_INIT;
		pp_ad_init_write(ad);
	}
	if ((ad->sts & PP_STS_ENABLE) && PP_AD_STATE_IS_READY(ad->state)) {
		bypass = 0;
		ret = 1;
		ad->state |= PP_AD_STATE_RUN;
		mutex_lock(&mfd->bl_lock);
		mfd->mdp.update_ad_input = pp_update_ad_input;
		mfd->ext_bl_ctrl = ad->cfg.bl_ctrl_mode;
		mutex_unlock(&mfd->bl_lock);
	} else {
		if (ad->state & PP_AD_STATE_RUN) {
			ret = 1;
			/* Clear state and regs when going to off state*/
			ad->sts = 0;
			ad->sts |= PP_AD_STS_DIRTY_VSYNC;
			/* Fix: bitwise ~, not logical !. The original
			 * "&= !FLAG" zeroed the whole state word instead
			 * of clearing individual flags. */
			ad->state &= ~PP_AD_STATE_INIT;
			ad->state &= ~PP_AD_STATE_CFG;
			ad->state &= ~PP_AD_STATE_DATA;
			ad->state &= ~PP_AD_STATE_BL_LIN;
			ad->bl_bright_shift = 0;
			ad->ad_data = 0;
			ad->ad_data_mode = 0;
			ad->calc_itr = 0;
			memset(&ad->bl_lin, 0, sizeof(uint32_t) *
								AD_BL_LIN_LEN);
			memset(&ad->bl_lin_inv, 0, sizeof(uint32_t) *
								AD_BL_LIN_LEN);
			memset(&ad->init, 0, sizeof(struct mdss_ad_init));
			memset(&ad->cfg, 0, sizeof(struct mdss_ad_cfg));
			mutex_lock(&mfd->bl_lock);
			mfd->mdp.update_ad_input = NULL;
			mfd->ext_bl_ctrl = 0;
			mutex_unlock(&mfd->bl_lock);
		}
		ad->state &= ~PP_AD_STATE_RUN;
	}
	/* bypass register: 0 runs AD, MDSS_PP_AD_BYPASS_DEF disables it */
	writel_relaxed(bypass, base);
	if (PP_AD_STS_DIRTY_VSYNC & ad->sts) {
		pr_debug("dirty vsync, calc_itr = %d", ad->calc_itr);
		ad->sts &= ~PP_AD_STS_DIRTY_VSYNC;
		if (!(PP_AD_STATE_VSYNC & ad->state) && ad->calc_itr &&
					(ad->state & PP_AD_STATE_RUN)) {
			ctl->add_vsync_handler(ctl, &ad->handle);
			ad->state |= PP_AD_STATE_VSYNC;
		} else if ((PP_AD_STATE_VSYNC & ad->state) &&
			(!ad->calc_itr || !(PP_AD_STATE_RUN & ad->state))) {
			ctl->remove_vsync_handler(ctl, &ad->handle);
			ad->state &= ~PP_AD_STATE_VSYNC;
		}
	}
	if (ad->sts != last_sts || ad->state != last_state) {
		last_sts = ad->sts;
		last_state = ad->state;
		pr_debug("end: ad->sts = 0x%08x, state = 0x%08x", ad->sts,
			ad->state);
	}
	mutex_unlock(&ad->lock);
	return ret;
}
#define MDSS_PP_AD_SLEEP 10
/*
 * AD calculation worker, queued from pp_ad_vsync_handler(): kicks one
 * hardware calculation, polls for completion, applies the computed
 * backlight (AUTO_BL mode only), publishes the strength in
 * ad->last_str, and flushes the result to the mixer.
 */
static void pp_ad_calc_worker(struct work_struct *work)
{
	struct mdss_ad_info *ad;
	struct mdss_mdp_ctl *ctl;
	struct msm_fb_data_type *mfd;
	u32 bl, calc_done = 0;
	ad = container_of(work, struct mdss_ad_info, calc_work);
	mutex_lock(&ad->lock);
	/* bail out if AD was disabled between queueing and running */
	if (!ad->mfd || !(ad->sts & PP_STS_ENABLE)) {
		mutex_unlock(&ad->lock);
		return;
	}
	mfd = ad->mfd;
	ctl = mfd_to_ctl(ad->mfd);
	if (PP_AD_STATE_RUN & ad->state) {
		/* Kick off calculation */
		ad->calc_itr--;
		writel_relaxed(1, ad->base + MDSS_MDP_REG_AD_START_CALC);
	}
	if (ad->state & PP_AD_STATE_RUN) {
		/* poll (sleeping between reads) until HW signals done or
		 * RUN is cleared out from under us */
		do {
			calc_done = readl_relaxed(ad->base +
				MDSS_MDP_REG_AD_CALC_DONE);
			if (!calc_done)
				usleep(MDSS_PP_AD_SLEEP);
		} while (!calc_done && (ad->state & PP_AD_STATE_RUN));
		if (calc_done) {
			/* low byte of last_str holds the computed strength */
			ad->last_str = 0xFF & readl_relaxed(ad->base +
						MDSS_MDP_REG_AD_STR_OUT);
			if (MDSS_AD_RUNNING_AUTO_BL(ad)) {
				bl = 0xFFFF & readl_relaxed(ad->base +
						MDSS_MDP_REG_AD_BL_OUT);
				if (ad->state & PP_AD_STATE_BL_LIN) {
					/* map the linearized backlight back
					 * through the inverse LUT */
					bl = bl >> ad->bl_bright_shift;
					bl = min_t(u32, bl,
							MDSS_MAX_BL_BRIGHTNESS);
					bl = ad->bl_lin_inv[bl];
					bl = bl << ad->bl_bright_shift;
				}
				pr_debug("calc bl = %d", bl);
				/* high half-word carries the backlight */
				ad->last_str |= bl << 16;
				mutex_lock(&ad->mfd->bl_lock);
				mdss_fb_set_backlight(ad->mfd, bl);
				mutex_unlock(&ad->mfd->bl_lock);
			}
			pr_debug("calc_str = %d, calc_itr %d",
					ad->last_str & 0xFF,
					ad->calc_itr);
		} else {
			/* RUN cleared mid-poll: mark the result invalid */
			ad->last_str = 0xFFFFFFFF;
		}
	}
	/* wake any mdss_mdp_ad_input(wait=1) caller */
	complete(&ad->comp);
	if (!ad->calc_itr) {
		/* iteration budget spent: stop vsync-driven kicks */
		ad->state &= ~PP_AD_STATE_VSYNC;
		ctl->remove_vsync_handler(ctl, &ad->handle);
	}
	mutex_unlock(&ad->lock);
	mutex_lock(&mfd->lock);
	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, BIT(13 + ad->num));
	mutex_unlock(&mfd->lock);
	/* Trigger update notify to wake up those waiting for display updates */
	mdss_fb_update_notify_update(mfd);
}
#define PP_AD_LUT_LEN 33
/*
 * Program a 33-entry AD LUT. Entries are packed two per 32-bit
 * register (even entry in the low half-word, odd entry in the high);
 * the final, unpaired entry occupies the high half-word of the last
 * register.
 */
static void pp_ad_cfg_lut(char __iomem *offset, u32 *data)
{
	int idx;
	u32 packed;

	for (idx = 0; idx < PP_AD_LUT_LEN - 1; idx += 2) {
		packed = (data[idx] & 0xFFFF) | (data[idx + 1] << 16);
		writel_relaxed(packed, offset + (idx * 2));
	}
	writel_relaxed(data[PP_AD_LUT_LEN - 1] << 16,
			offset + ((PP_AD_LUT_LEN - 1) * 2));
}
/*
 * Allocate and initialise the per-block assertive display (AD) contexts and
 * the single-threaded workqueue that runs their calculation work items.
 *
 * @mdata:  device data; nad_cfgs gives the number of AD blocks.
 * @ad_off: register offsets of each AD block relative to mdp_base.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int mdss_mdp_ad_addr_setup(struct mdss_data_type *mdata, u32 *ad_off)
{
	u32 i;

	mdata->ad_cfgs = devm_kzalloc(&mdata->pdev->dev,
				sizeof(struct mdss_ad_info) * mdata->nad_cfgs,
				GFP_KERNEL);
	if (!mdata->ad_cfgs) {
		pr_err("unable to setup assertive display:devm_kzalloc fail\n");
		return -ENOMEM;
	}

	/*
	 * Fix: the original ignored workqueue-creation failure; queueing work
	 * on a NULL workqueue would crash later.  devm memory is released
	 * automatically on probe failure, so no explicit free is needed.
	 */
	mdata->ad_calc_wq = create_singlethread_workqueue("ad_calc_wq");
	if (!mdata->ad_calc_wq) {
		pr_err("unable to setup assertive display: workqueue fail\n");
		return -ENOMEM;
	}

	for (i = 0; i < mdata->nad_cfgs; i++) {
		mdata->ad_cfgs[i].base = mdata->mdp_base + ad_off[i];
		mdata->ad_cfgs[i].num = i;
		mdata->ad_cfgs[i].calc_itr = 0;
		/* 0xFFFFFFFF = "no calculation result yet" sentinel. */
		mdata->ad_cfgs[i].last_str = 0xFFFFFFFF;
		mutex_init(&mdata->ad_cfgs[i].lock);
		mdata->ad_cfgs[i].handle.vsync_handler = pp_ad_vsync_handler;
		INIT_WORK(&mdata->ad_cfgs[i].calc_work, pp_ad_calc_worker);
	}
	return 0;
}
/*
 * Validate a user-supplied MDP register offset for calibration access.
 * Returns 1 when the offset names a whitelisted, 4-byte-aligned register
 * inside the MDP register map, 0 otherwise.
 */
static int is_valid_calib_addr(void *addr)
{
	unsigned int off = (unsigned int) addr;

	/* Reject NULL, anything past the register map, and unaligned. */
	if (off == 0x0 || off > 0x5138 || (off & 0x3))
		return 0;
	/* Nothing below 0x100 is accessible. */
	if (off < 0x100)
		return 0;

	if (off >= 0x4600) {
		/* DSPP region: reduce to an offset within the owning DSPP. */
		if (off <= 0x4938)
			off -= 0x4600;			/* DSPP0 */
		else if (off >= 0x4a00 && off <= 0x4d38)
			off -= 0x4a00;			/* DSPP1 */
		else if (off >= 0x4e00)
			off -= 0x4e00;			/* DSPP2 */
		/* Offsets in the gaps between DSPP windows are left as-is
		 * and fail every per-DSPP range test below. */

		if (off >= 0x30 && off <= 0xe8)		/* PCC rgb coeff. */
			return 1;
		if (off >= 0x2b0 && off <= 0x2b8)	/* ARLUT red */
			return 1;
		if (off >= 0x238 && off <= 0x244)	/* PA */
			return 1;
		if (off >= 0x2c0 && off <= 0x2c8)	/* ARLUT green */
			return 1;
		if (off >= 0x2d0 && off <= 0x338)	/* ARLUT blue/gamut */
			return 1;
		if (off == 0)				/* DSPP op mode */
			return 1;
		return 0;
	}

	if (off >= 0x600 && off <= 0x608)
		return 1;
	if (off >= 0x400 && off <= 0x408)
		return 1;
	if (off == 0x1830 || off == 0x1c30 || off == 0x1430 || off == 0x1e38)
		return 1;
	if (off == 0x1e3c || off == 0x1e30)
		return 1;
	if (off >= 0x3220 && off <= 0x3228)
		return 1;
	if (off >= 0x3200 || off == 0x100)
		return 1;

	return 0;
}
/*
 * Perform one whitelisted register read or write on behalf of the
 * calibration interface.  Sets *copyback on a read so the ioctl layer
 * copies the result back to userspace.  Returns 0 on success, -1 for an
 * invalid address.
 */
int mdss_mdp_calib_config(struct mdp_calib_config_data *cfg, u32 *copyback)
{
	int ret;
	void *reg = (void *) cfg->addr;

	if (!is_valid_calib_addr(reg))
		return -1;
	/* Address accepted: a request with neither op still returns 0. */
	ret = 0;

	reg = (void *)(((unsigned int) reg) + (mdss_res->mdp_base));

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
	if (cfg->ops & MDP_PP_OPS_READ) {
		cfg->data = readl_relaxed(reg);
		*copyback = 1;
		ret = 0;
	} else if (cfg->ops & MDP_PP_OPS_WRITE) {
		writel_relaxed(cfg->data, reg);
		ret = 0;
	}
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
	return ret;
}
/*
 * Store the requested calibration mask on the framebuffer device.
 * Returns 0 on success, -EINVAL when PP state or the fb device is missing.
 */
int mdss_mdp_calib_mode(struct msm_fb_data_type *mfd,
				struct mdss_calib_cfg *cfg)
{
	if (!mfd || !mdss_pp_res)
		return -EINVAL;

	mutex_lock(&mdss_pp_mutex);
	mfd->calib_mode = cfg->calib_mask;
	mutex_unlock(&mdss_pp_mutex);
	return 0;
}
| gpl-2.0 |
zlatinski/omap-android-drm-kms | drivers/rpmsg/rpmsg_resmgr_common.c | 16 | 5344 | /*
* Remote processor resource manager common resources
*
* Copyright (C) 2012 Texas Instruments, Inc.
*
* Fernando Guzman Lugo <fernando.lugo@ti.com>
* Miguel Vadillo <vadillo@ti.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/rpmsg_resmgr.h>
#include <linux/remoteproc.h>
#include "rpmsg_resmgr_common.h"
/*
 * Per-grant bookkeeping for an I2C bus handed to a remote processor:
 * the adapter id plus the adapter's parent device, which is used for
 * the runtime-PM get/put pair at request/release time.
 */
struct rprm_i2c_depot {
	u32 id;
	struct device *dev;
};

/* Per-grant bookkeeping for a remote processor obtained by name. */
struct rprm_rproc_depot {
	char name[16];
	struct rproc *rp;
};
/*
 * Grant a GPIO line to the requester.  The GPIO number itself serves as
 * the opaque handle, so no allocation is needed.
 */
static int rprm_gpio_request(void **handle, void *args, size_t len)
{
	struct rprm_gpio *gpio = args;
	int err;

	if (len != sizeof(*gpio))
		return -EINVAL;

	err = gpio_request(gpio->id, "rpmsg_resmgr");
	if (err) {
		pr_err("error providing gpio %d\n", gpio->id);
		return err;
	}

	*handle = (void *)gpio->id;
	return 0;
}
/* Release a GPIO grant; the handle encodes the GPIO number directly. */
static int rprm_gpio_release(void *handle)
{
	gpio_free((unsigned)handle);
	return 0;
}
/* Format a one-line description of a GPIO grant into buf. */
static int rprm_gpio_get_info(void *handle, char *buf, size_t len)
{
	return snprintf(buf, len, "Id:%d\n", (unsigned)handle);
}
/*
 * Grant an I2C bus to a remote processor: look up the adapter, take a
 * runtime-PM reference on its parent device so the bus stays powered,
 * and hand back a depot struct as the handle.
 */
static int rprm_i2c_request(void **handle, void *data, size_t len)
{
	struct rprm_i2c *i2c = data;
	struct i2c_adapter *adapter;
	struct rprm_i2c_depot *i2cd;
	int ret;

	if (len != sizeof *i2c)
		return -EINVAL;

	i2cd = kmalloc(sizeof *i2cd, GFP_KERNEL);
	if (!i2cd)
		return -ENOMEM;

	adapter = i2c_get_adapter(i2c->id);
	if (!adapter) {
		pr_err("could not get i2c%d adapter\n", i2c->id);
		ret = -EINVAL;
		goto err;
	}
	/* Keep only the parent device; the adapter ref can be dropped. */
	i2cd->dev = adapter->dev.parent;
	i2c_put_adapter(adapter);

	/* FIXME: don't use pm runtime framework */
	ret = pm_runtime_get_sync(i2cd->dev);
	/*
	 * pm_runtime_get_sync can return 1 in case it is already active,
	 * change it to 0 to indicate success.
	 */
	ret -= ret == 1;
	/*
	 * NOTE(review): on failure pm_runtime_get_sync() still leaves the
	 * usage count elevated; a pm_runtime_put_noidle() here would
	 * rebalance it — confirm against the pm_runtime contract.
	 */
	if (ret)
		goto err;
	i2cd->id = i2c->id;
	*handle = i2cd;

	return 0;
err:
	kfree(i2cd);
	return ret;
}
/*
 * Release an I2C grant: drop the runtime-PM reference taken at request
 * time and free the depot.
 */
static int rprm_i2c_release(void *handle)
{
	struct rprm_i2c_depot *i2cd = handle;
	int ret;

	ret = pm_runtime_put_sync(i2cd->dev);
	if (ret) {
		pr_err("failed put sync %d\n", ret);
		/*
		 * NOTE(review): returning here leaks i2cd if the caller never
		 * retries the release — verify the caller's error contract.
		 */
		return ret;
	}
	kfree(i2cd);

	return 0;
}
/* Format a one-line description of an I2C grant into buf. */
static int rprm_i2c_get_info(void *handle, char *buf, size_t len)
{
	return snprintf(buf, len, "id:%d\n",
			((struct rprm_i2c_depot *)handle)->id);
}
/*
 * Grant a remote processor by name.  Takes a reference via
 * rproc_get_by_name() and records the name for later reporting.
 * Returns 0 on success, -EINVAL on a malformed request, -ENOMEM/-ENODEV
 * on allocation or lookup failure.
 */
static int rprm_rproc_request(void **handle, void *data, size_t len)
{
	struct rprm_rproc *rproc_data = data;
	struct rprm_rproc_depot *rprocd;
	int ret;

	if (len != sizeof *rproc_data)
		return -EINVAL;

	rprocd = kmalloc(sizeof *rprocd, GFP_KERNEL);
	if (!rprocd)
		return -ENOMEM;

	rprocd->rp = rproc_get_by_name(rproc_data->name);
	if (!rprocd->rp) {
		ret = -ENODEV;
		goto error;
	}
	/*
	 * Fix: bounded copy — rprocd->name is only 16 bytes, so a plain
	 * strcpy could overflow it if the incoming name is longer.
	 */
	strlcpy(rprocd->name, rproc_data->name, sizeof(rprocd->name));
	*handle = rprocd;

	return 0;
error:
	kfree(rprocd);
	return ret;
}
/* Release a remote-processor grant taken by rprm_rproc_request(). */
static int rprm_rproc_release(void *handle)
{
	struct rprm_rproc_depot *depot = handle;

	rproc_shutdown(depot->rp);
	kfree(depot);
	return 0;
}
/* Format a one-line description of a remote-processor grant into buf. */
static int rprm_rproc_get_info(void *handle, char *buf, size_t len)
{
	return snprintf(buf, len, "Name:%s\n",
			((struct rprm_rproc_depot *)handle)->name);
}
/* Forward a latency constraint to the granted remote processor. */
static int _rproc_latency(struct device *rdev, void *handle, unsigned long val)
{
	return rproc_set_constraints(rdev,
			((struct rprm_rproc_depot *)handle)->rp,
			RPROC_CONSTRAINT_LATENCY, val);
}
/* Forward a bandwidth constraint to the granted remote processor. */
static int _rproc_bandwidth(struct device *rdev, void *handle,
						unsigned long val)
{
	return rproc_set_constraints(rdev,
			((struct rprm_rproc_depot *)handle)->rp,
			RPROC_CONSTRAINT_BANDWIDTH, val);
}
/* Forward a frequency-scaling constraint to the granted remote processor. */
static int _rproc_scale(struct device *rdev, void *handle, unsigned long val)
{
	return rproc_set_constraints(rdev,
			((struct rprm_rproc_depot *)handle)->rp,
			RPROC_CONSTRAINT_FREQUENCY, val);
}
/* GPIO resource: handle is the GPIO number itself, no constraints. */
static struct rprm_res_ops gpio_ops = {
	.request = rprm_gpio_request,
	.release = rprm_gpio_release,
	.get_info = rprm_gpio_get_info,
};

/* I2C resource: request/release manage a runtime-PM reference. */
static struct rprm_res_ops i2c_ops = {
	.request = rprm_i2c_request,
	.release = rprm_i2c_release,
	.get_info = rprm_i2c_get_info,
};

/* Remote-processor resource: additionally supports QoS constraints. */
static struct rprm_res_ops rproc_ops = {
	.request = rprm_rproc_request,
	.release = rprm_rproc_release,
	.get_info = rprm_rproc_get_info,
	.latency = _rproc_latency,
	.bandwidth = _rproc_bandwidth,
	.scale = _rproc_scale,
};

/* Resources registered with the resmgr core at module init. */
static struct rprm_res generic_res[] = {
	{
		.name = "gpio",
		.ops = &gpio_ops,
	},
	{
		.name = "i2c",
		.ops = &i2c_ops,
	},
	{
		.name = "rproc",
		.ops = &rproc_ops,
	},
};
/* Register every generic resource with the resmgr core. */
static int __init rprm_resources_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(generic_res); i++) {
		generic_res[i].owner = THIS_MODULE;
		/*
		 * NOTE(review): rprm_resource_register()'s return value is
		 * ignored — confirm whether registration can fail and needs
		 * unwinding here.
		 */
		rprm_resource_register(&generic_res[i]);
	}
	return 0;
}
/* Unregister every generic resource on module unload. */
static void __exit rprm_resources_exit(void)
{
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(generic_res); idx++)
		rprm_resource_unregister(&generic_res[idx]);
}
module_init(rprm_resources_init);
module_exit(rprm_resources_exit);
MODULE_DESCRIPTION("Remote Processor Resource Manager common resources");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
kalibera/rexp | src/library/stats/src/zeroin.c | 16 | 6539 | /*
* R : A Computer Language for Statistical Data Analysis
* Copyright (C) 1999, 2001 the R Core Team
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, a copy is available at
* http://www.r-project.org/Licenses/
*/
/* from NETLIB c/brent.shar with max.iter, add'l info and convergence
details hacked in by Peter Dalgaard */
/*************************************************************************
* C math library
* function ZEROIN - obtain a function zero within the given range
*
* Input
* double zeroin(ax,bx,f,info,Tol,Maxit)
* double ax; Root will be seeked for within
* double bx; a range [ax,bx]
* double (*f)(double x, void *info); Name of the function whose zero
* will be seeked for
* void *info; Add'l info passed to f
* double *Tol; Acceptable tolerance for the root
* value.
* May be specified as 0.0 to cause
* the program to find the root as
* accurate as possible
*
* int *Maxit; Max. iterations
*
*
* Output
* Zeroin returns an estimate for the root with accuracy
* 4*EPSILON*abs(x) + tol
* *Tol returns estimated precision
* *Maxit returns actual # of iterations, or -1 if maxit was
* reached without convergence.
*
* Algorithm
* G.Forsythe, M.Malcolm, C.Moler, Computer methods for mathematical
* computations. M., Mir, 1980, p.180 of the Russian edition
*
* The function makes use of the bisection procedure combined with
* the linear or quadric inverse interpolation.
* At every step program operates on three abscissae - a, b, and c.
* b - the last and the best approximation to the root
* a - the last but one approximation
* c - the last but one or even earlier approximation than a that
* 1) |f(b)| <= |f(c)|
* 2) f(b) and f(c) have opposite signs, i.e. b and c confine
* the root
* At every step Zeroin selects one of the two new approximations, the
* former being obtained by the bisection procedure and the latter
* resulting in the interpolation (if a,b, and c are all different
* the quadric interpolation is utilized, otherwise the linear one).
* If the latter (i.e. obtained by the interpolation) point is
* reasonable (i.e. lies within the current interval [b,c] not being
* too close to the boundaries) it is accepted. The bisection result
* is used in the other case. Therefore, the range of uncertainty is
* ensured to be reduced at least by the factor 1.6
*
************************************************************************
*
* NOTE: uniroot() --> do_zeroin2() --- in ../main/optimize.c
* ~~~~~~~~~~~~~~~~~~
*/
#include <float.h>
#include <math.h>
#include <R_ext/Applic.h>
#define EPSILON DBL_EPSILON
/* R_zeroin2() is faster for "expensive" f(), in those typical cases where
* f(ax) and f(bx) are available anyway : */
/*
 * Brent's method root finder on [ax, bx], seeded with precomputed f(ax),
 * f(bx).  On return *Tol holds the achieved precision estimate and *Maxit
 * the iteration count actually used, or -1 if the limit was reached
 * without convergence.  The statement order below implements the
 * bisection/interpolation interleaving and must not be reordered.
 */
double R_zeroin2(			/* An estimate of the root */
    double ax,				/* Left border | of the range	*/
    double bx,				/* Right border| the root is seeked*/
    double fa, double fb,		/* f(a), f(b) */
    double (*f)(double x, void *info),	/* Function under investigation	*/
    void *info,				/* Add'l info passed on to f	*/
    double *Tol,			/* Acceptable tolerance		*/
    int *Maxit)				/* Max # of iterations */
{
    double a,b,c, fc;			/* Abscissae, descr. see above,	 f(c) */
    double tol;
    int maxit;

    a = ax;  b = bx;
    c = a;   fc = fa;
    maxit = *Maxit + 1; tol = * Tol;

    /* First test if we have found a root at an endpoint */
    if(fa == 0.0) {
	*Tol = 0.0;
	*Maxit = 0;
	return a;
    }
    if(fb == 0.0) {
	*Tol = 0.0;
	*Maxit = 0;
	return b;
    }

    while(maxit--)		/* Main iteration loop	*/
    {
	double prev_step = b-a;		/* Distance from the last but one
					   to the last approximation	*/
	double tol_act;			/* Actual tolerance		*/
	double p;			/* Interpolation step is calcu- */
	double q;			/* lated in the form p/q; divi-
					 * sion operations is delayed
					 * until the last moment	*/
	double new_step;		/* Step at this iteration	*/

	if( fabs(fc) < fabs(fb) )
	{				/* Swap data for b to be the	*/
	    a = b;  b = c;  c = a;	/* best approximation		*/
	    fa=fb;  fb=fc;  fc=fa;
	}
	tol_act = 2*EPSILON*fabs(b) + tol/2;
	new_step = (c-b)/2;		/* Default step: bisection	*/

	if( fabs(new_step) <= tol_act || fb == (double)0 )
	{
	    *Maxit -= maxit;		/* Report iterations consumed	*/
	    *Tol = fabs(c-b);
	    return b;			/* Acceptable approx. is found	*/
	}

	/* Decide if the interpolation can be tried	*/
	if( fabs(prev_step) >= tol_act	/* If prev_step was large enough*/
	    && fabs(fa) > fabs(fb) ) {	/* and was in true direction,
					 * Interpolation may be tried	*/
	    register double t1,cb,t2;
	    cb = c-b;
	    if( a==c ) {		/* If we have only two distinct	*/
					/* points linear interpolation	*/
		t1 = fb/fa;		/* can only be applied		*/
		p = cb*t1;
		q = 1.0 - t1;
	    }
	    else {			/* Quadric inverse interpolation*/
		q = fa/fc;  t1 = fb/fc;	 t2 = fb/fa;
		p = t2 * ( cb*q*(q-t1) - (b-a)*(t1-1.0) );
		q = (q-1.0) * (t1-1.0) * (t2-1.0);
	    }
	    if( p>(double)0 )		/* p was calculated with the */
		q = -q;			/* opposite sign; make p positive */
	    else			/* and assign possible minus to	*/
		p = -p;			/* q				*/

	    if( p < (0.75*cb*q-fabs(tol_act*q)/2) /* If b+p/q falls in [b,c]*/
		&& p < fabs(prev_step*q/2) )	/* and isn't too large */
		new_step = p/q;			/* it is accepted
						 * If p/q is too large then the
						 * bisection procedure can
						 * reduce [b,c] range to more
						 * extent */
	}

	if( fabs(new_step) < tol_act) {	/* Adjust the step to be not less*/
	    if( new_step > (double)0 )	/* than tolerance		*/
		new_step = tol_act;
	    else
		new_step = -tol_act;
	}
	a = b;	fa = fb;			/* Save the previous approx. */
	b += new_step;	fb = (*f)(b, info);	/* Do step to a new approxim. */
	if( (fb > 0 && fc > 0) || (fb < 0 && fc < 0) ) {
	    /* Adjust c for it to have a sign opposite to that of b */
	    c = a;  fc = fa;
	}
    }
    /* failed! */
    *Tol = fabs(c-b);
    *Maxit = -1;
    return b;
}
| gpl-2.0 |
aopp/android_kernel_nvidia_shieldtablet | mm/mprotect.c | 16 | 10699 | /*
* mm/mprotect.c
*
* (C) Copyright 1994 Linus Torvalds
* (C) Copyright 2002 Christoph Hellwig
*
* Address space accounting code <alan@lxorguk.ukuu.org.uk>
* (C) Copyright 2002 Red Hat Inc, All Rights Reserved
*/
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/tegra_profiler.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#ifndef pgprot_modify
/*
 * Fallback for architectures that do not provide pgprot_modify():
 * simply adopt the new protection bits wholesale.
 */
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif
/*
 * Walk the PTEs of one PMD over [addr, end) and rewrite their protection
 * bits; returns the number of PTEs updated.  When prot_numa is set, private
 * non-NUMA ptes are marked with pte_mknuma() instead of applying newprot,
 * and *ret_all_same_node reports whether every mapped page was on a single
 * NUMA node (so the caller may promote the whole PMD).
 */
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa, bool *ret_all_same_node)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	bool all_same_node = true;
	int last_nid = -1;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			if (!prot_numa) {
				ptent = pte_modify(ptent, newprot);
				updated = true;
			} else {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (page) {
					/* Track whether all pages share one
					 * NUMA node. */
					int this_nid = page_to_nid(page);
					if (last_nid == -1)
						last_nid = this_nid;
					if (last_nid != this_nid)
						all_same_node = false;

					/* only check non-shared pages */
					if (!pte_numa(oldpte) &&
					    page_mapcount(page) == 1) {
						ptent = pte_mknuma(ptent);
						updated = true;
					}
				}
			}

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent)) {
				ptent = pte_mkwrite(ptent);
				updated = true;
			}

			if (updated)
				pages++;
			ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			/* Swap entry: only migration entries need work. */
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
			pages++;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	*ret_all_same_node = all_same_node;
	return pages;
}
#ifdef CONFIG_NUMA_BALANCING
/*
 * Mark a whole PMD prot-numa under the page-table lock, so one PMD-level
 * fault can stand in for per-PTE NUMA hinting faults.
 */
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmd)
{
	spin_lock(&mm->page_table_lock);
	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
	spin_unlock(&mm->page_table_lock);
}
#else
/* Must never be called when NUMA balancing is compiled out. */
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmd)
{
	BUG();
}
#endif /* CONFIG_NUMA_BALANCING */
/*
 * Walk the PMDs under one PUD over [addr, end), splitting or updating
 * transparent huge pages and delegating regular PMDs to
 * change_pte_range().  Returns the number of pages updated.
 */
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	bool all_same_node;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			/* A partial-PMD range forces a THP split; a full one
			 * can be changed in place. */
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else if (change_huge_pmd(vma, pmd, addr, newprot,
						 prot_numa)) {
				pages += HPAGE_PMD_NR;
				nr_huge_updates++;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		pages += change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa, &all_same_node);

		/*
		 * If we are changing protections for NUMA hinting faults then
		 * set pmd_numa if the examined pages were all on the same
		 * node. This allows a regular PMD to be handled as one fault
		 * and effectively batches the taking of the PTL
		 */
		if (prot_numa && all_same_node)
			change_pmd_protnuma(vma->vm_mm, addr, pmd);
	} while (pmd++, addr = next, addr != end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}
/*
 * Walk the PUDs under one PGD over [addr, end), delegating present
 * entries to change_pmd_range().  Returns the number of pages updated.
 */
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pudp;
	unsigned long boundary;
	unsigned long nr_updated = 0;

	pudp = pud_offset(pgd, addr);
	do {
		boundary = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pudp))
			continue;
		nr_updated += change_pmd_range(vma, pudp, addr, boundary,
				newprot, dirty_accountable, prot_numa);
	} while (pudp++, addr = boundary, addr != end);

	return nr_updated;
}
/*
 * Apply newprot across [addr, end) of a non-hugetlb VMA by walking the
 * full page-table hierarchy.  Flushes caches up front and the TLB at the
 * end (only if anything changed).  Returns the number of pages updated.
 */
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	/* Let concurrent faulters see that a flush may be outstanding. */
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}
/*
 * Change protection over [start, end) of a VMA, bracketed by MMU-notifier
 * invalidation.  Hugetlb VMAs take their dedicated path; everything else
 * goes through the generic page-table walk.  Returns pages updated.
 */
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_updated;

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		nr_updated = hugetlb_change_protection(vma, start, end,
						       newprot);
	else
		nr_updated = change_protection_range(vma, start, end, newprot,
						     dirty_accountable,
						     prot_numa);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return nr_updated;
}
/*
 * Apply newflags to [start, end) of vma: account memory for newly
 * writable private mappings, merge or split VMAs as needed, then rewrite
 * the page-table protections.  *pprev is updated to the resulting VMA.
 * Returns 0 on success or a negative errno.
 */
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mapping were accounted for
	 * even if read-only so there is no need to account for them here
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			vma_get_anon_name(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	/* No merge possible: carve the range out of the VMA. */
	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	/* Shared writable file mappings track dirtying via write faults. */
	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	quadd_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}
/*
 * mprotect(2): validate the request, then walk every VMA covering
 * [start, start+len) and apply the new protection via mprotect_fixup().
 * Holes in the range yield -ENOMEM; insufficient VM_MAY* rights -EACCES.
 */
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		/* Extend the change down to the start of the stack VMA. */
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
| gpl-2.0 |
formorer/pkg-libnetfilter-conntrack | qa/ct_stress.c | 16 | 1793 | /* simple tool to generate random of flow entries to fill hard the
conntrack table. Early drop will not save our day then, because
the table will be plenty of assured flows. If things go well,
we hit ENOMEM at some point.
You have to use conntrack_events_reliable together with this tool.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <arpa/inet.h>
#include <time.h>
#include <libnetfilter_conntrack/libnetfilter_conntrack.h>
#include <libnetfilter_conntrack/libnetfilter_conntrack_tcp.h>
/*
 * Flood the conntrack table with random assured TCP flows (twice the
 * given table size) so that early-drop cannot reclaim entries.
 * Exits EXIT_FAILURE on any setup error, EXIT_SUCCESS otherwise.
 */
int main(int argc, char *argv[])
{
	time_t t;
	int ret, i, j, r;
	long entries;
	char *end;
	struct nfct_handle *h;
	struct nf_conntrack *ct;

	if (argc < 2) {
		fprintf(stderr, "Usage: %s [ct_table_size]\n", argv[0]);
		exit(EXIT_FAILURE);
	}

	/*
	 * Fix: use strtol instead of atoi so malformed or out-of-range
	 * sizes are rejected instead of silently becoming 0.
	 */
	errno = 0;
	entries = strtol(argv[1], &end, 10);
	if (end == argv[1] || *end != '\0' || errno == ERANGE || entries <= 0) {
		fprintf(stderr, "invalid ct_table_size: %s\n", argv[1]);
		exit(EXIT_FAILURE);
	}

	/* Randomize the source/destination base so reruns add new flows. */
	time(&t);
	srandom(t);
	r = random();

	ct = nfct_new();
	if (!ct) {
		perror("nfct_new");
		/* Fix: original returned 0 (success) on this failure. */
		exit(EXIT_FAILURE);
	}

	h = nfct_open(CONNTRACK, 0);
	if (!h) {
		perror("nfct_open");
		nfct_destroy(ct);
		exit(EXIT_FAILURE);
	}

	/* Create 2x the table size so the table ends up completely full. */
	for (i = r, j = 0; i < (r + entries * 2); i++, j++) {
		nfct_set_attr_u8(ct, ATTR_L3PROTO, AF_INET);
		nfct_set_attr_u32(ct, ATTR_IPV4_SRC, inet_addr("1.1.1.1") + i);
		nfct_set_attr_u32(ct, ATTR_IPV4_DST, inet_addr("2.2.2.2") + i);
		nfct_set_attr_u8(ct, ATTR_L4PROTO, IPPROTO_TCP);
		nfct_set_attr_u16(ct, ATTR_PORT_SRC, htons(10));
		nfct_set_attr_u16(ct, ATTR_PORT_DST, htons(20));

		nfct_setobjopt(ct, NFCT_SOPT_SETUP_REPLY);

		nfct_set_attr_u8(ct, ATTR_TCP_STATE, TCP_CONNTRACK_ESTABLISHED);
		nfct_set_attr_u32(ct, ATTR_TIMEOUT, 1000);
		nfct_set_attr_u32(ct, ATTR_STATUS, IPS_ASSURED);

		if (i % 10000 == 0)
			printf("added %d flow entries\n", j);

		ret = nfct_query(h, NFCT_Q_CREATE, ct);
		if (ret == -1)
			perror("nfct_query: ");
	}

	nfct_close(h);
	nfct_destroy(ct);

	exit(EXIT_SUCCESS);
}
| gpl-2.0 |
mynew5/tc | src/server/scripts/EasternKingdoms/Stratholme/boss_ramstein_the_gorger.cpp | 16 | 3085 | /*
* Copyright (C) 2008-2014 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* ScriptData
SDName: Boss_Ramstein_The_Gorger
SD%Complete: 70
SDComment:
SDCategory: Stratholme
EndScriptData */
#include "ScriptMgr.h"
#include "ScriptedCreature.h"
#include "stratholme.h"
enum Spells
{
    SPELL_TRAMPLE  = 5568,   // self-cast roughly every 7s in UpdateAI
    SPELL_KNOCKOUT = 17307   // cast on current victim roughly every 10s
};

enum CreatureId
{
    NPC_MINDLESS_UNDEAD = 11030  // adds summoned when the boss dies
};
class boss_ramstein_the_gorger : public CreatureScript
{
public:
boss_ramstein_the_gorger() : CreatureScript("boss_ramstein_the_gorger") { }
CreatureAI* GetAI(Creature* creature) const override
{
return GetInstanceAI<boss_ramstein_the_gorgerAI>(creature);
}
struct boss_ramstein_the_gorgerAI : public ScriptedAI
{
boss_ramstein_the_gorgerAI(Creature* creature) : ScriptedAI(creature)
{
instance = me->GetInstanceScript();
}
InstanceScript* instance;
uint32 Trample_Timer;
uint32 Knockout_Timer;
void Reset() override
{
Trample_Timer = 3000;
Knockout_Timer = 12000;
}
void EnterCombat(Unit* /*who*/) override
{
}
void JustDied(Unit* /*killer*/) override
{
for (uint8 i = 0; i < 30; ++i)
{
if (Creature* mob = me->SummonCreature(NPC_MINDLESS_UNDEAD, 3969.35f+irand(-10, 10), -3391.87f+irand(-10, 10), 119.11f, 5.91f, TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 1800000))
mob->AI()->AttackStart(me->SelectNearestTarget(100.0f));
}
instance->SetData(TYPE_RAMSTEIN, DONE);
}
void UpdateAI(uint32 diff) override
{
//Return since we have no target
if (!UpdateVictim())
return;
//Trample
if (Trample_Timer <= diff)
{
DoCast(me, SPELL_TRAMPLE);
Trample_Timer = 7000;
} else Trample_Timer -= diff;
//Knockout
if (Knockout_Timer <= diff)
{
DoCastVictim(SPELL_KNOCKOUT);
Knockout_Timer = 10000;
} else Knockout_Timer -= diff;
DoMeleeAttackIfReady();
}
};
};
// Registers the Ramstein the Gorger script with the script manager.
void AddSC_boss_ramstein_the_gorger()
{
    new boss_ramstein_the_gorger();
}
| gpl-2.0 |
h4ck3rm1k3/gcc-1 | libgcc/config/libbid/_addsub_sd.c | 16 | 1625 | /* Copyright (C) 2007-2016 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "bid_conf.h"
#include "bid_functions.h"
#include "bid_gcc_intrinsics.h"
_Decimal32
__bid_addsd3 (_Decimal32 x, _Decimal32 y) {
UINT64 x64, y64, res64;
union decimal32 ux, uy, res;
ux.d = x;
uy.d = y;
x64 = __bid32_to_bid64 (ux.i);
y64 = __bid32_to_bid64 (uy.i);
res64 = __bid64_add (x64, y64);
res.i = __bid64_to_bid32 (res64);
return (res.d);
}
_Decimal32
__bid_subsd3 (_Decimal32 x, _Decimal32 y) {
UINT64 x64, y64, res64;
union decimal32 ux, uy, res;
ux.d = x;
uy.d = y;
x64 = __bid32_to_bid64 (ux.i);
y64 = __bid32_to_bid64 (uy.i);
res64 = __bid64_sub (x64, y64);
res.i = __bid64_to_bid32 (res64);
return (res.d);
}
| gpl-2.0 |
vtsingaras/hurd-glibc | resource/getrusage.c | 16 | 1156 | /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sys/resource.h>
#include <errno.h>
/* Return resource usage information on process indicated by WHO
   and put it in *USAGE.  Returns 0 for success, -1 for failure.

   Generic stub: always fails with ENOSYS.  Converted from an old-style
   (K&R) definition to an ISO C prototype so call sites are type-checked. */
int
__getrusage (enum __rusage_who who, struct rusage *usage)
{
  __set_errno (ENOSYS);
  return -1;
}
stub_warning (getrusage)
weak_alias (__getrusage, getrusage)
| gpl-2.0 |
kushan02/kushan_kernel | kernel/exit.c | 16 | 48453 | /*
* linux/kernel/exit.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
static void exit_mm(struct task_struct * tsk);
/*
 * Detach @p from the pid hashes and global task lists.  Called from
 * __exit_signal(), which documents that tasklist_lock must be held
 * write-locked.
 */
static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		/* Last thread of the group: drop group-wide pid links too. */
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);
		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
}
/*
* This function expects the tasklist_lock write-locked.
*/
/*
 * Detach @tsk from its signal handling state: stop its CPU timers, fold
 * its accounting into the group totals, unhash it and drop its sighand
 * reference.  Expects tasklist_lock write-locked (see comment above).
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *uninitialized_var(tty);

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);
	posix_cpu_timers_exit(tsk);
	if (group_dead) {
		posix_cpu_timers_exit_group(tsk);
		/* Take over the tty reference; dropped after unlocking. */
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * This can only happen if the caller is de_thread().
		 * FIXME: this is the temporary hack, we should teach
		 * posix-cpu-timers to handle this case correctly.
		 */
		if (unlikely(has_group_leader_pid(tsk)))
			posix_cpu_timers_exit_group(tsk);
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime += tsk->utime;
		sig->stime += tsk->stime;
		sig->gtime += tsk->gtime;
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		task_io_accounting_add(&sig->ioac, &tsk->ioac);
		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	}

	sig->nr_threads--;
	__unhash_process(tsk, group_dead);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}
/*
 * RCU callback queued by release_task(): drops the final reference to
 * the task_struct once pre-existing RCU readers are done with it.
 */
static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}
/*
 * Final teardown of a dead task: drop its per-user process count,
 * flush its /proc entries, detach it from the pid hashes, and free it
 * via RCU.  May loop once to also reap a zombie group leader whose
 * parent ignores SIGCHLD.
 */
void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	atomic_dec(&__task_cred(p)->user->processes);
	rcu_read_unlock();

	proc_flush_task(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}
/*
* This checks not only the pgrp, but falls back on the pid if no
* satisfactory pgrp is found. I dunno - gdb doesn't work correctly
* without this...
*
* The caller must hold rcu lock or the tasklist lock.
*/
struct pid *session_of_pgrp(struct pid *pgrp)
{
	/* Prefer a member of the process group; fall back to the pid. */
	struct task_struct *task = pid_task(pgrp, PIDTYPE_PGID);

	if (!task)
		task = pid_task(pgrp, PIDTYPE_PID);

	return task ? task_session(task) : NULL;
}
/*
* Determine if a process group is "orphaned", according to the POSIX
* definition in 2.2.2.52. Orphaned process groups are not to be affected
* by terminal-generated stop signals. Newly orphaned process groups are
* to receive a SIGHUP and a SIGCONT.
*
* "I ask you, have you ever known what it is to be an orphan?"
*/
/*
 * Returns 1 if the process group @pgrp would be orphaned once
 * @ignored_task exits: no remaining live member has a parent in a
 * different pgrp within the same session.  Caller must hold rcu lock
 * or tasklist_lock (see comment above).
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		/* Skip the exiting task, dead tasks, and children of init. */
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		/* A live outside-pgrp parent in our session keeps us alive. */
		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}
/* Check whether the current task's process group is (about to become)
 * orphaned, taking tasklist_lock for the walk. */
int is_current_pgrp_orphaned(void)
{
	int orphaned;

	read_lock(&tasklist_lock);
	orphaned = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return orphaned;
}
/* True if any member of @pgrp is in the job-control stopped state. */
static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *task;
	bool stopped = false;

	do_each_pid_task(pgrp, PIDTYPE_PGID, task) {
		if (task->signal->flags & SIGNAL_STOP_STOPPED) {
			stopped = true;
			goto out;
		}
	} while_each_pid_task(pgrp, PIDTYPE_PGID, task);
out:
	return stopped;
}
/*
* Check to see if any process groups have become orphaned as
* a result of our exiting, and if they have any stopped jobs,
* send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
*/
/*
 * If @tsk's exit (or a reparent towards @parent) newly orphans its
 * process group and that group has stopped jobs, send the group
 * SIGHUP then SIGCONT per POSIX.
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}
/**
* reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
*
* If a kernel thread is launched as a result of a system call, or if
* it ever exits, it should generally reparent itself to kthreadd so it
* isn't in the way of other processes and is correctly cleaned up on exit.
*
* The various task state such as scheduling policy and priority may have
* been inherited from a user process, so we reset them to sane values here.
*
* NOTE that reparent_to_kthreadd() gives the caller full capabilities.
*/
/*
 * Reparent the calling kernel thread to kthreadd and reset inherited
 * user-process state (nice level, rlimits, credentials).  See the
 * kernel-doc comment above.
 *
 * Fix: "&current" had been corrupted to the "¤" glyph by an HTML
 * entity mangle (&curren;); restored so the file compiles.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	current->real_parent = current->parent = kthreadd_task;
	list_move_tail(&current->sibling, &current->real_parent->children);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if (task_nice(current) < 0)
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));

	atomic_inc(&init_cred.usage);
	commit_creds(&init_cred);
	write_unlock_irq(&tasklist_lock);
}
void __set_special_pids(struct pid *pid)
{
struct task_struct *curr = current->group_leader;
if (task_session(curr) != pid)
change_pid(curr, PIDTYPE_SID, pid);
if (task_pgrp(curr) != pid)
change_pid(curr, PIDTYPE_PGID, pid);
}
/* Locked wrapper: apply __set_special_pids() under tasklist_lock. */
static void set_special_pids(struct pid *pid)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(pid);
	write_unlock_irq(&tasklist_lock);
}
/*
* Let kernel threads use this to say that they allow a certain signal.
* Must not be used if kthread was cloned with CLONE_SIGHAND.
*/
/*
 * Unblock @sig for the calling kernel thread and install a dummy
 * handler so the signal code delivers it instead of discarding it.
 * Returns 0 on success, -EINVAL for an invalid signal number.
 *
 * Fix: "&current" had been corrupted to the "¤" glyph by an HTML
 * entity mangle (&curren;); restored so the file compiles.
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	/* This is only needed for daemonize()'ed kthreads */
	sigdelset(&current->blocked, sig);
	/*
	 * Kernel threads handle their own signals. Let the signal code
	 * know it'll be handled, so that they don't get converted to
	 * SIGKILL or just silently dropped.
	 */
	current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);
/*
 * Set @sig to be ignored by the calling kernel thread.  Returns 0 on
 * success, -EINVAL for an invalid signal number.
 *
 * Fix: "&current" had been corrupted to the "¤" glyph by an HTML
 * entity mangle (&curren;); restored so the file compiles.
 */
int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);
/*
* Put all the gunge required to become a kernel thread without
* attached user resources in one place where it belongs.
*/
/*
 * Turn the calling task into a kernel thread named per the printf-style
 * @name: drop its mm, namespaces, tty and files, block all signals and
 * reparent it to kthreadd.
 *
 * Fix: "&current" had been corrupted to the "¤" glyph by an HTML
 * entity mangle (&curren;); restored so the file compiles.
 */
void daemonize(const char *name, ...)
{
	va_list args;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);
	/*
	 * We don't want to get frozen, in case system-wide hibernation
	 * or suspend transition begins right now.
	 */
	current->flags |= (PF_NOFREEZE | PF_KTHREAD);

	if (current->nsproxy != &init_nsproxy) {
		get_nsproxy(&init_nsproxy);
		switch_task_namespaces(current, &init_nsproxy);
	}
	set_special_pids(&init_struct_pid);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */
	daemonize_fs_struct();
	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);
/*
 * Close every open fd in @files.  Only called when this is the last
 * reference to the files_struct (from put_files_struct()), so no
 * locking against concurrent fd installs is needed.
 */
static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.  But use RCU to shut RCU-lockdep up.
	 */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();
	for (;;) {
		unsigned long set;
		/* Walk the open-fd bitmap one word at a time. */
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}
/*
 * Take a counted reference on @task's files_struct, or return NULL if
 * the task has already released its files.  Pair with
 * put_files_struct().
 */
struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}
/*
 * Drop a reference on @files; on the last reference, close all fds and
 * free the fd table (RCU-deferred when the table was embedded).
 */
void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		rcu_read_lock();
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
		rcu_read_unlock();
	}
}
/*
 * Replace current's files_struct with @files and drop the reference on
 * the old one.  The unlocked read of tsk->files is of the current
 * task's own pointer, which only it changes.
 */
void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}
/* Detach @tsk from its files_struct and drop the task's reference. */
void exit_files(struct task_struct *tsk)
{
	struct files_struct *files = tsk->files;

	if (!files)
		return;

	task_lock(tsk);
	tsk->files = NULL;
	task_unlock(tsk);
	put_files_struct(files);
}
#ifdef CONFIG_MM_OWNER
/*
* A task is exiting. If it owned this mm, find a new owner for the mm.
*/
/*
 * A task is exiting.   If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		mm->owner = NULL;
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else. We should not get
	 * here often
	 */
	do_each_thread(g, c) {
		if (c->mm == mm)
			goto assign_new_owner;
	} while_each_thread(g, c);

	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	mm->owner = NULL;
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		/* Candidate changed its mm while we looked: start over. */
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
/*
* Turn us into a lazy TLB process if we
* aren't already..
*/
/*
 * Detach @tsk from its mm, synchronizing with a coredump in progress,
 * and drop the task's mm reference.  On return tsk->mm is NULL but
 * tsk->active_mm keeps the mm alive as a lazy-TLB reference.
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct core_state *core_state;
	int mm_released;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;
		up_read(&mm->mmap_sem);

		self.task = tsk;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		/* Sleep until the dumper clears self.task. */
		for (;;) {
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_task_state(tsk, TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mm_released = mmput(mm);
	if (mm_released)
		set_tsk_thread_flag(tsk, TIF_MM_RELEASED);
}
/*
* When we die, we re-parent all our children, and try to:
* 1. give them to another thread in our thread group, if such a member exists
* 2. give it to the first ancestor process which prctl'd itself as a
* child_subreaper for its children (like a service manager)
* 3. give it to the init process (PID 1) in our pid namespace
*/
/*
 * Choose the task that will adopt @father's children: a live sibling
 * thread, a marked child_subreaper ancestor, or the pid namespace's
 * child_reaper.  May temporarily drop tasklist_lock (see annotations).
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *thread;

	thread = father;
	while_each_thread(father, thread) {
		if (thread->flags & PF_EXITING)
			continue;
		if (unlikely(pid_ns->child_reaper == father))
			pid_ns->child_reaper = thread;
		return thread;
	}

	if (unlikely(pid_ns->child_reaper == father)) {
		write_unlock_irq(&tasklist_lock);
		if (unlikely(pid_ns == &init_pid_ns)) {
			panic("Attempted to kill init! exitcode=0x%08x\n",
				father->signal->group_exit_code ?:
					father->exit_code);
		}

		zap_pid_ns_processes(pid_ns);
		write_lock_irq(&tasklist_lock);
		/*
		 * We can not clear ->child_reaper or leave it alone.
		 * There may by stealth EXIT_DEAD tasks on ->children,
		 * forget_original_parent() must move them somewhere.
		 */
		pid_ns->child_reaper = init_pid_ns.child_reaper;
	} else if (father->signal->has_child_subreaper) {
		struct task_struct *reaper;

		/*
		 * Find the first ancestor marked as child_subreaper.
		 * Note that the code below checks same_thread_group(reaper,
		 * pid_ns->child_reaper).  This is what we need to DTRT in a
		 * PID namespace. However we still need the check above, see
		 * http://marc.info/?l=linux-kernel&m=131385460420380
		 */
		for (reaper = father->real_parent;
		     reaper != &init_task;
		     reaper = reaper->real_parent) {
			if (same_thread_group(reaper, pid_ns->child_reaper))
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			/* Use any non-exiting thread of the subreaper. */
			thread = reaper;
			do {
				if (!(thread->flags & PF_EXITING))
					return reaper;
			} while_each_thread(reaper, thread);
		}
	}

	return pid_ns->child_reaper;
}
/*
* Any that need to be release_task'd are put on the @dead list.
*/
/*
 * Move thread-group leader @p under its (already updated) real_parent
 * and notify the new parent if @p is a zombie.  Any that need to be
 * release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	list_move_tail(&p->sibling, &p->real_parent->children);

	if (p->exit_state == EXIT_DEAD)
		return;
	/*
	 * If this is a threaded reparent there is no need to
	 * notify anyone anything has happened.
	 */
	if (same_thread_group(p->real_parent, father))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_move_tail(&p->sibling, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}
/*
 * Hand all of @father's children (and their threads) over to a new
 * reaper, then release any children that became EXIT_DEAD in the
 * process.
 */
static void forget_original_parent(struct task_struct *father)
{
	struct task_struct *p, *n, *reaper;
	LIST_HEAD(dead_children);

	write_lock_irq(&tasklist_lock);
	/*
	 * Note that exit_ptrace() and find_new_reaper() might
	 * drop tasklist_lock and reacquire it.
	 */
	exit_ptrace(father);
	reaper = find_new_reaper(father);

	list_for_each_entry_safe(p, n, &father->children, sibling) {
		struct task_struct *t = p;
		do {
			t->real_parent = reaper;
			if (t->parent == father) {
				BUG_ON(t->ptrace);
				t->parent = t->real_parent;
			}
			/* Honor prctl(PR_SET_PDEATHSIG) on the child. */
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t);
		} while_each_thread(p, t);
		reparent_leader(father, p, &dead_children);
	}
	write_unlock_irq(&tasklist_lock);

	BUG_ON(!list_empty(&father->children));

	list_for_each_entry_safe(p, n, &dead_children, sibling) {
		list_del_init(&p->sibling);
		release_task(p);
	}
}
/*
* Send signals to all our closest relatives so that they know
* to properly mourn us..
*/
/*
 * Reparent our children, notify our parent of our death, and decide
 * whether we self-reap (EXIT_DEAD) or linger as a zombie
 * (EXIT_ZOMBIE) for a wait*() caller.
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;

	/*
	 * This does two things:
	 *
  	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	forget_original_parent(tsk);
	exit_task_namespaces(tsk);

	write_lock_irq(&tasklist_lock);
	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	if (unlikely(tsk->ptrace)) {
		/* Ptraced: the tracer (not the parent) gets notified. */
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			do_notify_parent(tsk, tsk->exit_signal);
	} else {
		/* Non-leader threads are never waited for directly. */
		autoreap = true;
	}

	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	/* If the process is dead, release it - nobody will wait for it */
	if (autoreap)
		release_task(tsk);
}
#ifdef CONFIG_DEBUG_STACK_USAGE
#ifdef CONFIG_DEBUG_STACK_USAGE
/*
 * Debug aid: on task exit, record and report the smallest amount of
 * kernel stack ever left unused by any task so far.
 */
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		/* Re-checked under the lock: we still hold the record. */
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
				"left\n",
				current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif
/*
 * Terminate the current task with exit status @code: release every
 * resource class in turn (mm, ipc, files, fs, thread state), notify
 * the parent, and schedule away for the last time.  Never returns.
 */
void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(blk_needs_flush_plug(tsk));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid) || unlikely(tsk->pid==1))
		panic("Attempted to kill the idle task! or init task");

	/*
	 * If do_exit is called because this processes oopsed, it's possible
	 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
	 * continuing. Amongst other possible reasons, this is to prevent
	 * mm_release()->clear_child_tid() from writing to a user-controlled
	 * kernel address.
	 */
	set_fs(USER_DS);

	ptrace_event(PTRACE_EVENT_EXIT, code);

	validate_creds_for_do_exit(tsk);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait for ever nirwana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);  /* sets PF_EXITING */
	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	raw_spin_unlock_wait(&tsk->pi_lock);

	exit_irq_thread();

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, task_pid_nr(current),
				preempt_count());

	acct_update_integrals(tsk);
	/* sync mm's RSS info before statistics gathering */
	if (tsk->mm)
		sync_mm_rss(tsk->mm);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		/* Last thread of the group: stop group-wide timers. */
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	check_stack_usage();
	exit_thread();

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	cgroup_exit(tsk, 1);

	if (group_dead)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);

	proc_exit_connector(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	ptrace_put_breakpoints(tsk);

	exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
	task_lock(tsk);
	mpol_put(tsk->mempolicy);
	tsk->mempolicy = NULL;
	task_unlock(tsk);
#endif
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	validate_creds_for_do_exit(tsk);

	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();

	/*
	 * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
	 * when the following two conditions become true.
	 *   - There is race condition of mmap_sem (It is acquired by
	 *     exit_mm()), and
	 *   - SMI occurs before setting TASK_RUNINNG.
	 *     (or hypervisor of virtual machine switches to other guest)
	 *  As a result, we may become TASK_RUNNING after becoming TASK_DEAD
	 *
	 * To avoid it, we have to wait for releasing tsk->pi_lock which
	 * is held by try_to_wake_up()
	 */
	smp_mb();
	raw_spin_unlock_wait(&tsk->pi_lock);

	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;
	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);
/*
 * Signal @comp (if non-NULL) and then terminate the current task with
 * status @code.  Used by kernel threads whose creator waits on a
 * completion for their shutdown.
 */
void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);
/* exit(2): terminate the calling thread; only the low status byte of
 * error_code is kept, shifted into wait(2) status position. */
SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}
/*
* Take down every thread in the group. This is called by fatal signals
* as well as by sys_exit_group (below).
*/
/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			/* First thread here: record the code and kill peers. */
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}
/*
* this kills every thread in the thread group. Note that any externally
* wait4()-ing process will get the correct exit code - even if this
* thread is not the thread group leader.
*/
/* exit_group(2): terminate the whole thread group; wait4()-ers still
 * see the correct exit code even from a non-leader caller. */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}
/* Decoded arguments shared by the wait*() family implementations. */
struct wait_opts {
	enum pid_type		wo_type;	/* which pid class to match (PIDTYPE_MAX = any) */
	int			wo_flags;	/* wait flags (WEXITED, WNOWAIT, __WALL, __WCLONE, ...) */
	struct pid		*wo_pid;	/* pid to match, interpreted per wo_type */

	struct siginfo __user	*wo_info;	/* waitid(): user siginfo buffer, may be NULL */
	int __user		*wo_stat;	/* wait4(): user status word, may be NULL */
	struct rusage __user	*wo_rusage;	/* user rusage buffer, may be NULL */

	wait_queue_t		child_wait;	/* waiter entry for child-exit wakeups */
	int			notask_error;	/* result when no matching child is found */
};
/* Return @task's pid of class @type; group-wide classes (pgid, sid)
 * live on the thread-group leader. */
static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct task_struct *owner = task;

	if (type != PIDTYPE_PID)
		owner = task->group_leader;

	return owner->pids[type].pid;
}
/* Does @p match the pid criterion in @wo?  PIDTYPE_MAX means "any". */
static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	if (wo->wo_type == PIDTYPE_MAX)
		return 1;

	return task_pid_type(p, wo->wo_type) == wo->wo_pid;
}
static int eligible_child(struct wait_opts *wo, struct task_struct *p)
{
if (!eligible_pid(wo, p))
return 0;
/* Wait for all children (clone and not) if __WALL is set;
* otherwise, wait for clone children *only* if __WCLONE is
* set; otherwise, wait for non-clone children *only*. (Note:
* A "clone" child here is one that reports to its parent
* using a signal other than SIGCHLD.) */
if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
&& !(wo->wo_flags & __WALL))
return 0;
return 1;
}
/*
 * WNOWAIT path: copy rusage and siginfo for @p out to userspace
 * without reaping it, drop our task reference, and return @pid (or a
 * -errno from the copy-out).
 */
static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
				pid_t pid, uid_t uid, int why, int status)
{
	struct siginfo __user *infop;
	int retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;

	put_task_struct(p);
	infop = wo->wo_info;
	if (infop) {
		/* Fill the siginfo fields one put_user() at a time,
		 * bailing out on the first fault. */
		if (!retval)
			retval = put_user(SIGCHLD, &infop->si_signo);
		if (!retval)
			retval = put_user(0, &infop->si_errno);
		if (!retval)
			retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(pid, &infop->si_pid);
		if (!retval)
			retval = put_user(uid, &infop->si_uid);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval)
		retval = pid;
	return retval;
}
/*
* Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold
* read_lock(&tasklist_lock) on entry. If we return zero, we still hold
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	unsigned long state;
	int retval, status, traced;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = __task_cred(p)->uid;
	struct siginfo __user *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		/* Report without reaping: leave the zombie in place. */
		int exit_code = p->exit_code;
		int why;

		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(wo, p, pid, uid, why, status);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}

	traced = ptrace_reparented(p);
	/*
	 * It can be ptraced but not reparented, check
	 * thread_group_leader() to filter out sub-threads.
	 */
	if (likely(!traced) && thread_group_leader(p)) {
		struct signal_struct *psig;
		struct signal_struct *sig;
		unsigned long maxrss;
		cputime_t tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 *
		 * We use thread_group_times() to get times for the thread
		 * group, which consolidates times for all threads in the
		 * group including the group leader.
		 */
		thread_group_times(p, &tgutime, &tgstime);
		spin_lock_irq(&p->real_parent->sighand->siglock);
		psig = p->real_parent->signal;
		sig = p->signal;
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += p->gtime + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		spin_unlock_irq(&p->real_parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && wo->wo_stat)
		retval = put_user(status, wo->wo_stat);

	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;

	if (traced) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);
		/*
		 * If this is not a sub-thread, notify the parent.
		 * If parent wants a zombie, don't release it now.
		 */
		if (thread_group_leader(p) &&
		    !do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_ZOMBIE;
			p = NULL;
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);

	return retval;
}
/*
 * task_stopped_code - return a pointer to the stop/exit code relevant for
 * a wait on @p, or NULL if @p is not in a waitable stopped state.
 *
 * For ptrace waits the per-task ->exit_code applies, provided the task is
 * stopped/traced and the ptracer is not merely listening (JOBCTL_LISTENING).
 * For normal waits the group-wide ->group_exit_code applies, but only once
 * the whole group stop has completed (SIGNAL_STOP_STOPPED).
 */
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (!ptrace) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
		return NULL;
	}

	if (!task_is_stopped_or_traced(p))
		return NULL;
	if (p->jobctl & JOBCTL_LISTENING)
		return NULL;
	return &p->exit_code;
}
/**
* wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
* @wo: wait options
* @ptrace: is the wait for ptrace
* @p: task to wait for
*
* Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
*
* CONTEXT:
* read_lock(&tasklist_lock), which is released if return value is
* non-zero. Also, grabs and releases @p->sighand->siglock.
*
* RETURNS:
* 0 if wait condition didn't exist and search for other wait conditions
* should continue. Non-zero return, -errno on failure and @p's pid on
* success, implies that tasklist_lock is released and wait condition
* search should terminate.
*/
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct siginfo __user *infop;
	int retval, exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	/* Cheap lockless pre-check before taking siglock. */
	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	/* Re-check under siglock: the state may have changed meanwhile. */
	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	/* Consume the stop code unless the caller asked to leave it (WNOWAIT). */
	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = task_uid(p);
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below. We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);

	if (unlikely(wo->wo_flags & WNOWAIT))
		return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	/* wait4()-style status for a stopped child: (code << 8) | 0x7f. */
	if (!retval && wo->wo_stat)
		retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

	/* Fill in the siginfo for waitid(); each put_user may fault. */
	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)why, &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}
/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
 * the lock and this task is uninteresting. If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	int retval;
	pid_t pid;
	uid_t uid;

	/* Only report continued children when the caller asked for them. */
	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	/* Cheap lockless pre-check before taking siglock. */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held. */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	/* Consume the event unless the caller asked to leave it (WNOWAIT). */
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = task_uid(p);
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	/* Pin the task before dropping tasklist_lock; user copies may fault. */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!wo->wo_info) {
		retval = wo->wo_rusage
			? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
		put_task_struct(p);
		/* wait4()-style status for a continued child is 0xffff. */
		if (!retval && wo->wo_stat)
			retval = put_user(0xffff, wo->wo_stat);
		if (!retval)
			retval = pid;
	} else {
		retval = wait_noreap_copyout(wo, p, pid, uid,
						CLD_CONTINUED, SIGCONT);
		BUG_ON(retval == 0);
	}

	return retval;
}
/*
 * Consider @p for a wait by @parent.
 *
 * @wo: wait options; @ptrace: nonzero when @p comes from the ->ptraced
 * list rather than the natural ->children list.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	int ret = eligible_child(wo, p);
	if (!ret)
		return ret;

	ret = security_task_wait(p);
	if (unlikely(ret < 0)) {
		/*
		 * If we have not yet seen any eligible child,
		 * then let this error code replace -ECHILD.
		 * A permission error will give the user a clue
		 * to look for security policy problems, rather
		 * than for mysterious wait bugs.
		 */
		if (wo->notask_error)
			wo->notask_error = ret;
		return 0;
	}

	/* dead body doesn't have much to contribute */
	if (unlikely(p->exit_state == EXIT_DEAD)) {
		/*
		 * But do not ignore this task until the tracer does
		 * wait_task_zombie()->do_notify_parent().
		 */
		if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
			wo->notask_error = 0;
		return 0;
	}

	/* slay zombie? */
	if (p->exit_state == EXIT_ZOMBIE) {
		/*
		 * A zombie ptracee is only visible to its ptracer.
		 * Notification and reaping will be cascaded to the real
		 * parent when the ptracer detaches.
		 */
		if (likely(!ptrace) && unlikely(p->ptrace)) {
			/* it will become visible, clear notask_error */
			wo->notask_error = 0;
			return 0;
		}

		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p))
			return wait_task_zombie(wo, p);

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through. Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared. If not, subset of WSTOPPED|WCONTINUED is set,
		 * so, if there are live subthreads, there are events to
		 * wait for. If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in finite
		 * amount time once all the subthreads are released and
		 * will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies. Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * If @p is ptraced by a task in its real parent's group,
		 * hide group stop/continued state when looking at @p as
		 * the real parent; otherwise, a single stop can be
		 * reported twice as group and ptrace stops.
		 *
		 * If a ptracer wants to distinguish the two events for its
		 * own children, it should create a separate process which
		 * takes the role of real parent.
		 */
		if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
			return 0;

		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped. Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued. There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent. Don't
	 * use WCONTINUED from ptracer. You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}
/*
 * Scan the natural children of one thread @tsk of the waiting group.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * A nonzero return is final and means tasklist_lock has been dropped;
 * zero means the caller should keep searching (->notask_error then
 * tells whether any eligible child was seen).
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *child;

	list_for_each_entry(child, &tsk->children, sibling) {
		int status = wait_consider_task(wo, 0, child);

		if (status)
			return status;
	}

	return 0;
}
/*
 * Like do_wait_thread(), but walks @tsk->ptraced -- the tasks this
 * thread is tracing -- and marks them as ptrace children for
 * wait_consider_task().
 */
static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *tracee;

	list_for_each_entry(tracee, &tsk->ptraced, ptrace_entry) {
		int status = wait_consider_task(wo, 1, tracee);

		if (status)
			return status;
	}

	return 0;
}
/*
 * Wait-queue callback for wait_chldexit: wake the sleeping waiter only
 * when the task that changed state (@key) is one it could actually wait
 * for, honouring __WNOTHREAD (only the direct parent is woken then).
 */
static int child_wait_callback(wait_queue_t *wait, unsigned mode,
				int sync, void *key)
{
	struct task_struct *p = key;
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);

	if (!eligible_pid(wo, p))
		return 0;

	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}
/*
 * Wake a parent sleeping in do_wait() after child @p changed state.
 * Sync wakeup on the parent's wait_chldexit queue; the queue entry's
 * child_wait_callback() filters out waiters not interested in @p.
 */
void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
				TASK_INTERRUPTIBLE, 1, p);
}
/*
 * do_wait - core of the wait*() family of system calls.
 *
 * Registers a callback-filtered wait queue entry on the current thread
 * group's wait_chldexit queue, then repeatedly scans the children (and
 * ptrace targets) of every thread in the group until a matching wait
 * condition is found, WNOHANG applies, or a signal interrupts us.
 *
 * Fix: the two queue calls referenced "¤t" -- HTML-entity mojibake
 * of "&current" (&curren; rendered as the currency sign) -- which does
 * not compile; restored "&current".  Also fixed the "critiera" typo.
 */
static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	/*
	 * If there is nothing that can match our criteria just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	   (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
		goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		/* Natural children first, then ptrace targets. */
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		retval = ptrace_do_wait(wo, tsk);
		if (retval)
			goto end;

		/* __WNOTHREAD: only consider the calling thread's children. */
		if (wo->wo_flags & __WNOTHREAD)
			break;
	} while_each_thread(current, tsk);
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG)) {
		/* Nothing yet and we may block: sleep and rescan on wakeup. */
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}
end:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}
/*
 * waitid(2): wait for state changes in a child selected by (@which, @upid),
 * reporting the result through a siginfo structure.  Unlike wait4(), the
 * caller must explicitly request the event classes of interest
 * (WEXITED/WSTOPPED/WCONTINUED).
 */
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	/* Reject unknown flags and require at least one event class. */
	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;	/* PIDTYPE_MAX: match any child */
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (type < PIDTYPE_MAX)
		pid = find_get_pid(upid);

	wo.wo_type = type;
	wo.wo_pid = pid;
	wo.wo_flags = options;
	wo.wo_info = infop;
	wo.wo_stat = NULL;	/* waitid() has no wait4()-style status word */
	wo.wo_rusage = ru;
	ret = do_wait(&wo);

	if (ret > 0) {
		/* do_wait() already filled *infop; waitid() returns 0. */
		ret = 0;
	} else if (infop) {
		/*
		 * For a WNOHANG return, clear out all the fields
		 * we would set so the user can easily tell the
		 * difference.
		 */
		if (!ret)
			ret = put_user(0, &infop->si_signo);
		if (!ret)
			ret = put_user(0, &infop->si_errno);
		if (!ret)
			ret = put_user(0, &infop->si_code);
		if (!ret)
			ret = put_user(0, &infop->si_pid);
		if (!ret)
			ret = put_user(0, &infop->si_uid);
		if (!ret)
			ret = put_user(0, &infop->si_status);
	}

	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(5, ret, which, upid, infop, options, ru);
	return ret;
}
/*
 * wait4(2): BSD-style wait.  @upid selects the child set:
 *   -1 -> any child, < -1 -> process group -upid,
 *    0 -> caller's process group, > 0 -> exactly pid @upid.
 * WEXITED is implied; the status word is written to @stat_addr.
 */
SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	if (upid == -1)
		type = PIDTYPE_MAX;	/* any child */
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type = type;
	wo.wo_pid = pid;
	wo.wo_flags = options | WEXITED;
	wo.wo_info = NULL;	/* wait4() does not report via siginfo */
	wo.wo_stat = stat_addr;
	wo.wo_rusage = ru;
	ret = do_wait(&wo);
	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(4, ret, upid, stat_addr, options, ru);
	return ret;
}
#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
/* Thin legacy wrapper: waitpid(p, s, o) == wait4(p, s, o, NULL). */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif
| gpl-2.0 |
elpaso/QGIS | tests/src/core/testqgsmaprotation.cpp | 16 | 8284 | /***************************************************************************
testqgsmaprotation.cpp
--------------------------------------
Date : Feb 18 2015
Copyright : (C) 2015 by Sandro Santilli
Email : strk@keybit.net
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#include "qgstest.h"
#include <QObject>
#include <QString>
#include <QStringList>
#include <QApplication>
#include <QFileInfo>
#include <QDir>
//qgis includes...
#include "qgsrasterlayer.h"
#include "qgsvectorlayer.h"
#include "qgsmultibandcolorrenderer.h"
#include "qgsproject.h"
#include "qgsapplication.h"
#include "qgspallabeling.h"
#include "qgsfontutils.h"
#include "qgsrasterdataprovider.h"
#include "qgsvectorlayerlabeling.h"
//qgis unit test includes
#include "qgsmultirenderchecker.h"
/**
 * \ingroup UnitTests
 * This is a unit test for the map rotation feature
 */
class TestQgsMapRotation : public QObject
{
    Q_OBJECT
  public:
    TestQgsMapRotation()
    {
      // Root of the bundled test data, with trailing slash
      mTestDataDir = QStringLiteral( TEST_DATA_DIR ) + '/';
    }

    ~TestQgsMapRotation() override;

  private slots:
    void initTestCase();// will be called before the first testfunction is executed.
    void cleanupTestCase();// will be called after the last testfunction was executed.
    void init() {} // will be called before each testfunction is executed.
    void cleanup() {} // will be called after every testfunction.

    void rasterLayer();
    void pointsLayer();
    void linesLayer();
    // TODO: polygonsLayer

  private:
    //! Renders the current map settings and compares with the control image "expected_<fileName>"
    bool render( const QString &fileName );

    QString mTestDataDir;                     //!< Test data directory (with trailing '/')
    QgsRasterLayer *mRasterLayer = nullptr;   //!< rgb256x256.png fixture
    QgsVectorLayer *mPointsLayer = nullptr;   //!< points.shp fixture
    QgsVectorLayer *mLinesLayer = nullptr;    //!< lines_cardinals.shp fixture
    QgsMapSettings *mMapSettings = nullptr;   //!< Shared settings mutated by each test
    QString mReport;                          //!< Accumulated HTML report, written out in cleanupTestCase()
};
//runs before all tests
// Creates the shared map settings and the three fixture layers
// (raster, points, lines) used by the individual test functions.
void TestQgsMapRotation::initTestCase()
{
  // init QGIS's paths - true means that all path will be inited from prefix
  QgsApplication::init();
  QgsApplication::initQgis();

  mMapSettings = new QgsMapSettings();

  //create a raster layer that will be used in all tests...
  QFileInfo rasterFileInfo( mTestDataDir + "rgb256x256.png" );
  mRasterLayer = new QgsRasterLayer( rasterFileInfo.filePath(),
                                     rasterFileInfo.completeBaseName() );
  // bands 1/2/3 mapped to R/G/B
  QgsMultiBandColorRenderer *rasterRenderer = new QgsMultiBandColorRenderer( mRasterLayer->dataProvider(), 1, 2, 3 );
  mRasterLayer->setRenderer( rasterRenderer );

  //create a point layer that will be used in all tests...
  QString myPointsFileName = mTestDataDir + "points.shp";
  QFileInfo myPointFileInfo( myPointsFileName );
  mPointsLayer = new QgsVectorLayer( myPointFileInfo.filePath(),
                                     myPointFileInfo.completeBaseName(), QStringLiteral( "ogr" ) );

  //create a line layer that will be used in all tests...
  QString myLinesFileName = mTestDataDir + "lines_cardinals.shp";
  QFileInfo myLinesFileInfo( myLinesFileName );
  mLinesLayer = new QgsVectorLayer( myLinesFileInfo.filePath(),
                                    myLinesFileInfo.completeBaseName(), QStringLiteral( "ogr" ) );

  // This is needed to correctly set rotation center,
  // the actual size doesn't matter as QgsRenderChecker will
  // re-set it to the size of the expected image
  mMapSettings->setOutputSize( QSize( 256, 256 ) );

  mReport += QLatin1String( "<h1>Map Rotation Tests</h1>\n" );

  // Load the bundled "Bold" test font so labels render reproducibly
  QgsFontUtils::loadStandardTestFonts( QStringList() << QStringLiteral( "Bold" ) );
}
// Out-of-line defaulted destructor for the test class.
TestQgsMapRotation::~TestQgsMapRotation() = default;
//runs after all tests
// Releases the fixtures and appends the accumulated HTML report
// to the shared QGIS test report file in the temp directory.
void TestQgsMapRotation::cleanupTestCase()
{
  delete mMapSettings;
  delete mPointsLayer;
  delete mLinesLayer;
  delete mRasterLayer;
  QgsApplication::exitQgis();

  QString myReportFile = QDir::tempPath() + "/qgistest.html";
  QFile myFile( myReportFile );
  if ( myFile.open( QIODevice::WriteOnly | QIODevice::Append ) )
  {
    QTextStream myQTextStream( &myFile );
    myQTextStream << mReport;
    myFile.close();
  }
}
// Render the raster fixture rotated by +45 and -45 degrees and compare
// with the control images.
void TestQgsMapRotation::rasterLayer()
{
  mMapSettings->setLayers( QList<QgsMapLayer *>() << mRasterLayer );
  mMapSettings->setExtent( mRasterLayer->extent() );
  mMapSettings->setRotation( 45 );
  // This ensures rotated image is all visible by tweaking scale
  mMapSettings->setExtent( mMapSettings->visibleExtent() );
  QVERIFY( render( "raster+45" ) );

  mMapSettings->setRotation( -45 );
  QVERIFY( render( "raster-45" ) );
}
// Render the points fixture under several rotations, cycling through
// SVG and simple marker styles with fixed and data-defined rotation.
void TestQgsMapRotation::pointsLayer()
{
  mMapSettings->setLayers( QList<QgsMapLayer *>() << mPointsLayer );

  // SVG points, fixed (no) rotation
  QString qml = mTestDataDir + "points.qml";
  bool success = false;
  mPointsLayer->loadNamedStyle( qml, success );
  QVERIFY( success );
  mMapSettings->setExtent( QgsRectangle( -105.5, 37, -97.5, 45 ) );
  mMapSettings->setRotation( -60 );
  QVERIFY( render( "svgpoints-60" ) );

  // SVG points, data defined rotation
  qml = mTestDataDir + "points_single_symbol_datadefined_rotation.qml";
  success = false;
  mPointsLayer->loadNamedStyle( qml, success );
  QVERIFY( success );
  mMapSettings->setExtent( QgsRectangle( -116, 33, -107, 42 ) );
  mMapSettings->setRotation( 90 );
  QVERIFY( render( "svgpoints-datadefined+90" ) );

  // TODO: SVG points, fixed (defined) rotation ?

  // Simple points, data defined rotation
  qml = mTestDataDir + "points_single_symbol.qml";
  success = false;
  mPointsLayer->loadNamedStyle( qml, success );
  QVERIFY( success );
  mMapSettings->setExtent( QgsRectangle( -116, 33, -107, 42 ) );
  mMapSettings->setRotation( 90 );
  QVERIFY( render( "simplepoints-datadefined+90" ) );

  // Simple points, fixed (no) rotation
  qml = mTestDataDir + "points_graduated_symbol.qml";
  success = false;
  mPointsLayer->loadNamedStyle( qml, success );
  QVERIFY( success );
  mMapSettings->setExtent( QgsRectangle( -108, 26, -100, 34 ) );
  mMapSettings->setRotation( 30 );
  QVERIFY( render( "simplepoints+30" ) );

  // TODO: simple points, fixed (defined) rotation ?
}
// Render the cardinal lines fixture with arrowed symbols and parallel
// labels under a 45 degree rotation, using the bundled test font so the
// label rendering is reproducible.
void TestQgsMapRotation::linesLayer()
{
  mMapSettings->setLayers( QList<QgsMapLayer *>() << mLinesLayer );

  // Arrowed line with parallel labels
  QString qml = mTestDataDir + "lines_cardinals_arrowed_parallel_label.qml";
  bool success = false;
  mLinesLayer->loadNamedStyle( qml, success );

  //use test font
  QVERIFY( mLinesLayer->labeling() );
  QVERIFY( mLinesLayer->labeling()->type() == QLatin1String( "simple" ) );

  // Rebuild the labeling with the test font at size 16
  const QgsVectorLayerSimpleLabeling *labeling = static_cast<const QgsVectorLayerSimpleLabeling *>( mLinesLayer->labeling() );
  QgsPalLayerSettings palSettings = labeling->settings();
  QgsTextFormat format = palSettings.format();
  format.setFont( QgsFontUtils::getStandardTestFont( QStringLiteral( "Bold" ) ) );
  format.setSize( 16 );
  palSettings.setFormat( format );
  mLinesLayer->setLabeling( new QgsVectorLayerSimpleLabeling( palSettings ) );
  mLinesLayer->setLabelsEnabled( true );

  QVERIFY( success );
  mMapSettings->setExtent( mLinesLayer->extent() ); //QgsRectangle(-150,-150,150,150) );
  mMapSettings->setRotation( 45 );
  QVERIFY( render( "lines-parallel-label+45" ) );

  // TODO: horizontal labels
  // TODO: curved labels
}
// Renders the current mMapSettings at 96 dpi and compares the output
// against the control image "expected_<testType>" under the
// "maprotation" control path; the checker's report is appended to
// mReport.  NOTE(review): the class declaration names this parameter
// 'fileName' while the definition uses 'testType' — consider unifying.
bool TestQgsMapRotation::render( const QString &testType )
{
  mReport += "<h2>" + testType + "</h2>\n";
  mMapSettings->setOutputDpi( 96 );
  QgsMultiRenderChecker checker;
  checker.setControlPathPrefix( QStringLiteral( "maprotation" ) );
  checker.setControlName( "expected_" + testType );
  checker.setMapSettings( *mMapSettings );
  bool result = checker.runTest( testType );
  mReport += "\n\n\n" + checker.report();
  return result;
}
QGSTEST_MAIN( TestQgsMapRotation )
#include "testqgsmaprotation.moc"
| gpl-2.0 |
yueyoum/uwsgi | plugins/rawrouter/rawrouter.c | 16 | 10024 | /*
uWSGI rawrouter
*/
#include "../../uwsgi.h"
#include "../corerouter/cr.h"
/* Global rawrouter configuration: the generic corerouter state plus
   rawrouter-specific flags. */
static struct uwsgi_rawrouter {
	struct uwsgi_corerouter cr;
	int xclient;	/* when set, send an "XCLIENT ADDR=..." line to the backend */
} urr;

extern struct uwsgi_server uwsgi;

/* Per-connection state, extending the generic corerouter session. */
struct rawrouter_session {
	struct corerouter_session session;
	// XCLIENT ADDR=xxx\r\n
	struct uwsgi_buffer *xclient;	/* banner for the backend; NULL unless xclient mode */
	size_t xclient_pos;
	// placeholder for \r\n
	size_t xclient_rn;	/* set to 1 once '\r' of the backend greeting was seen */
};
/* Command-line options exposed by the rawrouter plugin.
   Fixes two typos in user-visible help strings: "spcified" -> "specified"
   and "addres" -> "address". */
static struct uwsgi_option rawrouter_options[] = {
	{"rawrouter", required_argument, 0, "run the rawrouter on the specified port", uwsgi_opt_undeferred_corerouter, &urr, 0},
	{"rawrouter-processes", required_argument, 0, "prefork the specified number of rawrouter processes", uwsgi_opt_set_int, &urr.cr.processes, 0},
	{"rawrouter-workers", required_argument, 0, "prefork the specified number of rawrouter processes", uwsgi_opt_set_int, &urr.cr.processes, 0},
	{"rawrouter-zerg", required_argument, 0, "attach the rawrouter to a zerg server", uwsgi_opt_corerouter_zerg, &urr, 0},
	{"rawrouter-use-cache", optional_argument, 0, "use uWSGI cache as hostname->server mapper for the rawrouter", uwsgi_opt_set_str, &urr.cr.use_cache, 0},
	{"rawrouter-use-pattern", required_argument, 0, "use a pattern for rawrouter hostname->server mapping", uwsgi_opt_corerouter_use_pattern, &urr, 0},
	{"rawrouter-use-base", required_argument, 0, "use a base dir for rawrouter hostname->server mapping", uwsgi_opt_corerouter_use_base, &urr, 0},
	{"rawrouter-fallback", required_argument, 0, "fallback to the specified node in case of error", uwsgi_opt_add_string_list, &urr.cr.fallback, 0},
	{"rawrouter-use-code-string", required_argument, 0, "use code string as hostname->server mapper for the rawrouter", uwsgi_opt_corerouter_cs, &urr, 0},
	{"rawrouter-use-socket", optional_argument, 0, "forward request to the specified uwsgi socket", uwsgi_opt_corerouter_use_socket, &urr, 0},
	{"rawrouter-to", required_argument, 0, "forward requests to the specified uwsgi server (you can specify it multiple times for load balancing)", uwsgi_opt_add_string_list, &urr.cr.static_nodes, 0},
	{"rawrouter-gracetime", required_argument, 0, "retry connections to dead static nodes after the specified amount of seconds", uwsgi_opt_set_int, &urr.cr.static_node_gracetime, 0},
	{"rawrouter-events", required_argument, 0, "set the maximum number of concurrent events", uwsgi_opt_set_int, &urr.cr.nevents, 0},
	{"rawrouter-max-retries", required_argument, 0, "set the maximum number of retries/fallbacks to other nodes", uwsgi_opt_set_int, &urr.cr.max_retries, 0},
	{"rawrouter-quiet", required_argument, 0, "do not report failed connections to instances", uwsgi_opt_true, &urr.cr.quiet, 0},
	{"rawrouter-cheap", no_argument, 0, "run the rawrouter in cheap mode", uwsgi_opt_true, &urr.cr.cheap, 0},
	{"rawrouter-subscription-server", required_argument, 0, "run the rawrouter subscription server on the specified address", uwsgi_opt_corerouter_ss, &urr, 0},
	{"rawrouter-subscription-slot", required_argument, 0, "*** deprecated ***", uwsgi_opt_deprecated, (void *) "useless thanks to the new implementation", 0},
	{"rawrouter-timeout", required_argument, 0, "set rawrouter timeout", uwsgi_opt_set_int, &urr.cr.socket_timeout, 0},
	{"rawrouter-stats", required_argument, 0, "run the rawrouter stats server", uwsgi_opt_set_str, &urr.cr.stats_server, 0},
	{"rawrouter-stats-server", required_argument, 0, "run the rawrouter stats server", uwsgi_opt_set_str, &urr.cr.stats_server, 0},
	{"rawrouter-ss", required_argument, 0, "run the rawrouter stats server", uwsgi_opt_set_str, &urr.cr.stats_server, 0},
	{"rawrouter-harakiri", required_argument, 0, "enable rawrouter harakiri", uwsgi_opt_set_int, &urr.cr.harakiri, 0},
	{"rawrouter-xclient", no_argument, 0, "use the xclient protocol to pass the client address", uwsgi_opt_true, &urr.xclient, 0},
	{"rawrouter-buffer-size", required_argument, 0, "set internal buffer size (default: page size)", uwsgi_opt_set_64bit, &urr.cr.buffer_size, 0},
	{0, 0, 0, 0, 0, 0, 0},
};
/* Flush buffered client data towards the backend instance. */
static ssize_t rr_instance_write(struct corerouter_peer *peer) {
	ssize_t wrote = cr_write(peer, "rr_instance_write()");

	/* empty write: nothing to do, the core handles teardown */
	if (wrote == 0)
		return 0;

	/* whole chunk flushed: rewind the buffer and re-arm the default
	   hooks so reading resumes on client and instance sides */
	if (cr_write_complete(peer)) {
		peer->out->pos = 0;
		cr_reset_hooks(peer);
	}

	return wrote;
}
/* Flush buffered backend data towards the client. */
static ssize_t rr_write(struct corerouter_peer *main_peer) {
	ssize_t wrote = cr_write(main_peer, "rr_write()");

	/* empty write: nothing to do */
	if (wrote == 0)
		return 0;

	/* response chunk fully sent: rewind and go back to reading */
	if (cr_write_complete(main_peer)) {
		main_peer->out->pos = 0;
		cr_reset_hooks(main_peer);
	}

	return wrote;
}
/* Data arrived from the backend: hand it to the client side. */
static ssize_t rr_instance_read(struct corerouter_peer *peer) {
	ssize_t rlen = cr_read(peer, "rr_instance_read()");

	if (rlen == 0)
		return 0;

	/* point the client peer at our input buffer and schedule the write */
	peer->session->main_peer->out = peer->in;
	peer->session->main_peer->out_pos = 0;
	cr_write_to_main(peer, rr_write);

	return rlen;
}
// write the xclient banner
// Sends the "XCLIENT ADDR=<ip>\r\n" line to the backend.  Once the whole
// banner is flushed, either forward to the client any data that was read
// after the backend greeting, or fall back to the plain proxy hooks.
static ssize_t rr_xclient_write(struct corerouter_peer *peer) {
	struct corerouter_session *cs = peer->session;
	struct rawrouter_session *rr = (struct rawrouter_session *) cs;
	ssize_t len = cr_write_buf(peer, rr->xclient, "rr_xclient_write()");
	if (!len) return 0;
	if (cr_write_complete_buf(peer, rr->xclient)) {
		if (peer->session->main_peer->out_pos > 0) {
			// (eventually) send previous data
			peer->last_hook_read = rr_instance_read;
			cr_write_to_main(peer, rr_write);
		}
		else {
			// reset to standard behaviour
			peer->in->pos = 0;
			cr_reset_hooks_and_read(peer, rr_instance_read);
		}
	}
	return len;
}
// read the first line from the backend and skip it
// In xclient mode the backend greeting (a single "...\r\n" line) must be
// consumed before our XCLIENT banner is sent; any bytes read after the
// greeting are queued so they can be forwarded to the client later.
static ssize_t rr_xclient_read(struct corerouter_peer *peer) {
	struct corerouter_session *cs = peer->session;
	struct rawrouter_session *rr = (struct rawrouter_session *) cs;
	ssize_t len = cr_read(peer, "rr_xclient_read()");
	if (!len) return 0;
	// ptr points at the start of the chunk just read
	char *ptr = (peer->in->buf + peer->in->pos) - len;
	ssize_t i;
	for(i=0;i<len;i++) {
		if (rr->xclient_rn == 1) {
			// after '\r' only '\n' is acceptable
			if (ptr[i] != '\n') {
				return -1;
			}
			// banner received (skip it, will be sent later)
			size_t remains = len - (i+1);
			if (remains > 0) {
				// queue the bytes following the greeting for the client
				peer->session->main_peer->out = peer->in;
				peer->session->main_peer->out_pos = (peer->in->pos - remains) ;
			}
			cr_write_to_backend(peer, rr_xclient_write);
			return len;
		}
		else if (ptr[i] == '\r') {
			// remember we saw '\r'; '\n' must follow (possibly in the next read)
			rr->xclient_rn = 1;
		}
	}
	return len;
}
/* The backend connection succeeded: disable further retries and install
   the appropriate read hook (xclient greeting handling first, when a
   banner was prepared for this session). */
static ssize_t rr_instance_connected(struct corerouter_peer *peer) {
	struct rawrouter_session *rr = (struct rawrouter_session *) peer->session;

	cr_peer_connected(peer, "rr_instance_connected()");

	/* from now on a failure must not trigger a fallback node */
	peer->can_retry = 0;

	cr_reset_hooks_and_read(peer, rr->xclient ? rr_xclient_read : rr_instance_read);
	return 1;
}
/* Data arrived from the client: hand it to the backend peer. */
static ssize_t rr_read(struct corerouter_peer *main_peer) {
	ssize_t rlen = cr_read(main_peer, "rr_read()");

	if (rlen == 0)
		return 0;

	/* point the backend peer at the client input buffer and schedule */
	struct corerouter_peer *backend = main_peer->session->peers;
	backend->out = main_peer->in;
	backend->out_pos = 0;
	cr_write_to_backend(backend, rr_instance_write);

	return rlen;
}
// retry the connection
// Called when a connect attempt failed and peer->can_retry is set: keep
// the already-mapped backend if there is one, otherwise run the mapper
// again; returns -1 when no usable backend address is available.
static int rr_retry(struct corerouter_peer *peer) {

	struct corerouter_session *cs = peer->session;
	struct uwsgi_corerouter *ucr = cs->corerouter;

	// a backend is already mapped: just reconnect to it
	if (peer->instance_address_len > 0) goto retry;

	if (ucr->mapper(ucr, peer)) {
		return -1;
	}

	if (peer->instance_address_len == 0) {
		return -1;
	}

retry:
	// start async connect (again)
	cr_connect(peer, rr_instance_connected);
	return 0;
}
// session close hook: release the XCLIENT banner buffer, which is only
// allocated when the session was created in xclient mode
static void rr_session_close(struct corerouter_session *cs) {
	struct rawrouter_session *rr = (struct rawrouter_session *) cs;
	if (rr->xclient) {
		uwsgi_buffer_destroy(rr->xclient);
	}
}
// allocate a new session
// Called by the corerouter for every accepted client connection: installs
// the rawrouter hooks, optionally builds the XCLIENT banner, maps the
// request to a backend and starts the asynchronous connect.
// Returns 0 on success, -1 on failure (session is aborted).
static int rawrouter_alloc_session(struct uwsgi_corerouter *ucr, struct uwsgi_gateway_socket *ugs, struct corerouter_session *cs, struct sockaddr *sa, socklen_t s_len) {
	// set default read hook
	cs->main_peer->last_hook_read = rr_read;

	// set close hook
	cs->close = rr_session_close;
	// set retry hook
	cs->retry = rr_retry;

	// the XCLIENT banner is only built for IPv4 clients
	if (sa && sa->sa_family == AF_INET) {
		if (urr.xclient) {
			struct rawrouter_session *rr = (struct rawrouter_session *) cs;
			// room for "XCLIENT ADDR=" (13 bytes) + address + "\r\n"
			rr->xclient = uwsgi_buffer_new(13+sizeof(cs->client_address)+2);
			if (uwsgi_buffer_append(rr->xclient, "XCLIENT ADDR=", 13)) return -1;
			if (uwsgi_buffer_append(rr->xclient, cs->client_address, strlen(cs->client_address))) return -1;
			if (uwsgi_buffer_append(rr->xclient, "\r\n", 2)) return -1;
		}
	}

	// add a new peer
	struct corerouter_peer *peer = uwsgi_cr_peer_add(cs);

	// set default peer hook
	peer->last_hook_read = rr_instance_read;

	// use the address as hostname
	memcpy(peer->key, cs->ugs->name, cs->ugs->name_len);
	peer->key_len = cs->ugs->name_len;

	// the mapper hook
	if (ucr->mapper(ucr, peer)) {
		return -1;
	}

	if (peer->instance_address_len == 0) {
		return -1;
	}

	// allow rr_retry() to pick a fallback while connecting
	peer->can_retry = 1;
	cr_connect(peer, rr_instance_connected);
	return 0;
}
// plugin init hook: register the session size/allocator and start
// the generic corerouter machinery
static int rawrouter_init() {
	urr.cr.session_size = sizeof(struct rawrouter_session);
	urr.cr.alloc_session = rawrouter_alloc_session;
	uwsgi_corerouter_init((struct uwsgi_corerouter *) &urr);

	return 0;
}
// on_load hook: set the corerouter display names
static void rawrouter_setup() {
	urr.cr.name = uwsgi_str("uWSGI rawrouter");
	urr.cr.short_name = uwsgi_str("rawrouter");
}
// plugin descriptor exported to the uWSGI core
struct uwsgi_plugin rawrouter_plugin = {
	.name = "rawrouter",
	.options = rawrouter_options,
	.init = rawrouter_init,
	.on_load = rawrouter_setup
};
| gpl-2.0 |
rbheromax/src_glibc | localedata/tst-mbswcs4.c | 16 | 2050 | /* Test restarting behaviour of mbsrtowcs.
Copyright (C) 2000-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <stdio.h>
#include <string.h>
#include <wchar.h>
#include <locale.h>
/* show(expr, nexp, wcexp, end) - evaluate one conversion step and check
   the returned count, the converted wide character and the final source
   pointer against the expected values; on mismatch print the expectation
   and set 'result'.  Relies on n, wc, src, buf and result being in scope
   at the expansion site.  NOTE(review): 'src - (const char *) buf' and
   '(end) - buf' are pointer differences printed with %d - fine for this
   6-byte buffer, but ptrdiff_t formatting would be stricter. */
#define show(expr, nexp, wcexp, end) \
  n = expr; \
  printf (#expr " -> %Zd", n); \
  printf (", wc = %lu, src = buf+%d", (unsigned long int) wc, \
	  src - (const char *) buf); \
  if (n != (size_t) nexp || wc != wcexp || src != (const char *) (end)) \
    { \
      printf (", expected %Zd and %lu and buf+%d", nexp, \
	      (unsigned long int) wcexp, (end) - buf); \
      result = 1; \
    } \
  putc ('\n', stdout)
int
main (void)
{
  /* UTF-8 bytes: '%' (1 byte), then a 3-byte and a 2-byte sequence. */
  unsigned char buf[6] = { 0x25, 0xe2, 0x82, 0xac, 0xce, 0xbb };
  mbstate_t state;
  const char *src;
  wchar_t wc = 42;
  size_t n;
  int result = 0;
  const char *used_locale;

  setlocale (LC_CTYPE,"de_DE.UTF-8");
  /* Double check. */
  used_locale = setlocale (LC_CTYPE, NULL);
  printf ("used locale: \"%s\"\n", used_locale);
  /* Fails (nonzero) if the UTF-8 locale could not be activated. */
  result = strcmp (used_locale, "de_DE.UTF-8");

  memset (&state, '\0', sizeof (state));

  src = (const char *) buf;
  /* Each restartable call converts exactly one wide character and must
     advance src past the whole multibyte sequence: expected values are
     37, 8364 and 955 with src at buf+1, buf+4 and buf+6. */
  show (mbsrtowcs (&wc, &src, 1, &state), 1, 37, buf + 1);
  show (mbsrtowcs (&wc, &src, 1, &state), 1, 8364, buf + 4);
  show (mbsrtowcs (&wc, &src, 1, &state), 1, 955, buf + 6);

  return result;
}
| gpl-2.0 |
jyunyen/Nexus7_Kernal | drivers/usb/host/ehci-fsl.c | 272 | 16599 | /*
* Copyright 2005-2009 MontaVista Software, Inc.
* Copyright 2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided
* by Hunter Wu.
* Power Management support by Dave Liu <daveliu@freescale.com>,
* Jerry Huang <Chang-Ming.Huang@freescale.com> and
* Anton Vorontsov <avorontsov@ru.mvista.com>.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include "ehci-fsl.h"
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
/**
* usb_hcd_fsl_probe - initialize FSL-based HCDs
* @drvier: Driver to be used for this HCD
* @pdev: USB Host Controller being probed
* Context: !in_interrupt()
*
* Allocates basic resources for this USB host controller.
*
*/
static int usb_hcd_fsl_probe(const struct hc_driver *driver,
			     struct platform_device *pdev)
{
	struct fsl_usb2_platform_data *pdata;
	struct usb_hcd *hcd;
	struct resource *res;
	int irq;
	int retval;

	pr_debug("initializing FSL-SOC USB Controller\n");

	/* Need platform data for setup */
	pdata = (struct fsl_usb2_platform_data *)pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev,
			"No platform data for %s.\n", dev_name(&pdev->dev));
		return -ENODEV;
	}

	/*
	 * This is a host mode driver, verify that we're supposed to be
	 * in host mode.
	 */
	if (!((pdata->operating_mode == FSL_USB2_DR_HOST) ||
	      (pdata->operating_mode == FSL_USB2_MPH_HOST) ||
	      (pdata->operating_mode == FSL_USB2_DR_OTG))) {
		dev_err(&pdev->dev,
			"Non Host Mode configured for %s. Wrong driver linked.\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}

	/* The interrupt line must exist before we create the HCD. */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"Found HC with no IRQ. Check %s setup!\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}
	irq = res->start;

	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		retval = -ENOMEM;
		goto err1;
	}

	/* Map and claim the controller's register window. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"Found HC with no register addr. Check %s setup!\n",
			dev_name(&pdev->dev));
		retval = -ENODEV;
		goto err2;
	}
	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
				driver->description)) {
		dev_dbg(&pdev->dev, "controller already in use\n");
		retval = -EBUSY;
		goto err2;
	}
	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);

	if (hcd->regs == NULL) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		retval = -EFAULT;
		goto err3;
	}
	pdata->regs = hcd->regs;

	/*
	 * do platform specific init: check the clock, grab/config pins, etc.
	 */
	if (pdata->init && pdata->init(pdev)) {
		retval = -ENODEV;
		goto err4;
	}

	/* Enable USB controller, 83xx or 8536 */
	if (pdata->have_sysif_regs)
		setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);

	/* Don't need to set host mode here. It will be done by tdi_reset() */

	retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
	if (retval != 0)
		goto err4;
	return retval;

	/* Unwind in reverse order of acquisition. */
      err4:
	iounmap(hcd->regs);
      err3:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
      err2:
	usb_put_hcd(hcd);
      err1:
	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
	/* NOTE(review): pdata->exit() is also reached via err1/err2/err3,
	   i.e. even when pdata->init() was never called -- confirm the
	   platform exit hooks tolerate that.  */
	if (pdata->exit)
		pdata->exit(pdev);
	return retval;
}
/* may be called without controller electrically present */
/* may be called with controller, bus, and devices active */
/**
* usb_hcd_fsl_remove - shutdown processing for FSL-based HCDs
* @dev: USB Host Controller being removed
* Context: !in_interrupt()
*
* Reverses the effect of usb_hcd_fsl_probe().
*
*/
static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
			       struct platform_device *pdev)
{
	struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;

	/* Stop the HCD first so no interrupts/URBs touch the hardware
	 * while we tear down clocks, pins and mappings below. */
	usb_remove_hcd(hcd);

	/*
	 * do platform specific un-initialization:
	 * release iomux pins, disable clock, etc.
	 */
	if (pdata->exit)
		pdata->exit(pdev);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
}
/*
 * Program the PHY interface type (PTS/PTW bits) of one port's PORTSC
 * register according to the platform-selected PHY mode.
 */
static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
			       enum fsl_usb2_phy_modes phy_mode,
			       unsigned int port_offset)
{
	u32 val;

	val = ehci_readl(ehci, &ehci->regs->port_status[port_offset]);
	/* Start from a clean PHY-type selection. */
	val &= ~(PORT_PTS_MSK | PORT_PTS_PTW);

	if (phy_mode == FSL_USB2_PHY_ULPI)
		val |= PORT_PTS_ULPI;
	else if (phy_mode == FSL_USB2_PHY_SERIAL)
		val |= PORT_PTS_SERIAL;
	else if (phy_mode == FSL_USB2_PHY_UTMI_WIDE)
		/* wide UTMI is UTMI plus the 16-bit data-width bit */
		val |= PORT_PTS_PTW | PORT_PTS_UTMI;
	else if (phy_mode == FSL_USB2_PHY_UTMI)
		val |= PORT_PTS_UTMI;
	/* FSL_USB2_PHY_NONE: leave the selection cleared */

	ehci_writel(ehci, val, &ehci->regs->port_status[port_offset]);
}
static void ehci_fsl_usb_setup(struct ehci_hcd *ehci)
{
	struct usb_hcd *hcd = ehci_to_hcd(ehci);
	struct fsl_usb2_platform_data *pdata;
	void __iomem *non_ehci = hcd->regs;	/* SoC glue regs sit below the EHCI block */
	u32 temp;

	pdata = hcd->self.controller->platform_data;

	/* Enable PHY interface in the control reg. */
	if (pdata->have_sysif_regs) {
		temp = in_be32(non_ehci + FSL_SOC_USB_CTRL);
		out_be32(non_ehci + FSL_SOC_USB_CTRL, temp | 0x00000004);
		out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0000001b);
	}

#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	/*
	 * Turn on cache snooping hardware, since some PowerPC platforms
	 * wholly rely on hardware to deal with cache coherent
	 */

	/* Setup Snooping for all the 4GB space */
	/* SNOOP1 starts from 0x0, size 2G */
	out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0 | SNOOP_SIZE_2GB);
	/* SNOOP2 starts from 0x80000000, size 2G */
	out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB);
#endif

	/* Dual-role controllers drive a single port (port 0). */
	if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
			(pdata->operating_mode == FSL_USB2_DR_OTG))
		ehci_fsl_setup_phy(ehci, pdata->phy_mode, 0);

	if (pdata->operating_mode == FSL_USB2_MPH_HOST) {
		unsigned int chip, rev, svr;

		svr = mfspr(SPRN_SVR);
		chip = svr >> 16;
		rev = (svr >> 4) & 0xf;

		/* Deal with USB Erratum #14 on MPC834x Rev 1.0 & 1.1 chips */
		if ((rev == 1) && (chip >= 0x8050) && (chip <= 0x8055))
			ehci->has_fsl_port_bug = 1;

		if (pdata->port_enables & FSL_USB2_PORT0_ENABLED)
			ehci_fsl_setup_phy(ehci, pdata->phy_mode, 0);
		if (pdata->port_enables & FSL_USB2_PORT1_ENABLED)
			ehci_fsl_setup_phy(ehci, pdata->phy_mode, 1);
	}

	/* Bus priority / aging thresholds differ per SoC family. */
	if (pdata->have_sysif_regs) {
#ifdef CONFIG_PPC_85xx
		out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x00000008);
		out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000080);
#else
		out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x0000000c);
		out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000040);
#endif
		out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001);
	}
}
/* called after powerup, by probe or system-pm "wakeup" */
static int ehci_fsl_reinit(struct ehci_hcd *ehci)
{
	/* Re-program SoC glue + PHY registers, then power up port 0. */
	ehci_fsl_usb_setup(ehci);
	ehci_port_power(ehci, 0);

	return 0;
}
/* called during probe() after chip reset completes */
static int ehci_fsl_setup(struct usb_hcd *hcd)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	int retval;
	struct fsl_usb2_platform_data *pdata;

	pdata = hcd->self.controller->platform_data;
	/* Endianness of descriptors/MMIO is board-specific on FSL parts. */
	ehci->big_endian_desc = pdata->big_endian_desc;
	ehci->big_endian_mmio = pdata->big_endian_mmio;

	/* EHCI registers start at offset 0x100 */
	ehci->caps = hcd->regs + 0x100;
	ehci->regs = hcd->regs + 0x100 +
		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

	hcd->has_tt = 1;

	retval = ehci_halt(ehci);
	if (retval)
		return retval;

	/* data structure init */
	retval = ehci_init(hcd);
	if (retval)
		return retval;

	/* Serial bus release number: USB 2.0. */
	ehci->sbrn = 0x20;

	ehci_reset(ehci);

	retval = ehci_fsl_reinit(ehci);
	return retval;
}
/* Per-controller state: generic EHCI state plus FSL-specific extras. */
struct ehci_fsl {
	struct ehci_hcd ehci;

#ifdef CONFIG_PM
	/* Saved USB PHY settings, need to restore after deep sleep. */
	u32 usb_ctrl;
#endif
};
#ifdef CONFIG_PM
#ifdef CONFIG_PPC_MPC512x
static int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	struct fsl_usb2_platform_data *pdata = dev->platform_data;
	u32 tmp;

#ifdef DEBUG
	u32 mode = ehci_readl(ehci, hcd->regs + FSL_SOC_USB_USBMODE);
	mode &= USBMODE_CM_MASK;
	tmp = ehci_readl(ehci, hcd->regs + 0x140);	/* usbcmd */

	dev_dbg(dev, "suspend=%d already_suspended=%d "
		"mode=%d usbcmd %08x\n", pdata->suspended,
		pdata->already_suspended, mode, tmp);
#endif

	/*
	 * If the controller is already suspended, then this must be a
	 * PM suspend. Remember this fact, so that we will leave the
	 * controller suspended at PM resume time.
	 */
	if (pdata->suspended) {
		dev_dbg(dev, "already suspended, leaving early\n");
		pdata->already_suspended = 1;
		return 0;
	}

	dev_dbg(dev, "suspending...\n");

	hcd->state = HC_STATE_SUSPENDED;
	dev->power.power_state = PMSG_SUSPEND;

	/* ignore non-host interrupts */
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);

	/* stop the controller */
	tmp = ehci_readl(ehci, &ehci->regs->command);
	tmp &= ~CMD_RUN;
	ehci_writel(ehci, tmp, &ehci->regs->command);

	/* save EHCI registers (MPC512x loses them in deep sleep) */
	pdata->pm_command = ehci_readl(ehci, &ehci->regs->command);
	pdata->pm_command &= ~CMD_RUN;
	pdata->pm_status  = ehci_readl(ehci, &ehci->regs->status);
	pdata->pm_intr_enable  = ehci_readl(ehci, &ehci->regs->intr_enable);
	pdata->pm_frame_index  = ehci_readl(ehci, &ehci->regs->frame_index);
	pdata->pm_segment  = ehci_readl(ehci, &ehci->regs->segment);
	pdata->pm_frame_list  = ehci_readl(ehci, &ehci->regs->frame_list);
	pdata->pm_async_next  = ehci_readl(ehci, &ehci->regs->async_next);
	pdata->pm_configured_flag  =
		ehci_readl(ehci, &ehci->regs->configured_flag);
	pdata->pm_portsc = ehci_readl(ehci, &ehci->regs->port_status[0]);
	pdata->pm_usbgenctrl = ehci_readl(ehci,
					  hcd->regs + FSL_SOC_USB_USBGENCTRL);

	/* clear the W1C bits */
	pdata->pm_portsc &= cpu_to_hc32(ehci, ~PORT_RWC_BITS);

	pdata->suspended = 1;

	/* clear PP to cut power to the port */
	tmp = ehci_readl(ehci, &ehci->regs->port_status[0]);
	tmp &= ~PORT_POWER;
	ehci_writel(ehci, tmp, &ehci->regs->port_status[0]);

	return 0;
}
static int ehci_fsl_mpc512x_drv_resume(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	struct fsl_usb2_platform_data *pdata = dev->platform_data;
	u32 tmp;

	dev_dbg(dev, "suspend=%d already_suspended=%d\n",
		pdata->suspended, pdata->already_suspended);

	/*
	 * If the controller was already suspended at suspend time,
	 * then don't resume it now.
	 */
	if (pdata->already_suspended) {
		dev_dbg(dev, "already suspended, leaving early\n");
		pdata->already_suspended = 0;
		return 0;
	}

	if (!pdata->suspended) {
		dev_dbg(dev, "not suspended, leaving early\n");
		return 0;
	}

	pdata->suspended = 0;

	dev_dbg(dev, "resuming...\n");

	/* set host mode */
	tmp = USBMODE_CM_HOST | (pdata->es ? USBMODE_ES : 0);
	ehci_writel(ehci, tmp, hcd->regs + FSL_SOC_USB_USBMODE);

	/* non-EHCI glue registers must be back before the core regs */
	ehci_writel(ehci, pdata->pm_usbgenctrl,
		    hcd->regs + FSL_SOC_USB_USBGENCTRL);
	ehci_writel(ehci, ISIPHYCTRL_PXE | ISIPHYCTRL_PHYE,
		    hcd->regs + FSL_SOC_USB_ISIPHYCTRL);

	/* restore EHCI registers saved at suspend time */
	ehci_writel(ehci, pdata->pm_command, &ehci->regs->command);
	ehci_writel(ehci, pdata->pm_intr_enable, &ehci->regs->intr_enable);
	ehci_writel(ehci, pdata->pm_frame_index, &ehci->regs->frame_index);
	ehci_writel(ehci, pdata->pm_segment, &ehci->regs->segment);
	ehci_writel(ehci, pdata->pm_frame_list, &ehci->regs->frame_list);
	ehci_writel(ehci, pdata->pm_async_next, &ehci->regs->async_next);
	ehci_writel(ehci, pdata->pm_configured_flag,
		    &ehci->regs->configured_flag);
	ehci_writel(ehci, pdata->pm_portsc, &ehci->regs->port_status[0]);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	hcd->state = HC_STATE_RUNNING;
	dev->power.power_state = PMSG_ON;

	/* restart the controller (CMD_RUN was masked out on save) */
	tmp = ehci_readl(ehci, &ehci->regs->command);
	tmp |= CMD_RUN;
	ehci_writel(ehci, tmp, &ehci->regs->command);

	usb_hcd_resume_root_hub(hcd);

	return 0;
}
#else
/* MPC512x support not compiled in: PM hooks become no-ops. */
static inline int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
{
	return 0;
}

static inline int ehci_fsl_mpc512x_drv_resume(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_PPC_MPC512x */
/* Map a generic HCD back to its enclosing FSL-private structure. */
static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
{
	return container_of(hcd_to_ehci(hcd), struct ehci_fsl, ehci);
}
static int ehci_fsl_drv_suspend(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
	void __iomem *non_ehci = hcd->regs;

	/* MPC5121 has its own save/restore path. */
	if (of_device_is_compatible(dev->parent->of_node,
				    "fsl,mpc5121-usb2-dr")) {
		return ehci_fsl_mpc512x_drv_suspend(dev);
	}

	ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
			device_may_wakeup(dev));
	if (!fsl_deep_sleep())
		return 0;

	/* Deep sleep loses the PHY setup; stash it for resume. */
	ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL);
	return 0;
}
static int ehci_fsl_drv_resume(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	void __iomem *non_ehci = hcd->regs;

	/* MPC5121 has its own save/restore path. */
	if (of_device_is_compatible(dev->parent->of_node,
				    "fsl,mpc5121-usb2-dr")) {
		return ehci_fsl_mpc512x_drv_resume(dev);
	}

	ehci_prepare_ports_for_controller_resume(ehci);
	if (!fsl_deep_sleep())
		return 0;

	usb_root_hub_lost_power(hcd->self.root_hub);

	/* Restore USB PHY settings and enable the controller. */
	out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl);

	ehci_reset(ehci);
	ehci_fsl_reinit(ehci);

	return 0;
}
/* Hibernation restore: register state is gone, tell USB core that the
 * root hub lost power so devices get re-enumerated. */
static int ehci_fsl_drv_restore(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	usb_root_hub_lost_power(hcd->self.root_hub);
	return 0;
}
static struct dev_pm_ops ehci_fsl_pm_ops = {
.suspend = ehci_fsl_drv_suspend,
.resume = ehci_fsl_drv_resume,
.restore = ehci_fsl_drv_restore,
};
#define EHCI_FSL_PM_OPS (&ehci_fsl_pm_ops)
#else
#define EHCI_FSL_PM_OPS NULL
#endif /* CONFIG_PM */
/* Host-controller operations: generic EHCI handlers everywhere except
 * .reset, which runs the FSL-specific chip setup. */
static const struct hc_driver ehci_fsl_hc_driver = {
	.description = hcd_name,
	.product_desc = "Freescale On-Chip EHCI Host Controller",
	.hcd_priv_size = sizeof(struct ehci_fsl),
	/*
	 * generic hardware linkage
	 */
	.irq = ehci_irq,
	.flags = HCD_USB2 | HCD_MEMORY,
	/*
	 * basic lifecycle operations
	 */
	.reset = ehci_fsl_setup,
	.start = ehci_run,
	.stop = ehci_stop,
	.shutdown = ehci_shutdown,
	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue = ehci_urb_enqueue,
	.urb_dequeue = ehci_urb_dequeue,
	.endpoint_disable = ehci_endpoint_disable,
	.endpoint_reset = ehci_endpoint_reset,
	/*
	 * scheduling support
	 */
	.get_frame_number = ehci_get_frame,
	/*
	 * root hub support
	 */
	.hub_status_data = ehci_hub_status_data,
	.hub_control = ehci_hub_control,
	.bus_suspend = ehci_bus_suspend,
	.bus_resume = ehci_bus_resume,
	.relinquish_port = ehci_relinquish_port,
	.port_handed_over = ehci_port_handed_over,
	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
/* Platform probe: delegate all real work to usb_hcd_fsl_probe(). */
static int ehci_fsl_drv_probe(struct platform_device *pdev)
{
	/* FIXME we only want one one probe() not two */
	return usb_disabled() ? -ENODEV :
		usb_hcd_fsl_probe(&ehci_fsl_hc_driver, pdev);
}
/* Platform remove: undo ehci_fsl_drv_probe(). */
static int ehci_fsl_drv_remove(struct platform_device *pdev)
{
	/* FIXME we only want one one remove() not two */
	usb_hcd_fsl_remove(platform_get_drvdata(pdev), pdev);

	return 0;
}
MODULE_ALIAS("platform:fsl-ehci");

/* Platform glue binding "fsl-ehci" devices to this driver. */
static struct platform_driver ehci_fsl_driver = {
	.probe = ehci_fsl_drv_probe,
	.remove = ehci_fsl_drv_remove,
	.shutdown = usb_hcd_platform_shutdown,
	.driver = {
		.name = "fsl-ehci",
		.pm = EHCI_FSL_PM_OPS,
	},
};
| gpl-2.0 |
01org/Igvtg-kernel | drivers/regulator/tps6507x-regulator.c | 528 | 13312 | /*
* tps6507x-regulator.c
*
* Regulator driver for TPS65073 PMIC
*
* Copyright (C) 2009 Texas Instrument Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
* whether express or implied; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/tps6507x.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/mfd/tps6507x.h>
#include <linux/regulator/of_regulator.h>
/* DCDC's */
/* Regulator IDs double as array indices and (via MAX_REG_ID - id)
 * as bit positions in the CON_CTRL1 enable register. */
#define TPS6507X_DCDC_1		0
#define TPS6507X_DCDC_2		1
#define TPS6507X_DCDC_3		2
/* LDOs */
#define TPS6507X_LDO_1		3
#define TPS6507X_LDO_2		4

#define TPS6507X_MAX_REG_ID	TPS6507X_LDO_2

/* Number of step-down converters available */
#define TPS6507X_NUM_DCDC	3
/* Number of LDO voltage regulators available */
#define TPS6507X_NUM_LDO	2
/* Number of total regulators available */
#define TPS6507X_NUM_REGULATOR	(TPS6507X_NUM_DCDC + TPS6507X_NUM_LDO)
/* Supported voltage values for regulators (in microVolts) */
/* Table index == register selector value (ascending order required by
 * regulator_map_voltage_ascend). */
static const unsigned int VDCDCx_VSEL_table[] = {
	725000, 750000, 775000, 800000,
	825000, 850000, 875000, 900000,
	925000, 950000, 975000, 1000000,
	1025000, 1050000, 1075000, 1100000,
	1125000, 1150000, 1175000, 1200000,
	1225000, 1250000, 1275000, 1300000,
	1325000, 1350000, 1375000, 1400000,
	1425000, 1450000, 1475000, 1500000,
	1550000, 1600000, 1650000, 1700000,
	1750000, 1800000, 1850000, 1900000,
	1950000, 2000000, 2050000, 2100000,
	2150000, 2200000, 2250000, 2300000,
	2350000, 2400000, 2450000, 2500000,
	2550000, 2600000, 2650000, 2700000,
	2750000, 2800000, 2850000, 2900000,
	3000000, 3100000, 3200000, 3300000,
};

static const unsigned int LDO1_VSEL_table[] = {
	1000000, 1100000, 1200000, 1250000,
	1300000, 1350000, 1400000, 1500000,
	1600000, 1800000, 2500000, 2750000,
	2800000, 3000000, 3100000, 3300000,
};

/* The voltage mapping table for LDO2 is the same as VDCDCx */
#define LDO2_VSEL_table VDCDCx_VSEL_table
/* Static description of one regulator: name plus its voltage table. */
struct tps_info {
	const char *name;
	u8 table_len;
	const unsigned int *table;

	/* Does DCDC high or the low register defines output voltage? */
	bool defdcdc_default;
};
/* One entry per regulator, indexed by the TPS6507X_* regulator IDs. */
static struct tps_info tps6507x_pmic_regs[] = {
	{
		.name = "VDCDC1",
		.table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
		.table = VDCDCx_VSEL_table,
	},
	{
		.name = "VDCDC2",
		.table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
		.table = VDCDCx_VSEL_table,
	},
	{
		.name = "VDCDC3",
		.table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
		.table = VDCDCx_VSEL_table,
	},
	{
		.name = "LDO1",
		.table_len = ARRAY_SIZE(LDO1_VSEL_table),
		.table = LDO1_VSEL_table,
	},
	{
		.name = "LDO2",
		.table_len = ARRAY_SIZE(LDO2_VSEL_table),
		.table = LDO2_VSEL_table,
	},
};
/* Driver-private state for the whole PMIC. */
struct tps6507x_pmic {
	struct regulator_desc desc[TPS6507X_NUM_REGULATOR];
	struct tps6507x_dev *mfd;	/* parent MFD; provides register I/O */
	struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR];
	struct tps_info *info[TPS6507X_NUM_REGULATOR];
	struct mutex io_lock;		/* serializes register read-modify-write */
};
/* Read one register via the MFD; returns the byte value (>= 0) or a
 * negative error code.  Caller must hold io_lock. */
static inline int tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg)
{
	u8 val;
	int err = tps->mfd->read_dev(tps->mfd, reg, 1, &val);

	return err ? err : val;
}
/* Write one register via the MFD.  Caller must hold io_lock. */
static inline int tps6507x_pmic_write(struct tps6507x_pmic *tps, u8 reg, u8 val)
{
	return tps->mfd->write_dev(tps->mfd, reg, 1, &val);
}
/* Atomically (under io_lock) OR "mask" into register "reg". */
static int tps6507x_pmic_set_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask)
{
	int ret;

	mutex_lock(&tps->io_lock);

	ret = tps6507x_pmic_read(tps, reg);
	if (ret < 0) {
		dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
	} else {
		ret = tps6507x_pmic_write(tps, reg, ret | mask);
		if (ret)
			dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n",
				reg);
	}

	mutex_unlock(&tps->io_lock);
	return ret;
}
/* Atomically (under io_lock) clear "mask" bits in register "reg". */
static int tps6507x_pmic_clear_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask)
{
	int ret;

	mutex_lock(&tps->io_lock);

	ret = tps6507x_pmic_read(tps, reg);
	if (ret < 0) {
		dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
	} else {
		ret = tps6507x_pmic_write(tps, reg, ret & ~mask);
		if (ret)
			dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n",
				reg);
	}

	mutex_unlock(&tps->io_lock);
	return ret;
}
/* Locked single-register read; logs on failure. */
static int tps6507x_pmic_reg_read(struct tps6507x_pmic *tps, u8 reg)
{
	int ret;

	mutex_lock(&tps->io_lock);

	ret = tps6507x_pmic_read(tps, reg);
	if (ret < 0)
		dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);

	mutex_unlock(&tps->io_lock);
	return ret;
}
/* Locked single-register write; logs on failure. */
static int tps6507x_pmic_reg_write(struct tps6507x_pmic *tps, u8 reg, u8 val)
{
	int ret;

	mutex_lock(&tps->io_lock);

	ret = tps6507x_pmic_write(tps, reg, val);
	if (ret < 0)
		dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg);

	mutex_unlock(&tps->io_lock);
	return ret;
}
/* Report whether the regulator's enable bit is set in CON_CTRL1.
 * Bits are laid out MSB-first: bit (MAX_REG_ID - id) per regulator. */
static int tps6507x_pmic_is_enabled(struct regulator_dev *dev)
{
	struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
	int rid = rdev_get_id(dev);
	int data;

	if (rid < TPS6507X_DCDC_1 || rid > TPS6507X_LDO_2)
		return -EINVAL;

	data = tps6507x_pmic_reg_read(tps, TPS6507X_REG_CON_CTRL1);
	if (data < 0)
		return data;

	return !!(data & (1 << (TPS6507X_MAX_REG_ID - rid)));
}
/* Set the regulator's enable bit in CON_CTRL1. */
static int tps6507x_pmic_enable(struct regulator_dev *dev)
{
	struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
	int rid = rdev_get_id(dev);

	if (rid < TPS6507X_DCDC_1 || rid > TPS6507X_LDO_2)
		return -EINVAL;

	return tps6507x_pmic_set_bits(tps, TPS6507X_REG_CON_CTRL1,
				      1 << (TPS6507X_MAX_REG_ID - rid));
}
/* Clear the regulator's enable bit in CON_CTRL1. */
static int tps6507x_pmic_disable(struct regulator_dev *dev)
{
	struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
	int rid = rdev_get_id(dev);

	if (rid < TPS6507X_DCDC_1 || rid > TPS6507X_LDO_2)
		return -EINVAL;

	return tps6507x_pmic_clear_bits(tps, TPS6507X_REG_CON_CTRL1,
					1 << (TPS6507X_MAX_REG_ID - rid));
}
/* Return the current voltage selector (table index) for a regulator.
 * DCDC2/3 output can be defined by either a HIGH or LOW register,
 * chosen by the board via defdcdc_default. */
static int tps6507x_pmic_get_voltage_sel(struct regulator_dev *dev)
{
	struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
	int data, rid = rdev_get_id(dev);
	u8 reg, mask;

	switch (rid) {
	case TPS6507X_DCDC_1:
		reg = TPS6507X_REG_DEFDCDC1;
		mask = TPS6507X_DEFDCDCX_DCDC_MASK;
		break;
	case TPS6507X_DCDC_2:
		if (tps->info[rid]->defdcdc_default)
			reg = TPS6507X_REG_DEFDCDC2_HIGH;
		else
			reg = TPS6507X_REG_DEFDCDC2_LOW;
		mask = TPS6507X_DEFDCDCX_DCDC_MASK;
		break;
	case TPS6507X_DCDC_3:
		if (tps->info[rid]->defdcdc_default)
			reg = TPS6507X_REG_DEFDCDC3_HIGH;
		else
			reg = TPS6507X_REG_DEFDCDC3_LOW;
		mask = TPS6507X_DEFDCDCX_DCDC_MASK;
		break;
	case TPS6507X_LDO_1:
		reg = TPS6507X_REG_LDO_CTRL1;
		mask = TPS6507X_REG_LDO_CTRL1_LDO1_MASK;
		break;
	case TPS6507X_LDO_2:
		reg = TPS6507X_REG_DEFLDO2;
		mask = TPS6507X_REG_DEFLDO2_LDO2_MASK;
		break;
	default:
		return -EINVAL;
	}

	data = tps6507x_pmic_reg_read(tps, reg);
	if (data < 0)
		return data;

	/* selector occupies the low bits of the register */
	data &= mask;
	return data;
}
/* Program a new voltage selector, preserving the register's other bits.
 * Register/mask choice mirrors tps6507x_pmic_get_voltage_sel(). */
static int tps6507x_pmic_set_voltage_sel(struct regulator_dev *dev,
					 unsigned selector)
{
	struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
	int data, rid = rdev_get_id(dev);
	u8 reg, mask;

	switch (rid) {
	case TPS6507X_DCDC_1:
		reg = TPS6507X_REG_DEFDCDC1;
		mask = TPS6507X_DEFDCDCX_DCDC_MASK;
		break;
	case TPS6507X_DCDC_2:
		if (tps->info[rid]->defdcdc_default)
			reg = TPS6507X_REG_DEFDCDC2_HIGH;
		else
			reg = TPS6507X_REG_DEFDCDC2_LOW;
		mask = TPS6507X_DEFDCDCX_DCDC_MASK;
		break;
	case TPS6507X_DCDC_3:
		if (tps->info[rid]->defdcdc_default)
			reg = TPS6507X_REG_DEFDCDC3_HIGH;
		else
			reg = TPS6507X_REG_DEFDCDC3_LOW;
		mask = TPS6507X_DEFDCDCX_DCDC_MASK;
		break;
	case TPS6507X_LDO_1:
		reg = TPS6507X_REG_LDO_CTRL1;
		mask = TPS6507X_REG_LDO_CTRL1_LDO1_MASK;
		break;
	case TPS6507X_LDO_2:
		reg = TPS6507X_REG_DEFLDO2;
		mask = TPS6507X_REG_DEFLDO2_LDO2_MASK;
		break;
	default:
		return -EINVAL;
	}

	/* read-modify-write: only replace the selector field */
	data = tps6507x_pmic_reg_read(tps, reg);
	if (data < 0)
		return data;

	data &= ~mask;
	data |= selector;

	return tps6507x_pmic_reg_write(tps, reg, data);
}
/* Shared regulator_ops for all five regulators; list/map come from the
 * regulator core using the ascending volt_table in each desc. */
static struct regulator_ops tps6507x_pmic_ops = {
	.is_enabled = tps6507x_pmic_is_enabled,
	.enable = tps6507x_pmic_enable,
	.disable = tps6507x_pmic_disable,
	.get_voltage_sel = tps6507x_pmic_get_voltage_sel,
	.set_voltage_sel = tps6507x_pmic_set_voltage_sel,
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_ascend,
};
/* DT child-node names matched under the "regulators" node; order must
 * follow the TPS6507X_* regulator IDs. */
static struct of_regulator_match tps6507x_matches[] = {
	{ .name = "VDCDC1"},
	{ .name = "VDCDC2"},
	{ .name = "VDCDC3"},
	{ .name = "LDO1"},
	{ .name = "LDO2"},
};
static struct tps6507x_board *tps6507x_parse_dt_reg_data(
struct platform_device *pdev,
struct of_regulator_match **tps6507x_reg_matches)
{
struct tps6507x_board *tps_board;
struct device_node *np = pdev->dev.parent->of_node;
struct device_node *regulators;
struct of_regulator_match *matches;
static struct regulator_init_data *reg_data;
int idx = 0, count, ret;
tps_board = devm_kzalloc(&pdev->dev, sizeof(*tps_board),
GFP_KERNEL);
if (!tps_board)
return NULL;
regulators = of_get_child_by_name(np, "regulators");
if (!regulators) {
dev_err(&pdev->dev, "regulator node not found\n");
return NULL;
}
count = ARRAY_SIZE(tps6507x_matches);
matches = tps6507x_matches;
ret = of_regulator_match(&pdev->dev, regulators, matches, count);
of_node_put(regulators);
if (ret < 0) {
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
ret);
return NULL;
}
*tps6507x_reg_matches = matches;
reg_data = devm_kzalloc(&pdev->dev, (sizeof(struct regulator_init_data)
* TPS6507X_NUM_REGULATOR), GFP_KERNEL);
if (!reg_data)
return NULL;
tps_board->tps6507x_pmic_init_data = reg_data;
for (idx = 0; idx < count; idx++) {
if (!matches[idx].init_data || !matches[idx].of_node)
continue;
memcpy(®_data[idx], matches[idx].init_data,
sizeof(struct regulator_init_data));
}
return tps_board;
}
static int tps6507x_pmic_probe(struct platform_device *pdev)
{
	struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
	struct tps_info *info = &tps6507x_pmic_regs[0];
	struct regulator_config config = { };
	struct regulator_init_data *init_data;
	struct regulator_dev *rdev;
	struct tps6507x_pmic *tps;
	struct tps6507x_board *tps_board;
	struct of_regulator_match *tps6507x_reg_matches = NULL;
	int i;
	int error;
	unsigned int prop;

	/**
	 * tps_board points to pmic related constants
	 * coming from the board-evm file.
	 */

	tps_board = dev_get_platdata(tps6507x_dev->dev);
	/* No platform data: fall back to device-tree description. */
	if (IS_ENABLED(CONFIG_OF) && !tps_board &&
		tps6507x_dev->dev->of_node)
		tps_board = tps6507x_parse_dt_reg_data(pdev,
						       &tps6507x_reg_matches);
	if (!tps_board)
		return -EINVAL;

	/**
	 * init_data points to array of regulator_init structures
	 * coming from the board-evm file.
	 */
	init_data = tps_board->tps6507x_pmic_init_data;
	if (!init_data)
		return -EINVAL;

	tps = devm_kzalloc(&pdev->dev, sizeof(*tps), GFP_KERNEL);
	if (!tps)
		return -ENOMEM;

	mutex_init(&tps->io_lock);

	/* common for all regulators */
	tps->mfd = tps6507x_dev;

	/* Describe and register each of the five regulators. */
	for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) {
		/* Register the regulators */
		tps->info[i] = info;
		if (init_data->driver_data) {
			struct tps6507x_reg_platform_data *data =
					init_data->driver_data;
			/* board may override the DCDC HIGH/LOW register choice */
			tps->info[i]->defdcdc_default = data->defdcdc_default;
		}

		tps->desc[i].name = info->name;
		tps->desc[i].id = i;
		tps->desc[i].n_voltages = info->table_len;
		tps->desc[i].volt_table = info->table;
		tps->desc[i].ops = &tps6507x_pmic_ops;
		tps->desc[i].type = REGULATOR_VOLTAGE;
		tps->desc[i].owner = THIS_MODULE;

		config.dev = tps6507x_dev->dev;
		config.init_data = init_data;
		config.driver_data = tps;

		if (tps6507x_reg_matches) {
			/* DT can also override defdcdc_default per regulator */
			error = of_property_read_u32(
				tps6507x_reg_matches[i].of_node,
					"ti,defdcdc_default", &prop);

			if (!error)
				tps->info[i]->defdcdc_default = prop;

			config.of_node = tps6507x_reg_matches[i].of_node;
		}

		rdev = devm_regulator_register(&pdev->dev, &tps->desc[i],
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(tps6507x_dev->dev,
				"failed to register %s regulator\n",
				pdev->name);
			return PTR_ERR(rdev);
		}

		/* Save regulator for cleanup */
		tps->rdev[i] = rdev;
	}

	tps6507x_dev->pmic = tps;
	platform_set_drvdata(pdev, tps6507x_dev);

	return 0;
}
/* No .remove needed: all resources are devm-managed. */
static struct platform_driver tps6507x_pmic_driver = {
	.driver = {
		.name = "tps6507x-pmic",
		.owner = THIS_MODULE,
	},
	.probe = tps6507x_pmic_probe,
};
static int __init tps6507x_pmic_init(void)
{
	/* subsys_initcall: register before consumers of the rails probe */
	return platform_driver_register(&tps6507x_pmic_driver);
}
subsys_initcall(tps6507x_pmic_init);
static void __exit tps6507x_pmic_cleanup(void)
{
	platform_driver_unregister(&tps6507x_pmic_driver);
}
module_exit(tps6507x_pmic_cleanup);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("TPS6507x voltage regulator driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tps6507x-pmic");
| gpl-2.0 |
aeroevan/linux | drivers/rtc/rtc-mv.c | 784 | 9453 | /*
* Driver for the RTC in Marvell SoCs.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/gfp.h>
#include <linux/module.h>
/* Register map: time/date values are packed BCD fields. */
#define RTC_TIME_REG_OFFS	0
#define RTC_SECONDS_OFFS	0
#define RTC_MINUTES_OFFS	8
#define RTC_HOURS_OFFS		16
#define RTC_WDAY_OFFS		24
#define RTC_HOURS_12H_MODE	BIT(22) /* 12 hour mode */

#define RTC_DATE_REG_OFFS	4
#define RTC_MDAY_OFFS		0
#define RTC_MONTH_OFFS		8
#define RTC_YEAR_OFFS		16

#define RTC_ALARM_TIME_REG_OFFS	8
#define RTC_ALARM_DATE_REG_OFFS	0xc
#define RTC_ALARM_VALID		BIT(7)	/* per-field "match this field" flag */

#define RTC_ALARM_INTERRUPT_MASK_REG_OFFS	0x10
#define RTC_ALARM_INTERRUPT_CASUE_REG_OFFS	0x14

/* Per-device driver state. */
struct rtc_plat_data {
	struct rtc_device *rtc;
	void __iomem *ioaddr;	/* mapped RTC register base */
	int		irq;	/* alarm interrupt, if wired up */
	struct clk	*clk;
};
/* Program the hardware clock from *tm (fields packed as BCD). */
static int mv_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct rtc_plat_data *pdata = dev_get_drvdata(dev);
	void __iomem *ioaddr = pdata->ioaddr;
	u32	rtc_reg;

	rtc_reg = (bin2bcd(tm->tm_sec) << RTC_SECONDS_OFFS) |
		(bin2bcd(tm->tm_min) << RTC_MINUTES_OFFS) |
		(bin2bcd(tm->tm_hour) << RTC_HOURS_OFFS) |
		(bin2bcd(tm->tm_wday) << RTC_WDAY_OFFS);
	writel(rtc_reg, ioaddr + RTC_TIME_REG_OFFS);

	/* hw month is 1-based, hw year counts from 2000 */
	rtc_reg = (bin2bcd(tm->tm_mday) << RTC_MDAY_OFFS) |
		(bin2bcd(tm->tm_mon + 1) << RTC_MONTH_OFFS) |
		(bin2bcd(tm->tm_year % 100) << RTC_YEAR_OFFS);
	writel(rtc_reg, ioaddr + RTC_DATE_REG_OFFS);

	return 0;
}
static int mv_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
u32 rtc_time, rtc_date;
unsigned int year, month, day, hour, minute, second, wday;
rtc_time = readl(ioaddr + RTC_TIME_REG_OFFS);
rtc_date = readl(ioaddr + RTC_DATE_REG_OFFS);
second = rtc_time & 0x7f;
minute = (rtc_time >> RTC_MINUTES_OFFS) & 0x7f;
hour = (rtc_time >> RTC_HOURS_OFFS) & 0x3f; /* assume 24 hour mode */
wday = (rtc_time >> RTC_WDAY_OFFS) & 0x7;
day = rtc_date & 0x3f;
month = (rtc_date >> RTC_MONTH_OFFS) & 0x3f;
year = (rtc_date >> RTC_YEAR_OFFS) & 0xff;
tm->tm_sec = bcd2bin(second);
tm->tm_min = bcd2bin(minute);
tm->tm_hour = bcd2bin(hour);
tm->tm_mday = bcd2bin(day);
tm->tm_wday = bcd2bin(wday);
tm->tm_mon = bcd2bin(month) - 1;
/* hw counts from year 2000, but tm_year is relative to 1900 */
tm->tm_year = bcd2bin(year) + 100;
return rtc_valid_tm(tm);
}
/* Read the alarm registers into *alm; same BCD layout as the clock. */
static int mv_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
	struct rtc_plat_data *pdata = dev_get_drvdata(dev);
	void __iomem *ioaddr = pdata->ioaddr;
	u32	rtc_time, rtc_date;
	unsigned int year, month, day, hour, minute, second, wday;

	rtc_time = readl(ioaddr + RTC_ALARM_TIME_REG_OFFS);
	rtc_date = readl(ioaddr + RTC_ALARM_DATE_REG_OFFS);

	second = rtc_time & 0x7f;
	minute = (rtc_time >> RTC_MINUTES_OFFS) & 0x7f;
	hour = (rtc_time >> RTC_HOURS_OFFS) & 0x3f; /* assume 24 hour mode */
	wday = (rtc_time >> RTC_WDAY_OFFS) & 0x7;

	day = rtc_date & 0x3f;
	month = (rtc_date >> RTC_MONTH_OFFS) & 0x3f;
	year = (rtc_date >> RTC_YEAR_OFFS) & 0xff;

	alm->time.tm_sec = bcd2bin(second);
	alm->time.tm_min = bcd2bin(minute);
	alm->time.tm_hour = bcd2bin(hour);
	alm->time.tm_mday = bcd2bin(day);
	alm->time.tm_wday = bcd2bin(wday);
	alm->time.tm_mon = bcd2bin(month) - 1;
	/* hw counts from year 2000, but tm_year is relative to 1900 */
	alm->time.tm_year = bcd2bin(year) + 100;

	/* fields carrying RTC_ALARM_VALID flags may not decode to a
	 * sane date; fall back to the epoch rather than report garbage */
	if (rtc_valid_tm(&alm->time) < 0) {
		dev_err(dev, "retrieved alarm date/time is not valid.\n");
		rtc_time_to_tm(0, &alm->time);
	}

	alm->enabled = !!readl(ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
	return 0;
}
/*
 * mv_rtc_set_alarm() - program the hardware alarm registers.
 *
 * Each BCD field carries RTC_ALARM_VALID; fields the caller left
 * negative stay zero so the hardware ignores them when matching.
 * Any pending alarm event is acknowledged before the interrupt mask
 * is (re)programmed.
 */
static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
	struct rtc_plat_data *pdata = dev_get_drvdata(dev);
	void __iomem *ioaddr = pdata->ioaddr;
	u32 rtc_reg = 0;

	/* Alarm time register: seconds, minutes, hours. */
	if (alm->time.tm_sec >= 0)
		rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_sec))
			<< RTC_SECONDS_OFFS;
	if (alm->time.tm_min >= 0)
		rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_min))
			<< RTC_MINUTES_OFFS;
	if (alm->time.tm_hour >= 0)
		rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_hour))
			<< RTC_HOURS_OFFS;
	writel(rtc_reg, ioaddr + RTC_ALARM_TIME_REG_OFFS);

	/* Alarm date register: day, 1-based month, two-digit year. */
	if (alm->time.tm_mday >= 0)
		rtc_reg = (RTC_ALARM_VALID | bin2bcd(alm->time.tm_mday))
			<< RTC_MDAY_OFFS;
	else
		rtc_reg = 0;
	if (alm->time.tm_mon >= 0)
		rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_mon + 1))
			<< RTC_MONTH_OFFS;
	if (alm->time.tm_year >= 0)
		rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_year % 100))
			<< RTC_YEAR_OFFS;
	writel(rtc_reg, ioaddr + RTC_ALARM_DATE_REG_OFFS);

	/* Clear any stale alarm event, then (un)mask the interrupt. */
	writel(0, ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS);
	writel(alm->enabled ? 1 : 0,
	       ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);

	return 0;
}
/*
 * mv_rtc_alarm_irq_enable() - arm or disarm the alarm interrupt.
 *
 * Returns -EINVAL when no IRQ line is present so rtc-dev falls back to
 * its software alarm emulation.
 */
static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);

	if (pdata->irq < 0)
		return -EINVAL; /* fall back into rtc-dev's emulation */

	/* Interrupt mask register: 1 = alarm irq enabled, 0 = masked. */
	writel(enabled ? 1 : 0,
	       pdata->ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
	return 0;
}
/*
 * mv_rtc_interrupt() - alarm interrupt handler.
 *
 * The line is requested IRQF_SHARED, so return IRQ_NONE unless our
 * alarm cause register actually shows a pending event.
 */
static irqreturn_t mv_rtc_interrupt(int irq, void *data)
{
	struct rtc_plat_data *pdata = data;
	void __iomem *ioaddr = pdata->ioaddr;
	/* alarm irq? */
	if (!readl(ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS))
		return IRQ_NONE;
	/* clear interrupt */
	writel(0, ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS);
	/* Notify the RTC core that an alarm fired. */
	rtc_update_irq(pdata->rtc, 1, RTC_IRQF | RTC_AF);
	return IRQ_HANDLED;
}
/* Time-only ops: used when no alarm interrupt line is available
 * (rtc-dev then emulates alarms in software). */
static const struct rtc_class_ops mv_rtc_ops = {
	.read_time	= mv_rtc_read_time,
	.set_time	= mv_rtc_set_time,
};
/* Full ops including hardware alarm support; registered from probe
 * only when a valid IRQ was found. */
static const struct rtc_class_ops mv_rtc_alarm_ops = {
	.read_time	= mv_rtc_read_time,
	.set_time	= mv_rtc_set_time,
	.read_alarm	= mv_rtc_read_alarm,
	.set_alarm	= mv_rtc_set_alarm,
	.alarm_irq_enable = mv_rtc_alarm_irq_enable,
};
/*
 * mv_rtc_probe() - map registers, sanity-check the RTC and register it.
 *
 * Registers the alarm-capable ops only when an interrupt is available;
 * otherwise the time-only ops are used and rtc-dev emulates alarms.
 * All resources are devm-managed; only the optional clock needs manual
 * cleanup on the error path.
 */
static int __init mv_rtc_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct rtc_plat_data *pdata;
	u32 rtc_time;
	u32 rtc_date;
	int ret = 0;
	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdata->ioaddr))
		return PTR_ERR(pdata->ioaddr);
	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	/* Not all SoCs require a clock.*/
	if (!IS_ERR(pdata->clk))
		clk_prepare_enable(pdata->clk);
	/* make sure the 24 hour mode is enabled */
	rtc_time = readl(pdata->ioaddr + RTC_TIME_REG_OFFS);
	if (rtc_time & RTC_HOURS_12H_MODE) {
		dev_err(&pdev->dev, "12 Hour mode is enabled but not supported.\n");
		ret = -EINVAL;
		goto out;
	}
	/* make sure it is actually functional: if the register still reads
	 * the power-on value after a full second, the clock is not ticking */
	if (rtc_time == 0x01000000) {
		ssleep(1);
		rtc_time = readl(pdata->ioaddr + RTC_TIME_REG_OFFS);
		if (rtc_time == 0x01000000) {
			dev_err(&pdev->dev, "internal RTC not ticking\n");
			ret = -ENODEV;
			goto out;
		}
	}
	/*
	 * A date after January 19th, 2038 does not fit on 32 bits and
	 * will confuse the kernel and userspace. Reset to a sane date
	 * (January 1st, 2013) if we're after 2038.
	 */
	rtc_date = readl(pdata->ioaddr + RTC_DATE_REG_OFFS);
	if (bcd2bin((rtc_date >> RTC_YEAR_OFFS) & 0xff) >= 38) {
		dev_info(&pdev->dev, "invalid RTC date, resetting to January 1st, 2013\n");
		writel(0x130101, pdata->ioaddr + RTC_DATE_REG_OFFS);
	}
	pdata->irq = platform_get_irq(pdev, 0);
	platform_set_drvdata(pdev, pdata);
	/* With an IRQ we can wake the system and support hw alarms. */
	if (pdata->irq >= 0) {
		device_init_wakeup(&pdev->dev, 1);
		pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
						      &mv_rtc_alarm_ops,
						      THIS_MODULE);
	} else {
		pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
						      &mv_rtc_ops, THIS_MODULE);
	}
	if (IS_ERR(pdata->rtc)) {
		ret = PTR_ERR(pdata->rtc);
		goto out;
	}
	/* Mask the alarm interrupt before installing the handler; drop
	 * back to irq-less operation if the request fails. */
	if (pdata->irq >= 0) {
		writel(0, pdata->ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
		if (devm_request_irq(&pdev->dev, pdata->irq, mv_rtc_interrupt,
				     IRQF_SHARED,
				     pdev->name, pdata) < 0) {
			dev_warn(&pdev->dev, "interrupt not available.\n");
			pdata->irq = -1;
		}
	}
	return 0;
out:
	if (!IS_ERR(pdata->clk))
		clk_disable_unprepare(pdata->clk);
	return ret;
}
/* Undo wakeup setup and gate the optional clock; everything else is
 * devm-managed and released automatically. */
static int __exit mv_rtc_remove(struct platform_device *pdev)
{
	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
	/* Wakeup was only enabled in probe when an IRQ was present. */
	if (pdata->irq >= 0)
		device_init_wakeup(&pdev->dev, 0);
	/* The clock is optional; pdata->clk may hold an ERR_PTR. */
	if (!IS_ERR(pdata->clk))
		clk_disable_unprepare(pdata->clk);
	return 0;
}
#ifdef CONFIG_OF
/* Device-tree match table: Marvell Orion-family RTC. */
static const struct of_device_id rtc_mv_of_match_table[] = {
	{ .compatible = "marvell,orion-rtc", },
	{}
};
MODULE_DEVICE_TABLE(of, rtc_mv_of_match_table);
#endif
/* No .probe member here: mv_rtc_probe() is __init and is passed to
 * module_platform_driver_probe() below instead. */
static struct platform_driver mv_rtc_driver = {
	.remove		= __exit_p(mv_rtc_remove),
	.driver		= {
		.name	= "rtc-mv",
		.of_match_table = of_match_ptr(rtc_mv_of_match_table),
	},
};
module_platform_driver_probe(mv_rtc_driver, mv_rtc_probe);
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("Marvell RTC driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rtc-mv");
/* ---- end of rtc-mv.c (license: gpl-2.0) ---- */

/* ---- begin gem5/linux-arm64-gem5: drivers/net/wireless/ath/ath5k/pcu.c ---- */
/*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
* Copyright (c) 2007-2008 Matthew W. S. Bell <mentor@madwifi.org>
* Copyright (c) 2007-2008 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
* Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
* Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/*********************************\
* Protocol Control Unit Functions *
\*********************************/
#include <asm/unaligned.h>
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/**
* DOC: Protocol Control Unit (PCU) functions
*
* Protocol control unit is responsible to maintain various protocol
* properties before a frame is send and after a frame is received to/from
* baseband. To be more specific, PCU handles:
*
* - Buffering of RX and TX frames (after QCU/DCUs)
*
* - Encrypting and decrypting (using the built-in engine)
*
* - Generating ACKs, RTS/CTS frames
*
* - Maintaining TSF
*
* - FCS
*
* - Updating beacon data (with TSF etc)
*
* - Generating virtual CCA
*
* - RX/Multicast filtering
*
* - BSSID filtering
*
* - Various statistics
*
* -Different operating modes: AP, STA, IBSS
*
* Note: Most of these functions can be tweaked/bypassed so you can do
* them on sw above for debugging or research. For more infos check out PCU
* registers on reg.h.
*/
/**
* DOC: ACK rates
*
* AR5212+ can use higher rates for ack transmission
* based on current tx rate instead of the base rate.
* It does this to better utilize channel usage.
* There is a mapping between G rates (that cover both
* CCK and OFDM) and ack rates that we use when setting
* rate -> duration table. This mapping is hw-based so
* don't change anything.
*
* To enable this functionality we must set
* ah->ah_ack_bitrate_high to true else base rate is
* used (1Mb for CCK, 6Mb for OFDM).
*/
/* Maps a tx rate's index in the 802.11g bitrate table to the index of
 * the rate used for the corresponding ACK (hw-based mapping, see the
 * "ACK rates" DOC block above -- do not change). */
static const unsigned int ack_rates_high[] =
/* Tx	-> ACK	*/
/* 1Mb	-> 1Mb	*/	{ 0,
/* 2MB	-> 2Mb	*/	1,
/* 5.5Mb -> 2Mb	*/	1,
/* 11Mb	-> 2Mb	*/	1,
/* 6Mb	-> 6Mb	*/	4,
/* 9Mb	-> 6Mb	*/	4,
/* 12Mb	-> 12Mb	*/	6,
/* 18Mb	-> 12Mb	*/	6,
/* 24Mb	-> 24Mb	*/	8,
/* 36Mb	-> 24Mb	*/	8,
/* 48Mb	-> 24Mb	*/	8,
/* 54Mb	-> 24Mb	*/	8 };
/*******************\
* Helper functions *
\*******************/
/**
 * ath5k_hw_get_frame_duration() - Get tx time of a frame
 * @ah: The &struct ath5k_hw
 * @band: One of enum ieee80211_band
 * @len: Frame's length in bytes
 * @rate: The @struct ieee80211_rate
 * @shortpre: Indicate short preamble
 *
 * Calculate tx duration of a frame given it's rate and length
 * It extends ieee80211_generic_frame_duration for non standard
 * bwmodes.
 */
int
ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
		int len, struct ieee80211_rate *rate, bool shortpre)
{
	int sifs, preamble, plcp_bits, sym_time;
	int bitrate, bits, symbols, symbol_bits;
	int dur;
	/* Fallback: for the default bwmode let mac80211 do the math. */
	if (!ah->ah_bwmode) {
		__le16 raw_dur = ieee80211_generic_frame_duration(ah->hw,
					NULL, band, len, rate);
		/* subtract difference between long and short preamble */
		dur = le16_to_cpu(raw_dur);
		if (shortpre)
			dur -= 96;
		return dur;
	}
	/* OFDM timing defaults; scaled below per bandwidth mode.
	 * NOTE(review): "PREAMPLE" spelling is taken as-is from the
	 * header's macro name -- confirm before renaming. */
	bitrate = rate->bitrate;
	preamble = AR5K_INIT_OFDM_PREAMPLE_TIME;
	plcp_bits = AR5K_INIT_OFDM_PLCP_BITS;
	sym_time = AR5K_INIT_OFDM_SYMBOL_TIME;
	switch (ah->ah_bwmode) {
	case AR5K_BWMODE_40MHZ:
		sifs = AR5K_INIT_SIFS_TURBO;
		preamble = AR5K_INIT_OFDM_PREAMBLE_TIME_MIN;
		break;
	case AR5K_BWMODE_10MHZ:
		/* Half rate: everything takes twice as long per symbol. */
		sifs = AR5K_INIT_SIFS_HALF_RATE;
		preamble *= 2;
		sym_time *= 2;
		bitrate = DIV_ROUND_UP(bitrate, 2);
		break;
	case AR5K_BWMODE_5MHZ:
		/* Quarter rate: 4x symbol time, 1/4 bitrate. */
		sifs = AR5K_INIT_SIFS_QUARTER_RATE;
		preamble *= 4;
		sym_time *= 4;
		bitrate = DIV_ROUND_UP(bitrate, 4);
		break;
	default:
		sifs = AR5K_INIT_SIFS_DEFAULT_BG;
		break;
	}
	bits = plcp_bits + (len << 3);
	/* Bit rate is in 100Kbits */
	symbol_bits = bitrate * sym_time;
	symbols = DIV_ROUND_UP(bits * 10, symbol_bits);
	dur = sifs + preamble + (sym_time * symbols);
	return dur;
}
/**
* ath5k_hw_get_default_slottime() - Get the default slot time for current mode
* @ah: The &struct ath5k_hw
*/
/*
 * ath5k_hw_get_default_slottime() - default slot time for the current
 * bandwidth mode and band.
 */
unsigned int
ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
{
	struct ieee80211_channel *channel = ah->ah_current_channel;

	/* Non-default bandwidth modes use fixed slot times. */
	switch (ah->ah_bwmode) {
	case AR5K_BWMODE_40MHZ:
		return AR5K_INIT_SLOT_TIME_TURBO;
	case AR5K_BWMODE_10MHZ:
		return AR5K_INIT_SLOT_TIME_HALF_RATE;
	case AR5K_BWMODE_5MHZ:
		return AR5K_INIT_SLOT_TIME_QUARTER_RATE;
	case AR5K_BWMODE_DEFAULT:
	default:
		break;
	}

	/* 802.11b without short-slot support needs the long slot time. */
	if ((channel->hw_value == AR5K_MODE_11B) && !ah->ah_short_slot)
		return AR5K_INIT_SLOT_TIME_B;

	return AR5K_INIT_SLOT_TIME_DEFAULT;
}
/**
* ath5k_hw_get_default_sifs() - Get the default SIFS for current mode
* @ah: The &struct ath5k_hw
*/
/*
 * ath5k_hw_get_default_sifs() - default SIFS for the current bandwidth
 * mode; on the default mode the 2GHz value is overridden for 5GHz.
 */
unsigned int
ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
{
	struct ieee80211_channel *channel = ah->ah_current_channel;
	unsigned int sifs;
	switch (ah->ah_bwmode) {
	case AR5K_BWMODE_40MHZ:
		sifs = AR5K_INIT_SIFS_TURBO;
		break;
	case AR5K_BWMODE_10MHZ:
		sifs = AR5K_INIT_SIFS_HALF_RATE;
		break;
	case AR5K_BWMODE_5MHZ:
		sifs = AR5K_INIT_SIFS_QUARTER_RATE;
		break;
	case AR5K_BWMODE_DEFAULT:
		sifs = AR5K_INIT_SIFS_DEFAULT_BG;
		/* fall through: 5GHz overrides the BG default below */
	default:
		if (channel->band == IEEE80211_BAND_5GHZ)
			sifs = AR5K_INIT_SIFS_DEFAULT_A;
		break;
	}
	return sifs;
}
/**
* ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics)
* @ah: The &struct ath5k_hw
*
* Reads MIB counters from PCU and updates sw statistics. Is called after a
* MIB interrupt, because one of these counters might have reached their maximum
* and triggered the MIB interrupt, to let us read and clear the counter.
*
* NOTE: Is called in interrupt context!
*/
void
ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
	struct ath5k_statistics *stats = &ah->stats;
	/* Read-And-Clear: each register read below also resets the hw
	 * counter, so we accumulate into the sw statistics. */
	stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
	stats->rts_fail += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
	stats->rts_ok += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
	stats->fcs_error += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
	stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
}
/******************\
* ACK/CTS Timeouts *
\******************/
/**
* ath5k_hw_write_rate_duration() - Fill rate code to duration table
* @ah: The &struct ath5k_hw
*
* Write the rate code to duration table upon hw reset. This is a helper for
* ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
* the hardware, based on current mode, for each rate. The rates which are
* capable of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have
* different rate code so we write their value twice (one for long preamble
* and one for short).
*
* Note: Band doesn't matter here, if we set the values for OFDM it works
* on both a and g modes. So all we have to do is set values for all g rates
* that include all OFDM and CCK rates.
*
*/
static inline void
ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
{
	struct ieee80211_rate *rate;
	unsigned int i;
	/* 802.11g covers both OFDM and CCK */
	u8 band = IEEE80211_BAND_2GHZ;
	/* Write rate duration table: one ACK-duration entry per g rate. */
	for (i = 0; i < ah->sbands[band].n_bitrates; i++) {
		u32 reg;
		u16 tx_time;
		/* Pick the rate the ACK will be sent at: either the
		 * hw-mapped high rate (see ack_rates_high) or the base
		 * rate of the modulation (1Mb CCK / 6Mb OFDM). */
		if (ah->ah_ack_bitrate_high)
			rate = &ah->sbands[band].bitrates[ack_rates_high[i]];
		/* CCK -> 1Mb */
		else if (i < 4)
			rate = &ah->sbands[band].bitrates[0];
		/* OFDM -> 6Mb */
		else
			rate = &ah->sbands[band].bitrates[4];
		/* Set ACK timeout */
		reg = AR5K_RATE_DUR(rate->hw_value);
		/* An ACK frame consists of 10 bytes. If you add the FCS,
		 * which ieee80211_generic_frame_duration() adds,
		 * its 14 bytes. Note we use the control rate and not the
		 * actual rate for this rate. See mac80211 tx.c
		 * ieee80211_duration() for a brief description of
		 * what rate we should choose to TX ACKs. */
		tx_time = ath5k_hw_get_frame_duration(ah, band, 10,
					rate, false);
		ath5k_hw_reg_write(ah, tx_time, reg);
		/* Rates with a distinct short-preamble rate code get a
		 * second entry for the short-preamble duration. */
		if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
			continue;
		tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, true);
		ath5k_hw_reg_write(ah, tx_time,
			reg + (AR5K_SET_SHORT_PREAMBLE << 2));
	}
}
/**
* ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
/*
 * ath5k_hw_set_ack_timeout() - program the ACK timeout field.
 * @timeout: Timeout in usec; rejected if it does not fit the field.
 */
static int
ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
	unsigned int max_usec = ath5k_hw_clocktoh(ah,
			AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK));

	if (timeout >= max_usec)
		return -EINVAL;

	AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
		ath5k_hw_htoclock(ah, timeout));

	return 0;
}
/**
* ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
/*
 * ath5k_hw_set_cts_timeout() - program the CTS timeout field.
 * @timeout: Timeout in usec; rejected if it does not fit the field.
 */
static int
ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
	unsigned int max_usec = ath5k_hw_clocktoh(ah,
			AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS));

	if (timeout >= max_usec)
		return -EINVAL;

	AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
		ath5k_hw_htoclock(ah, timeout));

	return 0;
}
/*******************\
* RX filter Control *
\*******************/
/**
* ath5k_hw_set_lladdr() - Set station id
* @ah: The &struct ath5k_hw
* @mac: The card's mac address (array of octets)
*
* Set station id on hw using the provided mac address
*/
/*
 * ath5k_hw_set_lladdr() - set the station (MAC) address.
 *
 * Caches the address in the common state, then programs STA_ID0/1
 * while preserving the flag bits in the upper half of STA_ID1.
 */
int
ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
{
	struct ath_common *common = ath5k_hw_common(ah);
	u32 sta_id1_flags;

	memcpy(common->macaddr, mac, ETH_ALEN);

	sta_id1_flags = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
	ath5k_hw_reg_write(ah, get_unaligned_le32(mac), AR5K_STA_ID0);
	ath5k_hw_reg_write(ah, sta_id1_flags | get_unaligned_le16(mac + 4),
			AR5K_STA_ID1);

	return 0;
}
/**
* ath5k_hw_set_bssid() - Set current BSSID on hw
* @ah: The &struct ath5k_hw
*
* Sets the current BSSID and BSSID mask we have from the
* common struct into the hardware
*/
void
ath5k_hw_set_bssid(struct ath5k_hw *ah)
{
	struct ath_common *common = ath5k_hw_common(ah);
	/* NOTE(review): tim_offset is never assigned, so the BEACON_TIM
	 * field below is always programmed with 0 -- apparently TIM
	 * handling is not implemented; confirm before relying on it. */
	u16 tim_offset = 0;
	/*
	 * Set BSSID mask on 5212
	 */
	if (ah->ah_version == AR5K_AR5212)
		ath_hw_setbssidmask(common);
	/*
	 * Set BSSID
	 */
	ath5k_hw_reg_write(ah,
			get_unaligned_le32(common->curbssid),
			AR5K_BSS_ID0);
	ath5k_hw_reg_write(ah,
			get_unaligned_le16(common->curbssid + 4) |
			((common->curaid & 0x3fff) << AR5K_BSS_ID1_AID_S),
			AR5K_BSS_ID1);
	/* Not associated yet (no AID): disable PS-Poll and bail out. */
	if (common->curaid == 0) {
		ath5k_hw_disable_pspoll(ah);
		return;
	}
	AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
			tim_offset ? tim_offset + 4 : 0);
	ath5k_hw_enable_pspoll(ah, NULL, 0);
}
/**
* ath5k_hw_set_bssid_mask() - Filter out bssids we listen
* @ah: The &struct ath5k_hw
* @mask: The BSSID mask to set (array of octets)
*
* BSSID masking is a method used by AR5212 and newer hardware to inform PCU
* which bits of the interface's MAC address should be looked at when trying
* to decide which packets to ACK. In station mode and AP mode with a single
* BSS every bit matters since we lock to only one BSS. In AP mode with
* multiple BSSes (virtual interfaces) not every bit matters because hw must
* accept frames for all BSSes and so we tweak some bits of our mac address
* in order to have multiple BSSes.
*
* For more information check out ../hw.c of the common ath module.
*/
void
ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
{
	struct ath_common *common = ath5k_hw_common(ah);
	/* Cache bssid mask so that we can restore it
	 * on reset */
	memcpy(common->bssidmask, mask, ETH_ALEN);
	/* Only AR5212 and newer support BSSID masking in hw. */
	if (ah->ah_version == AR5K_AR5212)
		ath_hw_setbssidmask(common);
}
/**
* ath5k_hw_set_mcast_filter() - Set multicast filter
* @ah: The &struct ath5k_hw
* @filter0: Lower 32bits of muticast filter
* @filter1: Higher 16bits of multicast filter
*/
void
ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
{
	/* The 64-bit multicast hash filter is split across two registers. */
	ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
	ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
/**
* ath5k_hw_get_rx_filter() - Get current rx filter
* @ah: The &struct ath5k_hw
*
* Returns the RX filter by reading rx filter and
* phy error filter registers. RX filter is used
* to set the allowed frame types that PCU will accept
* and pass to the driver. For a list of frame types
* check out reg.h.
*/
/*
 * ath5k_hw_get_rx_filter() - return the current RX filter mask.
 *
 * On AR5212 the radar and PHY-error bits live in the separate PHY
 * error filter register and are folded into the returned mask.
 */
u32
ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
{
	u32 filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);

	/*Radar detection for 5212*/
	if (ah->ah_version == AR5K_AR5212) {
		u32 phy_err = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL);

		if (phy_err & AR5K_PHY_ERR_FIL_RADAR)
			filter |= AR5K_RX_FILTER_RADARERR;
		if (phy_err & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK))
			filter |= AR5K_RX_FILTER_PHYERR;
	}

	return filter;
}
/**
* ath5k_hw_set_rx_filter() - Set rx filter
* @ah: The &struct ath5k_hw
* @filter: RX filter mask (see reg.h)
*
* Sets RX filter register and also handles PHY error filter
* register on 5212 and newer chips so that we have proper PHY
* error reporting.
*/
void
ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
{
	u32 data = 0;
	/* Set PHY error filter register on 5212*/
	if (ah->ah_version == AR5K_AR5212) {
		if (filter & AR5K_RX_FILTER_RADARERR)
			data |= AR5K_PHY_ERR_FIL_RADAR;
		if (filter & AR5K_RX_FILTER_PHYERR)
			data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK;
	}
	/*
	 * The AR5210 uses promiscuous mode to detect radar activity
	 */
	if (ah->ah_version == AR5K_AR5210 &&
			(filter & AR5K_RX_FILTER_RADARERR)) {
		filter &= ~AR5K_RX_FILTER_RADARERR;
		filter |= AR5K_RX_FILTER_PROM;
	}
	/* Zero length DMA (phy error reporting): only needed when PHY
	 * errors are to be delivered to the driver. */
	if (data)
		AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
	else
		AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
	/*Write RX Filter register*/
	ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER);
	/*Write PHY error filter register on 5212*/
	if (ah->ah_version == AR5K_AR5212)
		ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL);
}
/****************\
* Beacon control *
\****************/
#define ATH5K_MAX_TSF_READ 10
/**
* ath5k_hw_get_tsf64() - Get the full 64bit TSF
* @ah: The &struct ath5k_hw
*
* Returns the current TSF
*/
u64
ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
	u32 tsf_lower, tsf_upper1, tsf_upper2;
	int i;
	unsigned long flags;
	/* This code is time critical - we don't want to be interrupted here */
	local_irq_save(flags);
	/*
	 * While reading TSF upper and then lower part, the clock is still
	 * counting (or jumping in case of IBSS merge) so we might get
	 * inconsistent values. To avoid this, we read the upper part again
	 * and check it has not been changed. We make the hypothesis that a
	 * maximum of 3 changes can happens in a row (we use 10 as a safe
	 * value).
	 *
	 * Impact on performance is pretty small, since in most cases, only
	 * 3 register reads are needed.
	 */
	tsf_upper1 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
	for (i = 0; i < ATH5K_MAX_TSF_READ; i++) {
		tsf_lower = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
		tsf_upper2 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
		/* Stable upper half means the lower read is consistent. */
		if (tsf_upper2 == tsf_upper1)
			break;
		tsf_upper1 = tsf_upper2;
	}
	local_irq_restore(flags);
	/* Retry budget exhausted -- should not happen in practice. */
	WARN_ON(i == ATH5K_MAX_TSF_READ);
	return ((u64)tsf_upper1 << 32) | tsf_lower;
}
#undef ATH5K_MAX_TSF_READ
/**
* ath5k_hw_set_tsf64() - Set a new 64bit TSF
* @ah: The &struct ath5k_hw
* @tsf64: The new 64bit TSF
*
* Sets the new TSF
*/
/*
 * ath5k_hw_set_tsf64() - load a new 64bit TSF into the two TSF
 * registers (lower half first, then upper half).
 */
void
ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
{
	u32 lower = (u32)tsf64;
	u32 upper = (u32)(tsf64 >> 32);

	ath5k_hw_reg_write(ah, lower, AR5K_TSF_L32);
	ath5k_hw_reg_write(ah, upper, AR5K_TSF_U32);
}
/**
* ath5k_hw_reset_tsf() - Force a TSF reset
* @ah: The &struct ath5k_hw
*
* Forces a TSF reset on PCU
*/
void
ath5k_hw_reset_tsf(struct ath5k_hw *ah)
{
	u32 val;
	val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF;
	/*
	 * Each write to the RESET_TSF bit toggles a hardware internal
	 * signal to reset TSF, but if left high it will cause a TSF reset
	 * on the next chip reset as well. Thus we always write the value
	 * twice to clear the signal.
	 */
	ath5k_hw_reg_write(ah, val, AR5K_BEACON);
	ath5k_hw_reg_write(ah, val, AR5K_BEACON);
}
/**
* ath5k_hw_init_beacon_timers() - Initialize beacon timers
* @ah: The &struct ath5k_hw
* @next_beacon: Next TBTT
* @interval: Current beacon interval
*
* This function is used to initialize beacon timers based on current
* operation mode and settings.
*/
void
ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
{
	u32 timer1, timer2, timer3;
	/*
	 * Set the additional timers by mode
	 */
	switch (ah->opmode) {
	case NL80211_IFTYPE_MONITOR:
	case NL80211_IFTYPE_STATION:
		/* In STA mode timer1 is used as next wakeup
		 * timer and timer2 as next CFP duration start
		 * timer. Both in 1/8TUs. */
		/* TODO: PCF handling */
		if (ah->ah_version == AR5K_AR5210) {
			timer1 = 0xffffffff;
			timer2 = 0xffffffff;
		} else {
			timer1 = 0x0000ffff;
			timer2 = 0x0007ffff;
		}
		/* Mark associated AP as PCF incapable for now */
		AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PCF);
		break;
	case NL80211_IFTYPE_ADHOC:
		AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM);
		/* fall through -- IBSS also uses the DBA/SWBA timers below */
	default:
		/* On non-STA modes timer1 is used as next DMA
		 * beacon alert (DBA) timer and timer2 as next
		 * software beacon alert. Both in 1/8TUs. */
		timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3;
		timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3;
		break;
	}
	/* Timer3 marks the end of our ATIM window
	 * a zero length window is not allowed because
	 * we 'll get no beacons */
	timer3 = next_beacon + 1;
	/*
	 * Set the beacon register and enable all timers.
	 */
	/* When in AP or Mesh Point mode zero timer0 to start TSF */
	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT)
		ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
	ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
	ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1);
	ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2);
	ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3);
	/* Force a TSF reset if requested and enable beacons */
	if (interval & AR5K_BEACON_RESET_TSF)
		ath5k_hw_reset_tsf(ah);
	ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD |
					AR5K_BEACON_ENABLE),
						AR5K_BEACON);
	/* Flush any pending BMISS interrupts on ISR by
	 * performing a clear-on-write operation on PISR
	 * register for the BMISS bit (writing a bit on
	 * ISR toggles a reset for that bit and leaves
	 * the remaining bits intact) */
	if (ah->ah_version == AR5K_AR5210)
		ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_ISR);
	else
		ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_PISR);
	/* TODO: Set enhanced sleep registers on AR5212
	 * based on vif->bss_conf params, until then
	 * disable power save reporting.*/
	AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PWR_SV);
}
/**
* ath5k_check_timer_win() - Check if timer B is timer A + window
* @a: timer a (before b)
* @b: timer b (after a)
* @window: difference between a and b
* @intval: timers are increased by this interval
*
* This helper function checks if timer B is timer A + window and covers
* cases where timer A or B might have already been updated or wrapped
* around (Timers are 16 bit).
*
* Returns true if O.K.
*/
/*
 * ath5k_check_timer_win() - Check if timer B is timer A + window.
 *
 * The hardware timers are 16 bit, so after an interval update or a
 * wrap-around the raw difference b - a may no longer equal the window.
 * Accept any of the four consistent configurations:
 *   1.) the usual case: B is A + window
 *   2.) A already updated, B not updated yet
 *   3.) A already updated and has wrapped around
 *   4.) B has wrapped around
 *
 * Returns true if the pair is consistent.
 */
static inline bool
ath5k_check_timer_win(int a, int b, int window, int intval)
{
	if (b - a == window)				/* 1.) */
		return true;
	if (a - b == intval - window)			/* 2.) */
		return true;
	if ((a | 0x10000) - b == intval - window)	/* 3.) */
		return true;
	if ((b | 0x10000) - a == window)		/* 4.) */
		return true;

	return false;
}
/**
* ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct
* @ah: The &struct ath5k_hw
* @intval: beacon interval
*
* This is a workaround for IBSS mode
*
* The need for this function arises from the fact that we have 4 separate
* HW timer registers (TIMER0 - TIMER3), which are closely related to the
* next beacon target time (NBTT), and that the HW updates these timers
* separately based on the current TSF value. The hardware increments each
* timer by the beacon interval, when the local TSF converted to TU is equal
* to the value stored in the timer.
*
* The reception of a beacon with the same BSSID can update the local HW TSF
* at any time - this is something we can't avoid. If the TSF jumps to a
* time which is later than the time stored in a timer, this timer will not
* be updated until the TSF in TU wraps around at 16 bit (the size of the
* timers) and reaches the time which is stored in the timer.
*
* The problem is that these timers are closely related to TIMER0 (NBTT) and
* that they define a time "window". When the TSF jumps between two timers
* (e.g. ATIM and NBTT), the one in the past will be left behind (not
* updated), while the one in the future will be updated every beacon
* interval. This causes the window to get larger, until the TSF wraps
* around as described above and the timer which was left behind gets
* updated again. But - because the beacon interval is usually not an exact
* divisor of the size of the timers (16 bit), an unwanted "window" between
* these timers has developed!
*
* This is especially important with the ATIM window, because during
* the ATIM window only ATIM frames and no data frames are allowed to be
* sent, which creates transmission pauses after each beacon. This symptom
* has been described as "ramping ping" because ping times increase linearly
* for some time and then drop down again. A wrong window on the DMA beacon
* timer has the same effect, so we check for these two conditions.
*
* Returns true if O.K.
*/
bool
ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
{
	unsigned int nbtt, atim, dma;
	nbtt = ath5k_hw_reg_read(ah, AR5K_TIMER0);
	atim = ath5k_hw_reg_read(ah, AR5K_TIMER3);
	dma = ath5k_hw_reg_read(ah, AR5K_TIMER1) >> 3;	/* TIMER1 is in 1/8TU */
	/* NOTE: SWBA is different. Having a wrong window there does not
	 * stop us from sending data and this condition is caught by
	 * other means (SWBA interrupt) */
	/* ATIM must end 1 TU after NBTT, DBA must fire
	 * AR5K_TUNE_DMA_BEACON_RESP TUs before it. */
	if (ath5k_check_timer_win(nbtt, atim, 1, intval) &&
	    ath5k_check_timer_win(dma, nbtt, AR5K_TUNE_DMA_BEACON_RESP,
				intval))
		return true; /* O.K. */
	return false;
}
/**
* ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class
* @ah: The &struct ath5k_hw
* @coverage_class: IEEE 802.11 coverage class number
*
* Sets IFS intervals and ACK/CTS timeouts for given coverage class.
*/
/*
 * ath5k_hw_set_coverage_class() - apply an IEEE 802.11 coverage class.
 *
 * As defined by IEEE 802.11-2007 17.3.8.6: each coverage class step
 * adds 3us of air propagation time to the slot time; ACK and CTS
 * timeouts are SIFS plus the resulting slot time.
 */
void
ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
{
	int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
	int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;

	ath5k_hw_set_ifs_intervals(ah, slot_time);
	ath5k_hw_set_ack_timeout(ah, ack_timeout);
	ath5k_hw_set_cts_timeout(ah, ack_timeout);

	ah->ah_coverage_class = coverage_class;
}
/***************************\
* Init/Start/Stop functions *
\***************************/
/**
* ath5k_hw_start_rx_pcu() - Start RX engine
* @ah: The &struct ath5k_hw
*
* Starts RX engine on PCU so that hw can process RXed frames
* (ACK etc).
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
*/
void
ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
	/* Clearing the RX-disable diagnostics bit lets the PCU process
	 * incoming frames again. */
	AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
* ath5k_hw_stop_rx_pcu() - Stop RX engine
* @ah: The &struct ath5k_hw
*
* Stops RX engine on PCU
*/
void
ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
{
	/* Setting the RX-disable diagnostics bit stops the PCU from
	 * processing incoming frames. */
	AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
* ath5k_hw_set_opmode() - Set PCU operating mode
* @ah: The &struct ath5k_hw
* @op_mode: One of enum nl80211_iftype
*
* Configure PCU for the various operating modes (AP/STA etc)
*/
int
ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
	struct ath_common *common = ath5k_hw_common(ah);
	u32 pcu_reg, beacon_reg, low_id, high_id;
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
	/* Preserve rest settings */
	pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
	pcu_reg &= ~(AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_AP
			| AR5K_STA_ID1_KEYSRCH_MODE
			| (ah->ah_version == AR5K_AR5210 ?
			(AR5K_STA_ID1_PWR_SV | AR5K_STA_ID1_NO_PSPOLL) : 0));
	beacon_reg = 0;
	switch (op_mode) {
	case NL80211_IFTYPE_ADHOC:
		pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
		beacon_reg |= AR5K_BCR_ADHOC;
		if (ah->ah_version == AR5K_AR5210)
			pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
		else
			AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
		break;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_MESH_POINT:
		pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE;
		beacon_reg |= AR5K_BCR_AP;
		if (ah->ah_version == AR5K_AR5210)
			pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
		else
			AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
		break;
	case NL80211_IFTYPE_STATION:
		pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
			| (ah->ah_version == AR5K_AR5210 ?
				AR5K_STA_ID1_PWR_SV : 0);
		/* fall through -- STA also gets the monitor-mode bits */
	case NL80211_IFTYPE_MONITOR:
		pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
			| (ah->ah_version == AR5K_AR5210 ?
				AR5K_STA_ID1_NO_PSPOLL : 0);
		break;
	default:
		return -EINVAL;
	}
	/*
	 * Set PCU registers
	 */
	low_id = get_unaligned_le32(common->macaddr);
	high_id = get_unaligned_le16(common->macaddr + 4);
	ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
	ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
	/*
	 * Set Beacon Control Register on 5210
	 */
	if (ah->ah_version == AR5K_AR5210)
		ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);
	return 0;
}
/**
 * ath5k_hw_pcu_init() - Initialize PCU
 * @ah: The &struct ath5k_hw
 * @op_mode: One of enum nl80211_iftype
 *
 * This function is used to initialize PCU by setting current
 * operation mode and various other settings.
 */
void
ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
	/* Set bssid and bssid mask */
	ath5k_hw_set_bssid(ah);

	/* Set PCU config */
	ath5k_hw_set_opmode(ah, op_mode);

	/* Write rate duration table only on AR5212 and if
	 * virtual interface has already been brought up
	 * XXX: rethink this after new mode changes to
	 * mac80211 are integrated */
	if (ah->ah_version == AR5K_AR5212 &&
		ah->nvifs)
		ath5k_hw_write_rate_duration(ah);

	/* Set RSSI/BRSSI thresholds
	 *
	 * Note: If we decide to set this value
	 * dynamically, have in mind that when AR5K_RSSI_THR
	 * register is read it might return 0x40 if we haven't
	 * wrote anything to it plus BMISS RSSI threshold is zeroed.
	 * So doing a save/restore procedure here isn't the right
	 * choice. Instead store it on ath5k_hw */
	ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES |
				AR5K_TUNE_BMISS_THRES <<
				AR5K_RSSI_THR_BMISS_S),
				AR5K_RSSI_THR);

	/* MIC QoS support - magic register values per vendor init code */
	if (ah->ah_mac_srev >= AR5K_SREV_AR2413) {
		ath5k_hw_reg_write(ah, 0x000100aa, AR5K_MIC_QOS_CTL);
		ath5k_hw_reg_write(ah, 0x00003210, AR5K_MIC_QOS_SEL);
	}

	/* QoS NOACK Policy */
	if (ah->ah_version == AR5K_AR5212) {
		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(2, AR5K_QOS_NOACK_2BIT_VALUES) |
			AR5K_REG_SM(5, AR5K_QOS_NOACK_BIT_OFFSET) |
			AR5K_REG_SM(0, AR5K_QOS_NOACK_BYTE_OFFSET),
			AR5K_QOS_NOACK);
	}

	/* Restore slot time and ACK timeouts */
	if (ah->ah_coverage_class > 0)
		ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);

	/* Set ACK bitrate mode (see ack_rates_high) */
	if (ah->ah_version == AR5K_AR5212) {
		u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
		if (ah->ah_ack_bitrate_high)
			AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
		else
			AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
	}
	return;
}
| gpl-2.0 |
wuby986/Sixty-4Stroke-kernel | drivers/staging/media/solo6x10/solo6x10-v4l2.c | 2320 | 19455 | /*
* Copyright (C) 2010-2013 Bluecherry, LLC <http://www.bluecherrydvr.com>
*
* Original author:
* Ben Collins <bcollins@ubuntu.com>
*
* Additional work by:
* John Brooks <john.brooks@bluecherry.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/videobuf2-dma-contig.h>
#include "solo6x10.h"
#include "solo6x10-tw28.h"
/* Image size is two fields, SOLO_HW_BPL is one horizontal line in hardware */
#define SOLO_HW_BPL		2048
/* Full frame height: the hardware vsize counts one field */
#define solo_vlines(__solo)	(__solo->video_vsize * 2)
#define solo_image_size(__solo)	(solo_bytesperline(__solo) * \
				 solo_vlines(__solo))
/* UYVY 4:2:2 packed: 2 bytes per pixel */
#define solo_bytesperline(__solo) (__solo->video_hsize * 2)
/* Minimum number of buffers accepted by queue_setup */
#define MIN_VID_BUFFERS		2
static inline void erase_on(struct solo_dev *solo_dev)
{
	/* Start display blanking and reset the blanked-frame counter;
	 * erase_off() later counts frames before clearing the flag. */
	solo_dev->frame_blank = 0;
	solo_dev->erasing = 1;
	solo_reg_write(solo_dev, SOLO_VO_DISP_ERASE, SOLO_VO_DISP_ERASE_ON);
}
static inline int erase_off(struct solo_dev *solo_dev)
{
	if (!solo_dev->erasing)
		return 0;

	/* De-assert the hardware erase request on the first call
	 * after erase_on() */
	if (solo_dev->frame_blank == 0)
		solo_reg_write(solo_dev, SOLO_VO_DISP_ERASE, 0);

	/* Keep reporting "still erasing" for at least 8 frames */
	if (solo_dev->frame_blank++ >= 8)
		solo_dev->erasing = 0;

	return 1;
}
/* Video-in interrupt: just kick the display thread, which does the
 * actual buffer handling in process context. */
void solo_video_in_isr(struct solo_dev *solo_dev)
{
	wake_up_interruptible_all(&solo_dev->disp_thread_wait);
}
/* Program display window @ch to the rectangle (sx,sy)-(ex,ey) with the
 * given hardware scale factor. Window index and source channel are kept
 * identical. Out-of-range channels are silently ignored. */
static void solo_win_setup(struct solo_dev *solo_dev, u8 ch,
			   int sx, int sy, int ex, int ey, int scale)
{
	if (ch >= solo_dev->nr_chans)
		return;

	/* Here, we just keep window/channel the same */
	solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL0(ch),
		       SOLO_VI_WIN_CHANNEL(ch) |
		       SOLO_VI_WIN_SX(sx) |
		       SOLO_VI_WIN_EX(ex) |
		       SOLO_VI_WIN_SCALE(scale));

	solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL1(ch),
		       SOLO_VI_WIN_SY(sy) |
		       SOLO_VI_WIN_EY(ey));
}
/* Enable/disable a 2x2 quad view. @idx selects which group of four
 * consecutive cameras (idx * 4 .. idx * 4 + 3) is shown. When disabling,
 * the windows are parked off-screen (start == end == frame size). */
static int solo_v4l2_ch_ext_4up(struct solo_dev *solo_dev, u8 idx, int on)
{
	u8 ch = idx * 4;

	if (ch >= solo_dev->nr_chans)
		return -EINVAL;

	if (!on) {
		u8 i;

		for (i = ch; i < ch + 4; i++)
			solo_win_setup(solo_dev, i, solo_dev->video_hsize,
				       solo_vlines(solo_dev),
				       solo_dev->video_hsize,
				       solo_vlines(solo_dev), 0);
		return 0;
	}

	/* Row 1 */
	solo_win_setup(solo_dev, ch, 0, 0, solo_dev->video_hsize / 2,
		       solo_vlines(solo_dev) / 2, 3);
	solo_win_setup(solo_dev, ch + 1, solo_dev->video_hsize / 2, 0,
		       solo_dev->video_hsize, solo_vlines(solo_dev) / 2, 3);
	/* Row 2 */
	solo_win_setup(solo_dev, ch + 2, 0, solo_vlines(solo_dev) / 2,
		       solo_dev->video_hsize / 2, solo_vlines(solo_dev), 3);
	solo_win_setup(solo_dev, ch + 3, solo_dev->video_hsize / 2,
		       solo_vlines(solo_dev) / 2, solo_dev->video_hsize,
		       solo_vlines(solo_dev), 3);

	return 0;
}
/* Enable/disable the 4x4 sixteen-camera multiview (16-port cards).
 * When disabling, all sixteen windows are parked off-screen. */
static int solo_v4l2_ch_ext_16up(struct solo_dev *solo_dev, int on)
{
	int row, col, qw, qh;

	if (!on) {
		int i;

		for (i = 0; i < 16; i++)
			solo_win_setup(solo_dev, i, solo_dev->video_hsize,
				       solo_vlines(solo_dev),
				       solo_dev->video_hsize,
				       solo_vlines(solo_dev), 0);
		return 0;
	}

	qh = solo_vlines(solo_dev) / 4;
	qw = solo_dev->video_hsize / 4;

	/* Tile quarter-size windows; the last column is stretched to
	 * the real frame width to absorb any division remainder. */
	for (row = 0; row < 4; row++) {
		int top = row * qh;

		for (col = 0; col < 4; col++) {
			int left = qw * col;
			int right = (col == 3) ? solo_dev->video_hsize :
						 qw * (col + 1);

			solo_win_setup(solo_dev, row * 4 + col, left, top,
				       right, top + qh, 5);
		}
	}

	return 0;
}
/* Route display channel @ch on or off screen. Channels below nr_chans
 * are single cameras; the rest are extended multiview channels. */
static int solo_v4l2_ch(struct solo_dev *solo_dev, u8 ch, int on)
{
	u8 ext;

	if (ch < solo_dev->nr_chans) {
		int sx = on ? 0 : solo_dev->video_hsize;
		int sy = on ? 0 : solo_vlines(solo_dev);

		/* Full-screen when on, parked off-screen when off */
		solo_win_setup(solo_dev, ch, sx, sy, solo_dev->video_hsize,
			       solo_vlines(solo_dev), on ? 1 : 0);
		return 0;
	}

	if (ch >= solo_dev->nr_chans + solo_dev->nr_ext)
		return -EINVAL;

	ext = ch - solo_dev->nr_chans;

	/* Extended channels 0-3 are 4up views; anything beyond that is
	 * the 16up view on 16-port cards. */
	if (ext < 4)
		return solo_v4l2_ch_ext_4up(solo_dev, ext, on);

	return solo_v4l2_ch_ext_16up(solo_dev, on);
}
/* Switch the display to channel @ch: blank the output, take the old
 * channel off screen and bring the new one on. Callers spin on
 * erase_off() afterwards to wait out the blanking period. */
static int solo_v4l2_set_ch(struct solo_dev *solo_dev, u8 ch)
{
	if (ch >= solo_dev->nr_chans + solo_dev->nr_ext)
		return -EINVAL;

	erase_on(solo_dev);

	solo_v4l2_ch(solo_dev, solo_dev->cur_disp_ch, 0);
	solo_v4l2_ch(solo_dev, ch, 1);

	solo_dev->cur_disp_ch = ch;

	return 0;
}
/* Fill one vb2 buffer: either a blank frame while erasing (grey UYVY
 * pattern written by the CPU) or a real frame DMAed from the current
 * hardware display page. Completes the buffer with DONE or ERROR. */
static void solo_fillbuf(struct solo_dev *solo_dev,
			 struct vb2_buffer *vb)
{
	dma_addr_t vbuf;
	unsigned int fdma_addr;
	int error = -1;
	int i;

	vbuf = vb2_dma_contig_plane_dma_addr(vb, 0);
	if (!vbuf)
		goto finish_buf;

	if (erase_off(solo_dev)) {
		void *p = vb2_plane_vaddr(vb, 0);
		int image_size = solo_image_size(solo_dev);

		/* 0x80/0x00 byte pairs: mid chroma, black luma in UYVY */
		for (i = 0; i < image_size; i += 2) {
			((u8 *)p)[i] = 0x80;
			((u8 *)p)[i + 1] = 0x00;
		}
		error = 0;
	} else {
		/* DMA from the page the hardware last finished writing */
		fdma_addr = SOLO_DISP_EXT_ADDR + (solo_dev->old_write *
				(SOLO_HW_BPL * solo_vlines(solo_dev)));

		error = solo_p2m_dma_t(solo_dev, 0, vbuf, fdma_addr,
				       solo_bytesperline(solo_dev),
				       solo_vlines(solo_dev), SOLO_HW_BPL);
	}

finish_buf:
	if (!error) {
		vb2_set_plane_payload(vb, 0,
			solo_vlines(solo_dev) * solo_bytesperline(solo_dev));
		vb->v4l2_buf.sequence = solo_dev->sequence++;
		v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
	}

	vb2_buffer_done(vb, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}
/* Drain as many frames as the hardware has produced: for every new
 * hardware page, pop one queued buffer and fill it. Note the unusual
 * locking shape: "break" exits with slock HELD (empty queue case),
 * while "return" paths exit unlocked. */
static void solo_thread_try(struct solo_dev *solo_dev)
{
	struct solo_vb2_buf *vb;

	/* Only "break" from this loop if slock is held, otherwise
	 * just return. */
	for (;;) {
		unsigned int cur_write;

		cur_write = SOLO_VI_STATUS0_PAGE(
			solo_reg_read(solo_dev, SOLO_VI_STATUS0));
		/* No new frame from the hardware yet */
		if (cur_write == solo_dev->old_write)
			return;

		spin_lock(&solo_dev->slock);

		if (list_empty(&solo_dev->vidq_active))
			break;

		vb = list_first_entry(&solo_dev->vidq_active, struct solo_vb2_buf,
				      list);

		solo_dev->old_write = cur_write;
		list_del(&vb->list);

		spin_unlock(&solo_dev->slock);

		/* Fill outside the lock; solo_fillbuf may sleep in DMA */
		solo_fillbuf(solo_dev, &vb->vb);
	}

	assert_spin_locked(&solo_dev->slock);
	spin_unlock(&solo_dev->slock);
}
/* Display kthread main loop: wake up on the video-in IRQ (or at least
 * once a second) and hand completed frames to queued buffers. */
static int solo_thread(void *data)
{
	struct solo_dev *solo_dev = data;

	DECLARE_WAITQUEUE(wait, current);

	set_freezable();
	add_wait_queue(&solo_dev->disp_thread_wait, &wait);

	for (;;) {
		/* Sleeps until solo_video_in_isr()/solo_buf_queue() wake
		 * us, a second elapses, or we are signalled to stop */
		long timeout = schedule_timeout_interruptible(HZ);

		if (timeout == -ERESTARTSYS || kthread_should_stop())
			break;
		solo_thread_try(solo_dev);
		try_to_freeze();
	}

	remove_wait_queue(&solo_dev->disp_thread_wait, &wait);

	return 0;
}
/* Spawn the display thread, then unmask the video-in interrupt.
 * Returns 0 on success or the kthread_run() error code. */
static int solo_start_thread(struct solo_dev *solo_dev)
{
	struct task_struct *task;

	task = kthread_run(solo_thread, solo_dev, SOLO6X10_NAME "_disp");
	if (IS_ERR(task)) {
		solo_dev->kthread = NULL;
		return PTR_ERR(task);
	}
	solo_dev->kthread = task;

	solo_irq_on(solo_dev, SOLO_IRQ_VIDEO_IN);

	return 0;
}
/* Mask the video-in interrupt and reap the display thread, if any. */
static void solo_stop_thread(struct solo_dev *solo_dev)
{
	struct task_struct *task = solo_dev->kthread;

	if (task == NULL)
		return;

	/* Stop wakeup sources first so the thread exits cleanly */
	solo_irq_off(solo_dev, SOLO_IRQ_VIDEO_IN);
	kthread_stop(task);
	solo_dev->kthread = NULL;
}
/* vb2 .queue_setup: one plane of a full interlaced frame, at least
 * MIN_VID_BUFFERS buffers, allocated from the dma-contig context. */
static int solo_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
			   unsigned int *num_buffers, unsigned int *num_planes,
			   unsigned int sizes[], void *alloc_ctxs[])
{
	struct solo_dev *solo_dev = vb2_get_drv_priv(q);

	sizes[0] = solo_image_size(solo_dev);
	alloc_ctxs[0] = solo_dev->alloc_ctx;
	*num_planes = 1;

	if (*num_buffers < MIN_VID_BUFFERS)
		*num_buffers = MIN_VID_BUFFERS;

	return 0;
}
/* vb2 .start_streaming: restart the frame sequence counter and start
 * the display thread (which also enables the video-in IRQ). */
static int solo_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct solo_dev *solo_dev = vb2_get_drv_priv(q);

	solo_dev->sequence = 0;
	return solo_start_thread(solo_dev);
}
/* vb2 .stop_streaming: stop the display thread, then drop any buffers
 * still queued (vb2 reclaims them after this returns). */
static int solo_stop_streaming(struct vb2_queue *q)
{
	struct solo_dev *solo_dev = vb2_get_drv_priv(q);

	solo_stop_thread(solo_dev);
	INIT_LIST_HEAD(&solo_dev->vidq_active);
	return 0;
}
/* vb2 .buf_queue: append the buffer to the active list under slock and
 * wake the display thread so it can be filled. */
static void solo_buf_queue(struct vb2_buffer *vb)
{
	struct solo_dev *solo_dev = vb2_get_drv_priv(vb->vb2_queue);
	struct solo_vb2_buf *buf =
		container_of(vb, struct solo_vb2_buf, vb);

	spin_lock(&solo_dev->slock);
	list_add_tail(&buf->list, &solo_dev->vidq_active);
	spin_unlock(&solo_dev->slock);

	wake_up_interruptible(&solo_dev->disp_thread_wait);
}
/* videobuf2 queue operations for the display capture node */
static const struct vb2_ops solo_video_qops = {
	.queue_setup		= solo_queue_setup,
	.buf_queue		= solo_buf_queue,
	.start_streaming	= solo_start_streaming,
	.stop_streaming		= solo_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};
/* VIDIOC_QUERYCAP: report driver/card identity and capabilities.
 * All string fields are fixed-size arrays in struct v4l2_capability,
 * so use bounded snprintf() copies (the original strcpy() calls were
 * unbounded) to guarantee NUL-termination without overflow. */
static int solo_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	struct solo_dev *solo_dev = video_drvdata(file);

	snprintf(cap->driver, sizeof(cap->driver), "%s", SOLO6X10_NAME);
	snprintf(cap->card, sizeof(cap->card), "%s", "Softlogic 6x10");
	snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
		 pci_name(solo_dev->pdev));
	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
			   V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	return 0;
}
/* Name an extended (multiview) input. The table used depends on how
 * many extended inputs the card exposes (1, 2 or 5). */
static int solo_enum_ext_input(struct solo_dev *solo_dev,
			       struct v4l2_input *input)
{
	static const char * const names_1[] = { "4UP" };
	static const char * const names_2[] = { "4UP-1", "4UP-2" };
	static const char * const names_5[] = {
		"4UP-1", "4UP-2", "4UP-3", "4UP-4", "16UP"
	};
	const char * const *names;

	if (input->index >= (solo_dev->nr_chans + solo_dev->nr_ext))
		return -EINVAL;

	switch (solo_dev->nr_ext) {
	case 5:
		names = names_5;
		break;
	case 2:
		names = names_2;
		break;
	default:
		names = names_1;
		break;
	}

	snprintf(input->name, sizeof(input->name), "Multi %s",
		 names[input->index - solo_dev->nr_chans]);

	return 0;
}
/* VIDIOC_ENUMINPUT: cameras first, then the extended multiview inputs.
 * Signal presence can only be probed on real camera inputs. */
static int solo_enum_input(struct file *file, void *priv,
			   struct v4l2_input *input)
{
	struct solo_dev *solo_dev = video_drvdata(file);

	if (input->index >= solo_dev->nr_chans) {
		int ret = solo_enum_ext_input(solo_dev, input);
		if (ret < 0)
			return ret;
	} else {
		snprintf(input->name, sizeof(input->name), "Camera %d",
			 input->index + 1);

		/* We can only check this for normal inputs */
		if (!tw28_get_video_status(solo_dev, input->index))
			input->status = V4L2_IN_ST_NO_SIGNAL;
	}

	input->type = V4L2_INPUT_TYPE_CAMERA;
	input->std = solo_dev->vfd->tvnorms;
	return 0;
}
/* VIDIOC_S_INPUT: switch the display channel, then busy-wait until the
 * blanking period that covers the switch has elapsed. */
static int solo_set_input(struct file *file, void *priv, unsigned int index)
{
	struct solo_dev *solo_dev = video_drvdata(file);
	int ret;

	ret = solo_v4l2_set_ch(solo_dev, index);
	if (ret)
		return ret;

	while (erase_off(solo_dev))
		/* Do nothing */;

	return 0;
}
/* VIDIOC_G_INPUT: report the channel currently routed to the display */
static int solo_get_input(struct file *file, void *priv, unsigned int *index)
{
	struct solo_dev *solo_dev = video_drvdata(file);

	*index = solo_dev->cur_disp_ch;

	return 0;
}
/* VIDIOC_ENUM_FMT: the display node supports exactly one pixel format,
 * UYVY 4:2:2 packed. The description string previously misspelled the
 * fourcc as "UYUV"; fixed to match V4L2_PIX_FMT_UYVY. */
static int solo_enum_fmt_cap(struct file *file, void *priv,
			     struct v4l2_fmtdesc *f)
{
	if (f->index)
		return -EINVAL;

	f->pixelformat = V4L2_PIX_FMT_UYVY;
	strlcpy(f->description, "UYVY 4:2:2 Packed", sizeof(f->description));

	return 0;
}
/* VIDIOC_TRY_FMT: the hardware runs one fixed frame geometry, so any
 * UYVY request is coerced to it; other pixel formats are rejected.
 * Also fill in bytesperline, which was previously left untouched here
 * although solo_get_fmt_cap() reports it for the same geometry. */
static int solo_try_fmt_cap(struct file *file, void *priv,
			    struct v4l2_format *f)
{
	struct solo_dev *solo_dev = video_drvdata(file);
	struct v4l2_pix_format *pix = &f->fmt.pix;
	int image_size = solo_image_size(solo_dev);

	if (pix->pixelformat != V4L2_PIX_FMT_UYVY)
		return -EINVAL;

	pix->width = solo_dev->video_hsize;
	pix->height = solo_vlines(solo_dev);
	pix->sizeimage = image_size;
	pix->field = V4L2_FIELD_INTERLACED;
	pix->pixelformat = V4L2_PIX_FMT_UYVY;
	pix->bytesperline = solo_bytesperline(solo_dev);
	pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
	pix->priv = 0;
	return 0;
}
/* VIDIOC_S_FMT: the format is fixed by the hardware configuration, so
 * "setting" is just validating against it (see solo_try_fmt_cap). */
static int solo_set_fmt_cap(struct file *file, void *priv,
			    struct v4l2_format *f)
{
	struct solo_dev *solo_dev = video_drvdata(file);

	if (vb2_is_busy(&solo_dev->vidq))
		return -EBUSY;

	/* For right now, if it doesn't match our running config,
	 * then fail */
	return solo_try_fmt_cap(file, priv, f);
}
/* VIDIOC_G_FMT: report the fixed hardware frame geometry. */
static int solo_get_fmt_cap(struct file *file, void *priv,
			    struct v4l2_format *f)
{
	struct solo_dev *solo_dev = video_drvdata(file);
	struct v4l2_pix_format *pix = &f->fmt.pix;

	pix->pixelformat = V4L2_PIX_FMT_UYVY;
	pix->field = V4L2_FIELD_INTERLACED;
	pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
	pix->priv = 0;

	/* Geometry is whatever the hardware is currently configured for */
	pix->width = solo_dev->video_hsize;
	pix->height = solo_vlines(solo_dev);
	pix->bytesperline = solo_bytesperline(solo_dev);
	pix->sizeimage = solo_image_size(solo_dev);

	return 0;
}
/* VIDIOC_G_STD: derive the TV standard from the hardware video type. */
static int solo_g_std(struct file *file, void *priv, v4l2_std_id *i)
{
	struct solo_dev *solo_dev = video_drvdata(file);

	*i = (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) ?
		V4L2_STD_NTSC_M : V4L2_STD_PAL;

	return 0;
}
/* Switch the whole device between NTSC (type == 0) and PAL. Refuses
 * while any display or encoder queue is streaming, then re-runs the
 * standard-dependent init sequence and restores the display channel. */
int solo_set_video_type(struct solo_dev *solo_dev, bool type)
{
	int i;

	/* Make sure all video nodes are idle */
	if (vb2_is_busy(&solo_dev->vidq))
		return -EBUSY;

	for (i = 0; i < solo_dev->nr_chans; i++)
		if (vb2_is_busy(&solo_dev->v4l2_enc[i]->vidq))
			return -EBUSY;

	solo_dev->video_type = type;

	/* Reconfigure for the new standard */
	solo_disp_init(solo_dev);
	solo_enc_init(solo_dev);
	solo_tw28_init(solo_dev);

	for (i = 0; i < solo_dev->nr_chans; i++)
		solo_update_mode(solo_dev->v4l2_enc[i]);

	return solo_v4l2_set_ch(solo_dev, solo_dev->cur_disp_ch);
}
/* VIDIOC_S_STD: any PAL-family standard selects PAL, otherwise NTSC */
static int solo_s_std(struct file *file, void *priv, v4l2_std_id std)
{
	struct solo_dev *solo_dev = video_drvdata(file);

	return solo_set_video_type(solo_dev, std & V4L2_STD_PAL);
}
/* Control handler for the display node. Only the custom motion-trace
 * boolean is handled: it toggles the hardware motion border/bar
 * overlay registers on or off. */
static int solo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct solo_dev *solo_dev =
		container_of(ctrl->handler, struct solo_dev, disp_hdl);

	switch (ctrl->id) {
	case V4L2_CID_MOTION_TRACE:
		if (ctrl->val) {
			solo_reg_write(solo_dev, SOLO_VI_MOTION_BORDER,
					SOLO_VI_MOTION_Y_ADD |
					SOLO_VI_MOTION_Y_VALUE(0x20) |
					SOLO_VI_MOTION_CB_VALUE(0x10) |
					SOLO_VI_MOTION_CR_VALUE(0x10));
			solo_reg_write(solo_dev, SOLO_VI_MOTION_BAR,
					SOLO_VI_MOTION_CR_ADD |
					SOLO_VI_MOTION_Y_VALUE(0x10) |
					SOLO_VI_MOTION_CB_VALUE(0x80) |
					SOLO_VI_MOTION_CR_VALUE(0x10));
		} else {
			solo_reg_write(solo_dev, SOLO_VI_MOTION_BORDER, 0);
			solo_reg_write(solo_dev, SOLO_VI_MOTION_BAR, 0);
		}
		return 0;
	default:
		break;
	}
	return -EINVAL;
}
/* File operations for the display node; everything except ioctl is
 * delegated to the videobuf2 helpers. */
static const struct v4l2_file_operations solo_v4l2_fops = {
	.owner			= THIS_MODULE,
	.open			= v4l2_fh_open,
	.release		= vb2_fop_release,
	.read			= vb2_fop_read,
	.poll			= vb2_fop_poll,
	.mmap			= vb2_fop_mmap,
	.unlocked_ioctl		= video_ioctl2,
};
/* ioctl dispatch table for the display node */
static const struct v4l2_ioctl_ops solo_v4l2_ioctl_ops = {
	.vidioc_querycap		= solo_querycap,
	.vidioc_s_std			= solo_s_std,
	.vidioc_g_std			= solo_g_std,
	/* Input callbacks */
	.vidioc_enum_input		= solo_enum_input,
	.vidioc_s_input			= solo_set_input,
	.vidioc_g_input			= solo_get_input,
	/* Video capture format callbacks */
	.vidioc_enum_fmt_vid_cap	= solo_enum_fmt_cap,
	.vidioc_try_fmt_vid_cap		= solo_try_fmt_cap,
	.vidioc_s_fmt_vid_cap		= solo_set_fmt_cap,
	.vidioc_g_fmt_vid_cap		= solo_get_fmt_cap,
	/* Streaming I/O (videobuf2 helpers) */
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
	/* Logging and events */
	.vidioc_log_status		= v4l2_ctrl_log_status,
	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};
/* Template copied into the allocated video_device in solo_v4l2_init() */
static struct video_device solo_v4l2_template = {
	.name			= SOLO6X10_NAME,
	.fops			= &solo_v4l2_fops,
	.ioctl_ops		= &solo_v4l2_ioctl_ops,
	.minor			= -1,
	.release		= video_device_release,
	.tvnorms		= V4L2_STD_NTSC_M | V4L2_STD_PAL,
};

static const struct v4l2_ctrl_ops solo_ctrl_ops = {
	.s_ctrl = solo_s_ctrl,
};

/* Custom boolean control toggling the hardware motion overlay */
static const struct v4l2_ctrl_config solo_motion_trace_ctrl = {
	.ops = &solo_ctrl_ops,
	.id = V4L2_CID_MOTION_TRACE,
	.name = "Motion Detection Trace",
	.type = V4L2_CTRL_TYPE_BOOLEAN,
	.max = 1,
	.step = 1,
};
/* Register the display capture node: allocate the video device, set up
 * the control handler and vb2 queue, clear all channels and register
 * /dev/videoN. Returns 0 or a negative error.
 *
 * Fix: a failure of vb2_dma_contig_init_ctx() previously returned
 * directly, leaking the allocated video_device and the initialized
 * control handler; it now goes through the common fail path (with the
 * ERR_PTR cleared so the cleanup never touches it). */
int solo_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
{
	int ret;
	int i;

	init_waitqueue_head(&solo_dev->disp_thread_wait);
	spin_lock_init(&solo_dev->slock);
	mutex_init(&solo_dev->lock);
	INIT_LIST_HEAD(&solo_dev->vidq_active);

	solo_dev->vfd = video_device_alloc();
	if (!solo_dev->vfd)
		return -ENOMEM;

	*solo_dev->vfd = solo_v4l2_template;
	solo_dev->vfd->v4l2_dev = &solo_dev->v4l2_dev;
	solo_dev->vfd->queue = &solo_dev->vidq;
	solo_dev->vfd->lock = &solo_dev->lock;
	v4l2_ctrl_handler_init(&solo_dev->disp_hdl, 1);
	v4l2_ctrl_new_custom(&solo_dev->disp_hdl, &solo_motion_trace_ctrl, NULL);
	if (solo_dev->disp_hdl.error) {
		ret = solo_dev->disp_hdl.error;
		goto fail;
	}
	solo_dev->vfd->ctrl_handler = &solo_dev->disp_hdl;
	set_bit(V4L2_FL_USE_FH_PRIO, &solo_dev->vfd->flags);

	video_set_drvdata(solo_dev->vfd, solo_dev);

	solo_dev->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	solo_dev->vidq.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	solo_dev->vidq.ops = &solo_video_qops;
	solo_dev->vidq.mem_ops = &vb2_dma_contig_memops;
	solo_dev->vidq.drv_priv = solo_dev;
	solo_dev->vidq.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	solo_dev->vidq.gfp_flags = __GFP_DMA32;
	solo_dev->vidq.buf_struct_size = sizeof(struct solo_vb2_buf);
	solo_dev->vidq.lock = &solo_dev->lock;
	ret = vb2_queue_init(&solo_dev->vidq);
	if (ret < 0)
		goto fail;

	solo_dev->alloc_ctx = vb2_dma_contig_init_ctx(&solo_dev->pdev->dev);
	if (IS_ERR(solo_dev->alloc_ctx)) {
		dev_err(&solo_dev->pdev->dev, "Can't allocate buffer context");
		ret = PTR_ERR(solo_dev->alloc_ctx);
		/* Don't hand the ERR_PTR to the cleanup path below */
		solo_dev->alloc_ctx = NULL;
		goto fail;
	}

	/* Cycle all the channels and clear */
	for (i = 0; i < solo_dev->nr_chans; i++) {
		solo_v4l2_set_ch(solo_dev, i);
		while (erase_off(solo_dev))
			/* Do nothing */;
	}

	/* Set the default display channel */
	solo_v4l2_set_ch(solo_dev, 0);
	while (erase_off(solo_dev))
		/* Do nothing */;

	ret = video_register_device(solo_dev->vfd, VFL_TYPE_GRABBER, nr);
	if (ret < 0)
		goto fail;

	snprintf(solo_dev->vfd->name, sizeof(solo_dev->vfd->name), "%s (%i)",
		 SOLO6X10_NAME, solo_dev->vfd->num);

	dev_info(&solo_dev->pdev->dev, "Display as /dev/video%d with "
		 "%d inputs (%d extended)\n", solo_dev->vfd->num,
		 solo_dev->nr_chans, solo_dev->nr_ext);

	return 0;

fail:
	video_device_release(solo_dev->vfd);
	if (solo_dev->alloc_ctx)
		vb2_dma_contig_cleanup_ctx(solo_dev->alloc_ctx);
	v4l2_ctrl_handler_free(&solo_dev->disp_hdl);
	solo_dev->vfd = NULL;
	return ret;
}
/* Tear down the display node; safe to call if init never completed
 * (vfd == NULL is the "not initialized" marker). */
void solo_v4l2_exit(struct solo_dev *solo_dev)
{
	if (solo_dev->vfd == NULL)
		return;

	video_unregister_device(solo_dev->vfd);
	vb2_dma_contig_cleanup_ctx(solo_dev->alloc_ctx);
	v4l2_ctrl_handler_free(&solo_dev->disp_hdl);
	solo_dev->vfd = NULL;
}
| gpl-2.0 |
caoyuhua/Linux-3.10.28 | drivers/net/caif/caif_spi_slave.c | 2320 | 6581 | /*
* Copyright (C) ST-Ericsson AB 2010
* Author: Daniel Martensson
* License terms: GNU General Public License (GPL) version 2.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <net/caif/caif_spi.h>
#ifndef CONFIG_CAIF_SPI_SYNC
/* In async mode the payload starts at offset 0 and the next SPI
 * command trails the received packet. */
#define SPI_DATA_POS 0
static inline int forward_to_spi_cmd(struct cfspi *cfspi)
{
	return cfspi->rx_cpck_len;
}
#else
/* In sync mode the SPI command leads the frame, so the payload starts
 * after it and the command is read from offset 0. */
#define SPI_DATA_POS SPI_CMD_SZ
static inline int forward_to_spi_cmd(struct cfspi *cfspi)
{
	return 0;
}
#endif
/* Overall SPI frame alignment in bytes */
int spi_frm_align = 2;

/*
 * SPI padding options.
 * Warning: must be a base of 2 (& operation used) and can not be zero !
 */
int spi_up_head_align = 1 << 1;
int spi_up_tail_align = 1 << 0;
int spi_down_head_align = 1 << 2;
int spi_down_tail_align = 1 << 1;
#ifdef CONFIG_DEBUG_FS
/* Snapshot the just-completed command/lengths so debugfs can show the
 * previous transfer alongside the current one. */
static inline void debugfs_store_prev(struct cfspi *cfspi)
{
	/* Store previous command for debugging reasons.*/
	cfspi->pcmd = cfspi->cmd;
	/* Store previous transfer. */
	cfspi->tx_ppck_len = cfspi->tx_cpck_len;
	cfspi->rx_ppck_len = cfspi->rx_cpck_len;
}
#else
/* No-op when debugfs support is compiled out */
static inline void debugfs_store_prev(struct cfspi *cfspi)
{
}
#endif
/* Slave-side SPI transfer loop, run from a workqueue. Each iteration
 * performs one full-duplex burst with the master: commit the previously
 * announced TX frame, announce the next one's length, DMA both
 * directions, then parse the master's command/length for the following
 * burst. Runs until SPI_TERMINATE is set. */
void cfspi_xfer(struct work_struct *work)
{
	struct cfspi *cfspi;
	u8 *ptr = NULL;
	unsigned long flags;
	int ret;
	cfspi = container_of(work, struct cfspi, work);

	/* Initialize state. */
	cfspi->cmd = SPI_CMD_EOT;

	for (;;) {

		cfspi_dbg_state(cfspi, CFSPI_STATE_WAITING);

		/* Wait for master talk or transmit event. */
		wait_event_interruptible(cfspi->wait,
				 test_bit(SPI_XFER, &cfspi->state) ||
				 test_bit(SPI_TERMINATE, &cfspi->state));

		if (test_bit(SPI_TERMINATE, &cfspi->state))
			return;

#if CFSPI_DBG_PREFILL
		/* Prefill buffers for easier debugging. */
		memset(cfspi->xfer.va_tx, 0xFF, SPI_DMA_BUF_LEN);
		memset(cfspi->xfer.va_rx, 0xFF, SPI_DMA_BUF_LEN);
#endif	/* CFSPI_DBG_PREFILL */

		cfspi_dbg_state(cfspi, CFSPI_STATE_AWAKE);

		/* Check whether we have a committed frame. */
		if (cfspi->tx_cpck_len) {
			int len;

			cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT);

			/* Copy committed SPI frames after the SPI indication. */
			ptr = (u8 *) cfspi->xfer.va_tx;
			ptr += SPI_IND_SZ;
			len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len);
			WARN_ON(len != cfspi->tx_cpck_len);
		}

		cfspi_dbg_state(cfspi, CFSPI_STATE_GET_NEXT);

		/* Get length of next frame to commit. */
		cfspi->tx_npck_len = cfspi_xmitlen(cfspi);

		WARN_ON(cfspi->tx_npck_len > SPI_DMA_BUF_LEN);

		/*
		 * Add indication and length at the beginning of the frame,
		 * using little endian.
		 */
		ptr = (u8 *) cfspi->xfer.va_tx;
		*ptr++ = SPI_CMD_IND;
		*ptr++ = (SPI_CMD_IND & 0xFF00) >> 8;

		*ptr++ = cfspi->tx_npck_len & 0x00FF;
		*ptr++ = (cfspi->tx_npck_len & 0xFF00) >> 8;

		/* Calculate length of DMAs. */
		cfspi->xfer.tx_dma_len = cfspi->tx_cpck_len + SPI_IND_SZ;
		cfspi->xfer.rx_dma_len = cfspi->rx_cpck_len + SPI_CMD_SZ;

		/* Add SPI TX frame alignment padding, if necessary. */
		if (cfspi->tx_cpck_len &&
			(cfspi->xfer.tx_dma_len % spi_frm_align)) {

			cfspi->xfer.tx_dma_len += spi_frm_align -
			    (cfspi->xfer.tx_dma_len % spi_frm_align);
		}

		/* Add SPI RX frame alignment padding, if necessary. */
		if (cfspi->rx_cpck_len &&
			(cfspi->xfer.rx_dma_len % spi_frm_align)) {

			cfspi->xfer.rx_dma_len += spi_frm_align -
			    (cfspi->xfer.rx_dma_len % spi_frm_align);
		}

		cfspi_dbg_state(cfspi, CFSPI_STATE_INIT_XFER);

		/* Start transfer. */
		ret = cfspi->dev->init_xfer(&cfspi->xfer, cfspi->dev);
		WARN_ON(ret);

		cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_ACTIVE);

		/*
		 * TODO: We might be able to make an assumption if this is the
		 * first loop. Make sure that minimum toggle time is respected.
		 */
		udelay(MIN_TRANSITION_TIME_USEC);

		cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE);

		/* Signal that we are ready to receive data. */
		cfspi->dev->sig_xfer(true, cfspi->dev);

		cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE);

		/* Wait for transfer completion. */
		wait_for_completion(&cfspi->comp);

		cfspi_dbg_state(cfspi, CFSPI_STATE_XFER_DONE);

		if (cfspi->cmd == SPI_CMD_EOT) {
			/*
			 * Clear the master talk bit. A xfer is always at
			 * least two bursts.
			 */
			clear_bit(SPI_SS_ON, &cfspi->state);
		}

		cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_INACTIVE);

		/* Make sure that the minimum toggle time is respected. */
		if (SPI_XFER_TIME_USEC(cfspi->xfer.tx_dma_len,
					cfspi->dev->clk_mhz) <
			MIN_TRANSITION_TIME_USEC) {

			udelay(MIN_TRANSITION_TIME_USEC -
				SPI_XFER_TIME_USEC
				(cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz));
		}

		cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_INACTIVE);

		/* De-assert transfer signal. */
		cfspi->dev->sig_xfer(false, cfspi->dev);

		/* Check whether we received a CAIF packet. */
		if (cfspi->rx_cpck_len) {
			int len;

			cfspi_dbg_state(cfspi, CFSPI_STATE_DELIVER_PKT);

			/* Parse SPI frame. */
			ptr = ((u8 *)(cfspi->xfer.va_rx + SPI_DATA_POS));

			len = cfspi_rxfrm(cfspi, ptr, cfspi->rx_cpck_len);
			WARN_ON(len != cfspi->rx_cpck_len);
		}

		/* Check the next SPI command and length (little endian). */
		ptr = (u8 *) cfspi->xfer.va_rx;

		ptr += forward_to_spi_cmd(cfspi);

		cfspi->cmd = *ptr++;
		cfspi->cmd |= ((*ptr++) << 8) & 0xFF00;
		cfspi->rx_npck_len = *ptr++;
		cfspi->rx_npck_len |= ((*ptr++) << 8) & 0xFF00;

		WARN_ON(cfspi->rx_npck_len > SPI_DMA_BUF_LEN);
		WARN_ON(cfspi->cmd > SPI_CMD_EOT);

		debugfs_store_prev(cfspi);

		/* Check whether the master issued an EOT command. */
		if (cfspi->cmd == SPI_CMD_EOT) {
			/* Reset state. */
			cfspi->tx_cpck_len = 0;
			cfspi->rx_cpck_len = 0;
		} else {
			/* Update state. The announced lengths become the
			 * committed lengths of the next burst. */
			cfspi->tx_cpck_len = cfspi->tx_npck_len;
			cfspi->rx_cpck_len = cfspi->rx_npck_len;
		}

		/*
		 * Check whether we need to clear the xfer bit.
		 * Spin lock needed for packet insertion.
		 * Test and clear of different bits
		 * are not supported.
		 */
		spin_lock_irqsave(&cfspi->lock, flags);
		if (cfspi->cmd == SPI_CMD_EOT && !cfspi_xmitlen(cfspi)
			&& !test_bit(SPI_SS_ON, &cfspi->state))
			clear_bit(SPI_XFER, &cfspi->state);

		spin_unlock_irqrestore(&cfspi->lock, flags);
	}
}
/* Platform driver glue; probe/remove live in the common caif_spi code */
struct platform_driver cfspi_spi_driver = {
	.probe = cfspi_spi_probe,
	.remove = cfspi_spi_remove,
	.driver = {
		   .name = "cfspi_sspi",
		   .owner = THIS_MODULE,
		   },
};
| gpl-2.0 |
narantech/linux-pc64 | arch/mn10300/kernel/signal.c | 2576 | 11535 | /* MN10300 Signal handling
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/personality.h>
#include <linux/suspend.h>
#include <linux/tracehook.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/fpu.h>
#include "sigframe.h"
#define DEBUG_SIG 0

/*
 * do a signal return; undo the signal stack.
 */
/* Copy the user-space sigcontext back into the kernel pt_regs. Only the
 * user-visible EPSW bits are restored; the syscall number in d0 is
 * returned separately through @_d0. Returns non-zero on any fault. */
static int restore_sigcontext(struct pt_regs *regs,
			      struct sigcontext __user *sc, long *_d0)
{
	unsigned int err = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* Drop any live FPU state; it is reloaded from the frame below */
	if (is_using_fpu(current))
		fpu_kill_state(current);

#define COPY(x) err |= __get_user(regs->x, &sc->x)
	COPY(d1); COPY(d2); COPY(d3);
	COPY(a0); COPY(a1); COPY(a2); COPY(a3);
	COPY(e0); COPY(e1); COPY(e2); COPY(e3);
	COPY(e4); COPY(e5); COPY(e6); COPY(e7);
	COPY(lar); COPY(lir);
	COPY(mdr); COPY(mdrq);
	COPY(mcvf); COPY(mcrl); COPY(mcrh);
	COPY(sp); COPY(pc);
#undef COPY

	{
		unsigned int tmpflags;
#ifndef CONFIG_MN10300_USING_JTAG
#define USER_EPSW (EPSW_FLAG_Z | EPSW_FLAG_N | EPSW_FLAG_C | EPSW_FLAG_V | \
		   EPSW_T | EPSW_nAR)
#else
#define USER_EPSW (EPSW_FLAG_Z | EPSW_FLAG_N | EPSW_FLAG_C | EPSW_FLAG_V | \
		   EPSW_nAR)
#endif
		/* Merge only the user-modifiable EPSW flag bits */
		err |= __get_user(tmpflags, &sc->epsw);
		regs->epsw = (regs->epsw & ~USER_EPSW) |
		  (tmpflags & USER_EPSW);
		regs->orig_d0 = -1;	/* disable syscall checks */
	}

	{
		struct fpucontext *buf;
		err |= __get_user(buf, &sc->fpucontext);
		if (buf) {
			if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
				goto badframe;
			err |= fpu_restore_sigcontext(buf);
		}
	}

	err |= __get_user(*_d0, &sc->d0);
	return err;

badframe:
	return 1;
}
/*
 * standard signal return syscall
 */
/* Restore the blocked-signal mask and registers from the sigframe the
 * kernel pushed on the user stack; SIGSEGV on any malformed frame. */
asmlinkage long sys_sigreturn(void)
{
	struct sigframe __user *frame;
	sigset_t set;
	long d0;

	/* The frame sits at the interrupted user stack pointer */
	frame = (struct sigframe __user *) current_frame()->sp;
	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.oldmask))
		goto badframe;

	if (_NSIG_WORDS > 1 &&
	    __copy_from_user(&set.sig[1], &frame->extramask,
			     sizeof(frame->extramask)))
		goto badframe;

	set_current_blocked(&set);

	if (restore_sigcontext(current_frame(), &frame->sc, &d0))
		goto badframe;

	/* Resume with the d0 value saved in the sigcontext */
	return d0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
/*
 * realtime signal return syscall
 */
/* Like sys_sigreturn() but for RT frames: the full sigset and the
 * alternate signal stack come from the ucontext. */
asmlinkage long sys_rt_sigreturn(void)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	long d0;

	frame = (struct rt_sigframe __user *) current_frame()->sp;
	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return d0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
/*
 * store the userspace context into a signal frame
 */
/* Write the current register state (and FPU state, if any) into the
 * user-space sigcontext @sc. Returns non-zero on any fault. */
static int setup_sigcontext(struct sigcontext __user *sc,
			    struct fpucontext *fpuctx,
			    struct pt_regs *regs,
			    unsigned long mask)
{
	int tmp, err = 0;

#define COPY(x) err |= __put_user(regs->x, &sc->x)
	COPY(d0); COPY(d1); COPY(d2); COPY(d3);
	COPY(a0); COPY(a1); COPY(a2); COPY(a3);
	COPY(e0); COPY(e1); COPY(e2); COPY(e3);
	COPY(e4); COPY(e5); COPY(e6); COPY(e7);
	COPY(lar); COPY(lir);
	COPY(mdr); COPY(mdrq);
	COPY(mcvf); COPY(mcrl); COPY(mcrh);
	COPY(sp); COPY(epsw); COPY(pc);
#undef COPY

	/* Save FPU state; a NULL fpucontext pointer in the frame means
	 * the task had no live FPU state. */
	tmp = fpu_setup_sigcontext(fpuctx);
	if (tmp < 0)
		err = 1;
	else
		err |= __put_user(tmp ? fpuctx : NULL, &sc->fpucontext);

	/* non-iBCS2 extensions.. */
	err |= __put_user(mask, &sc->oldmask);

	return err;
}
/*
* work out where on the user stack the signal frame should be placed
* - honours SA_ONSTACK by switching to the alternate signal stack when
*   one is configured and we are not already executing on it
* - the returned address is aligned down to an 8-byte boundary
*/
static inline void __user *get_sigframe(struct k_sigaction *ka,
struct pt_regs *regs,
size_t frame_size)
{
/* default to using normal stack */
unsigned long usp = regs->sp;
/* this is the X/Open sanctioned signal stack switching */
if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(usp) == 0)
usp = current->sas_ss_sp + current->sas_ss_size;
usp -= frame_size;
return (void __user *) (usp & ~7UL);
}
/*
* set up a normal (non-RT) signal frame on the user stack and point the
* registers at the handler; returns 0 or -EFAULT (after forcing SIGSEGV)
*/
static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
struct pt_regs *regs)
{
struct sigframe __user *frame;
int rsig;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
/* translate the signal number through the exec domain's inverse map
* (for non-native binary personalities) */
rsig = sig;
if (sig < 32 &&
current_thread_info()->exec_domain &&
current_thread_info()->exec_domain->signal_invmap)
rsig = current_thread_info()->exec_domain->signal_invmap[sig];
if (__put_user(rsig, &frame->sig) < 0 ||
__put_user(&frame->sc, &frame->psc) < 0)
goto give_sigsegv;
if (setup_sigcontext(&frame->sc, &frame->fpuctx, regs, set->sig[0]))
goto give_sigsegv;
/* words 2..n of the blocked mask live outside the sigcontext */
if (_NSIG_WORDS > 1) {
if (__copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask)))
goto give_sigsegv;
}
/* set up to return from userspace. If provided, use a stub already in
* userspace */
if (ka->sa.sa_flags & SA_RESTORER) {
if (__put_user(ka->sa.sa_restorer, &frame->pretcode))
goto give_sigsegv;
} else {
if (__put_user((void (*)(void))frame->retcode,
&frame->pretcode))
goto give_sigsegv;
/* this is mov $,d0; syscall 0 */
if (__put_user(0x2c, (char *)(frame->retcode + 0)) ||
__put_user(__NR_sigreturn, (char *)(frame->retcode + 1)) ||
__put_user(0x00, (char *)(frame->retcode + 2)) ||
__put_user(0xf0, (char *)(frame->retcode + 3)) ||
__put_user(0xe0, (char *)(frame->retcode + 4)))
goto give_sigsegv;
/* the trampoline was written through the data side; make it
* visible to the instruction fetch */
flush_icache_range((unsigned long) frame->retcode,
(unsigned long) frame->retcode + 5);
}
/* set up registers for signal handler */
regs->sp = (unsigned long) frame;
regs->pc = (unsigned long) ka->sa.sa_handler;
regs->d0 = sig;
regs->d1 = (unsigned long) &frame->sc;
#if DEBUG_SIG
printk(KERN_DEBUG "SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n",
sig, current->comm, current->pid, frame, regs->pc,
frame->pretcode);
#endif
return 0;
give_sigsegv:
force_sigsegv(sig, current);
return -EFAULT;
}
/*
* set up a realtime signal frame (siginfo + full ucontext) on the user
* stack and point the registers at the handler; returns 0 or -EFAULT
* (after forcing SIGSEGV)
*/
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
int rsig;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
/* translate the signal number through the exec domain's inverse map */
rsig = sig;
if (sig < 32 &&
current_thread_info()->exec_domain &&
current_thread_info()->exec_domain->signal_invmap)
rsig = current_thread_info()->exec_domain->signal_invmap[sig];
if (__put_user(rsig, &frame->sig) ||
__put_user(&frame->info, &frame->pinfo) ||
__put_user(&frame->uc, &frame->puc) ||
copy_siginfo_to_user(&frame->info, info))
goto give_sigsegv;
/* create the ucontext. */
if (__put_user(0, &frame->uc.uc_flags) ||
__put_user(0, &frame->uc.uc_link) ||
__save_altstack(&frame->uc.uc_stack, regs->sp) ||
setup_sigcontext(&frame->uc.uc_mcontext,
&frame->fpuctx, regs, set->sig[0]) ||
__copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)))
goto give_sigsegv;
/* set up to return from userspace. If provided, use a stub already in
* userspace */
if (ka->sa.sa_flags & SA_RESTORER) {
if (__put_user(ka->sa.sa_restorer, &frame->pretcode))
goto give_sigsegv;
} else {
if (__put_user((void(*)(void))frame->retcode,
&frame->pretcode) ||
/* This is mov $,d0; syscall 0 */
__put_user(0x2c, (char *)(frame->retcode + 0)) ||
__put_user(__NR_rt_sigreturn,
(char *)(frame->retcode + 1)) ||
__put_user(0x00, (char *)(frame->retcode + 2)) ||
__put_user(0xf0, (char *)(frame->retcode + 3)) ||
__put_user(0xe0, (char *)(frame->retcode + 4)))
goto give_sigsegv;
/* make the freshly written trampoline fetchable */
flush_icache_range((u_long) frame->retcode,
(u_long) frame->retcode + 5);
}
/* Set up registers for signal handler */
regs->sp = (unsigned long) frame;
regs->pc = (unsigned long) ka->sa.sa_handler;
regs->d0 = sig;
regs->d1 = (long) &frame->info;
#if DEBUG_SIG
printk(KERN_DEBUG "SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n",
sig, current->comm, current->pid, frame, regs->pc,
frame->pretcode);
#endif
return 0;
give_sigsegv:
force_sigsegv(sig, current);
return -EFAULT;
}
/*
* wind the saved PC back over the 2-byte syscall instruction so it is
* re-executed on return to userspace, and clear orig_d0 so the restart
* logic cannot trigger a second time for the same syscall
*/
static inline void stepback(struct pt_regs *regs)
{
regs->pc -= 2;
regs->orig_d0 = -1;
}
/*
* handle the actual delivery of a signal to userspace
* - fixes up the syscall-restart state first, then builds the RT or
*   non-RT frame depending on SA_SIGINFO
* - returns 0 on success or the setup_*frame() error
*/
static int handle_signal(int sig,
siginfo_t *info, struct k_sigaction *ka,
struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int ret;
/* Are we from a system call? */
if (regs->orig_d0 >= 0) {
/* If so, check system call restarting.. */
switch (regs->d0) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
/* never restart across a handler: fail with EINTR */
regs->d0 = -EINTR;
break;
case -ERESTARTSYS:
/* restart only if the handler asked for it */
if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->d0 = -EINTR;
break;
}
/* fallthrough */
case -ERESTARTNOINTR:
/* re-issue the syscall after the handler returns */
regs->d0 = regs->orig_d0;
stepback(regs);
}
}
/* Set up the stack frame */
if (ka->sa.sa_flags & SA_SIGINFO)
ret = setup_rt_frame(sig, ka, info, oldset, regs);
else
ret = setup_frame(sig, ka, oldset, regs);
if (ret)
return ret;
signal_delivered(sig, info, ka, regs,
test_thread_flag(TIF_SINGLESTEP));
return 0;
}
/*
* handle a potential signal
* - delivers the next pending signal if there is one (the delivery
*   result is deliberately ignored), otherwise arranges for any
*   interrupted system call to be restarted and reinstates the saved
*   signal mask
*/
static void do_signal(struct pt_regs *regs)
{
siginfo_t info;
struct k_sigaction ka;
int signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
handle_signal(signr, &info, &ka, regs);
return;
}
/* no handler: did we come from a system call? */
if (regs->orig_d0 >= 0) {
switch (regs->d0) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
/* re-run the original syscall */
regs->d0 = regs->orig_d0;
stepback(regs);
break;
case -ERESTART_RESTARTBLOCK:
/* restart via the restart_syscall mechanism */
regs->d0 = __NR_restart_syscall;
stepback(regs);
break;
}
}
/* nothing to deliver: just put the saved sigmask back */
restore_saved_sigmask();
}
/*
* notification of userspace execution resumption
* - triggered by current->work.notify_resume
* - handles pending hardware single-step requests, signal delivery and
*   tracehook notifications before the return to userspace
*/
asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
{
/* Pending single-step? */
if (thread_info_flags & _TIF_SINGLESTEP) {
#ifndef CONFIG_MN10300_USING_JTAG
/* arm the trace flag so the next user insn traps */
regs->epsw |= EPSW_T;
clear_thread_flag(TIF_SINGLESTEP);
#else
BUG(); /* no h/w single-step if using JTAG unit */
#endif
}
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(current_frame());
}
}
| gpl-2.0 |
denkem/enru-3.1.10-g7f360be | drivers/gpu/drm/drm_stub.c | 2576 | 11850 | /**
* \file drm_stub.h
* Stub support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
*/
/*
* Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
*
* Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm_core.h"
/* module parameters, adjustable via /sys/module/drm/parameters/ */
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
/* global DRM core state: minor-number idr, sysfs class, proc/debugfs roots */
struct idr drm_minors_idr;
struct class *drm_class;
struct proc_dir_entry *drm_proc_root;
struct dentry *drm_debugfs_root;
/*
* drm_err - log an error message prefixed with "[drm:<func>] *ERROR*"
* @func: name of the calling function (inserted into the prefix)
* @format: printf-style format string, followed by its arguments
*
* Returns the value returned by printk().
*/
int drm_err(const char *func, const char *format, ...)
{
struct va_format vaf;
va_list args;
int r;
va_start(args, format);
/* %pV expands the nested va_format, avoiding a temporary buffer */
vaf.fmt = format;
vaf.va = &args;
r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
va_end(args);
return r;
}
EXPORT_SYMBOL(drm_err);
/*
* drm_ut_debug_printk - conditional debug logging helper
* @request_level: debug category bit(s) this message belongs to
* @prefix: subsystem prefix for the header line
* @function_name: calling function, or NULL to omit the header line
* @format: printf-style format string and arguments
*
* Emits the message only when one of the requested category bits is set
* in the module-wide drm_debug mask.
*/
void drm_ut_debug_printk(unsigned int request_level,
const char *prefix,
const char *function_name,
const char *format, ...)
{
va_list args;
if (drm_debug & request_level) {
if (function_name)
printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
va_start(args, format);
vprintk(format, args);
va_end(args);
}
}
EXPORT_SYMBOL(drm_ut_debug_printk);
/*
* drm_minor_get_id - allocate a minor number for a new drm_minor
* @dev: device the minor belongs to (supplies struct_mutex)
* @type: DRM_MINOR_LEGACY / DRM_MINOR_CONTROL / DRM_MINOR_RENDER;
*        selects the number range the id is drawn from
*
* Returns the allocated id or a negative errno.
*
* NOTE(review): with base 64 the control limit (base + 127 = 191)
* overlaps the render range starting at 128; later kernels use
* base + 63 / base + 127 here. Looks unintended - confirm before
* relying on the exact ranges.
*/
static int drm_minor_get_id(struct drm_device *dev, int type)
{
int new_id;
int ret;
int base = 0, limit = 63;
if (type == DRM_MINOR_CONTROL) {
base += 64;
limit = base + 127;
} else if (type == DRM_MINOR_RENDER) {
base += 128;
limit = base + 255;
}
again:
/* pre-allocate idr memory outside the lock; retry on -EAGAIN */
if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Out of memory expanding drawable idr\n");
return -ENOMEM;
}
mutex_lock(&dev->struct_mutex);
ret = idr_get_new_above(&drm_minors_idr, NULL,
base, &new_id);
mutex_unlock(&dev->struct_mutex);
if (ret == -EAGAIN) {
goto again;
} else if (ret) {
return ret;
}
/* ran past the range reserved for this minor type */
if (new_id >= limit) {
idr_remove(&drm_minors_idr, new_id);
return -EINVAL;
}
return new_id;
}
/*
* drm_master_create - allocate and initialise a master object for @minor
*
* The new master starts with one kref reference, an empty magic-token
* hash table and is linked onto the minor's master list.
* Returns the master or NULL on allocation failure.
*
* NOTE(review): the drm_ht_create() return value is not checked here -
* an allocation failure inside it would go unnoticed.
*/
struct drm_master *drm_master_create(struct drm_minor *minor)
{
struct drm_master *master;
master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
return NULL;
kref_init(&master->refcount);
spin_lock_init(&master->lock.spinlock);
init_waitqueue_head(&master->lock.lock_queue);
drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
INIT_LIST_HEAD(&master->magicfree);
master->minor = minor;
list_add_tail(&master->head, &minor->master_list);
return master;
}
/* take an extra reference on @master; returns @master for call chaining */
struct drm_master *drm_master_get(struct drm_master *master)
{
kref_get(&master->refcount);
return master;
}
EXPORT_SYMBOL(drm_master_get);
/*
* drm_master_destroy - kref release callback tearing down a drm_master
*
* Unlinks the master from its minor, lets the driver clean up, removes
* all maps owned by this master, frees the unique identifier, the device
* name, all outstanding authentication magic tokens and finally the
* master itself.
*
* (The former dead assignment "r_list = NULL;" inside the map loop has
* been dropped: list_for_each_entry_safe() reloads r_list from the
* saved list_temp on every iteration, so the store had no effect.)
*/
static void drm_master_destroy(struct kref *kref)
{
struct drm_master *master = container_of(kref, struct drm_master, refcount);
struct drm_magic_entry *pt, *next;
struct drm_device *dev = master->minor->dev;
struct drm_map_list *r_list, *list_temp;
list_del(&master->head);
if (dev->driver->master_destroy)
dev->driver->master_destroy(dev, master);
/* _safe iteration: drm_rmmap_locked() frees the list entry */
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
if (r_list->master == master)
drm_rmmap_locked(dev, r_list->map);
}
if (master->unique) {
kfree(master->unique);
master->unique = NULL;
master->unique_len = 0;
}
kfree(dev->devname);
dev->devname = NULL;
/* release all outstanding magic authentication tokens */
list_for_each_entry_safe(pt, next, &master->magicfree, head) {
list_del(&pt->head);
drm_ht_remove_item(&master->magiclist, &pt->hash_item);
kfree(pt);
}
drm_ht_remove(&master->magiclist);
kfree(master);
}
/*
* drm_master_put - drop a reference on *@master and clear the pointer
*
* Dropping the final reference invokes drm_master_destroy().
*/
void drm_master_put(struct drm_master **master)
{
kref_put(&(*master)->refcount, drm_master_destroy);
*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);
/*
* drm_setmaster_ioctl - ioctl handler making the calling file the master
*
* A no-op if the caller already is master; fails with -EINVAL when
* another file holds master status or the caller has no master object.
* Otherwise installs the caller's master on the minor and notifies the
* driver via ->master_set(), rolling back if the driver refuses.
*
* Fix: the driver's ->master_set() error code was computed and used for
* the rollback but then discarded by an unconditional "return 0" - it
* is now propagated to userspace.
*/
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret = 0;
/* already master: nothing to do */
if (file_priv->is_master)
return 0;
if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
return -EINVAL;
if (!file_priv->master)
return -EINVAL;
if (!file_priv->minor->master &&
file_priv->minor->master != file_priv->master) {
mutex_lock(&dev->struct_mutex);
file_priv->minor->master = drm_master_get(file_priv->master);
file_priv->is_master = 1;
if (dev->driver->master_set) {
ret = dev->driver->master_set(dev, file_priv, false);
if (unlikely(ret != 0)) {
/* driver refused: undo the master assignment */
file_priv->is_master = 0;
drm_master_put(&file_priv->minor->master);
}
}
mutex_unlock(&dev->struct_mutex);
}
return ret;
}
/*
* drm_dropmaster_ioctl - ioctl handler relinquishing DRM master status
*
* Notifies the driver via ->master_drop() and releases the minor's
* master reference. Returns -EINVAL when the caller is not the current
* master or no master is installed on the minor.
*/
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (!file_priv->is_master)
return -EINVAL;
if (!file_priv->minor->master)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
if (dev->driver->master_drop)
dev->driver->master_drop(dev, file_priv, false);
drm_master_put(&file_priv->minor->master);
file_priv->is_master = 0;
mutex_unlock(&dev->struct_mutex);
return 0;
}
/*
* drm_fill_in_dev - initialise the common parts of a new drm_device
* @dev: freshly allocated device structure to fill in
* @ent: PCI id entry that matched (unused here; kept for the bus glue)
* @driver: the drm_driver this device is bound to
*
* Sets up the device lists and locks, the map hash, the statistics
* counters, optional AGP state, the context bitmap and - for GEM
* drivers - the GEM core. Returns 0 or a negative errno; on failure
* drm_lastclose() undoes any partial setup.
*/
int drm_fill_in_dev(struct drm_device *dev,
const struct pci_device_id *ent,
struct drm_driver *driver)
{
int retcode;
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
if (drm_ht_create(&dev->map_hash, 12)) {
return -ENOMEM;
}
/* the DRM has 6 basic counters */
dev->counters = 6;
dev->types[0] = _DRM_STAT_LOCK;
dev->types[1] = _DRM_STAT_OPENS;
dev->types[2] = _DRM_STAT_CLOSES;
dev->types[3] = _DRM_STAT_IOCTLS;
dev->types[4] = _DRM_STAT_LOCKS;
dev->types[5] = _DRM_STAT_UNLOCKS;
dev->driver = driver;
/* bus-specific AGP bring-up, when the bus glue provides it */
if (dev->driver->bus->agp_init) {
retcode = dev->driver->bus->agp_init(dev);
if (retcode)
goto error_out_unreg;
}
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
goto error_out_unreg;
}
if (driver->driver_features & DRIVER_GEM) {
retcode = drm_gem_init(dev);
if (retcode) {
DRM_ERROR("Cannot initialize graphics execution "
"manager (GEM)\n");
goto error_out_unreg;
}
}
return 0;
error_out_unreg:
drm_lastclose(dev);
return retcode;
}
/**
 * Get a secondary minor number.
 *
 * \param dev device data structure
 * \param minor structure to hold the assigned minor
 * \param type minor type (DRM_MINOR_LEGACY/CONTROL/RENDER)
 * \return 0 on success, negative errno on failure.
 *
 * Search an empty entry and initialize it to the given parameters, and
 * create the proc init entry via proc_init(). This routine assigns
 * minor numbers to secondary heads of multi-headed cards.
 */
int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
{
struct drm_minor *new_minor;
int ret;
int minor_id;
DRM_DEBUG("\n");
minor_id = drm_minor_get_id(dev, type);
if (minor_id < 0)
return minor_id;
new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
if (!new_minor) {
ret = -ENOMEM;
goto err_idr;
}
new_minor->type = type;
new_minor->device = MKDEV(DRM_MAJOR, minor_id);
new_minor->dev = dev;
new_minor->index = minor_id;
INIT_LIST_HEAD(&new_minor->master_list);
/* publish the minor in the idr slot reserved by drm_minor_get_id() */
idr_replace(&drm_minors_idr, new_minor, minor_id);
if (type == DRM_MINOR_LEGACY) {
ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
goto err_mem;
}
} else
new_minor->proc_root = NULL;
#if defined(CONFIG_DEBUG_FS)
ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
goto err_g2;
}
#endif
ret = drm_sysfs_device_add(new_minor);
if (ret) {
printk(KERN_ERR
"DRM: Error sysfs_device_add.\n");
goto err_g2;
}
*minor = new_minor;
DRM_DEBUG("new minor assigned %d\n", minor_id);
return 0;
err_g2:
/* NOTE(review): debugfs state created above is not torn down on this
* path - looks like a small leak; confirm against later kernels */
if (new_minor->type == DRM_MINOR_LEGACY)
drm_proc_cleanup(new_minor, drm_proc_root);
err_mem:
kfree(new_minor);
err_idr:
idr_remove(&drm_minors_idr, minor_id);
*minor = NULL;
return ret;
}
/**
 * Put a secondary minor number.
 *
 * \param minor_p pointer to the minor to be released; cleared on return
 * \return always zero
 *
 * Cleans up the proc, debugfs and sysfs resources, removes the minor
 * from the idr and frees it. Not legal for this to be the last minor
 * released.
 */
int drm_put_minor(struct drm_minor **minor_p)
{
struct drm_minor *minor = *minor_p;
DRM_DEBUG("release secondary minor %d\n", minor->index);
if (minor->type == DRM_MINOR_LEGACY)
drm_proc_cleanup(minor, drm_proc_root);
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_cleanup(minor);
#endif
drm_sysfs_device_remove(minor);
idr_remove(&drm_minors_idr, minor->index);
kfree(minor);
*minor_p = NULL;
return 0;
}
/**
 * Called via drm_exit() at module unload time or when pci device is
 * unplugged.
 *
 * Cleans up all DRM device, calling drm_lastclose().
 *
 * Tears down AGP/MTRR state, driver state, vblank handling, all maps,
 * the context bitmap, minors and GEM before freeing the device.
 */
void drm_put_dev(struct drm_device *dev)
{
struct drm_driver *driver;
struct drm_map_list *r_list, *list_temp;
DRM_DEBUG("\n");
if (!dev) {
DRM_ERROR("cleanup called no dev\n");
return;
}
driver = dev->driver;
drm_lastclose(dev);
/* undo the write-combining MTRR set up for the AGP aperture */
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
dev->agp && dev->agp->agp_mtrr >= 0) {
int retval;
retval = mtrr_del(dev->agp->agp_mtrr,
dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size * 1024 * 1024);
DRM_DEBUG("mtrr_del=%d\n", retval);
}
if (dev->driver->unload)
dev->driver->unload(dev);
if (drm_core_has_AGP(dev) && dev->agp) {
kfree(dev->agp);
dev->agp = NULL;
}
drm_vblank_cleanup(dev);
/* _safe iteration: drm_rmmap() frees each list entry */
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_rmmap(dev, r_list->map);
drm_ht_remove(&dev->map_hash);
drm_ctxbitmap_cleanup(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
if (driver->driver_features & DRIVER_GEM)
drm_gem_destroy(dev);
drm_put_minor(&dev->primary);
list_del(&dev->driver_item);
if (dev->devname) {
kfree(dev->devname);
dev->devname = NULL;
}
kfree(dev);
}
EXPORT_SYMBOL(drm_put_dev);
| gpl-2.0 |
mdeejay/kernel_huawei_front | arch/arm/mach-s5p64x0/cpu.c | 2832 | 4427 | /* linux/arch/arm/mach-s5p64x0/cpu.c
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/sysdev.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/proc-fns.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
#include <plat/regs-serial.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/clock.h>
#include <plat/s5p6440.h>
#include <plat/s5p6450.h>
#include <plat/adc-core.h>
/* Initial IO mappings */
/* mappings shared by S5P6440 and S5P6450: GPIO block and both VICs */
static struct map_desc s5p64x0_iodesc[] __initdata = {
{
.virtual = (unsigned long)S5P_VA_GPIO,
.pfn = __phys_to_pfn(S5P64X0_PA_GPIO),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)VA_VIC0,
.pfn = __phys_to_pfn(S5P64X0_PA_VIC0),
.length = SZ_16K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)VA_VIC1,
.pfn = __phys_to_pfn(S5P64X0_PA_VIC1),
.length = SZ_16K,
.type = MT_DEVICE,
},
};
/* S5P6440-only mapping: UART0 bank */
static struct map_desc s5p6440_iodesc[] __initdata = {
{
.virtual = (unsigned long)S3C_VA_UART,
.pfn = __phys_to_pfn(S5P6440_PA_UART(0)),
.length = SZ_4K,
.type = MT_DEVICE,
},
};
/* S5P6450-only mappings: UART0-4 window plus a separate page for UART5 */
static struct map_desc s5p6450_iodesc[] __initdata = {
{
.virtual = (unsigned long)S3C_VA_UART,
.pfn = __phys_to_pfn(S5P6450_PA_UART(0)),
.length = SZ_512K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S3C_VA_UART + SZ_512K,
.pfn = __phys_to_pfn(S5P6450_PA_UART(5)),
.length = SZ_4K,
.type = MT_DEVICE,
},
};
/*
* CPU idle callback: select the light idle power mode in PWR_CFG and
* execute WFI via cpu_do_idle(); interrupts are re-enabled on exit
* because pm_idle is entered with IRQs disabled.
*/
static void s5p64x0_idle(void)
{
unsigned long val;
if (!need_resched()) {
val = __raw_readl(S5P64X0_PWR_CFG);
/* set CFG[6:5] to 01 - presumably "idle" (vs stop/deep-stop);
* TODO confirm against the S5P64X0 manual */
val &= ~(0x3 << 5);
val |= (0x1 << 5);
__raw_writel(val, S5P64X0_PWR_CFG);
cpu_do_idle();
}
local_irq_enable();
}
/*
* s5p64x0_map_io
*
* register the standard CPU IO areas: the shared s5p64x0 mappings plus
* the SoC-specific UART window, and name the ADC device early
*/
void __init s5p6440_map_io(void)
{
/* initialize any device information early */
s3c_adc_setname("s3c64xx-adc");
iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc));
iotable_init(s5p6440_iodesc, ARRAY_SIZE(s5p6440_iodesc));
}
void __init s5p6450_map_io(void)
{
/* initialize any device information early */
s3c_adc_setname("s3c64xx-adc");
iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc));
iotable_init(s5p6450_iodesc, ARRAY_SIZE(s5p6450_iodesc));
}
/*
* s5p64x0_init_clocks
*
* register and setup the CPU clocks: common s3c24xx/s5p base clocks
* first, then the SoC-specific clock tree, all derived from @xtal
*/
void __init s5p6440_init_clocks(int xtal)
{
printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
s3c24xx_register_baseclocks(xtal);
s5p_register_clocks(xtal);
s5p6440_register_clocks();
s5p6440_setup_clocks();
}
void __init s5p6450_init_clocks(int xtal)
{
printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
s3c24xx_register_baseclocks(xtal);
s5p_register_clocks(xtal);
s5p6450_register_clocks();
s5p6450_setup_clocks();
}
/*
* s5p64x0_init_irq
*
* register the CPU interrupts; the bitmasks mark which VIC inputs are
* actually wired on each SoC (a set bit enables that VIC line)
*/
void __init s5p6440_init_irq(void)
{
/* S5P6440 supports 2 VIC */
u32 vic[2];
/*
* VIC0 is missing IRQ_VIC0[3, 4, 8, 10, (12-22)]
* VIC1 is missing IRQ VIC1[1, 3, 4, 10, 11, 12, 14, 15, 22]
*/
vic[0] = 0xff800ae7;
vic[1] = 0xffbf23e5;
s5p_init_irq(vic, ARRAY_SIZE(vic));
}
void __init s5p6450_init_irq(void)
{
/* S5P6450 supports only 2 VIC */
u32 vic[2];
/*
* VIC0 is missing IRQ_VIC0[(13-15), (21-22)]
* VIC1 is missing IRQ VIC1[12, 14, 23]
*/
vic[0] = 0xff9f1fff;
vic[1] = 0xff7fafff;
s5p_init_irq(vic, ARRAY_SIZE(vic));
}
/* sysdev class representing the S5P64X0 core; registered at core_initcall
 * time so the class exists before s5p64x0_init() registers the device */
struct sysdev_class s5p64x0_sysclass = {
.name = "s5p64x0-core",
};
static struct sys_device s5p64x0_sysdev = {
.cls = &s5p64x0_sysclass,
};
static int __init s5p64x0_core_init(void)
{
return sysdev_class_register(&s5p64x0_sysclass);
}
core_initcall(s5p64x0_core_init);
/*
* late architecture init: install the SoC idle routine and register the
* core sysdev created by s5p64x0_core_init()
*/
int __init s5p64x0_init(void)
{
printk(KERN_INFO "S5P64X0(S5P6440/S5P6450): Initializing architecture\n");
/* set idle function */
pm_idle = s5p64x0_idle;
return sysdev_register(&s5p64x0_sysdev);
}
| gpl-2.0 |
yank555-lu/N3-AOSP-KK | arch/x86/kvm/mmu.c | 3856 | 98368 | /*
* Kernel-based Virtual Machine driver for Linux
*
* This module enables machines with Intel VT-x extensions to run virtual
* machines without emulation or binary translation.
*
* MMU support
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Yaniv Kamay <yaniv@qumranet.com>
* Avi Kivity <avi@qumranet.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#include "irq.h"
#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>
/*
* When setting this variable to true it enables Two-Dimensional-Paging
* where the hardware walks 2 page tables:
* 1. the guest-virtual to guest-physical
* 2. while doing 1. it walks guest-physical to host-physical
* If the hardware supports that we don't need to do shadow paging.
*/
bool tdp_enabled = false;
/* points at which the (optional) MMU audit hooks can be invoked */
enum {
AUDIT_PRE_PAGE_FAULT,
AUDIT_POST_PAGE_FAULT,
AUDIT_PRE_PTE_WRITE,
AUDIT_POST_PTE_WRITE,
AUDIT_PRE_SYNC,
AUDIT_POST_SYNC
};
/* compile-time MMU debugging switches; all of this collapses to no-ops
 * unless MMU_DEBUG is defined */
#undef MMU_DEBUG
#ifdef MMU_DEBUG
#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#endif
#ifdef MMU_DEBUG
static bool dbg = 0;
module_param(dbg, bool, 0644);
#endif
#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x) \
if (!(x)) { \
printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
__FILE__, __LINE__, #x); \
}
#endif
/* number of sptes speculatively prefetched around a faulting spte */
#define PTE_PREFETCH_NUM 8
/* software-available bits in a spte */
#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52
/* 64-bit paging: 9 index bits per level */
#define PT64_LEVEL_BITS 9
#define PT64_LEVEL_SHIFT(level) \
(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
#define PT64_INDEX(address, level)\
(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
/* 32-bit (non-PAE) paging: 10 index bits per level */
#define PT32_LEVEL_BITS 10
#define PT32_LEVEL_SHIFT(level) \
(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
#define PT32_LVL_OFFSET_MASK(level) \
(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
* PT32_LEVEL_BITS))) - 1))
#define PT32_INDEX(address, level)\
(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
/* physical-address extraction masks for the two paging formats */
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
#define PT64_LVL_ADDR_MASK(level) \
(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
* PT64_LEVEL_BITS))) - 1))
#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
* PT32_LEVEL_BITS))) - 1))
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
| PT64_NX_MASK)
/* sptes held inline in one pte_list_desc */
#define PTE_LIST_EXT 4
/* access permission bits as tracked by the shadow MMU */
#define ACC_EXEC_MASK 1
#define ACC_WRITE_MASK PT_WRITABLE_MASK
#define ACC_USER_MASK PT_USER_MASK
#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
#include <trace/events/kvm.h>
#define CREATE_TRACE_POINTS
#include "mmutrace.h"
/* marks sptes whose host page is writable */
#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
/* overflow node for rmap chains: up to PTE_LIST_EXT sptes per node,
 * linked through ->more */
struct pte_list_desc {
u64 *sptes[PTE_LIST_EXT];
struct pte_list_desc *more;
};
/* cursor state for walking the shadow page table for one guest address */
struct kvm_shadow_walk_iterator {
u64 addr;
hpa_t shadow_addr;
u64 *sptep;
int level;
unsigned index;
};
#define for_each_shadow_entry(_vcpu, _addr, _walker) \
for (shadow_walk_init(&(_walker), _vcpu, _addr); \
shadow_walk_okay(&(_walker)); \
shadow_walk_next(&(_walker)))
/* lockless variant: re-reads each spte atomically into 'spte' */
#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
for (shadow_walk_init(&(_walker), _vcpu, _addr); \
shadow_walk_okay(&(_walker)) && \
({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
__shadow_walk_next(&(_walker), spte))
/* slab caches for rmap descriptors and shadow page headers */
static struct kmem_cache *pte_list_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;
/* spte bit layout, configured by the vendor module at init time */
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
static u64 __read_mostly shadow_mmio_mask;
static void mmu_spte_set(u64 *sptep, u64 spte);
/* install the spte bit pattern that identifies MMIO mappings */
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
{
shadow_mmio_mask = mmio_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
/* record an MMIO spte: gfn and access bits are cached in the spte itself
 * so later faults can be recognised without a memslot lookup */
static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access)
{
access &= ACC_WRITE_MASK | ACC_USER_MASK;
trace_mark_mmio_spte(sptep, gfn, access);
mmu_spte_set(sptep, shadow_mmio_mask | access | gfn << PAGE_SHIFT);
}
static bool is_mmio_spte(u64 spte)
{
return (spte & shadow_mmio_mask) == shadow_mmio_mask;
}
/* recover the gfn cached in an MMIO spte by mark_mmio_spte() */
static gfn_t get_mmio_spte_gfn(u64 spte)
{
return (spte & ~shadow_mmio_mask) >> PAGE_SHIFT;
}
/* recover the access bits cached in an MMIO spte */
static unsigned get_mmio_spte_access(u64 spte)
{
return (spte & ~shadow_mmio_mask) & ~PAGE_MASK;
}
/* if @pfn has no backing memslot, mark the spte as MMIO; returns true
 * when an MMIO spte was installed */
static bool set_mmio_spte(u64 *sptep, gfn_t gfn, pfn_t pfn, unsigned access)
{
if (unlikely(is_noslot_pfn(pfn))) {
mark_mmio_spte(sptep, gfn, access);
return true;
}
return false;
}
/* build a mask with bits s..e (inclusive) set */
static inline u64 rsvd_bits(int s, int e)
{
return ((1ULL << (e - s + 1)) - 1) << s;
}
/*
 * configure the spte bit layout; called by the vendor module (VMX/SVM)
 * since EPT and legacy paging place these bits differently
 */
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
shadow_user_mask = user_mask;
shadow_accessed_mask = accessed_mask;
shadow_dirty_mask = dirty_mask;
shadow_nx_mask = nx_mask;
shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
/*
 * Report whether the virtual CPU advertises PSE-36 support.
 * KVM unconditionally exposes the feature, hence the constant answer.
 */
static int is_cpuid_PSE36(void)
{
return true;
}
/* has the guest enabled no-execute in EFER? */
static int is_nx(struct kvm_vcpu *vcpu)
{
return vcpu->arch.efer & EFER_NX;
}
/* present in the shadow page table (MMIO sptes are present-but-fake) */
static int is_shadow_present_pte(u64 pte)
{
return pte & PT_PRESENT_MASK && !is_mmio_spte(pte);
}
static int is_large_pte(u64 pte)
{
return pte & PT_PAGE_SIZE_MASK;
}
static int is_dirty_gpte(unsigned long pte)
{
return pte & PT_DIRTY_MASK;
}
/* sptes tracked by rmap are exactly the shadow-present ones */
static int is_rmap_spte(u64 pte)
{
return is_shadow_present_pte(pte);
}
/* does @pte at @level map a final page rather than a lower table? */
static int is_last_spte(u64 pte, int level)
{
if (level == PT_PAGE_TABLE_LEVEL)
return 1;
if (is_large_pte(pte))
return 1;
return 0;
}
static pfn_t spte_to_pfn(u64 pte)
{
return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
/* PSE-36: bits 13-16 of a large gpte supply address bits 32-35 */
static gfn_t pse36_gfn_delta(u32 gpte)
{
int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
#ifdef CONFIG_X86_64
/* on 64-bit hosts a spte is updated with a single naturally-atomic store */
static void __set_spte(u64 *sptep, u64 spte)
{
*sptep = spte;
}
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
*sptep = spte;
}
/* atomically swap in @spte and return the previous value */
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
return xchg(sptep, spte);
}
static u64 __get_spte_lockless(u64 *sptep)
{
return ACCESS_ONCE(*sptep);
}
static bool __check_direct_spte_mmio_pf(u64 spte)
{
/* It is valid if the spte is zapped. */
return spte == 0ull;
}
#else
#else
/* on 32-bit hosts a 64-bit spte must be written as two 32-bit halves;
 * the helpers below order those stores so a concurrently-walking CPU
 * never observes a half-written present spte */
union split_spte {
struct {
u32 spte_low;
u32 spte_high;
};
u64 spte;
};
/* bump the per-page clear counter after a present spte was removed, so
 * __get_spte_lockless() can detect a racing clear */
static void count_spte_clear(u64 *sptep, u64 spte)
{
struct kvm_mmu_page *sp = page_header(__pa(sptep));
if (is_shadow_present_pte(spte))
return;
/* Ensure the spte is completely set before we increase the count */
smp_wmb();
sp->clear_spte_count++;
}
static void __set_spte(u64 *sptep, u64 spte)
{
union split_spte *ssptep, sspte;
ssptep = (union split_spte *)sptep;
sspte = (union split_spte)spte;
ssptep->spte_high = sspte.spte_high;
/*
* If we map the spte from nonpresent to present, We should store
* the high bits firstly, then set present bit, so cpu can not
* fetch this spte while we are setting the spte.
*/
smp_wmb();
ssptep->spte_low = sspte.spte_low;
}
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
union split_spte *ssptep, sspte;
ssptep = (union split_spte *)sptep;
sspte = (union split_spte)spte;
ssptep->spte_low = sspte.spte_low;
/*
* If we map the spte from present to nonpresent, we should clear
* present bit firstly to avoid vcpu fetch the old high bits.
*/
smp_wmb();
ssptep->spte_high = sspte.spte_high;
count_spte_clear(sptep, spte);
}
/* variant returning the old spte; only the low half needs an atomic
 * exchange because clearing it already makes the spte non-present */
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
union split_spte *ssptep, sspte, orig;
ssptep = (union split_spte *)sptep;
sspte = (union split_spte)spte;
/* xchg acts as a barrier before the setting of the high bits */
orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
orig.spte_high = ssptep->spte_high;
ssptep->spte_high = sspte.spte_high;
count_spte_clear(sptep, spte);
return orig.spte;
}
/*
* The idea using the light way get the spte on x86_32 guest is from
* gup_get_pte(arch/x86/mm/gup.c).
* The difference is we can not catch the spte tlb flush if we leave
* guest mode, so we emulate it by increase clear_spte_count when spte
* is cleared.
*/
static u64 __get_spte_lockless(u64 *sptep)
{
struct kvm_mmu_page *sp = page_header(__pa(sptep));
union split_spte spte, *orig = (union split_spte *)sptep;
int count;
retry:
count = sp->clear_spte_count;
smp_rmb();
spte.spte_low = orig->spte_low;
smp_rmb();
spte.spte_high = orig->spte_high;
smp_rmb();
/* retry if a clear raced with our two-half read */
if (unlikely(spte.spte_low != orig->spte_low ||
count != sp->clear_spte_count))
goto retry;
return spte.spte;
}
/* can a direct-map MMIO page fault trust this (possibly torn) spte? */
static bool __check_direct_spte_mmio_pf(u64 spte)
{
union split_spte sspte = (union split_spte)spte;
u32 high_mmio_mask = shadow_mmio_mask >> 32;
/* It is valid if the spte is zapped. */
if (spte == 0ull)
return true;
/* It is valid if the spte is being zapped. */
if (sspte.spte_low == 0ull &&
(sspte.spte_high & high_mmio_mask) == high_mmio_mask)
return true;
return false;
}
#endif
/*
 * Tell whether hardware may still update bits of @spte behind our back
 * (emulated A/D bits), i.e. whether a plain non-atomic clear could lose
 * state.  Returns false when nothing is left for hardware to set.
 */
static bool spte_has_volatile_bits(u64 spte)
{
	bool nothing_left_to_set;

	/* No accessed-bit emulation, or no mapping at all: nothing volatile. */
	if (!shadow_accessed_mask || !is_shadow_present_pte(spte))
		return false;

	/*
	 * Accessed already set, and dirty either already set or never
	 * settable (spte is read-only): hardware cannot change the spte.
	 */
	nothing_left_to_set = (spte & shadow_accessed_mask) &&
			      (!is_writable_pte(spte) ||
			       (spte & shadow_dirty_mask));
	return !nothing_left_to_set;
}
/* True iff @bit_mask was set in @old_spte but is clear in @new_spte. */
static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
{
	if (!(old_spte & bit_mask))
		return false;
	return !(new_spte & bit_mask);
}
/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
WARN_ON(is_shadow_present_pte(*sptep));
__set_spte(sptep, new_spte);
}
/* Rules for using mmu_spte_update:
 * Update the state bits, it means the mapped pfn is not changed.
 */
static void mmu_spte_update(u64 *sptep, u64 new_spte)
{
u64 mask, old_spte = *sptep;
/* The new value must still map a pfn; pure clears go elsewhere. */
WARN_ON(!is_rmap_spte(new_spte));
/* Nonpresent -> present is a plain install with no old state. */
if (!is_shadow_present_pte(old_spte))
return mmu_spte_set(sptep, new_spte);
/* Carry over a hardware-set dirty bit so it is not lost. */
new_spte |= old_spte & shadow_dirty_mask;
/*
 * Bits hardware may set concurrently.  If new_spte already has them
 * all set, a racing hardware update cannot be lost by a fast write.
 */
mask = shadow_accessed_mask;
if (is_writable_pte(old_spte))
mask |= shadow_dirty_mask;
if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
__update_clear_spte_fast(sptep, new_spte);
else
/* Slow path returns the old spte including late A/D updates. */
old_spte = __update_clear_spte_slow(sptep, new_spte);
if (!shadow_accessed_mask)
return;
/* Propagate any A/D bits we just dropped to the backing page. */
if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
kvm_set_pfn_accessed(spte_to_pfn(old_spte));
if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
kvm_set_pfn_dirty(spte_to_pfn(old_spte));
}
/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent, and track the
 * state bits, it is used to clear the last level sptep.
 */
static int mmu_spte_clear_track_bits(u64 *sptep)
{
pfn_t pfn;
u64 old_spte = *sptep;
/* Fast clear only when hardware cannot race to set A/D bits. */
if (!spte_has_volatile_bits(old_spte))
__update_clear_spte_fast(sptep, 0ull);
else
old_spte = __update_clear_spte_slow(sptep, 0ull);
/* Return 1 iff the old spte mapped a pfn (caller must drop the rmap). */
if (!is_rmap_spte(old_spte))
return 0;
pfn = spte_to_pfn(old_spte);
/*
 * A zero mask means the bit is not tracked at all, so conservatively
 * treat the page as accessed/dirty.
 */
if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
kvm_set_pfn_accessed(pfn);
if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
kvm_set_pfn_dirty(pfn);
return 1;
}
/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear spte without caring the state bits of sptep,
 * it is used to set the upper level spte.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{
__update_clear_spte_fast(sptep, 0ull);
}
/* Lock-free spte read; see __get_spte_lockless() for the 32-bit rules. */
static u64 mmu_spte_get_lockless(u64 *sptep)
{
return __get_spte_lockless(sptep);
}
/*
 * Enter a lockless shadow-page-table walk: take the RCU read lock and
 * advertise this reader via reader_counter so page freeing is deferred.
 * Must be paired with walk_shadow_page_lockless_end().
 */
static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
rcu_read_lock();
atomic_inc(&vcpu->kvm->arch.reader_counter);
/* Increase the counter before walking shadow page table */
smp_mb__after_atomic_inc();
}
/* Leave a lockless walk; reverse of walk_shadow_page_lockless_begin(). */
static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
/* Decrease the counter after walking shadow page table finished */
smp_mb__before_atomic_dec();
atomic_dec(&vcpu->kvm->arch.reader_counter);
rcu_read_unlock();
}
/*
 * Ensure @cache holds at least @min preallocated objects, refilling it
 * to capacity from @base_cache (zeroed, GFP_KERNEL).
 * Returns 0 on success or -ENOMEM if an allocation fails.
 */
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *new_obj;

	if (cache->nobjs < min) {
		do {
			new_obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
			if (!new_obj)
				return -ENOMEM;
			cache->objects[cache->nobjs++] = new_obj;
		} while (cache->nobjs < ARRAY_SIZE(cache->objects));
	}
	return 0;
}
/* Number of preallocated objects still available in @cache. */
static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
{
return cache->nobjs;
}
/* Return every cached object in @mc to its backing kmem @cache. */
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
struct kmem_cache *cache)
{
while (mc->nobjs)
kmem_cache_free(cache, mc->objects[--mc->nobjs]);
}
/*
 * Whole-page flavor of mmu_topup_memory_cache(): keep at least @min
 * free pages in @cache, refilling to capacity with GFP_KERNEL pages.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	void *new_page;

	if (cache->nobjs >= min)
		return 0;
	for (; cache->nobjs < ARRAY_SIZE(cache->objects); cache->nobjs++) {
		new_page = (void *)__get_free_page(GFP_KERNEL);
		if (!new_page)
			return -ENOMEM;
		cache->objects[cache->nobjs] = new_page;
	}
	return 0;
}
/* Give every cached page in @mc back to the page allocator. */
static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs > 0) {
		void *cached = mc->objects[--mc->nobjs];
		free_page((unsigned long)cached);
	}
}
/*
 * Preallocate everything a page fault may need (rmap descriptors,
 * shadow pages, page headers) so fault handling never allocates under
 * mmu_lock.  Returns 0 or -ENOMEM.
 */
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
int r;
r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
if (r)
goto out;
r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
if (r)
goto out;
r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
mmu_page_header_cache, 4);
out:
return r;
}
/* Drop all objects preallocated by mmu_topup_memory_caches(). */
static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
pte_list_desc_cache);
mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
mmu_page_header_cache);
}
/*
 * Pop a preallocated object from @mc.  Never fails: the cache must have
 * been topped up beforehand (BUG otherwise).
 * Note: @size is unused here — objects were sized when the cache was
 * filled; it is kept for the caller-side documentation value.
 */
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
size_t size)
{
void *p;
BUG_ON(!mc->nobjs);
p = mc->objects[--mc->nobjs];
return p;
}
/* Take a pte_list_desc from the vcpu's preallocated cache. */
static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache,
sizeof(struct pte_list_desc));
}
/* Return a pte_list_desc directly to the global slab cache. */
static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}
/*
 * gfn mapped by slot @index of shadow page @sp.  Direct pages compute
 * it from sp->gfn and the index; indirect pages keep a gfns[] array.
 */
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (sp->role.direct)
		return sp->gfn +
		       (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
	return sp->gfns[index];
}
/*
 * Record the gfn for slot @index.  For a direct page the gfn is implied
 * by the geometry, so only sanity-check it; otherwise store it.
 */
static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
	if (!sp->role.direct) {
		sp->gfns[index] = gfn;
		return;
	}
	BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
}
/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
struct kvm_memory_slot *slot,
int level)
{
unsigned long idx;
idx = gfn_to_index(gfn, slot->base_gfn, level);
/* lpage_info[] only covers large-page levels, hence the "- 2". */
return &slot->arch.lpage_info[level - 2][idx];
}
/*
 * A shadow page now maps @gfn: bump write_count on every large-page
 * level covering it (blocking large mappings there) and count one more
 * indirect shadow page.
 */
static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
	int level;

	for (level = PT_DIRECTORY_LEVEL;
	     level < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; level++)
		++lpage_info_slot(gfn, memslot, level)->write_count;
	kvm->arch.indirect_shadow_pages++;
}
/* Reverse of account_shadowed(); warns if the counts go negative. */
static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
	struct kvm_lpage_info *info;
	int level;

	for (level = PT_DIRECTORY_LEVEL;
	     level < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; level++) {
		info = lpage_info_slot(gfn, memslot, level);
		--info->write_count;
		WARN_ON(info->write_count < 0);
	}
	kvm->arch.indirect_shadow_pages--;
}
/*
 * Nonzero if @gfn must stay write-protected at @level (some shadow page
 * maps it).  A missing memslot is treated as protected.
 */
static int has_wrprotected_page(struct kvm *kvm,
				gfn_t gfn,
				int level)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	if (!slot)
		return 1;
	return lpage_info_slot(gfn, slot, level)->write_count;
}
/*
 * Largest page-table level whose page size fits inside the host
 * mapping backing @gfn.
 */
static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
	unsigned long host_size = kvm_host_page_size(kvm, gfn);
	int best = 0;
	int level;

	for (level = PT_PAGE_TABLE_LEVEL;
	     level < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++level) {
		if (host_size < KVM_HPAGE_SIZE(level))
			break;
		best = level;
	}
	return best;
}
/*
 * Resolve @gfn to a usable memslot, or NULL if there is none, the slot
 * is being deleted, or (@no_dirty_log) the slot has dirty logging on.
 */
static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool no_dirty_log)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(vcpu->kvm, gfn);

	if (!slot)
		return NULL;
	if (slot->flags & KVM_MEMSLOT_INVALID)
		return NULL;
	if (no_dirty_log && slot->dirty_bitmap)
		return NULL;
	return slot;
}
/*
 * True when @large_gfn cannot be mapped large because its slot is
 * absent, invalid, or dirty-logged.
 */
static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true) == NULL;
}
/*
 * Pick the mapping level for @large_gfn: bounded above by the host
 * mapping size and the CPU's maximum large-page level, and reduced
 * below any level at which the gfn is write-protected.
 */
static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
int host_level, level, max_level;
host_level = host_mapping_level(vcpu->kvm, large_gfn);
if (host_level == PT_PAGE_TABLE_LEVEL)
return host_level;
max_level = kvm_x86_ops->get_lpage_level() < host_level ?
kvm_x86_ops->get_lpage_level() : host_level;
/* Stop at the first level where the gfn must stay write-protected. */
for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
break;
return level - 1;
}
/*
 * Pte mapping structures:
 *
 * If pte_list bit zero is zero, then pte_list point to the spte.
 *
 * If pte_list bit zero is one, (then pte_list & ~1) points to a struct
 * pte_list_desc containing more mappings.
 *
 * Returns the number of pte entries before the spte was added or zero if
 * the spte was not added.
 *
 */
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
unsigned long *pte_list)
{
struct pte_list_desc *desc;
int i, count = 0;
if (!*pte_list) {
/* Empty list: store the single spte inline, tag bit clear. */
rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
*pte_list = (unsigned long)spte;
} else if (!(*pte_list & 1)) {
/* One inline entry: spill both into a descriptor, tag bit set. */
rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
desc = mmu_alloc_pte_list_desc(vcpu);
desc->sptes[0] = (u64 *)*pte_list;
desc->sptes[1] = spte;
*pte_list = (unsigned long)desc | 1;
++count;
} else {
/* Descriptor chain: walk to the last desc, extend if full. */
rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
desc = (struct pte_list_desc *)(*pte_list & ~1ul);
while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
desc = desc->more;
count += PTE_LIST_EXT;
}
if (desc->sptes[PTE_LIST_EXT-1]) {
desc->more = mmu_alloc_pte_list_desc(vcpu);
desc = desc->more;
}
/* Entries are packed: first NULL slot is the insertion point. */
for (i = 0; desc->sptes[i]; ++i)
++count;
desc->sptes[i] = spte;
}
return count;
}
/*
 * Iterate a pte list: return the entry following @spte, or the first
 * entry when @spte is NULL.  Returns NULL at the end of the list.
 * Note this is an O(n) scan from the head on every call.
 */
static u64 *pte_list_next(unsigned long *pte_list, u64 *spte)
{
struct pte_list_desc *desc;
u64 *prev_spte;
int i;
if (!*pte_list)
return NULL;
else if (!(*pte_list & 1)) {
/* Single inline entry: it is "first", and has no successor. */
if (!spte)
return (u64 *)*pte_list;
return NULL;
}
desc = (struct pte_list_desc *)(*pte_list & ~1ul);
prev_spte = NULL;
while (desc) {
for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
/* prev_spte==NULL matches a NULL @spte on the first entry. */
if (prev_spte == spte)
return desc->sptes[i];
prev_spte = desc->sptes[i];
}
desc = desc->more;
}
return NULL;
}
/*
 * Remove slot @i from @desc by swapping in the last used entry, then
 * free @desc if it became empty, relinking the chain (or collapsing a
 * single remaining entry back to the inline form).
 */
static void
pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
int i, struct pte_list_desc *prev_desc)
{
int j;
/* Find the last used slot; entries are packed from index 0. */
for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
;
desc->sptes[i] = desc->sptes[j];
desc->sptes[j] = NULL;
if (j != 0)
return;
/* Descriptor is now empty (j==0 was its only entry). */
if (!prev_desc && !desc->more)
/* Last desc with one entry already copied to slot 0: inline it. */
*pte_list = (unsigned long)desc->sptes[0];
else
if (prev_desc)
prev_desc->more = desc->more;
else
*pte_list = (unsigned long)desc->more | 1;
mmu_free_pte_list_desc(desc);
}
/*
 * Remove @spte from @pte_list.  BUGs if the entry is not present —
 * callers must only remove sptes they previously added.
 */
static void pte_list_remove(u64 *spte, unsigned long *pte_list)
{
struct pte_list_desc *desc;
struct pte_list_desc *prev_desc;
int i;
if (!*pte_list) {
printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
BUG();
} else if (!(*pte_list & 1)) {
rmap_printk("pte_list_remove: %p 1->0\n", spte);
if ((u64 *)*pte_list != spte) {
printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte);
BUG();
}
*pte_list = 0;
} else {
rmap_printk("pte_list_remove: %p many->many\n", spte);
desc = (struct pte_list_desc *)(*pte_list & ~1ul);
prev_desc = NULL;
while (desc) {
for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
if (desc->sptes[i] == spte) {
pte_list_desc_remove_entry(pte_list,
desc, i,
prev_desc);
return;
}
prev_desc = desc;
desc = desc->more;
}
pr_err("pte_list_remove: %p many->many\n", spte);
BUG();
}
}
/* Callback type for pte_list_walk(): invoked once per listed spte. */
typedef void (*pte_list_walk_fn) (u64 *spte);
/* Apply @fn to every spte on @pte_list (inline entry or desc chain). */
static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
{
struct pte_list_desc *desc;
int i;
if (!*pte_list)
return;
if (!(*pte_list & 1))
return fn((u64 *)*pte_list);
desc = (struct pte_list_desc *)(*pte_list & ~1ul);
while (desc) {
for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
fn(desc->sptes[i]);
desc = desc->more;
}
}
/*
 * rmap head for (@gfn, @level) within @slot: the per-gfn rmap array for
 * 4k mappings, or the large-page rmap_pde for higher levels.
 */
static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
struct kvm_memory_slot *slot)
{
struct kvm_lpage_info *linfo;
if (likely(level == PT_PAGE_TABLE_LEVEL))
return &slot->rmap[gfn - slot->base_gfn];
linfo = lpage_info_slot(gfn, slot, level);
return &linfo->rmap_pde;
}
/*
 * Take gfn and return the reverse mapping to it.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
{
struct kvm_memory_slot *slot;
slot = gfn_to_memslot(kvm, gfn);
return __gfn_to_rmap(gfn, level, slot);
}
/*
 * True when the vcpu still has a preallocated pte_list_desc, i.e. a new
 * rmap entry can be added without allocating under mmu_lock.
 */
static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_free_objects(
			&vcpu->arch.mmu_pte_list_desc_cache);
}
/*
 * Record @spte in the rmap for @gfn (also noting the gfn in its shadow
 * page).  Returns pte_list_add()'s prior-entry count.
 */
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
struct kvm_mmu_page *sp;
unsigned long *rmapp;
sp = page_header(__pa(spte));
kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
return pte_list_add(vcpu, spte, rmapp);
}
/* rmap iteration: entry after @spte, or the first when @spte is NULL. */
static u64 *rmap_next(unsigned long *rmapp, u64 *spte)
{
return pte_list_next(rmapp, spte);
}
/* Remove @spte from the rmap of the gfn it maps. */
static void rmap_remove(struct kvm *kvm, u64 *spte)
{
struct kvm_mmu_page *sp;
gfn_t gfn;
unsigned long *rmapp;
sp = page_header(__pa(spte));
gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
pte_list_remove(spte, rmapp);
}
/*
 * Clear @sptep (tracking A/D state) and, if it mapped a pfn, drop its
 * rmap entry as well.
 */
static void drop_spte(struct kvm *kvm, u64 *sptep)
{
if (mmu_spte_clear_track_bits(sptep))
rmap_remove(kvm, sptep);
}
/*
 * Write-protect every spte mapping @gfn in @slot.  4k sptes have their
 * writable bit cleared; writable large-page sptes are dropped entirely
 * (they will be refaulted at the right granularity).
 * Returns nonzero if anything changed (caller should flush TLBs).
 */
int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
struct kvm_memory_slot *slot)
{
unsigned long *rmapp;
u64 *spte;
int i, write_protected = 0;
rmapp = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot);
spte = rmap_next(rmapp, NULL);
while (spte) {
BUG_ON(!(*spte & PT_PRESENT_MASK));
rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
if (is_writable_pte(*spte)) {
mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
write_protected = 1;
}
spte = rmap_next(rmapp, spte);
}
/* check for huge page mappings */
for (i = PT_DIRECTORY_LEVEL;
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
rmapp = __gfn_to_rmap(gfn, i, slot);
spte = rmap_next(rmapp, NULL);
while (spte) {
BUG_ON(!(*spte & PT_PRESENT_MASK));
BUG_ON(!is_large_pte(*spte));
pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
if (is_writable_pte(*spte)) {
drop_spte(kvm, spte);
--kvm->stat.lpages;
/* The list changed under us: restart from the head. */
spte = NULL;
write_protected = 1;
}
spte = rmap_next(rmapp, spte);
}
}
return write_protected;
}
/* Write-protect @gfn via its memslot's rmaps; see above for semantics. */
static int rmap_write_protect(struct kvm *kvm, u64 gfn)
{
struct kvm_memory_slot *slot;
slot = gfn_to_memslot(kvm, gfn);
return kvm_mmu_rmap_write_protect(kvm, gfn, slot);
}
/*
 * MMU-notifier helper: drop every spte on @rmapp.  Restarts from the
 * head after each drop since drop_spte() rewrites the list.
 * Returns nonzero if a TLB flush is needed.  @data is unused here.
 */
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
u64 *spte;
int need_tlb_flush = 0;
while ((spte = rmap_next(rmapp, NULL))) {
BUG_ON(!(*spte & PT_PRESENT_MASK));
rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
drop_spte(kvm, spte);
need_tlb_flush = 1;
}
return need_tlb_flush;
}
/*
 * MMU-notifier change_pte helper: repoint every spte on @rmapp at the
 * new pfn taken from the host pte passed via @data.  If the host pte is
 * writable the spte is dropped instead (it will be refaulted); otherwise
 * the spte is rebuilt read-only with a cleared accessed bit.
 * Always returns 0; flushes TLBs itself when anything changed.
 */
static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
int need_flush = 0;
u64 *spte, new_spte;
pte_t *ptep = (pte_t *)data;
pfn_t new_pfn;
WARN_ON(pte_huge(*ptep));
new_pfn = pte_pfn(*ptep);
spte = rmap_next(rmapp, NULL);
while (spte) {
BUG_ON(!is_shadow_present_pte(*spte));
rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
need_flush = 1;
if (pte_write(*ptep)) {
drop_spte(kvm, spte);
/* List rewritten by drop_spte(): restart from the head. */
spte = rmap_next(rmapp, NULL);
} else {
new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
new_spte |= (u64)new_pfn << PAGE_SHIFT;
new_spte &= ~PT_WRITABLE_MASK;
new_spte &= ~SPTE_HOST_WRITEABLE;
new_spte &= ~shadow_accessed_mask;
mmu_spte_clear_track_bits(spte);
mmu_spte_set(spte, new_spte);
spte = rmap_next(rmapp, spte);
}
}
if (need_flush)
kvm_flush_remote_tlbs(kvm);
return 0;
}
/*
 * Dispatch an MMU-notifier event for host address @hva: find the
 * memslot(s) covering it and run @handler on the 4k rmap plus every
 * large-page rmap for the corresponding gfn.  The or-ed handler results
 * are returned (callers use this as a "flush needed"/"young" flag).
 */
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
unsigned long data,
int (*handler)(struct kvm *kvm, unsigned long *rmapp,
unsigned long data))
{
int j;
int ret;
int retval = 0;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots) {
unsigned long start = memslot->userspace_addr;
unsigned long end;
end = start + (memslot->npages << PAGE_SHIFT);
if (hva >= start && hva < end) {
gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
gfn_t gfn = memslot->base_gfn + gfn_offset;
ret = handler(kvm, &memslot->rmap[gfn_offset], data);
/* Also visit every large-page rmap covering this gfn. */
for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
struct kvm_lpage_info *linfo;
linfo = lpage_info_slot(gfn, memslot,
PT_DIRECTORY_LEVEL + j);
ret |= handler(kvm, &linfo->rmap_pde, data);
}
trace_kvm_age_page(hva, memslot, ret);
retval |= ret;
}
}
return retval;
}
/* MMU-notifier: unmap all sptes for @hva; nonzero means flush TLBs. */
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
}
/* MMU-notifier: host pte for @hva changed; repoint/drop matching sptes. */
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}
/*
 * Page-aging handler: test-and-clear the accessed bit on every spte in
 * @rmapp.  Returns nonzero if any spte was recently accessed.
 */
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
u64 *spte;
int young = 0;
/*
 * Emulate the accessed bit for EPT, by checking if this page has
 * an EPT mapping, and clearing it if it does. On the next access,
 * a new EPT mapping will be established.
 * This has some overhead, but not as much as the cost of swapping
 * out actively used pages or breaking up actively used hugepages.
 */
if (!shadow_accessed_mask)
return kvm_unmap_rmapp(kvm, rmapp, data);
spte = rmap_next(rmapp, NULL);
while (spte) {
int _young;
u64 _spte = *spte;
BUG_ON(!(_spte & PT_PRESENT_MASK));
_young = _spte & PT_ACCESSED_MASK;
if (_young) {
young = 1;
clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
}
spte = rmap_next(rmapp, spte);
}
return young;
}
/*
 * Non-destructive variant of kvm_age_rmapp(): report whether any spte
 * has the accessed bit set, without clearing anything.
 */
static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
u64 *spte;
int young = 0;
/*
 * If there's no access bit in the secondary pte set by the
 * hardware it's up to gup-fast/gup to set the access bit in
 * the primary pte or in the page structure.
 */
if (!shadow_accessed_mask)
goto out;
spte = rmap_next(rmapp, NULL);
while (spte) {
u64 _spte = *spte;
BUG_ON(!(_spte & PT_PRESENT_MASK));
young = _spte & PT_ACCESSED_MASK;
if (young) {
young = 1;
break;
}
spte = rmap_next(rmapp, spte);
}
out:
return young;
}
/* Rmap length above which callers recycle the chain (used elsewhere). */
#define RMAP_RECYCLE_THRESHOLD 1000
/*
 * Drop every spte on @gfn's rmap at this shadow page's level and flush
 * remote TLBs — used to cap unbounded rmap growth.
 */
static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
unsigned long *rmapp;
struct kvm_mmu_page *sp;
sp = page_header(__pa(spte));
rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
kvm_flush_remote_tlbs(vcpu->kvm);
}
/* MMU-notifier: age (clear accessed bits for) @hva; nonzero if young. */
int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
}
/* MMU-notifier: test (without clearing) whether @hva was accessed. */
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
}
#ifdef MMU_DEBUG
/*
 * Debug-only check that a shadow page table being freed contains no
 * remaining present sptes; logs the first offender.
 */
static int is_empty_shadow_page(u64 *spt)
{
u64 *pos;
u64 *end;
for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
if (is_shadow_present_pte(*pos)) {
printk(KERN_ERR "%s: %p %llx\n", __func__,
pos, *pos);
return 0;
}
return 1;
}
#endif
/*
 * This value is the sum of all of the kvm instances's
 * kvm->arch.n_used_mmu_pages values. We need a global,
 * aggregate version in order to make the slab shrinker
 * faster
 */
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
{
/* @nr may be negative to account freed pages. */
kvm->arch.n_used_mmu_pages += nr;
percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}
/*
 * Remove the sp from shadow page cache, after call it,
 * we can not find this sp from the cache, and the shadow
 * page table is still valid.
 * It should be under the protection of mmu lock.
 */
static void kvm_mmu_isolate_page(struct kvm_mmu_page *sp)
{
ASSERT(is_empty_shadow_page(sp->spt));
hlist_del(&sp->hash_link);
/* Indirect pages own a separate gfns[] page; free it here. */
if (!sp->role.direct)
free_page((unsigned long)sp->gfns);
}
/*
 * Free the shadow page table and the sp, we can do it
 * out of the protection of mmu lock.
 */
static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
{
list_del(&sp->link);
free_page((unsigned long)sp->spt);
kmem_cache_free(mmu_page_header_cache, sp);
}
/* Bucket index into kvm->arch.mmu_page_hash for @gfn. */
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}
/* Link @parent_pte into @sp's parent-pte list (NULL is a no-op). */
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	if (parent_pte)
		pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
}
/* Unlink @parent_pte from @sp's parent-pte list (must be present). */
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	pte_list_remove(parent_pte, &sp->parent_ptes);
}
/* Unlink @parent_pte from @sp and clear the pte itself (no A/D tracking). */
static void drop_parent_pte(struct kvm_mmu_page *sp,
			    u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
	mmu_spte_clear_no_track(parent_pte);
}
/*
 * Build a new shadow page from the vcpu's preallocated caches: header,
 * spt page, and (for indirect pages) a gfns[] page.  Links it onto the
 * active list, records @parent_pte, and bumps the used-page counters.
 * Cannot fail: the caches BUG if empty.
 */
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
u64 *parent_pte, int direct)
{
struct kvm_mmu_page *sp;
sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache,
sizeof *sp);
sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
if (!direct)
sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
PAGE_SIZE);
/* Let page_header(__pa(spte)) find this sp from any of its sptes. */
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
bitmap_zero(sp->slot_bitmap, KVM_MEM_SLOTS_NUM);
sp->parent_ptes = 0;
mmu_page_add_parent_pte(vcpu, sp, parent_pte);
kvm_mod_used_mmu_pages(vcpu->kvm, +1);
return sp;
}
static void mark_unsync(u64 *spte);
/* Mark every parent pte of @sp (recursively upward) as unsync. */
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
pte_list_walk(&sp->parent_ptes, mark_unsync);
}
/*
 * Flag the child referenced by @spte as unsync in its parent page's
 * bitmap; the first unsync child propagates the mark further up.
 */
static void mark_unsync(u64 *spte)
{
struct kvm_mmu_page *sp;
unsigned int index;
sp = page_header(__pa(spte));
index = spte - sp->spt;
/* Already marked: nothing more to do. */
if (__test_and_set_bit(index, sp->unsync_child_bitmap))
return;
/* Only the 0 -> 1 transition needs to propagate upward. */
if (sp->unsync_children++)
return;
kvm_mmu_mark_parents_unsync(sp);
}
/* Nonpaging mode has no unsync pages; report "needs zap" trivially. */
static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp)
{
return 1;
}
/* Nonpaging mode: invlpg is a no-op (no guest page tables to shadow). */
static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
}
/* Nonpaging mode must never see a guest pte update. */
static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
const void *pte)
{
WARN_ON(1);
}
/* Capacity of one kvm_mmu_pages batch gathered by mmu_unsync_walk(). */
#define KVM_PAGE_ARRAY_NR 16
/* Batch of shadow pages plus the parent-slot index each was found at. */
struct kvm_mmu_pages {
struct mmu_page_and_offset {
struct kvm_mmu_page *sp;
unsigned int idx;
} page[KVM_PAGE_ARRAY_NR];
unsigned int nr;
};
/*
 * Append (@sp, @idx) to @pvec, skipping duplicates for unsync pages.
 * Returns nonzero when the vector just became full (caller must flush).
 */
static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
int idx)
{
int i;
if (sp->unsync)
for (i=0; i < pvec->nr; i++)
if (pvec->page[i].sp == sp)
return 0;
pvec->page[pvec->nr].sp = sp;
pvec->page[pvec->nr].idx = idx;
pvec->nr++;
return (pvec->nr == KVM_PAGE_ARRAY_NR);
}
/*
 * Depth-first walk over @sp's unsync descendants, collecting them into
 * @pvec.  Stale bitmap bits (child gone, synced, or now a large pte)
 * are cleared on the way.  Returns the number of unsync leaves found,
 * or -ENOSPC when @pvec filled up (caller drains and restarts).
 */
static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
struct kvm_mmu_pages *pvec)
{
int i, ret, nr_unsync_leaf = 0;
/* 512: entries per shadow page table (bitmap width). */
for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
struct kvm_mmu_page *child;
u64 ent = sp->spt[i];
if (!is_shadow_present_pte(ent) || is_large_pte(ent))
goto clear_child_bitmap;
child = page_header(ent & PT64_BASE_ADDR_MASK);
if (child->unsync_children) {
if (mmu_pages_add(pvec, child, i))
return -ENOSPC;
ret = __mmu_unsync_walk(child, pvec);
if (!ret)
goto clear_child_bitmap;
else if (ret > 0)
nr_unsync_leaf += ret;
else
return ret;
} else if (child->unsync) {
nr_unsync_leaf++;
if (mmu_pages_add(pvec, child, i))
return -ENOSPC;
} else
goto clear_child_bitmap;
continue;
clear_child_bitmap:
__clear_bit(i, sp->unsync_child_bitmap);
sp->unsync_children--;
WARN_ON((int)sp->unsync_children < 0);
}
return nr_unsync_leaf;
}
/*
 * Entry point for the walk above: seeds @pvec with @sp itself (idx 0)
 * and returns 0 immediately when nothing is unsync below it.
 */
static int mmu_unsync_walk(struct kvm_mmu_page *sp,
struct kvm_mmu_pages *pvec)
{
if (!sp->unsync_children)
return 0;
mmu_pages_add(pvec, sp, 0);
return __mmu_unsync_walk(sp, pvec);
}
/* Clear @sp's unsync state and update the global unsync statistic. */
static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
WARN_ON(!sp->unsync);
trace_kvm_mmu_sync_page(sp);
sp->unsync = 0;
--kvm->stat.mmu_unsync;
}
/* Forward declarations: zap is defined later in this file. */
static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list);
/* Iterate every shadow page hashed under @gfn whose gfn matches. */
#define for_each_gfn_sp(kvm, sp, gfn, pos) \
hlist_for_each_entry(sp, pos, \
&(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
if ((sp)->gfn != (gfn)) {} else
/* As above, but restricted to valid, indirect (non-direct) pages. */
#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \
hlist_for_each_entry(sp, pos, \
&(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
if ((sp)->gfn != (gfn) || (sp)->role.direct || \
(sp)->role.invalid) {} else
/* @sp->gfn should be write-protected at the call site */
/*
 * Resync the unsync page @sp against the guest page table.  Returns 1
 * (and queues the page for zapping) when the page is unusable — wrong
 * PAE mode or sync_page failure; returns 0 after a successful sync
 * (the local TLB is flushed).  @clear_unsync drops the unsync flag
 * before attempting the sync.
 */
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list, bool clear_unsync)
{
if (sp->role.cr4_pae != !!is_pae(vcpu)) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
return 1;
}
if (clear_unsync)
kvm_unlink_unsync_page(vcpu->kvm, sp);
if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
return 1;
}
kvm_mmu_flush_tlb(vcpu);
return 0;
}
/*
 * One-off sync that commits any zap immediately (keeps @sp unsync on
 * success — clear_unsync is false).  Returns __kvm_sync_page()'s result.
 */
static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp)
{
LIST_HEAD(invalid_list);
int ret;
ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
if (ret)
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
return ret;
}
#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
/* Audit support compiled out: provide empty stubs. */
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif
/* Standard sync: resync @sp and clear its unsync flag. */
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
return __kvm_sync_page(vcpu, sp, invalid_list, true);
}
/* @gfn should be write-protected at the call site */
/*
 * Sync every unsync last-level shadow page for @gfn, zapping those that
 * fail (wrong PAE mode or sync_page error), then flush if anything was
 * brought back in sync.
 */
static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
struct kvm_mmu_page *s;
struct hlist_node *node;
LIST_HEAD(invalid_list);
bool flush = false;
for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
if (!s->unsync)
continue;
/* Only last-level pages are ever allowed to be unsync. */
WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
kvm_unlink_unsync_page(vcpu->kvm, s);
if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
(vcpu->arch.mmu.sync_page(vcpu, s))) {
kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
continue;
}
flush = true;
}
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
if (flush)
kvm_mmu_flush_tlb(vcpu);
}
/* Per-level chain of parents recorded while draining a kvm_mmu_pages. */
struct mmu_page_path {
struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
unsigned int idx[PT64_ROOT_LEVEL-1];
};
/*
 * Iterate the leaf pages of @pvec while maintaining @parents.
 * NOTE(review): the initializer reads pvec.page[i].sp before the bounds
 * check runs — harmless only while pvec.nr >= 1 at entry; confirm all
 * callers guarantee a non-empty vector.
 */
#define for_each_sp(pvec, sp, parents, i) \
for (i = mmu_pages_next(&pvec, &parents, -1), \
sp = pvec.page[i].sp; \
i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
i = mmu_pages_next(&pvec, &parents, i))
/*
 * Advance to the next last-level page after position @i in @pvec,
 * recording intermediate (non-leaf) pages into @parents on the way.
 * Returns pvec->nr when exhausted.
 */
static int mmu_pages_next(struct kvm_mmu_pages *pvec,
struct mmu_page_path *parents,
int i)
{
int n;
for (n = i+1; n < pvec->nr; n++) {
struct kvm_mmu_page *sp = pvec->page[n].sp;
if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
parents->idx[0] = pvec->page[n].idx;
return n;
}
parents->parent[sp->role.level-2] = sp;
parents->idx[sp->role.level-1] = pvec->page[n].idx;
}
return n;
}
/*
 * After a leaf was processed, walk @parents upward clearing each
 * parent's unsync_child bit, stopping at the first parent that still
 * has other unsync children.
 */
static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
struct kvm_mmu_page *sp;
unsigned int level = 0;
do {
unsigned int idx = parents->idx[level];
sp = parents->parent[level];
if (!sp)
return;
--sp->unsync_children;
WARN_ON((int)sp->unsync_children < 0);
__clear_bit(idx, sp->unsync_child_bitmap);
level++;
} while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
}
/* Reset @parents/@pvec for a fresh walk rooted at @parent. */
static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
struct mmu_page_path *parents,
struct kvm_mmu_pages *pvec)
{
parents->parent[parent->role.level-1] = NULL;
pvec->nr = 0;
}
/*
 * Bring every unsync descendant of @parent back in sync: batch them via
 * mmu_unsync_walk(), write-protect their gfns (with a remote flush when
 * anything changed), sync each page, and clear parent bookkeeping.
 * Drops/retakes mmu_lock between batches via cond_resched_lock().
 */
static void mmu_sync_children(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *parent)
{
int i;
struct kvm_mmu_page *sp;
struct mmu_page_path parents;
struct kvm_mmu_pages pages;
LIST_HEAD(invalid_list);
kvm_mmu_pages_init(parent, &parents, &pages);
while (mmu_unsync_walk(parent, &pages)) {
int protected = 0;
for_each_sp(pages, sp, parents, i)
protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
if (protected)
kvm_flush_remote_tlbs(vcpu->kvm);
for_each_sp(pages, sp, parents, i) {
kvm_sync_page(vcpu, sp, &invalid_list);
mmu_pages_clear_parents(&parents);
}
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
cond_resched_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_pages_init(parent, &parents, &pages);
}
}
/* Zero every entry of a freshly allocated shadow page table. */
static void init_shadow_page_table(struct kvm_mmu_page *sp)
{
	int entry;

	for (entry = 0; entry < PT64_ENT_PER_PAGE; entry++)
		sp->spt[entry] = 0ull;
}
/* Reset the write-flooding heuristic counter on @sp. */
static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
	sp->write_flooding_count = 0;
}
/* As above, addressed via any spte belonging to the page. */
static void clear_sp_write_flooding_count(u64 *spte)
{
	__clear_sp_write_flooding_count(page_header(__pa(spte)));
}
/*
 * Find or create the shadow page for (@gfn, @role): look up the hash
 * for an existing page with an identical role word; otherwise allocate
 * one, hash it, write-protect the gfn, and account it.  @parent_pte is
 * linked into the page's parent list in both cases.
 */
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
gfn_t gfn,
gva_t gaddr,
unsigned level,
int direct,
unsigned access,
u64 *parent_pte)
{
union kvm_mmu_page_role role;
unsigned quadrant;
struct kvm_mmu_page *sp;
struct hlist_node *node;
bool need_sync = false;
role = vcpu->arch.mmu.base_role;
role.level = level;
role.direct = direct;
if (role.direct)
role.cr4_pae = 0;
role.access = access;
/* 32-bit guests shadow one guest table with several host tables. */
if (!vcpu->arch.mmu.direct_map
&& vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
role.quadrant = quadrant;
}
for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
if (!need_sync && sp->unsync)
need_sync = true;
if (sp->role.word != role.word)
continue;
/* A failed transient sync zapped the page: fall through to alloc. */
if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
break;
mmu_page_add_parent_pte(vcpu, sp, parent_pte);
if (sp->unsync_children) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
kvm_mmu_mark_parents_unsync(sp);
} else if (sp->unsync)
kvm_mmu_mark_parents_unsync(sp);
__clear_sp_write_flooding_count(sp);
trace_kvm_mmu_get_page(sp, false);
return sp;
}
++vcpu->kvm->stat.mmu_cache_miss;
sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
if (!sp)
return sp;
sp->gfn = gfn;
sp->role = role;
hlist_add_head(&sp->hash_link,
&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
if (!direct) {
if (rmap_write_protect(vcpu->kvm, gfn))
kvm_flush_remote_tlbs(vcpu->kvm);
/* Resync any sibling unsync pages we saw during the lookup. */
if (level > PT_PAGE_TABLE_LEVEL && need_sync)
kvm_sync_pages(vcpu, gfn);
account_shadowed(vcpu->kvm, gfn);
}
init_shadow_page_table(sp);
trace_kvm_mmu_get_page(sp, true);
return sp;
}
/*
 * Start a shadow page-table walk for @addr at the root.  Handles the
 * two special root layouts: a 64-bit shadow root for a 32-bit guest
 * (skip one level) and PAE roots (start from the per-quadrant pae_root
 * entry, which may be absent).
 */
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
struct kvm_vcpu *vcpu, u64 addr)
{
iterator->addr = addr;
iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
iterator->level = vcpu->arch.mmu.shadow_root_level;
if (iterator->level == PT64_ROOT_LEVEL &&
vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
!vcpu->arch.mmu.direct_map)
--iterator->level;
if (iterator->level == PT32E_ROOT_LEVEL) {
iterator->shadow_addr
= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
--iterator->level;
/* Missing PAE root: terminate the walk immediately. */
if (!iterator->shadow_addr)
iterator->level = 0;
}
}
/*
 * True while the walk is still inside the page tables; also computes
 * the current index and sptep for this level.
 */
static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
if (iterator->level < PT_PAGE_TABLE_LEVEL)
return false;
iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
return true;
}
/*
 * Step down one level using @spte as the current entry; a last-level
 * spte ends the walk.
 */
static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
u64 spte)
{
if (is_last_spte(spte, iterator->level)) {
iterator->level = 0;
return;
}
iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
--iterator->level;
}
/*
 * Advance the iterator one level using the spte it currently points at.
 * Fix: the original wrote "return __shadow_walk_next(...)" — returning
 * an expression from a void function violates C99 6.8.6.4 (it only
 * compiles as a GNU extension); call the helper as a plain statement.
 */
static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
	__shadow_walk_next(iterator, *iterator->sptep);
}
/*
 * Point the parent entry @sptep at shadow page @sp.  Intermediate
 * entries are made maximally permissive; real permissions are enforced
 * at the leaf sptes.
 */
static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
{
u64 spte;
spte = __pa(sp->spt)
| PT_PRESENT_MASK | PT_ACCESSED_MASK
| PT_WRITABLE_MASK | PT_USER_MASK;
mmu_spte_set(sptep, spte);
}
/*
 * If @sptep currently holds a large mapping, drop it (with accounting
 * and a remote TLB flush) so a table can be linked in its place.
 */
static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
if (is_large_pte(*sptep)) {
drop_spte(vcpu->kvm, sptep);
--vcpu->kvm->stat.lpages;
kvm_flush_remote_tlbs(vcpu->kvm);
}
}
/*
 * Re-validate a non-leaf spte of a direct sp: if the child page's
 * access no longer matches @direct_access, unlink it so a correctly
 * permissioned sp can be installed in its place.
 */
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				 unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed from clean to dirty, it will corrupt the
		 * sp's access: allow writable in the read-only sp,
		 * so we should update the spte at this point to get
		 * a new sp with the correct access.
		 */
		child = page_header(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)
			return;

		/* Access changed: detach the child; a fresh sp will be
		 * linked on the next fault. */
		drop_parent_pte(child, sptep);
		kvm_flush_remote_tlbs(vcpu->kvm);
	}
}
/*
 * Clear one spte of @sp.  Returns true if the spte was present and has
 * been dropped; false otherwise.  An untracked MMIO spte is cleared
 * too but does not count as present.
 */
static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
			     u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (is_last_spte(pte, sp->role.level)) {
			/* Leaf entry: remove the mapping itself. */
			drop_spte(kvm, spte);
			if (is_large_pte(pte))
				--kvm->stat.lpages;
		} else {
			/* Non-leaf: unlink the child page table. */
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, spte);
		}
		return true;
	}

	if (is_mmio_spte(pte))
		mmu_spte_clear_no_track(spte);

	return false;
}
/* Zap every entry of @sp's shadow page table. */
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned idx;

	for (idx = 0; idx < PT64_ENT_PER_PAGE; idx++)
		mmu_page_zap_pte(kvm, sp, &sp->spt[idx]);
}
/* Drop the back-reference from @sp to the parent spte @parent_pte. */
static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}
/* Detach @sp from every parent spte that still references it. */
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *sptep;

	while ((sptep = pte_list_next(&sp->parent_ptes, NULL)) != NULL)
		drop_parent_pte(sp, sptep);
}
/*
 * Zap all unsync shadow pages reachable below @parent, queueing them
 * on @invalid_list.  Returns the number of pages zapped.
 */
static int mmu_zap_unsync_children(struct kvm *kvm,
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
{
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;

	/* Last-level pages have no children to walk. */
	if (parent->role.level == PT_PAGE_TABLE_LEVEL)
		return 0;

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
			mmu_pages_clear_parents(&parents);
			zapped++;
		}
		/* Zapping may have restructured the tree: restart the walk. */
		kvm_mmu_pages_init(parent, &parents, &pages);
	}

	return zapped;
}
/*
 * Unlink @sp (and its unsync children) from the shadow page tables and
 * queue it on @invalid_list for kvm_mmu_commit_zap_page().  Returns
 * the total number of pages zapped, including @sp itself when it has
 * no root references.  Caller holds kvm->mmu_lock.
 */
static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list)
{
	int ret;
	trace_kvm_mmu_prepare_zap_page(sp);
	++kvm->stat.mmu_shadow_zapped;
	ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
	kvm_mmu_page_unlink_children(kvm, sp);
	kvm_mmu_unlink_parents(kvm, sp);
	/* Only valid, indirect pages were counted as shadowed. */
	if (!sp->role.invalid && !sp->role.direct)
		unaccount_shadowed(kvm, sp->gfn);
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		/* Count self */
		ret++;
		list_move(&sp->link, invalid_list);
		kvm_mod_used_mmu_pages(kvm, -1);
	} else {
		/*
		 * Still referenced as a root: keep it on the active list
		 * and force vcpus to reload so the root gets dropped.
		 */
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		kvm_reload_remote_mmus(kvm);
	}
	sp->role.invalid = 1;
	return ret;
}
/* Isolate every shadow page queued on @invalid_list. */
static void kvm_mmu_isolate_pages(struct list_head *invalid_list)
{
	struct kvm_mmu_page *entry;

	list_for_each_entry(entry, invalid_list, link)
		kvm_mmu_isolate_page(entry);
}
/*
 * RCU callback that frees a chain of zapped shadow pages.  The pages
 * were left linked through sp->link when call_rcu() was issued; @head
 * is embedded in the first page of that chain.
 */
static void free_pages_rcu(struct rcu_head *head)
{
	struct kvm_mmu_page *next, *sp;

	sp = container_of(head, struct kvm_mmu_page, rcu);
	while (sp) {
		/* Fetch the successor before sp's memory is released. */
		if (!list_empty(&sp->link))
			next = list_first_entry(&sp->link,
						struct kvm_mmu_page, link);
		else
			next = NULL;
		kvm_mmu_free_page(sp);
		sp = next;
	}
}
/*
 * Finish zapping the pages prepared on @invalid_list: flush TLBs and
 * free the pages, deferring the free through RCU when lockless readers
 * may still be walking them.  Caller holds kvm->mmu_lock.
 */
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (list_empty(invalid_list))
		return;

	/* No CPU may use a zapped translation after this point. */
	kvm_flush_remote_tlbs(kvm);

	if (atomic_read(&kvm->arch.reader_counter)) {
		/*
		 * Lockless walkers are active: detach the whole list and
		 * free it after a grace period via free_pages_rcu().
		 */
		kvm_mmu_isolate_pages(invalid_list);
		sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
		list_del_init(invalid_list);
		trace_kvm_mmu_delay_free_pages(sp);
		call_rcu(&sp->rcu, free_pages_rcu);
		return;
	}

	/* No readers: free immediately. */
	do {
		sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
		WARN_ON(!sp->role.invalid || sp->root_count);
		kvm_mmu_isolate_page(sp);
		kvm_mmu_free_page(sp);
	} while (!list_empty(invalid_list));
}
/*
 * Change the number of mmu pages allocated to the vm.
 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
{
	LIST_HEAD(invalid_list);

	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before
	 * changing the value.
	 */
	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
		       !list_empty(&kvm->arch.active_mmu_pages)) {
			struct kvm_mmu_page *page;

			/* Evict from the list tail: least recently used. */
			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
		}
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
		/* We may not have reached the goal if pages were pinned. */
		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
	}

	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
}
/*
 * Zap every indirect shadow page for @gfn so the guest page is no
 * longer write-protected.  Returns 1 if at least one page was zapped,
 * 0 otherwise.
 */
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);
	int r;

	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
	r = 0;
	spin_lock(&kvm->mmu_lock);
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
			 sp->role.word);
		r = 1;
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);

	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
/* Record in the sp holding @pte that it maps memory from @gfn's slot. */
static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(memslot_id(kvm, gfn), sp->slot_bitmap);
}
/*
* The function is based on mtrr_type_lookup() in
* arch/x86/kernel/cpu/mtrr/generic.c
*/
/*
 * Look up the MTRR memory type for [start, end).  Returns the type, or
 * 0xFF when MTRRs are disabled/unmatched, or 0xFE when the range spans
 * entries with conflicting types.
 */
static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
			 u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;
	int num_var_ranges = KVM_NR_VAR_MTRR;

	if (!mtrr_state->enabled)
		return 0xFF;

	/* Make end inclusive end, instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state->have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			/* 64K granularity: MTRR_FIX_64K_00000 */
			idx = 0;
			idx += (start >> 16);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0xC0000) {
			/* 16K granularity: MTRR_FIX_16K_80000/A0000 */
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0x1000000) {
			/* 4K granularity; outer guard already caps start
			 * below 0x100000, so this branch covers C0000-FFFFF. */
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state->fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look of multiple ranges matching this address and pick type
	 * as per MTRR precedence
	 */
	if (!(mtrr_state->enabled & 2))
		return mtrr_state->def_type;

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		/* Bit 11 of mask_lo is the valid bit. */
		if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
		       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
		       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		/* Range straddles the MTRR boundary: mixed type. */
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		/* MTRR precedence: UC wins over everything. */
		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		/* WB + WT combine to WT. */
		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		/* Any other conflict is undefined; treat as UC. */
		if (prev_match != curr_match)
			return MTRR_TYPE_UNCACHABLE;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state->def_type;
}
/*
 * Return the guest MTRR memory type for @gfn's page, falling back to
 * write-back when the MTRRs give no usable answer.
 */
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u64 start = gfn << PAGE_SHIFT;
	u8 type;

	type = get_mtrr_type(&vcpu->arch.mtrr_state, start,
			     start + PAGE_SIZE);
	/* 0xfe (mixed) and 0xff (unmatched/disabled) default to WB. */
	if (type == 0xfe || type == 0xff)
		type = MTRR_TYPE_WRBACK;

	return type;
}
EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
/* Mark @sp unsync and propagate the unsync bit up to its parents. */
static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
}
/* Mark every not-yet-unsync indirect shadow page for @gfn as unsync. */
static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
		if (sp->unsync)
			continue;
		/* Only last-level pages may legally be left unsync. */
		WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
		__kvm_unsync_page(vcpu, sp);
	}
}
/*
 * Decide whether @gfn must stay write-protected.  Returns 1 when the
 * caller must keep the page read-only; returns 0 (possibly after
 * unsyncing the shadow pages) when writes may be allowed.
 */
static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				  bool can_unsync)
{
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	bool want_unsync = false;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
		if (!can_unsync)
			return 1;

		/* Upper-level shadow pages can never be left unsync. */
		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
			return 1;

		if (!sp->unsync)
			want_unsync = true;
	}

	if (want_unsync)
		kvm_unsync_pages(vcpu, gfn);
	return 0;
}
/*
 * Build and install a shadow pte at @sptep for guest frame @gfn mapped
 * to host frame @pfn.  Returns 1 when the caller must emulate or retry
 * (the mapping could not be made writable as requested), 0 otherwise.
 */
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
		    unsigned pte_access, int user_fault,
		    int write_fault, int level,
		    gfn_t gfn, pfn_t pfn, bool speculative,
		    bool can_unsync, bool host_writable)
{
	u64 spte, entry = *sptep;
	int ret = 0;

	/* MMIO frames get a special non-present marker spte. */
	if (set_mmio_spte(sptep, gfn, pfn, pte_access))
		return 0;

	spte = PT_PRESENT_MASK;
	/* Speculative (prefetched) sptes start without the accessed bit
	 * so unused prefetches can be reclaimed. */
	if (!speculative)
		spte |= shadow_accessed_mask;

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (level > PT_PAGE_TABLE_LEVEL)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
			kvm_is_mmio_pfn(pfn));

	if (host_writable)
		spte |= SPTE_HOST_WRITEABLE;
	else
		pte_access &= ~ACC_WRITE_MASK;

	spte |= (u64)pfn << PAGE_SHIFT;

	if ((pte_access & ACC_WRITE_MASK)
	    || (!vcpu->arch.mmu.direct_map && write_fault
		&& !is_write_protection(vcpu) && !user_fault)) {

		/*
		 * A huge mapping would cover a write-protected gfn;
		 * refuse and tell the caller to retry at 4K.
		 */
		if (level > PT_PAGE_TABLE_LEVEL &&
		    has_wrprotected_page(vcpu->kvm, gfn, level)) {
			ret = 1;
			drop_spte(vcpu->kvm, sptep);
			goto done;
		}

		spte |= PT_WRITABLE_MASK;

		if (!vcpu->arch.mmu.direct_map
		    && !(pte_access & ACC_WRITE_MASK)) {
			spte &= ~PT_USER_MASK;

			/*
			 * If we converted a user page to a kernel page,
			 * so that the kernel can write to it when cr0.wp=0,
			 * then we should prevent the kernel from executing it
			 * if SMEP is enabled.
			 */
			if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
				spte |= PT64_NX_MASK;
		}

		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive). Write protection
		 * is responsibility of mmu_get_page / kvm_sync_page.
		 * Same reasoning can be applied to dirty page accounting.
		 */
		if (!can_unsync && is_writable_pte(*sptep))
			goto set_pte;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %llx, marking ro\n",
				 __func__, gfn);
			ret = 1;
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writable_pte(spte))
				spte &= ~PT_WRITABLE_MASK;
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

set_pte:
	mmu_spte_update(sptep, spte);
	/*
	 * If we overwrite a writable spte with a read-only one we
	 * should flush remote TLBs. Otherwise rmap_write_protect
	 * will find a read-only spte, even though the writable spte
	 * might be cached on a CPU's TLB.
	 */
	if (is_writable_pte(entry) && !is_writable_pte(*sptep))
		kvm_flush_remote_tlbs(vcpu->kvm);
done:
	return ret;
}
/*
 * Install a final spte at @sptep, handling overwrite of an existing
 * mapping, rmap maintenance, large-page statistics, and the emulate
 * decision (*emulate set to 1 when the fault must be emulated).
 */
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault,
			 int *emulate, int level, gfn_t gfn,
			 pfn_t pfn, bool speculative,
			 bool host_writable)
{
	int was_rmapped = 0;
	int rmap_count;

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %llx\n",
		 __func__, *sptep, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_spte(*sptep)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (level > PT_PAGE_TABLE_LEVEL &&
		    !is_large_pte(*sptep)) {
			struct kvm_mmu_page *child;
			u64 pte = *sptep;

			child = page_header(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, sptep);
			kvm_flush_remote_tlbs(vcpu->kvm);
		} else if (pfn != spte_to_pfn(*sptep)) {
			/* Same slot but a different host frame: drop it. */
			pgprintk("hfn old %llx new %llx\n",
				 spte_to_pfn(*sptep), pfn);
			drop_spte(vcpu->kvm, sptep);
			kvm_flush_remote_tlbs(vcpu->kvm);
		} else
			was_rmapped = 1;
	}

	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
		      level, gfn, pfn, speculative, true,
		      host_writable)) {
		/* Write could not be allowed: emulate and flush. */
		if (write_fault)
			*emulate = 1;
		kvm_mmu_flush_tlb(vcpu);
	}

	if (unlikely(is_mmio_spte(*sptep) && emulate))
		*emulate = 1;

	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
	pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
		 is_large_pte(*sptep)? "2MB" : "4kB",
		 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
		 *sptep, sptep);
	if (!was_rmapped && is_large_pte(*sptep))
		++vcpu->kvm->stat.lpages;

	if (is_shadow_present_pte(*sptep)) {
		page_header_update_slot(vcpu->kvm, sptep, gfn);
		if (!was_rmapped) {
			rmap_count = rmap_add(vcpu, sptep, gfn);
			/* Too many rmaps for one gfn: recycle old ones. */
			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
				rmap_recycle(vcpu, sptep, gfn);
		}
	}
	/* We hold a reference from gfn_to_pfn*(); release it. */
	kvm_release_pfn_clean(pfn);
}
/* Nothing to do: the nonpaging MMU keeps no per-cr3 state. */
static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}
/*
 * Atomically translate @gfn to a host pfn for pte prefetch.  Without a
 * usable memslot, returns the shared fault page's pfn (with an extra
 * reference taken).
 */
static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
				     bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
	if (!slot) {
		get_page(fault_page);
		return page_to_pfn(fault_page);
	}

	return hva_to_pfn_atomic(vcpu->kvm, gfn_to_hva_memslot(slot, gfn));
}
/*
 * Prefetch the consecutive sptes in [start, end) of @sp by mapping
 * their pages in one atomic batch.  Returns 0 on success, -1 when the
 * batch could not be fetched (caller stops prefetching this run).
 */
static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
{
	struct page *pages[PTE_PREFETCH_NUM];
	unsigned access = sp->role.access;
	int i, ret;
	gfn_t gfn;

	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
	/* Need a slot (and dirty logging off for writable prefetch). */
	if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK))
		return -1;

	ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
	if (ret <= 0)
		return -1;

	/* Install however many pages the atomic batch produced. */
	for (i = 0; i < ret; i++, gfn++, start++)
		mmu_set_spte(vcpu, start, ACC_ALL,
			     access, 0, 0, NULL,
			     sp->role.level, gfn,
			     page_to_pfn(pages[i]), true, true);

	return 0;
}
/*
 * Scan the PTE_PREFETCH_NUM-aligned window around @sptep and prefetch
 * each maximal run of empty sptes (the faulting spte itself acts as a
 * run separator).
 */
static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
{
	u64 *spte, *start = NULL;
	int i;

	WARN_ON(!sp->role.direct);

	/* Align down to the start of the prefetch window. */
	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (is_shadow_present_pte(*spte) || spte == sptep) {
			if (!start)
				continue;
			/* End of a run of empty sptes: prefetch it. */
			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
				break;
			start = NULL;
		} else if (!start)
			start = spte;	/* first empty spte of a new run */
	}
}
/* Speculatively map sptes adjacent to @sptep after a direct fault. */
static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
	struct kvm_mmu_page *sp;

	/*
	 * Since there is no accessed bit on EPT, there is no way to
	 * distinguish between actually accessed translations
	 * and prefetched ones, so disable pte prefetch if EPT is
	 * enabled.
	 */
	if (!shadow_accessed_mask)
		return;

	sp = page_header(__pa(sptep));
	/* Only prefetch at the last level. */
	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	__direct_pte_prefetch(vcpu, sp, sptep);
}
/*
 * Walk the shadow page table for the direct-mapped fault at @v,
 * allocating intermediate shadow pages as needed and installing the
 * final spte at @level.  Returns the emulate decision (or -ENOMEM).
 * Caller holds kvm->mmu_lock.
 */
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			int map_writable, int level, gfn_t gfn, pfn_t pfn,
			bool prefault)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int emulate = 0;
	gfn_t pseudo_gfn;

	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
		if (iterator.level == level) {
			unsigned pte_access = ACC_ALL;

			/* Target level reached: install the leaf spte. */
			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
				     0, write, &emulate,
				     level, gfn, pfn, prefault, map_writable);
			direct_pte_prefetch(vcpu, iterator.sptep);
			++vcpu->stat.pf_fixed;
			break;
		}

		if (!is_shadow_present_pte(*iterator.sptep)) {
			u64 base_addr = iterator.addr;

			/* Missing intermediate table: allocate one.  The
			 * pseudo gfn names the region the table covers. */
			base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
			pseudo_gfn = base_addr >> PAGE_SHIFT;
			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
					      iterator.level - 1,
					      1, ACC_ALL, iterator.sptep);
			if (!sp) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_pfn_clean(pfn);
				return -ENOMEM;
			}

			mmu_spte_set(iterator.sptep,
				     __pa(sp->spt)
				     | PT_PRESENT_MASK | PT_WRITABLE_MASK
				     | shadow_user_mask | shadow_x_mask
				     | shadow_accessed_mask);
		}
	}
	return emulate;
}
/*
 * Deliver SIGBUS/BUS_MCEERR_AR to @tsk for the hardware-poisoned user
 * @address.
 */
static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	/*
	 * Zero the whole siginfo before filling it: it is copied out to
	 * user space and siginfo_t contains a union plus padding, so an
	 * uninitialized struct would leak kernel stack contents.
	 */
	memset(&info, 0, sizeof(info));
	info.si_signo	= SIGBUS;
	info.si_errno	= 0;
	info.si_code	= BUS_MCEERR_AR;
	info.si_addr	= (void __user *)address;
	info.si_addr_lsb = PAGE_SHIFT;

	send_sig_info(SIGBUS, &info, tsk);
}
/*
 * Handle a fault whose pfn translation failed.  Hardware-poisoned
 * memory delivers SIGBUS to the current task and returns 0; any other
 * failure returns -EFAULT.
 */
static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
{
	kvm_release_pfn_clean(pfn);

	if (!is_hwpoison_pfn(pfn))
		return -EFAULT;

	kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
	return 0;
}
/*
 * If the faulting page turns out to be part of a transparent huge
 * page, upgrade the mapping to PT_DIRECTORY_LEVEL and re-aim *gfnp /
 * *pfnp at the huge-page-aligned frame.
 */
static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
					gfn_t *gfnp, pfn_t *pfnp, int *levelp)
{
	pfn_t pfn = *pfnp;
	gfn_t gfn = *gfnp;
	int level = *levelp;

	/*
	 * Check if it's a transparent hugepage. If this would be an
	 * hugetlbfs page, level wouldn't be set to
	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
	 * here.
	 */
	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
	    level == PT_PAGE_TABLE_LEVEL &&
	    PageTransCompound(pfn_to_page(pfn)) &&
	    !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
		unsigned long mask;
		/*
		 * mmu_notifier_retry was successful and we hold the
		 * mmu_lock here, so the pmd can't become splitting
		 * from under us, and in turn
		 * __split_huge_page_refcount() can't run from under
		 * us and we can safely transfer the refcount from
		 * PG_tail to PG_head as we switch the pfn to tail to
		 * head.
		 */
		*levelp = level = PT_DIRECTORY_LEVEL;
		mask = KVM_PAGES_PER_HPAGE(level) - 1;
		/* gfn and pfn must share the same huge-page offset. */
		VM_BUG_ON((gfn & mask) != (pfn & mask));
		if (pfn & mask) {
			/* Move the reference from the tail page to head. */
			gfn &= ~mask;
			*gfnp = gfn;
			kvm_release_pfn_clean(pfn);
			pfn &= ~mask;
			if (!get_page_unless_zero(pfn_to_page(pfn)))
				BUG();
			*pfnp = pfn;
		}
	}
}
/* Branch-hinted test for a pfn whose translation failed entirely. */
static bool mmu_invalid_pfn(pfn_t pfn)
{
	return unlikely(is_invalid_pfn(pfn));
}
/*
 * Handle the non-normal pfn cases of a fault.  Returns true (with
 * *ret_val set) when the fault is fully handled by the error path;
 * returns false when the caller should continue mapping normally
 * (possibly after caching MMIO info for a no-slot pfn).
 */
static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
				pfn_t pfn, unsigned access, int *ret_val)
{
	/*
	 * The pfn is invalid, report the error!  Use the mmu_invalid_pfn()
	 * helper (defined above) instead of open-coding the unlikely()
	 * wrapper, and return directly rather than through a goto.
	 */
	if (mmu_invalid_pfn(pfn)) {
		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
		return true;
	}

	/* No memslot backs this gfn: it is MMIO; cache the translation. */
	if (unlikely(is_noslot_pfn(pfn)))
		vcpu_cache_mmio_info(vcpu, gva, gfn, access);

	return false;
}
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
gva_t gva, pfn_t *pfn, bool write, bool *writable);
/*
 * Handle a fault for the nonpaging MMU: resolve @gfn to a host pfn
 * (possibly asynchronously) and map it via __direct_map().  Returns
 * the emulate decision, 0 for async/retry, or a negative error.
 */
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
			 bool prefault)
{
	int r;
	int level;
	int force_pt_level;
	pfn_t pfn;
	unsigned long mmu_seq;
	bool map_writable;

	force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
	if (likely(!force_pt_level)) {
		level = mapping_level(vcpu, gfn);
		/*
		 * This path builds a PAE pagetable - so we can map
		 * 2mb pages at maximum. Therefore check if the level
		 * is larger than that.
		 */
		if (level > PT_DIRECTORY_LEVEL)
			level = PT_DIRECTORY_LEVEL;

		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
	} else
		level = PT_PAGE_TABLE_LEVEL;

	/* Sample the notifier sequence before translating; re-checked
	 * under mmu_lock below to detect concurrent invalidations. */
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
		return 0;

	/* mmio */
	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
		return r;

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	if (likely(!force_pt_level))
		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
			 prefault);
	spin_unlock(&vcpu->kvm->mmu_lock);


	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
/*
 * Release the vcpu's shadow root references and invalidate root_hpa,
 * zapping any root page that became unreferenced and invalid.
 */
static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	spin_lock(&vcpu->kvm->mmu_lock);
	/* Single 64-bit root (long mode or direct map). */
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
	    (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
	     vcpu->arch.mmu.direct_map)) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid) {
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
			kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
		}
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}

	/* PAE-style roots: four entries in pae_root[]. */
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							 &invalid_list);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}
/*
 * Validate a guest root gfn.  A root outside guest-visible memory
 * raises a triple fault; returns 1 in that case, 0 when the root is
 * usable.
 */
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	if (kvm_is_visible_gfn(vcpu->kvm, root_gfn))
		return 0;

	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	return 1;
}
/*
 * Allocate shadow root pages for a direct-mapped (tdp) MMU: either one
 * PML4 root, or four PAE directory roots.  Always returns 0.
 */
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	unsigned i;

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		spin_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_free_some_pages(vcpu);
		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
				      1, ACC_ALL, NULL);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
		vcpu->arch.mmu.root_hpa = __pa(sp->spt);
	} else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
		/* PAE: one directory-level root per 1GB quadrant. */
		for (i = 0; i < 4; ++i) {
			hpa_t root = vcpu->arch.mmu.pae_root[i];

			ASSERT(!VALID_PAGE(root));
			spin_lock(&vcpu->kvm->mmu_lock);
			kvm_mmu_free_some_pages(vcpu);
			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
					      i << 30,
					      PT32_ROOT_LEVEL, 1, ACC_ALL,
					      NULL);
			root = __pa(sp->spt);
			++sp->root_count;
			spin_unlock(&vcpu->kvm->mmu_lock);
			vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
		}
		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
	} else
		BUG();

	return 0;
}
/*
 * Allocate shadow roots mirroring the guest's own page table roots.
 * Returns 0 on success, 1 when a guest root is invalid (triple fault
 * already requested) or the lm_root page cannot be allocated.
 */
static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	u64 pdptr, pm_mask;
	gfn_t root_gfn;
	int i;

	root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;

	if (mmu_check_root(vcpu, root_gfn))
		return 1;

	/*
	 * Do we shadow a long mode page table? If so we need to
	 * write-protect the guests page table root.
	 */
	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));

		spin_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_free_some_pages(vcpu);
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
				      0, ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
		vcpu->arch.mmu.root_hpa = root;
		return 0;
	}

	/*
	 * We shadow a 32 bit page table. This may be a legacy 2-level
	 * or a PAE 3-level page table. In either case we need to be aware that
	 * the shadow page table may be a PAE or a long mode page table.
	 */
	pm_mask = PT_PRESENT_MASK;
	/* Non-leaf entries of a 64-bit shadow need full permissions. */
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			/* Guest PAE: each quadrant root comes from a PDPTE. */
			pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
			if (!is_present_gpte(pdptr)) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = pdptr >> PAGE_SHIFT;
			if (mmu_check_root(vcpu, root_gfn))
				return 1;
		}
		spin_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_free_some_pages(vcpu);
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, 0,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);

		vcpu->arch.mmu.pae_root[i] = root | pm_mask;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);

	/*
	 * If we shadow a 32 bit page table with a long mode page
	 * table we enter this path.
	 */
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		if (vcpu->arch.mmu.lm_root == NULL) {
			/*
			 * The additional page necessary for this is only
			 * allocated on demand.
			 */

			u64 *lm_root;

			lm_root = (void*)get_zeroed_page(GFP_KERNEL);
			if (lm_root == NULL)
				return 1;

			lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;

			vcpu->arch.mmu.lm_root = lm_root;
		}

		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
	}

	return 0;
}
/* Dispatch root allocation based on whether the MMU is direct-mapped. */
static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmu.direct_map ? mmu_alloc_direct_roots(vcpu)
					 : mmu_alloc_shadow_roots(vcpu);
}
/*
 * Synchronize all unsync children below the current shadow roots.
 * No-op for direct MMUs or when no root is loaded.  Caller holds
 * kvm->mmu_lock.
 */
static void mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (vcpu->arch.mmu.direct_map)
		return;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	/* Cached MMIO translations may be stale after a sync. */
	vcpu_clear_mmio_info(vcpu, ~0ul);
	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;
		sp = page_header(root);
		mmu_sync_children(vcpu, sp);
		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
		return;
	}
	/* PAE-style roots: sync each present quadrant. */
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			mmu_sync_children(vcpu, sp);
		}
	}
	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
}
/* Locked wrapper around mmu_sync_roots(). */
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
}
/*
 * Without guest paging, virtual and physical addresses are identical
 * and translation can never fault.
 */
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
				  u32 access, struct x86_exception *exception)
{
	if (exception)
		exception->error_code = 0;

	return vaddr;
}
/*
 * Nested variant: the address is an L2 gpa that still needs the
 * nested MMU's gpa->gpa translation.
 */
static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
					 u32 access,
					 struct x86_exception *exception)
{
	if (exception)
		exception->error_code = 0;

	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
}
/*
 * Fast path: does the cached MMIO info already cover this fault?
 * Direct MMUs cache by gpa, shadow MMUs by gva.
 */
static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
	return direct ? vcpu_match_mmio_gpa(vcpu, addr)
		      : vcpu_match_mmio_gva(vcpu, addr);
}
/*
 * On direct hosts, the last spte only allows two states
 * for an mmio page fault:
 * - It is the mmio spte
 * - It is zapped or it is being zapped.
 *
 * This function completely checks the spte when the last spte
 * is not the mmio spte.
 */
/* Thin wrapper; the real check lives in __check_direct_spte_mmio_pf(). */
static bool check_direct_spte_mmio_pf(u64 spte)
{
	return __check_direct_spte_mmio_pf(spte);
}
/*
 * Locklessly walk the shadow table for @addr and return the first
 * non-present spte encountered (the candidate MMIO spte), or 0 if the
 * walk never starts.
 */
static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte = 0ull;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
		if (!is_shadow_present_pte(spte))
			break;
	walk_shadow_page_lockless_end(vcpu);

	return spte;
}
/*
 * If it is a real mmio page fault, return 1 and emulate the instruction
 * directly; return 0 to let the CPU fault again on the address; -1 is
 * returned if a bug is detected.
 */
int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
	u64 spte;

	/* Cached MMIO info already covers this address: emulate. */
	if (quickly_check_mmio_pf(vcpu, addr, direct))
		return 1;

	spte = walk_shadow_page_get_mmio_spte(vcpu, addr);

	if (is_mmio_spte(spte)) {
		gfn_t gfn = get_mmio_spte_gfn(spte);
		unsigned access = get_mmio_spte_access(spte);

		/* Direct faults carry a gpa; only gva caching uses addr. */
		if (direct)
			addr = 0;

		trace_handle_mmio_page_fault(addr, gfn, access);
		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
		return 1;
	}

	/*
	 * It's ok if the gva is remapped by other cpus on shadow guest,
	 * it's a BUG if the gfn is not a mmio page.
	 */
	if (direct && !check_direct_spte_mmio_pf(spte))
		return -1;

	/*
	 * If the page table is zapped by other cpus, let CPU fault again on
	 * the address.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common);
/*
 * Common-path wrapper that additionally warns when the MMIO handler
 * reports a bug (negative return).
 */
static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr,
				  u32 error_code, bool direct)
{
	int ret = handle_mmio_page_fault_common(vcpu, addr, direct);

	WARN_ON(ret < 0);
	return ret;
}
/*
 * Page-fault handler for the nonpaging MMU.  Reserved-bit faults are
 * MMIO; everything else maps the faulting page via nonpaging_map().
 */
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code, bool prefault)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);

	/* Reserved bits set: this spte marks an MMIO region. */
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return handle_mmio_page_fault(vcpu, gva, error_code, true);

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	/* No paging: gva is the gpa. */
	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn, prefault);
}
/*
 * Queue an async page fault for @gfn, recording the MMU state needed
 * to complete it later.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
{
	struct kvm_arch_async_pf arch = {
		/* Per-vcpu sequence number above the 12-bit vcpu id. */
		.token      = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id,
		.gfn        = gfn,
		.direct_map = vcpu->arch.mmu.direct_map,
		.cr3        = vcpu->arch.mmu.get_cr3(vcpu),
	};

	return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
}
/*
 * Async page faults require an in-kernel irqchip, no pending event
 * reinjection, and the guest must currently accept interrupts.
 */
static bool can_do_async_pf(struct kvm_vcpu *vcpu)
{
	if (!irqchip_in_kernel(vcpu->kvm))
		return false;
	if (kvm_event_needs_reinjection(vcpu))
		return false;

	return kvm_x86_ops->interrupt_allowed(vcpu);
}
/*
 * Translate @gfn, preferring the async path when the page is not
 * resident.  Returns true when the fault was handed off asynchronously
 * (caller returns immediately); false when *pfn holds a usable result.
 */
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
			 gva_t gva, pfn_t *pfn, bool write, bool *writable)
{
	bool async;

	*pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);

	if (!async)
		return false; /* *pfn has correct page already */

	/* Drop the placeholder reference taken by the async lookup. */
	put_page(pfn_to_page(*pfn));

	if (!prefault && can_do_async_pf(vcpu)) {
		trace_kvm_try_async_get_page(gva, gfn);
		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
			/* Same gfn already pending: just halt the vcpu. */
			trace_kvm_async_pf_doublefault(gva, gfn);
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
			return true;
		} else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
			return true;
	}

	/* Async not possible: fall back to a blocking translation. */
	*pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);

	return false;
}
/*
 * Page-fault handler for the two-dimensional-paging (EPT/NPT) MMU.
 * @gpa is the faulting guest physical address.  Returns the emulate
 * decision, 0 for async/retry, or a negative error.
 */
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
			  bool prefault)
{
	pfn_t pfn;
	int r;
	int level;
	int force_pt_level;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;
	int write = error_code & PFERR_WRITE_MASK;
	bool map_writable;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	/* Reserved bits set: this spte marks an MMIO region. */
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return handle_mmio_page_fault(vcpu, gpa, error_code, true);

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
	if (likely(!force_pt_level)) {
		level = mapping_level(vcpu, gfn);
		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
	} else
		level = PT_PAGE_TABLE_LEVEL;

	/* Sample the notifier sequence before translating; re-checked
	 * under mmu_lock below to detect concurrent invalidations. */
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
		return 0;

	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
		return r;

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	if (likely(!force_pt_level))
		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
	r = __direct_map(vcpu, gpa, write, map_writable,
			 level, gfn, pfn, prefault);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
static void nonpaging_free(struct kvm_vcpu *vcpu)
{
mmu_free_roots(vcpu);
}
/*
 * Initialize @context for a guest running without paging.  Always
 * returns 0.
 */
static int nonpaging_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	/* Callback table: identity translation, no page syncing. */
	context->new_cr3	= nonpaging_new_cr3;
	context->page_fault	= nonpaging_page_fault;
	context->gva_to_gpa	= nonpaging_gva_to_gpa;
	context->free		= nonpaging_free;
	context->sync_page	= nonpaging_sync_page;
	context->invlpg		= nonpaging_invlpg;
	context->update_pte	= nonpaging_update_pte;

	/* State: no guest levels, PAE-capable shadow, no root loaded. */
	context->root_level	= 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa	= INVALID_PAGE;
	context->direct_map	= true;
	context->nx		= false;

	return 0;
}
/* Request a guest TLB flush on the next vcpu entry. */
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
/* cr3 changed while paging: the old shadow roots are now stale. */
static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
	mmu_free_roots(vcpu);
}
/* get_cr3 callback: the guest's real cr3 value. */
static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr3(vcpu);
}
/* Forward a page fault to the active MMU's injection callback. */
static void inject_page_fault(struct kvm_vcpu *vcpu,
			      struct x86_exception *fault)
{
	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}
/* Paging contexts free their state the same way as nonpaging ones. */
static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}
/*
 * True when @gpte sets any bit that is reserved at @level.  Bit 7
 * (PSE/large page) selects which of the two mask rows applies.
 */
static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
{
	int large = (gpte >> 7) & 1;

	return (gpte & mmu->rsvd_bits_mask[large][level - 1]) != 0;
}
/*
 * Sync an MMIO spte: refresh its cached access if the gfn still
 * matches, otherwise clear it.  Returns true when @sptep was an MMIO
 * spte (handled here), false when the caller must sync it normally.
 */
static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
			   int *nr_present)
{
	if (!is_mmio_spte(*sptep))
		return false;

	if (gfn != get_mmio_spte_gfn(*sptep)) {
		/* Stale: the spte now belongs to a different gfn. */
		mmu_spte_clear_no_track(sptep);
		return true;
	}

	(*nr_present)++;
	mark_mmio_spte(sptep, gfn, access);
	return true;
}
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE
#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE
/*
 * Recompute context->rsvd_bits_mask for the current paging mode.
 * Row [0] covers non-large entries, row [1] large-page entries;
 * columns are indexed by (level - 1).
 */
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	int maxphyaddr = cpuid_maxphyaddr(vcpu);
	u64 exb_bit_rsvd = 0;

	/* Without NX, bit 63 is reserved in every entry. */
	if (!context->nx)
		exb_bit_rsvd = rsvd_bits(63, 63);
	switch (context->root_level) {
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
		context->rsvd_bits_mask[0][1] = 0;
		context->rsvd_bits_mask[0][0] = 0;
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];

		if (!is_pse(vcpu)) {
			/* PSE off: the large-page bit itself is meaningless. */
			context->rsvd_bits_mask[1][1] = 0;
			break;
		}

		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
		else
			/* 32 bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
		break;
	case PT32E_ROOT_LEVEL:
		context->rsvd_bits_mask[0][2] =
			rsvd_bits(maxphyaddr, 63) |
			rsvd_bits(7, 8) | rsvd_bits(1, 2);	/* PDPTE */
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);	/* PDE */
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62); 	/* PTE */
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62) |
			rsvd_bits(13, 20);		/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	case PT64_ROOT_LEVEL:
		context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
		context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 29);	/* 1GB page */
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 20);		/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	}
}
/*
 * Shared setup for 4-level (long mode) and 3-level (PAE) shadow paging.
 * nx and root_level must be set before computing the reserved-bit masks.
 * Always returns 0.
 */
static int paging64_init_context_common(struct kvm_vcpu *vcpu,
					struct kvm_mmu *context,
					int level)
{
	context->nx = is_nx(vcpu);
	context->root_level = level;

	reset_rsvds_bits_mask(vcpu, context);

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
	context->update_pte = paging64_update_pte;
	context->free = paging_free;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	context->direct_map = false;
	return 0;
}
/* 64-bit (long mode) guest paging: 4-level shadow page tables. */
static int paging64_init_context(struct kvm_vcpu *vcpu,
				 struct kvm_mmu *context)
{
	return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
}
/*
 * 32-bit non-PAE guest paging.  The guest walks 2-level tables while the
 * shadow uses a PAE (3-level) root.  Always returns 0.
 */
static int paging32_init_context(struct kvm_vcpu *vcpu,
				 struct kvm_mmu *context)
{
	/* nx/root_level must be valid before reset_rsvds_bits_mask(). */
	context->nx = false;
	context->root_level = PT32_ROOT_LEVEL;

	reset_rsvds_bits_mask(vcpu, context);

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->sync_page = paging32_sync_page;
	context->invlpg = paging32_invlpg;
	context->update_pte = paging32_update_pte;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	context->direct_map = false;
	return 0;
}
/* 32-bit PAE guest paging: 3-level shadow page tables. */
static int paging32E_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}
/*
 * Initialize the MMU for hardware two-dimensional paging (EPT/NPT).
 * The shadow side is a direct map; gva_to_gpa still follows the guest's
 * current paging mode for software translations.  Always returns 0.
 */
static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = vcpu->arch.walk_mmu;

	context->base_role.word = 0;
	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->update_pte = nonpaging_update_pte;
	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
	context->root_hpa = INVALID_PAGE;
	context->direct_map = true;
	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
	context->get_cr3 = get_cr3;
	context->get_pdptr = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;

	/* Pick the gva_to_gpa walker matching the guest paging mode. */
	if (!is_paging(vcpu)) {
		context->nx = false;
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		context->nx = is_nx(vcpu);
		context->root_level = PT64_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
	} else if (is_pae(vcpu)) {
		context->nx = is_nx(vcpu);
		context->root_level = PT32E_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
	} else {
		context->nx = false;
		context->root_level = PT32_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging32_gva_to_gpa;
	}

	return 0;
}
/*
 * Initialize a shadow-paging MMU context matching the guest's current
 * paging mode, and record the CR0/CR4 bits that key the shadow page
 * role.  Returns the result of the mode-specific init (0 on success).
 */
int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
	int r;
	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		r = nonpaging_init_context(vcpu, context);
	else if (is_long_mode(vcpu))
		r = paging64_init_context(vcpu, context);
	else if (is_pae(vcpu))
		r = paging32E_init_context(vcpu, context);
	else
		r = paging32_init_context(vcpu, context);

	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
	/* SMEP only matters for the shadow role when CR0.WP is clear. */
	vcpu->arch.mmu.base_role.smep_andnot_wp
		= smep && !is_write_protection(vcpu);

	return r;
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
vcpu->arch.walk_mmu->get_cr3 = get_cr3;
vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
return r;
}
/*
 * Initialize the L2-guest translation context used when running a nested
 * guest.  Only gva_to_gpa and related fields are set; no shadow pages
 * are built for this context.  Always returns 0.
 */
static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

	g_context->get_cr3 = get_cr3;
	g_context->get_pdptr = kvm_pdptr_read;
	g_context->inject_page_fault = kvm_inject_page_fault;

	/*
	 * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The
	 * translation of l2_gpa to l1_gpa addresses is done using the
	 * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa
	 * functions between mmu and nested_mmu are swapped.
	 */
	if (!is_paging(vcpu)) {
		g_context->nx = false;
		g_context->root_level = 0;
		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
	} else if (is_long_mode(vcpu)) {
		g_context->nx = is_nx(vcpu);
		g_context->root_level = PT64_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else if (is_pae(vcpu)) {
		g_context->nx = is_nx(vcpu);
		g_context->root_level = PT32E_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else {
		g_context->nx = false;
		g_context->root_level = PT32_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
	}

	return 0;
}
/*
 * Choose and initialize the right MMU flavour for this vcpu:
 * nested guest, hardware TDP (EPT/NPT), or software shadow paging.
 */
static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	if (mmu_is_nested(vcpu))
		return init_kvm_nested_mmu(vcpu);

	if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);

	return init_kvm_softmmu(vcpu);
}
/* Release the active MMU context's roots, if any are mapped. */
static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
		/* mmu.free() should set root_hpa = INVALID_PAGE */
		vcpu->arch.mmu.free(vcpu);
}
/* Tear down and rebuild the vcpu's MMU (e.g. after a mode change). */
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
/*
 * Make the MMU ready for guest entry: top up the per-vcpu memory
 * caches, allocate/sync the shadow roots, and point hardware CR3 at
 * the root.  Returns 0 on success or a negative error.
 */
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	r = mmu_alloc_roots(vcpu);
	/* Roots are synced under mmu_lock even if allocation failed. */
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (r)
		goto out;
	/* set_cr3() should ensure TLB has been flushed */
	vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);
/* Drop the shadow roots; the next kvm_mmu_load() rebuilds them. */
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
/*
 * Apply a guest pte write to the corresponding shadow pte.  Only
 * last-level sptes are updated in place; for higher levels the entry
 * stays zapped (the caller already cleared it) and we just count it.
 */
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *spte,
				  const void *new)
{
	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pte_updated;
		vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
	} else {
		++vcpu->kvm->stat.mmu_pde_zapped;
	}
}
/*
 * Decide whether replacing spte 'old' with 'new' requires flushing the
 * TLBs of other cpus: yes if a previously-present translation was
 * removed, remapped, or had permissions reduced.
 */
static bool need_remote_flush(u64 old, u64 new)
{
	/* A not-present old spte was never cached by any TLB. */
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	/* Different page frame: stale translations must go. */
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	/*
	 * Invert NX so that all of PT64_PERM_MASK reads as "permission
	 * granted" bits; flush only when a permission was revoked.
	 */
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}
/*
 * Flush TLBs after a guest pte write.  If a shadow page was zapped the
 * flush is handled by kvm_mmu_commit_zap_page(), so nothing to do here.
 */
static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
				    bool remote_flush, bool local_flush)
{
	if (!zap_page) {
		if (remote_flush)
			kvm_flush_remote_tlbs(vcpu->kvm);
		else if (local_flush)
			kvm_mmu_flush_tlb(vcpu);
	}
}
/*
 * Reconstruct the full guest pte value from an emulated write.  May
 * widen *gpa/*bytes to the containing 64-bit entry for PAE guests.
 * Returns the gpte value, or 0 if it could not be read.
 */
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
				    const u8 *new, int *bytes)
{
	u64 gentry;
	int r;

	/*
	 * Assume that the pte write on a page table of the same type
	 * as the current vcpu paging mode since we update the sptes only
	 * when they have the same mode.
	 */
	if (is_pae(vcpu) && *bytes == 4) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		*gpa &= ~(gpa_t)7;
		*bytes = 8;
		r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, min(*bytes, 8));
		if (r)
			gentry = 0;
		new = (const u8 *)&gentry;
	}

	switch (*bytes) {
	case 4:
		gentry = *(const u32 *)new;
		break;
	case 8:
		gentry = *(const u64 *)new;
		break;
	default:
		/* Partial or oversized write: treat as no usable gpte. */
		gentry = 0;
		break;
	}

	return gentry;
}
/*
* If we're seeing too many writes to a page, it may no longer be a page table,
* or we may be forking, in which case it is better to unmap the page.
*/
static bool detect_write_flooding(struct kvm_mmu_page *sp)
{
	/*
	 * Skip write-flooding detection for the sp whose level is 1:
	 * it can become unsync, so its guest page need not stay
	 * write-protected.
	 */
	if (sp->role.level == 1)
		return false;

	sp->write_flooding_count++;
	return sp->write_flooding_count >= 3;
}
/*
* Misaligned accesses are too much trouble to fix up; also, they usually
* indicate a page is not used as a page table.
*/
/*
 * Return true when a guest write does not line up with pte boundaries
 * of this shadow page (crosses an entry, or is a sub-entry write other
 * than the benign single-byte status update).
 */
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
				    int bytes)
{
	unsigned offset, pte_size, misaligned;

	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
		 gpa, bytes, sp->role.word);

	offset = offset_in_page(gpa);
	pte_size = sp->role.cr4_pae ? 8 : 4;

	/*
	 * Sometimes, the OS only writes the last one bytes to update status
	 * bits, for example, in linux, andb instruction is used in clear_bit().
	 */
	if (!(offset & (pte_size - 1)) && bytes == 1)
		return false;

	/* Nonzero if [offset, offset+bytes) spans two pte-sized slots. */
	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
	misaligned |= bytes < 4;

	return misaligned;
}
/*
 * Map a written gpa to the spte(s) it shadows in this page.  Returns a
 * pointer to the first affected spte and sets *nspte to how many were
 * touched, or NULL if the write lands in a different quadrant.
 */
static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{
	unsigned page_offset, quadrant;
	u64 *spte;
	int level;

	page_offset = offset_in_page(gpa);
	level = sp->role.level;
	*nspte = 1;
	if (!sp->role.cr4_pae) {
		/* 4-byte guest ptes shadow 8-byte sptes: scale the offset. */
		page_offset <<= 1;	/* 32->64 */
		/*
		 * A 32-bit pde maps 4MB while the shadow pdes map
		 * only 2MB.  So we need to double the offset again
		 * and zap two pdes instead of one.
		 */
		if (level == PT32_ROOT_LEVEL) {
			page_offset &= ~7; /* kill rounding error */
			page_offset <<= 1;
			*nspte = 2;
		}
		quadrant = page_offset >> PAGE_SHIFT;
		page_offset &= ~PAGE_MASK;
		if (quadrant != sp->role.quadrant)
			return NULL;
	}

	spte = &sp->spt[page_offset / sizeof(*spte)];
	return spte;
}
/*
 * Called on an emulated guest write to a write-protected page table
 * page.  Update (or zap) the affected sptes, zap pages that look like
 * they are no longer page tables, and flush TLBs as needed.
 */
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	union kvm_mmu_page_role mask = { .word = 0 };
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);
	u64 entry, gentry, *spte;
	int npte;
	bool remote_flush, local_flush, zap_page;

	/*
	 * If we don't have indirect shadow pages, it means no page is
	 * write-protected, so we can exit simply.
	 */
	if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
		return;

	zap_page = remote_flush = local_flush = false;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);

	/*
	 * No need to care whether allocation memory is successful
	 * or not since pte prefetch is skipped if it does not have
	 * enough objects in the cache.
	 */
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);

	/* Only reuse the gpte when these role bits match the vcpu's. */
	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
		if (detect_write_misaligned(sp, gpa, bytes) ||
		      detect_write_flooding(sp)) {
			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
						     &invalid_list);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}

		spte = get_written_sptes(sp, gpa, &npte);
		if (!spte)
			continue;

		local_flush = true;
		while (npte--) {
			entry = *spte;
			mmu_page_zap_pte(vcpu->kvm, sp, spte);
			if (gentry &&
			      !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
			      & mask.word) && rmap_can_add(vcpu))
				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
			if (!remote_flush && need_remote_flush(entry, *spte))
				remote_flush = true;
			++spte;
		}
	}
	mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
	spin_unlock(&vcpu->kvm->mmu_lock);
}
/*
 * Unprotect the guest page containing the given virtual address.
 * Returns nonzero if any shadow page was zapped; 0 for direct maps
 * (nothing is write-protected there).
 */
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	if (vcpu->arch.mmu.direct_map)
		return 0;

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
/*
 * Recycle the oldest shadow pages until at least KVM_REFILL_PAGES are
 * available.  NOTE(review): callers appear responsible for holding
 * mmu_lock — confirm at call sites; the lock is not taken here.
 */
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	LIST_HEAD(invalid_list);

	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
		struct kvm_mmu_page *sp;

		/* The list tail is the least recently created page. */
		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
				  struct kvm_mmu_page, link);
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
		++vcpu->kvm->stat.mmu_recycled;
	}
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
}
/*
 * Check whether this fault address matches the vcpu's cached MMIO
 * access.  Direct-map (and nested) MMUs cache the gpa; the software
 * shadow MMU caches the gva.
 */
static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
{
	bool use_gpa = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);

	return use_gpa ? vcpu_match_mmio_gpa(vcpu, addr)
		       : vcpu_match_mmio_gva(vcpu, addr);
}
/*
 * Top-level guest page fault handler.  Returns 1 if the fault was
 * resolved (guest can be resumed), 0 if emulation failed / userspace
 * must complete MMIO, or a negative error from the mode-specific
 * handler.
 */
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
		       void *insn, int insn_len)
{
	int r, emulation_type = EMULTYPE_RETRY;
	enum emulation_result er;

	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
	if (r < 0)
		goto out;

	/* r == 0: fault fixed by the MMU, no emulation needed. */
	if (!r) {
		r = 1;
		goto out;
	}

	/* Don't retry instructions that faulted on cached MMIO. */
	if (is_mmio_page_fault(vcpu, cr2))
		emulation_type = 0;

	er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		/* fall through */
	case EMULATE_FAIL:
		return 0;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
/* Handle a guest INVLPG: invalidate the spte and flush the local TLB. */
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	vcpu->arch.mmu.invlpg(vcpu, gva);
	kvm_mmu_flush_tlb(vcpu);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
/* Called by vendor code (VMX/SVM) when hardware paging is usable. */
void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);
/* Force software shadow paging (e.g. when EPT/NPT is unavailable). */
void kvm_disable_tdp(void)
{
	tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);
/*
 * Release the per-vcpu MMU page allocations.
 *
 * free_page() on a zero (NULL) address is a no-op, so no NULL check is
 * needed for either pointer; the original guarded only lm_root, which
 * was both redundant and inconsistent with the unguarded pae_root.
 */
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
	free_page((unsigned long)vcpu->arch.mmu.lm_root);
}
/*
 * Allocate the PAE root table for this vcpu.  Returns 0 on success,
 * -ENOMEM on allocation failure.
 */
static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		return -ENOMEM;

	vcpu->arch.mmu.pae_root = page_address(page);
	/* Mark all four PAE root slots as unmapped. */
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;
}
/*
 * One-time per-vcpu MMU construction: wire up default pointers and
 * allocate the root page.  Returns 0 or -ENOMEM.
 */
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	/* Until nesting is active, the walk MMU is the vcpu's own MMU. */
	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	vcpu->arch.mmu.translate_gpa = translate_gpa;
	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;

	return alloc_mmu_pages(vcpu);
}
/* Initialize the MMU mode for a freshly created vcpu (no roots yet). */
int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}
/*
 * Write-protect every last-level spte belonging to the given memslot
 * (used when dirty logging is enabled).  Large mappings are dropped
 * entirely so they refault as small, trackable pages.
 */
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

		/* Skip shadow pages that map no gfn from this slot. */
		if (!test_bit(slot, sp->slot_bitmap))
			continue;

		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (!is_shadow_present_pte(pt[i]) ||
			      !is_last_spte(pt[i], sp->role.level))
				continue;

			if (is_large_pte(pt[i])) {
				drop_spte(kvm, &pt[i]);
				--kvm->stat.lpages;
				continue;
			}

			/* avoid RMW */
			if (is_writable_pte(pt[i]))
				mmu_spte_update(&pt[i],
						pt[i] & ~PT_WRITABLE_MASK);
		}
	}
	kvm_flush_remote_tlbs(kvm);
}
/*
 * Zap every shadow page in the VM.  Restarts the walk whenever a zap
 * may have removed additional list entries.
 */
void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
			goto restart;

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}
/*
 * Shrinker helper: queue the oldest shadow page (list tail) for
 * zapping.  Caller holds mmu_lock and commits the invalid list.
 */
static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
						struct list_head *invalid_list)
{
	struct kvm_mmu_page *victim;

	victim = container_of(kvm->arch.active_mmu_pages.prev,
			      struct kvm_mmu_page, link);
	kvm_mmu_prepare_zap_page(kvm, victim, invalid_list);
}
/*
 * Memory-shrinker callback: reclaim one shadow page from the first VM
 * that has any, then rotate that VM to the list tail so reclaim is
 * spread across VMs.  Lock order: kvm_lock -> srcu -> mmu_lock.
 * Returns the (approximate) global count of used MMU pages.
 */
static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kvm *kvm;
	struct kvm *kvm_freed = NULL;
	int nr_to_scan = sc->nr_to_scan;

	/* nr_to_scan == 0 means "just report the count". */
	if (nr_to_scan == 0)
		goto out;

	raw_spin_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx;
		LIST_HEAD(invalid_list);

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);
		if (!kvm_freed && nr_to_scan > 0 &&
		    kvm->arch.n_used_mmu_pages > 0) {
			kvm_mmu_remove_some_alloc_mmu_pages(kvm,
							    &invalid_list);
			kvm_freed = kvm;
		}
		nr_to_scan--;

		kvm_mmu_commit_zap_page(kvm, &invalid_list);
		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);
	}
	if (kvm_freed)
		list_move_tail(&kvm_freed->vm_list, &vm_list);

	raw_spin_unlock(&kvm_lock);

out:
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}
/* Registered in kvm_mmu_module_init(); high seeks = reclaim reluctantly. */
static struct shrinker mmu_shrinker = {
	.shrink = mmu_shrink,
	.seeks = DEFAULT_SEEKS * 10,
};
static void mmu_destroy_caches(void)
{
if (pte_list_desc_cache)
kmem_cache_destroy(pte_list_desc_cache);
if (mmu_page_header_cache)
kmem_cache_destroy(mmu_page_header_cache);
}
/*
 * Module init: create the MMU slab caches, the global page counter and
 * register the shrinker.  Returns 0 or -ENOMEM (partial allocations
 * are cleaned up via mmu_destroy_caches()).
 */
int kvm_mmu_module_init(void)
{
	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
					    sizeof(struct pte_list_desc),
					    0, 0, NULL);
	if (!pte_list_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0))
		goto nomem;

	register_shrinker(&mmu_shrinker);

	return 0;

nomem:
	mmu_destroy_caches();
	return -ENOMEM;
}
/*
 * Calculate the number of MMU pages needed for this kvm: a fixed
 * permille of total guest memory, bounded below by the minimum
 * allocation.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	unsigned int nr_mmu_pages;
	unsigned int  nr_pages = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots)
		nr_pages += memslot->npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}
/*
 * Walk the shadow page table for 'addr' locklessly and record the spte
 * at each level into sptes[] (indexed level-1), stopping at the first
 * not-present entry.  Returns the number of sptes recorded.
 */
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte;
	int nr_sptes = 0;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
		sptes[iterator.level-1] = spte;
		nr_sptes++;
		if (!is_shadow_present_pte(spte))
			break;
	}
	walk_shadow_page_lockless_end(vcpu);

	return nr_sptes;
}
EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
/* Full per-vcpu MMU teardown: context, root page, and memory caches. */
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}
/* Module exit: undo everything kvm_mmu_module_init() set up. */
void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
	unregister_shrinker(&mmu_shrinker);
	mmu_audit_disable();
}
| gpl-2.0 |
kgp700/Nexroid-Kernel | drivers/mfd/pcf50633-adc.c | 4112 | 6282 | /* NXP PCF50633 ADC Driver
*
* (C) 2006-2008 by Openmoko, Inc.
* Author: Balaji Rao <balajirrao@openmoko.org>
* All rights reserved.
*
* Broken down from monstrous PCF50633 driver mainly by
* Harald Welte, Andy Green and Werner Almesberger
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* NOTE: This driver does not yet support subtractive ADC mode, which means
* you can do only one measurement per read request.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/completion.h>
#include <linux/mfd/pcf50633/core.h>
#include <linux/mfd/pcf50633/adc.h>
/* One queued ADC conversion: channel, averaging, and completion hook. */
struct pcf50633_adc_request {
	int mux;		/* input channel selection */
	int avg;		/* averaging mode bits */
	void (*callback)(struct pcf50633 *, void *, int);	/* result hook */
	void *callback_param;
};

/* Bridges the async callback to a synchronous waiter. */
struct pcf50633_adc_sync_request {
	int result;
	struct completion completion;
};

#define PCF50633_MAX_ADC_FIFO_DEPTH 8

/* Per-device ADC state: a fixed-size ring of pending requests. */
struct pcf50633_adc {
	struct pcf50633 *pcf;

	/* Private stuff */
	struct pcf50633_adc_request *queue[PCF50633_MAX_ADC_FIFO_DEPTH];
	int queue_head;		/* next request to complete */
	int queue_tail;		/* next free slot */
	struct mutex queue_mutex;	/* protects queue and indices */
};
/* Fetch the ADC cell's private data from the parent pcf50633 device. */
static inline struct pcf50633_adc *__to_adc(struct pcf50633 *pcf)
{
	return platform_get_drvdata(pcf->adc_pdev);
}
/* Program the ADC control registers and start a 10-bit conversion. */
static void adc_setup(struct pcf50633 *pcf, int channel, int avg)
{
	channel &= PCF50633_ADCC1_ADCMUX_MASK;

	/* kill ratiometric, but enable ACCSW biasing */
	pcf50633_reg_write(pcf, PCF50633_REG_ADCC2, 0x00);
	pcf50633_reg_write(pcf, PCF50633_REG_ADCC3, 0x01);

	/* start ADC conversion on selected channel */
	pcf50633_reg_write(pcf, PCF50633_REG_ADCC1, channel | avg |
		    PCF50633_ADCC1_ADCSTART | PCF50633_ADCC1_RES_10BIT);
}
static void trigger_next_adc_job_if_any(struct pcf50633 *pcf)
{
struct pcf50633_adc *adc = __to_adc(pcf);
int head;
head = adc->queue_head;
if (!adc->queue[head])
return;
adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg);
}
/*
 * Append a request to the ring.  If the queue was empty, the hardware
 * is idle, so start the conversion immediately.  Returns 0 or -EBUSY
 * when the ring is full.
 */
static int
adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req)
{
	struct pcf50633_adc *adc = __to_adc(pcf);
	int head, tail;

	mutex_lock(&adc->queue_mutex);

	head = adc->queue_head;
	tail = adc->queue_tail;

	/* A populated tail slot means head has caught up: ring is full. */
	if (adc->queue[tail]) {
		mutex_unlock(&adc->queue_mutex);
		dev_err(pcf->dev, "ADC queue is full, dropping request\n");
		return -EBUSY;
	}

	adc->queue[tail] = req;
	if (head == tail)
		trigger_next_adc_job_if_any(pcf);
	adc->queue_tail = (tail + 1) & (PCF50633_MAX_ADC_FIFO_DEPTH - 1);

	mutex_unlock(&adc->queue_mutex);

	return 0;
}
/* Async completion hook: stash the result and wake the sync waiter. */
static void pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param,
					    int result)
{
	struct pcf50633_adc_sync_request *sync_req = param;

	sync_req->result = result;
	complete(&sync_req->completion);
}
/*
 * Blocking ADC read: queue an async request and sleep until the IRQ
 * handler delivers the result.  Returns the 10-bit conversion value or
 * a negative errno if the request could not be queued.
 */
int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg)
{
	struct pcf50633_adc_sync_request req;
	int ret;

	init_completion(&req.completion);

	ret = pcf50633_adc_async_read(pcf, mux, avg,
		pcf50633_adc_sync_read_callback, &req);
	if (ret)
		return ret;

	wait_for_completion(&req.completion);

	return req.result;
}
EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read);
/*
 * Queue an ADC conversion; 'callback' runs from the IRQ path with the
 * result.  Returns 0, -ENOMEM, or -EBUSY if the queue is full.
 */
int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
			     void (*callback)(struct pcf50633 *, void *, int),
			     void *callback_param)
{
	struct pcf50633_adc_request *req;

	/* req is freed when the result is ready, in interrupt handler */
	req = kmalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->mux = mux;
	req->avg = avg;
	req->callback = callback;
	req->callback_param = callback_param;

	return adc_enqueue_request(pcf, req);
}
EXPORT_SYMBOL_GPL(pcf50633_adc_async_read);
/*
 * Read back the finished conversion: the 8 MSBs live in ADCS1 and the
 * 2 LSBs in ADCS3, forming a 10-bit value.
 */
static int adc_result(struct pcf50633 *pcf)
{
	u8 msb, lsb;
	u16 value;

	msb = pcf50633_reg_read(pcf, PCF50633_REG_ADCS1);
	lsb = pcf50633_reg_read(pcf, PCF50633_REG_ADCS3);
	value = (msb << 2) | (lsb & PCF50633_ADCS3_ADCDAT1L_MASK);

	dev_dbg(pcf->dev, "adc result = %d\n", value);

	return value;
}
/*
 * ADC-ready interrupt: pop the head request, read its result, start the
 * next queued conversion, then invoke the callback outside the mutex
 * and free the request.
 */
static void pcf50633_adc_irq(int irq, void *data)
{
	struct pcf50633_adc *adc = data;
	struct pcf50633 *pcf = adc->pcf;
	struct pcf50633_adc_request *req;
	int head, res;

	mutex_lock(&adc->queue_mutex);
	head = adc->queue_head;

	req = adc->queue[head];
	if (WARN_ON(!req)) {
		dev_err(pcf->dev, "pcf50633-adc irq: ADC queue empty!\n");
		mutex_unlock(&adc->queue_mutex);
		return;
	}
	adc->queue[head] = NULL;
	adc->queue_head = (head + 1) &
				(PCF50633_MAX_ADC_FIFO_DEPTH - 1);

	res = adc_result(pcf);
	trigger_next_adc_job_if_any(pcf);

	mutex_unlock(&adc->queue_mutex);

	/* Callback runs without the queue mutex held. */
	req->callback(pcf, req->callback_param, res);
	kfree(req);
}
/*
 * Platform probe: allocate the ADC state, hook up the ADCRDY interrupt
 * and initialize the request queue.  Returns 0 or -ENOMEM.
 */
static int __devinit pcf50633_adc_probe(struct platform_device *pdev)
{
	struct pcf50633_adc *adc;

	adc = kzalloc(sizeof(*adc), GFP_KERNEL);
	if (!adc)
		return -ENOMEM;

	adc->pcf = dev_to_pcf50633(pdev->dev.parent);
	platform_set_drvdata(pdev, adc);

	pcf50633_register_irq(adc->pcf, PCF50633_IRQ_ADCRDY,
					pcf50633_adc_irq, adc);

	mutex_init(&adc->queue_mutex);

	return 0;
}
/*
 * Platform remove: detach the interrupt, free any still-queued
 * requests, and release the ADC state.
 */
static int __devexit pcf50633_adc_remove(struct platform_device *pdev)
{
	struct pcf50633_adc *adc = platform_get_drvdata(pdev);
	int i, head;

	/* Free the IRQ first so no new completions can race teardown. */
	pcf50633_free_irq(adc->pcf, PCF50633_IRQ_ADCRDY);

	mutex_lock(&adc->queue_mutex);
	head = adc->queue_head;

	if (WARN_ON(adc->queue[head]))
		dev_err(adc->pcf->dev,
			"adc driver removed with request pending\n");

	for (i = 0; i < PCF50633_MAX_ADC_FIFO_DEPTH; i++)
		kfree(adc->queue[i]);

	mutex_unlock(&adc->queue_mutex);
	kfree(adc);

	return 0;
}
/* Platform glue for the "pcf50633-adc" MFD cell. */
static struct platform_driver pcf50633_adc_driver = {
	.driver = {
		.name = "pcf50633-adc",
	},
	.probe = pcf50633_adc_probe,
	.remove = __devexit_p(pcf50633_adc_remove),
};
/* Standard module registration boilerplate for the platform driver. */
static int __init pcf50633_adc_init(void)
{
	return platform_driver_register(&pcf50633_adc_driver);
}
module_init(pcf50633_adc_init);

static void __exit pcf50633_adc_exit(void)
{
	platform_driver_unregister(&pcf50633_adc_driver);
}
module_exit(pcf50633_adc_exit);

MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_DESCRIPTION("PCF50633 adc driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pcf50633-adc");
| gpl-2.0 |
nDroidProject/nDroid-kernel | arch/sh/kernel/cpu/sh3/serial-sh7720.c | 4624 | 1083 | #include <linux/serial_sci.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <cpu/serial.h>
#include <cpu/gpio.h>
/*
 * Configure the SCIF pin-function registers for SCIF0/SCIF1 on SH7720.
 * With hardware flow control (CRTSCTS) all SCIF pins except SCK are
 * given to the SCIF function (clear PxCR bits 9-2); without it only
 * TxD/RxD are (clear bits 5-2).  Ports at other base addresses are
 * left untouched.
 */
static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	unsigned short keep_mask = (cflag & CRTSCTS) ? 0xfc03 : 0xffc3;
	unsigned short data;

	if (port->mapbase == 0xa4430000) {		/* SCIF0 */
		data = __raw_readw(PORT_PTCR);
		__raw_writew(data & keep_mask, PORT_PTCR);
	} else if (port->mapbase == 0xa4438000) {	/* SCIF1 */
		data = __raw_readw(PORT_PVCR);
		__raw_writew(data & keep_mask, PORT_PVCR);
	}
}
/* Hooked up by the SH7720 platform data for the sh-sci driver. */
struct plat_sci_port_ops sh7720_sci_port_ops = {
	.init_pins = sh7720_sci_init_pins,
};
| gpl-2.0 |
STR4NG3R/android_kernel_motorola_msm8226 | drivers/net/wireless/brcm80211/brcmsmac/ampdu.c | 4880 | 35615 | /*
* Copyright (c) 2010 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <net/mac80211.h>
#include "rate.h"
#include "scb.h"
#include "phy/phy_hal.h"
#include "antsel.h"
#include "main.h"
#include "ampdu.h"
/* max number of mpdus in an ampdu */
#define AMPDU_MAX_MPDU 32
/* max number of mpdus in an ampdu to a legacy */
#define AMPDU_NUM_MPDU_LEGACY 16
/* max Tx ba window size (in pdu) */
#define AMPDU_TX_BA_MAX_WSIZE 64
/* default Tx ba window size (in pdu) */
#define AMPDU_TX_BA_DEF_WSIZE 64
/* default Rx ba window size (in pdu) */
#define AMPDU_RX_BA_DEF_WSIZE 64
/* max Rx ba window size (in pdu) */
#define AMPDU_RX_BA_MAX_WSIZE 64
/* max dur of tx ampdu (in msec) */
#define AMPDU_MAX_DUR 5
/* default tx retry limit */
#define AMPDU_DEF_RETRY_LIMIT 5
/* default tx retry limit at reg rate */
#define AMPDU_DEF_RR_RETRY_LIMIT 2
/* default weight of ampdu in txfifo */
#define AMPDU_DEF_TXPKT_WEIGHT 2
/* default ffpld reserved bytes */
#define AMPDU_DEF_FFPLD_RSVD 2048
/* # of inis to be freed on detach */
#define AMPDU_INI_FREE 10
/* max # of mpdus released at a time */
#define AMPDU_SCB_MAX_RELEASE 20
#define NUM_FFPLD_FIFO 4 /* number of fifo concerned by pre-loading */
#define FFPLD_TX_MAX_UNFL 200 /* default value of the average number of ampdu
* without underflows
*/
#define FFPLD_MPDU_SIZE 1800 /* estimate of maximum mpdu size */
#define FFPLD_MAX_MCS 23 /* we don't deal with mcs 32 */
#define FFPLD_PLD_INCR 1000 /* increments in bytes */
#define FFPLD_MAX_AMPDU_CNT 5000 /* maximum number of ampdu we
* accumulate between resets.
*/
#define AMPDU_DELIMITER_LEN 4
/* max allowed number of mpdus in an ampdu (2 streams) */
#define AMPDU_NUM_MPDU 16
#define TX_SEQ_TO_INDEX(seq) ((seq) % AMPDU_TX_BA_MAX_WSIZE)
/* max possible overhead per mpdu in the ampdu; 3 is for roundup if needed */
#define AMPDU_MAX_MPDU_OVERHEAD (FCS_LEN + DOT11_ICV_AES_LEN +\
AMPDU_DELIMITER_LEN + 3\
+ DOT11_A4_HDR_LEN + DOT11_QOS_LEN + DOT11_IV_MAX_LEN)
/* modulo add/sub, bound = 2^k */
#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
/* structure to hold tx fifo information and pre-loading state
* counters specific to tx underflows of ampdus
* some counters might be redundant with the ones in wlc or ampdu structures.
* This allows to maintain a specific state independently of
* how often and/or when the wlc counters are updated.
*
* ampdu_pld_size: number of bytes to be pre-loaded
* mcs2ampdu_table: per-mcs max # of mpdus in an ampdu
* prev_txfunfl: num of underflows last read from the HW macstats counter
* accum_txfunfl: num of underflows since we modified pld params
* accum_txampdu: num of tx ampdu since we modified pld params
* prev_txampdu: previous reading of tx ampdu
* dmaxferrate: estimated dma avg xfer rate in kbits/sec
*/
/* Fields are described in the block comment preceding this definition. */
struct brcms_fifo_info {
	u16 ampdu_pld_size;
	u8 mcs2ampdu_table[FFPLD_MAX_MCS + 1];
	u16 prev_txfunfl;
	u32 accum_txfunfl;
	u32 accum_txampdu;
	u32 prev_txampdu;
	u32 dmaxferrate;
};
/* AMPDU module specific state
*
* wlc: pointer to main wlc structure
* scb_handle: scb cubby handle to retrieve data from scb
* ini_enable: per-tid initiator enable/disable of ampdu
* ba_tx_wsize: Tx ba window size (in pdu)
* ba_rx_wsize: Rx ba window size (in pdu)
* retry_limit: mpdu transmit retry limit
* rr_retry_limit: mpdu transmit retry limit at regular rate
* retry_limit_tid: per-tid mpdu transmit retry limit
* rr_retry_limit_tid: per-tid mpdu transmit retry limit at regular rate
* mpdu_density: min mpdu spacing (0-7) ==> 2^(x-1)/8 usec
* max_pdu: max pdus allowed in ampdu
* dur: max duration of an ampdu (in msec)
* txpkt_weight: weight of ampdu in txfifo; reduces rate lag
* rx_factor: maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes
* ffpld_rsvd: number of bytes to reserve for preload
* max_txlen: max size of ampdu per mcs, bw and sgi
* mfbr: enable multiple fallback rate
* tx_max_funl: underflows should be kept such that
* (tx_max_funfl*underflows) < tx frames
* fifo_tb: table of fifo infos
*/
/* Fields are described in the block comment preceding this definition. */
struct ampdu_info {
	struct brcms_c_info *wlc;
	int scb_handle;
	u8 ini_enable[AMPDU_MAX_SCB_TID];
	u8 ba_tx_wsize;
	u8 ba_rx_wsize;
	u8 retry_limit;
	u8 rr_retry_limit;
	u8 retry_limit_tid[AMPDU_MAX_SCB_TID];
	u8 rr_retry_limit_tid[AMPDU_MAX_SCB_TID];
	u8 mpdu_density;
	s8 max_pdu;
	u8 dur;
	u8 txpkt_weight;
	u8 rx_factor;
	u32 ffpld_rsvd;
	u32 max_txlen[MCS_TABLE_SIZE][2][2];
	bool mfbr;
	u32 tx_max_funl;
	struct brcms_fifo_info fifo_tb[NUM_FFPLD_FIFO];
};
/* used for flushing ampdu packets */
struct cb_del_ampdu_pars {
	struct ieee80211_sta *sta;	/* station to match; NULL matches any
					 * (see cb_del_ampdu_pkt()) */
	u16 tid;			/* traffic identifier to match */
};
/*
 * Fill ampdu->max_txlen with the maximum ampdu byte length per mcs for
 * every bandwidth (20/40 MHz) and guard-interval (normal/short) combination.
 * rate is in Kbps and dur in msec, so len = (rate * dur) / 8 bytes.
 */
static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
{
	u32 mcs_idx;

	for (mcs_idx = 0; mcs_idx < MCS_TABLE_SIZE; mcs_idx++) {
		u32 is40, sgi;

		/* index [mcs][is40][sgi] mirrors the mcs_2_rate() arguments */
		for (is40 = 0; is40 <= 1; is40++)
			for (sgi = 0; sgi <= 1; sgi++)
				ampdu->max_txlen[mcs_idx][is40][sgi] =
					(mcs_2_rate(mcs_idx, is40 != 0,
						    sgi != 0) * dur) >> 3;
	}
}
/* Report whether the current band's PHY is 11n (and thus ampdu) capable. */
static bool brcms_c_ampdu_cap(struct ampdu_info *ampdu)
{
	return BRCMS_PHY_11N_CAP(ampdu->wlc->band) ? true : false;
}
/*
 * Enable or disable ampdu operation. Enabling requires both driver nmode
 * support and an ampdu-capable PHY; returns -ENOTSUPP when either check
 * fails, 0 otherwise. The pub->_ampdu flag always reflects the outcome.
 */
static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on)
{
	struct brcms_c_info *wlc = ampdu->wlc;

	/* pessimistically clear first; set again only when all checks pass */
	wlc->pub->_ampdu = false;

	if (!on)
		return 0;

	if (!(wlc->pub->_n_enab & SUPPORT_11N)) {
		wiphy_err(ampdu->wlc->wiphy, "wl%d: driver not "
			  "nmode enabled\n", wlc->pub->unit);
		return -ENOTSUPP;
	}
	if (!brcms_c_ampdu_cap(ampdu)) {
		wiphy_err(ampdu->wlc->wiphy, "wl%d: device not "
			  "ampdu capable\n", wlc->pub->unit);
		return -ENOTSUPP;
	}

	wlc->pub->_ampdu = on;
	return 0;
}
/* Reset the preload state of every tx fifo to its power-on defaults. */
static void brcms_c_ffpld_init(struct ampdu_info *ampdu)
{
	int fifo_idx, mcs_idx;

	for (fifo_idx = 0; fifo_idx < NUM_FFPLD_FIFO; fifo_idx++) {
		struct brcms_fifo_info *fifo = &ampdu->fifo_tb[fifo_idx];

		fifo->ampdu_pld_size = 0;
		fifo->dmaxferrate = 0;
		fifo->accum_txampdu = 0;
		fifo->prev_txfunfl = 0;
		fifo->accum_txfunfl = 0;
		/* 255 == "no per-mcs mpdu limit yet" */
		for (mcs_idx = 0; mcs_idx <= FFPLD_MAX_MCS; mcs_idx++)
			fifo->mcs2ampdu_table[mcs_idx] = 255;
	}
}
/*
 * Allocate and initialize the ampdu module state for @wlc.
 * Returns the new ampdu_info (caller frees it with brcms_c_ampdu_detach())
 * or NULL on allocation failure.
 */
struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
{
	struct ampdu_info *ampdu;
	int tid;

	ampdu = kzalloc(sizeof(struct ampdu_info), GFP_ATOMIC);
	if (ampdu == NULL)
		return NULL;

	ampdu->wlc = wlc;

	/* start with ampdu enabled on every tid, then carve out exceptions */
	for (tid = 0; tid < AMPDU_MAX_SCB_TID; tid++)
		ampdu->ini_enable[tid] = true;
	/* Disable ampdu for VO by default */
	ampdu->ini_enable[PRIO_8021D_VO] = false;
	ampdu->ini_enable[PRIO_8021D_NC] = false;
	/* Disable ampdu for BK by default since not enough fifo space */
	ampdu->ini_enable[PRIO_8021D_NONE] = false;
	ampdu->ini_enable[PRIO_8021D_BK] = false;

	ampdu->ba_tx_wsize = AMPDU_TX_BA_DEF_WSIZE;
	ampdu->ba_rx_wsize = AMPDU_RX_BA_DEF_WSIZE;
	ampdu->mpdu_density = AMPDU_DEF_MPDU_DENSITY;
	ampdu->max_pdu = AUTO;
	ampdu->dur = AMPDU_MAX_DUR;
	ampdu->txpkt_weight = AMPDU_DEF_TXPKT_WEIGHT;
	ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD;

	/*
	 * bump max ampdu rcv size to 64k for all 11n
	 * devices except 4321A0 and 4321A1
	 */
	ampdu->rx_factor = (BRCMS_ISNPHY(wlc->band) &&
			    NREV_LT(wlc->band->phyrev, 2)) ?
			   IEEE80211_HT_MAX_AMPDU_32K :
			   IEEE80211_HT_MAX_AMPDU_64K;

	ampdu->retry_limit = AMPDU_DEF_RETRY_LIMIT;
	ampdu->rr_retry_limit = AMPDU_DEF_RR_RETRY_LIMIT;
	for (tid = 0; tid < AMPDU_MAX_SCB_TID; tid++) {
		ampdu->retry_limit_tid[tid] = ampdu->retry_limit;
		ampdu->rr_retry_limit_tid[tid] = ampdu->rr_retry_limit;
	}

	brcms_c_scb_ampdu_update_max_txlen(ampdu, ampdu->dur);
	ampdu->mfbr = false;
	/* try to set ampdu to the default value */
	brcms_c_ampdu_set(ampdu, wlc->pub->_ampdu);

	ampdu->tx_max_funl = FFPLD_TX_MAX_UNFL;
	brcms_c_ffpld_init(ampdu);

	return ampdu;
}
/* Free the ampdu module state allocated by brcms_c_ampdu_attach(). */
void brcms_c_ampdu_detach(struct ampdu_info *ampdu)
{
	kfree(ampdu);
}
static void brcms_c_scb_ampdu_update_config(struct ampdu_info *ampdu,
struct scb *scb)
{
struct scb_ampdu *scb_ampdu = &scb->scb_ampdu;
int i;
scb_ampdu->max_pdu = AMPDU_NUM_MPDU;
/* go back to legacy size if some preloading is occurring */
for (i = 0; i < NUM_FFPLD_FIFO; i++) {
if (ampdu->fifo_tb[i].ampdu_pld_size > FFPLD_PLD_INCR)
scb_ampdu->max_pdu = AMPDU_NUM_MPDU_LEGACY;
}
/* apply user override */
if (ampdu->max_pdu != AUTO)
scb_ampdu->max_pdu = (u8) ampdu->max_pdu;
scb_ampdu->release = min_t(u8, scb_ampdu->max_pdu,
AMPDU_SCB_MAX_RELEASE);
if (scb_ampdu->max_rx_ampdu_bytes)
scb_ampdu->release = min_t(u8, scb_ampdu->release,
scb_ampdu->max_rx_ampdu_bytes / 1600);
scb_ampdu->release = min(scb_ampdu->release,
ampdu->fifo_tb[TX_AC_BE_FIFO].
mcs2ampdu_table[FFPLD_MAX_MCS]);
}
static void brcms_c_scb_ampdu_update_config_all(struct ampdu_info *ampdu)
{
brcms_c_scb_ampdu_update_config(ampdu, &du->wlc->pri_scb);
}
/*
 * Recompute fifo @f's estimated dma transfer rate from the current preload
 * size, then refill its per-mcs max-mpdu table so that, for every mcs whose
 * phy rate exceeds the dma rate, the ampdu is short enough for preloading
 * to cover the shortfall. The entry for FFPLD_MAX_MCS is left untouched.
 */
static void brcms_c_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
{
	int i;
	u32 phy_rate, dma_rate, tmp;
	u8 max_mpdu;
	struct brcms_fifo_info *fifo = (ampdu->fifo_tb + f);

	/* recompute the dma rate */
	/* note : we divide/multiply by 100 to avoid integer overflows */
	max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
			 AMPDU_NUM_MPDU_LEGACY);
	phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
	dma_rate =
	    (((phy_rate / 100) *
	      (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
	     / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
	fifo->dmaxferrate = dma_rate;

	/* fill up the mcs2ampdu table; do not recalc the last mcs */
	dma_rate = dma_rate >> 7;	/* scale down so products stay in 32 bits */
	for (i = 0; i < FFPLD_MAX_MCS; i++) {
		/* shifting to keep it within integer range */
		phy_rate = mcs_2_rate(i, true, false) >> 7;
		if (phy_rate > dma_rate) {
			tmp = ((fifo->ampdu_pld_size * phy_rate) /
			       ((phy_rate - dma_rate) * FFPLD_MPDU_SIZE)) + 1;
			tmp = min_t(u32, tmp, 255);
			fifo->mcs2ampdu_table[i] = (u8) tmp;
		}
	}
}
/* evaluate the dma transfer rate using the tx underflows as feedback.
 * If necessary, increase tx fifo preloading. If not enough,
 * decrease maximum ampdu size for each mcs till underflows stop
 * Return 1 if pre-loading not active, -1 if not an underflow event,
 * 0 if pre-loading module took care of the event.
 */
static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
{
	struct ampdu_info *ampdu = wlc->ampdu;
	u32 phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
	u32 txunfl_ratio;
	u8 max_mpdu;
	/* NOTE(review): current_ampdu_cnt is never updated in this function,
	 * so txunfl_ratio below is always 0 -- presumably a leftover from an
	 * older accounting scheme; confirm against the rest of the driver. */
	u32 current_ampdu_cnt = 0;
	u16 max_pld_size;
	u32 new_txunfl;
	struct brcms_fifo_info *fifo = (ampdu->fifo_tb + fid);
	uint xmtfifo_sz;
	u16 cur_txunfl;

	/* return if we got here for a different reason than underflows */
	cur_txunfl = brcms_b_read_shm(wlc->hw,
				      M_UCODE_MACSTAT +
				      offsetof(struct macstat, txfunfl[fid]));
	/* u16 arithmetic makes the delta robust against counter wrap */
	new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
	if (new_txunfl == 0) {
		BCMMSG(wlc->wiphy, "TX status FRAG set but no tx underflows\n");
		return -1;
	}
	fifo->prev_txfunfl = cur_txunfl;

	/* a zero cap disables the preload machinery altogether */
	if (!ampdu->tx_max_funl)
		return 1;

	/* check if fifo is big enough */
	if (brcms_b_xmtfifo_sz_get(wlc->hw, fid, &xmtfifo_sz))
		return -1;

	if ((TXFIFO_SIZE_UNIT * (u32) xmtfifo_sz) <= ampdu->ffpld_rsvd)
		return 1;

	max_pld_size = TXFIFO_SIZE_UNIT * xmtfifo_sz - ampdu->ffpld_rsvd;
	fifo->accum_txfunfl += new_txunfl;

	/* we need to wait for at least 10 underflows */
	if (fifo->accum_txfunfl < 10)
		return 0;

	BCMMSG(wlc->wiphy, "ampdu_count %d tx_underflows %d\n",
	       current_ampdu_cnt, fifo->accum_txfunfl);

	/*
	   compute the current ratio of tx unfl per ampdu.
	   When the current ampdu count becomes too
	   big while the ratio remains small, we reset
	   the current count in order to not
	   introduce too big of a latency in detecting a
	   large amount of tx underflows later.
	 */
	txunfl_ratio = current_ampdu_cnt / fifo->accum_txfunfl;

	if (txunfl_ratio > ampdu->tx_max_funl) {
		if (current_ampdu_cnt >= FFPLD_MAX_AMPDU_CNT)
			fifo->accum_txfunfl = 0;
		return 0;
	}
	max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
			 AMPDU_NUM_MPDU_LEGACY);

	/* In case max value max_pdu is already lower than
	   the fifo depth, there is nothing more we can do.
	 */
	if (fifo->ampdu_pld_size >= max_mpdu * FFPLD_MPDU_SIZE) {
		fifo->accum_txfunfl = 0;
		return 0;
	}

	if (fifo->ampdu_pld_size < max_pld_size) {
		/* increment by TX_FIFO_PLD_INC bytes */
		fifo->ampdu_pld_size += FFPLD_PLD_INCR;
		if (fifo->ampdu_pld_size > max_pld_size)
			fifo->ampdu_pld_size = max_pld_size;

		/* update scb release size */
		brcms_c_scb_ampdu_update_config_all(ampdu);

		/*
		 * compute a new dma xfer rate for max_mpdu @ max mcs.
		 * This is the minimum dma rate that can achieve no
		 * underflow condition for the current mpdu size.
		 *
		 * note : we divide/multiply by 100 to avoid integer overflows
		 */
		fifo->dmaxferrate =
		    (((phy_rate / 100) *
		      (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
		     / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;

		BCMMSG(wlc->wiphy, "DMA estimated transfer rate %d; "
		       "pre-load size %d\n",
		       fifo->dmaxferrate, fifo->ampdu_pld_size);
	} else {
		/* decrease ampdu size */
		if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] > 1) {
			if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] == 255)
				fifo->mcs2ampdu_table[FFPLD_MAX_MCS] =
				    AMPDU_NUM_MPDU_LEGACY - 1;
			else
				fifo->mcs2ampdu_table[FFPLD_MAX_MCS] -= 1;

			/* recompute the table */
			brcms_c_ffpld_calc_mcs2ampdu_table(ampdu, fid);

			/* update scb release size */
			brcms_c_scb_ampdu_update_config_all(ampdu);
		}
	}
	fifo->accum_txfunfl = 0;
	return 0;
}
/*
 * Make tx-side ampdu operational for @tid on the primary scb after a
 * block-ack agreement has been negotiated, recording the negotiated BA
 * window and the peer's rx size limit. Rejects tids for which ampdu was
 * disabled at attach time.
 */
void
brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
			     u8 ba_wsize,	/* negotiated ba window size (in pdu) */
			     uint max_rx_ampdu_bytes) /* from ht_cap in beacon */
{
	struct scb_ampdu *scb_ampdu;
	struct scb_ampdu_tid_ini *ini;
	struct ampdu_info *ampdu = wlc->ampdu;
	struct scb *scb = &wlc->pri_scb;
	scb_ampdu = &scb->scb_ampdu;

	if (!ampdu->ini_enable[tid]) {
		wiphy_err(ampdu->wlc->wiphy, "%s: Rejecting tid %d\n",
			  __func__, tid);
		return;
	}

	ini = &scb_ampdu->ini[tid];
	ini->tid = tid;
	ini->scb = scb_ampdu->scb;
	ini->ba_wsize = ba_wsize;
	scb_ampdu->max_rx_ampdu_bytes = max_rx_ampdu_bytes;
}
/*
 * Aggregate pending MPDUs from queue @qi (precedence @prec) into a single
 * AMPDU and hand the constituent frames to the tx fifo.
 *
 * On entry *pdu is the first candidate MPDU. On exit *pdu is NULL when the
 * chain was consumed or rejected, or points at the packet to retry when
 * brcms_c_prep_pdu() returned -EBUSY.
 *
 * Returns 0 on success, -EBUSY when queue pressure is still building, or
 * the error reported by brcms_c_prep_pdu().
 *
 * Fix versus the original: the tx_info of the *peeked* next packet was read
 * with IEEE80211_SKB_CB(p) before the NULL check on p, dereferencing a NULL
 * pointer whenever the queue ran empty mid-aggregation. The read is now
 * performed inside the if (p) block.
 */
int
brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
		  struct sk_buff **pdu, int prec)
{
	struct brcms_c_info *wlc;
	struct sk_buff *p, *pkt[AMPDU_MAX_MPDU];
	u8 tid, ndelim;
	int err = 0;
	u8 preamble_type = BRCMS_GF_PREAMBLE;
	u8 fbr_preamble_type = BRCMS_GF_PREAMBLE;
	u8 rts_preamble_type = BRCMS_LONG_PREAMBLE;
	u8 rts_fbr_preamble_type = BRCMS_LONG_PREAMBLE;
	bool rr = true, fbr = false;
	uint i, count = 0, fifo, seg_cnt = 0;
	u16 plen, len, seq = 0, mcl, mch, index, frameid, dma_len = 0;
	u32 ampdu_len, max_ampdu_bytes = 0;
	struct d11txh *txh = NULL;
	u8 *plcp;
	struct ieee80211_hdr *h;
	struct scb *scb;
	struct scb_ampdu *scb_ampdu;
	struct scb_ampdu_tid_ini *ini;
	u8 mcs = 0;
	bool use_rts = false, use_cts = false;
	u32 rspec = 0, rspec_fallback = 0;
	u32 rts_rspec = 0, rts_rspec_fallback = 0;
	u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
	struct ieee80211_rts *rts;
	u8 rr_retry_limit;
	struct brcms_fifo_info *f;
	bool fbr_iscck;
	struct ieee80211_tx_info *tx_info;
	u16 qlen;
	struct wiphy *wiphy;

	wlc = ampdu->wlc;
	wiphy = wlc->wiphy;
	p = *pdu;

	tid = (u8) (p->priority);

	f = ampdu->fifo_tb + prio2fifo[tid];

	scb = &wlc->pri_scb;
	scb_ampdu = &scb->scb_ampdu;
	ini = &scb_ampdu->ini[tid];

	/* Let pressure continue to build ... */
	qlen = pktq_plen(&qi->q, prec);
	if (ini->tx_in_transit > 0 &&
	    qlen < min(scb_ampdu->max_pdu, ini->ba_wsize))
		/* Collect multiple MPDU's to be sent in the next AMPDU */
		return -EBUSY;

	/* at this point we intend to transmit an AMPDU */
	rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
	ampdu_len = 0;
	dma_len = 0;
	while (p) {
		struct ieee80211_tx_rate *txrate;

		tx_info = IEEE80211_SKB_CB(p);
		txrate = tx_info->status.rates;

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			err = brcms_c_prep_pdu(wlc, p, &fifo);
		} else {
			wiphy_err(wiphy, "%s: AMPDU flag is off!\n", __func__);
			*pdu = NULL;
			err = 0;
			break;
		}

		if (err) {
			if (err == -EBUSY) {
				wiphy_err(wiphy, "wl%d: sendampdu: "
					  "prep_xdu retry; seq 0x%x\n",
					  wlc->pub->unit, seq);
				*pdu = p;
				break;
			}

			/* error in the packet; reject it */
			wiphy_err(wiphy, "wl%d: sendampdu: prep_xdu "
				  "rejected; seq 0x%x\n", wlc->pub->unit, seq);
			*pdu = NULL;
			break;
		}

		/* pkt is good to be aggregated */
		txh = (struct d11txh *) p->data;
		plcp = (u8 *) (txh + 1);
		h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
		seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;
		index = TX_SEQ_TO_INDEX(seq);

		/* check mcl fields and test whether it can be agg'd */
		mcl = le16_to_cpu(txh->MacTxControlLow);
		mcl &= ~TXC_AMPDU_MASK;
		fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x3);
		txh->PreloadSize = 0;	/* always default to 0 */

		/* Handle retry limits */
		if (txrate[0].count <= rr_retry_limit) {
			txrate[0].count++;
			rr = true;
			fbr = false;
		} else {
			fbr = true;
			rr = false;
			txrate[1].count++;
		}

		/* extract the length info */
		len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
		    : BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);

		/* retrieve null delimiter count */
		ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
		seg_cnt += 1;

		BCMMSG(wlc->wiphy, "wl%d: mpdu %d plcp_len %d\n",
		       wlc->pub->unit, count, len);

		/*
		 * aggregateable mpdu. For ucode/hw agg,
		 * test whether need to break or change the epoch
		 */
		if (count == 0) {
			mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT);
			/* refill the bits since might be a retx mpdu */
			mcl |= TXC_STARTMSDU;
			rts = (struct ieee80211_rts *)&txh->rts_frame;
			if (ieee80211_is_rts(rts->frame_control)) {
				mcl |= TXC_SENDRTS;
				use_rts = true;
			}
			if (ieee80211_is_cts(rts->frame_control)) {
				mcl |= TXC_SENDCTS;
				use_cts = true;
			}
		} else {
			mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT);
			mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS);
		}

		len = roundup(len, 4);
		ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN);
		dma_len += (u16) p->len;

		BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d"
		       " seg_cnt %d null delim %d\n",
		       wlc->pub->unit, ampdu_len, seg_cnt, ndelim);

		txh->MacTxControlLow = cpu_to_le16(mcl);

		/* this packet is added */
		pkt[count++] = p;

		/* patch the first MPDU */
		if (count == 1) {
			u8 plcp0, plcp3, is40, sgi;
			struct ieee80211_sta *sta;

			sta = tx_info->control.sta;

			if (rr) {
				plcp0 = plcp[0];
				plcp3 = plcp[3];
			} else {
				plcp0 = txh->FragPLCPFallback[0];
				plcp3 = txh->FragPLCPFallback[3];
			}
			is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
			sgi = plcp3_issgi(plcp3) ? 1 : 0;
			mcs = plcp0 & ~MIMO_PLCP_40MHZ;
			max_ampdu_bytes =
			    min(scb_ampdu->max_rx_ampdu_bytes,
				ampdu->max_txlen[mcs][is40][sgi]);

			if (is40)
				mimo_ctlchbw =
				   CHSPEC_SB_UPPER(wlc_phy_chanspec_get(
								 wlc->band->pi))
				   ? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;

			/* rebuild the rspec and rspec_fallback */
			rspec = RSPEC_MIMORATE;
			rspec |= plcp[0] & ~MIMO_PLCP_40MHZ;
			if (plcp[0] & MIMO_PLCP_40MHZ)
				rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);

			if (fbr_iscck)	/* CCK */
				rspec_fallback = cck_rspec(cck_phy2mac_rate
						    (txh->FragPLCPFallback[0]));
			else {	/* MIMO */
				rspec_fallback = RSPEC_MIMORATE;
				rspec_fallback |=
				    txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ;
				if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ)
					rspec_fallback |=
					    (PHY_TXC1_BW_40MHZ <<
					     RSPEC_BW_SHIFT);
			}

			if (use_rts || use_cts) {
				rts_rspec =
				    brcms_c_rspec_to_rts_rspec(wlc,
					rspec, false, mimo_ctlchbw);
				rts_rspec_fallback =
				    brcms_c_rspec_to_rts_rspec(wlc,
					rspec_fallback, false, mimo_ctlchbw);
			}
		}

		/* if (first mpdu for host agg) */
		/* test whether to add more */
		if ((mcs_2_rate(mcs, true, false) >= f->dmaxferrate) &&
		    (count == f->mcs2ampdu_table[mcs])) {
			BCMMSG(wlc->wiphy, "wl%d: PR 37644: stopping"
			       " ampdu at %d for mcs %d\n",
			       wlc->pub->unit, count, mcs);
			break;
		}

		if (count == scb_ampdu->max_pdu)
			break;

		/*
		 * check to see if the next pkt is
		 * a candidate for aggregation
		 */
		p = pktq_ppeek(&qi->q, prec);
		if (p) {
			/* tx_info must be read for the current (peeked) p,
			 * and only after the NULL check above */
			tx_info = IEEE80211_SKB_CB(p);

			if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
			    ((u8) (p->priority) == tid)) {
				plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
				plen = max(scb_ampdu->min_len, plen);

				if ((plen + ampdu_len) > max_ampdu_bytes) {
					p = NULL;
					continue;
				}

				/*
				 * check if there are enough
				 * descriptors available
				 */
				if (*wlc->core->txavail[fifo] <= seg_cnt + 1) {
					wiphy_err(wiphy, "%s: No fifo space "
						  "!!\n", __func__);
					p = NULL;
					continue;
				}
				p = brcmu_pktq_pdeq(&qi->q, prec);
			} else {
				p = NULL;
			}
		}
	}			/* end while(p) */

	ini->tx_in_transit += count;

	if (count) {
		/* patch up the last txh */
		txh = (struct d11txh *) pkt[count - 1]->data;
		mcl = le16_to_cpu(txh->MacTxControlLow);
		mcl &= ~TXC_AMPDU_MASK;
		mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
		txh->MacTxControlLow = cpu_to_le16(mcl);

		/* remove the null delimiter after last mpdu */
		ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
		txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0;
		ampdu_len -= ndelim * AMPDU_DELIMITER_LEN;

		/* remove the pad len from last mpdu */
		fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0);
		len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
		    : BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
		ampdu_len -= roundup(len, 4) - len;

		/* patch up the first txh & plcp */
		txh = (struct d11txh *) pkt[0]->data;
		plcp = (u8 *) (txh + 1);

		BRCMS_SET_MIMO_PLCP_LEN(plcp, ampdu_len);
		/* mark plcp to indicate ampdu */
		BRCMS_SET_MIMO_PLCP_AMPDU(plcp);

		/* reset the mixed mode header durations */
		if (txh->MModeLen) {
			u16 mmodelen =
			    brcms_c_calc_lsig_len(wlc, rspec, ampdu_len);
			txh->MModeLen = cpu_to_le16(mmodelen);
			preamble_type = BRCMS_MM_PREAMBLE;
		}
		if (txh->MModeFbrLen) {
			u16 mmfbrlen =
			    brcms_c_calc_lsig_len(wlc, rspec_fallback,
						  ampdu_len);
			txh->MModeFbrLen = cpu_to_le16(mmfbrlen);
			fbr_preamble_type = BRCMS_MM_PREAMBLE;
		}

		/* set the preload length */
		if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
			dma_len = min(dma_len, f->ampdu_pld_size);
			txh->PreloadSize = cpu_to_le16(dma_len);
		} else
			txh->PreloadSize = 0;

		mch = le16_to_cpu(txh->MacTxControlHigh);

		/* update RTS dur fields */
		if (use_rts || use_cts) {
			u16 durid;
			rts = (struct ieee80211_rts *)&txh->rts_frame;
			if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
			    TXC_PREAMBLE_RTS_MAIN_SHORT)
				rts_preamble_type = BRCMS_SHORT_PREAMBLE;

			if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
			    TXC_PREAMBLE_RTS_FB_SHORT)
				rts_fbr_preamble_type = BRCMS_SHORT_PREAMBLE;

			durid =
			    brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec,
						       rspec, rts_preamble_type,
						       preamble_type, ampdu_len,
						       true);
			rts->duration = cpu_to_le16(durid);
			durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
							   rts_rspec_fallback,
							   rspec_fallback,
							   rts_fbr_preamble_type,
							   fbr_preamble_type,
							   ampdu_len, true);
			txh->RTSDurFallback = cpu_to_le16(durid);
			/* set TxFesTimeNormal */
			txh->TxFesTimeNormal = rts->duration;
			/* set fallback rate version of TxFesTimeNormal */
			txh->TxFesTimeFallback = txh->RTSDurFallback;
		}

		/* set flag and plcp for fallback rate */
		if (fbr) {
			mch |= TXC_AMPDU_FBR;
			txh->MacTxControlHigh = cpu_to_le16(mch);
			BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
			BRCMS_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
		}

		BCMMSG(wlc->wiphy, "wl%d: count %d ampdu_len %d\n",
		       wlc->pub->unit, count, ampdu_len);

		/* inform rate_sel if it this is a rate probe pkt */
		frameid = le16_to_cpu(txh->TxFrameID);
		if (frameid & TXFID_RATE_PROBE_MASK)
			wiphy_err(wiphy, "%s: XXX what to do with "
				  "TXFID_RATE_PROBE_MASK!?\n", __func__);

		for (i = 0; i < count; i++)
			brcms_c_txfifo(wlc, fifo, pkt[i], i == (count - 1),
				       ampdu->txpkt_weight);
	}
	/* endif (count) */
	return err;
}
/*
 * Clear the tx rate entries beyond the two slots that ampdu tx actually
 * uses, so mac80211 does not interpret stale data as additional attempts.
 */
static void
brcms_c_ampdu_rate_status(struct brcms_c_info *wlc,
			  struct ieee80211_tx_info *tx_info,
			  struct tx_status *txs, u8 mcs)
{
	struct ieee80211_tx_rate *rates = tx_info->status.rates;
	int idx;

	/* clear the rest of the rates */
	for (idx = 2; idx < IEEE80211_TX_MAX_RATES; idx++) {
		rates[idx].idx = -1;
		rates[idx].count = 0;
	}
}
/*
 * Second-level tx status processing for one transmitted AMPDU: walk every
 * MPDU of the aggregate, complete the ones acked in the block-ack bitmap,
 * requeue the ones still within their retry budget, and time out the rest.
 * @s1/@s2 carry the additional 32-bit status words read by the caller.
 */
static void
brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
				  struct sk_buff *p, struct tx_status *txs,
				  u32 s1, u32 s2)
{
	struct scb_ampdu *scb_ampdu;
	struct brcms_c_info *wlc = ampdu->wlc;
	struct scb_ampdu_tid_ini *ini;
	u8 bitmap[8], queue, tid;
	struct d11txh *txh;
	u8 *plcp;
	struct ieee80211_hdr *h;
	u16 seq, start_seq = 0, bindex, index, mcl;
	u8 mcs = 0;
	bool ba_recd = false, ack_recd = false;
	u8 suc_mpdu = 0, tot_mpdu = 0;
	uint supr_status;
	bool update_rate = true, retry = true, tx_error = false;
	u16 mimoantsel = 0;
	u8 antselid = 0;
	u8 retry_limit, rr_retry_limit;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
	struct wiphy *wiphy = wlc->wiphy;

#ifdef DEBUG
	u8 hole[AMPDU_MAX_MPDU];
	memset(hole, 0, sizeof(hole));
#endif

	scb_ampdu = &scb->scb_ampdu;
	tid = (u8) (p->priority);

	ini = &scb_ampdu->ini[tid];
	retry_limit = ampdu->retry_limit_tid[tid];
	rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
	memset(bitmap, 0, sizeof(bitmap));
	queue = txs->frameid & TXFID_QUEUE_MASK;
	supr_status = txs->status & TX_STATUS_SUPR_MASK;

	if (txs->status & TX_STATUS_ACK_RCV) {
		/* BA received: assemble the 64-bit ack bitmap from the
		 * primary status word plus s1/s2 */
		if (TX_STATUS_SUPR_UF == supr_status)
			update_rate = false;

		WARN_ON(!(txs->status & TX_STATUS_INTERMEDIATE));
		start_seq = txs->sequence >> SEQNUM_SHIFT;
		bitmap[0] = (txs->status & TX_STATUS_BA_BMAP03_MASK) >>
		    TX_STATUS_BA_BMAP03_SHIFT;

		WARN_ON(s1 & TX_STATUS_INTERMEDIATE);
		WARN_ON(!(s1 & TX_STATUS_AMPDU));

		bitmap[0] |=
		    (s1 & TX_STATUS_BA_BMAP47_MASK) <<
		    TX_STATUS_BA_BMAP47_SHIFT;
		bitmap[1] = (s1 >> 8) & 0xff;
		bitmap[2] = (s1 >> 16) & 0xff;
		bitmap[3] = (s1 >> 24) & 0xff;

		bitmap[4] = s2 & 0xff;
		bitmap[5] = (s2 >> 8) & 0xff;
		bitmap[6] = (s2 >> 16) & 0xff;
		bitmap[7] = (s2 >> 24) & 0xff;

		ba_recd = true;
	} else {
		if (supr_status) {
			update_rate = false;
			if (supr_status == TX_STATUS_SUPR_BADCH) {
				wiphy_err(wiphy,
					  "%s: Pkt tx suppressed, illegal channel possibly %d\n",
					  __func__, CHSPEC_CHANNEL(
					  wlc->default_bss->chanspec));
			} else {
				if (supr_status != TX_STATUS_SUPR_FRAG)
					wiphy_err(wiphy, "%s: supr_status 0x%x\n",
						  __func__, supr_status);
			}
			/* no need to retry for badch; will fail again */
			if (supr_status == TX_STATUS_SUPR_BADCH ||
			    supr_status == TX_STATUS_SUPR_EXPTIME) {
				retry = false;
			} else if (supr_status == TX_STATUS_SUPR_EXPTIME) {
				/* NOTE(review): this branch is unreachable --
				 * EXPTIME is consumed by the condition above;
				 * confirm which case was meant here. */
				/* TX underflow:
				 * try tuning pre-loading or ampdu size
				 */
			} else if (supr_status == TX_STATUS_SUPR_FRAG) {
				/*
				 * if there were underflows, but pre-loading
				 * is not active, notify rate adaptation.
				 */
				if (brcms_c_ffpld_check_txfunfl(wlc,
								prio2fifo[tid]) > 0)
					tx_error = true;
			}
		} else if (txs->phyerr) {
			update_rate = false;
			wiphy_err(wiphy, "%s: ampdu tx phy error (0x%x)\n",
				  __func__, txs->phyerr);

			if (brcm_msg_level & LOG_ERROR_VAL) {
				brcmu_prpkt("txpkt (AMPDU)", p);
				brcms_c_print_txdesc((struct d11txh *) p->data);
			}
			brcms_c_print_txstatus(txs);
		}
	}

	/* loop through all pkts and retry if not acked */
	while (p) {
		tx_info = IEEE80211_SKB_CB(p);
		txh = (struct d11txh *) p->data;
		mcl = le16_to_cpu(txh->MacTxControlLow);
		plcp = (u8 *) (txh + 1);
		h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
		seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;

		/* first mpdu determines mcs/antsel for rate bookkeeping */
		if (tot_mpdu == 0) {
			mcs = plcp[0] & MIMO_PLCP_MCS_MASK;
			mimoantsel = le16_to_cpu(txh->ABI_MimoAntSel);
		}

		index = TX_SEQ_TO_INDEX(seq);
		ack_recd = false;
		if (ba_recd) {
			/* bitmap position relative to the BA start sequence */
			bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
			BCMMSG(wiphy,
			       "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n",
			       tid, seq, start_seq, bindex,
			       isset(bitmap, bindex), index);
			/* if acked then clear bit and free packet */
			if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
			    && isset(bitmap, bindex)) {
				ini->tx_in_transit--;
				ini->txretry[index] = 0;

				/*
				 * ampdu_ack_len:
				 * number of acked aggregated frames
				 */
				/* ampdu_len: number of aggregated frames */
				brcms_c_ampdu_rate_status(wlc, tx_info, txs,
							  mcs);
				tx_info->flags |= IEEE80211_TX_STAT_ACK;
				tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
				tx_info->status.ampdu_ack_len =
					tx_info->status.ampdu_len = 1;

				/* strip driver headers before handing the
				 * frame back to mac80211 */
				skb_pull(p, D11_PHY_HDR_LEN);
				skb_pull(p, D11_TXH_LEN);

				ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
							    p);
				ack_recd = true;
				suc_mpdu++;
			}
		}
		/* either retransmit or send bar if ack not recd */
		if (!ack_recd) {
			if (retry && (ini->txretry[index] < (int)retry_limit)) {
				ini->txretry[index]++;
				ini->tx_in_transit--;
				/*
				 * Use high precedence for retransmit to
				 * give some punch
				 */
				brcms_c_txq_enq(wlc, scb, p,
						BRCMS_PRIO_TO_HI_PREC(tid));
			} else {
				/* Retry timeout */
				ini->tx_in_transit--;
				ieee80211_tx_info_clear_status(tx_info);
				tx_info->status.ampdu_ack_len = 0;
				tx_info->status.ampdu_len = 1;
				tx_info->flags |=
				    IEEE80211_TX_STAT_AMPDU_NO_BACK;
				skb_pull(p, D11_PHY_HDR_LEN);
				skb_pull(p, D11_TXH_LEN);
				BCMMSG(wiphy,
				       "BA Timeout, seq %d, in_transit %d\n",
				       seq, ini->tx_in_transit);
				ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
							    p);
			}
		}
		tot_mpdu++;

		/* break out if last packet of ampdu */
		if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
		    TXC_AMPDU_LAST)
			break;

		p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
	}

	/* push out anything requeued above */
	brcms_c_send_q(wlc);

	/* update rate state */
	antselid = brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);

	brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
}
/*
 * First-level tx status entry point for AMPDUs. When the hardware reports
 * an ACK (BA), the second 8 bytes of txstatus are polled from the d11 core
 * before delegating to brcms_c_ampdu_dotxstatus_complete(); without an scb
 * the aggregate's packets are simply freed.
 */
void
brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
			 struct sk_buff *p, struct tx_status *txs)
{
	struct scb_ampdu *scb_ampdu;
	struct brcms_c_info *wlc = ampdu->wlc;
	struct scb_ampdu_tid_ini *ini;
	u32 s1 = 0, s2 = 0;
	struct ieee80211_tx_info *tx_info;

	tx_info = IEEE80211_SKB_CB(p);

	/* BMAC_NOTE: For the split driver, second level txstatus comes later
	 * So if the ACK was received then wait for the second level else just
	 * call the first one
	 */
	if (txs->status & TX_STATUS_ACK_RCV) {
		u8 status_delay = 0;

		/* wait till the next 8 bytes of txstatus is available */
		s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
		while ((s1 & TXS_V) == 0) {
			udelay(1);
			status_delay++;
			if (status_delay > 10)
				return; /* error condition */
			s1 = bcma_read32(wlc->hw->d11core,
					 D11REGOFFS(frmtxstatus));
		}

		s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));
	}

	if (scb) {
		scb_ampdu = &scb->scb_ampdu;
		ini = &scb_ampdu->ini[p->priority];
		brcms_c_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
	} else {
		/* loop through all pkts and free */
		u8 queue = txs->frameid & TXFID_QUEUE_MASK;
		struct d11txh *txh;
		u16 mcl;
		while (p) {
			tx_info = IEEE80211_SKB_CB(p);
			txh = (struct d11txh *) p->data;
			mcl = le16_to_cpu(txh->MacTxControlLow);
			brcmu_pkt_buf_free_skb(p);
			/* break out if last packet of ampdu */
			if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
			    TXC_AMPDU_LAST)
				break;
			p = dma_getnexttxp(wlc->hw->di[queue],
					   DMA_RANGE_TRANSMITTED);
		}
		brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
	}
}
/* Write the current MAC address into the ucode block-ack template RAM. */
void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc)
{
	char template[T_RAM_ACCESS_SZ * 2] = {0};

	/* driver needs to write the ta in the template; ta is at offset 16 */
	memcpy(template, wlc->pub->cur_etheraddr, ETH_ALEN);
	brcms_b_write_template_ram(wlc->hw, (T_BA_TPL_BASE + 16),
				   (T_RAM_ACCESS_SZ * 2), template);
}
/* Return true when ampdu aggregation is enabled for traffic identifier @tid. */
bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid)
{
	return wlc->ampdu->ini_enable[tid];
}
void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu)
{
struct brcms_c_info *wlc = ampdu->wlc;
/*
* Extend ucode internal watchdog timer to
* match larger received frames
*/
if ((ampdu->rx_factor & IEEE80211_HT_AMPDU_PARM_FACTOR) ==
IEEE80211_HT_MAX_AMPDU_64K) {
brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_MAX);
brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_MAX);
} else {
brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_DEF);
brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_DEF);
}
}
/*
* callback function that helps flushing ampdu packets from a priority queue
*/
static bool cb_del_ampdu_pkt(struct sk_buff *mpdu, void *arg_a)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(mpdu);
struct cb_del_ampdu_pars *ampdu_pars =
(struct cb_del_ampdu_pars *)arg_a;
bool rc;
rc = tx_info->flags & IEEE80211_TX_CTL_AMPDU ? true : false;
rc = rc && (tx_info->control.sta == NULL || ampdu_pars->sta == NULL ||
tx_info->control.sta == ampdu_pars->sta);
rc = rc && ((u8)(mpdu->priority) == ampdu_pars->tid);
return rc;
}
/*
* callback function that helps invalidating ampdu packets in a DMA queue
*/
static void dma_cb_fn_ampdu(void *txi, void *arg_a)
{
struct ieee80211_sta *sta = arg_a;
struct ieee80211_tx_info *tx_info = (struct ieee80211_tx_info *)txi;
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
(tx_info->control.sta == sta || sta == NULL))
tx_info->control.sta = NULL;
}
/*
* When a remote party is no longer available for ampdu communication, any
* pending tx ampdu packets in the driver have to be flushed.
*/
/*
 * When a remote party is no longer available for ampdu communication, any
 * pending tx ampdu packets in the driver have to be flushed: drop matching
 * frames still queued in software, then invalidate the station pointer in
 * descriptors already handed to DMA.
 *
 * Fix: the original passed the undeclared identifier 'du_pars' to
 * brcmu_pktq_pflush(); the local variable is 'ampdu_pars'.
 */
void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
			 struct ieee80211_sta *sta, u16 tid)
{
	struct brcms_txq_info *qi = wlc->pkt_queue;
	struct pktq *pq = &qi->q;
	int prec;
	struct cb_del_ampdu_pars ampdu_pars;

	ampdu_pars.sta = sta;
	ampdu_pars.tid = tid;
	for (prec = 0; prec < pq->num_prec; prec++)
		brcmu_pktq_pflush(pq, prec, true, cb_del_ampdu_pkt,
				  (void *)&ampdu_pars);
	brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu);
}
| gpl-2.0 |
shanxiwbj/linux-2.6.31.6 | net/ipv4/tcp_ipv4.c | 17 | 62556 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Implementation of the Transmission Control Protocol(TCP).
*
* IPv4 specific functions
*
*
* code split from:
* linux/ipv4/tcp.c
* linux/ipv4/tcp_input.c
* linux/ipv4/tcp_output.c
*
* See tcp.c for author information
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* Changes:
* David S. Miller : New socket lookup architecture.
* This code is dedicated to John Dyson.
* David S. Miller : Change semantics of established hash,
* half is devoted to TIME_WAIT sockets
* and the rest go in the other half.
* Andi Kleen : Add support for syncookies and fixed
* some bugs: ip options weren't passed to
* the TCP layer, missed a check for an
* ACK bit.
* Andi Kleen : Implemented fast path mtu discovery.
* Fixed many serious bugs in the
* request_sock handling and moved
* most of it into the af independent code.
* Added tail drop and some other bugfixes.
* Added new listen semantics.
* Mike McLagan : Routing by source
* Juan Jose Ciarlante: ip_dynaddr bits
* Andi Kleen: various fixes.
* Vitaly E. Lavrov : Transparent proxy revived after year
* coma.
* Andi Kleen : Fix new listen.
* Andi Kleen : Fix accept error reporting.
* YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
* Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
* a single port at the same time.
*/
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
/* Allow reuse of a TIME-WAIT socket for a new outgoing connection when it
 * is timestamp-safe; consulted in tcp_twsk_unique() below. Sysctl tunable. */
int sysctl_tcp_tw_reuse __read_mostly;
/* Latency/CPU trade-off sysctl; its consumers are elsewhere in this file
 * (not visible in this chunk). */
int sysctl_tcp_low_latency __read_mostly;
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
/* Stub when TCP-MD5 (RFC 2385) support is compiled out: no peer ever has
 * a key, so signature processing is skipped everywhere. */
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif
/* Global socket hash-table bookkeeping for TCP (shared with inet_hashtables). */
struct inet_hashinfo tcp_hashinfo;
/*
 * Derive the initial sequence number for a connection from the 4-tuple of
 * the received segment. Note the reply orientation: our local address is
 * the skb's destination and our port the skb's destination port.
 */
static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
/*
 * Decide whether an existing TIME-WAIT socket @sktw occupying the same
 * 4-tuple may be superseded by the new socket @sk. On success the new
 * socket inherits the TIME-WAIT timestamp state, a reference on @sktw is
 * taken via sock_hold() and 1 is returned; otherwise 0.
 */
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		/* start well past the old snd_nxt so the two incarnations'
		 * sequence spaces cannot overlap */
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);
/* tcp_v4_connect() - initiate an outgoing IPv4 TCP connection.
 * @sk:       socket to connect (source port may still be zero)
 * @uaddr:    destination address; must be a struct sockaddr_in
 * @addr_len: length of @uaddr
 *
 * Resolves a route, picks source address/port, enters SYN-SENT, hashes
 * the socket and transmits the initial SYN via tcp_connect().
 * Returns 0 on success or a negative errno.
 */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
struct rtable *rt;
__be32 daddr, nexthop;
int tmp;
int err;
if (addr_len < sizeof(struct sockaddr_in))
return -EINVAL;
if (usin->sin_family != AF_INET)
return -EAFNOSUPPORT;
/* With strict source routing the next hop is the first router in
 * the IP option, not the final destination. */
nexthop = daddr = usin->sin_addr.s_addr;
if (inet->opt && inet->opt->srr) {
if (!daddr)
return -EINVAL;
nexthop = inet->opt->faddr;
}
tmp = ip_route_connect(&rt, nexthop, inet->saddr,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_TCP,
inet->sport, usin->sin_port, sk, 1);
if (tmp < 0) {
if (tmp == -ENETUNREACH)
IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
return tmp;
}
/* TCP never connects to multicast or broadcast addresses. */
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
ip_rt_put(rt);
return -ENETUNREACH;
}
if (!inet->opt || !inet->opt->srr)
daddr = rt->rt_dst;
if (!inet->saddr)
inet->saddr = rt->rt_src;
inet->rcv_saddr = inet->saddr;
/* Socket reused for a different destination: drop the timestamp
 * state inherited from the previous connection. */
if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
/* Reset inherited state */
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
tp->write_seq = 0;
}
if (tcp_death_row.sysctl_tw_recycle &&
!tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
struct inet_peer *peer = rt_get_peer(rt);
/*
 * VJ's idea. We save last timestamp seen from
 * the destination in peer table, when entering state
 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
 * when trying new connection.
 */
if (peer != NULL &&
peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
tp->rx_opt.ts_recent = peer->tcp_ts;
}
}
inet->dport = usin->sin_port;
inet->daddr = daddr;
inet_csk(sk)->icsk_ext_hdr_len = 0;
if (inet->opt)
inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
/* Conservative default MSS clamp until the peer tells us more. */
tp->rx_opt.mss_clamp = 536;
/* Socket identity is still unknown (sport may be zero).
 * However we set state to SYN-SENT and not releasing socket
 * lock select source port, enter ourselves into the hash tables and
 * complete initialization after this.
 */
tcp_set_state(sk, TCP_SYN_SENT);
err = inet_hash_connect(&tcp_death_row, sk);
if (err)
goto failure;
err = ip_route_newports(&rt, IPPROTO_TCP,
inet->sport, inet->dport, sk);
if (err)
goto failure;
/* OK, now commit destination to socket. */
sk->sk_gso_type = SKB_GSO_TCPV4;
sk_setup_caps(sk, &rt->u.dst);
if (!tp->write_seq)
tp->write_seq = secure_tcp_sequence_number(inet->saddr,
inet->daddr,
inet->sport,
usin->sin_port);
inet->id = tp->write_seq ^ jiffies;
/* The route reference is now owned by the socket (sk_setup_caps). */
err = tcp_connect(sk);
rt = NULL;
if (err)
goto failure;
return 0;
failure:
/*
 * This unhashes the socket and releases the local port,
 * if necessary.
 */
tcp_set_state(sk, TCP_CLOSE);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->dport = 0;
return err;
}
/*
 * This routine does path mtu discovery as defined in RFC1191.
 * Called from tcp_v4_err() on ICMP_FRAG_NEEDED; updates the cached
 * route's PMTU and, when our MSS shrinks, retransmits immediately.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
struct dst_entry *dst;
struct inet_sock *inet = inet_sk(sk);
/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
 * send out by Linux are always <576bytes so they should go through
 * unfragmented).
 */
if (sk->sk_state == TCP_LISTEN)
return;
/* We don't check in the destentry if pmtu discovery is forbidden
 * on this route. We just assume that no packet_to_big packets
 * are send back when pmtu discovery is not active.
 * There is a small race when the user changes this flag in the
 * route, but I think that's acceptable.
 */
if ((dst = __sk_dst_check(sk, 0)) == NULL)
return;
dst->ops->update_pmtu(dst, mtu);
/* Something is about to be wrong... Remember soft error
 * for the case, if this connection will not able to recover.
 */
if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
sk->sk_err_soft = EMSGSIZE;
/* Re-read: update_pmtu() may have clamped the value. */
mtu = dst_mtu(dst);
if (inet->pmtudisc != IP_PMTUDISC_DONT &&
inet_csk(sk)->icsk_pmtu_cookie > mtu) {
tcp_sync_mss(sk, mtu);
/* Resend the TCP packet because it's
 * clear that the old packet has been
 * dropped. This is the new "fast" path mtu
 * discovery.
 */
tcp_simple_retransmit(sk);
} /* else let the usual retransmit timer handle it */
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition. If err < 0 then the socket should
 * be closed and the error returned to the user. If err > 0
 * it's just the icmp type << 8 | icmp code. After adjustment
 * header points to the first 8 bytes of the tcp header. We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */
void tcp_v4_err(struct sk_buff *skb, u32 info)
{
/* skb->data points at the embedded original IP header that the
 * ICMP message quotes; the TCP header follows it. */
struct iphdr *iph = (struct iphdr *)skb->data;
struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
struct tcp_sock *tp;
struct inet_sock *inet;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct sock *sk;
__u32 seq;
int err;
struct net *net = dev_net(skb->dev);
/* Need at least 8 bytes of the quoted TCP header (ports + seq). */
if (skb->len < (iph->ihl << 2) + 8) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return;
}
sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
iph->saddr, th->source, inet_iif(skb));
if (!sk) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return;
}
if (sk->sk_state == TCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
return;
}
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
 * servers this needs to be solved differently.
 */
if (sock_owned_by_user(sk))
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == TCP_CLOSE)
goto out;
tp = tcp_sk(sk);
seq = ntohl(th->seq);
/* Drop ICMPs quoting sequence numbers outside our send window. */
if (sk->sk_state != TCP_LISTEN &&
!between(seq, tp->snd_una, tp->snd_nxt)) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
switch (type) {
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
goto out;
case ICMP_PARAMETERPROB:
err = EPROTO;
break;
case ICMP_DEST_UNREACH:
if (code > NR_ICMP_UNREACH)
goto out;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
if (!sock_owned_by_user(sk))
do_pmtu_discovery(sk, iph, info);
goto out;
}
err = icmp_err_convert[code].errno;
break;
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
break;
default:
goto out;
}
switch (sk->sk_state) {
struct request_sock *req, **prev;
case TCP_LISTEN:
if (sock_owned_by_user(sk))
goto out;
req = inet_csk_search_req(sk, &prev, th->dest,
iph->daddr, iph->saddr);
if (!req)
goto out;
/* ICMPs are not backlogged, hence we cannot get
 an established socket here.
 */
WARN_ON(req->sk);
if (seq != tcp_rsk(req)->snt_isn) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
/*
 * Still in SYN_RECV, just remove it silently.
 * There is no good way to pass the error to the newly
 * created socket, and POSIX does not want network
 * errors returned from accept().
 */
inet_csk_reqsk_queue_drop(sk, req, prev);
goto out;
case TCP_SYN_SENT:
case TCP_SYN_RECV: /* Cannot happen.
 It can f.e. if SYNs crossed.
 */
if (!sock_owned_by_user(sk)) {
sk->sk_err = err;
sk->sk_error_report(sk);
tcp_done(sk);
} else {
sk->sk_err_soft = err;
}
goto out;
}
/* If we've already connected we will keep trying
 * until we time out, or the user gives up.
 *
 * rfc1122 4.2.3.9 allows to consider as hard errors
 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
 * but it is obsoleted by pmtu discovery).
 *
 * Note, that in modern internet, where routing is unreliable
 * and in each dark corner broken firewalls sit, sending random
 * errors ordered by their masters even this two messages finally lose
 * their original sense (even Linux sends invalid PORT_UNREACHs)
 *
 * Now we are in compliance with RFCs.
 * --ANK (980905)
 */
inet = inet_sk(sk);
if (!sock_owned_by_user(sk) && inet->recverr) {
sk->sk_err = err;
sk->sk_error_report(sk);
} else { /* Only an error on timeout */
sk->sk_err_soft = err;
}
out:
bh_unlock_sock(sk);
sock_put(sk);
}
/* Compute the TCP checksum of an outgoing IPv4 segment.
 *
 * With checksum offload (CHECKSUM_PARTIAL) only the pseudo-header sum
 * is seeded into th->check and csum_start/csum_offset tell the device
 * where to finish the job; otherwise the full checksum is computed in
 * software over the TCP header plus the accumulated payload csum.
 */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
		return;
	}

	/* Hardware offload path: seed with the pseudo-header checksum. */
	th->check = ~tcp_v4_check(len, inet->saddr, inet->daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}
/* Prepare a GSO segment's TCP checksum for hardware completion: store
 * the pseudo-header sum in th->check and mark CHECKSUM_PARTIAL.
 * Returns 0 on success, -EINVAL if the TCP header cannot be pulled.
 */
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
const struct iphdr *iph;
struct tcphdr *th;
/* Must pull before touching the headers below. */
if (!pskb_may_pull(skb, sizeof(*th)))
return -EINVAL;
iph = ip_hdr(skb);
th = tcp_hdr(skb);
th->check = 0;
th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
return 0;
}
/*
 * This routine will send an RST to the other tcp.
 *
 * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 * for reset.
 * Answer: if a packet caused RST, it is not for a socket
 * existing in our system, if it is matched to a socket,
 * it is just duplicate segment or bug in other side's TCP.
 * So that we build reply only basing on parameters
 * arrived with segment.
 * Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
struct tcphdr *th = tcp_hdr(skb);
struct {
struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
} rep;
struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *key;
#endif
struct net *net;
/* Never send a reset in response to a reset. */
if (th->rst)
return;
/* Only reply to packets actually addressed to this host. */
if (skb_rtable(skb)->rt_type != RTN_LOCAL)
return;
/* Swap the send and the receive. */
memset(&rep, 0, sizeof(rep));
rep.th.dest = th->source;
rep.th.source = th->dest;
rep.th.doff = sizeof(struct tcphdr) / 4;
rep.th.rst = 1;
if (th->ack) {
rep.th.seq = th->ack_seq;
} else {
/* No ACK in the offending segment: ACK everything it covered
 * (SYN and FIN each count as one sequence number). */
rep.th.ack = 1;
rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
skb->len - (th->doff << 2));
}
memset(&arg, 0, sizeof(arg));
arg.iov[0].iov_base = (unsigned char *)&rep;
arg.iov[0].iov_len = sizeof(rep.th);
#ifdef CONFIG_TCP_MD5SIG
key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
if (key) {
rep.opt[0] = htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
(TCPOPT_MD5SIG << 8) |
TCPOLEN_MD5SIG);
/* Update length and the length the header thinks exists */
arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
rep.th.doff = arg.iov[0].iov_len / 4;
tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
key, ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, &rep.th);
}
#endif
arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
ip_hdr(skb)->saddr, /* XXX */
arg.iov[0].iov_len, IPPROTO_TCP, 0);
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
net = dev_net(skb_dst(skb)->dev);
ip_send_reply(net->ipv4.tcp_sock, skb,
&arg, arg.iov[0].iov_len);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
 outside socket context is ugly, certainly. What can I do?
 *
 * Builds a bare ACK (optionally carrying timestamp and MD5 options)
 * from the incoming skb's addresses, swapped, and sends it via the
 * per-netns control socket.
 */
static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
u32 win, u32 ts, int oif,
struct tcp_md5sig_key *key,
int reply_flags)
{
struct tcphdr *th = tcp_hdr(skb);
struct {
struct tcphdr th;
__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
+ (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
];
} rep;
struct ip_reply_arg arg;
struct net *net = dev_net(skb_dst(skb)->dev);
memset(&rep.th, 0, sizeof(struct tcphdr));
memset(&arg, 0, sizeof(arg));
arg.iov[0].iov_base = (unsigned char *)&rep;
arg.iov[0].iov_len = sizeof(rep.th);
if (ts) {
/* NOP, NOP, TIMESTAMP option header, then the two values. */
rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
(TCPOPT_TIMESTAMP << 8) |
TCPOLEN_TIMESTAMP);
rep.opt[1] = htonl(tcp_time_stamp);
rep.opt[2] = htonl(ts);
arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
}
/* Swap the send and the receive. */
rep.th.dest = th->source;
rep.th.source = th->dest;
rep.th.doff = arg.iov[0].iov_len / 4;
rep.th.seq = htonl(seq);
rep.th.ack_seq = htonl(ack);
rep.th.ack = 1;
rep.th.window = htons(win);
#ifdef CONFIG_TCP_MD5SIG
if (key) {
/* MD5 option lands after the timestamp words, if present. */
int offset = (ts) ? 3 : 0;
rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
(TCPOPT_MD5SIG << 8) |
TCPOLEN_MD5SIG);
arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
rep.th.doff = arg.iov[0].iov_len/4;
tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
key, ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, &rep.th);
}
#endif
arg.flags = reply_flags;
arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
ip_hdr(skb)->saddr, /* XXX */
arg.iov[0].iov_len, IPPROTO_TCP, 0);
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
if (oif)
arg.bound_dev_if = oif;
ip_send_reply(net->ipv4.tcp_sock, skb,
&arg, arg.iov[0].iov_len);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
/* ACK from a TIME-WAIT bucket: echo the sequence/timestamp state saved
 * in the timewait sock, then drop the reference taken by the caller.
 */
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
	int reply_flags = tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0;

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw), reply_flags);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
{
tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
req->ts_recent,
0,
tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}
/*
 * Send a SYN-ACK after having received a SYN.
 * This still operates on a request_sock only, not on a big
 * socket.
 *
 * @dst may be a pre-resolved route (ownership is taken: it is
 * released before returning) or NULL, in which case one is looked up.
 * Returns 0 on success, -1 on routing/allocation failure, or a
 * negative transmit error.
 */
static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
struct dst_entry *dst)
{
const struct inet_request_sock *ireq = inet_rsk(req);
int err = -1;
struct sk_buff * skb;
/* First, grab a route. */
if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
return -1;
skb = tcp_make_synack(sk, dst, req);
if (skb) {
struct tcphdr *th = tcp_hdr(skb);
/* Checksum in software: the reply is built from request state,
 * not from a full socket. */
th->check = tcp_v4_check(skb->len,
ireq->loc_addr,
ireq->rmt_addr,
csum_partial(th, skb->len,
skb->csum));
err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
ireq->rmt_addr,
ireq->opt);
err = net_xmit_eval(err);
}
dst_release(dst);
return err;
}
/* Retransmit-path wrapper: no pre-resolved route, so let
 * __tcp_v4_send_synack() look one up. */
static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
{
return __tcp_v4_send_synack(sk, req, NULL);
}
/*
 * IPv4 request_sock destructor: free the IP options duplicated by
 * tcp_v4_save_options(), if any (kfree(NULL) is a no-op).
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_SYN_COOKIES
/* Log a possible SYN flood, rate-limited to one message per minute. */
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;	/* jiffies of the last warning */

	if (!time_after(jiffies, warntime + HZ * 60))
		return;

	warntime = jiffies;
	printk(KERN_INFO
	       "possible SYN flooding on port %d. Sending cookies.\n",
	       ntohs(tcp_hdr(skb)->dest));
}
#endif
/*
 * Duplicate the incoming skb's IP options into a fresh allocation for
 * storage in the request_sock.  Returns NULL when there are no options
 * or when allocation/echoing fails; the caller treats NULL as "none".
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt;

	if (!opt || !opt->optlen)
		return NULL;

	dopt = kmalloc(optlength(opt), GFP_ATOMIC);
	if (!dopt)
		return NULL;

	if (ip_options_echo(dopt, skb)) {
		kfree(dopt);
		return NULL;
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
* RFC2385 MD5 checksumming requires a mapping of
* IP address->MD5 Key.
* We need to maintain these in the sk structure.
*/
/* Find the MD5 key structure for a peer address; NULL if none. */
static struct tcp_md5sig_key *
tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_md5sig_info *md5sig = tcp_sk(sk)->md5sig_info;
	int i;

	if (!md5sig)
		return NULL;

	/* Linear scan of the per-socket IPv4 key table. */
	for (i = 0; i < md5sig->entries4; i++) {
		if (md5sig->keys4[i].addr == addr)
			return &md5sig->keys4[i].base;
	}
	return NULL;
}
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
struct sock *addr_sk)
{
return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
struct request_sock *req)
{
return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}
/* This can be called on a newly created socket, from other files.
 *
 * Install or replace the MD5 key for peer @addr.  Takes ownership of
 * @newkey: it is freed on every failure path, or stored on success
 * (freeing any previous key for the same address).
 * Returns 0 or -ENOMEM.
 */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
u8 *newkey, u8 newkeylen)
{
/* Add Key to the list */
struct tcp_md5sig_key *key;
struct tcp_sock *tp = tcp_sk(sk);
struct tcp4_md5sig_key *keys;
key = tcp_v4_md5_do_lookup(sk, addr);
if (key) {
/* Pre-existing entry - just update that one. */
kfree(key->key);
key->key = newkey;
key->keylen = newkeylen;
} else {
struct tcp_md5sig_info *md5sig;
if (!tp->md5sig_info) {
tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
GFP_ATOMIC);
if (!tp->md5sig_info) {
kfree(newkey);
return -ENOMEM;
}
/* MD5-signed segments must not be GSO-segmented. */
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
}
if (tcp_alloc_md5sig_pool() == NULL) {
kfree(newkey);
return -ENOMEM;
}
md5sig = tp->md5sig_info;
if (md5sig->alloced4 == md5sig->entries4) {
/* Table full: grow by exactly one slot. */
keys = kmalloc((sizeof(*keys) *
(md5sig->entries4 + 1)), GFP_ATOMIC);
if (!keys) {
kfree(newkey);
tcp_free_md5sig_pool();
return -ENOMEM;
}
if (md5sig->entries4)
memcpy(keys, md5sig->keys4,
sizeof(*keys) * md5sig->entries4);
/* Free old key list, and reference new one */
kfree(md5sig->keys4);
md5sig->keys4 = keys;
md5sig->alloced4++;
}
md5sig->entries4++;
md5sig->keys4[md5sig->entries4 - 1].addr = addr;
md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
}
return 0;
}
EXPORT_SYMBOL(tcp_v4_md5_do_add);
/* Add an MD5 key on @sk, bound to the peer address of @addr_sk. */
static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	__be32 addr = inet_sk(addr_sk)->daddr;

	return tcp_v4_md5_do_add(sk, addr, newkey, newkeylen);
}
/* Remove the MD5 key for peer @addr from @sk's key table.
 * Returns 0 on success, -ENOENT if no key for that address exists.
 * Caller must ensure md5sig_info is non-NULL (enforced by the
 * setsockopt path in tcp_v4_parse_md5_keys()).
 */
int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
struct tcp_sock *tp = tcp_sk(sk);
int i;
for (i = 0; i < tp->md5sig_info->entries4; i++) {
if (tp->md5sig_info->keys4[i].addr == addr) {
/* Free the key */
kfree(tp->md5sig_info->keys4[i].base.key);
tp->md5sig_info->entries4--;
if (tp->md5sig_info->entries4 == 0) {
kfree(tp->md5sig_info->keys4);
tp->md5sig_info->keys4 = NULL;
tp->md5sig_info->alloced4 = 0;
} else if (tp->md5sig_info->entries4 != i) {
/* Need to do some manipulation */
/* entries4 was already decremented, so this moves the
 * (old_count - 1 - i) trailing entries down one slot. */
memmove(&tp->md5sig_info->keys4[i],
&tp->md5sig_info->keys4[i+1],
(tp->md5sig_info->entries4 - i) *
sizeof(struct tcp4_md5sig_key));
}
tcp_free_md5sig_pool();
return 0;
}
}
return -ENOENT;
}
EXPORT_SYMBOL(tcp_v4_md5_do_del);
static void tcp_v4_clear_md5_list(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
/* Free each key, then the set of key keys,
* the crypto element, and then decrement our
* hold on the last resort crypto.
*/
if (tp->md5sig_info->entries4) {
int i;
for (i = 0; i < tp->md5sig_info->entries4; i++)
kfree(tp->md5sig_info->keys4[i].base.key);
tp->md5sig_info->entries4 = 0;
tcp_free_md5sig_pool();
}
if (tp->md5sig_info->keys4) {
kfree(tp->md5sig_info->keys4);
tp->md5sig_info->keys4 = NULL;
tp->md5sig_info->alloced4 = 0;
}
}
/* setsockopt(TCP_MD5SIG) handler: add or delete the MD5 key for the
 * address in the user-supplied struct tcp_md5sig.  An empty key/keylen
 * means "delete".  Returns 0 or a negative errno.
 */
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
int optlen)
{
struct tcp_md5sig cmd;
struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
u8 *newkey;
if (optlen < sizeof(cmd))
return -EINVAL;
if (copy_from_user(&cmd, optval, sizeof(cmd)))
return -EFAULT;
if (sin->sin_family != AF_INET)
return -EINVAL;
/* Empty key means: delete any key bound to this address. */
if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
if (!tcp_sk(sk)->md5sig_info)
return -ENOENT;
return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
}
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
return -EINVAL;
if (!tcp_sk(sk)->md5sig_info) {
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
/* NOTE(review): allocation failure reported as -EINVAL
 * rather than -ENOMEM — confirm intent before changing. */
return -EINVAL;
tp->md5sig_info = p;
/* MD5-signed segments must not be GSO-segmented. */
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
}
newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
if (!newkey)
return -ENOMEM;
/* tcp_v4_md5_do_add() takes ownership of newkey. */
return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
newkey, cmd.tcpm_keylen);
}
/* Feed the IPv4 TCP pseudo-header into the pool's running MD5 hash.
 * @nbytes is the TCP segment length placed in the pseudo-header.
 * Returns the crypto_hash_update() result (0 on success).
 */
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
__be32 daddr, __be32 saddr, int nbytes)
{
struct tcp4_pseudohdr *bp;
struct scatterlist sg;
bp = &hp->md5_blk.ip4;
/*
 * 1. the TCP pseudo-header (in the order: source IP address,
 * destination IP address, zero-padded protocol number, and
 * segment length)
 */
bp->saddr = saddr;
bp->daddr = daddr;
bp->pad = 0;
bp->protocol = IPPROTO_TCP;
bp->len = cpu_to_be16(nbytes);
sg_init_one(&sg, bp, sizeof(*bp));
return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
/* Compute the RFC 2385 MD5 digest over pseudo-header + TCP header + key
 * (no payload) into @md5_hash (16 bytes).  Used for replies built
 * without a full skb (RST/ACK).  Returns 0 on success; on any failure
 * the output is zeroed and 1 is returned.
 */
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
__be32 daddr, __be32 saddr, struct tcphdr *th)
{
struct tcp_md5sig_pool *hp;
struct hash_desc *desc;
hp = tcp_get_md5sig_pool();
if (!hp)
goto clear_hash_noput;
desc = &hp->md5_desc;
if (crypto_hash_init(desc))
goto clear_hash;
if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
goto clear_hash;
if (tcp_md5_hash_header(hp, th))
goto clear_hash;
if (tcp_md5_hash_key(hp, key))
goto clear_hash;
if (crypto_hash_final(desc, md5_hash))
goto clear_hash;
tcp_put_md5sig_pool();
return 0;
clear_hash:
tcp_put_md5sig_pool();
clear_hash_noput:
memset(md5_hash, 0, 16);
return 1;
}
/* Compute the RFC 2385 MD5 digest over pseudo-header + TCP header +
 * payload + key into @md5_hash (16 bytes).  Addresses are taken from
 * @sk, else @req, else the skb's IP header — exactly one source is
 * used.  Returns 0 on success; on failure the output is zeroed and 1
 * is returned.
 */
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
struct sock *sk, struct request_sock *req,
struct sk_buff *skb)
{
struct tcp_md5sig_pool *hp;
struct hash_desc *desc;
struct tcphdr *th = tcp_hdr(skb);
__be32 saddr, daddr;
if (sk) {
saddr = inet_sk(sk)->saddr;
daddr = inet_sk(sk)->daddr;
} else if (req) {
saddr = inet_rsk(req)->loc_addr;
daddr = inet_rsk(req)->rmt_addr;
} else {
const struct iphdr *iph = ip_hdr(skb);
saddr = iph->saddr;
daddr = iph->daddr;
}
hp = tcp_get_md5sig_pool();
if (!hp)
goto clear_hash_noput;
desc = &hp->md5_desc;
if (crypto_hash_init(desc))
goto clear_hash;
if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
goto clear_hash;
if (tcp_md5_hash_header(hp, th))
goto clear_hash;
/* Payload starts after the TCP header (doff is in 32-bit words). */
if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
goto clear_hash;
if (tcp_md5_hash_key(hp, key))
goto clear_hash;
if (crypto_hash_final(desc, md5_hash))
goto clear_hash;
tcp_put_md5sig_pool();
return 0;
clear_hash:
tcp_put_md5sig_pool();
clear_hash_noput:
memset(md5_hash, 0, 16);
return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
/* Verify the MD5 signature option of an inbound segment against the
 * key configured for the peer.  Returns 0 to accept, 1 to drop.
 */
static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
/*
 * This gets called for each TCP segment that arrives
 * so we want to be efficient.
 * We have 3 drop cases:
 * o No MD5 hash and one expected.
 * o MD5 hash and we're not expecting one.
 * o MD5 hash and its wrong.
 */
__u8 *hash_location = NULL;
struct tcp_md5sig_key *hash_expected;
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
int genhash;
unsigned char newhash[16];
hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
hash_location = tcp_parse_md5sig_option(th);
/* We've parsed the options - do we have a hash? */
if (!hash_expected && !hash_location)
return 0;
if (hash_expected && !hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
return 1;
}
if (!hash_expected && hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
return 1;
}
/* Okay, so this is hash_expected and hash_location -
 * so we need to calculate the checksum.
 */
genhash = tcp_v4_md5_hash_skb(newhash,
hash_expected,
NULL, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
if (net_ratelimit()) {
printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
&iph->saddr, ntohs(th->source),
&iph->daddr, ntohs(th->dest),
genhash ? " tcp_v4_calc_md5_hash failed" : "");
}
return 1;
}
return 0;
}
#endif
/* Operations used by the generic request_sock layer for IPv4 TCP
 * connection requests (SYN-RECV state handling). */
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
.family = PF_INET,
.obj_size = sizeof(struct tcp_request_sock),
.rtx_syn_ack = tcp_v4_send_synack,
.send_ack = tcp_v4_reqsk_send_ack,
.destructor = tcp_v4_reqsk_destructor,
.send_reset = tcp_v4_send_reset,
};
#ifdef CONFIG_TCP_MD5SIG
/* AF-specific MD5 hooks attached to each IPv4 request sock. */
static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
.md5_lookup = tcp_v4_reqsk_md5_lookup,
.calc_md5_hash = tcp_v4_md5_hash_skb,
};
#endif
/* Operations for sockets in TIME-WAIT state. */
static struct timewait_sock_ops tcp_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp_timewait_sock),
.twsk_unique = tcp_twsk_unique,
.twsk_destructor= tcp_twsk_destructor,
};
/* Handle an incoming SYN on a listening socket: allocate a request
 * sock, parse options, pick an ISN (or a SYN cookie under pressure)
 * and send the SYN-ACK.  Returns 0 in all cases (the segment is
 * consumed; drops are silent per listener semantics).
 */
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct inet_request_sock *ireq;
struct tcp_options_received tmp_opt;
struct request_sock *req;
__be32 saddr = ip_hdr(skb)->saddr;
__be32 daddr = ip_hdr(skb)->daddr;
/* Non-zero iff this SYN revived a TIME-WAIT bucket (see below). */
__u32 isn = TCP_SKB_CB(skb)->when;
struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif
/* Never answer to SYNs send to broadcast or multicast */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
goto drop;
/* TW buckets are converted to open requests without
 * limitations, they conserve resources and peer is
 * evidently real one.
 */
if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
if (sysctl_tcp_syncookies) {
want_cookie = 1;
} else
#endif
goto drop;
}
/* Accept backlog is full. If we have already queued enough
 * of warm entries in syn queue, drop request. It is better than
 * clogging syn queue with openreqs with exponentially increasing
 * timeout.
 */
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop;
req = inet_reqsk_alloc(&tcp_request_sock_ops);
if (!req)
goto drop;
#ifdef CONFIG_TCP_MD5SIG
tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif
tcp_clear_options(&tmp_opt);
/* RFC 1122 conservative default until MSS option is parsed. */
tmp_opt.mss_clamp = 536;
tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
tcp_parse_options(skb, &tmp_opt, 0);
/* Cookies encode no options except the timestamp-borne bits. */
if (want_cookie && !tmp_opt.saw_tstamp)
tcp_clear_options(&tmp_opt);
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb);
ireq = inet_rsk(req);
ireq->loc_addr = daddr;
ireq->rmt_addr = saddr;
ireq->no_srccheck = inet_sk(sk)->transparent;
ireq->opt = tcp_v4_save_options(sk, skb);
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
if (!want_cookie)
TCP_ECN_create_request(req, tcp_hdr(skb));
if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
syn_flood_warning(skb);
req->cookie_ts = tmp_opt.tstamp_ok;
#endif
isn = cookie_v4_init_sequence(sk, skb, &req->mss);
} else if (!isn) {
struct inet_peer *peer = NULL;
/* VJ's idea. We save last timestamp seen
 * from the destination in peer table, when entering
 * state TIME-WAIT, and check against it before
 * accepting new connection request.
 *
 * If "isn" is not zero, this request hit alive
 * timewait bucket, so that all the necessary checks
 * are made in the function processing timewait state.
 */
if (tmp_opt.saw_tstamp &&
tcp_death_row.sysctl_tw_recycle &&
(dst = inet_csk_route_req(sk, req)) != NULL &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL &&
peer->v4daddr == saddr) {
if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) >
TCP_PAWS_WINDOW) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
goto drop_and_release;
}
}
/* Kill the following clause, if you dislike this way. */
else if (!sysctl_tcp_syncookies &&
(sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
(sysctl_max_syn_backlog >> 2)) &&
(!peer || !peer->tcp_ts_stamp) &&
(!dst || !dst_metric(dst, RTAX_RTT))) {
/* Without syncookies last quarter of
 * backlog is filled with destinations,
 * proven to be alive.
 * It means that we continue to communicate
 * to destinations, already remembered
 * to the moment of synflood.
 */
LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
&saddr, ntohs(tcp_hdr(skb)->source));
goto drop_and_release;
}
isn = tcp_v4_init_sequence(skb);
}
tcp_rsk(req)->snt_isn = isn;
/* Cookie requests are never queued: all state is in the cookie. */
if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
goto drop_and_free;
inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
return 0;
drop_and_release:
dst_release(dst);
drop_and_free:
reqsk_free(req);
drop:
return 0;
}
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 *
 * Returns the new (locked-by-caller convention) child socket, or NULL
 * on accept-queue overflow, routing failure, or allocation failure.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet_request_sock *ireq;
struct inet_sock *newinet;
struct tcp_sock *newtp;
struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *key;
#endif
if (sk_acceptq_is_full(sk))
goto exit_overflow;
if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
goto exit;
newsk = tcp_create_openreq_child(sk, req, skb);
if (!newsk)
goto exit;
newsk->sk_gso_type = SKB_GSO_TCPV4;
sk_setup_caps(newsk, dst);
newtp = tcp_sk(newsk);
newinet = inet_sk(newsk);
ireq = inet_rsk(req);
newinet->daddr = ireq->rmt_addr;
newinet->rcv_saddr = ireq->loc_addr;
newinet->saddr = ireq->loc_addr;
/* IP options ownership moves from the request to the child. */
newinet->opt = ireq->opt;
ireq->opt = NULL;
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newinet->opt)
inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
newinet->id = newtp->write_seq ^ jiffies;
tcp_mtup_init(newsk);
tcp_sync_mss(newsk, dst_mtu(dst));
newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
if (tcp_sk(sk)->rx_opt.user_mss &&
tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
tcp_initialize_rcv_mss(newsk);
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
/*
 * We're using one, so create a matching key
 * on the newsk structure. If we fail to get
 * memory, then we end up not copying the key
 * across. Shucks.
 */
char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
if (newkey != NULL)
tcp_v4_md5_do_add(newsk, newinet->daddr,
newkey, key->keylen);
newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
}
#endif
__inet_hash_nolisten(newsk);
__inet_inherit_port(sk, newsk);
return newsk;
exit_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
dst_release(dst);
return NULL;
}
/* Resolve an inbound segment on a listener to the socket that should
 * process it: a pending request sock (handshake completion), an
 * already-established child, a SYN-cookie reconstruction, or the
 * listener itself.  Returns NULL to drop.
 */
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
struct tcphdr *th = tcp_hdr(skb);
const struct iphdr *iph = ip_hdr(skb);
struct sock *nsk;
struct request_sock **prev;
/* Find possible connection requests. */
struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
iph->saddr, iph->daddr);
if (req)
return tcp_check_req(sk, skb, req, prev);
nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
th->source, iph->daddr, th->dest, inet_iif(skb));
if (nsk) {
if (nsk->sk_state != TCP_TIME_WAIT) {
/* Returned locked; caller is responsible for unlocking. */
bh_lock_sock(nsk);
return nsk;
}
inet_twsk_put(inet_twsk(nsk));
return NULL;
}
#ifdef CONFIG_SYN_COOKIES
/* A bare ACK may complete a cookie-validated handshake. */
if (!th->rst && !th->syn && th->ack)
sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
return sk;
}
/* Initialise/verify the receive checksum of an inbound TCP segment.
 * Returns 0 when the packet may proceed (verified or deferred), or a
 * non-zero folded checksum when verification fails.
 */
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	/* Hardware summed the whole packet: check it right away. */
	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !tcp_v4_check(skb->len, iph->saddr, iph->daddr, skb->csum)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return 0;
	}

	/* Seed with the pseudo-header sum.  Short packets are verified
	 * immediately; longer ones are deferred to the copy path.
	 */
	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);
	if (skb->len <= 76)
		return __skb_checksum_complete(skb);

	return 0;
}
/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 *
 * Main per-socket receive dispatcher: fast path for ESTABLISHED,
 * request/child resolution for LISTEN, state machine otherwise.
 * Returns 0; the skb is always consumed.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
/*
 * We really want to reject the packet as early as possible
 * if:
 * o We're expecting an MD5'd packet and this is no MD5 tcp option
 * o There is an MD5 option and we're not expecting one
 */
if (tcp_v4_inbound_md5_hash(sk, skb))
goto discard;
#endif
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
TCP_CHECK_TIMER(sk);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
goto reset;
}
TCP_CHECK_TIMER(sk);
return 0;
}
/* Checksum verification was deferred for long packets: do it now. */
if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
goto csum_err;
if (sk->sk_state == TCP_LISTEN) {
struct sock *nsk = tcp_v4_hnd_req(sk, skb);
if (!nsk)
goto discard;
if (nsk != sk) {
if (tcp_child_process(sk, nsk, skb)) {
rsk = nsk;
goto reset;
}
return 0;
}
}
TCP_CHECK_TIMER(sk);
if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
goto reset;
}
TCP_CHECK_TIMER(sk);
return 0;
reset:
tcp_v4_send_reset(rsk, skb);
discard:
kfree_skb(skb);
/* Be careful here. If this function gets more complicated and
 * gcc suffers from register pressure on the x86, sk (in %ebx)
 * might be destroyed here. This current version compiles correctly,
 * but you have been warned.
 */
return 0;
csum_err:
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
goto discard;
}
/*
* From tcp_input.c
*/
/*
 * Entry point for every inbound IPv4 TCP segment (runs in softirq
 * context).  Validates the header, locates the owning socket and either
 * processes the segment directly, via the prequeue, or defers it to the
 * socket backlog when the socket is owned by a process-context user.
 */
int tcp_v4_rcv(struct sk_buff *skb)
{
    const struct iphdr *iph;
    struct tcphdr *th;
    struct sock *sk;
    int ret;
    struct net *net = dev_net(skb->dev);

    /* Ignore frames not addressed to this host. */
    if (skb->pkt_type != PACKET_HOST)
        goto discard_it;

    /* Count it even if it's bad */
    TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

    if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
        goto discard_it;

    th = tcp_hdr(skb);

    /* doff is in 32-bit words and must cover the base header. */
    if (th->doff < sizeof(struct tcphdr) / 4)
        goto bad_packet;
    if (!pskb_may_pull(skb, th->doff * 4))
        goto discard_it;

    /* An explanation is required here, I think.
     * Packet length and doff are validated by header prediction,
     * provided case of th->doff==0 is eliminated.
     * So, we defer the checks. */
    if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
        goto bad_packet;

    /* pskb_may_pull() may have moved data; re-fetch header pointers. */
    th = tcp_hdr(skb);
    iph = ip_hdr(skb);
    /* Cache parsed header fields in the skb control block. */
    TCP_SKB_CB(skb)->seq = ntohl(th->seq);
    TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                skb->len - th->doff * 4);
    TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
    TCP_SKB_CB(skb)->when = 0;
    TCP_SKB_CB(skb)->flags = iph->tos;
    TCP_SKB_CB(skb)->sacked = 0;

    sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
    if (!sk)
        goto no_tcp_socket;

process:
    if (sk->sk_state == TCP_TIME_WAIT)
        goto do_time_wait;

    if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
        goto discard_and_relse;
    nf_reset(skb);

    if (sk_filter(sk, skb))
        goto discard_and_relse;

    skb->dev = NULL;

    bh_lock_sock_nested(sk);
    ret = 0;
    if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
        struct tcp_sock *tp = tcp_sk(sk);
        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
            tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
        if (tp->ucopy.dma_chan)
            ret = tcp_v4_do_rcv(sk, skb);
        else
#endif
        {
            /* Try the prequeue fast path before full processing. */
            if (!tcp_prequeue(sk, skb))
                ret = tcp_v4_do_rcv(sk, skb);
        }
    } else
        /* Socket busy in process context: queue for later. */
        sk_add_backlog(sk, skb);
    bh_unlock_sock(sk);

    sock_put(sk);

    return ret;

no_tcp_socket:
    if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
        goto discard_it;

    if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
        TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
    } else {
        /* Valid segment but no socket: answer with a reset. */
        tcp_v4_send_reset(NULL, skb);
    }

discard_it:
    /* Discard frame. */
    kfree_skb(skb);
    return 0;

discard_and_relse:
    sock_put(sk);
    goto discard_it;

do_time_wait:
    if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
        inet_twsk_put(inet_twsk(sk));
        goto discard_it;
    }

    if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
        TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
        inet_twsk_put(inet_twsk(sk));
        goto discard_it;
    }
    switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
    case TCP_TW_SYN: {
        /* SYN reusing a TIME_WAIT pair: re-dispatch to a listener
         * if one exists, retiring the timewait sock.
         */
        struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
                                                &tcp_hashinfo,
                                                iph->daddr, th->dest,
                                                inet_iif(skb));
        if (sk2) {
            inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
            inet_twsk_put(inet_twsk(sk));
            sk = sk2;
            goto process;
        }
        /* Fall through to ACK */
    }
    case TCP_TW_ACK:
        tcp_v4_timewait_ack(sk, skb);
        break;
    case TCP_TW_RST:
        goto no_tcp_socket;
    case TCP_TW_SUCCESS:;
    }
    goto discard_it;
}
/* VJ's idea. Save last timestamp seen from this destination
* and hold it at least for normal timewait interval to use for duplicate
* segment detection in subsequent connections, before they enter synchronized
* state.
*/
int tcp_v4_remember_stamp(struct sock *sk)
{
    struct inet_sock *inet = inet_sk(sk);
    struct tcp_sock *tp = tcp_sk(sk);
    struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
    struct inet_peer *peer = NULL;
    int release_it = 0;  /* true when we hold our own peer reference */

    /* Reuse the peer cached on the route when it matches; otherwise
     * take a reference of our own which must be dropped below.
     */
    if (!rt || rt->rt_dst != inet->daddr) {
        peer = inet_getpeer(inet->daddr, 1);
        release_it = 1;
    } else {
        if (!rt->peer)
            rt_bind_peer(rt, 1);
        peer = rt->peer;
    }

    if (peer) {
        /* Update the stored timestamp only if ours is newer, or the
         * stored one is stale (older than TCP_PAWS_MSL).
         */
        if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
            (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
             peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
            peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
            peer->tcp_ts = tp->rx_opt.ts_recent;
        }
        if (release_it)
            inet_putpeer(peer);
        return 1;
    }

    return 0;
}
/*
 * Save the last timestamp seen from a TIME_WAIT peer into the inet_peer
 * cache (same policy as tcp_v4_remember_stamp(), but sourced from the
 * timewait sock).  Returns 1 when a peer entry was found, else 0.
 */
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
    const struct tcp_timewait_sock *tcptw;
    struct inet_peer *peer;

    peer = inet_getpeer(tw->tw_daddr, 1);
    if (!peer)
        return 0;

    tcptw = tcp_twsk((struct sock *)tw);
    /* Only overwrite a newer or non-stale stored timestamp. */
    if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
        (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
         peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
        peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
        peer->tcp_ts = tcptw->tw_ts_recent;
    }
    inet_putpeer(peer);
    return 1;
}
/* Address-family specific operations used by the connection-oriented
 * (inet_connection_sock) layer for plain IPv4 TCP sockets.
 */
struct inet_connection_sock_af_ops ipv4_specific = {
    .queue_xmit = ip_queue_xmit,
    .send_check = tcp_v4_send_check,
    .rebuild_header = inet_sk_rebuild_header,
    .conn_request = tcp_v4_conn_request,
    .syn_recv_sock = tcp_v4_syn_recv_sock,
    .remember_stamp = tcp_v4_remember_stamp,
    .net_header_len = sizeof(struct iphdr),
    .setsockopt = ip_setsockopt,
    .getsockopt = ip_getsockopt,
    .addr2sockaddr = inet_csk_addr2sockaddr,
    .sockaddr_len = sizeof(struct sockaddr_in),
    .bind_conflict = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
    .compat_setsockopt = compat_ip_setsockopt,
    .compat_getsockopt = compat_ip_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
/* RFC 2385 (TCP MD5 signature) callbacks for IPv4 sockets. */
static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
    .md5_lookup = tcp_v4_md5_lookup,
    .calc_md5_hash = tcp_v4_md5_hash_skb,
    .md5_add = tcp_v4_md5_add_func,
    .md5_parse = tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
* sk_alloc() so need not be done here.
*/
/* Initialise a freshly-allocated IPv4 TCP socket: timers, queues,
 * congestion-control defaults and the af-specific operation tables.
 * Always returns 0.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
    struct inet_connection_sock *icsk = inet_csk(sk);
    struct tcp_sock *tp = tcp_sk(sk);

    skb_queue_head_init(&tp->out_of_order_queue);
    tcp_init_xmit_timers(sk);
    tcp_prequeue_init(tp);

    icsk->icsk_rto = TCP_TIMEOUT_INIT;
    tp->mdev = TCP_TIMEOUT_INIT;

    /* So many TCP implementations out there (incorrectly) count the
     * initial SYN frame in their delayed-ACK and congestion control
     * algorithms that we must have the following bandaid to talk
     * efficiently to them. -DaveM
     */
    tp->snd_cwnd = 2;

    /* See draft-stevens-tcpca-spec-01 for discussion of the
     * initialization of these values.
     */
    tp->snd_ssthresh = 0x7fffffff; /* Infinity */
    tp->snd_cwnd_clamp = ~0;
    tp->mss_cache = 536;

    tp->reordering = sysctl_tcp_reordering;
    icsk->icsk_ca_ops = &tcp_init_congestion_ops;

    sk->sk_state = TCP_CLOSE;

    sk->sk_write_space = sk_stream_write_space;
    sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

    icsk->icsk_af_ops = &ipv4_specific;
    icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
    tp->af_specific = &tcp_sock_ipv4_specific;
#endif

    sk->sk_sndbuf = sysctl_tcp_wmem[1];
    sk->sk_rcvbuf = sysctl_tcp_rmem[1];

    /* percpu counter must be updated with BHs disabled. */
    local_bh_disable();
    percpu_counter_inc(&tcp_sockets_allocated);
    local_bh_enable();

    return 0;
}
/* Tear down an IPv4 TCP socket: stop timers, purge every queue,
 * release MD5 state, the bound port and the cached sendmsg page.
 */
void tcp_v4_destroy_sock(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);

    tcp_clear_xmit_timers(sk);

    tcp_cleanup_congestion_control(sk);

    /* Cleanup up the write buffer. */
    tcp_write_queue_purge(sk);

    /* Cleans up our, hopefully empty, out_of_order_queue. */
    __skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
    /* Clean up the MD5 key list, if any */
    if (tp->md5sig_info) {
        tcp_v4_clear_md5_list(sk);
        kfree(tp->md5sig_info);
        tp->md5sig_info = NULL;
    }
#endif

#ifdef CONFIG_NET_DMA
    /* Cleans up our sk_async_wait_queue */
    __skb_queue_purge(&sk->sk_async_wait_queue);
#endif

    /* Clean prequeue, it must be empty really */
    __skb_queue_purge(&tp->ucopy.prequeue);

    /* Clean up a referenced TCP bind bucket. */
    if (inet_csk(sk)->icsk_bind_hash)
        inet_put_port(sk);

    /*
     * If sendmsg cached page exists, toss it.
     */
    if (sk->sk_sndmsg_page) {
        __free_page(sk->sk_sndmsg_page);
        sk->sk_sndmsg_page = NULL;
    }

    percpu_counter_dec(&tcp_sockets_allocated);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */
/* First timewait sock on an ehash twchain, or NULL if the chain is empty. */
static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
    if (hlist_nulls_empty(head))
        return NULL;
    return list_entry(head->first, struct inet_timewait_sock, tw_node);
}

/* Timewait sock following @tw on its chain, or NULL at the nulls marker. */
static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
    if (is_a_nulls(tw->tw_node.next))
        return NULL;
    return hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node);
}
/*
 * Advance the /proc iterator over listening sockets (and the open
 * requests queued on each listener).  @cur is the previous position
 * (NULL to start).  The relevant listening-hash bucket lock and, while
 * in OPENREQ state, the listener's syn_wait_lock, are held across
 * calls; tcp_seq_stop() drops whichever is still held at the end.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
    struct inet_connection_sock *icsk;
    struct hlist_nulls_node *node;
    struct sock *sk = cur;
    struct inet_listen_hashbucket *ilb;
    struct tcp_iter_state *st = seq->private;
    struct net *net = seq_file_net(seq);

    /* First call: lock bucket 0 and scan from its head. */
    if (!sk) {
        st->bucket = 0;
        ilb = &tcp_hashinfo.listening_hash[0];
        spin_lock_bh(&ilb->lock);
        sk = sk_nulls_head(&ilb->head);
        goto get_sk;
    }
    ilb = &tcp_hashinfo.listening_hash[st->bucket];
    ++st->num;

    if (st->state == TCP_SEQ_STATE_OPENREQ) {
        /* Continue walking the current listener's SYN table. */
        struct request_sock *req = cur;

        icsk = inet_csk(st->syn_wait_sk);
        req = req->dl_next;
        while (1) {
            while (req) {
                if (req->rsk_ops->family == st->family) {
                    cur = req;
                    goto out;
                }
                req = req->dl_next;
            }
            if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
                break;
get_req:
            req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
        }
        /* SYN table exhausted: move past the listener itself. */
        sk = sk_next(st->syn_wait_sk);
        st->state = TCP_SEQ_STATE_LISTENING;
        read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
    } else {
        /* Before leaving this listener, dump its open requests. */
        icsk = inet_csk(sk);
        read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        if (reqsk_queue_len(&icsk->icsk_accept_queue))
            goto start_req;
        read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        sk = sk_next(sk);
    }
get_sk:
    sk_nulls_for_each_from(sk, node) {
        if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
            cur = sk;
            goto out;
        }
        icsk = inet_csk(sk);
        read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
            /* Switch to OPENREQ state; lock stays held. */
            st->uid = sock_i_uid(sk);
            st->syn_wait_sk = sk;
            st->state = TCP_SEQ_STATE_OPENREQ;
            st->sbucket = 0;
            goto get_req;
        }
        read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
    }
    /* Bucket exhausted: release it and try the next one. */
    spin_unlock_bh(&ilb->lock);
    if (++st->bucket < INET_LHTABLE_SIZE) {
        ilb = &tcp_hashinfo.listening_hash[st->bucket];
        spin_lock_bh(&ilb->lock);
        sk = sk_nulls_head(&ilb->head);
        goto get_sk;
    }
    cur = NULL;
out:
    return cur;
}
/* Skip *pos entries into the listening-socket walk; *pos is decremented
 * by the number of entries consumed.  Returns the entry reached or NULL.
 */
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
    void *rc;

    for (rc = listening_get_next(seq, NULL); rc && *pos; --*pos)
        rc = listening_get_next(seq, rc);

    return rc;
}
/* Non-zero when the current ehash bucket holds neither established
 * nor timewait sockets (lockless check used as a fast-path skip).
 */
static inline int empty_bucket(struct tcp_iter_state *st)
{
    if (!hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain))
        return 0;
    return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
/*
 * Find the first established (or, failing that, timewait) socket for
 * this family/net in the ehash table.  On success the bucket's lock is
 * left held for the caller (released on the next advance or in
 * tcp_seq_stop()).
 */
static void *established_get_first(struct seq_file *seq)
{
    struct tcp_iter_state *st = seq->private;
    struct net *net = seq_file_net(seq);
    void *rc = NULL;

    for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
        struct sock *sk;
        struct hlist_nulls_node *node;
        struct inet_timewait_sock *tw;
        spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

        /* Lockless fast path for the common case of empty buckets */
        if (empty_bucket(st))
            continue;

        spin_lock_bh(lock);
        sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
            if (sk->sk_family != st->family ||
                !net_eq(sock_net(sk), net)) {
                continue;
            }
            rc = sk;
            goto out;
        }
        /* No live socket matched: try the timewait chain. */
        st->state = TCP_SEQ_STATE_TIME_WAIT;
        inet_twsk_for_each(tw, node,
                           &tcp_hashinfo.ehash[st->bucket].twchain) {
            if (tw->tw_family != st->family ||
                !net_eq(twsk_net(tw), net)) {
                continue;
            }
            rc = tw;
            goto out;
        }
        spin_unlock_bh(lock);
        st->state = TCP_SEQ_STATE_ESTABLISHED;
    }
out:
    return rc;
}

/*
 * Advance past @cur in the ehash walk, crossing from the established
 * chain to the timewait chain and then into the next non-empty bucket
 * as needed.  Bucket-lock ownership is handed over between calls.
 */
static void *established_get_next(struct seq_file *seq, void *cur)
{
    struct sock *sk = cur;
    struct inet_timewait_sock *tw;
    struct hlist_nulls_node *node;
    struct tcp_iter_state *st = seq->private;
    struct net *net = seq_file_net(seq);

    ++st->num;

    if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
        tw = cur;
        tw = tw_next(tw);
get_tw:
        /* Skip timewait socks of other families/namespaces. */
        while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
            tw = tw_next(tw);
        }
        if (tw) {
            cur = tw;
            goto out;
        }
        /* Timewait chain done: drop this bucket and find the next
         * non-empty one.
         */
        spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
        st->state = TCP_SEQ_STATE_ESTABLISHED;

        /* Look for next non empty bucket */
        while (++st->bucket < tcp_hashinfo.ehash_size &&
               empty_bucket(st))
            ;
        if (st->bucket >= tcp_hashinfo.ehash_size)
            return NULL;

        spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
        sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
    } else
        sk = sk_nulls_next(sk);

    sk_nulls_for_each_from(sk, node) {
        if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
            goto found;
    }

    /* Established chain exhausted: fall back to timewait chain. */
    st->state = TCP_SEQ_STATE_TIME_WAIT;
    tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
    goto get_tw;
found:
    cur = sk;
out:
    return cur;
}
/* Skip @pos entries into the established/timewait walk and return the
 * entry reached, or NULL when the table is exhausted first.
 */
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
    void *rc;

    for (rc = established_get_first(seq); rc && pos; --pos)
        rc = established_get_next(seq, rc);

    return rc;
}
/* Position the iterator at absolute index @pos: listening sockets
 * come first, then established/timewait ones.
 */
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
    struct tcp_iter_state *st = seq->private;
    void *rc;

    st->state = TCP_SEQ_STATE_LISTENING;
    rc = listening_get_idx(seq, &pos);
    if (rc)
        return rc;

    /* listening_get_idx() consumed part of pos; continue from there. */
    st->state = TCP_SEQ_STATE_ESTABLISHED;
    return established_get_idx(seq, pos);
}
/* seq_file .start: reset iterator state; position 0 yields the header
 * token, otherwise seek to entry *pos - 1.
 */
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
    struct tcp_iter_state *st = seq->private;

    st->state = TCP_SEQ_STATE_LISTENING;
    st->num = 0;

    if (!*pos)
        return SEQ_START_TOKEN;
    return tcp_get_idx(seq, *pos - 1);
}
/* seq_file .next: advance from @v according to the current walk state,
 * crossing from the listening table into the established table when the
 * former is exhausted.
 */
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    void *rc = NULL;
    struct tcp_iter_state *st;

    if (v == SEQ_START_TOKEN) {
        rc = tcp_get_idx(seq, 0);
        goto out;
    }
    st = seq->private;

    switch (st->state) {
    case TCP_SEQ_STATE_OPENREQ:
    case TCP_SEQ_STATE_LISTENING:
        rc = listening_get_next(seq, v);
        if (!rc) {
            st->state = TCP_SEQ_STATE_ESTABLISHED;
            rc = established_get_first(seq);
        }
        break;
    case TCP_SEQ_STATE_ESTABLISHED:
    case TCP_SEQ_STATE_TIME_WAIT:
        rc = established_get_next(seq, v);
        break;
    }
out:
    ++*pos;
    return rc;
}

/* seq_file .stop: release whichever lock the iterator still holds for
 * its current state (syn_wait_lock, listening-bucket lock, ehash lock).
 */
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
    struct tcp_iter_state *st = seq->private;

    switch (st->state) {
    case TCP_SEQ_STATE_OPENREQ:
        if (v) {
            struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
            read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        }
        /* fallthrough: OPENREQ also holds the listening bucket lock */
    case TCP_SEQ_STATE_LISTENING:
        if (v != SEQ_START_TOKEN)
            spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
        break;
    case TCP_SEQ_STATE_TIME_WAIT:
    case TCP_SEQ_STATE_ESTABLISHED:
        if (v)
            spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
        break;
    }
}
/* /proc open handler: set up net-aware seq_file iteration state and
 * record the address family this entry is showing.
 */
static int tcp_seq_open(struct inode *inode, struct file *file)
{
    struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
    struct tcp_iter_state *st;
    int err;

    err = seq_open_net(inode, file, &afinfo->seq_ops, sizeof(*st));
    if (err < 0)
        return err;

    st = ((struct seq_file *)file->private_data)->private;
    st->family = afinfo->family;
    return 0;
}
/* Register a per-netns /proc/net entry described by @afinfo, wiring up
 * the generic seq_file callbacks.  Returns 0 or -ENOMEM.
 */
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
    struct proc_dir_entry *pde;

    afinfo->seq_fops.open    = tcp_seq_open;
    afinfo->seq_fops.read    = seq_read;
    afinfo->seq_fops.llseek  = seq_lseek;
    afinfo->seq_fops.release = seq_release_net;

    afinfo->seq_ops.start = tcp_seq_start;
    afinfo->seq_ops.next  = tcp_seq_next;
    afinfo->seq_ops.stop  = tcp_seq_stop;

    pde = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
                           &afinfo->seq_fops, afinfo);
    return pde ? 0 : -ENOMEM;
}
/* Remove the per-netns /proc/net entry created by tcp_proc_register(). */
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
    proc_net_remove(net, afinfo->name);
}
/* Format one SYN_RECV open request as a /proc/net/tcp line into @f;
 * *len receives the number of characters written (for padding).
 */
static void get_openreq4(struct sock *sk, struct request_sock *req,
                         struct seq_file *f, int i, int uid, int *len)
{
    const struct inet_request_sock *ireq = inet_rsk(req);
    int ttd = req->expires - jiffies;  /* ticks until retransmit/expiry */

    seq_printf(f, "%4d: %08X:%04X %08X:%04X"
               " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
               i,
               ireq->loc_addr,
               ntohs(inet_sk(sk)->sport),
               ireq->rmt_addr,
               ntohs(ireq->rmt_port),
               TCP_SYN_RECV,
               0, 0, /* could print option size, but that is af dependent. */
               1,    /* timers active (only the expire timer) */
               jiffies_to_clock_t(ttd),
               req->retrans,
               uid,
               0,  /* non standard timer */
               0,  /* open_requests have no inode */
               atomic_read(&sk->sk_refcnt),
               req,
               len);
}

/* Format one full TCP socket as a /proc/net/tcp line into @f. */
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
    int timer_active;
    unsigned long timer_expires;
    struct tcp_sock *tp = tcp_sk(sk);
    const struct inet_connection_sock *icsk = inet_csk(sk);
    struct inet_sock *inet = inet_sk(sk);
    __be32 dest = inet->daddr;
    __be32 src = inet->rcv_saddr;
    __u16 destp = ntohs(inet->dport);
    __u16 srcp = ntohs(inet->sport);

    /* Encode which timer is pending: 1=retransmit, 4=zero-window probe,
     * 2=keepalive, 0=none (historical /proc encoding).
     */
    if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
        timer_active = 1;
        timer_expires = icsk->icsk_timeout;
    } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
        timer_active = 4;
        timer_expires = icsk->icsk_timeout;
    } else if (timer_pending(&sk->sk_timer)) {
        timer_active = 2;
        timer_expires = sk->sk_timer.expires;
    } else {
        timer_active = 0;
        timer_expires = jiffies;
    }

    seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
               "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
               i, src, srcp, dest, destp, sk->sk_state,
               tp->write_seq - tp->snd_una,
               sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
                                            (tp->rcv_nxt - tp->copied_seq),
               timer_active,
               jiffies_to_clock_t(timer_expires - jiffies),
               icsk->icsk_retransmits,
               sock_i_uid(sk),
               icsk->icsk_probes_out,
               sock_i_ino(sk),
               atomic_read(&sk->sk_refcnt), sk,
               jiffies_to_clock_t(icsk->icsk_rto),
               jiffies_to_clock_t(icsk->icsk_ack.ato),
               (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
               tp->snd_cwnd,
               tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
               len);
}

/* Format one TIME_WAIT socket as a /proc/net/tcp line into @f. */
static void get_timewait4_sock(struct inet_timewait_sock *tw,
                               struct seq_file *f, int i, int *len)
{
    __be32 dest, src;
    __u16 destp, srcp;
    int ttd = tw->tw_ttd - jiffies;  /* ticks until the tw sock dies */

    if (ttd < 0)
        ttd = 0;

    dest = tw->tw_daddr;
    src = tw->tw_rcv_saddr;
    destp = ntohs(tw->tw_dport);
    srcp = ntohs(tw->tw_sport);

    seq_printf(f, "%4d: %08X:%04X %08X:%04X"
               " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
               i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
               3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
               atomic_read(&tw->tw_refcnt), tw, len);
}

/* Fixed output line width; every record is padded to TMPSZ - 1 chars. */
#define TMPSZ 150

/* seq_file .show: print the header token or dispatch to the formatter
 * matching the iterator's current state, then pad the line to TMPSZ.
 */
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
    struct tcp_iter_state *st;
    int len;

    if (v == SEQ_START_TOKEN) {
        seq_printf(seq, "%-*s\n", TMPSZ - 1,
                   "  sl  local_address rem_address   st tx_queue "
                   "rx_queue tr tm->when retrnsmt   uid  timeout "
                   "inode");
        goto out;
    }
    st = seq->private;

    switch (st->state) {
    case TCP_SEQ_STATE_LISTENING:
    case TCP_SEQ_STATE_ESTABLISHED:
        get_tcp4_sock(v, seq, st->num, &len);
        break;
    case TCP_SEQ_STATE_OPENREQ:
        get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
        break;
    case TCP_SEQ_STATE_TIME_WAIT:
        get_timewait4_sock(v, seq, st->num, &len);
        break;
    }
    seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
    return 0;
}
/* Descriptor for /proc/net/tcp; remaining fops/ops members are filled
 * in by tcp_proc_register().
 */
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
    .name = "tcp",
    .family = AF_INET,
    .seq_fops = {
        .owner = THIS_MODULE,
    },
    .seq_ops = {
        .show = tcp4_seq_show,
    },
};

/* Per-network-namespace registration of /proc/net/tcp. */
static int tcp4_proc_init_net(struct net *net)
{
    return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void tcp4_proc_exit_net(struct net *net)
{
    tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
    .init = tcp4_proc_init_net,
    .exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
    return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
    unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
/* GRO receive hook for IPv4 TCP: verify (or mark unverifiable) the
 * checksum before handing the segment to the generic TCP GRO engine.
 */
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
    struct iphdr *iph = skb_gro_network_header(skb);

    switch (skb->ip_summed) {
    case CHECKSUM_COMPLETE:
        if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
                          skb->csum)) {
            skb->ip_summed = CHECKSUM_UNNECESSARY;
            break;
        }
        /* checksum mismatch: treat like CHECKSUM_NONE */
        /* fall through */
    case CHECKSUM_NONE:
        /* Can't verify here: flush this skb out of the GRO path. */
        NAPI_GRO_CB(skb)->flush = 1;
        return NULL;
    }

    return tcp_gro_receive(head, skb);
}
EXPORT_SYMBOL(tcp4_gro_receive);
int tcp4_gro_complete(struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
iph->saddr, iph->daddr, 0);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
return tcp_gro_complete(skb);
}
EXPORT_SYMBOL(tcp4_gro_complete);
/* The IPv4 TCP protocol definition hooked into the socket layer. */
struct proto tcp_prot = {
    .name = "TCP",
    .owner = THIS_MODULE,
    .close = tcp_close,
    .connect = tcp_v4_connect,
    .disconnect = tcp_disconnect,
    .accept = inet_csk_accept,
    .ioctl = tcp_ioctl,
    .init = tcp_v4_init_sock,
    .destroy = tcp_v4_destroy_sock,
    .shutdown = tcp_shutdown,
    .setsockopt = tcp_setsockopt,
    .getsockopt = tcp_getsockopt,
    .recvmsg = tcp_recvmsg,
    .backlog_rcv = tcp_v4_do_rcv,
    .hash = inet_hash,
    .unhash = inet_unhash,
    .get_port = inet_csk_get_port,
    .enter_memory_pressure = tcp_enter_memory_pressure,
    .sockets_allocated = &tcp_sockets_allocated,
    .orphan_count = &tcp_orphan_count,
    .memory_allocated = &tcp_memory_allocated,
    .memory_pressure = &tcp_memory_pressure,
    .sysctl_mem = sysctl_tcp_mem,
    .sysctl_wmem = sysctl_tcp_wmem,
    .sysctl_rmem = sysctl_tcp_rmem,
    .max_header = MAX_TCP_HEADER,
    .obj_size = sizeof(struct tcp_sock),
    .slab_flags = SLAB_DESTROY_BY_RCU,
    .twsk_prot = &tcp_timewait_sock_ops,
    .rsk_prot = &tcp_request_sock_ops,
    .h.hashinfo = &tcp_hashinfo,
#ifdef CONFIG_COMPAT
    .compat_setsockopt = compat_tcp_setsockopt,
    .compat_getsockopt = compat_tcp_getsockopt,
#endif
};

/* Create the per-netns TCP control socket (used e.g. to send resets). */
static int __net_init tcp_sk_init(struct net *net)
{
    return inet_ctl_sock_create(&net->ipv4.tcp_sock,
                                PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
    inet_ctl_sock_destroy(net->ipv4.tcp_sock);
    /* Drop any timewait socks left in this dying namespace. */
    inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
    .init = tcp_sk_init,
    .exit = tcp_sk_exit,
};

/* Boot-time initialisation of IPv4 TCP hashing and pernet state. */
void __init tcp_v4_init(void)
{
    inet_hashinfo_init(&tcp_hashinfo);
    if (register_pernet_subsys(&tcp_sk_ops))
        panic("Failed to create the TCP control socket.\n");
}
EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_tcp_low_latency);
| gpl-2.0 |
samurai0000000/qemu | android/base/files/ScopedHandle_unittest.cpp | 17 | 1930 | // Copyright 2014 The Android Open Source Project
//
// This software is licensed under the terms of the GNU General Public
// License version 2, as published by the Free Software Foundation, and
// may be copied, distributed, and modified under those terms.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
#include "android/base/files/ScopedHandle.h"
#include <gtest/gtest.h>
namespace android {
namespace base {
namespace {

// The path of a special file that can always be opened for reading.
// NOTE(review): "NUL" and the CreateFile call below are Win32-specific;
// this test is only meaningful on Windows builds.
static const char kNullFile[] = "NUL";

// Open the null device read-only and return the raw Win32 HANDLE
// (INVALID_HANDLE_VALUE on failure).
HANDLE OpenNull() {
    return ::CreateFile(kNullFile,
                        GENERIC_READ,
                        FILE_SHARE_READ|FILE_SHARE_WRITE,
                        NULL,
                        OPEN_EXISTING,
                        FILE_ATTRIBUTE_NORMAL,
                        NULL);
}

}  // namespace
// A default-constructed ScopedHandle is invalid and wraps
// INVALID_HANDLE_VALUE.
TEST(ScopedHandle, DefaultConstructor) {
    ScopedHandle h;
    EXPECT_FALSE(h.valid());
    EXPECT_EQ(INVALID_HANDLE_VALUE, h.get());
}

// Constructing from a live handle yields a valid wrapper.
TEST(ScopedHandle, Constructor) {
    ScopedHandle h(OpenNull());
    EXPECT_TRUE(h.valid());
}

// release() transfers ownership: wrapper becomes invalid and the
// caller must close the returned handle.
TEST(ScopedHandle, Release) {
    ScopedHandle h(OpenNull());
    EXPECT_TRUE(h.valid());
    HANDLE handle = h.release();
    EXPECT_FALSE(h.valid());
    EXPECT_NE(INVALID_HANDLE_VALUE, handle);
    ::CloseHandle(handle);
}

// close() releases the handle and invalidates the wrapper.
TEST(ScopedHandle, Close) {
    ScopedHandle h(OpenNull());
    EXPECT_TRUE(h.valid());
    h.close();
    EXPECT_FALSE(h.valid());
}

// swap() exchanges the wrapped handles (and thus validity) of two
// ScopedHandle instances.
TEST(ScopedHandle, Swap) {
    ScopedHandle h1;
    ScopedHandle h2(OpenNull());
    EXPECT_FALSE(h1.valid());
    EXPECT_TRUE(h2.valid());
    h1.swap(&h2);
    EXPECT_FALSE(h2.valid());
    EXPECT_TRUE(h1.valid());
}
} // namespace base
} // namespace android
| gpl-2.0 |
yangxiaohua1977/sound-linux-4.5.7 | drivers/spi/spi-bcm2835aux.c | 17 | 14221 | /*
* Driver for Broadcom BCM2835 auxiliary SPI Controllers
*
* the driver does not rely on the native chipselects at all
* but only uses the gpio type chipselects
*
* Based on: spi-bcm2835.c
*
* Copyright (C) 2015 Martin Sperl
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
/*
* spi register defines
*
* note there is garbage in the "official" documentation,
* so some data is taken from the file:
* brcm_usrlib/dag/vmcsx/vcinclude/bcm2708_chip/aux_io.h
* inside of:
* http://www.broadcom.com/docs/support/videocore/Brcm_Android_ICS_Graphics_Stack.tar.gz
*/
/* SPI register offsets */
#define BCM2835_AUX_SPI_CNTL0 0x00
#define BCM2835_AUX_SPI_CNTL1 0x04
#define BCM2835_AUX_SPI_STAT 0x08
#define BCM2835_AUX_SPI_PEEK 0x0C
#define BCM2835_AUX_SPI_IO 0x20
#define BCM2835_AUX_SPI_TXHOLD 0x30
/* Bitfields in CNTL0 */
#define BCM2835_AUX_SPI_CNTL0_SPEED 0xFFF00000
#define BCM2835_AUX_SPI_CNTL0_SPEED_MAX 0xFFF
#define BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT 20
#define BCM2835_AUX_SPI_CNTL0_CS 0x000E0000
#define BCM2835_AUX_SPI_CNTL0_POSTINPUT 0x00010000
#define BCM2835_AUX_SPI_CNTL0_VAR_CS 0x00008000
#define BCM2835_AUX_SPI_CNTL0_VAR_WIDTH 0x00004000
#define BCM2835_AUX_SPI_CNTL0_DOUTHOLD 0x00003000
#define BCM2835_AUX_SPI_CNTL0_ENABLE 0x00000800
#define BCM2835_AUX_SPI_CNTL0_CPHA_IN 0x00000400
#define BCM2835_AUX_SPI_CNTL0_CLEARFIFO 0x00000200
#define BCM2835_AUX_SPI_CNTL0_CPHA_OUT 0x00000100
#define BCM2835_AUX_SPI_CNTL0_CPOL 0x00000080
#define BCM2835_AUX_SPI_CNTL0_MSBF_OUT 0x00000040
#define BCM2835_AUX_SPI_CNTL0_SHIFTLEN 0x0000003F
/* Bitfields in CNTL1 */
#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700
#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000080
#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000040
#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002
#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001
/* Bitfields in STAT */
#define BCM2835_AUX_SPI_STAT_TX_LVL 0xFF000000
#define BCM2835_AUX_SPI_STAT_RX_LVL 0x00FF0000
#define BCM2835_AUX_SPI_STAT_TX_FULL 0x00000400
#define BCM2835_AUX_SPI_STAT_TX_EMPTY 0x00000200
#define BCM2835_AUX_SPI_STAT_RX_FULL 0x00000100
#define BCM2835_AUX_SPI_STAT_RX_EMPTY 0x00000080
#define BCM2835_AUX_SPI_STAT_BUSY 0x00000040
#define BCM2835_AUX_SPI_STAT_BITCOUNT 0x0000003F
/* timeout values */
#define BCM2835_AUX_SPI_POLLING_LIMIT_US 30
#define BCM2835_AUX_SPI_POLLING_JIFFIES 2
#define BCM2835_AUX_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS)
/* Per-controller driver state for the BCM2835 auxiliary SPI block. */
struct bcm2835aux_spi {
    void __iomem *regs;  /* mapped controller registers */
    struct clk *clk;     /* bus clock feeding the SPI block */
    int irq;             /* controller interrupt line */
    u32 cntl[2];         /* cached CNTL0/CNTL1 values for this transfer */

    const u8 *tx_buf;    /* next byte to transmit (may be NULL) */
    u8 *rx_buf;          /* next byte to receive into (may be NULL) */
    int tx_len;          /* bytes left to push into the TX FIFO */
    int rx_len;          /* bytes left to drain from the RX FIFO */
    int pending;         /* bytes written to TX but not yet read back */
};
/* Read a 32-bit controller register at byte offset @reg. */
static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned reg)
{
    return readl(bs->regs + reg);
}

/* Write @val to the 32-bit controller register at byte offset @reg. */
static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned reg,
                                 u32 val)
{
    writel(val, bs->regs + reg);
}
/* Drain one FIFO entry (up to 3 bytes, big-endian packed by the
 * variable-width mode used in bcm2835aux_wr_fifo()) into rx_buf.
 */
static inline void bcm2835aux_rd_fifo(struct bcm2835aux_spi *bs)
{
    u32 data;
    int count = min(bs->rx_len, 3);

    data = bcm2835aux_rd(bs, BCM2835_AUX_SPI_IO);
    if (bs->rx_buf) {
        switch (count) {
        case 4:
            /* NOTE(review): unreachable - count is clamped to 3 above,
             * matching the 3-byte chunks written by bcm2835aux_wr_fifo().
             * Kept as defensive dead code.
             */
            *bs->rx_buf++ = (data >> 24) & 0xff;
            /* fallthrough */
        case 3:
            *bs->rx_buf++ = (data >> 16) & 0xff;
            /* fallthrough */
        case 2:
            *bs->rx_buf++ = (data >> 8) & 0xff;
            /* fallthrough */
        case 1:
            *bs->rx_buf++ = (data >> 0) & 0xff;
            /* fallthrough - no default */
        }
    }
    bs->rx_len -= count;
    bs->pending -= count;
}
/* Push up to 3 bytes from tx_buf into the FIFO as one variable-width
 * word: payload packed big-endian in bits 23..0, bit length in the top
 * byte.  Uses TXHOLD (keeps CS asserted) unless this is the final word.
 */
static inline void bcm2835aux_wr_fifo(struct bcm2835aux_spi *bs)
{
    u32 data;
    u8 byte;
    int count;
    int i;

    /* gather up to 3 bytes to write to the FIFO */
    count = min(bs->tx_len, 3);
    data = 0;
    for (i = 0; i < count; i++) {
        /* NULL tx_buf means "transmit zeros" (rx-only transfer) */
        byte = bs->tx_buf ? *bs->tx_buf++ : 0;
        data |= byte << (8 * (2 - i));
    }

    /* and set the variable bit-length */
    data |= (count * 8) << 24;

    /* and decrement length */
    bs->tx_len -= count;
    bs->pending += count;

    /* write to the correct TX-register */
    if (bs->tx_len)
        bcm2835aux_wr(bs, BCM2835_AUX_SPI_TXHOLD, data);
    else
        bcm2835aux_wr(bs, BCM2835_AUX_SPI_IO, data);
}
/* Quiesce the controller: mask interrupts, disable the block and flush
 * both FIFOs.  Called after each transfer and on error paths.
 */
static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
{
    /* disable spi clearing fifo and interrupts */
    bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, 0);
    bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0,
                  BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
}
/* IRQ handler: shuttle data both ways between the buffers and the
 * FIFOs; when the transfer is fully received, reset the hardware and
 * complete the transfer so the SPI core can continue.
 */
static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
{
    struct spi_master *master = dev_id;
    struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
    irqreturn_t ret = IRQ_NONE;

    /* check if we have data to read */
    while (bs->rx_len &&
           (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
              BCM2835_AUX_SPI_STAT_RX_EMPTY))) {
        bcm2835aux_rd_fifo(bs);
        ret = IRQ_HANDLED;
    }

    /* check if we have data to write */
    while (bs->tx_len &&
           (bs->pending < 12) && /* keep at most 12 bytes in flight */
           (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
              BCM2835_AUX_SPI_STAT_TX_FULL))) {
        bcm2835aux_wr_fifo(bs);
        ret = IRQ_HANDLED;
    }

    /* and check if we have reached "done" */
    while (bs->rx_len &&
           (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
              BCM2835_AUX_SPI_STAT_BUSY))) {
        bcm2835aux_rd_fifo(bs);
        ret = IRQ_HANDLED;
    }

    /* and if rx_len is 0 then wake up completion and disable spi */
    if (!bs->rx_len) {
        bcm2835aux_spi_reset_hw(bs);
        complete(&master->xfer_completion);
    }

    /* and return */
    return ret;
}
/* Arm TX-empty/idle interrupts and return 1 so the SPI core waits for
 * xfer_completion (signalled from the IRQ handler).
 */
static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
                                             struct spi_device *spi,
                                             struct spi_transfer *tfr)
{
    struct bcm2835aux_spi *bs = spi_master_get_devdata(master);

    /* enable interrupts */
    bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1] |
                  BCM2835_AUX_SPI_CNTL1_TXEMPTY |
                  BCM2835_AUX_SPI_CNTL1_IDLE);

    /* and wait for finish... */
    return 1;
}

/* Interrupt-driven transfer: program the controller, pre-fill the TX
 * FIFO, then hand off to the IRQ machinery above.
 */
static int bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
                                           struct spi_device *spi,
                                           struct spi_transfer *tfr)
{
    struct bcm2835aux_spi *bs = spi_master_get_devdata(master);

    /* fill in registers and fifos before enabling interrupts */
    bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
    bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);

    /* fill in tx fifo with data before enabling interrupts */
    while ((bs->tx_len) &&
           (bs->pending < 12) &&
           (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
              BCM2835_AUX_SPI_STAT_TX_FULL))) {
        bcm2835aux_wr_fifo(bs);
    }

    /* now run the interrupt mode */
    return __bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
}
/* Busy-poll a (short) transfer to completion; if it takes longer than
 * the polling budget, fall back to interrupt mode for the remainder.
 * Returns 0 when done, or 1 (via the irq path) if the core must wait.
 */
static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
                                            struct spi_device *spi,
                                            struct spi_transfer *tfr)
{
    struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
    unsigned long timeout;
    u32 stat;

    /* configure spi */
    bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
    bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);

    /* set the timeout */
    timeout = jiffies + BCM2835_AUX_SPI_POLLING_JIFFIES;

    /* loop until finished the transfer */
    while (bs->rx_len) {
        /* read status */
        stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);

        /* fill in tx fifo with remaining data */
        if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) {
            bcm2835aux_wr_fifo(bs);
            continue;
        }

        /* read data from fifo for both cases */
        if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) {
            bcm2835aux_rd_fifo(bs);
            continue;
        }
        if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) {
            bcm2835aux_rd_fifo(bs);
            continue;
        }

        /* there is still data pending to read check the timeout */
        if (bs->rx_len && time_after(jiffies, timeout)) {
            dev_dbg_ratelimited(&spi->dev,
                                "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
                                jiffies - timeout,
                                bs->tx_len, bs->rx_len);
            /* forward to interrupt handler */
            return __bcm2835aux_spi_transfer_one_irq(master,
                                                     spi, tfr);
        }
    }

    /* Transfer complete - reset SPI HW */
    bcm2835aux_spi_reset_hw(bs);

    /* and return without waiting for completion */
    return 0;
}
/*
 * Set up one spi_transfer and dispatch it to the polling or the
 * interrupt-driven path depending on its estimated duration.
 */
static int bcm2835aux_spi_transfer_one(struct spi_master *master,
				       struct spi_device *spi,
				       struct spi_transfer *tfr)
{
	struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
	unsigned long spi_hz, clk_hz, speed;
	unsigned long spi_used_hz;
	unsigned long long xfer_time_us;

	/* calculate the registers to handle
	 *
	 * note that we use the variable data mode, which
	 * is not optimal for longer transfers as we waste registers
	 * resulting (potentially) in more interrupts when transferring
	 * more than 12 bytes
	 */
	bs->cntl[0] = BCM2835_AUX_SPI_CNTL0_ENABLE |
		      BCM2835_AUX_SPI_CNTL0_VAR_WIDTH |
		      BCM2835_AUX_SPI_CNTL0_MSBF_OUT;
	bs->cntl[1] = BCM2835_AUX_SPI_CNTL1_MSBF_IN;

	/* set clock: sclk = clk_hz / (2 * (speed + 1)), clamped to the
	 * field width of the SPEED bits in CNTL0
	 */
	spi_hz = tfr->speed_hz;
	clk_hz = clk_get_rate(bs->clk);

	if (spi_hz >= clk_hz / 2) {
		speed = 0;
	} else if (spi_hz) {
		speed = DIV_ROUND_UP(clk_hz, 2 * spi_hz) - 1;
		if (speed > BCM2835_AUX_SPI_CNTL0_SPEED_MAX)
			speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX;
	} else { /* the slowest we can go */
		speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX;
	}
	bs->cntl[0] |= speed << BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT;

	/* the effective bus clock actually achieved */
	spi_used_hz = clk_hz / (2 * (speed + 1));

	/* handle all the modes */
	if (spi->mode & SPI_CPOL)
		bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPOL;
	if (spi->mode & SPI_CPHA)
		bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPHA_OUT |
			       BCM2835_AUX_SPI_CNTL0_CPHA_IN;

	/* set transmit buffers and length */
	bs->tx_buf = tfr->tx_buf;
	bs->rx_buf = tfr->rx_buf;
	bs->tx_len = tfr->len;
	bs->rx_len = tfr->len;
	bs->pending = 0;

	/* calculate the estimated time in us the transfer runs
	 * note that there are are 2 idle clocks after each
	 * chunk getting transferred - in our case the chunk size
	 * is 3 bytes, so we approximate this by 9 bits/byte
	 */
	xfer_time_us = tfr->len * 9 * 1000000;
	do_div(xfer_time_us, spi_used_hz);

	/* run in polling mode for short transfers */
	if (xfer_time_us < BCM2835_AUX_SPI_POLLING_LIMIT_US)
		return bcm2835aux_spi_transfer_one_poll(master, spi, tfr);

	/* run in interrupt mode for all others */
	return bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
}
/* SPI-core error hook: put the aux SPI block back into its reset state. */
static void bcm2835aux_spi_handle_err(struct spi_master *master,
				      struct spi_message *msg)
{
	struct bcm2835aux_spi *bs = spi_master_get_devdata(master);

	bcm2835aux_spi_reset_hw(bs);
}
/*
 * Probe one BCM2835 aux SPI block: allocate the SPI master, map the
 * registers, acquire clock and irq, enable the hardware and register
 * the controller with the SPI core.
 *
 * Returns 0 on success or a negative errno.  All resources are devm
 * managed except the clock enable, which is undone on the error path.
 */
static int bcm2835aux_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct bcm2835aux_spi *bs;
	struct resource *res;
	unsigned long clk_hz;
	int err;

	master = spi_alloc_master(&pdev->dev, sizeof(*bs));
	if (!master) {
		dev_err(&pdev->dev, "spi_alloc_master() failed\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, master);
	master->mode_bits = BCM2835_AUX_SPI_MODE_BITS;
	master->bits_per_word_mask = SPI_BPW_MASK(8);
	master->num_chipselect = -1;
	master->transfer_one = bcm2835aux_spi_transfer_one;
	master->handle_err = bcm2835aux_spi_handle_err;
	master->dev.of_node = pdev->dev.of_node;

	bs = spi_master_get_devdata(master);

	/* the main register area */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bs->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(bs->regs)) {
		err = PTR_ERR(bs->regs);
		goto out_master_put;
	}

	/*
	 * devm_clk_get() never returns NULL on failure, only an ERR_PTR.
	 * The old "(!bs->clk) ||" check would have turned a hypothetical
	 * NULL into err = PTR_ERR(NULL) == 0, making probe report
	 * success with an unusable device - so test IS_ERR() alone.
	 */
	bs->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(bs->clk)) {
		err = PTR_ERR(bs->clk);
		dev_err(&pdev->dev, "could not get clk: %d\n", err);
		goto out_master_put;
	}

	bs->irq = platform_get_irq(pdev, 0);
	if (bs->irq <= 0) {
		dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
		err = bs->irq ? bs->irq : -ENODEV;
		goto out_master_put;
	}

	/* this also enables the HW block */
	err = clk_prepare_enable(bs->clk);
	if (err) {
		dev_err(&pdev->dev, "could not prepare clock: %d\n", err);
		goto out_master_put;
	}

	/* just checking if the clock returns a sane value */
	clk_hz = clk_get_rate(bs->clk);
	if (!clk_hz) {
		dev_err(&pdev->dev, "clock returns 0 Hz\n");
		err = -ENODEV;
		goto out_clk_disable;
	}

	/* reset SPI-HW block into a known state */
	bcm2835aux_spi_reset_hw(bs);

	err = devm_request_irq(&pdev->dev, bs->irq,
			       bcm2835aux_spi_interrupt,
			       IRQF_SHARED,
			       dev_name(&pdev->dev), master);
	if (err) {
		dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
		goto out_clk_disable;
	}

	err = devm_spi_register_master(&pdev->dev, master);
	if (err) {
		dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
		goto out_clk_disable;
	}

	return 0;

out_clk_disable:
	clk_disable_unprepare(bs->clk);
out_master_put:
	spi_master_put(master);
	return err;
}
/* Tear down: quiesce the hardware, then gate its clock to power it off. */
static int bcm2835aux_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct bcm2835aux_spi *bs = spi_master_get_devdata(master);

	bcm2835aux_spi_reset_hw(bs);

	/* disable the HW block by releasing the clock */
	clk_disable_unprepare(bs->clk);

	return 0;
}
/* devicetree match table for the BCM2835 aux SPI blocks (SPI1/SPI2) */
static const struct of_device_id bcm2835aux_spi_match[] = {
	{ .compatible = "brcm,bcm2835-aux-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, bcm2835aux_spi_match);

/* platform driver glue - probe/remove bound via the OF match above */
static struct platform_driver bcm2835aux_spi_driver = {
	.driver = {
		.name = "spi-bcm2835aux",
		.of_match_table = bcm2835aux_spi_match,
	},
	.probe = bcm2835aux_spi_probe,
	.remove = bcm2835aux_spi_remove,
};
module_platform_driver(bcm2835aux_spi_driver);

MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835 aux");
MODULE_AUTHOR("Martin Sperl <kernel@martin.sperl.org>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
janrinze/loox7xxport.loox2624 | arch/arm/mach-omap1/board-palmtt.c | 17 | 8316 | /*
* linux/arch/arm/mach-omap1/board-palmtt.c
*
* Modified from board-palmtt2.c
*
* Modified and amended for Palm Tungsten|T
* by Marek Vasut <marek.vasut@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/notifier.h>
#include <linux/clk.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/leds.h>
#include <asm/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/flash.h>
#include <asm/arch/led.h>
#include <asm/arch/mcbsp.h>
#include <asm/arch/gpio.h>
#include <asm/arch/mux.h>
#include <asm/arch/usb.h>
#include <asm/arch/dma.h>
#include <asm/arch/tc.h>
#include <asm/arch/board.h>
#include <asm/arch/irda.h>
#include <asm/arch/keypad.h>
#include <asm/arch/common.h>
#include <asm/arch/omap-alsa.h>
#include <linux/input.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
/* matrix keypad map: KEY(row, col, keycode), zero-terminated */
static int palmtt_keymap[] = {
	KEY(0, 0, KEY_ESC),
	KEY(0, 1, KEY_SPACE),
	KEY(0, 2, KEY_LEFTCTRL),
	KEY(0, 3, KEY_TAB),
	KEY(0, 4, KEY_ENTER),
	KEY(1, 0, KEY_LEFT),
	KEY(1, 1, KEY_DOWN),
	KEY(1, 2, KEY_UP),
	KEY(1, 3, KEY_RIGHT),
	KEY(2, 0, KEY_SLEEP),
	KEY(2, 4, KEY_Y),
	0
};

/*
 * NOR flash layout; PalmOS regions are marked read-only via
 * MTD_WRITEABLE in mask_flags (the flag *removes* writability).
 */
static struct mtd_partition palmtt_partitions[] = {
	{
		.name		= "write8k",
		.offset		= 0,
		.size		= SZ_8K,
		.mask_flags	= 0,
	},
	{
		.name		= "PalmOS-BootLoader(ro)",
		.offset		= SZ_8K,
		.size		= 7 * SZ_8K,
		.mask_flags	= MTD_WRITEABLE,
	},
	{
		.name		= "u-boot",
		.offset		= MTDPART_OFS_APPEND,
		.size		= 8 * SZ_8K,
		.mask_flags	= 0,
	},
	{
		.name		= "PalmOS-FS(ro)",
		.offset		= MTDPART_OFS_APPEND,
		.size		= 7 * SZ_1M + 4 * SZ_64K - 16 * SZ_8K,
		.mask_flags	= MTD_WRITEABLE,
	},
	{
		.name		= "u-boot(rez)",
		.offset		= MTDPART_OFS_APPEND,
		.size		= SZ_128K,
		.mask_flags	= 0
	},
	{
		.name		= "empty",
		.offset		= MTDPART_OFS_APPEND,
		.size		= MTDPART_SIZ_FULL,
		.mask_flags	= 0
	}
};

/* 16-bit-wide CFI flash probed with the partition table above */
static struct flash_platform_data palmtt_flash_data = {
	.map_name	= "cfi_probe",
	.width		= 2,
	.parts		= palmtt_partitions,
	.nr_parts	= ARRAY_SIZE(palmtt_partitions),
};

/* flash lives in the first 8MB of chip-select 0 */
static struct resource palmtt_flash_resource = {
	.start	= OMAP_CS0_PHYS,
	.end	= OMAP_CS0_PHYS + SZ_8M - 1,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device palmtt_flash_device = {
	.name		= "omapflash",
	.id		= 0,
	.dev		= {
		.platform_data	= &palmtt_flash_data,
	},
	.num_resources	= 1,
	.resource	= &palmtt_flash_resource,
};
#define DEFAULT_BITPERSAMPLE 16

/* McBSP register setup for the audio link; the McBSP is clock slave */
static struct omap_mcbsp_reg_cfg mcbsp_regs = {
	.spcr2 = FREE | FRST | GRST | XRST | XINTM(3),
	.spcr1 = RINTM(3) | RRST,
	.rcr2 = RPHASE | RFRLEN2(OMAP_MCBSP_WORD_8) |
		RWDLEN2(OMAP_MCBSP_WORD_16) | RDATDLY(0),
	.rcr1 = RFRLEN1(OMAP_MCBSP_WORD_8) |
		RWDLEN1(OMAP_MCBSP_WORD_16),
	.xcr2 = XPHASE | XFRLEN2(OMAP_MCBSP_WORD_8) |
		XWDLEN2(OMAP_MCBSP_WORD_16) | XDATDLY(0) | XFIG,
	.xcr1 = XFRLEN1(OMAP_MCBSP_WORD_8) |
		XWDLEN1(OMAP_MCBSP_WORD_16),
	.srgr1 = FWID(DEFAULT_BITPERSAMPLE - 1),
	.srgr2 = GSYNC | CLKSP | FSGM |
		 FPER(DEFAULT_BITPERSAMPLE * 2 - 1),
	.pcr0 = CLKXP | CLKRP,	/* mcbsp: slave */
};

/* AIC23 codec hooks are not wired up yet - all callbacks left NULL */
static struct omap_alsa_codec_config alsa_config = {
	.name			= "PalmTT AIC23",
	.mcbsp_regs_alsa	= &mcbsp_regs,
	.codec_configure_dev	= NULL, // aic23_configure,
	.codec_set_samplerate	= NULL, // aic23_set_samplerate,
	.codec_clock_setup	= NULL, // aic23_clock_setup,
	.codec_clock_on		= NULL, // aic23_clock_on,
	.codec_clock_off	= NULL, // aic23_clock_off,
	.get_default_samplerate	= NULL, // aic23_get_default_samplerate,
};

/* ALSA device bound to McBSP1 */
static struct platform_device palmtt_mcbsp1_device = {
	.name	= "omap_alsa_mcbsp",
	.id	= 1,
	.dev	= {
		.platform_data	= &alsa_config,
	},
};
/* keypad interrupt line */
static struct resource palmtt_kp_resources[] = {
	[0] = {
		.start	= INT_KEYBOARD,
		.end	= INT_KEYBOARD,
		.flags	= IORESOURCE_IRQ,
	},
};

/* 6x3 matrix wired to the keymap defined above */
static struct omap_kp_platform_data palmtt_kp_data = {
	.rows	= 6,
	.cols	= 3,
	.keymap = palmtt_keymap,
};

static struct platform_device palmtt_kp_device = {
	.name		= "omap-keypad",
	.id		= -1,
	.dev		= {
		.platform_data	= &palmtt_kp_data,
	},
	.num_resources	= ARRAY_SIZE(palmtt_kp_resources),
	.resource	= palmtt_kp_resources,
};

static struct platform_device palmtt_lcd_device = {
	.name	= "lcd_palmtt",
	.id	= -1,
};

/* SIR-mode IrDA on UART3, DMA-driven in both directions */
static struct omap_irda_config palmtt_irda_config = {
	.transceiver_cap	= IR_SIRMODE,
	.rx_channel		= OMAP_DMA_UART3_RX,
	.tx_channel		= OMAP_DMA_UART3_TX,
	.dest_start		= UART3_THR,
	.src_start		= UART3_RHR,
	.tx_trigger		= 0,
	.rx_trigger		= 0,
};

static struct resource palmtt_irda_resources[] = {
	[0] = {
		.start	= INT_UART3,
		.end	= INT_UART3,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device palmtt_irda_device = {
	.name		= "omapirda",
	.id		= -1,
	.dev		= {
		.platform_data	= &palmtt_irda_config,
	},
	.num_resources	= ARRAY_SIZE(palmtt_irda_resources),
	.resource	= palmtt_irda_resources,
};

/* board-specific bit-banged/MicroWire SPI host */
static struct platform_device palmtt_spi_device = {
	.name	= "spi_palmtt",
	.id	= -1,
};

/* LCD backlight, default brightness 0xa0 out of 0xff */
static struct omap_backlight_config palmtt_backlight_config = {
	.default_intensity	= 0xa0,
};

static struct platform_device palmtt_backlight_device = {
	.name	= "omap-bl",
	.id	= -1,
	.dev	= {
		.platform_data	= &palmtt_backlight_config,
	},
};

/* single GPIO-driven LED */
static struct omap_led_config palmtt_led_config[] = {
	{
		.cdev	= {
			.name	= "palmtt:led0",
		},
		.gpio	= PALMTT_LED_GPIO,
	},
};

static struct omap_led_platform_data palmtt_led_data = {
	.nr_leds	= ARRAY_SIZE(palmtt_led_config),
	.leds		= palmtt_led_config,
};

static struct platform_device palmtt_led_device = {
	.name	= "omap-led",
	.id	= -1,
	.dev	= {
		.platform_data	= &palmtt_led_data,
	},
};

/* everything registered in one go by omap_palmtt_init() */
static struct platform_device *palmtt_devices[] __initdata = {
	&palmtt_flash_device,
	&palmtt_mcbsp1_device,
	&palmtt_kp_device,
	&palmtt_lcd_device,
	&palmtt_irda_device,
	&palmtt_spi_device,
	&palmtt_backlight_device,
	&palmtt_led_device,
};
/* ads7846 pen-down callback: GPIO 6 is active-low (low = pen down) */
static int palmtt_get_pendown_state(void)
{
	return omap_get_gpio_datain(6) ? 0 : 1;
}
/* ads7846 touchscreen parameters (plate resistances are board-measured) */
static const struct ads7846_platform_data palmtt_ts_info = {
	.model			= 7846,
	.vref_delay_usecs	= 100,	/* internal, no capacitor */
	.x_plate_ohms		= 419,
	.y_plate_ohms		= 486,
	.get_pendown_state	= palmtt_get_pendown_state,
};

static struct spi_board_info __initdata palmtt_boardinfo[] = {
	{
		/* MicroWire (bus 2) CS0 has an ads7846e */
		.modalias	= "ads7846",
		.platform_data	= &palmtt_ts_info,
		.irq		= OMAP_GPIO_IRQ(6),
		.max_speed_hz	= 120000 /* max sample rate at 3V */
					* 26 /* command + data + overhead */,
		.bus_num	= 2,
		.chip_select	= 0,
	}
};
/* early irq init: bring up common OMAP1 hardware, then the irq chip */
static void __init omap_palmtt_init_irq(void)
{
	omap1_init_common_hw();
	omap_init_irq();
}
/* USB in device (gadget) mode only, 2 data pins on port 0 */
static struct omap_usb_config palmtt_usb_config __initdata = {
	.register_dev	= 1,
	.hmc_mode	= 0,
	.pins[0]	= 2,
};

/* use the OMAP-internal LCD controller */
static struct omap_lcd_config palmtt_lcd_config __initdata = {
	.ctrl_name	= "internal",
};

/* UART1 and UART2 enabled; UART3 left to the IrDA driver */
static struct omap_uart_config palmtt_uart_config __initdata = {
	.enabled_uarts = (1 << 0) | (1 << 1) | (0 << 2),
};

/* board config tags consumed by the OMAP core code */
static struct omap_board_config_kernel palmtt_config[] = {
	{ OMAP_TAG_USB,		&palmtt_usb_config	},
	{ OMAP_TAG_LCD,		&palmtt_lcd_config	},
	{ OMAP_TAG_UART,	&palmtt_uart_config	},
};
/* Arm (mode != 0) or disarm (mode == 0) the MPU watchdog timer. */
static void __init omap_mpu_wdt_mode(int mode)
{
	if (mode) {
		omap_writew(0x8000, OMAP_WDT_TIMER_MODE);
	} else {
		/* the two-write magic sequence that stops the watchdog */
		omap_writew(0x00f5, OMAP_WDT_TIMER_MODE);
		omap_writew(0x00a0, OMAP_WDT_TIMER_MODE);
	}
}
/* machine init: stop the watchdog, publish board config, add devices */
static void __init omap_palmtt_init(void)
{
	omap_mpu_wdt_mode(0);

	omap_board_config = palmtt_config;
	omap_board_config_size = ARRAY_SIZE(palmtt_config);

	platform_add_devices(palmtt_devices, ARRAY_SIZE(palmtt_devices));
	spi_register_board_info(palmtt_boardinfo,ARRAY_SIZE(palmtt_boardinfo));
	omap_serial_init();
}
/* static io mapping shared by all OMAP1 boards */
static void __init omap_palmtt_map_io(void)
{
	omap1_map_common_io();
}
/* machine descriptor - ties the init hooks above to machine id OMAP_PALMTT */
MACHINE_START(OMAP_PALMTT, "OMAP1510 based Palm Tungsten|T")
	.phys_io	= 0xfff00000,
	.io_pg_offst	= ((0xfef00000) >> 18) & 0xfffc,
	.boot_params	= 0x10000100,
	.map_io		= omap_palmtt_map_io,
	.init_irq	= omap_palmtt_init_irq,
	.init_machine	= omap_palmtt_init,
	.timer		= &omap_timer,
MACHINE_END
| gpl-2.0 |
sonic2kk/dolphin | Source/Core/VideoCommon/Statistics.cpp | 17 | 5376 | // Copyright 2008 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include "VideoCommon/Statistics.h"
#include <utility>
#include <imgui.h>
#include "VideoCommon/VideoCommon.h"
#include "VideoCommon/VideoConfig.h"
Statistics g_stats;
// Zero all per-frame counters at the start of a new frame.
void Statistics::ResetFrame()
{
  this_frame = {};
}
// Swap the display-list counters with their immediate counterparts, so
// work recorded while executing a display list is attributed correctly.
void Statistics::SwapDL()
{
  std::swap(this_frame.num_dl_prims, this_frame.num_prims);
  std::swap(this_frame.num_xf_loads_in_dl, this_frame.num_xf_loads);
  std::swap(this_frame.num_cp_loads_in_dl, this_frame.num_cp_loads);
  std::swap(this_frame.num_bp_loads_in_dl, this_frame.num_bp_loads);
}
// Render the statistics overlay as a two-column ImGui window
// (name | value).  Software-renderer-only counters are shown only when
// the active backend is APIType::Nothing.
void Statistics::Display() const
{
  const float scale = ImGui::GetIO().DisplayFramebufferScale.x;
  ImGui::SetNextWindowPos(ImVec2(10.0f * scale, 10.0f * scale), ImGuiCond_FirstUseEver);
  ImGui::SetNextWindowSizeConstraints(ImVec2(275.0f * scale, 400.0f * scale),
                                      ImGui::GetIO().DisplaySize);
  if (!ImGui::Begin("Statistics", nullptr, ImGuiWindowFlags_NoNavInputs))
  {
    ImGui::End();
    return;
  }

  ImGui::Columns(2, "Statistics", true);

  // Helper: emit one "name | formatted value" row.
  const auto draw_statistic = [](const char* name, const char* format, auto&&... args) {
    ImGui::TextUnformatted(name);
    ImGui::NextColumn();
    ImGui::Text(format, std::forward<decltype(args)>(args)...);
    ImGui::NextColumn();
  };

  if (g_ActiveConfig.backend_info.api_type == APIType::Nothing)
  {
    draw_statistic("Objects", "%d", this_frame.num_drawn_objects);
    draw_statistic("Vertices Loaded", "%d", this_frame.num_vertices_loaded);
    draw_statistic("Triangles Input", "%d", this_frame.num_triangles_in);
    draw_statistic("Triangles Rejected", "%d", this_frame.num_triangles_rejected);
    draw_statistic("Triangles Culled", "%d", this_frame.num_triangles_culled);
    draw_statistic("Triangles Clipped", "%d", this_frame.num_triangles_clipped);
    draw_statistic("Triangles Drawn", "%d", this_frame.num_triangles_drawn);
    draw_statistic("Rasterized Pix", "%d", this_frame.rasterized_pixels);
    draw_statistic("TEV Pix In", "%d", this_frame.tev_pixels_in);
    draw_statistic("TEV Pix Out", "%d", this_frame.tev_pixels_out);
  }

  draw_statistic("Textures created", "%d", num_textures_created);
  draw_statistic("Textures uploaded", "%d", num_textures_uploaded);
  draw_statistic("Textures alive", "%d", num_textures_alive);
  draw_statistic("pshaders created", "%d", num_pixel_shaders_created);
  draw_statistic("pshaders alive", "%d", num_pixel_shaders_alive);
  draw_statistic("vshaders created", "%d", num_vertex_shaders_created);
  draw_statistic("vshaders alive", "%d", num_vertex_shaders_alive);
  draw_statistic("shaders changes", "%d", this_frame.num_shader_changes);
  draw_statistic("dlists called", "%d", this_frame.num_dlists_called);
  draw_statistic("Primitive joins", "%d", this_frame.num_primitive_joins);
  draw_statistic("Draw calls", "%d", this_frame.num_draw_calls);
  draw_statistic("Primitives", "%d", this_frame.num_prims);
  draw_statistic("Primitives (DL)", "%d", this_frame.num_dl_prims);
  draw_statistic("XF loads", "%d", this_frame.num_xf_loads);
  draw_statistic("XF loads (DL)", "%d", this_frame.num_xf_loads_in_dl);
  draw_statistic("CP loads", "%d", this_frame.num_cp_loads);
  draw_statistic("CP loads (DL)", "%d", this_frame.num_cp_loads_in_dl);
  draw_statistic("BP loads", "%d", this_frame.num_bp_loads);
  draw_statistic("BP loads (DL)", "%d", this_frame.num_bp_loads_in_dl);
  draw_statistic("Vertex streamed", "%i kB", this_frame.bytes_vertex_streamed / 1024);
  draw_statistic("Index streamed", "%i kB", this_frame.bytes_index_streamed / 1024);
  draw_statistic("Uniform streamed", "%i kB", this_frame.bytes_uniform_streamed / 1024);
  draw_statistic("Vertex Loaders", "%d", num_vertex_loaders);
  draw_statistic("EFB peeks:", "%d", this_frame.num_efb_peeks);
  draw_statistic("EFB pokes:", "%d", this_frame.num_efb_pokes);

  ImGui::Columns(1);

  ImGui::End();
}
// Debug window dumping the 16 projection matrix entries (gproj/g2proj)
// next to the 6 raw projection parameters (proj).
// Is this really needed?
void Statistics::DisplayProj() const
{
  if (!ImGui::Begin("Projection Statistics", nullptr, ImGuiWindowFlags_NoNavInputs))
  {
    ImGui::End();
    return;
  }

  ImGui::TextUnformatted("Projection #: X for Raw 6=0 (X for Raw 6!=0)");
  ImGui::NewLine();
  ImGui::Text("Projection 0: %f (%f) Raw 0: %f", gproj[0], g2proj[0], proj[0]);
  ImGui::Text("Projection 1: %f (%f)", gproj[1], g2proj[1]);
  ImGui::Text("Projection 2: %f (%f) Raw 1: %f", gproj[2], g2proj[2], proj[1]);
  ImGui::Text("Projection 3: %f (%f)", gproj[3], g2proj[3]);
  ImGui::Text("Projection 4: %f (%f)", gproj[4], g2proj[4]);
  ImGui::Text("Projection 5: %f (%f) Raw 2: %f", gproj[5], g2proj[5], proj[2]);
  ImGui::Text("Projection 6: %f (%f) Raw 3: %f", gproj[6], g2proj[6], proj[3]);
  ImGui::Text("Projection 7: %f (%f)", gproj[7], g2proj[7]);
  ImGui::Text("Projection 8: %f (%f)", gproj[8], g2proj[8]);
  ImGui::Text("Projection 9: %f (%f)", gproj[9], g2proj[9]);
  ImGui::Text("Projection 10: %f (%f) Raw 4: %f", gproj[10], g2proj[10], proj[4]);
  ImGui::Text("Projection 11: %f (%f) Raw 5: %f", gproj[11], g2proj[11], proj[5]);
  ImGui::Text("Projection 12: %f (%f)", gproj[12], g2proj[12]);
  ImGui::Text("Projection 13: %f (%f)", gproj[13], g2proj[13]);
  ImGui::Text("Projection 14: %f (%f)", gproj[14], g2proj[14]);
  ImGui::Text("Projection 15: %f (%f)", gproj[15], g2proj[15]);
  ImGui::End();
}
| gpl-2.0 |
CM-zenfone2/android_kernel_asus_moorefield | fs/f2fs/dir.c | 17 | 21735 | /*
* fs/f2fs/dir.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "acl.h"
#include "xattr.h"
/* Number of page-cache-sized blocks needed to cover i_size (round up). */
static unsigned long dir_blocks(struct inode *inode)
{
	unsigned long long bytes = i_size_read(inode) + PAGE_CACHE_SIZE - 1;

	return (unsigned long)(bytes >> PAGE_CACHE_SHIFT);
}
/* Bucket count doubles per hash level until capped at MAX_DIR_BUCKETS. */
static unsigned int dir_buckets(unsigned int level, int dir_level)
{
	unsigned int depth = level + dir_level;

	return depth < MAX_DIR_HASH_DEPTH / 2 ? 1U << depth : MAX_DIR_BUCKETS;
}
/* Shallow hash levels use 2 blocks per bucket, deeper levels use 4. */
static unsigned int bucket_blocks(unsigned int level)
{
	return level < MAX_DIR_HASH_DEPTH / 2 ? 2 : 4;
}
/* on-disk f2fs file type -> readdir d_type */
unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
	[F2FS_FT_UNKNOWN]	= DT_UNKNOWN,
	[F2FS_FT_REG_FILE]	= DT_REG,
	[F2FS_FT_DIR]		= DT_DIR,
	[F2FS_FT_CHRDEV]	= DT_CHR,
	[F2FS_FT_BLKDEV]	= DT_BLK,
	[F2FS_FT_FIFO]		= DT_FIFO,
	[F2FS_FT_SOCK]		= DT_SOCK,
	[F2FS_FT_SYMLINK]	= DT_LNK,
};

/* shift that isolates the S_IFMT file-type bits of an i_mode value */
#define S_SHIFT 12

/* VFS mode bits (S_IFMT >> S_SHIFT) -> on-disk f2fs file type */
static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= F2FS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= F2FS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= F2FS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= F2FS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= F2FS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= F2FS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= F2FS_FT_SYMLINK,
};
/* Record the on-disk file type for @de from the VFS mode bits. */
void set_de_type(struct f2fs_dir_entry *de, umode_t mode)
{
	de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}
/*
 * Block index of bucket @idx in hash level @level: skip every block of
 * the shallower levels, then step to the bucket inside this level.
 */
static unsigned long dir_block_index(unsigned int level,
				     int dir_level, unsigned int idx)
{
	unsigned long blkidx = 0;
	unsigned long lvl;

	for (lvl = 0; lvl < level; lvl++)
		blkidx += dir_buckets(lvl, dir_level) * bucket_blocks(lvl);

	return blkidx + (unsigned long)idx * bucket_blocks(level);
}
/*
 * Search one dentry block for @fname.
 *
 * On a hit the page stays kmapped and is returned through @res_page
 * (the caller unmaps it later); on a miss the page is unmapped here.
 * @max_slots reports the longest run of free dentry slots seen.
 */
static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
				struct f2fs_filename *fname,
				f2fs_hash_t namehash,
				int *max_slots,
				struct page **res_page)
{
	struct f2fs_dentry_block *dentry_blk;
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;

	dentry_blk = (struct f2fs_dentry_block *)kmap(dentry_page);

	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
	de = find_target_dentry(fname, namehash, max_slots, &d);

	if (de)
		*res_page = dentry_page;
	else
		kunmap(dentry_page);

	/*
	 * For the most part, it should be a bug when name_len is zero.
	 * We stop here for figuring out where the bug has occurred.
	 * (find_target_dentry() sets d.max = -1 on a zero name_len.)
	 */
	f2fs_bug_on(F2FS_P_SB(dentry_page), d.max < 0);

	return de;
}
/*
 * Walk the dentry bitmap of @d looking for @fname.
 *
 * For encrypted directories (fname->hash set) only the stored hash is
 * compared; otherwise length, hash and the raw name bytes must match.
 * @max_slots (if non-NULL) is filled with the longest free-slot run,
 * which callers use to decide whether a new entry would fit here.
 */
struct f2fs_dir_entry *find_target_dentry(struct f2fs_filename *fname,
			f2fs_hash_t namehash, int *max_slots,
			struct f2fs_dentry_ptr *d)
{
	struct f2fs_dir_entry *de;
	unsigned long bit_pos = 0;
	int max_len = 0;
	struct f2fs_str de_name = FSTR_INIT(NULL, 0);
	struct f2fs_str *name = &fname->disk_name;

	if (max_slots)
		*max_slots = 0;
	while (bit_pos < d->max) {
		/* clear bit == free slot: extend the current free run */
		if (!test_bit_le(bit_pos, d->bitmap)) {
			bit_pos++;
			max_len++;
			continue;
		}

		de = &d->dentry[bit_pos];

		/* encrypted case */
		de_name.name = d->filename[bit_pos];
		de_name.len = le16_to_cpu(de->name_len);

		/* show encrypted name */
		if (fname->hash) {
			if (de->hash_code == fname->hash)
				goto found;
		} else if (de_name.len == name->len &&
			de->hash_code == namehash &&
			!memcmp(de_name.name, name->name, name->len)) {
			goto found;
		}

		if (max_slots && max_len > *max_slots)
			*max_slots = max_len;
		max_len = 0;

		/* remain bug on condition */
		if (unlikely(!de->name_len))
			d->max = -1;

		/* a name occupies GET_DENTRY_SLOTS() consecutive slots */
		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
	}

	de = NULL;
found:
	if (max_slots && max_len > *max_slots)
		*max_slots = max_len;
	return de;
}
/*
 * Search every block of the hash bucket that @fname maps to within one
 * hash level.  Also caches (chash, clevel) in the directory's f2fs
 * inode when a free slot large enough for the name was seen, so a
 * subsequent insertion can skip straight to that level.
 */
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
					unsigned int level,
					struct f2fs_filename *fname,
					struct page **res_page)
{
	struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
	int s = GET_DENTRY_SLOTS(name.len);
	unsigned int nbucket, nblock;
	unsigned int bidx, end_block;
	struct page *dentry_page;
	struct f2fs_dir_entry *de = NULL;
	bool room = false;
	int max_slots;
	f2fs_hash_t namehash;

	namehash = f2fs_dentry_hash(&name);

	f2fs_bug_on(F2FS_I_SB(dir), level > MAX_DIR_HASH_DEPTH);

	nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
	nblock = bucket_blocks(level);

	/* first block of the bucket this hash falls into */
	bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
					le32_to_cpu(namehash) % nbucket);
	end_block = bidx + nblock;

	for (; bidx < end_block; bidx++) {
		/* no need to allocate new dentry pages to all the indices */
		dentry_page = find_data_page(dir, bidx);
		if (IS_ERR(dentry_page)) {
			/* a hole counts as available room for insertion */
			room = true;
			continue;
		}

		de = find_in_block(dentry_page, fname, namehash, &max_slots,
								res_page);
		if (de)
			break;

		if (max_slots >= s)
			room = true;
		f2fs_put_page(dentry_page, 0);
	}

	if (!de && room && F2FS_I(dir)->chash != namehash) {
		F2FS_I(dir)->chash = namehash;
		F2FS_I(dir)->clevel = level;
	}

	return de;
}
/*
 * Find an entry in the specified directory with the wanted name.
 * It returns the page where the entry was found (as a parameter - res_page),
 * and the entry itself. Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 *
 * Inline directories are searched via find_in_inline_dir(); regular
 * ones are walked level by level up to i_current_depth.
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, struct qstr *child,
			struct page **res_page)
{
	unsigned long npages = dir_blocks(dir);
	struct f2fs_dir_entry *de = NULL;
	unsigned int max_depth;
	unsigned int level;
	struct f2fs_filename fname;
	int err;

	*res_page = NULL;

	err = f2fs_fname_setup_filename(dir, child, 1, &fname);
	if (err)
		return NULL;

	if (f2fs_has_inline_dentry(dir)) {
		de = find_in_inline_dir(dir, &fname, res_page);
		goto out;
	}

	if (npages == 0)
		goto out;

	max_depth = F2FS_I(dir)->i_current_depth;

	for (level = 0; level < max_depth; level++) {
		de = find_in_level(dir, level, &fname, res_page);
		if (de)
			break;
	}
out:
	f2fs_fname_free_filename(&fname);
	return de;
}
/*
 * Return the ".." entry of @dir (dentry index 1 of its first block)
 * together with its page in *p; the page is returned unlocked.
 */
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
{
	struct page *page;
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_block *dentry_blk;

	if (f2fs_has_inline_dentry(dir))
		return f2fs_parent_inline_dir(dir, p);

	page = get_lock_data_page(dir, 0);
	if (IS_ERR(page))
		return NULL;

	dentry_blk = kmap(page);
	/* slot 0 is ".", slot 1 is ".." */
	de = &dentry_blk->dentry[1];
	*p = page;
	unlock_page(page);
	return de;
}
/* Look up @qstr in @dir and return its inode number, or 0 if absent. */
ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr)
{
	struct page *page;
	struct f2fs_dir_entry *de = f2fs_find_entry(dir, qstr, &page);
	ino_t ino = 0;

	if (de) {
		ino = le32_to_cpu(de->ino);
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
	}
	return ino;
}
/*
 * Repoint an existing dentry @de at @inode (used by rename).  Takes
 * the page lock, waits for writeback, rewrites ino/type, then marks
 * the page dirty and updates the directory's timestamps.
 */
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
		struct page *page, struct inode *inode)
{
	/* inline dentries live in a NODE page, regular ones in DATA */
	enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
	lock_page(page);
	f2fs_wait_on_page_writeback(page, type);
	de->ino = cpu_to_le32(inode->i_ino);
	set_de_type(de, inode->i_mode);
	f2fs_dentry_kunmap(dir, page);
	set_page_dirty(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	mark_inode_dirty(dir);
	f2fs_put_page(page, 1);
}
/* Copy @name into the on-disk inode in @ipage (for fsync recovery). */
static void init_dent_inode(const struct qstr *name, struct page *ipage)
{
	struct f2fs_inode *ri;

	f2fs_wait_on_page_writeback(ipage, NODE);

	/* copy name info. to this inode page */
	ri = F2FS_INODE(ipage);
	ri->i_namelen = cpu_to_le32(name->len);
	memcpy(ri->i_name, name->name, name->len);
	set_page_dirty(ipage);
}
/*
 * Refresh the name stored in @inode's node page to @name (rename path).
 * Skipped when the target uses an encrypted name.  Returns 0 or a
 * negative errno from fetching the node page.
 */
int update_dent_inode(struct inode *inode, struct inode *to,
			const struct qstr *name)
{
	struct page *ipage;

	if (file_enc_name(to))
		return 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	init_dent_inode(name, ipage);
	f2fs_put_page(ipage, 1);

	return 0;
}
/*
 * Fill dentry slots 0 and 1 of a fresh directory block with the "."
 * and ".." entries and mark both slots used in the bitmap.
 */
void do_make_empty_dir(struct inode *inode, struct inode *parent,
					struct f2fs_dentry_ptr *d)
{
	struct f2fs_dir_entry *de;

	de = &d->dentry[0];
	de->name_len = cpu_to_le16(1);
	de->hash_code = 0;
	de->ino = cpu_to_le32(inode->i_ino);
	memcpy(d->filename[0], ".", 1);
	set_de_type(de, inode->i_mode);

	de = &d->dentry[1];
	de->hash_code = 0;
	de->name_len = cpu_to_le16(2);
	de->ino = cpu_to_le32(parent->i_ino);
	memcpy(d->filename[1], "..", 2);
	set_de_type(de, parent->i_mode);

	test_and_set_bit_le(0, (void *)d->bitmap);
	test_and_set_bit_le(1, (void *)d->bitmap);
}
/*
 * Allocate block 0 of a new directory and write "." and ".." into it.
 * Inline directories are delegated to make_empty_inline_dir().
 * Returns 0 or a negative errno from the block allocation.
 */
static int make_empty_dir(struct inode *inode,
		struct inode *parent, struct page *page)
{
	struct page *dentry_page;
	struct f2fs_dentry_block *dentry_blk;
	struct f2fs_dentry_ptr d;

	if (f2fs_has_inline_dentry(inode))
		return make_empty_inline_dir(inode, parent, page);

	dentry_page = get_new_data_page(inode, page, 0, true);
	if (IS_ERR(dentry_page))
		return PTR_ERR(dentry_page);

	dentry_blk = kmap_atomic(dentry_page);

	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
	do_make_empty_dir(inode, parent, &d);

	kunmap_atomic(dentry_blk);

	set_page_dirty(dentry_page);
	f2fs_put_page(dentry_page, 1);
	return 0;
}
/*
 * Prepare the node page of @inode for being linked under @dir.
 *
 * For a brand-new inode (FI_NEW_INODE) this allocates the node page
 * and initializes the empty-dir block, ACLs, security xattrs and (for
 * encrypted parents) the encryption context; on any failure the whole
 * inode is torn down again.  For an existing inode the node page is
 * just fetched.  Returns the locked node page or an ERR_PTR.
 */
struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
			const struct qstr *name, struct page *dpage)
{
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
		page = new_inode_page(inode);
		if (IS_ERR(page))
			return page;

		if (S_ISDIR(inode->i_mode)) {
			err = make_empty_dir(inode, dir, page);
			if (err)
				goto error;
		}

		err = f2fs_init_acl(inode, dir, page, dpage);
		if (err)
			goto put_error;

		err = f2fs_init_security(inode, dir, name, page);
		if (err)
			goto put_error;

		if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode)) {
			err = f2fs_inherit_context(dir, inode, page);
			if (err)
				goto put_error;
		}
	} else {
		page = get_node_page(F2FS_I_SB(dir), inode->i_ino);
		if (IS_ERR(page))
			return page;

		set_cold_node(inode, page);
	}

	if (name)
		init_dent_inode(name, page);

	/*
	 * This file should be checkpointed during fsync.
	 * We lost i_pino from now on.
	 */
	if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
		file_lost_pino(inode);
		/*
		 * If link the tmpfile to alias through linkat path,
		 * we should remove this inode from orphan list.
		 */
		if (inode->i_nlink == 0)
			remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
		inc_nlink(inode);
	}
	return page;

put_error:
	f2fs_put_page(page, 1);
error:
	/* once the failed inode becomes a bad inode, i_mode is S_IFREG */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_blocks(inode, 0, false);
	remove_dirty_dir_inode(inode);
	remove_inode_page(inode);
	return ERR_PTR(err);
}
/*
 * After adding a link below @dir: bump the parent's link count for a
 * new subdirectory, refresh timestamps, record a grown hash depth, and
 * clear the new-inode / pending-link flags on @inode.
 */
void update_parent_metadata(struct inode *dir, struct inode *inode,
						unsigned int current_depth)
{
	if (inode && is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
		if (S_ISDIR(inode->i_mode)) {
			inc_nlink(dir);
			set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
		}
		clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
	}
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	mark_inode_dirty(dir);

	if (F2FS_I(dir)->i_current_depth != current_depth) {
		F2FS_I(dir)->i_current_depth = current_depth;
		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}

	if (inode && is_inode_flag_set(F2FS_I(inode), FI_INC_LINK))
		clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
}
/*
 * Find a run of at least @slots consecutive free (zero) bits in the
 * dentry bitmap.  Returns the start of the first such run, or
 * @max_slots when no gap is large enough.
 */
int room_for_filename(const void *bitmap, int slots, int max_slots)
{
	int start = 0;

	while (start < max_slots) {
		int zero_start, zero_end;

		zero_start = find_next_zero_bit_le(bitmap, max_slots, start);
		if (zero_start >= max_slots)
			return max_slots;

		zero_end = find_next_bit_le(bitmap, max_slots, zero_start);
		if (zero_end - zero_start >= slots)
			return zero_start;

		start = zero_end + 1;
	}
	return max_slots;
}
/*
 * Write a new dentry (@name -> @ino) at @bit_pos of @d and mark every
 * slot the name occupies as used in the bitmap.
 */
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
				const struct qstr *name, f2fs_hash_t name_hash,
				unsigned int bit_pos)
{
	struct f2fs_dir_entry *de;
	int slots = GET_DENTRY_SLOTS(name->len);
	int i;

	de = &d->dentry[bit_pos];
	de->hash_code = name_hash;
	de->name_len = cpu_to_le16(name->len);
	memcpy(d->filename[bit_pos], name->name, name->len);
	de->ino = cpu_to_le32(ino);
	set_de_type(de, mode);
	for (i = 0; i < slots; i++)
		test_and_set_bit_le(bit_pos + i, (void *)d->bitmap);
}
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 *
 * Add @name -> @ino to directory @dir.  Tries the inline-dentry path
 * first, then scans the hash levels (growing i_current_depth as
 * needed) for a block with enough free slots.  When @inode is given
 * its metadata is initialized/linked under i_sem as well.
 */
int __f2fs_add_link(struct inode *dir, const struct qstr *name,
				struct inode *inode, nid_t ino, umode_t mode)
{
	unsigned int bit_pos;
	unsigned int level;
	unsigned int current_depth;
	unsigned long bidx, block;
	f2fs_hash_t dentry_hash;
	unsigned int nbucket, nblock;
	struct page *dentry_page = NULL;
	struct f2fs_dentry_block *dentry_blk = NULL;
	struct f2fs_dentry_ptr d;
	struct page *page = NULL;
	struct f2fs_filename fname;
	struct qstr new_name;
	int slots, err;

	err = f2fs_fname_setup_filename(dir, name, 0, &fname);
	if (err)
		return err;

	new_name.name = fname_name(&fname);
	new_name.len = fname_len(&fname);

	if (f2fs_has_inline_dentry(dir)) {
		/* -EAGAIN means the inline area is full: fall through */
		err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
		if (!err || err != -EAGAIN)
			goto out;
		else
			err = 0;
	}

	level = 0;
	slots = GET_DENTRY_SLOTS(new_name.len);
	dentry_hash = f2fs_dentry_hash(&new_name);

	current_depth = F2FS_I(dir)->i_current_depth;
	/* reuse the level cached by a preceding failed lookup */
	if (F2FS_I(dir)->chash == dentry_hash) {
		level = F2FS_I(dir)->clevel;
		F2FS_I(dir)->chash = 0;
	}

start:
	if (unlikely(current_depth == MAX_DIR_HASH_DEPTH)) {
		err = -ENOSPC;
		goto out;
	}

	/* Increase the depth, if required */
	if (level == current_depth)
		++current_depth;

	nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
	nblock = bucket_blocks(level);

	bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
				(le32_to_cpu(dentry_hash) % nbucket));

	for (block = bidx; block <= (bidx + nblock - 1); block++) {
		dentry_page = get_new_data_page(dir, NULL, block, true);
		if (IS_ERR(dentry_page)) {
			err = PTR_ERR(dentry_page);
			goto out;
		}

		dentry_blk = kmap(dentry_page);
		bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_DENTRY_IN_BLOCK);
		if (bit_pos < NR_DENTRY_IN_BLOCK)
			goto add_dentry;

		kunmap(dentry_page);
		f2fs_put_page(dentry_page, 1);
	}

	/* Move to next level to find the empty slot for new dentry */
	++level;
	goto start;
add_dentry:
	f2fs_wait_on_page_writeback(dentry_page, DATA);

	if (inode) {
		down_write(&F2FS_I(inode)->i_sem);
		page = init_inode_metadata(inode, dir, &new_name, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
		if (f2fs_encrypted_inode(dir))
			file_set_enc_name(inode);
	}

	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
	f2fs_update_dentry(ino, mode, &d, &new_name, dentry_hash, bit_pos);

	set_page_dirty(dentry_page);

	if (inode) {
		/* we don't need to mark_inode_dirty now */
		F2FS_I(inode)->i_pino = dir->i_ino;
		update_inode(inode, page);
		f2fs_put_page(page, 1);
	}

	update_parent_metadata(dir, inode, current_depth);
fail:
	if (inode)
		up_write(&F2FS_I(inode)->i_sem);

	if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
		update_inode_page(dir);
		clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}
	kunmap(dentry_page);
	f2fs_put_page(dentry_page, 1);
out:
	f2fs_fname_free_filename(&fname);
	return err;
}
/*
 * Set up inode metadata for an O_TMPFILE inode; no directory entry is
 * created, so this is the dentry-creation path minus name handling.
 */
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
{
	struct page *ipage;
	int err = 0;

	down_write(&F2FS_I(inode)->i_sem);

	ipage = init_inode_metadata(inode, dir, NULL, NULL);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock;
	}
	/* we don't need to mark_inode_dirty now */
	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);

	clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
unlock:
	up_write(&F2FS_I(inode)->i_sem);
	return err;
}
void f2fs_drop_nlink(struct inode *dir, struct inode *inode, struct page *page)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
down_write(&F2FS_I(inode)->i_sem);
if (S_ISDIR(inode->i_mode)) {
drop_nlink(dir);
if (page)
update_inode(dir, page);
else
update_inode_page(dir);
}
inode->i_ctime = CURRENT_TIME;
drop_nlink(inode);
if (S_ISDIR(inode->i_mode)) {
drop_nlink(inode);
i_size_write(inode, 0);
}
up_write(&F2FS_I(inode)->i_sem);
update_inode_page(inode);
if (inode->i_nlink == 0)
add_orphan_inode(sbi, inode->i_ino);
else
release_orphan_inode(sbi);
}
/*
 * Remove the dentry from its dentry page only; the corresponding entry
 * in the name page does not need to be touched during deletion.
 */
/*
 * Clear @dentry's bitmap slots in dentry page @page of directory @dir
 * and drop the victim @inode's link counts.  If the dentry block ends
 * up empty, the whole block is punched out of the directory.
 *
 * @page arrives mapped (kmap) and referenced from f2fs_find_entry();
 * both the mapping and the reference are released here.
 */
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_dentry_block *dentry_blk;
	unsigned int bit_pos;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	int i;

	/* inline directories store their entries inside the inode */
	if (f2fs_has_inline_dentry(dir))
		return f2fs_delete_inline_entry(dentry, page, dir, inode);

	lock_page(page);
	f2fs_wait_on_page_writeback(page, DATA);

	dentry_blk = page_address(page);
	bit_pos = dentry - dentry_blk->dentry;
	/* a long name occupies several consecutive slots; clear them all */
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);

	/* Let's check and deallocate this dentry page */
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
			NR_DENTRY_IN_BLOCK,
			0);
	kunmap(page); /* kunmap - pair of f2fs_find_entry */

	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode)
		f2fs_drop_nlink(dir, inode, NULL);

	/* no live entries left: punch the hole so the block is reclaimed */
	if (bit_pos == NR_DENTRY_IN_BLOCK &&
			!truncate_hole(dir, page->index, page->index + 1)) {
		clear_page_dirty_for_io(page);
		ClearPagePrivate(page);
		ClearPageUptodate(page);
		inode_dec_dirty_pages(dir);
	}
	f2fs_put_page(page, 1);
}
/*
 * Return true when @dir holds no entries besides "." and ".." (the
 * first two slots of block 0); used to gate rmdir.
 */
bool f2fs_empty_dir(struct inode *dir)
{
	struct f2fs_dentry_block *blk;
	struct page *page;
	unsigned long idx;
	unsigned long total = dir_blocks(dir);
	unsigned int pos;

	if (f2fs_has_inline_dentry(dir))
		return f2fs_empty_inline_dir(dir);

	for (idx = 0; idx < total; idx++) {
		page = get_lock_data_page(dir, idx);
		if (IS_ERR(page)) {
			/* a hole contains no entries; keep scanning */
			if (PTR_ERR(page) == -ENOENT)
				continue;
			return false;
		}

		blk = kmap_atomic(page);
		/* block 0 reserves slots 0 and 1 for "." and ".." */
		pos = find_next_bit_le(&blk->dentry_bitmap,
					NR_DENTRY_IN_BLOCK,
					idx ? 0 : 2);
		kunmap_atomic(blk);

		f2fs_put_page(page, 1);

		if (pos < NR_DENTRY_IN_BLOCK)
			return false;
	}
	return true;
}
/*
 * Emit every live dentry in @d starting at slot @bit_pos through the
 * @filldir callback.  @n is the dentry-block index, used to build the
 * directory position cookie.  @fstr is a preallocated scratch buffer
 * for decrypted names on encrypted directories.
 *
 * Returns true when iteration must stop (callback buffer full, or the
 * name could not be decrypted); false when the whole block was emitted.
 *
 * NOTE(review): a kmalloc failure returns false, which lets the caller
 * move on to the next block and silently skip the rest of this one —
 * confirm this best-effort behavior is intended.
 */
bool f2fs_fill_dentries(struct file *file, void *dirent, filldir_t filldir,
		struct f2fs_dentry_ptr *d, unsigned int n, unsigned int bit_pos,
		struct f2fs_str *fstr)
{
	unsigned int start_bit_pos = bit_pos;
	unsigned char d_type;
	struct f2fs_dir_entry *de = NULL;
	struct f2fs_str de_name = FSTR_INIT(NULL, 0);
	unsigned char *types = f2fs_filetype_table;
	int over;

	while (bit_pos < d->max) {
		d_type = DT_UNKNOWN;
		bit_pos = find_next_bit_le(d->bitmap, d->max, bit_pos);
		if (bit_pos >= d->max)
			break;

		de = &d->dentry[bit_pos];
		/* map on-disk file type to the readdir DT_* value */
		if (types && de->file_type < F2FS_FT_MAX)
			d_type = types[de->file_type];

		de_name.name = d->filename[bit_pos];
		de_name.len = le16_to_cpu(de->name_len);

		if (f2fs_encrypted_inode(d->inode)) {
			int save_len = fstr->len;
			int ret;

			/* copy the on-disk name so it can be decrypted */
			de_name.name = kmalloc(de_name.len, GFP_NOFS);
			if (!de_name.name)
				return false;

			memcpy(de_name.name, d->filename[bit_pos], de_name.len);

			ret = f2fs_fname_disk_to_usr(d->inode, &de->hash_code,
							&de_name, fstr);
			kfree(de_name.name);
			if (ret < 0)
				return true;

			/* present the decrypted name, then restore the
			 * scratch buffer's capacity for the next entry */
			de_name = *fstr;
			fstr->len = save_len;
		}

		over = filldir(dirent, de_name.name, de_name.len,
					(n * d->max) + bit_pos,
					le32_to_cpu(de->ino), d_type);
		if (over) {
			/* caller's buffer is full; rewind f_pos to here */
			file->f_pos += bit_pos - start_bit_pos;
			return true;
		}

		/* long names span several slots; skip past all of them */
		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
	}
	return false;
}
/*
 * readdir(2) for f2fs directories (legacy filldir interface).
 *
 * f_pos encodes (block index, slot index) as n * NR_DENTRY_IN_BLOCK +
 * bit_pos.  Walks dentry blocks from there, priming the page cache with
 * readahead, and hands each block to f2fs_fill_dentries().
 *
 * NOTE(review): when get_lock_data_page() fails mid-scan the loop
 * continues without resetting bit_pos, so a stale slot offset could be
 * applied to the next block — confirm against the upstream history.
 */
static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	unsigned long pos = file->f_pos;
	unsigned int bit_pos = 0;
	struct inode *inode = file_inode(file);
	unsigned long npages = dir_blocks(inode);
	struct f2fs_dentry_block *dentry_blk = NULL;
	struct page *dentry_page = NULL;
	struct file_ra_state *ra = &file->f_ra;
	struct f2fs_dentry_ptr d;
	struct f2fs_str fstr = FSTR_INIT(NULL, 0);
	unsigned int n = 0;
	int err = 0;

	if (f2fs_encrypted_inode(inode)) {
		err = f2fs_get_encryption_info(inode);
		if (err)
			return err;

		/* scratch buffer for decrypted names */
		err = f2fs_fname_crypto_alloc_buffer(inode, F2FS_NAME_LEN,
							&fstr);
		if (err < 0)
			return err;
	}

	if (f2fs_has_inline_dentry(inode)) {
		err = f2fs_read_inline_dir(file, dirent, filldir, &fstr);
		goto out;
	}

	/* split f_pos into (block, slot) */
	bit_pos = (pos % NR_DENTRY_IN_BLOCK);
	n = (pos / NR_DENTRY_IN_BLOCK);

	/* readahead for multi pages of dir */
	if (npages - n > 1 && !ra_has_index(ra, n))
		page_cache_sync_readahead(inode->i_mapping, ra, file, n,
				min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));

	for (; n < npages; n++) {
		/* unreadable blocks are skipped rather than fatal */
		dentry_page = get_lock_data_page(inode, n);
		if (IS_ERR(dentry_page))
			continue;

		dentry_blk = kmap(dentry_page);

		make_dentry_ptr(inode, &d, (void *)dentry_blk, 1);

		if (f2fs_fill_dentries(file, dirent, filldir, &d, n, bit_pos, &fstr))
			goto stop;

		bit_pos = 0;
		file->f_pos = (n + 1) * NR_DENTRY_IN_BLOCK;
		kunmap(dentry_page);
		f2fs_put_page(dentry_page, 1);
		dentry_page = NULL;
	}
stop:
	/* release the block left mapped when fill stopped early */
	if (dentry_page && !IS_ERR(dentry_page)) {
		kunmap(dentry_page);
		f2fs_put_page(dentry_page, 1);
	}
out:
	f2fs_fname_crypto_free_buffer(&fstr);
	return err;
}
/* Directory file operations: f2fs readdir plus the shared fsync/ioctl. */
const struct file_operations f2fs_dir_operations = {
	.llseek = generic_file_llseek,
	.read = generic_read_dir,
	.readdir = f2fs_readdir,
	.fsync = f2fs_sync_file,
	.unlocked_ioctl = f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = f2fs_compat_ioctl,
#endif
};
| gpl-2.0 |
StuntRR/StuntRR-kernel | sound/soc/msm/msm8x10.c | 273 | 34644 | /* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mfd/pm8xxx/pm8921.h>
#include <linux/qpnp/clkdiv.h>
#include <linux/io.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/pcm.h>
#include <sound/jack.h>
#include <asm/mach-types.h>
#include <mach/socinfo.h>
#include <qdsp6v2/msm-pcm-routing-v2.h>
#include <sound/q6afe-v2.h>
#include <linux/module.h>
#include <mach/gpiomux.h>
#include "../codecs/msm8x10-wcd.h"
#define DRV_NAME "msm8x10-asoc-wcd"
#define BTSCO_RATE_8KHZ 8000
#define BTSCO_RATE_16KHZ 16000
/* It takes about 13ms for Class-D PAs to ramp-up */
#define EXT_CLASS_D_EN_DELAY 13000
#define EXT_CLASS_D_DIS_DELAY 3000
#define EXT_CLASS_D_DELAY_DELTA 2000
#define CDC_EXT_CLK_RATE 9600000
#define WCD9XXX_MBHC_DEF_BUTTONS 8
#define WCD9XXX_MBHC_DEF_RLOADS 5
static int msm_btsco_rate = BTSCO_RATE_8KHZ;
static int msm_btsco_ch = 1;
static int msm_proxy_rx_ch = 2;
static struct platform_device *spdev;
static int ext_spk_amp_gpio = -1;
/* pointers for digital codec register mappings */
static void __iomem *pcbcr;
static void __iomem *prcgr;
static int msm_sec_mi2s_rx_ch = 1;
static int msm_pri_mi2s_tx_ch = 1;
static int msm_sec_mi2s_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
/* True when hw_params index @p denotes a mask-type parameter. */
static inline int param_is_mask(int p)
{
	return (p >= SNDRV_PCM_HW_PARAM_FIRST_MASK) &&
	       (p <= SNDRV_PCM_HW_PARAM_LAST_MASK);
}
/* Return the snd_mask slot backing mask-type parameter @n. */
static inline struct snd_mask *param_to_mask(struct snd_pcm_hw_params *p, int n)
{
	return &p->masks[n - SNDRV_PCM_HW_PARAM_FIRST_MASK];
}
/* Restrict mask-type hw parameter @n to the single value @bit. */
static void param_set_mask(struct snd_pcm_hw_params *p, int n, unsigned bit)
{
	struct snd_mask *m;

	if (bit >= SNDRV_MASK_MAX || !param_is_mask(n))
		return;

	m = param_to_mask(p, n);
	m->bits[0] = 0;
	m->bits[1] = 0;
	m->bits[bit >> 5] |= (1 << (bit & 31));
}
static void *def_msm8x10_wcd_mbhc_cal(void);
static int msm8x10_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
bool dapm);
/*
 * Default MBHC (headset insert/button detect) configuration.  The
 * calibration blob, jack type, and the DT-driven fields (gpio_level_insert,
 * use_int_rbias) are filled in later at probe/init time.
 */
static struct wcd9xxx_mbhc_config mbhc_cfg = {
	.read_fw_bin = false,
	.calibration = NULL,
	.micbias = MBHC_MICBIAS1,
	.mclk_cb_fn = msm8x10_enable_codec_ext_clk,
	.mclk_rate = CDC_EXT_CLK_RATE,
	.gpio = 0,
	.gpio_irq = 0,
	.gpio_level_insert = 0,
	.detect_extn_cable = false,
	.insert_detect = true,
	.swap_gnd_mic = NULL,
	.use_int_rbias = false,
	.micbias_enable_flags = 1 << MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET |
	1 << MBHC_MICBIAS_ENABLE_REGULAR_HEADSET,
	.cs_enable_flags = (1 << MBHC_CS_ENABLE_POLLING |
	1 << MBHC_CS_ENABLE_INSERTION |
	1 << MBHC_CS_ENABLE_REMOVAL),
	.do_recalibration = false,
	.use_vddio_meas = false,
	.hw_jack_type = FOUR_POLE_JACK,
};
/*
* There is limitation for the clock root selection from
* either MI2S or DIG_CODEC.
* If DIG_CODEC root can only provide 9.6MHz clock
* to codec while MI2S only can provide
* 12.288MHz.
*/
enum {
DIG_CDC_CLK_SEL_DIG_CODEC,
DIG_CDC_CLK_SEL_PRI_MI2S,
DIG_CDC_CLK_SEL_SEC_MI2S,
};
static struct afe_clk_cfg mi2s_rx_clk = {
AFE_API_VERSION_I2S_CONFIG,
Q6AFE_LPASS_IBIT_CLK_1_P536_MHZ,
Q6AFE_LPASS_OSR_CLK_12_P288_MHZ,
Q6AFE_LPASS_CLK_SRC_INTERNAL,
Q6AFE_LPASS_CLK_ROOT_DEFAULT,
Q6AFE_LPASS_MODE_BOTH_VALID,
0,
};
static struct afe_clk_cfg mi2s_tx_clk = {
AFE_API_VERSION_I2S_CONFIG,
Q6AFE_LPASS_IBIT_CLK_1_P536_MHZ,
Q6AFE_LPASS_OSR_CLK_12_P288_MHZ,
Q6AFE_LPASS_CLK_SRC_INTERNAL,
Q6AFE_LPASS_CLK_ROOT_DEFAULT,
Q6AFE_LPASS_MODE_BOTH_VALID,
0,
};
static struct afe_digital_clk_cfg digital_cdc_clk = {
AFE_API_VERSION_I2S_CONFIG,
9600000,
5, /* Digital Codec root */
0,
};
static atomic_t mclk_rsc_ref;
static struct mutex cdc_mclk_mutex;
static int msm8x10_mclk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
static int msm_ext_spkramp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
static void msm8x10_enable_ext_spk_power_amp(u32 on);
static const struct snd_soc_dapm_widget msm8x10_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0,
msm8x10_mclk_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SPK("Lineout amp", msm_ext_spkramp_event),
SND_SOC_DAPM_MIC("Handset Mic", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Secondary Mic", NULL),
SND_SOC_DAPM_MIC("Digital Mic1", NULL),
SND_SOC_DAPM_MIC("Digital Mic2", NULL),
};
/*
 * Look up the optional external speaker amp GPIO from the device tree
 * and claim it, defaulting the line to "off".
 *
 * Returns 0 on success or when no GPIO is specified in DT; a negative
 * errno when the GPIO could not be requested.
 */
static int msm8x10_ext_spk_power_amp_init(void)
{
	int ret = 0;

	ext_spk_amp_gpio = of_get_named_gpio(spdev->dev.of_node,
					"qcom,ext-spk-amp-gpio", 0);
	if (ext_spk_amp_gpio >= 0) {
		ret = gpio_request(ext_spk_amp_gpio, "ext_spk_amp_gpio");
		if (ret) {
			pr_err("%s: gpio_request failed for ext_spk_amp_gpio.\n",
				__func__);
			/* propagate the real error instead of a flat -EINVAL */
			return ret;
		}
		gpio_direction_output(ext_spk_amp_gpio, 0);
	}
	return 0;
}
/* DAPM event handler for the external speaker amp widget. */
static int msm_ext_spkramp_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *kcontrol, int event)
{
	pr_debug("%s()\n", __func__);

	if (ext_spk_amp_gpio >= 0)
		msm8x10_enable_ext_spk_power_amp(
				SND_SOC_DAPM_EVENT_ON(event) ? 1 : 0);

	return 0;
}
/* Drive the external Class-D amp GPIO and wait for it to ramp. */
static void msm8x10_enable_ext_spk_power_amp(u32 on)
{
	unsigned long delay = on ? EXT_CLASS_D_EN_DELAY : EXT_CLASS_D_DIS_DELAY;

	gpio_direction_output(ext_spk_amp_gpio, on);
	/* give the PAs time to ramp up (or settle after shutdown) */
	usleep_range(delay, delay + EXT_CLASS_D_DELAY_DELTA);

	pr_debug("%s: %s external speaker PAs.\n", __func__,
		on ? "Enable" : "Disable");
}
/*
 * Enable the digital codec MCLK path: program pad drive strength and
 * kick the LPASS DIGCODEC clock branch/root registers mapped at probe.
 *
 * NOTE(review): @port_id and @cfg are accepted but never read — the
 * register writes are fixed values; confirm whether the parameters
 * were meant to influence them.
 */
static int msm_config_mclk(u16 port_id, struct afe_digital_clk_cfg *cfg)
{
	/* set the drive strength on the clock */
	msm_tlmm_misc_reg_write(TLMM_CDC_HDRV_CTL, 0x00);
	msm_tlmm_misc_reg_write(TLMM_CDC_HDRV_PULL_CTL, 0x0006db6d);
	/* enable the clock branch (CBCR) */
	iowrite32(0x1, pcbcr);
	/* Set the update bit to make the settings go through */
	iowrite32(0x1, prcgr);
	return 0;
}
/* Generic backend fixup: pin the sample rate to 48 kHz. */
static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
				struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);

	pr_debug("%s()\n", __func__);
	rate->min = 48000;
	rate->max = 48000;
	return 0;
}
/* FM backend fixup: stereo at 48 kHz. */
static int msm_be_fm_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
				struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *chans =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);

	pr_debug("%s()\n", __func__);
	rate->min = 48000;
	rate->max = 48000;
	chans->min = 2;
	chans->max = 2;
	return 0;
}
/*
 * Secondary MI2S RX backend fixup: apply the configured bit format,
 * pin 48 kHz and the user-selected RX channel count.
 */
static int msm_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
				struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *channels = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_CHANNELS);

	/* log the RX channel count, not the TX one (copy-paste fix) */
	pr_debug("%s(): channel:%d\n", __func__, msm_sec_mi2s_rx_ch);

	param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
			msm_sec_mi2s_rx_bit_format);
	rate->min = rate->max = 48000;
	channels->min = channels->max = msm_sec_mi2s_rx_ch;
	return 0;
}
/*
 * Primary MI2S TX backend fixup: 48 kHz, configured TX channel count.
 *
 * NOTE(review): this reuses the RX bit-format setting for the TX
 * direction — presumably both directions share one format; confirm.
 */
static int msm_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
				struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *chans =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);

	pr_debug("%s(), channel:%d\n", __func__, msm_pri_mi2s_tx_ch);

	param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
			msm_sec_mi2s_rx_bit_format);
	rate->min = 48000;
	rate->max = 48000;
	chans->min = msm_pri_mi2s_tx_ch;
	chans->max = msm_pri_mi2s_tx_ch;
	return 0;
}
static const char *const btsco_rate_text[] = {"BTSCO_RATE_8KHZ", "BTSCO_RATE_16KHZ"};
static const struct soc_enum msm_btsco_enum[] = {
SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
};
static const char *const sec_mi2s_rx_ch_text[] = {"One", "Two"};
static const char *const pri_mi2s_tx_ch_text[] = {"One", "Two"};
/* Report the current BT-SCO sample rate to userspace. */
static int msm_btsco_rate_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = msm_btsco_rate;
	pr_debug("%s: msm_btsco_rate = %d", __func__, msm_btsco_rate);
	return 0;
}
/* Select the BT-SCO sample rate; any unknown index falls back to 8 kHz. */
static int msm_btsco_rate_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	if (ucontrol->value.integer.value[0] == 1)
		msm_btsco_rate = BTSCO_RATE_16KHZ;
	else
		msm_btsco_rate = BTSCO_RATE_8KHZ;

	pr_debug("%s: msm_btsco_rate = %d\n", __func__, msm_btsco_rate);
	return 0;
}
/* Report the secondary MI2S RX channel count (0-based enum index). */
static int msm_sec_mi2s_rx_ch_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = msm_sec_mi2s_rx_ch - 1;
	pr_debug("%s: msm_sec_mi2s_rx_ch = %d\n", __func__,
			msm_sec_mi2s_rx_ch);
	return 0;
}
/* Set the secondary MI2S RX channel count; returns 1 = value changed. */
static int msm_sec_mi2s_rx_ch_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	msm_sec_mi2s_rx_ch = 1 + ucontrol->value.integer.value[0];
	pr_debug("%s: msm_sec_mi2s_rx_ch = %d\n", __func__,
			msm_sec_mi2s_rx_ch);
	return 1;
}
/* Report the primary MI2S TX channel count (0-based enum index). */
static int msm_pri_mi2s_tx_ch_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = msm_pri_mi2s_tx_ch - 1;
	pr_debug("%s: msm_pri_mi2s_tx_ch = %d\n", __func__,
			msm_pri_mi2s_tx_ch);
	return 0;
}
/* Set the primary MI2S TX channel count; returns 1 = value changed. */
static int msm_pri_mi2s_tx_ch_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	msm_pri_mi2s_tx_ch = 1 + ucontrol->value.integer.value[0];
	pr_debug("%s: msm_pri_mi2s_tx_ch = %d\n", __func__, msm_pri_mi2s_tx_ch);
	return 1;
}
/* BT-SCO backend fixup: apply the selected rate and channel count. */
static int msm_btsco_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
					struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *chans =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);

	rate->min = msm_btsco_rate;
	rate->max = msm_btsco_rate;
	chans->min = msm_btsco_ch;
	chans->max = msm_btsco_ch;
	return 0;
}
/* MI2S hw_params hook: nothing to program here, just trace the call. */
static int msm_mi2s_snd_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	pr_debug("%s(): substream = %s stream = %d\n", __func__,
			substream->name, substream->stream);
	return 0;
}
/*
 * Enable or disable the MI2S bit/OSR clocks for @substream's direction
 * via the AFE: playback uses the secondary MI2S RX port, capture the
 * primary MI2S path.
 *
 * NOTE(review): the capture branch programs mi2s_tx_clk through
 * AFE_PORT_ID_PRIMARY_MI2S_RX — confirm against the AFE port map
 * whether the RX port id is intentional for the primary TX clocks.
 */
static int mi2s_clk_ctl(struct snd_pcm_substream *substream, bool enable)
{
	int ret = 0;

	if (enable) {
		digital_cdc_clk.clk_val = 9600000;
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			mi2s_rx_clk.clk_val2 = Q6AFE_LPASS_OSR_CLK_12_P288_MHZ;
			mi2s_rx_clk.clk_val1 = Q6AFE_LPASS_IBIT_CLK_1_P536_MHZ;
			ret = afe_set_lpass_clock(AFE_PORT_ID_SECONDARY_MI2S_RX,
						&mi2s_rx_clk);
		} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
			mi2s_tx_clk.clk_val2 = Q6AFE_LPASS_OSR_CLK_12_P288_MHZ;
			mi2s_tx_clk.clk_val1 = Q6AFE_LPASS_IBIT_CLK_1_P536_MHZ;
			ret = afe_set_lpass_clock(AFE_PORT_ID_PRIMARY_MI2S_RX,
						&mi2s_tx_clk);
		} else
			pr_err("%s:Not valid substream.\n", __func__);

		if (ret < 0)
			pr_err("%s:afe_set_lpass_clock failed\n", __func__);

	} else {
		/* zero clk_val requests the clocks off */
		digital_cdc_clk.clk_val = 0;
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			mi2s_rx_clk.clk_val2 = Q6AFE_LPASS_OSR_CLK_DISABLE;
			mi2s_rx_clk.clk_val1 = Q6AFE_LPASS_IBIT_CLK_DISABLE;
			ret = afe_set_lpass_clock(AFE_PORT_ID_SECONDARY_MI2S_RX,
						&mi2s_rx_clk);
		} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
			mi2s_tx_clk.clk_val2 = Q6AFE_LPASS_OSR_CLK_DISABLE;
			mi2s_tx_clk.clk_val1 = Q6AFE_LPASS_IBIT_CLK_DISABLE;
			ret = afe_set_lpass_clock(AFE_PORT_ID_PRIMARY_MI2S_RX,
						&mi2s_tx_clk);
		} else
			pr_err("%s:Not valid substream.\n", __func__);

		if (ret < 0)
			pr_err("%s:afe_set_lpass_clock failed\n", __func__);
	}
	return ret;
}
/*
 * Reference-counted codec MCLK control shared by DAPM and MBHC.
 *
 * The first enable configures the 9.6 MHz clock before turning the
 * codec MCLK on; the last disable turns the codec side off before
 * clearing the clock — the ordering keeps the codec from ever running
 * on a dead clock.  @dapm tells the codec driver whether the call
 * originates from the DAPM path.
 */
static int msm8x10_enable_codec_ext_clk(struct snd_soc_codec *codec,
					int enable, bool dapm)
{
	int ret = 0;

	mutex_lock(&cdc_mclk_mutex);
	pr_debug("%s: enable = %d codec name %s enable %d mclk ref counter %d\n",
		__func__, enable, codec->name, enable,
		atomic_read(&mclk_rsc_ref));
	if (enable) {
		if (atomic_inc_return(&mclk_rsc_ref) == 1) {
			digital_cdc_clk.clk_val = 9600000;
			msm_config_mclk(AFE_PORT_ID_SECONDARY_MI2S_RX,
					&digital_cdc_clk);
			msm8x10_wcd_mclk_enable(codec, 1, dapm);
		}
	} else {
		if (atomic_dec_return(&mclk_rsc_ref) == 0) {
			digital_cdc_clk.clk_val = 0;
			msm8x10_wcd_mclk_enable(codec, 0, dapm);
			msm_config_mclk(AFE_PORT_ID_SECONDARY_MI2S_RX,
					&digital_cdc_clk);
		}
	}
	mutex_unlock(&cdc_mclk_mutex);
	return ret;
}
/* DAPM supply handler for MCLK: enable on PRE_PMU, drop on POST_PMD. */
static int msm8x10_mclk_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *kcontrol, int event)
{
	pr_debug("%s: event = %d\n", __func__, event);

	if (event == SND_SOC_DAPM_PRE_PMU)
		return msm8x10_enable_codec_ext_clk(w->codec, 1, true);
	if (event == SND_SOC_DAPM_POST_PMD)
		return msm8x10_enable_codec_ext_clk(w->codec, 0, true);
	return -EINVAL;
}
/* Stream close hook: drop the MI2S clocks enabled at startup. */
static void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
{
	int ret;

	pr_debug("%s(): substream = %s stream = %d\n", __func__,
			substream->name, substream->stream);

	ret = mi2s_clk_ctl(substream, false);
	if (ret < 0)
		pr_err("%s:clock disable failed\n", __func__);
}
/*
 * Stream open hook: enable the MI2S clocks, then set the CPU DAI to
 * clock/frame-master format.
 *
 * Previously a clock-enable failure was silently overwritten by the
 * snd_soc_dai_set_fmt() result; it is now checked and propagated.
 */
static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	int ret;

	pr_debug("%s(): substream = %s stream = %d\n", __func__,
			substream->name, substream->stream);

	ret = mi2s_clk_ctl(substream, true);
	if (ret < 0) {
		pr_err("%s: clock enable failed\n", __func__);
		return ret;
	}

	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBS_CFS);
	if (ret < 0)
		pr_err("set fmt cpu dai failed\n");

	return ret;
}
static const struct soc_enum msm_snd_enum[] = {
SOC_ENUM_SINGLE_EXT(2, sec_mi2s_rx_ch_text),
SOC_ENUM_SINGLE_EXT(2, pri_mi2s_tx_ch_text),
};
static const struct snd_kcontrol_new msm_snd_controls[] = {
SOC_ENUM_EXT("Internal BTSCO SampleRate", msm_btsco_enum[0],
msm_btsco_rate_get, msm_btsco_rate_put),
SOC_ENUM_EXT("MI2S_RX Channels", msm_snd_enum[0],
msm_sec_mi2s_rx_ch_get, msm_sec_mi2s_rx_ch_put),
SOC_ENUM_EXT("MI2S_TX Channels", msm_snd_enum[1],
msm_pri_mi2s_tx_ch_get, msm_pri_mi2s_tx_ch_put),
};
/*
 * One-time init for the secondary MI2S RX backend: speaker-amp GPIO,
 * MBHC headset detection, DAPM widgets and the card's mixer controls.
 *
 * NOTE(review): the exit path frees ext_spk_amp_gpio even on success,
 * yet msm_ext_spkramp_event() keeps driving that GPIO afterwards —
 * confirm the gpio_free here is intended.  Also the early "return ret"
 * after snd_soc_add_codec_controls() bypasses that free entirely.
 */
static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	int ret = 0;

	pr_debug("%s(),dev_name%s\n", __func__, dev_name(cpu_dai->dev));

	msm8x10_ext_spk_power_amp_init();

	mbhc_cfg.calibration = def_msm8x10_wcd_mbhc_cal();
	if (mbhc_cfg.calibration) {
		ret = msm8x10_wcd_hs_detect(codec, &mbhc_cfg);
		if (ret) {
			pr_err("%s: msm8x10_wcd_hs_detect failed\n", __func__);
			goto exit;
		}
	} else {
		/* calibration blob allocation failed */
		ret = -ENOMEM;
		goto exit;
	}

	snd_soc_dapm_new_controls(dapm, msm8x10_dapm_widgets,
				ARRAY_SIZE(msm8x10_dapm_widgets));

	snd_soc_dapm_enable_pin(dapm, "Lineout amp");

	snd_soc_dapm_sync(dapm);

	ret = snd_soc_add_codec_controls(codec, msm_snd_controls,
				ARRAY_SIZE(msm_snd_controls));
	if (ret < 0)
		return ret;

exit:
	if (gpio_is_valid(ext_spk_amp_gpio))
		gpio_free(ext_spk_amp_gpio);

	return ret;
}
/*
 * Build the default MBHC calibration blob consumed by the wcd9xxx MBHC
 * core: general timings, plug detect, plug-type thresholds, button
 * detect parameters and the 8-button voltage ranges.
 *
 * Returns a kzalloc'd buffer (ownership passes to the MBHC core via
 * mbhc_cfg.calibration), or NULL on allocation failure.
 */
static void *def_msm8x10_wcd_mbhc_cal(void)
{
	void *msm8x10_wcd_cal;
	struct wcd9xxx_mbhc_btn_detect_cfg *btn_cfg;
	u16 *btn_low, *btn_high;
	u8 *n_ready, *n_cic, *gain;

	msm8x10_wcd_cal = kzalloc(WCD9XXX_MBHC_CAL_SIZE(
				WCD9XXX_MBHC_DEF_BUTTONS,
				WCD9XXX_MBHC_DEF_RLOADS),
				GFP_KERNEL);
	if (!msm8x10_wcd_cal) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}

	/* general calibration section */
#define S(X, Y) ((WCD9XXX_MBHC_CAL_GENERAL_PTR(msm8x10_wcd_cal)->X) = (Y))
	S(t_ldoh, 100);
	S(t_bg_fast_settle, 100);
	S(t_shutdown_plug_rem, 255);
	S(mbhc_nsa, 2);
	S(mbhc_navg, 128);
#undef S
	/* plug detect section */
#define S(X, Y) ((WCD9XXX_MBHC_CAL_PLUG_DET_PTR(msm8x10_wcd_cal)->X) = (Y))
	S(mic_current, MSM8X10_WCD_PID_MIC_5_UA);
	S(hph_current, MSM8X10_WCD_PID_MIC_5_UA);
	S(t_mic_pid, 100);
	S(t_ins_complete, 250);
	S(t_ins_retry, 200);
#undef S
	/* plug type thresholds (mV) */
#define S(X, Y) ((WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(msm8x10_wcd_cal)->X) = (Y))
	S(v_no_mic, 30);
	S(v_hs_max, 2550);
#undef S
	/* button detect section */
#define S(X, Y) ((WCD9XXX_MBHC_CAL_BTN_DET_PTR(msm8x10_wcd_cal)->X) = (Y))
	S(c[0], 62);
	S(c[1], 124);
	S(nc, 1);
	S(n_meas, 5);
	S(mbhc_nsc, 10);
	S(n_btn_meas, 1);
	S(n_btn_con, 2);
	S(num_btn, WCD9XXX_MBHC_DEF_BUTTONS);
	S(v_btn_press_delta_sta, 100);
	S(v_btn_press_delta_cic, 50);
#undef S
	/* per-button voltage windows (mV) for the 8 default buttons */
	btn_cfg = WCD9XXX_MBHC_CAL_BTN_DET_PTR(msm8x10_wcd_cal);
	btn_low = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_V_BTN_LOW);
	btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg,
					       MBHC_BTN_DET_V_BTN_HIGH);
	btn_low[0] = -50;
	btn_high[0] = 20;
	btn_low[1] = 21;
	btn_high[1] = 61;
	btn_low[2] = 62;
	btn_high[2] = 104;
	btn_low[3] = 105;
	btn_high[3] = 148;
	btn_low[4] = 149;
	btn_high[4] = 189;
	btn_low[5] = 190;
	btn_high[5] = 228;
	btn_low[6] = 229;
	btn_high[6] = 264;
	btn_low[7] = 265;
	btn_high[7] = 500;
	n_ready = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_N_READY);
	n_ready[0] = 80;
	n_ready[1] = 68;
	n_cic = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_N_CIC);
	n_cic[0] = 60;
	n_cic[1] = 47;
	gain = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_GAIN);
	gain[0] = 11;
	gain[1] = 14;

	return msm8x10_wcd_cal;
}
/*
 * AFE proxy RX backend fixup: pin 48 kHz and the configured proxy
 * channel count.
 *
 * The old "if (channels->max < 2)" clamp was dead code — its result
 * was unconditionally overwritten on the next line — so it has been
 * removed; behavior is unchanged.
 */
static int msm_proxy_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
					struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *channels = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_CHANNELS);

	pr_debug("%s: msm_proxy_rx_ch =%d\n", __func__, msm_proxy_rx_ch);

	channels->min = channels->max = msm_proxy_rx_ch;
	rate->min = rate->max = 48000;
	return 0;
}
/* AFE proxy TX backend fixup: pin the sample rate to 48 kHz. */
static int msm_proxy_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
					struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);

	rate->min = 48000;
	rate->max = 48000;
	return 0;
}
static struct snd_soc_ops msm8x10_mi2s_be_ops = {
.startup = msm_mi2s_snd_startup,
.hw_params = msm_mi2s_snd_hw_params,
.shutdown = msm_mi2s_snd_shutdown,
};
/* Digital audio interface glue - connects codec <---> CPU */
static struct snd_soc_dai_link msm8x10_dai[] = {
/* FrontEnd DAI Links */
{/* hw:x,0 */
.name = "MSM8X10 Media1",
.stream_name = "MultiMedia1",
.cpu_dai_name = "MultiMedia1",
.platform_name = "msm-pcm-dsp.0",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.ignore_suspend = 1,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA1
},
{/* hw:x,1 */
.name = "MSM8X10 Media2",
.stream_name = "MultiMedia2",
.cpu_dai_name = "MultiMedia2",
.platform_name = "msm-pcm-dsp.0",
.dynamic = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.ignore_suspend = 1,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA2,
},
{/* hw:x,2 */
.name = "Circuit-Switch Voice",
.stream_name = "CS-Voice",
.cpu_dai_name = "CS-VOICE",
.platform_name = "msm-pcm-voice",
.dynamic = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.be_id = MSM_FRONTEND_DAI_CS_VOICE,
},
{/* hw:x,3 */
.name = "MSM VoIP",
.stream_name = "VoIP",
.cpu_dai_name = "VoIP",
.platform_name = "msm-voip-dsp",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.ignore_suspend = 1,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.be_id = MSM_FRONTEND_DAI_VOIP,
},
{/* hw:x,4 */
.name = "MSM8X10 LPA",
.stream_name = "LPA",
.cpu_dai_name = "MultiMedia3",
.platform_name = "msm-pcm-lpa",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.ignore_suspend = 1,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA3,
},
/* Hostless PCM purpose */
{/* hw:x,5 */
.name = "Secondary MI2S RX Hostless",
.stream_name = "Secondary MI2S_RX Hostless Playback",
.cpu_dai_name = "SEC_MI2S_RX_HOSTLESS",
.platform_name = "msm-pcm-hostless",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* This dainlink has MI2S support */
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
{/* hw:x,6 */
.name = "INT_FM Hostless",
.stream_name = "INT_FM Hostless",
.cpu_dai_name = "INT_FM_HOSTLESS",
.platform_name = "msm-pcm-hostless",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
{/* hw:x,7 */
.name = "MSM AFE-PCM RX",
.stream_name = "AFE-PROXY RX",
.cpu_dai_name = "msm-dai-q6-dev.241",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-rx",
.platform_name = "msm-pcm-afe",
.ignore_suspend = 1,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
},
{/* hw:x,8 */
.name = "MSM AFE-PCM TX",
.stream_name = "AFE-PROXY TX",
.cpu_dai_name = "msm-dai-q6-dev.240",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-tx",
.platform_name = "msm-pcm-afe",
.ignore_suspend = 1,
},
{/* hw:x,9 */
.name = "MSM8X10 Compr",
.stream_name = "COMPR",
.cpu_dai_name = "MultiMedia4",
.platform_name = "msm-compress-dsp",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dainlink has playback support */
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA4,
},
{/* hw:x,10 */
.name = "AUXPCM Hostless",
.stream_name = "AUXPCM Hostless",
.cpu_dai_name = "AUXPCM_HOSTLESS",
.platform_name = "msm-pcm-hostless",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
{/* hw:x,11 */
.name = "Primary MI2S TX Hostless",
.stream_name = "Primary MI2S_TX Hostless Capture",
.cpu_dai_name = "PRI_MI2S_TX_HOSTLESS",
.platform_name = "msm-pcm-hostless",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* This dainlink has MI2S support */
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
{/* hw:x,12 */
.name = "MSM8x10 LowLatency",
.stream_name = "MultiMedia5",
.cpu_dai_name = "MultiMedia5",
.platform_name = "msm-pcm-dsp.1",
.dynamic = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.ignore_suspend = 1,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA5,
},
{/* hw:x,13 */
.name = "Voice2",
.stream_name = "Voice2",
.cpu_dai_name = "Voice2",
.platform_name = "msm-pcm-voice",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
{/* hw:x,14 */
.name = "QCHAT",
.stream_name = "QCHAT",
.cpu_dai_name = "QCHAT",
.platform_name = "msm-pcm-voice",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.be_id = MSM_FRONTEND_DAI_QCHAT,
},
{/* hw:x,15 */
.name = "MSM8X10 Media9",
.stream_name = "MultiMedia9",
.cpu_dai_name = "MultiMedia9",
.platform_name = "msm-pcm-dsp.0",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.ignore_suspend = 1,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA9
},
/* Backend I2S DAI Links */
{
.name = LPASS_BE_SEC_MI2S_RX,
.stream_name = "Secondary MI2S Playback",
.cpu_dai_name = "msm-dai-q6-mi2s.1",
.platform_name = "msm-pcm-routing",
.codec_name = MSM8X10_CODEC_NAME,
.codec_dai_name = "msm8x10_wcd_i2s_rx1",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
.init = &msm_audrx_init,
.be_hw_params_fixup = msm_rx_be_hw_params_fixup,
.ops = &msm8x10_mi2s_be_ops,
.ignore_suspend = 1,
},
{
.name = LPASS_BE_PRI_MI2S_TX,
.stream_name = "Primary MI2S Capture",
.cpu_dai_name = "msm-dai-q6-mi2s.0",
.platform_name = "msm-pcm-routing",
.codec_name = MSM8X10_CODEC_NAME,
.codec_dai_name = "msm8x10_wcd_i2s_tx1",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_PRI_MI2S_TX,
.be_hw_params_fixup = msm_tx_be_hw_params_fixup,
.ops = &msm8x10_mi2s_be_ops,
.ignore_suspend = 1,
},
{
.name = LPASS_BE_INT_BT_SCO_RX,
.stream_name = "Internal BT-SCO Playback",
.cpu_dai_name = "msm-dai-q6-dev.12288",
.platform_name = "msm-pcm-routing",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_INT_BT_SCO_RX,
.be_hw_params_fixup = msm_btsco_be_hw_params_fixup,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
},
{
.name = LPASS_BE_INT_BT_SCO_TX,
.stream_name = "Internal BT-SCO Capture",
.cpu_dai_name = "msm-dai-q6-dev.12289",
.platform_name = "msm-pcm-routing",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_INT_BT_SCO_TX,
.be_hw_params_fixup = msm_btsco_be_hw_params_fixup,
.ignore_suspend = 1,
},
{
.name = LPASS_BE_INT_FM_RX,
.stream_name = "Internal FM Playback",
.cpu_dai_name = "msm-dai-q6-dev.12292",
.platform_name = "msm-pcm-routing",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_INT_FM_RX,
.be_hw_params_fixup = msm_be_fm_hw_params_fixup,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
},
{
.name = LPASS_BE_INT_FM_TX,
.stream_name = "Internal FM Capture",
.cpu_dai_name = "msm-dai-q6-dev.12293",
.platform_name = "msm-pcm-routing",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_INT_FM_TX,
.be_hw_params_fixup = msm_be_hw_params_fixup,
.ignore_suspend = 1,
},
{
.name = LPASS_BE_AFE_PCM_RX,
.stream_name = "AFE Playback",
.cpu_dai_name = "msm-dai-q6-dev.224",
.platform_name = "msm-pcm-routing",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_AFE_PCM_RX,
.be_hw_params_fixup = msm_proxy_rx_be_hw_params_fixup,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
},
{
.name = LPASS_BE_AFE_PCM_TX,
.stream_name = "AFE Capture",
.cpu_dai_name = "msm-dai-q6-dev.225",
.platform_name = "msm-pcm-routing",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_AFE_PCM_TX,
.be_hw_params_fixup = msm_proxy_tx_be_hw_params_fixup,
.ignore_suspend = 1,
},
/* Incall Record Uplink BACK END DAI Link */
{
.name = LPASS_BE_INCALL_RECORD_TX,
.stream_name = "Voice Uplink Capture",
.cpu_dai_name = "msm-dai-q6-dev.32772",
.platform_name = "msm-pcm-routing",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX,
.be_hw_params_fixup = msm_be_hw_params_fixup,
.ignore_suspend = 1,
},
/* Incall Record Downlink BACK END DAI Link */
{
.name = LPASS_BE_INCALL_RECORD_RX,
.stream_name = "Voice Downlink Capture",
.cpu_dai_name = "msm-dai-q6-dev.32771",
.platform_name = "msm-pcm-routing",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX,
.be_hw_params_fixup = msm_be_hw_params_fixup,
.ignore_suspend = 1,
},
/* Incall Music BACK END DAI Link */
{
.name = LPASS_BE_VOICE_PLAYBACK_TX,
.stream_name = "Voice Farend Playback",
.cpu_dai_name = "msm-dai-q6-dev.32773",
.platform_name = "msm-pcm-routing",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
.be_hw_params_fixup = msm_be_hw_params_fixup,
.ignore_suspend = 1,
},
/* Incall Music 2 BACK END DAI Link */
{
.name = LPASS_BE_VOICE2_PLAYBACK_TX,
.stream_name = "Voice2 Farend Playback",
.cpu_dai_name = "msm-dai-q6-dev.32770",
.platform_name = "msm-pcm-routing",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
.be_hw_params_fixup = msm_be_hw_params_fixup,
.ignore_suspend = 1,
},
};
/* Top-level ASoC card: binds the DAI-link table above into one sound card. */
struct snd_soc_card snd_soc_card_msm8x10 = {
	.name = "msm8x10-snd-card",
	.dai_link = msm8x10_dai,
	.num_links = ARRAY_SIZE(msm8x10_dai),
};
/*
 * Machine-driver probe: parse the card name, audio routing and MBHC jack
 * properties from the device tree, map the codec clock control registers
 * and register the sound card.
 *
 * Returns 0 on success or a negative errno; -EPROBE_DEFER from
 * snd_soc_register_card() is propagated after tearing down local state.
 */
static __devinit int msm8x10_asoc_machine_probe(struct platform_device *pdev)
{
	struct snd_soc_card *card = &snd_soc_card_msm8x10;
	const char *mbhc_audio_jack_type = NULL;
	/* All three jack-type strings compared below have this same length,
	 * so a single bound works for every strncmp(). */
	size_t n = strlen("4-pole-jack");
	int ret;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (!pdev->dev.of_node) {
		dev_err(&pdev->dev, "No platform supplied from device tree\n");
		return -EINVAL;
	}
	card->dev = &pdev->dev;
	platform_set_drvdata(pdev, card);
	ret = snd_soc_of_parse_card_name(card, "qcom,model");
	if (ret)
		goto err;
	ret = snd_soc_of_parse_audio_routing(card,
			"qcom,audio-routing");
	if (ret)
		goto err;
	mutex_init(&cdc_mclk_mutex);
	/* Map the LPASS digital-codec clock branch and root-clock-generator
	 * registers; both are single 32-bit registers. */
	pcbcr = ioremap(MSM8X10_DINO_LPASS_DIGCODEC_CBCR, 4);
	if (!pcbcr) {
		ret = -ENOMEM;
		goto err1;
	}
	prcgr = ioremap(MSM8X10_DINO_LPASS_DIGCODEC_CMD_RCGR, 4);
	if (!prcgr) {
		ret = -ENOMEM;
		goto err1;
	}
	atomic_set(&mclk_rsc_ref, 0);
	/* Optional DT switches for the headset-jack wiring and MBHC bias. */
	mbhc_cfg.gpio_level_insert = of_property_read_bool(pdev->dev.of_node,
					"qcom,headset-jack-type-NC");
	mbhc_cfg.use_int_rbias = of_property_read_bool(pdev->dev.of_node,
					"qcom,mbhc-bias-internal");
	ret = of_property_read_string(pdev->dev.of_node,
		"qcom,mbhc-audio-jack-type", &mbhc_audio_jack_type);
	if (ret) {
		/* Property absent: fall back to a 4-pole jack, no ANC mic. */
		dev_dbg(&pdev->dev, "Looking up %s property in node %s failed",
			"qcom,mbhc-audio-jack-type",
			pdev->dev.of_node->full_name);
		mbhc_cfg.hw_jack_type = FOUR_POLE_JACK;
		mbhc_cfg.enable_anc_mic_detect = false;
		dev_dbg(&pdev->dev, "Jack type properties set to default");
	} else {
		if (!strncmp(mbhc_audio_jack_type, "4-pole-jack", n)) {
			mbhc_cfg.hw_jack_type = FOUR_POLE_JACK;
			mbhc_cfg.enable_anc_mic_detect = false;
			dev_dbg(&pdev->dev, "This hardware has 4 pole jack");
		} else if (!strncmp(mbhc_audio_jack_type, "5-pole-jack", n)) {
			mbhc_cfg.hw_jack_type = FIVE_POLE_JACK;
			mbhc_cfg.enable_anc_mic_detect = true;
			dev_dbg(&pdev->dev, "This hardware has 5 pole jack");
		} else if (!strncmp(mbhc_audio_jack_type, "6-pole-jack", n)) {
			mbhc_cfg.hw_jack_type = SIX_POLE_JACK;
			mbhc_cfg.enable_anc_mic_detect = true;
			dev_dbg(&pdev->dev, "This hardware has 6 pole jack");
		} else {
			mbhc_cfg.hw_jack_type = FOUR_POLE_JACK;
			mbhc_cfg.enable_anc_mic_detect = false;
			dev_dbg(&pdev->dev, "Unknown value, hence setting to default");
		}
	}
	spdev = pdev;
	ret = snd_soc_register_card(card);
	if (ret == -EPROBE_DEFER)
		goto err1;
	else if (ret) {
		dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
			ret);
		goto err1;
	}
	return 0;
err1:
	/* Unwind everything set up after the DT parsing succeeded. */
	mutex_destroy(&cdc_mclk_mutex);
	if (pcbcr)
		iounmap(pcbcr);
	if (prcgr)
		iounmap(prcgr);
err:
	return ret;
}
/*
 * Machine-driver remove: release the speaker-amp GPIO (if one was
 * claimed), unregister the card and undo the probe-time mappings.
 * The iounmap() calls are unconditional because probe only succeeds
 * after both regions were mapped.
 */
static int __devexit msm8x10_asoc_machine_remove(struct platform_device *pdev)
{
	struct snd_soc_card *card = platform_get_drvdata(pdev);

	if (gpio_is_valid(ext_spk_amp_gpio))
		gpio_free(ext_spk_amp_gpio);
	snd_soc_unregister_card(card);
	mutex_destroy(&cdc_mclk_mutex);
	iounmap(pcbcr);
	iounmap(prcgr);
	return 0;
}
/* Device-tree compatible string this machine driver binds against. */
static const struct of_device_id msm8x10_asoc_machine_of_match[] = {
	{ .compatible = "qcom,msm8x10-audio-codec", },
	{},
};
/* Platform driver glue; PM callbacks come from the common ASoC set. */
static struct platform_driver msm8x10_asoc_machine_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.pm = &snd_soc_pm_ops,
		.of_match_table = msm8x10_asoc_machine_of_match,
	},
	.probe = msm8x10_asoc_machine_probe,
	.remove = __devexit_p(msm8x10_asoc_machine_remove),
};
/* Boilerplate module registration and metadata. */
module_platform_driver(msm8x10_asoc_machine_driver);
MODULE_DESCRIPTION("ALSA SoC msm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_DEVICE_TABLE(of, msm8x10_asoc_machine_of_match);
| gpl-2.0 |
binkybear/furnace_kernel_lge_hammerhead | drivers/media/platform/msm/camera_v2/sensor/imx135.c | 273 | 4082 | /* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "msm_sensor.h"
/* Sensor name; used as both the I2C driver name and the i2c_device_id. */
#define IMX135_SENSOR_NAME "imx135"
/* Registered below as the sensor mutex in imx135_s_ctrl. */
DEFINE_MSM_MUTEX(imx135_mut);
/* Forward declaration; fully initialized at the bottom of the file. */
static struct msm_sensor_ctrl_t imx135_s_ctrl;
/*
 * Power-up sequence, executed in table order by the common msm_sensor
 * code: regulators first, then reset/standby GPIO toggles, then the
 * master clock and the I2C mux.  The delay fields are in milliseconds
 * (presumably — TODO confirm against msm_sensor power-sequence code).
 */
static struct msm_sensor_power_setting imx135_power_setting[] = {
	{
		.seq_type = SENSOR_VREG,
		.seq_val = CAM_VDIG,
		.config_val = 0,
		.delay = 0,
	},
	{
		.seq_type = SENSOR_VREG,
		.seq_val = CAM_VANA,
		.config_val = 0,
		.delay = 0,
	},
	{
		.seq_type = SENSOR_VREG,
		.seq_val = CAM_VIO,
		.config_val = 0,
		.delay = 0,
	},
	{
		.seq_type = SENSOR_VREG,
		.seq_val = CAM_VAF,
		.config_val = 0,
		.delay = 0,
	},
	/* Pulse reset low then high, allowing the sensor time to come up. */
	{
		.seq_type = SENSOR_GPIO,
		.seq_val = SENSOR_GPIO_RESET,
		.config_val = GPIO_OUT_LOW,
		.delay = 1,
	},
	{
		.seq_type = SENSOR_GPIO,
		.seq_val = SENSOR_GPIO_RESET,
		.config_val = GPIO_OUT_HIGH,
		.delay = 30,
	},
	{
		.seq_type = SENSOR_GPIO,
		.seq_val = SENSOR_GPIO_STANDBY,
		.config_val = GPIO_OUT_LOW,
		.delay = 1,
	},
	{
		.seq_type = SENSOR_GPIO,
		.seq_val = SENSOR_GPIO_STANDBY,
		.config_val = GPIO_OUT_HIGH,
		.delay = 30,
	},
	{
		.seq_type = SENSOR_CLK,
		.seq_val = SENSOR_CAM_MCLK,
		.config_val = 0,
		.delay = 1,
	},
	{
		.seq_type = SENSOR_I2C_MUX,
		.seq_val = 0,
		.config_val = 0,
		.delay = 0,
	},
};
/* V4L2 subdev format: 10-bit Bayer (BGGR) on a single lane entry. */
static struct v4l2_subdev_info imx135_subdev_info[] = {
	{
		.code = V4L2_MBUS_FMT_SBGGR10_1X10,
		.colorspace = V4L2_COLORSPACE_JPEG,
		.fmt = 1,
		.order = 0,
	},
};
/* I2C id table; driver_data carries the per-sensor control structure. */
static const struct i2c_device_id imx135_i2c_id[] = {
	{IMX135_SENSOR_NAME, (kernel_ulong_t)&imx135_s_ctrl},
	{ }
};
/* I2C probe: delegate to the common msm_sensor probe with our ctrl data. */
static int32_t msm_imx135_i2c_probe(struct i2c_client *client,
	const struct i2c_device_id *id)
{
	return msm_sensor_i2c_probe(client, id, &imx135_s_ctrl);
}
/* I2C driver; only used when the platform/OF probe path is unavailable. */
static struct i2c_driver imx135_i2c_driver = {
	.id_table = imx135_i2c_id,
	.probe  = msm_imx135_i2c_probe,
	.driver = {
		.name = IMX135_SENSOR_NAME,
	},
};
/* Sensor register access uses 16-bit (word) register addresses. */
static struct msm_camera_i2c_client imx135_sensor_i2c_client = {
	.addr_type = MSM_CAMERA_I2C_WORD_ADDR,
};
/* Device-tree match table; .data points at the sensor control structure. */
static const struct of_device_id imx135_dt_match[] = {
	{.compatible = "qcom,imx135", .data = &imx135_s_ctrl},
	{}
};

MODULE_DEVICE_TABLE(of, imx135_dt_match);
/* Platform driver; .probe is supplied via platform_driver_probe() below. */
static struct platform_driver imx135_platform_driver = {
	.driver = {
		.name = "qcom,imx135",
		.owner = THIS_MODULE,
		.of_match_table = imx135_dt_match,
	},
};
static int32_t imx135_platform_probe(struct platform_device *pdev)
{
int32_t rc = 0;
const struct of_device_id *match;
match = of_match_device(imx135_dt_match, &pdev->dev);
rc = msm_sensor_platform_probe(pdev, match->data);
return rc;
}
/*
 * Module init: try the platform/OF registration path first; if that
 * fails, log the error and fall back to plain I2C registration.
 */
static int __init imx135_init_module(void)
{
	int32_t status;

	pr_info("%s:%d\n", __func__, __LINE__);
	status = platform_driver_probe(&imx135_platform_driver,
				       imx135_platform_probe);
	if (status == 0)
		return 0;
	pr_err("%s:%d rc %d\n", __func__, __LINE__, status);
	return i2c_add_driver(&imx135_i2c_driver);
}
/*
 * Module exit: tear down whichever registration path succeeded.  A
 * non-NULL pdev means the platform probe bound the device; otherwise
 * the I2C driver was registered.
 */
static void __exit imx135_exit_module(void)
{
	pr_info("%s:%d\n", __func__, __LINE__);
	if (!imx135_s_ctrl.pdev) {
		i2c_del_driver(&imx135_i2c_driver);
		return;
	}
	msm_sensor_free_sensor_data(&imx135_s_ctrl);
	platform_driver_unregister(&imx135_platform_driver);
}
/* Sensor control block handed to the common msm_sensor framework. */
static struct msm_sensor_ctrl_t imx135_s_ctrl = {
	.sensor_i2c_client = &imx135_sensor_i2c_client,
	.power_setting_array.power_setting = imx135_power_setting,
	.power_setting_array.size = ARRAY_SIZE(imx135_power_setting),
	.msm_sensor_mutex = &imx135_mut,
	.sensor_v4l2_subdev_info = imx135_subdev_info,
	.sensor_v4l2_subdev_info_size = ARRAY_SIZE(imx135_subdev_info),
};
/* Module entry points and metadata. */
module_init(imx135_init_module);
module_exit(imx135_exit_module);
MODULE_DESCRIPTION("imx135");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
paxchristos/Semc-ICS-kernel | drivers/isdn/hisax/bkm_a8.c | 785 | 11765 | /* $Id: bkm_a8.c,v 1.22.2.4 2004/01/15 14:02:34 keil Exp $
*
* low level stuff for Scitel Quadro (4*S0, passive)
*
* Author Roland Klabunde
* Copyright by Roland Klabunde <R.Klabunde@Berkom.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/init.h>
#include "hisax.h"
#include "isac.h"
#include "ipac.h"
#include "hscx.h"
#include "isdnl1.h"
#include <linux/pci.h>
#include "bkm_ax.h"
#define ATTEMPT_PCI_REMAPPING	/* Required for PLX rev 1 */

static const char sct_quadro_revision[] = "$Revision: 1.22.2.4 $";

/* Printable names for the four sub-controllers; indexed by cs->subtyp. */
static const char *sct_quadro_subtypes[] =
{
	"",
	"#1",
	"#2",
	"#3",
	"#4"
};

/* All card registers are accessed as 16-bit I/O words. */
#define wordout(addr,val) outw(val,addr)
#define wordin(addr) inw(addr)
/*
 * Read one chip register: latch the register offset into the ALE
 * (address-latch) port, then read the data port.  Only the low byte
 * of the 16-bit word carries register data.
 *
 * Idiom cleanup: the obsolete `register` storage class and the
 * intermediate variable added nothing.
 */
static inline u_char
readreg(unsigned int ale, unsigned int adr, u_char off)
{
	wordout(ale, off);
	return wordin(adr) & 0xFF;
}
/*
 * Burst-read `size` bytes from the chip FIFO: latch the FIFO offset
 * once, then repeatedly read the data port (the chip auto-advances).
 */
static inline void
readfifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
	int n;

	wordout(ale, off);
	for (n = 0; n < size; n++)
		data[n] = wordin(adr) & 0xFF;
}
/* Write one chip register: latch the offset, then write the data word. */
static inline void
writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
{
	wordout(ale, off);
	wordout(adr, data);
}
/*
 * Burst-write `size` bytes to the chip FIFO: latch the FIFO offset
 * once, then stream the bytes through the data port.
 */
static inline void
writefifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
	int n;

	wordout(ale, off);
	for (n = 0; n < size; n++)
		wordout(adr, data[n]);
}
/* Interface functions */
/* ISAC registers sit at offset 0x80 in the IPAC address map. */
static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
	return (readreg(cs->hw.ax.base, cs->hw.ax.data_adr, offset | 0x80));
}
/* Write an ISAC register (offset 0x80 in the IPAC address map). */
static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
	writereg(cs->hw.ax.base, cs->hw.ax.data_adr, offset | 0x80, value);
}
/* Read `size` bytes from the ISAC receive FIFO (base offset 0x80). */
static void
ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	readfifo(cs->hw.ax.base, cs->hw.ax.data_adr, 0x80, data, size);
}
/* Write `size` bytes to the ISAC transmit FIFO (base offset 0x80). */
static void
WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	writefifo(cs->hw.ax.base, cs->hw.ax.data_adr, 0x80, data, size);
}
/* Read an HSCX register; channel B registers are offset by 0x40. */
static u_char
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
{
	return (readreg(cs->hw.ax.base, cs->hw.ax.data_adr, offset + (hscx ? 0x40 : 0)));
}
/* Write an HSCX register; channel B registers are offset by 0x40. */
static void
WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
{
	writereg(cs->hw.ax.base, cs->hw.ax.data_adr, offset + (hscx ? 0x40 : 0), value);
}
/* Set the specific ipac to active */
/*
 * Enable or disable this IPAC's interrupt sources via its mask
 * register: 0xc0 unmasks the useful sources, 0xff masks everything.
 */
static void
set_ipac_active(struct IsdnCardState *cs, u_int active)
{
	/* set irq mask */
	writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK,
		 active ? 0xc0 : 0xff);
}
/*
 * Fast-path register/FIFO accessors required by the shared HSCX
 * interrupt code (hscx_irq.c is textually included below and expands
 * these macros).  `nr` selects channel A (0) or B (0x40 offset).
 */
#define READHSCX(cs, nr, reg) readreg(cs->hw.ax.base, \
		cs->hw.ax.data_adr, reg + (nr ? 0x40 : 0))
#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.ax.base, \
		cs->hw.ax.data_adr, reg + (nr ? 0x40 : 0), data)
#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.ax.base, \
		cs->hw.ax.data_adr, (nr ? 0x40 : 0), ptr, cnt)
#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.ax.base, \
		cs->hw.ax.data_adr, (nr ? 0x40 : 0), ptr, cnt)

#include "hscx_irq.c"
/*
 * Shared interrupt handler for one IPAC sub-controller.  Reads the
 * combined interrupt status (ISTA), dispatches HSCX and ISAC events,
 * and re-reads ISTA up to 5 more times to drain back-to-back events
 * before re-arming the mask.
 */
static irqreturn_t
bkm_interrupt_ipac(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	u_char ista, val, icnt = 5;
	u_long flags;

	spin_lock_irqsave(&cs->lock, flags);
	ista = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ISTA);
	if (!(ista & 0x3f)) { /* not this IPAC */
		spin_unlock_irqrestore(&cs->lock, flags);
		return IRQ_NONE;
	}
      Start_IPAC:
	if (cs->debug & L1_DEB_IPAC)
		debugl1(cs, "IPAC ISTA %02X", ista);
	if (ista & 0x0f) {
		/* One of the HSCX sources fired: fold the per-channel ISTA
		 * bits into the value expected by hscx_int_main(). */
		val = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, HSCX_ISTA + 0x40);
		if (ista & 0x01)
			val |= 0x01;
		if (ista & 0x04)
			val |= 0x02;
		if (ista & 0x08)
			val |= 0x04;
		if (val) {
			hscx_int_main(cs, val);
		}
	}
	if (ista & 0x20) {
		/* ISAC interrupt; bit 0 is handled separately below. */
		val = 0xfe & readreg(cs->hw.ax.base, cs->hw.ax.data_adr, ISAC_ISTA | 0x80);
		if (val) {
			isac_interrupt(cs, val);
		}
	}
	if (ista & 0x10) {
		val = 0x01;
		isac_interrupt(cs, val);
	}
	ista = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ISTA);
	if ((ista & 0x3f) && icnt) {
		icnt--;
		goto Start_IPAC;
	}
	if (!icnt)
		printk(KERN_WARNING "HiSax: Scitel Quadro (%s) IRQ LOOP\n",
		       sct_quadro_subtypes[cs->subtyp]);
	/* Mask-then-unmask acts as an interrupt acknowledge cycle. */
	writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK, 0xFF);
	writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK, 0xC0);
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
/*
 * Release the I/O regions claimed in setup_sct_quadro().  Every
 * subtype owns a 128-byte IPAC window (aligned mask 0xffffffc0);
 * only the first subtype additionally owns the 64-byte PLX window.
 */
static void
release_io_sct_quadro(struct IsdnCardState *cs)
{
	release_region(cs->hw.ax.base & 0xffffffc0, 128);
	if (cs->subtyp == SCT_1)
		release_region(cs->hw.ax.plx_adr, 64);
}
/*
 * Toggle the card-level interrupt enable bits (0x41) in the PLX
 * interrupt control/status register at offset 0x4C.  No-op for other
 * card types.
 */
static void
enable_bkm_int(struct IsdnCardState *cs, unsigned bEnable)
{
	unsigned int intcsr;

	if (cs->typ != ISDN_CTYPE_SCT_QUADRO)
		return;
	intcsr = wordin(cs->hw.ax.plx_adr + 0x4C);
	if (bEnable)
		intcsr |= 0x41;
	else
		intcsr &= ~0x41;
	wordout(cs->hw.ax.plx_adr + 0x4C, intcsr);
}
/*
 * Pulse the soft-reset bit (bit 2 of PLX register 0x50): assert it low
 * for 10 ms, then release it and wait another 10 ms.  Only done on the
 * first sub-controller, which owns the shared PLX bridge.
 */
static void
reset_bkm(struct IsdnCardState *cs)
{
	if (cs->subtyp == SCT_1) {
		wordout(cs->hw.ax.plx_adr + 0x50, (wordin(cs->hw.ax.plx_adr + 0x50) & ~4));
		mdelay(10);
		/* Remove the soft reset */
		wordout(cs->hw.ax.plx_adr + 0x50, (wordin(cs->hw.ax.plx_adr + 0x50) | 4));
		mdelay(10);
	}
}
/*
 * Card control dispatcher invoked by the HiSax core.  All hardware
 * manipulation happens under the card spinlock; CARD_RELEASE frees the
 * I/O regions after dropping the lock.
 */
static int
BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	u_long flags;

	switch (mt) {
	case CARD_RESET:
		spin_lock_irqsave(&cs->lock, flags);
		/* Disable ints */
		set_ipac_active(cs, 0);
		enable_bkm_int(cs, 0);
		reset_bkm(cs);
		spin_unlock_irqrestore(&cs->lock, flags);
		return (0);
	case CARD_RELEASE:
		/* Sanity */
		spin_lock_irqsave(&cs->lock, flags);
		set_ipac_active(cs, 0);
		enable_bkm_int(cs, 0);
		spin_unlock_irqrestore(&cs->lock, flags);
		release_io_sct_quadro(cs);
		return (0);
	case CARD_INIT:
		spin_lock_irqsave(&cs->lock, flags);
		cs->debug |= L1_DEB_IPAC;
		set_ipac_active(cs, 1);
		inithscxisac(cs, 3);
		/* Enable ints */
		enable_bkm_int(cs, 1);
		spin_unlock_irqrestore(&cs->lock, flags);
		return (0);
	case CARD_TEST:
		return (0);
	}
	return (0);
}
/*
 * Reserve an I/O port range for the card.
 *
 * Returns 0 on success, 1 if the range is already claimed (the
 * conflicting range is logged).
 */
static int __devinit
sct_alloc_io(u_int adr, u_int len)
{
	if (!request_region(adr, len, "scitel")) {
		/* The reserved region spans [adr, adr + len - 1]; the old
		 * message printed one port past the end of the range. */
		printk(KERN_WARNING
		       "HiSax: Scitel port %#x-%#x already in use\n",
		       adr, adr + len - 1);
		return (1);
	}
	return (0);
}
/*
 * PCI probe state shared across the four setup_sct_quadro() calls:
 * the first sub-controller locates the PLX device and records its
 * identity here; later sub-controllers reuse it.
 */
static struct pci_dev *dev_a8 __devinitdata = NULL;
static u16 sub_vendor_id __devinitdata = 0;
static u16 sub_sys_id __devinitdata = 0;
static u_char pci_bus __devinitdata = 0;
static u_char pci_device_fn __devinitdata = 0;
static u_char pci_irq __devinitdata = 0;
/*
 * Probe and configure one Scitel Quadro sub-controller (card->para[0]
 * selects 1..4).  Sub-controller 1 finds the PLX 9050 bridge on the
 * PCI bus, optionally remaps BAR1 (PLX rev-1 erratum), and masks all
 * four IPACs; later sub-controllers reuse the PCI identity cached in
 * the file-scope variables above.
 *
 * Returns 1 on success, 0 on any failure (HiSax setup convention).
 */
int __devinit
setup_sct_quadro(struct IsdnCard *card)
{
	struct IsdnCardState *cs = card->cs;
	char tmp[64];
	u_int found = 0;
	u_int pci_ioaddr1, pci_ioaddr2, pci_ioaddr3, pci_ioaddr4, pci_ioaddr5;

	strcpy(tmp, sct_quadro_revision);
	printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
	if (cs->typ == ISDN_CTYPE_SCT_QUADRO) {
		cs->subtyp = SCT_1;	/* Preset */
	} else
		return (0);

	/* Identify subtype by para[0] */
	if (card->para[0] >= SCT_1 && card->para[0] <= SCT_4)
		cs->subtyp = card->para[0];
	else {
		printk(KERN_WARNING "HiSax: Scitel Quadro: Invalid "
		       "subcontroller in configuration, default to 1\n");
		return (0);
	}
	/* Sub-controllers 2..4 require that subtype 1 already identified
	 * the Berkom PLX device and cached its IDs. */
	if ((cs->subtyp != SCT_1) && ((sub_sys_id != PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO) ||
				      (sub_vendor_id != PCI_VENDOR_ID_BERKOM)))
		return (0);
	if (cs->subtyp == SCT_1) {
		while ((dev_a8 = pci_find_device(PCI_VENDOR_ID_PLX,
						 PCI_DEVICE_ID_PLX_9050, dev_a8))) {

			sub_vendor_id = dev_a8->subsystem_vendor;
			sub_sys_id = dev_a8->subsystem_device;
			if ((sub_sys_id == PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO) &&
			    (sub_vendor_id == PCI_VENDOR_ID_BERKOM)) {
				if (pci_enable_device(dev_a8))
					return (0);
				pci_ioaddr1 = pci_resource_start(dev_a8, 1);
				pci_irq = dev_a8->irq;
				pci_bus = dev_a8->bus->number;
				pci_device_fn = dev_a8->devfn;
				found = 1;
				break;
			}
		}
		if (!found) {
			printk(KERN_WARNING "HiSax: Scitel Quadro (%s): "
			       "Card not found\n",
			       sct_quadro_subtypes[cs->subtyp]);
			return (0);
		}
#ifdef ATTEMPT_PCI_REMAPPING
/* HACK: PLX revision 1 bug: PLX address bit 7 must not be set */
		if ((pci_ioaddr1 & 0x80) && (dev_a8->revision == 1)) {
			printk(KERN_WARNING "HiSax: Scitel Quadro (%s): "
			       "PLX rev 1, remapping required!\n",
			       sct_quadro_subtypes[cs->subtyp]);
			/* Restart PCI negotiation */
			pci_write_config_dword(dev_a8, PCI_BASE_ADDRESS_1, (u_int) - 1);
			/* Move up by 0x80 byte */
			pci_ioaddr1 += 0x80;
			pci_ioaddr1 &= PCI_BASE_ADDRESS_IO_MASK;
			pci_write_config_dword(dev_a8, PCI_BASE_ADDRESS_1, pci_ioaddr1);
			dev_a8->resource[ 1].start = pci_ioaddr1;
		}
#endif /* End HACK */
	}
	if (!pci_irq) {		/* IRQ range check ?? */
		printk(KERN_WARNING "HiSax: Scitel Quadro (%s): No IRQ\n",
		       sct_quadro_subtypes[cs->subtyp]);
		return (0);
	}
	/* Re-read all five BARs; the remap hack above may have changed BAR1. */
	pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_1, &pci_ioaddr1);
	pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_2, &pci_ioaddr2);
	pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_3, &pci_ioaddr3);
	pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_4, &pci_ioaddr4);
	pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_5, &pci_ioaddr5);
	if (!pci_ioaddr1 || !pci_ioaddr2 || !pci_ioaddr3 || !pci_ioaddr4 || !pci_ioaddr5) {
		printk(KERN_WARNING "HiSax: Scitel Quadro (%s): "
		       "No IO base address(es)\n",
		       sct_quadro_subtypes[cs->subtyp]);
		return (0);
	}
	pci_ioaddr1 &= PCI_BASE_ADDRESS_IO_MASK;
	pci_ioaddr2 &= PCI_BASE_ADDRESS_IO_MASK;
	pci_ioaddr3 &= PCI_BASE_ADDRESS_IO_MASK;
	pci_ioaddr4 &= PCI_BASE_ADDRESS_IO_MASK;
	pci_ioaddr5 &= PCI_BASE_ADDRESS_IO_MASK;
	/* Take over */
	cs->irq = pci_irq;
	cs->irq_flags |= IRQF_SHARED;
	/* pci_ioaddr1 is unique to all subdevices */
	/* pci_ioaddr2 is for the fourth subdevice only */
	/* pci_ioaddr3 is for the third subdevice only */
	/* pci_ioaddr4 is for the second subdevice only */
	/* pci_ioaddr5 is for the first subdevice only */
	cs->hw.ax.plx_adr = pci_ioaddr1;
	/* Enter all ipac_base addresses */
	switch (cs->subtyp) {
	case 1:
		cs->hw.ax.base = pci_ioaddr5 + 0x00;
		if (sct_alloc_io(pci_ioaddr1, 128))
			return (0);
		if (sct_alloc_io(pci_ioaddr5, 64))
			return (0);
		/* disable all IPAC */
		writereg(pci_ioaddr5, pci_ioaddr5 + 4,
			 IPAC_MASK, 0xFF);
		writereg(pci_ioaddr4 + 0x08, pci_ioaddr4 + 0x0c,
			 IPAC_MASK, 0xFF);
		writereg(pci_ioaddr3 + 0x10, pci_ioaddr3 + 0x14,
			 IPAC_MASK, 0xFF);
		writereg(pci_ioaddr2 + 0x20, pci_ioaddr2 + 0x24,
			 IPAC_MASK, 0xFF);
		break;
	case 2:
		cs->hw.ax.base = pci_ioaddr4 + 0x08;
		if (sct_alloc_io(pci_ioaddr4, 64))
			return (0);
		break;
	case 3:
		cs->hw.ax.base = pci_ioaddr3 + 0x10;
		if (sct_alloc_io(pci_ioaddr3, 64))
			return (0);
		break;
	case 4:
		cs->hw.ax.base = pci_ioaddr2 + 0x20;
		if (sct_alloc_io(pci_ioaddr2, 64))
			return (0);
		break;
	}
	/* For isac and hscx data path */
	cs->hw.ax.data_adr = cs->hw.ax.base + 4;

	printk(KERN_INFO "HiSax: Scitel Quadro (%s) configured at "
	       "0x%.4lX, 0x%.4lX, 0x%.4lX and IRQ %d\n",
	       sct_quadro_subtypes[cs->subtyp],
	       cs->hw.ax.plx_adr,
	       cs->hw.ax.base,
	       cs->hw.ax.data_adr,
	       cs->irq);

	test_and_set_bit(HW_IPAC, &cs->HW_Flags);

	cs->readisac = &ReadISAC;
	cs->writeisac = &WriteISAC;
	cs->readisacfifo = &ReadISACfifo;
	cs->writeisacfifo = &WriteISACfifo;

	cs->BC_Read_Reg = &ReadHSCX;
	cs->BC_Write_Reg = &WriteHSCX;
	cs->BC_Send_Data = &hscx_fill_fifo;
	cs->cardmsg = &BKM_card_msg;
	cs->irq_func = &bkm_interrupt_ipac;

	printk(KERN_INFO "HiSax: Scitel Quadro (%s): IPAC Version %d\n",
	       sct_quadro_subtypes[cs->subtyp],
	       readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ID));
	return (1);
}
| gpl-2.0 |
ddilber/telegrauq7-linux | fs/ext2/inode.c | 1297 | 45888 | /*
* linux/fs/ext2/inode.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/inode.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Goal-directed block allocation by Stephen Tweedie
* (sct@dcs.ed.ac.uk), 1993, 1998
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
* 64-bit file support on 64-bit platforms by Jakub Jelinek
* (jj@sunsite.ms.mff.cuni.cz)
*
* Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
*/
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/fiemap.h>
#include <linux/namei.h>
#include <linux/aio.h>
#include "ext2.h"
#include "acl.h"
#include "xip.h"
#include "xattr.h"
static int __ext2_write_inode(struct inode *inode, int do_sync);
/*
* Test whether an inode is a fast symlink.
*/
static inline int ext2_inode_is_fast_symlink(struct inode *inode)
{
	/* An xattr block, if present, accounts for one fs block of
	 * i_blocks (counted in 512-byte sectors); subtract it so a
	 * symlink whose only blocks are xattrs still counts as fast. */
	int ea_blocks = EXT2_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	/* Fast symlinks store the target inside the inode: no data blocks. */
	return (S_ISLNK(inode->i_mode) &&
		inode->i_blocks - ea_blocks == 0);
}
static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
/*
 * Undo a partially-failed write that extended past i_size: drop the
 * page-cache pages and free any blocks allocated beyond the old size.
 */
static void ext2_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, to, inode->i_size);
		ext2_truncate_blocks(inode, inode->i_size);
	}
}
/*
 * Called at the last iput() if i_nlink is zero: stamp dtime, truncate
 * the file to zero, delete xattrs and free the on-disk inode.  For a
 * still-linked inode this only drops the page cache, buffers and any
 * block reservation.
 */
void ext2_evict_inode(struct inode * inode)
{
	struct ext2_block_alloc_info *rsv;
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		want_delete = 1;
		dquot_initialize(inode);
	} else {
		dquot_drop(inode);
	}

	truncate_inode_pages(&inode->i_data, 0);

	if (want_delete) {
		sb_start_intwrite(inode->i_sb);
		/* set dtime */
		EXT2_I(inode)->i_dtime	= get_seconds();
		mark_inode_dirty(inode);
		__ext2_write_inode(inode, inode_needs_sync(inode));
		/* truncate to 0 */
		inode->i_size = 0;
		if (inode->i_blocks)
			ext2_truncate_blocks(inode, 0);
		ext2_xattr_delete_inode(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	/* Detach and free the per-inode reservation window, if any. */
	ext2_discard_reservation(inode);
	rsv = EXT2_I(inode)->i_block_alloc_info;
	EXT2_I(inode)->i_block_alloc_info = NULL;
	if (unlikely(rsv))
		kfree(rsv);

	if (want_delete) {
		ext2_free_inode(inode);
		sb_end_intwrite(inode->i_sb);
	}
}
/*
 * One step in an indirect-block chain: `p` points at the block-number
 * slot (in the inode or an indirect block), `key` caches its value at
 * read time, and `bh` holds the indirect block (NULL for the inode).
 */
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;
/* Record one chain step: remember the slot, its current value and bh. */
static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->p = v;
	p->key = *v;
	p->bh = bh;
}
/*
 * Check that no slot in the chain changed since it was read: each
 * cached key must still equal the value in its slot.  Returns 1 if
 * the whole chain is unchanged, 0 otherwise.
 */
static inline int verify_chain(Indirect *from, Indirect *to)
{
	for (; from <= to; from++)
		if (from->key != *from->p)
			return 0;
	return 1;
}
/**
* ext2_block_to_path - parse the block number into array of offsets
* @inode: inode in question (we are only interested in its superblock)
* @i_block: block number to be parsed
* @offsets: array to store the offsets in
* @boundary: set this non-zero if the referred-to block is likely to be
* followed (on disk) by an indirect block.
* To store the locations of file's data ext2 uses a data structure common
* for UNIX filesystems - tree of pointers anchored in the inode, with
* data blocks at leaves and indirect blocks in intermediate nodes.
* This function translates the block number into path in that tree -
* return value is the path length and @offsets[n] is the offset of
* pointer to (n+1)th node in the nth one. If @block is out of range
* (negative or too large) warning is printed and zero returned.
*
* Note: function doesn't find node addresses, so no IO is needed. All
* we need to know is the capacity of indirect blocks (taken from the
* inode->i_sb).
*/
/*
* Portability note: the last comparison (check that we fit into triple
* indirect block) is spelled differently, because otherwise on an
* architecture with 32-bit longs and 8Kb pages we might get into trouble
* if our filesystem had 8Kb blocks. We might use long long, but that would
* kill us on x86. Oh, well, at least the sign propagation does not matter -
* i_block would have to be negative in the very beginning, so we would not
* get there at all.
*/
static int ext2_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT2_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block < 0", __func__);
	} else if (i_block < direct_blocks) {
		/* Direct block: single offset into i_data. */
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		/* Single indirect: i_data slot, then offset in that block. */
		offsets[n++] = EXT2_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		/* Double indirect: split remaining index into two levels. */
		offsets[n++] = EXT2_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		/* Triple indirect: three levels below the i_data slot. */
		offsets[n++] = EXT2_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block is too big", __func__);
	}
	if (boundary)
		/* Distance to the end of the last-level block, minus one. */
		*boundary = final - 1 - (i_block & (ptrs - 1));

	return n;
}
/**
* ext2_get_branch - read the chain of indirect blocks leading to data
* @inode: inode in question
* @depth: depth of the chain (1 - direct pointer, etc.)
* @offsets: offsets of pointers in inode/indirect blocks
* @chain: place to store the result
* @err: here we store the error value
*
* Function fills the array of triples <key, p, bh> and returns %NULL
* if everything went OK or the pointer to the last filled triple
* (incomplete one) otherwise. Upon the return chain[i].key contains
* the number of (i+1)-th block in the chain (as it is stored in memory,
* i.e. little-endian 32-bit), chain[i].p contains the address of that
* number (it points into struct inode for i==0 and into the bh->b_data
* for i>0) and chain[i].bh points to the buffer_head of i-th indirect
* block for i>0 and NULL for i==0. In other words, it holds the block
* numbers of the chain, addresses they were taken from (and where we can
* verify that chain did not change) and buffer_heads hosting these
* numbers.
*
* Function stops when it stumbles upon zero pointer (absent block)
* (pointer to last triple returned, *@err == 0)
* or when it gets an IO error reading an indirect block
* (ditto, *@err == -EIO)
* or when it notices that chain had been changed while it was reading
* (ditto, *@err == -EAGAIN)
* or when it reads all @depth-1 indirect blocks successfully and finds
* the whole chain, all way to the data (returns %NULL, *err == 0).
*/
static Indirect *ext2_get_branch(struct inode *inode,
				 int depth,
				 int *offsets,
				 Indirect chain[4],
				 int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, EXT2_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Re-check the chain under i_meta_lock: a concurrent
		 * truncate may have changed it while we slept in sb_bread. */
		read_lock(&EXT2_I(inode)->i_meta_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		read_unlock(&EXT2_I(inode)->i_meta_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&EXT2_I(inode)->i_meta_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
/**
* ext2_find_near - find a place for allocation with sufficient locality
* @inode: owner
* @ind: descriptor of indirect block.
*
* This function returns the preferred place for block allocation.
* It is used when heuristic for sequential allocation fails.
* Rules are:
* + if there is a block to the left of our position - allocate near it.
* + if pointer will live in indirect block - allocate near that block.
* + if pointer will live in inode - allocate in the same cylinder group.
*
* In the latter case we colour the starting block by the callers PID to
* prevent it from clashing with concurrent allocations for a different inode
* in the same block group. The PID is used here so that functionally related
* files will be close-by on-disk.
*
* Caller must make sure that @ind is valid and will stay that way.
*/
static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext2_fsblk_t bg_start;
	ext2_fsblk_t colour;

	/* Try to find previous block: scan backwards for a non-zero slot. */
	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred from inode itself? OK, just put it into
	 * the same cylinder group then.  The PID-based colour spreads
	 * concurrent allocators within the group (see comment above).
	 */
	bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
/**
* ext2_find_goal - find a preferred place for allocation.
* @inode: owner
* @block: block we want
* @partial: pointer to the last triple within a chain
*
* Returns preferred place for a block (the goal).
*/
static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
					  Indirect *partial)
{
	struct ext2_block_alloc_info *block_i;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/*
	 * try the heuristic for sequential allocation,
	 * failing that at least try to get decent locality.
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
		&& (block_i->last_alloc_physical_block != 0)) {
		/* Sequential write: allocate right after the last block. */
		return block_i->last_alloc_physical_block + 1;
	}

	return ext2_find_near(inode, partial);
}
/**
* ext2_blks_to_allocate: Look up the block map and count the number
* of direct blocks need to be allocated for the given branch.
*
* @branch: chain of indirect blocks
* @k: number of blocks need for indirect blocks
* @blks: number of data blocks to be mapped.
* @blocks_to_boundary: the offset in the indirect block
*
* return the total number of blocks to be allocate, including the
* direct and indirect blocks.
*/
static int
ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
	int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case, [t,d]Indirect block(s) has not allocated yet
	 * then it's clear blocks on that path have not allocated
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	/* Indirect path fully present: count contiguous free (zero) slots
	 * after the first, stopping at the request size or the boundary. */
	count++;
	while (count < blks && count <= blocks_to_boundary
		&& le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
/**
* ext2_alloc_blocks: multiple allocate blocks needed for a branch
* @indirect_blks: the number of blocks need to allocate for indirect
* blocks
*
* @new_blocks: on return it will store the new block numbers for
* the indirect blocks(if needed) and the first direct block,
* @blks: on return it will store the total number of allocated
* direct blocks
*/
static int ext2_alloc_blocks(struct inode *inode,
			ext2_fsblk_t goal, int indirect_blks, int blks,
			ext2_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext2_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks(if not allocated yet), and at least
	 * the first direct block of this branch. That's the
	 * minimum number of blocks need to allocate(required)
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext2_new_blocks(inode,goal,&count,err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		/* Once any block beyond the indirect ones is in hand we
		 * have at least one direct block: done. */
		if (count > 0)
			break;
	}

	/* save the new block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of blocks allocated for direct blocks */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	/* On failure, give back any indirect blocks already allocated. */
	for (i = 0; i <index; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	if (index)
		mark_inode_dirty(inode);
	return ret;
}
/**
 *	ext2_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@indirect_blks: number of [d,t]indirect blocks to allocate
 *	@blks: in: number of direct blocks wanted; out: number allocated
 *	@goal: preferred physical block for the allocation
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates the blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in the branch[], in
 *	the same format as ext2_get_branch() would do. We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key). Upon the exit we have the same
 *	picture as after the successful ext2_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext2_alloc_blocks() (normally -ENOSPC). Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext2_alloc_branch(struct inode *inode,
			int indirect_blks, int *blks, ext2_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;			/* direct blocks actually allocated */
	ext2_fsblk_t new_blocks[4];
	ext2_fsblk_t current_block;

	num = ext2_alloc_blocks(inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks;  n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		branch[n].bh = bh;
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if ( n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i=1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		/* We used to sync bh here if IS_SYNC(inode).
		 * But we now rely upon generic_write_sync()
		 * and b_inode_buffers. But not for directories.
		 */
		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			sync_dirty_buffer(bh);
	}
	*blks = num;
	return err;

failed:
	/* branch[0] has no bh of its own, so start forgetting at 1 */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < indirect_blks; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	/*
	 * i == indirect_blks here, so this frees the run of @num direct
	 * blocks that ext2_alloc_blocks() recorded at new_blocks[i].
	 */
	ext2_free_blocks(inode, new_blocks[i], num);
	return err;
}
/**
 * ext2_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static void ext2_splice_branch(struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	struct ext2_block_alloc_info *block_i;
	ext2_fsblk_t current_block;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/* XXX LOCKING probably should have i_meta_lock ?*/
	/* That's it */

	/* Setting *where->p makes the new branch visible to readers. */
	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to more just allocated
	 * direct blocks blocks.  (num == 0 means @where points directly at the
	 * inode's i_data, so the extra direct blocks are contiguous after key.)
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to assist find the proper goal block for next
	 * allocation
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
}
/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 */
static int ext2_get_blocks(struct inode *inode,
			   sector_t iblock, unsigned long maxblocks,
			   struct buffer_head *bh_result,
			   int create)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext2_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int count = 0;
	ext2_fsblk_t first_block = 0;

	depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);

	if (depth == 0)
		return (err);

	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result); /* What's this do? */
		count++;
		/*map more blocks*/
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext2_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * Indirect block might be removed by
				 * truncate while we were reading it.
				 * Handling of that case: forget what we've
				 * got now, go to reread.
				 */
				err = -EAGAIN;
				count = 0;
				break;
			}
			/* extend the mapping while blocks stay contiguous */
			blk = le32_to_cpu(*(chain[depth-1].p + count));
			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);
	/*
	 * If the indirect block is missing while we are reading
	 * the chain(ext2_get_branch() returns -EAGAIN err), or
	 * if the chain has been changed after we grab the semaphore,
	 * (either because another process truncated this branch, or
	 * another get_block allocated this branch) re-grab the chain to see if
	 * the request block has been allocated or not.
	 *
	 * Since we already block the truncate/other get_block
	 * at this point, we will have the current copy of the chain when we
	 * splice the branch into the tree.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			/* another writer allocated the block meanwhile */
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			clear_buffer_new(bh_result);
			goto got_it;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext2_init_block_alloc_info(inode);

	goal = ext2_find_goal(inode, iblock, partial);

	/* the number of blocks need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;
	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext2_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * XXX ???? Block out ext2_truncate while we alter the tree
	 */
	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);
	if (err) {
		mutex_unlock(&ei->truncate_mutex);
		goto cleanup;
	}

	if (ext2_use_xip(inode->i_sb)) {
		/*
		 * we need to clear the block
		 */
		err = ext2_clear_xip_target (inode,
			le32_to_cpu(chain[depth-1].key));
		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}

	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	return err;
}
/*
 * Standard get_block callback: map up to b_size worth of blocks starting
 * at @iblock into @bh_result.  ext2_get_blocks() returns the number of
 * blocks mapped (>0), 0 for a failed lookup, or a negative error; a
 * positive count is folded into bh_result->b_size and reported as 0.
 */
int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
{
	unsigned blkbits = inode->i_blkbits;
	int mapped;

	mapped = ext2_get_blocks(inode, iblock,
				 bh_result->b_size >> blkbits,
				 bh_result, create);
	if (mapped <= 0)
		return mapped;

	bh_result->b_size = (mapped << blkbits);
	return 0;
}
/* fiemap ioctl support: let the generic block-mapping helper walk extents. */
int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	int ret;

	ret = generic_block_fiemap(inode, fieinfo, start, len,
				   ext2_get_block);
	return ret;
}
/* Write a single page using the buffer_head based generic path. */
static int ext2_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = block_write_full_page(page, ext2_get_block, wbc);
	return rc;
}
/* Read a single page via the multi-page (mpage) helper. */
static int ext2_readpage(struct file *file, struct page *page)
{
	int rc;

	rc = mpage_readpage(page, ext2_get_block);
	return rc;
}
/* Readahead entry point: hand the whole page list to the mpage helper. */
static int
ext2_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	int rc;

	rc = mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
	return rc;
}
/*
 * Prepare a buffered write.  On failure, blocks may already have been
 * instantiated past the old i_size; ext2_write_failed() trims them.
 */
static int
ext2_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int err = block_write_begin(mapping, pos, len, flags, pagep,
				    ext2_get_block);
	if (err >= 0)
		return err;

	ext2_write_failed(mapping, pos + len);
	return err;
}
/*
 * Finish a buffered write.  A short copy (ret < len) means part of the
 * prepared range was not written; trim anything instantiated past it.
 */
static int ext2_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int done = generic_write_end(file, mapping, pos, len, copied,
				     page, fsdata);

	if (done < len)
		ext2_write_failed(mapping, pos + len);
	return done;
}
/*
 * write_begin for the "nobh" (no buffer_head) mount option.  As with the
 * regular path, a failure trims blocks instantiated past the old i_size.
 */
static int
ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int err = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
				   ext2_get_block);
	if (err >= 0)
		return err;

	ext2_write_failed(mapping, pos + len);
	return err;
}
/* writepage for the "nobh" mount option. */
static int ext2_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	int rc;

	rc = nobh_writepage(page, ext2_get_block, wbc);
	return rc;
}
/* FIBMAP support: translate a logical block to a physical one. */
static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t phys;

	phys = generic_block_bmap(mapping, block, ext2_get_block);
	return phys;
}
/*
 * Direct I/O: delegate to the generic blockdev DIO engine.  A failed DIO
 * write may leave blocks allocated past i_size, so trim them on error.
 */
static ssize_t
ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
			loff_t offset, unsigned long nr_segs)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	ssize_t ret;

	ret = blockdev_direct_IO(rw, iocb, mapping->host, iov, offset,
				 nr_segs, ext2_get_block);
	if (ret < 0 && (rw & WRITE))
		ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));
	return ret;
}
/* Writeback of a range of dirty pages via the mpage helper. */
static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int rc;

	rc = mpage_writepages(mapping, wbc, ext2_get_block);
	return rc;
}
/* Default address_space operations: buffer_head based read/write paths. */
const struct address_space_operations ext2_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_writepage,
	.write_begin		= ext2_write_begin,
	.write_end		= ext2_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
/* Execute-in-place (XIP) operations: no page cache, only bmap + xip_mem. */
const struct address_space_operations ext2_aops_xip = {
	.bmap			= ext2_bmap,
	.get_xip_mem		= ext2_get_xip_mem,
};
/* Operations for the "nobh" mount option: avoid attaching buffer_heads. */
const struct address_space_operations ext2_nobh_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_nobh_writepage,
	.write_begin		= ext2_nobh_write_begin,
	.write_end		= nobh_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.error_remove_page	= generic_error_remove_page,
};
/*
 * Return 1 when every 32-bit word in [p, q) is zero, 0 otherwise.
 * (An empty range counts as all-zero.)
 *
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	for (; p < q; p++)
		if (*p)
			return 0;
	return 1;
}
/**
 *	ext2_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext2_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to the (detached) top of branch
 *
 *	This is a helper function used by ext2_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several indirect
 *	blocks but leave the blocks themselves alive. Block is partially
 *	truncated if some data below the new i_size is referred from it (and
 *	it is on the path to the first completely truncated data block, indeed).
 *	We have to free the top of that path along with everything to the right
 *	of the path. Since no allocation past the truncation point is possible
 *	until ext2_truncate() finishes, we may safely do the latter, but top
 *	of branch may require special attention - pageout below the truncation
 *	point might try to populate it.
 *
 *	We atomically detach the top of branch from the tree, store the block
 *	number of its root in *@top, pointers to buffer_heads of partially
 *	truncated blocks - in @chain[].bh and pointers to their last elements
 *	that should not be removed - in @chain[].p. Return value is the pointer
 *	to last filled element of @chain.
 *
 *	The work left to caller to do the actual freeing of subtrees:
 *		a) free the subtree starting from *@top
 *		b) free the subtrees whose roots are stored in
 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *		c) free the subtrees growing from the inode past the @chain[0].p
 *			(no partially truncated stuff there).
 */
static Indirect *ext2_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				__le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* drop trailing zero offsets: the branch below them is fully removed */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext2_get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	write_lock(&EXT2_I(inode)->i_meta_lock);
	if (!partial->key && *partial->p) {
		write_unlock(&EXT2_I(inode)->i_meta_lock);
		goto no_top;
	}
	/* walk back to the deepest indirect block that must survive */
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&EXT2_I(inode)->i_meta_lock);

	while(partial > p)
	{
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
/**
 *	ext2_free_data - free a list of data blocks
 *	@inode:	inode we are dealing with
 *	@p:	array of block numbers
 *	@q:	points immediately past the end of array
 *
 *	We are freeing all blocks referred from that array (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
	unsigned long block_to_free = 0, count = 0;
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			*p = 0;
			/* accumulate blocks to free if they're contiguous */
			if (count == 0)
				goto free_this;
			else if (block_to_free == nr - count)
				count++;
			else {
				/* run broken: flush what we have, start anew */
				ext2_free_blocks (inode, block_to_free, count);
				mark_inode_dirty(inode);
			free_this:		/* NB: label inside the else arm */
				block_to_free = nr;
				count = 1;
			}
		}
	}
	/* flush the final accumulated run, if any */
	if (count > 0) {
		ext2_free_blocks (inode, block_to_free, count);
		mark_inode_dirty(inode);
	}
}
/**
 *	ext2_free_branches - free an array of branches
 *	@inode:	inode we are dealing with
 *	@p:	array of block numbers
 *	@q:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.  Recurses at most @depth levels (bounded by the
 *	triple-indirect depth of 3).
 */
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
		for ( ; p < q ; p++) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;
			/* clear the slot before reading the indirect block */
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext2_error(inode->i_sb, "ext2_free_branches",
					"Read failure, inode=%ld, block=%ld",
					inode->i_ino, nr);
				continue;
			}
			/* free everything the indirect block points at */
			ext2_free_branches(inode,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);
			bforget(bh);
			ext2_free_blocks(inode, nr, 1);
			mark_inode_dirty(inode);
		}
	} else
		/* depth exhausted: the array holds data block numbers */
		ext2_free_data(inode, p, q);
}
/*
 * Free all block-mapping tree content beyond @offset: the shared (partially
 * truncated) branch found by ext2_find_shared(), the tails of the indirect
 * blocks along it, and the wholly-removed indirect subtrees.  Serialised
 * against ext2_get_blocks() by truncate_mutex.
 */
static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	__le32 *i_data = EXT2_I(inode)->i_data;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long iblock;
	unsigned blocksize;

	blocksize = inode->i_sb->s_blocksize;
	/* first block wholly past the new size (round up) */
	iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);

	n = ext2_block_to_path(inode, iblock, offsets, NULL);
	if (n == 0)
		return;

	/*
	 * From here we block out all ext2_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {
		/* boundary inside the direct blocks: no shared branch */
		ext2_free_data(inode, i_data+offsets[0],
					i_data + EXT2_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext2_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (already detached) */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext2_free_branches(inode,
				   partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees.  The switch deliberately
	 * falls through: a boundary in the direct blocks removes all three
	 * indirect trees, a boundary under IND removes DIND and TIND, etc. */
	switch (offsets[0]) {
		default:
			nr = i_data[EXT2_IND_BLOCK];
			if (nr) {
				i_data[EXT2_IND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 1);
			}
			/* fall through */
		case EXT2_IND_BLOCK:
			nr = i_data[EXT2_DIND_BLOCK];
			if (nr) {
				i_data[EXT2_DIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 2);
			}
			/* fall through */
		case EXT2_DIND_BLOCK:
			nr = i_data[EXT2_TIND_BLOCK];
			if (nr) {
				i_data[EXT2_TIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 3);
			}
			/* fall through */
		case EXT2_TIND_BLOCK:
			;
	}

	ext2_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
}
/*
 * Guarded entry point for __ext2_truncate_blocks(): silently skip inode
 * types that carry no block-mapped data, fast symlinks (data lives in the
 * inode itself) and append-only/immutable inodes.
 */
static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	umode_t mode = inode->i_mode;

	if (!S_ISREG(mode) && !S_ISDIR(mode) && !S_ISLNK(mode))
		return;
	if (ext2_inode_is_fast_symlink(inode))
		return;
	/*
	 * XXX: it seems like a bug here that we don't allow
	 * IS_APPEND inode to have blocks-past-i_size trimmed off.
	 * review and fix this.
	 *
	 * Also would be nice to be able to handle IO errors and such,
	 * but that's probably too much to ask.
	 */
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	__ext2_truncate_blocks(inode, offset);
}
/*
 * Change the size of @inode to @newsize: zero the partial tail page using
 * the method matching the mount options, then update i_size and free the
 * blocks beyond it.  Returns 0 or a negative errno.
 */
static int ext2_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (ext2_inode_is_fast_symlink(inode))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	/* let in-flight direct I/O finish before the size changes */
	inode_dio_wait(inode);

	if (mapping_is_xip(inode->i_mapping))
		error = xip_truncate_page(inode->i_mapping, newsize);
	else if (test_opt(inode->i_sb, NOBH))
		error = nobh_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	else
		error = block_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	__ext2_truncate_blocks(inode, newsize);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	if (inode_needs_sync(inode)) {
		sync_mapping_buffers(inode->i_mapping);
		sync_inode_metadata(inode, 1);
	} else {
		mark_inode_dirty(inode);
	}

	return 0;
}
/*
 * ext2_get_inode - locate the raw on-disk inode @ino.
 * Reads the inode-table block containing @ino, stores the buffer_head in
 * *@p (caller must brelse() it) and returns a pointer to the raw inode
 * within it.  Returns ERR_PTR(-EINVAL) for a bad inode number and
 * ERR_PTR(-EIO) for a missing group descriptor or a read failure.
 */
static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
					struct buffer_head **p)
{
	struct buffer_head * bh;
	unsigned long block_group;
	unsigned long block;
	unsigned long offset;
	struct ext2_group_desc * gdp;

	*p = NULL;
	if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
	    ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
		goto Einval;

	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	gdp = ext2_get_group_desc(sb, block_group, NULL);
	if (!gdp)
		goto Egdp;
	/*
	 * Figure out the offset within the block group inode table
	 */
	offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
	block = le32_to_cpu(gdp->bg_inode_table) +
		(offset >> EXT2_BLOCK_SIZE_BITS(sb));
	if (!(bh = sb_bread(sb, block)))
		goto Eio;

	*p = bh;
	offset &= (EXT2_BLOCK_SIZE(sb) - 1);
	return (struct ext2_inode *) (bh->b_data + offset);

Einval:
	ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
		   (unsigned long) ino);
	return ERR_PTR(-EINVAL);
Eio:
	ext2_error(sb, "ext2_get_inode",
		   "unable to read inode block - inode=%lu, block=%lu",
		   (unsigned long) ino, block);
Egdp:
	/* Eio deliberately falls through here: both paths report -EIO */
	return ERR_PTR(-EIO);
}
void ext2_set_inode_flags(struct inode *inode)
{
unsigned int flags = EXT2_I(inode)->i_flags;
inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
if (flags & EXT2_SYNC_FL)
inode->i_flags |= S_SYNC;
if (flags & EXT2_APPEND_FL)
inode->i_flags |= S_APPEND;
if (flags & EXT2_IMMUTABLE_FL)
inode->i_flags |= S_IMMUTABLE;
if (flags & EXT2_NOATIME_FL)
inode->i_flags |= S_NOATIME;
if (flags & EXT2_DIRSYNC_FL)
inode->i_flags |= S_DIRSYNC;
}
/* Propagate flags from i_flags to EXT2_I(inode)->i_flags */
void ext2_get_inode_flags(struct ext2_inode_info *ei)
{
	unsigned int flags = ei->vfs_inode.i_flags;

	/* clear the maskable on-disk bits, then rebuild them from the VFS */
	ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL|
			EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL);
	if (flags & S_SYNC)
		ei->i_flags |= EXT2_SYNC_FL;
	if (flags & S_APPEND)
		ei->i_flags |= EXT2_APPEND_FL;
	if (flags & S_IMMUTABLE)
		ei->i_flags |= EXT2_IMMUTABLE_FL;
	if (flags & S_NOATIME)
		ei->i_flags |= EXT2_NOATIME_FL;
	if (flags & S_DIRSYNC)
		ei->i_flags |= EXT2_DIRSYNC_FL;
}
/*
 * ext2_iget - obtain the in-core inode for number @ino.
 * Returns a cached inode when present; otherwise reads the raw inode from
 * disk, fills the VFS inode, wires up the right operation tables by file
 * type/mount options and unlocks it.  Returns an ERR_PTR on failure
 * (-ENOMEM, -ESTALE for a deleted inode, or the ext2_get_inode() error).
 */
struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
{
	struct ext2_inode_info *ei;
	struct buffer_head * bh;
	struct ext2_inode *raw_inode;
	struct inode *inode;
	long ret = -EIO;
	int n;
	uid_t i_uid;
	gid_t i_gid;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* already in cache and initialised */

	ei = EXT2_I(inode);
	ei->i_block_alloc_info = NULL;

	raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
	if (IS_ERR(raw_inode)) {
		ret = PTR_ERR(raw_inode);
		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt (inode->i_sb, NO_UID32))) {
		/* fold in the high 16 bits unless 16-bit ids were requested */
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes
	 * the test is that same one that e2fsck uses
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
		/* this inode is deleted */
		brelse (bh);
		ret = -ESTALE;
		goto bad_inode;
	}
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ei->i_dir_acl = 0;
	/* regular files reuse the dir_acl slot as the high 32 size bits */
	if (S_ISREG(inode->i_mode))
		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	else
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	ei->i_dtime = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_state = 0;
	ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
	ei->i_dir_start_lookup = 0;

	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (n = 0; n < EXT2_N_BLOCKS; n++)
		ei->i_data[n] = raw_inode->i_block[n];

	/* choose the operation tables by file type and mount options */
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext2_file_inode_operations;
		if (ext2_use_xip(inode->i_sb)) {
			inode->i_mapping->a_ops = &ext2_aops_xip;
			inode->i_fop = &ext2_xip_file_operations;
		} else if (test_opt(inode->i_sb, NOBH)) {
			inode->i_mapping->a_ops = &ext2_nobh_aops;
			inode->i_fop = &ext2_file_operations;
		} else {
			inode->i_mapping->a_ops = &ext2_aops;
			inode->i_fop = &ext2_file_operations;
		}
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext2_dir_inode_operations;
		inode->i_fop = &ext2_dir_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext2_inode_is_fast_symlink(inode)) {
			/* fast symlink: the target lives inside i_data */
			inode->i_op = &ext2_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext2_symlink_inode_operations;
			if (test_opt(inode->i_sb, NOBH))
				inode->i_mapping->a_ops = &ext2_nobh_aops;
			else
				inode->i_mapping->a_ops = &ext2_aops;
		}
	} else {
		/* device node / fifo / socket: dev encoded in i_block[0..1] */
		inode->i_op = &ext2_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse (bh);
	ext2_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
/*
 * Write the in-core inode back to its on-disk location.  Copies every
 * field from the VFS inode / ext2_inode_info into the raw inode, marks
 * the inode-table buffer dirty and, if @do_sync, waits for the write and
 * reports -EIO on failure.
 */
static int __ext2_write_inode(struct inode *inode, int do_sync)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	uid_t uid = i_uid_read(inode);
	gid_t gid = i_gid_read(inode);
	struct buffer_head * bh;
	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
	int n;
	int err = 0;

	if (IS_ERR(raw_inode))
 		return -EIO;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT2_STATE_NEW)
		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

	ext2_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
/*
 * Fix up interoperability with old kernels. Otherwise, old inodes get
 * re-used with the upper 16 bits of the uid/gid intact
 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(inode->i_size);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode))
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	else {
		/* regular file: dir_acl slot holds the high 32 size bits */
		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
		if (inode->i_size > 0x7fffffffULL) {
			if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT2_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT2_GOOD_OLD_REV)) {
			       /* If this is the first large file
				* created, add a flag to the superblock.
				*/
				spin_lock(&EXT2_SB(sb)->s_lock);
				ext2_update_dynamic_rev(sb);
				EXT2_SET_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
				spin_unlock(&EXT2_SB(sb)->s_lock);
				ext2_write_super(sb);
			}
		}
	}

	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* device numbers: old scheme in block[0], new in block[1] */
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (n = 0; n < EXT2_N_BLOCKS; n++)
		raw_inode->i_block[n] = ei->i_data[n];
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk ("IO error syncing ext2 inode [%s:%08lx]\n",
				sb->s_id, (unsigned long) ino);
			err = -EIO;
		}
	}
	ei->i_state &= ~EXT2_STATE_NEW;
	brelse (bh);
	return err;
}
/* VFS ->write_inode: synchronous only for WB_SYNC_ALL writeback. */
int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int sync = (wbc->sync_mode == WB_SYNC_ALL);

	return __ext2_write_inode(inode, sync);
}
/*
 * VFS ->setattr: validate the change, move quota on owner change, resize
 * via ext2_setsize() on ATTR_SIZE, copy the remaining attributes and
 * refresh the ACL when the mode changed.
 */
int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	if (is_quota_modification(inode, iattr))
		dquot_initialize(inode);
	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
		error = dquot_transfer(inode, iattr);
		if (error)
			return error;
	}
	if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
		error = ext2_setsize(inode, iattr->ia_size);
		if (error)
			return error;
	}
	setattr_copy(inode, iattr);
	if (iattr->ia_valid & ATTR_MODE)
		error = ext2_acl_chmod(inode);
	mark_inode_dirty(inode);

	return error;
}
| gpl-2.0 |
bossino/dra-kernel | drivers/s390/scsi/zfcp_cfdc.c | 2321 | 12479 | /*
* zfcp device driver
*
* Userspace interface for accessing the
* Access Control Lists / Control File Data Channel;
* handling of response code and states for ports and LUNs.
*
* Copyright IBM Corp. 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <asm/compat.h>
#include <asm/ccwdev.h>
#include "zfcp_def.h"
#include "zfcp_ext.h"
#include "zfcp_fsf.h"
#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001
#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101
#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201
#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401
#define ZFCP_CFDC_CMND_UPLOAD 0x00010002
#define ZFCP_CFDC_DOWNLOAD 0x00000001
#define ZFCP_CFDC_UPLOAD 0x00000002
#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000
#define ZFCP_CFDC_IOC_MAGIC 0xDD
#define ZFCP_CFDC_IOC \
_IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_data)
/**
 * struct zfcp_cfdc_data - data for ioctl cfdc interface
 * @signature: request signature
 * @devno: FCP adapter device number
 * @command: command code
 * @fsf_status: returns status of FSF command to userspace
 * @fsf_status_qual: returned to userspace
 * @payloads: access conflicts list
 * @control_file: access control table (variable-length tail)
 */
struct zfcp_cfdc_data {
	u32 signature;
	u32 devno;
	u32 command;
	u32 fsf_status;
	u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
	u8  payloads[256];
	/* C99 flexible array member; replaces the deprecated GNU
	 * zero-length array "control_file[0]" (same layout/ABI). */
	u8  control_file[];
};
/*
 * Fill a scatterlist with ZFCP_CFDC_MAX_SIZE bytes copied in from
 * @user_buffer, one sg entry at a time.  Returns 0 or -EFAULT.
 */
static int zfcp_cfdc_copy_from_user(struct scatterlist *sg,
				    void __user *user_buffer)
{
	unsigned int remaining = ZFCP_CFDC_MAX_SIZE;
	unsigned int chunk;

	for (; remaining; remaining -= chunk, user_buffer += chunk, sg++) {
		chunk = min((unsigned int)remaining, sg->length);
		if (copy_from_user(sg_virt(sg), user_buffer, chunk))
			return -EFAULT;
	}
	return 0;
}
/*
 * Copy ZFCP_CFDC_MAX_SIZE bytes out of a scatterlist to @user_buffer,
 * one sg entry at a time.  Returns 0 or -EFAULT.
 */
static int zfcp_cfdc_copy_to_user(void  __user *user_buffer,
				  struct scatterlist *sg)
{
	unsigned int remaining = ZFCP_CFDC_MAX_SIZE;
	unsigned int chunk;

	for (; remaining; remaining -= chunk, user_buffer += chunk, sg++) {
		chunk = min((unsigned int)remaining, sg->length);
		if (copy_to_user(user_buffer, sg_virt(sg), chunk))
			return -EFAULT;
	}
	return 0;
}
/*
 * Look up the zfcp adapter for device number @devno (bus id "0.0.%04x").
 * Returns NULL when no matching ccw device exists.
 */
static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
{
	struct zfcp_adapter *adapter;
	struct ccw_device *cdev;
	char busid[9];

	snprintf(busid, sizeof(busid), "0.0.%04x", devno);
	cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
	if (cdev == NULL)
		return NULL;

	adapter = zfcp_ccw_adapter_by_cdev(cdev);
	/* drop the device reference taken by get_ccwdev_by_busid() */
	put_device(&cdev->dev);
	return adapter;
}
/*
 * Translate a userspace CFDC command code into the FSF command/option
 * pair used on the wire.  Returns 0 on success, -EINVAL for an unknown
 * command code.
 */
static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
{
    switch (command) {
    case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
        fsf_cfdc->option = FSF_CFDC_OPTION_NORMAL_MODE;
        break;
    case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
        fsf_cfdc->option = FSF_CFDC_OPTION_FORCE;
        break;
    case ZFCP_CFDC_CMND_FULL_ACCESS:
        fsf_cfdc->option = FSF_CFDC_OPTION_FULL_ACCESS;
        break;
    case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
        fsf_cfdc->option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
        break;
    case ZFCP_CFDC_CMND_UPLOAD:
        /* upload is the only non-download variant */
        fsf_cfdc->command = FSF_QTCB_UPLOAD_CONTROL_FILE;
        fsf_cfdc->option = 0;
        return 0;
    default:
        return -EINVAL;
    }

    /* every remaining case is a download variant */
    fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
    return 0;
}
/*
 * Allocate the scatterlist pages for a CFDC transfer and, for download
 * commands that carry a control file, pre-fill them from userspace.
 * On copy failure the table is freed again and -EFAULT is returned.
 */
static int zfcp_cfdc_sg_setup(int command, struct scatterlist *sg,
                              u8 __user *control_file)
{
    int ret = zfcp_sg_setup_table(sg, ZFCP_CFDC_PAGES);

    if (ret)
        return ret;

    /* the final page only holds the tail of ZFCP_CFDC_MAX_SIZE */
    sg[ZFCP_CFDC_PAGES - 1].length = ZFCP_CFDC_MAX_SIZE % PAGE_SIZE;

    if ((command & ZFCP_CFDC_WITH_CONTROL_FILE) &&
        (command & ZFCP_CFDC_DOWNLOAD)) {
        if (zfcp_cfdc_copy_from_user(sg, control_file)) {
            zfcp_sg_free_table(sg, ZFCP_CFDC_PAGES);
            return -EFAULT;
        }
    }
    return 0;
}
/*
 * Copy the FSF status, its qualifier, and the ELS payload area out of
 * the completed request's QTCB into the buffer that is returned to
 * userspace, so the caller can see why the command succeeded or failed.
 */
static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
                                   struct zfcp_fsf_req *req)
{
    data->fsf_status = req->qtcb->header.fsf_status;
    memcpy(&data->fsf_status_qual, &req->qtcb->header.fsf_status_qual,
           sizeof(union fsf_status_qual));
    /* the access-conflict list is carried in the support/ELS area */
    memcpy(&data->payloads, &req->qtcb->bottom.support.els,
           sizeof(req->qtcb->bottom.support.els));
}
/*
 * ioctl handler for the zfcp_cfdc misc device.  Validates the request,
 * resolves the target adapter, issues the FSF control-file command and
 * copies status (and, for uploads, the control file) back to userspace.
 * Returns 0 on success or a negative errno.
 */
static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
                                unsigned long arg)
{
    struct zfcp_cfdc_data *data;
    struct zfcp_cfdc_data __user *data_user;
    struct zfcp_adapter *adapter;
    struct zfcp_fsf_req *req;
    struct zfcp_fsf_cfdc *fsf_cfdc;
    int retval;

    if (command != ZFCP_CFDC_IOC)
        return -ENOTTY;

    /* 31-bit userspace passes a compat pointer */
    if (is_compat_task())
        data_user = compat_ptr(arg);
    else
        data_user = (void __user *)arg;

    if (!data_user)
        return -EINVAL;

    fsf_cfdc = kmalloc(sizeof(struct zfcp_fsf_cfdc), GFP_KERNEL);
    if (!fsf_cfdc)
        return -ENOMEM;

    data = memdup_user(data_user, sizeof(*data_user));
    if (IS_ERR(data)) {
        retval = PTR_ERR(data);
        goto no_mem_sense;
    }

    if (data->signature != 0xCFDCACDF) {
        retval = -EINVAL;
        goto free_buffer;
    }

    /* Bug fix: this return value was previously ignored, so an unknown
     * command code left fsf_cfdc->command/option uninitialized (kmalloc
     * memory) and the request was still issued.  Fail with -EINVAL.
     */
    retval = zfcp_cfdc_set_fsf(fsf_cfdc, data->command);
    if (retval)
        goto free_buffer;

    adapter = zfcp_cfdc_get_adapter(data->devno);
    if (!adapter) {
        retval = -ENXIO;
        goto free_buffer;
    }

    /* note: &data_user->control_file is pointer arithmetic only, the
     * __user pointer is never dereferenced here */
    retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
                                data_user->control_file);
    if (retval)
        goto adapter_put;

    req = zfcp_fsf_control_file(adapter, fsf_cfdc);
    if (IS_ERR(req)) {
        retval = PTR_ERR(req);
        goto free_sg;
    }

    if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
        retval = -ENXIO;
        goto free_fsf;
    }

    zfcp_cfdc_req_to_sense(data, req);
    retval = copy_to_user(data_user, data, sizeof(*data_user));
    if (retval) {
        retval = -EFAULT;
        goto free_fsf;
    }

    /* uploads additionally return the control file contents */
    if (data->command & ZFCP_CFDC_UPLOAD)
        retval = zfcp_cfdc_copy_to_user(&data_user->control_file,
                                        fsf_cfdc->sg);

 free_fsf:
    zfcp_fsf_req_free(req);
 free_sg:
    zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
 adapter_put:
    zfcp_ccw_adapter_put(adapter);
 free_buffer:
    kfree(data);
 no_mem_sense:
    kfree(fsf_cfdc);
    return retval;
}
/* file operations for the zfcp_cfdc misc device; the same ioctl handler
 * serves native and compat callers since it does its own compat_ptr()
 * translation, and seeking is meaningless on this device */
static const struct file_operations zfcp_cfdc_fops = {
    .open = nonseekable_open,
    .unlocked_ioctl = zfcp_cfdc_dev_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl = zfcp_cfdc_dev_ioctl,
#endif
    .llseek = no_llseek,
};

/* /dev/zfcp_cfdc, registered with a dynamically assigned misc minor */
struct miscdevice zfcp_cfdc_misc = {
    .minor = MISC_DYNAMIC_MINOR,
    .name = "zfcp_cfdc",
    .fops = &zfcp_cfdc_fops,
};
/**
* zfcp_cfdc_adapter_access_changed - Process change in adapter ACT
* @adapter: Adapter where the Access Control Table (ACT) changed
*
* After a change in the adapter ACT, check if access to any
* previously denied resources is now possible.
*/
void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
{
    unsigned long flags;
    struct zfcp_port *port;
    struct scsi_device *sdev;
    struct zfcp_scsi_dev *zfcp_sdev;
    int status;

    /* the ACT does not apply in NPIV mode, nothing to re-evaluate */
    if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
        return;

    /* retry every port that was previously denied or boxed */
    read_lock_irqsave(&adapter->port_list_lock, flags);
    list_for_each_entry(port, &adapter->port_list, list) {
        status = atomic_read(&port->status);
        if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
            (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
            zfcp_erp_port_reopen(port,
                                 ZFCP_STATUS_COMMON_ERP_FAILED,
                                 "cfaac_1");
    }
    read_unlock_irqrestore(&adapter->port_list_lock, flags);

    /* likewise retry every LUN that was previously denied or boxed */
    shost_for_each_device(sdev, adapter->scsi_host) {
        zfcp_sdev = sdev_to_zfcp(sdev);
        status = atomic_read(&zfcp_sdev->status);
        if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
            (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
            zfcp_erp_lun_reopen(sdev,
                                ZFCP_STATUS_COMMON_ERP_FAILED,
                                "cfaac_2");
    }
}
/*
 * Decode an ACT rule reference (subtable id in the upper halfword, rule
 * index in the lower halfword) and warn about the matching rule type.
 * Unknown or zero subtables are silently ignored.
 */
static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
{
    u16 subtable = table >> 16;
    u16 rule = table & 0xffff;
    /* static const: built once at compile time instead of re-creating
     * the pointer array on the stack on every call */
    static const char * const act_type[] = {
        "unknown", "OS", "WWPN", "DID", "LUN"
    };

    if (subtable && subtable < ARRAY_SIZE(act_type))
        dev_warn(&adapter->ccw_device->dev,
                 "Access denied according to ACT rule type %s, "
                 "rule %d\n", act_type[subtable], rule);
}
/**
* zfcp_cfdc_port_denied - Process "access denied" for port
* @port: The port where the access has been denied
* @qual: The FSF status qualifier for the access denied FSF status
*/
void zfcp_cfdc_port_denied(struct zfcp_port *port,
union fsf_status_qual *qual)
{
dev_warn(&port->adapter->ccw_device->dev,
"Access denied to port 0x%016Lx\n",
(unsigned long long)port->wwpn);
zfcp_act_eval_err(port->adapter, qual->halfword[0]);
zfcp_act_eval_err(port->adapter, qual->halfword[1]);
zfcp_erp_set_port_status(port,
ZFCP_STATUS_COMMON_ERP_FAILED |
ZFCP_STATUS_COMMON_ACCESS_DENIED);
}
/**
* zfcp_cfdc_lun_denied - Process "access denied" for LUN
* @sdev: The SCSI device / LUN where the access has been denied
* @qual: The FSF status qualifier for the access denied FSF status
*/
void zfcp_cfdc_lun_denied(struct scsi_device *sdev,
union fsf_status_qual *qual)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
"Access denied to LUN 0x%016Lx on port 0x%016Lx\n",
zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[0]);
zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[1]);
zfcp_erp_set_lun_status(sdev,
ZFCP_STATUS_COMMON_ERP_FAILED |
ZFCP_STATUS_COMMON_ACCESS_DENIED);
atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
}
/**
* zfcp_cfdc_lun_shrng_vltn - Evaluate LUN sharing violation status
* @sdev: The LUN / SCSI device where sharing violation occurred
* @qual: The FSF status qualifier from the LUN sharing violation
*/
void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *sdev,
                              union fsf_status_qual *qual)
{
    struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

    /* word[0] set: another CSS / MIF image already owns the LUN;
     * otherwise word[2] carries an ACT rule reference to decode */
    if (qual->word[0])
        dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
                 "LUN 0x%Lx on port 0x%Lx is already in "
                 "use by CSS%d, MIF Image ID %x\n",
                 zfcp_scsi_dev_lun(sdev),
                 (unsigned long long)zfcp_sdev->port->wwpn,
                 qual->fsf_queue_designator.cssid,
                 qual->fsf_queue_designator.hla);
    else
        zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->word[2]);

    /* the LUN cannot be used: fail it and clear sharing/RO state */
    zfcp_erp_set_lun_status(sdev,
                            ZFCP_STATUS_COMMON_ERP_FAILED |
                            ZFCP_STATUS_COMMON_ACCESS_DENIED);
    atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
    atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
}
/**
* zfcp_cfdc_open_lun_eval - Eval access ctrl. status for successful "open lun"
* @sdev: The SCSI device / LUN where to evaluate the status
* @bottom: The qtcb bottom with the status from the "open lun"
*
* Returns: 0 if LUN is usable, -EACCES if the access control table
* reports an unsupported configuration.
*/
int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
                            struct fsf_qtcb_bottom_support *bottom)
{
    int shared, rw;
    struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
    struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;

    /* access control only matters without NPIV, with LUN sharing
     * support, and on a non-privileged subchannel */
    if ((adapter->connection_features & FSF_FEATURE_NPIV_MODE) ||
        !(adapter->adapter_features & FSF_FEATURE_LUN_SHARING) ||
        zfcp_ccw_priv_sch(adapter))
        return 0;

    shared = !(bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE);
    rw = (bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);

    if (shared)
        atomic_set_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);

    if (!rw) {
        atomic_set_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
        dev_info(&adapter->ccw_device->dev, "SCSI device at LUN "
                 "0x%016Lx on port 0x%016Lx opened read-only\n",
                 zfcp_scsi_dev_lun(sdev),
                 (unsigned long long)zfcp_sdev->port->wwpn);
    }

    /* exclusive + read-only is an unsupported combination */
    if (!shared && !rw) {
        dev_err(&adapter->ccw_device->dev, "Exclusive read-only access "
                "not supported (LUN 0x%016Lx, port 0x%016Lx)\n",
                zfcp_scsi_dev_lun(sdev),
                (unsigned long long)zfcp_sdev->port->wwpn);
        zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
        zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6");
        return -EACCES;
    }

    /* shared + read-write is also unsupported */
    if (shared && rw) {
        dev_err(&adapter->ccw_device->dev,
                "Shared read-write access not supported "
                "(LUN 0x%016Lx, port 0x%016Lx)\n",
                zfcp_scsi_dev_lun(sdev),
                (unsigned long long)zfcp_sdev->port->wwpn);
        zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
        zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8");
        return -EACCES;
    }
    return 0;
}
| gpl-2.0 |
qqzwc/JBX_Kernel | arch/powerpc/mm/mmu_context_nohash.c | 2577 | 12902 | /*
* This file contains the routines for handling the MMU on those
* PowerPC implementations where the MMU is not using the hash
* table, such as 8xx, 4xx, BookE's etc...
*
* Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
* IBM Corp.
*
* Derived from previous arch/powerpc/mm/mmu_context.c
* and arch/powerpc/include/asm/mmu_context.h
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* TODO:
*
* - The global context lock will not scale very well
* - The maps should be dynamically allocated to allow for processors
* that support more PID bits at runtime
* - Implement flush_tlb_mm() by making the context stale and picking
* a new one
* - More aggressively clear stale map bits and maybe find some way to
* also clear mm->cpu_vm_mask bits when processes are migrated
*/
//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT 31
//#define DEBUG_HARDER
/* We don't use DEBUG because it tends to be compiled in always nowadays
* and this would generate way too much output
*/
#ifdef DEBUG_HARDER
#define pr_hard(args...) printk(KERN_DEBUG args)
#define pr_hardcont(args...) printk(KERN_CONT args)
#else
#define pr_hard(args...) do { } while(0)
#define pr_hardcont(args...) do { } while(0)
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);
#define CTX_MAP_SIZE \
(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
/* Steal a context from a task that has one at the moment.
*
* This is used when we are running out of available PID numbers
* on the processors.
*
* This isn't an LRU system, it just frees up each context in
* turn (sort-of pseudo-random replacement :). This would be the
* place to implement an LRU scheme if anyone was motivated to do it.
* -- paulus
*
* For context stealing, we use a slightly different approach for
* SMP and UP. Basically, the UP one is simpler and doesn't use
* the stale map as we can just flush the local CPU
* -- benh
*/
#ifdef CONFIG_SMP
/*
 * Steal an inactive context starting the search at @id.  Called with
 * context_lock held.  May temporarily drop and re-take the lock (when
 * no context can be stolen), in which case MMU_NO_CONTEXT is returned
 * and the caller must restart its whole allocation attempt.
 */
static unsigned int steal_context_smp(unsigned int id)
{
    struct mm_struct *mm;
    unsigned int cpu, max, i;

    max = last_context - first_context;

    /* Attempt to free next_context first and then loop until we manage */
    while (max--) {
        /* Pick up the victim mm */
        mm = context_mm[id];

        /* We have a candidate victim, check if it's active, on SMP
         * we cannot steal active contexts
         */
        if (mm->context.active) {
            id++;
            if (id > last_context)
                id = first_context;
            continue;
        }
        pr_hardcont(" | steal %d from 0x%p", id, mm);

        /* Mark this mm as having no context anymore */
        mm->context.id = MMU_NO_CONTEXT;

        /* Mark it stale on all CPUs that used this mm. For threaded
         * implementations, we set it on all threads on each core
         * represented in the mask. A future implementation will use
         * a core map instead but this will do for now.
         */
        for_each_cpu(cpu, mm_cpumask(mm)) {
            for (i = cpu_first_thread_sibling(cpu);
                 i <= cpu_last_thread_sibling(cpu); i++)
                __set_bit(id, stale_map[i]);
            /* skip the siblings we just handled */
            cpu = i - 1;
        }
        return id;
    }

    /* This will happen if you have more CPUs than available contexts,
     * all we can do here is wait a bit and try again
     */
    raw_spin_unlock(&context_lock);
    cpu_relax();
    raw_spin_lock(&context_lock);

    /* This will cause the caller to try again */
    return MMU_NO_CONTEXT;
}
#endif /* CONFIG_SMP */
/* Note that this will also be called on SMP if all other CPUs are
* offlined, which means that it may be called for cpu != 0. For
* this to work, we somewhat assume that CPUs that are onlined
* come up with a fully clean TLB (or are cleaned when offlined)
*/
/*
 * UP variant of context stealing: any context can be taken because the
 * local TLB flush is enough to invalidate the victim's translations.
 * Called with context_lock held; always succeeds and returns @id.
 */
static unsigned int steal_context_up(unsigned int id)
{
    struct mm_struct *mm;
    int cpu = smp_processor_id();

    /* Pick up the victim mm */
    mm = context_mm[id];

    pr_hardcont(" | steal %d from 0x%p", id, mm);

    /* Flush the TLB for that context (must happen before the id is
     * cleared below, while the mm still carries it) */
    local_flush_tlb_mm(mm);

    /* Mark this mm as having no context anymore */
    mm->context.id = MMU_NO_CONTEXT;

    /* XXX This clear should ultimately be part of local_flush_tlb_mm */
    __clear_bit(id, stale_map[cpu]);

    return id;
}
#ifdef DEBUG_MAP_CONSISTENCY
/*
 * Debug-only consistency audit of the context bookkeeping: the free
 * count must match the bitmap, every used bit must have an owning mm,
 * active counts must not exceed online CPUs, and context 0 must stay
 * reserved when first_context > 0.  Called with context_lock held.
 */
static void context_check_map(void)
{
    unsigned int id, nrf, nact;

    nrf = nact = 0;
    for (id = first_context; id <= last_context; id++) {
        int used = test_bit(id, context_map);
        if (!used)
            nrf++;
        if (used != (context_mm[id] != NULL))
            pr_err("MMU: Context %d is %s and MM is %p !\n",
                   id, used ? "used" : "free", context_mm[id]);
        if (context_mm[id] != NULL)
            nact += context_mm[id]->context.active;
    }
    if (nrf != nr_free_contexts) {
        pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
               nr_free_contexts, nrf);
        /* repair the counter so the warning doesn't repeat forever */
        nr_free_contexts = nrf;
    }
    if (nact > num_online_cpus())
        pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
               nact, num_online_cpus());
    if (first_context > 0 && !test_bit(0, context_map))
        pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
/* no-op when map-consistency debugging is compiled out */
static void context_check_map(void) { }
#endif
/*
 * Activate the context for @next on this CPU, allocating or stealing a
 * context id if it doesn't have one yet, then program the MMU with it.
 * All bookkeeping happens under context_lock; note that
 * steal_context_smp() may drop the lock, which is why the SMP path can
 * loop back to "again".
 */
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
    unsigned int i, id, cpu = smp_processor_id();
    unsigned long *map;

    /* No lockless fast path .. yet */
    raw_spin_lock(&context_lock);

    pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
            cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
    /* Mark us active and the previous one not anymore */
    next->context.active++;
    if (prev) {
        pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
        WARN_ON(prev->context.active < 1);
        prev->context.active--;
    }

 again:
#endif /* CONFIG_SMP */

    /* If we already have a valid assigned context, skip all that */
    id = next->context.id;
    if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
        if (context_mm[id] != next)
            pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
                   next, id, id, context_mm[id]);
#endif
        goto ctxt_ok;
    }

    /* We really don't have a context, let's try to acquire one */
    id = next_context;
    if (id > last_context)
        id = first_context;
    map = context_map;

    /* No more free contexts, let's try to steal one */
    if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
        if (num_online_cpus() > 1) {
            id = steal_context_smp(id);
            /* steal_context_smp() dropped the lock: restart */
            if (id == MMU_NO_CONTEXT)
                goto again;
            goto stolen;
        }
#endif /* CONFIG_SMP */
        id = steal_context_up(id);
        goto stolen;
    }
    nr_free_contexts--;

    /* We know there's at least one free context, try to find it */
    while (__test_and_set_bit(id, map)) {
        id = find_next_zero_bit(map, last_context+1, id);
        if (id > last_context)
            id = first_context;
    }
 stolen:
    next_context = id + 1;
    context_mm[id] = next;
    next->context.id = id;
    pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

    context_check_map();
 ctxt_ok:

    /* If that context got marked stale on this CPU, then flush the
     * local TLB for it and unmark it before we use it
     */
    if (test_bit(id, stale_map[cpu])) {
        pr_hardcont(" | stale flush %d [%d..%d]",
                    id, cpu_first_thread_sibling(cpu),
                    cpu_last_thread_sibling(cpu));

        local_flush_tlb_mm(next);

        /* XXX This clear should ultimately be part of local_flush_tlb_mm */
        for (i = cpu_first_thread_sibling(cpu);
             i <= cpu_last_thread_sibling(cpu); i++) {
            __clear_bit(id, stale_map[i]);
        }
    }

    /* Flick the MMU and release lock */
    pr_hardcont(" -> %d\n", id);
    set_context(id, next->pgd);
    raw_spin_unlock(&context_lock);
}
/*
* Set up the context for a new address space.
*/
/*
 * Initialize the MMU context state of a freshly created address space.
 * No context id is allocated here; one is assigned lazily on the first
 * switch_mmu_context() that activates this mm.  Always returns 0.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
    pr_hard("initing context for mm @%p\n", mm);

    mm->context.active = 0;
    mm->context.id = MMU_NO_CONTEXT;

    return 0;
}
/*
* We're finished using the context for an address space.
*/
/*
 * Release the context id owned by a dying address space back to the
 * free pool.  Safe to call for an mm that never got a context.
 */
void destroy_context(struct mm_struct *mm)
{
    unsigned long flags;
    unsigned int id;

    /* unlocked fast path: this mm never had a context assigned */
    if (mm->context.id == MMU_NO_CONTEXT)
        return;

    WARN_ON(mm->context.active != 0);

    raw_spin_lock_irqsave(&context_lock, flags);
    /* re-check under the lock: the id may have been stolen meanwhile */
    id = mm->context.id;
    if (id != MMU_NO_CONTEXT) {
        __clear_bit(id, context_map);
        mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
        mm->context.active = 0;
#endif
        context_mm[id] = NULL;
        nr_free_contexts++;
    }
    raw_spin_unlock_irqrestore(&context_lock, flags);
}
#ifdef CONFIG_SMP
/*
 * CPU hotplug callback: allocate a per-CPU stale-context bitmap when a
 * CPU comes up and free it (plus scrub cpu_vm_mask bits) when it goes
 * away.  NOTE(review): the kzalloc() result is not checked here; a
 * failed allocation would leave stale_map[cpu] NULL — confirm callers
 * tolerate that before relying on it.
 */
static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
                                            unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned int)(long)hcpu;
#ifdef CONFIG_HOTPLUG_CPU
    struct task_struct *p;
#endif
    /* We don't touch CPU 0 map, it's allocated at boot and kept
     * around forever
     */
    if (cpu == boot_cpuid)
        return NOTIFY_OK;

    switch (action) {
    case CPU_UP_PREPARE:
    case CPU_UP_PREPARE_FROZEN:
        pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
        stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
        break;
#ifdef CONFIG_HOTPLUG_CPU
    case CPU_UP_CANCELED:
    case CPU_UP_CANCELED_FROZEN:
    case CPU_DEAD:
    case CPU_DEAD_FROZEN:
        pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
        kfree(stale_map[cpu]);
        stale_map[cpu] = NULL;

        /* We also clear the cpu_vm_mask bits of CPUs going away */
        read_lock(&tasklist_lock);
        for_each_process(p) {
            if (p->mm)
                cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
        }
        read_unlock(&tasklist_lock);
        break;
#endif /* CONFIG_HOTPLUG_CPU */
    }
    return NOTIFY_OK;
}
/* hotplug notifier, registered from mmu_context_init() on SMP */
static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
    .notifier_call = mmu_context_cpu_notify,
};
#endif /* CONFIG_SMP */
/*
* Initialize the context management stuff.
*/
/*
 * Boot-time setup: pick the context id range for the detected MMU
 * family, allocate the context bitmap, id->mm table and the boot CPU's
 * stale map from bootmem, and register the hotplug notifier on SMP.
 */
void __init mmu_context_init(void)
{
    /* Mark init_mm as being active on all possible CPUs since
     * we'll get called with prev == init_mm the first time
     * we schedule on a given CPU
     */
    init_mm.context.active = NR_CPUS;

    /*
     * The MPC8xx has only 16 contexts. We rotate through them on each
     * task switch. A better way would be to keep track of tasks that
     * own contexts, and implement an LRU usage. That way very active
     * tasks don't always have to pay the TLB reload overhead. The
     * kernel pages are mapped shared, so the kernel can run on behalf
     * of any task that makes a kernel entry. Shared does not mean they
     * are not protected, just that the ASID comparison is not performed.
     * -- Dan
     *
     * The IBM4xx has 256 contexts, so we can just rotate through these
     * as a way of "switching" contexts. If the TID of the TLB is zero,
     * the PID/TID comparison is disabled, so we can use a TID of zero
     * to represent all kernel pages as shared among all contexts.
     * -- Dan
     *
     * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
     * should normally never have to steal though the facility is
     * present if needed.
     * -- BenH
     */
    if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
        first_context = 0;
        last_context = 15;
    } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
        first_context = 1;
        last_context = 65535;
    } else
#ifdef CONFIG_PPC_BOOK3E_MMU
    if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
        /* Book3E: read the supported PID width from MMUCFG */
        u32 mmucfg = mfspr(SPRN_MMUCFG);
        u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
                >> MMUCFG_PIDSIZE_SHIFT;
        first_context = 1;
        last_context = (1UL << (pid_bits + 1)) - 1;
    } else
#endif
    {
        first_context = 1;
        last_context = 255;
    }

#ifdef DEBUG_CLAMP_LAST_CONTEXT
    last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
    /*
     * Allocate the maps used by context management
     */
    context_map = alloc_bootmem(CTX_MAP_SIZE);
    context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
#ifndef CONFIG_SMP
    stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
#else
    stale_map[boot_cpuid] = alloc_bootmem(CTX_MAP_SIZE);

    register_cpu_notifier(&mmu_context_cpu_nb);
#endif

    printk(KERN_INFO
           "MMU: Allocated %zu bytes of context maps for %d contexts\n",
           2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
           last_context - first_context + 1);

    /*
     * Some processors have too few contexts to reserve one for
     * init_mm, and require using context 0 for a normal task.
     * Other processors reserve the use of context zero for the kernel.
     * This code assumes first_context < 32.
     */
    context_map[0] = (1 << first_context) - 1;
    next_context = first_context;
    nr_free_contexts = last_context - first_context + 1;
}
| gpl-2.0 |
RenderBroken/msm8974_Victara_render_kernel | fs/afs/super.c | 4625 | 12575 | /* AFS superblock handling
*
* Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
*
* This software may be freely redistributed under the terms of the
* GNU General Public License.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Authors: David Howells <dhowells@redhat.com>
* David Woodhouse <dwmw2@infradead.org>
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/parser.h>
#include <linux/statfs.h>
#include <linux/sched.h>
#include "internal.h"
#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */
static void afs_i_init_once(void *foo);
static struct dentry *afs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data);
static void afs_kill_super(struct super_block *sb);
static struct inode *afs_alloc_inode(struct super_block *sb);
static void afs_destroy_inode(struct inode *inode);
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf);
/* registration record for the "afs" filesystem type */
struct file_system_type afs_fs_type = {
    .owner = THIS_MODULE,
    .name = "afs",
    .mount = afs_mount,
    .kill_sb = afs_kill_super,
    .fs_flags = 0,
};

/* superblock operations shared by all AFS mounts */
static const struct super_operations afs_super_ops = {
    .statfs = afs_statfs,
    .alloc_inode = afs_alloc_inode,
    .drop_inode = afs_drop_inode,
    .destroy_inode = afs_destroy_inode,
    .evict_inode = afs_evict_inode,
    .show_options = generic_show_options,
};
static struct kmem_cache *afs_inode_cachep;
static atomic_t afs_count_active_inodes;
/* tokens recognized by afs_parse_options() */
enum {
    afs_no_opt,
    afs_opt_cell,
    afs_opt_rwpath,
    afs_opt_vol,
    afs_opt_autocell,
};

/* mount-option strings mapped to the tokens above; the NULL entry
 * terminates the table and catches unknown options */
static const match_table_t afs_options_list = {
    { afs_opt_cell, "cell=%s" },
    { afs_opt_rwpath, "rwpath" },
    { afs_opt_vol, "vol=%s" },
    { afs_opt_autocell, "autocell" },
    { afs_no_opt, NULL },
};
/*
* initialise the filesystem
*/
/*
 * Module-init entry point: create the afs_vnode slab cache and register
 * the "afs" filesystem type.  Returns 0 on success or a negative errno,
 * undoing the cache creation if registration fails.
 */
int __init afs_fs_init(void)
{
    int ret;

    _enter("");

    atomic_set(&afs_count_active_inodes, 0);

    /* slab cache backing struct afs_vnode (embeds the VFS inode) */
    ret = -ENOMEM;
    afs_inode_cachep = kmem_cache_create("afs_inode_cache",
                                         sizeof(struct afs_vnode),
                                         0,
                                         SLAB_HWCACHE_ALIGN,
                                         afs_i_init_once);
    if (!afs_inode_cachep) {
        printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
        return ret;
    }

    /* now export our filesystem to lesser mortals */
    ret = register_filesystem(&afs_fs_type);
    if (ret < 0)
        goto error_register;

    _leave(" = 0");
    return 0;

error_register:
    kmem_cache_destroy(afs_inode_cachep);
    _leave(" = %d", ret);
    return ret;
}
/*
* clean up the filesystem
*/
/*
 * Module-exit teardown: stop the mountpoint timer, unregister the
 * filesystem and destroy the inode cache.  BUGs if any AFS inodes are
 * still live at this point, since freeing the cache under them would
 * be a use-after-free.
 */
void __exit afs_fs_exit(void)
{
    _enter("");

    afs_mntpt_kill_timer();
    unregister_filesystem(&afs_fs_type);

    if (atomic_read(&afs_count_active_inodes) != 0) {
        printk("kAFS: %d active inode objects still present\n",
               atomic_read(&afs_count_active_inodes));
        BUG();
    }

    kmem_cache_destroy(afs_inode_cachep);
    _leave("");
}
/*
* parse the mount options
* - this function has been shamelessly adapted from the ext3 fs which
* shamelessly adapted it from the msdos fs
*/
/*
 * Parse the comma-separated mount options into @params.  "vol=" also
 * redirects @devname to the option's value.  Returns 0 on success or
 * a negative errno (including errors from the cell lookup).
 */
static int afs_parse_options(struct afs_mount_params *params,
                             char *options, const char **devname)
{
    struct afs_cell *cell;
    substring_t args[MAX_OPT_ARGS];
    char *p;
    int token;

    _enter("%s", options);

    /* force-terminate: the options buffer is a page handed in by the
     * VFS — NOTE(review): assumes it really is PAGE_SIZE bytes */
    options[PAGE_SIZE - 1] = 0;

    while ((p = strsep(&options, ","))) {
        if (!*p)
            continue;

        token = match_token(p, afs_options_list, args);
        switch (token) {
        case afs_opt_cell:
            /* replace the default cell with the named one */
            cell = afs_cell_lookup(args[0].from,
                                   args[0].to - args[0].from,
                                   false);
            if (IS_ERR(cell))
                return PTR_ERR(cell);
            afs_put_cell(params->cell);
            params->cell = cell;
            break;

        case afs_opt_rwpath:
            params->rwpath = 1;
            break;

        case afs_opt_vol:
            *devname = args[0].from;
            break;

        case afs_opt_autocell:
            params->autocell = 1;
            break;

        default:
            printk(KERN_ERR "kAFS:"
                   " Unknown or invalid mount option: '%s'\n", p);
            return -EINVAL;
        }
    }

    _leave(" = 0");
    return 0;
}
/*
* parse a device name to get cell name, volume name, volume type and R/W
* selector
* - this can be one of the following:
* "%[cell:]volume[.]" R/W volume
* "#[cell:]volume[.]" R/O or R/W volume (rwpath=0),
* or R/W (rwpath=1) volume
* "%[cell:]volume.readonly" R/O volume
* "#[cell:]volume.readonly" R/O volume
* "%[cell:]volume.backup" Backup volume
* "#[cell:]volume.backup" Backup volume
*/
static int afs_parse_device_name(struct afs_mount_params *params,
                                 const char *name)
{
    struct afs_cell *cell;
    const char *cellname, *suffix;
    int cellnamesz;

    _enter(",%s", name);

    if (!name) {
        printk(KERN_ERR "kAFS: no volume name specified\n");
        return -EINVAL;
    }

    /* the name must begin with '%' (force R/W) or '#' and have at
     * least one character after it */
    if ((name[0] != '%' && name[0] != '#') || !name[1]) {
        printk(KERN_ERR "kAFS: unparsable volume name\n");
        return -EINVAL;
    }

    /* determine the type of volume we're looking for */
    params->type = AFSVL_ROVOL;
    params->force = false;
    if (params->rwpath || name[0] == '%') {
        params->type = AFSVL_RWVOL;
        params->force = true;
    }
    name++;

    /* split the cell name out if there is one ("cell:volume") */
    params->volname = strchr(name, ':');
    if (params->volname) {
        cellname = name;
        cellnamesz = params->volname - name;
        params->volname++;
    } else {
        params->volname = name;
        cellname = NULL;
        cellnamesz = 0;
    }

    /* the volume type is further affected by a possible suffix;
     * a bare trailing '.' keeps the type chosen above */
    suffix = strrchr(params->volname, '.');
    if (suffix) {
        if (strcmp(suffix, ".readonly") == 0) {
            params->type = AFSVL_ROVOL;
            params->force = true;
        } else if (strcmp(suffix, ".backup") == 0) {
            params->type = AFSVL_BACKVOL;
            params->force = true;
        } else if (suffix[1] == 0) {
        } else {
            /* unknown suffix: treat it as part of the volume name */
            suffix = NULL;
        }
    }

    params->volnamesz = suffix ?
        suffix - params->volname : strlen(params->volname);

    _debug("cell %*.*s [%p]",
           cellnamesz, cellnamesz, cellname ?: "", params->cell);

    /* lookup the cell record: an explicit cell always wins; otherwise
     * only look up when no default cell was set by the options */
    if (cellname || !params->cell) {
        cell = afs_cell_lookup(cellname, cellnamesz, true);
        if (IS_ERR(cell)) {
            printk(KERN_ERR "kAFS: unable to lookup cell '%*.*s'\n",
                   cellnamesz, cellnamesz, cellname ?: "");
            return PTR_ERR(cell);
        }
        afs_put_cell(params->cell);
        params->cell = cell;
    }

    _debug("CELL:%s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s",
           params->cell->name, params->cell,
           params->volnamesz, params->volnamesz, params->volname,
           suffix ?: "-", params->type, params->force ? " FORCE" : "");

    return 0;
}
/*
* check a superblock to see if it's the one we're looking for
*/
static int afs_test_super(struct super_block *sb, void *data)
{
struct afs_super_info *as1 = data;
struct afs_super_info *as = sb->s_fs_info;
return as->volume == as1->volume;
}
/* sget() setup callback: attach the afs_super_info and give the new
 * superblock an anonymous device number (AFS has no block device) */
static int afs_set_super(struct super_block *sb, void *data)
{
    sb->s_fs_info = data;
    return set_anon_super(sb, NULL);
}
/*
* fill in the superblock
*/
static int afs_fill_super(struct super_block *sb,
                          struct afs_mount_params *params)
{
    struct afs_super_info *as = sb->s_fs_info;
    struct afs_fid fid;
    struct inode *inode = NULL;
    int ret;

    _enter("");

    /* fill in the superblock */
    sb->s_blocksize = PAGE_CACHE_SIZE;
    sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
    sb->s_magic = AFS_FS_MAGIC;
    sb->s_op = &afs_super_ops;
    sb->s_bdi = &as->volume->bdi;
    strlcpy(sb->s_id, as->volume->vlocation->vldb.name, sizeof(sb->s_id));

    /* allocate the root inode and dentry; the root of an AFS volume is
     * always vnode 1, uniquifier 1 */
    fid.vid = as->volume->vid;
    fid.vnode = 1;
    fid.unique = 1;
    inode = afs_iget(sb, params->key, &fid, NULL, NULL);
    if (IS_ERR(inode))
        return PTR_ERR(inode);

    if (params->autocell)
        set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);

    ret = -ENOMEM;
    /* d_make_root() consumes the inode even on failure */
    sb->s_root = d_make_root(inode);
    if (!sb->s_root)
        goto error;

    sb->s_d_op = &afs_fs_dentry_operations;

    _leave(" = 0");
    return 0;

error:
    _leave(" = %d", ret);
    return ret;
}
/*
* get an AFS superblock
*/
static struct dentry *afs_mount(struct file_system_type *fs_type,
                                int flags, const char *dev_name, void *options)
{
    struct afs_mount_params params;
    struct super_block *sb;
    struct afs_volume *vol;
    struct key *key;
    /* NOTE(review): kstrdup() result is not checked; on OOM new_opts is
     * NULL and is later handed to save_mount_options() — confirm that
     * path tolerates NULL */
    char *new_opts = kstrdup(options, GFP_KERNEL);
    struct afs_super_info *as;
    int ret;

    _enter(",,%s,%p", dev_name, options);

    memset(&params, 0, sizeof(params));

    /* parse the options and device name */
    if (options) {
        ret = afs_parse_options(&params, options, &dev_name);
        if (ret < 0)
            goto error;
    }

    ret = afs_parse_device_name(&params, dev_name);
    if (ret < 0)
        goto error;

    /* try and do the mount securely */
    key = afs_request_key(params.cell);
    if (IS_ERR(key)) {
        _leave(" = %ld [key]", PTR_ERR(key));
        ret = PTR_ERR(key);
        goto error;
    }
    params.key = key;

    /* parse the device name */
    vol = afs_volume_lookup(&params);
    if (IS_ERR(vol)) {
        ret = PTR_ERR(vol);
        goto error;
    }

    /* allocate a superblock info record */
    as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
    if (!as) {
        ret = -ENOMEM;
        afs_put_volume(vol);
        goto error;
    }
    as->volume = vol;

    /* allocate a deviceless superblock; sget() may instead return an
     * existing superblock for the same volume (afs_test_super) */
    sb = sget(fs_type, afs_test_super, afs_set_super, as);
    if (IS_ERR(sb)) {
        ret = PTR_ERR(sb);
        afs_put_volume(vol);
        kfree(as);
        goto error;
    }

    if (!sb->s_root) {
        /* initial superblock/root creation */
        _debug("create");
        sb->s_flags = flags;
        ret = afs_fill_super(sb, &params);
        if (ret < 0) {
            deactivate_locked_super(sb);
            goto error;
        }
        save_mount_options(sb, new_opts);
        sb->s_flags |= MS_ACTIVE;
    } else {
        /* reusing an existing superblock: drop our duplicate refs */
        _debug("reuse");
        ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
        afs_put_volume(vol);
        kfree(as);
    }

    afs_put_cell(params.cell);
    kfree(new_opts);
    _leave(" = 0 [%p]", sb);
    return dget(sb->s_root);

error:
    afs_put_cell(params.cell);
    key_put(params.key);
    kfree(new_opts);
    _leave(" = %d", ret);
    return ERR_PTR(ret);
}
/* tear down an AFS superblock: release the generic anonymous super,
 * then drop the volume reference and free our private info record */
static void afs_kill_super(struct super_block *sb)
{
    struct afs_super_info *as = sb->s_fs_info;

    kill_anon_super(sb);
    afs_put_volume(as->volume);
    kfree(as);
}
/*
* initialise an inode cache slab element prior to any use
*/
/*
 * Slab constructor for afs_vnode objects: runs once when a slab object
 * is first created, not on every allocation, so only state that
 * survives free/realloc cycles (locks, list heads, work items) belongs
 * here.  Per-allocation state is reset in afs_alloc_inode().
 */
static void afs_i_init_once(void *_vnode)
{
    struct afs_vnode *vnode = _vnode;

    memset(vnode, 0, sizeof(*vnode));
    inode_init_once(&vnode->vfs_inode);
    init_waitqueue_head(&vnode->update_waitq);
    mutex_init(&vnode->permits_lock);
    mutex_init(&vnode->validate_lock);
    spin_lock_init(&vnode->writeback_lock);
    spin_lock_init(&vnode->lock);
    INIT_LIST_HEAD(&vnode->writebacks);
    INIT_LIST_HEAD(&vnode->pending_locks);
    INIT_LIST_HEAD(&vnode->granted_locks);
    INIT_DELAYED_WORK(&vnode->lock_work, afs_lock_work);
    INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
}
/*
* allocate an AFS inode struct from our slab cache
*/
/*
 * Allocate an afs_vnode from the slab cache and reset the fields a
 * previous occupant may have left behind (the constructor only runs on
 * fresh slab objects).  Returns the embedded VFS inode, or NULL on OOM.
 */
static struct inode *afs_alloc_inode(struct super_block *sb)
{
    struct afs_vnode *vnode;

    vnode = kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
    if (!vnode)
        return NULL;

    atomic_inc(&afs_count_active_inodes);

    /* reset per-allocation state */
    memset(&vnode->fid, 0, sizeof(vnode->fid));
    memset(&vnode->status, 0, sizeof(vnode->status));

    vnode->volume = NULL;
    vnode->update_cnt = 0;
    vnode->flags = 1 << AFS_VNODE_UNSET;
    vnode->cb_promised = false;

    _leave(" = %p", &vnode->vfs_inode);
    return &vnode->vfs_inode;
}
/* RCU callback: actually return the vnode to the slab cache once all
 * RCU readers of the inode are done */
static void afs_i_callback(struct rcu_head *head)
{
    struct inode *inode = container_of(head, struct inode, i_rcu);
    struct afs_vnode *vnode = AFS_FS_I(inode);
    kmem_cache_free(afs_inode_cachep, vnode);
}
/*
* destroy an AFS inode struct
*/
/*
 * Destroy an AFS inode: the actual free is deferred through RCU via
 * afs_i_callback().  By this point the vnode must no longer reference
 * a server (asserted below).
 */
static void afs_destroy_inode(struct inode *inode)
{
    struct afs_vnode *vnode = AFS_FS_I(inode);

    _enter("%p{%x:%u}", inode, vnode->fid.vid, vnode->fid.vnode);

    _debug("DESTROY INODE %p", inode);

    ASSERTCMP(vnode->server, ==, NULL);

    call_rcu(&inode->i_rcu, afs_i_callback);
    atomic_dec(&afs_count_active_inodes);
}
/*
* return information about an AFS volume
*/
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
    struct afs_volume_status vs;
    struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
    struct key *key;
    int ret;

    /* the volume-status fetch is an authenticated server call */
    key = afs_request_key(vnode->volume->cell);
    if (IS_ERR(key))
        return PTR_ERR(key);

    ret = afs_vnode_get_volume_status(vnode, key, &vs);
    key_put(key);
    if (ret < 0) {
        _leave(" = %d", ret);
        return ret;
    }

    buf->f_type = dentry->d_sb->s_magic;
    buf->f_bsize = AFS_BLOCK_SIZE;
    buf->f_namelen = AFSNAMEMAX - 1;

    /* a zero quota means "unlimited": fall back to the partition size */
    if (vs.max_quota == 0)
        buf->f_blocks = vs.part_max_blocks;
    else
        buf->f_blocks = vs.max_quota;
    buf->f_bavail = buf->f_bfree = buf->f_blocks - vs.blocks_in_use;
    return 0;
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.