repo_name
string
path
string
copies
string
size
string
content
string
license
string
squake/first
arch/sparc/kernel/idprom.c
4669
3704
/* * idprom.c: Routines to load the idprom into kernel addresses and * interpret the data contained within. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/export.h> #include <asm/oplib.h> #include <asm/idprom.h> struct idprom *idprom; EXPORT_SYMBOL(idprom); static struct idprom idprom_buffer; #ifdef CONFIG_SPARC32 #include <asm/machines.h> /* Fun with Sun released architectures. */ /* Here is the master table of Sun machines which use some implementation * of the Sparc CPU and have a meaningful IDPROM machtype value that we * know about. See asm-sparc/machines.h for empirical constants. */ static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = { /* First, Sun4's */ { .name = "Sun 4/100 Series", .id_machtype = (SM_SUN4 | SM_4_110) }, { .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) }, { .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) }, { .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) }, /* Now Leon */ { .name = "Leon3 System-on-a-Chip", .id_machtype = (M_LEON | M_LEON3_SOC) }, /* Now, Sun4c's */ { .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) }, { .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) }, { .name = "Sun4c SparcStation 1+", .id_machtype = (SM_SUN4C | SM_4C_SS1PLUS) }, { .name = "Sun4c SparcStation SLC", .id_machtype = (SM_SUN4C | SM_4C_SLC) }, { .name = "Sun4c SparcStation 2", .id_machtype = (SM_SUN4C | SM_4C_SS2) }, { .name = "Sun4c SparcStation ELC", .id_machtype = (SM_SUN4C | SM_4C_ELC) }, { .name = "Sun4c SparcStation IPX", .id_machtype = (SM_SUN4C | SM_4C_IPX) }, /* Finally, early Sun4m's */ { .name = "Sun4m SparcSystem600", .id_machtype = (SM_SUN4M | SM_4M_SS60) }, { .name = "Sun4m SparcStation10/20", .id_machtype = (SM_SUN4M | SM_4M_SS50) }, { .name = "Sun4m SparcStation5", .id_machtype = (SM_SUN4M | SM_4M_SS40) }, /* One entry for 
the OBP arch's which are sun4d, sun4e, and newer sun4m's */ { .name = "Sun4M OBP based system", .id_machtype = (SM_SUN4M_OBP | 0x0) } }; static void __init display_system_type(unsigned char machtype) { char sysname[128]; register int i; for (i = 0; i < NUM_SUN_MACHINES; i++) { if (Sun_Machines[i].id_machtype == machtype) { if (machtype != (SM_SUN4M_OBP | 0x00) || prom_getproperty(prom_root_node, "banner-name", sysname, sizeof(sysname)) <= 0) printk(KERN_WARNING "TYPE: %s\n", Sun_Machines[i].name); else printk(KERN_WARNING "TYPE: %s\n", sysname); return; } } prom_printf("IDPROM: Warning, bogus id_machtype value, 0x%x\n", machtype); } #else static void __init display_system_type(unsigned char machtype) { } #endif /* Calculate the IDPROM checksum (xor of the data bytes). */ static unsigned char __init calc_idprom_cksum(struct idprom *idprom) { unsigned char cksum, i, *ptr = (unsigned char *)idprom; for (i = cksum = 0; i <= 0x0E; i++) cksum ^= *ptr++; return cksum; } /* Create a local IDPROM copy, verify integrity, and display information. */ void __init idprom_init(void) { prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer)); idprom = &idprom_buffer; if (idprom->id_format != 0x01) prom_printf("IDPROM: Warning, unknown format type!\n"); if (idprom->id_cksum != calc_idprom_cksum(idprom)) prom_printf("IDPROM: Warning, checksum failure (nvram=%x, calc=%x)!\n", idprom->id_cksum, calc_idprom_cksum(idprom)); display_system_type(idprom->id_machtype); printk(KERN_WARNING "Ethernet address: %pM\n", idprom->id_ethaddr); }
gpl-2.0
lostemp/lsk-3.4-android-12.09
drivers/char/mbcs.c
4925
20339
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2005 Silicon Graphics, Inc.  All rights reserved.
 */

/*
 *	MOATB Core Services driver.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/sn/addrs.h>
#include <asm/sn/intr.h>
#include <asm/sn/tiocx.h>
#include "mbcs.h"

#define MBCS_DEBUG 0
#if MBCS_DEBUG
#define DBG(fmt...)    printk(KERN_ALERT fmt)
#else
#define DBG(fmt...)
#endif
static DEFINE_MUTEX(mbcs_mutex);
static int mbcs_major;

static LIST_HEAD(soft_list);

/*
 * file operations
 */
static const struct file_operations mbcs_ops = {
	.open = mbcs_open,
	.llseek = mbcs_sram_llseek,
	.read = mbcs_sram_read,
	.write = mbcs_sram_write,
	.mmap = mbcs_gscr_mmap,
};

struct mbcs_callback_arg {
	int minor;
	struct cx_dev *cx_dev;
};

static inline void mbcs_getdma_init(struct getdma *gdma)
{
	memset(gdma, 0, sizeof(struct getdma));
	gdma->DoneIntEnable = 1;
}

static inline void mbcs_putdma_init(struct putdma *pdma)
{
	memset(pdma, 0, sizeof(struct putdma));
	pdma->DoneIntEnable = 1;
}

static inline void mbcs_algo_init(struct algoblock *algo_soft)
{
	memset(algo_soft, 0, sizeof(struct algoblock));
}

/* Program the read-DMA (device -> host) engine registers. */
static inline void mbcs_getdma_set(void *mmr,
		       uint64_t hostAddr,
		       uint64_t localAddr,
		       uint64_t localRamSel,
		       uint64_t numPkts,
		       uint64_t amoEnable,
		       uint64_t intrEnable,
		       uint64_t peerIO,
		       uint64_t amoHostDest,
		       uint64_t amoModType,
		       uint64_t intrHostDest, uint64_t intrVector)
{
	union dma_control rdma_control;
	union dma_amo_dest amo_dest;
	union intr_dest intr_dest;
	union dma_localaddr local_addr;
	union dma_hostaddr host_addr;

	rdma_control.dma_control_reg = 0;
	amo_dest.dma_amo_dest_reg = 0;
	intr_dest.intr_dest_reg = 0;
	local_addr.dma_localaddr_reg = 0;
	host_addr.dma_hostaddr_reg = 0;

	host_addr.dma_sys_addr = hostAddr;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);

	local_addr.dma_ram_addr = localAddr;
	local_addr.dma_ram_sel = localRamSel;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);

	rdma_control.dma_op_length = numPkts;
	rdma_control.done_amo_en = amoEnable;
	rdma_control.done_int_en = intrEnable;
	rdma_control.pio_mem_n = peerIO;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_CTRL, rdma_control.dma_control_reg);

	amo_dest.dma_amo_sys_addr = amoHostDest;
	amo_dest.dma_amo_mod_type = amoModType;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);

	intr_dest.address = intrHostDest;
	intr_dest.int_vector = intrVector;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_INT_DEST, intr_dest.intr_dest_reg);
}

/* Program the write-DMA (host -> device) engine registers. */
static inline void mbcs_putdma_set(void *mmr,
		       uint64_t hostAddr,
		       uint64_t localAddr,
		       uint64_t localRamSel,
		       uint64_t numPkts,
		       uint64_t amoEnable,
		       uint64_t intrEnable,
		       uint64_t peerIO,
		       uint64_t amoHostDest,
		       uint64_t amoModType,
		       uint64_t intrHostDest, uint64_t intrVector)
{
	union dma_control wdma_control;
	union dma_amo_dest amo_dest;
	union intr_dest intr_dest;
	union dma_localaddr local_addr;
	union dma_hostaddr host_addr;

	wdma_control.dma_control_reg = 0;
	amo_dest.dma_amo_dest_reg = 0;
	intr_dest.intr_dest_reg = 0;
	local_addr.dma_localaddr_reg = 0;
	host_addr.dma_hostaddr_reg = 0;

	host_addr.dma_sys_addr = hostAddr;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);

	local_addr.dma_ram_addr = localAddr;
	local_addr.dma_ram_sel = localRamSel;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);

	wdma_control.dma_op_length = numPkts;
	wdma_control.done_amo_en = amoEnable;
	wdma_control.done_int_en = intrEnable;
	wdma_control.pio_mem_n = peerIO;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_CTRL, wdma_control.dma_control_reg);

	amo_dest.dma_amo_sys_addr = amoHostDest;
	amo_dest.dma_amo_mod_type = amoModType;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);

	intr_dest.address = intrHostDest;
	intr_dest.int_vector = intrVector;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_INT_DEST, intr_dest.intr_dest_reg);
}

/* Program the algorithm engine's AMO/interrupt/step-count registers. */
static inline void mbcs_algo_set(void *mmr,
		     uint64_t amoHostDest,
		     uint64_t amoModType,
		     uint64_t intrHostDest,
		     uint64_t intrVector, uint64_t algoStepCount)
{
	union dma_amo_dest amo_dest;
	union intr_dest intr_dest;
	union algo_step step;

	step.algo_step_reg = 0;
	intr_dest.intr_dest_reg = 0;
	amo_dest.dma_amo_dest_reg = 0;

	amo_dest.dma_amo_sys_addr = amoHostDest;
	amo_dest.dma_amo_mod_type = amoModType;
	MBCS_MMR_SET(mmr, MBCS_ALG_AMO_DEST, amo_dest.dma_amo_dest_reg);

	intr_dest.address = intrHostDest;
	intr_dest.int_vector = intrVector;
	MBCS_MMR_SET(mmr, MBCS_ALG_INT_DEST, intr_dest.intr_dest_reg);

	step.alg_step_cnt = algoStepCount;
	MBCS_MMR_SET(mmr, MBCS_ALG_STEP, step.algo_step_reg);
}

/* Program and kick off the read-DMA engine.  Returns -1 if no host
 * buffer has been set up yet. */
static inline int mbcs_getdma_start(struct mbcs_soft *soft)
{
	void *mmr_base;
	struct getdma *gdma;
	uint64_t numPkts;
	union cm_control cm_control;

	mmr_base = soft->mmr_base;
	gdma = &soft->getdma;

	/* check that host address got setup */
	if (!gdma->hostAddr)
		return -1;

	numPkts =
	    (gdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;

	/* program engine */
	mbcs_getdma_set(mmr_base, tiocx_dma_addr(gdma->hostAddr),
		   gdma->localAddr,
		   (gdma->localAddr < MB2) ? 0 :
		   (gdma->localAddr < MB4) ? 1 :
		   (gdma->localAddr < MB6) ? 2 : 3,
		   numPkts,
		   gdma->DoneAmoEnable,
		   gdma->DoneIntEnable,
		   gdma->peerIO,
		   gdma->amoHostDest,
		   gdma->amoModType,
		   gdma->intrHostDest, gdma->intrVector);

	/* start engine */
	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
	cm_control.rd_dma_go = 1;
	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

	return 0;
}

/* Program and kick off the write-DMA engine.  Returns -1 if no host
 * buffer has been set up yet. */
static inline int mbcs_putdma_start(struct mbcs_soft *soft)
{
	void *mmr_base;
	struct putdma *pdma;
	uint64_t numPkts;
	union cm_control cm_control;

	mmr_base = soft->mmr_base;
	pdma = &soft->putdma;

	/* check that host address got setup */
	if (!pdma->hostAddr)
		return -1;

	numPkts =
	    (pdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;

	/* program engine */
	mbcs_putdma_set(mmr_base, tiocx_dma_addr(pdma->hostAddr),
		   pdma->localAddr,
		   (pdma->localAddr < MB2) ? 0 :
		   (pdma->localAddr < MB4) ? 1 :
		   (pdma->localAddr < MB6) ? 2 : 3,
		   numPkts,
		   pdma->DoneAmoEnable,
		   pdma->DoneIntEnable,
		   pdma->peerIO,
		   pdma->amoHostDest,
		   pdma->amoModType,
		   pdma->intrHostDest, pdma->intrVector);

	/* start engine */
	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
	cm_control.wr_dma_go = 1;
	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

	return 0;
}

/* Start the algorithm engine; completion is signalled through
 * soft->algo_done by the interrupt handler. */
static inline int mbcs_algo_start(struct mbcs_soft *soft)
{
	struct algoblock *algo_soft = &soft->algo;
	void *mmr_base = soft->mmr_base;
	union cm_control cm_control;

	if (mutex_lock_interruptible(&soft->algolock))
		return -ERESTARTSYS;

	atomic_set(&soft->algo_done, 0);

	mbcs_algo_set(mmr_base,
		 algo_soft->amoHostDest,
		 algo_soft->amoModType,
		 algo_soft->intrHostDest,
		 algo_soft->intrVector, algo_soft->algoStepCount);

	/* start algorithm */
	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
	cm_control.alg_done_int_en = 1;
	cm_control.alg_go = 1;
	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

	mutex_unlock(&soft->algolock);

	return 0;
}

/* DMA device SRAM contents out to the host buffer at hostAddr and wait
 * for completion.  Returns bytes transferred or a negative errno. */
static inline ssize_t
do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
		      size_t len, loff_t * off)
{
	int rv = 0;

	if (mutex_lock_interruptible(&soft->dmawritelock))
		return -ERESTARTSYS;

	atomic_set(&soft->dmawrite_done, 0);

	soft->putdma.hostAddr = hostAddr;
	soft->putdma.localAddr = *off;
	soft->putdma.bytes = len;

	if (mbcs_putdma_start(soft) < 0) {
		DBG(KERN_ALERT "do_mbcs_sram_dmawrite: "
					"mbcs_putdma_start failed\n");
		rv = -EAGAIN;
		goto dmawrite_exit;
	}

	if (wait_event_interruptible(soft->dmawrite_queue,
					atomic_read(&soft->dmawrite_done))) {
		rv = -ERESTARTSYS;
		goto dmawrite_exit;
	}

	rv = len;
	*off += len;

dmawrite_exit:
	mutex_unlock(&soft->dmawritelock);

	return rv;
}

/* DMA host buffer contents at hostAddr into device SRAM and wait for
 * completion.  Returns bytes transferred or a negative errno. */
static inline ssize_t
do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
		     size_t len, loff_t * off)
{
	int rv = 0;

	if (mutex_lock_interruptible(&soft->dmareadlock))
		return -ERESTARTSYS;

	/*
	 * Bug fix: this used to clear dmawrite_done, so a stale
	 * dmaread_done left over from a previous transfer would make
	 * the wait below return before this DMA actually completed.
	 */
	atomic_set(&soft->dmaread_done, 0);

	soft->getdma.hostAddr = hostAddr;
	soft->getdma.localAddr = *off;
	soft->getdma.bytes = len;

	if (mbcs_getdma_start(soft) < 0) {
		DBG(KERN_ALERT "mbcs_strategy: mbcs_getdma_start failed\n");
		rv = -EAGAIN;
		goto dmaread_exit;
	}

	if (wait_event_interruptible(soft->dmaread_queue,
					atomic_read(&soft->dmaread_done))) {
		rv = -ERESTARTSYS;
		goto dmaread_exit;
	}

	rv = len;
	*off += len;

dmaread_exit:
	mutex_unlock(&soft->dmareadlock);

	return rv;
}

static int mbcs_open(struct inode *ip, struct file *fp)
{
	struct mbcs_soft *soft;
	int minor;

	mutex_lock(&mbcs_mutex);
	minor = iminor(ip);

	/* Nothing protects access to this list... */
	list_for_each_entry(soft, &soft_list, list) {
		if (soft->nasid == minor) {
			fp->private_data = soft->cxdev;
			mutex_unlock(&mbcs_mutex);
			return 0;
		}
	}

	mutex_unlock(&mbcs_mutex);
	return -ENODEV;
}

static ssize_t mbcs_sram_read(struct file * fp, char __user *buf,
			      size_t len, loff_t * off)
{
	struct cx_dev *cx_dev = fp->private_data;
	struct mbcs_soft *soft = cx_dev->soft;
	uint64_t hostAddr;
	int rv = 0;

	hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
	if (hostAddr == 0)
		return -ENOMEM;

	rv = do_mbcs_sram_dmawrite(soft, hostAddr, len, off);
	if (rv < 0)
		goto exit;

	if (copy_to_user(buf, (void *)hostAddr, len))
		rv = -EFAULT;

exit:
	free_pages(hostAddr, get_order(len));

	return rv;
}

static ssize_t
mbcs_sram_write(struct file * fp, const char __user *buf, size_t len,
		loff_t * off)
{
	struct cx_dev *cx_dev = fp->private_data;
	struct mbcs_soft *soft = cx_dev->soft;
	uint64_t hostAddr;
	int rv = 0;

	hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
	if (hostAddr == 0)
		return -ENOMEM;

	if (copy_from_user((void *)hostAddr, buf, len)) {
		rv = -EFAULT;
		goto exit;
	}

	rv = do_mbcs_sram_dmaread(soft, hostAddr, len, off);

exit:
	free_pages(hostAddr, get_order(len));

	return rv;
}

static loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
{
	loff_t newpos;

	switch (whence) {
	case SEEK_SET:
		newpos = off;
		break;

	case SEEK_CUR:
		newpos = filp->f_pos + off;
		break;

	case SEEK_END:
		newpos = MBCS_SRAM_SIZE + off;
		break;

	default:		/* can't happen */
		return -EINVAL;
	}

	if (newpos < 0)
		return -EINVAL;

	filp->f_pos = newpos;

	return newpos;
}

static uint64_t mbcs_pioaddr(struct mbcs_soft *soft, uint64_t offset)
{
	uint64_t mmr_base;

	mmr_base = (uint64_t) (soft->mmr_base + offset);

	return mmr_base;
}

static void mbcs_debug_pioaddr_set(struct mbcs_soft *soft)
{
	soft->debug_addr = mbcs_pioaddr(soft, MBCS_DEBUG_START);
}

static void mbcs_gscr_pioaddr_set(struct mbcs_soft *soft)
{
	soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START);
}

static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct cx_dev *cx_dev = fp->private_data;
	struct mbcs_soft *soft = cx_dev->soft;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    __pa(soft->gscr_addr) >> PAGE_SHIFT,
			    PAGE_SIZE,
			    vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * mbcs_completion_intr_handler - Primary completion handler.
 * @irq: irq
 * @arg: soft struct for device
 *
 */
static irqreturn_t
mbcs_completion_intr_handler(int irq, void *arg)
{
	struct mbcs_soft *soft = (struct mbcs_soft *)arg;
	void *mmr_base;
	union cm_status cm_status;
	union cm_control cm_control;

	mmr_base = soft->mmr_base;
	cm_status.cm_status_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_STATUS);

	if (cm_status.rd_dma_done) {
		/* stop dma-read engine, clear status */
		cm_control.cm_control_reg =
		    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
		cm_control.rd_dma_clr = 1;
		MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
			     cm_control.cm_control_reg);
		atomic_set(&soft->dmaread_done, 1);
		wake_up(&soft->dmaread_queue);
	}
	if (cm_status.wr_dma_done) {
		/* stop dma-write engine, clear status */
		cm_control.cm_control_reg =
		    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
		cm_control.wr_dma_clr = 1;
		MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
			     cm_control.cm_control_reg);
		atomic_set(&soft->dmawrite_done, 1);
		wake_up(&soft->dmawrite_queue);
	}
	if (cm_status.alg_done) {
		/* clear status */
		cm_control.cm_control_reg =
		    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
		cm_control.alg_done_clr = 1;
		MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
			     cm_control.cm_control_reg);
		atomic_set(&soft->algo_done, 1);
		wake_up(&soft->algo_queue);
	}

	return IRQ_HANDLED;
}

/**
 * mbcs_intr_alloc - Allocate interrupts.
 * @dev: device pointer
 *
 */
static int mbcs_intr_alloc(struct cx_dev *dev)
{
	struct sn_irq_info *sn_irq;
	struct mbcs_soft *soft;
	struct getdma *getdma;
	struct putdma *putdma;
	struct algoblock *algo;

	soft = dev->soft;
	getdma = &soft->getdma;
	putdma = &soft->putdma;
	algo = &soft->algo;

	soft->get_sn_irq = NULL;
	soft->put_sn_irq = NULL;
	soft->algo_sn_irq = NULL;

	sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
	if (sn_irq == NULL)
		return -EAGAIN;
	soft->get_sn_irq = sn_irq;
	getdma->intrHostDest = sn_irq->irq_xtalkaddr;
	getdma->intrVector = sn_irq->irq_irq;
	if (request_irq(sn_irq->irq_irq,
			(void *)mbcs_completion_intr_handler, IRQF_SHARED,
			"MBCS get intr", (void *)soft)) {
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}

	sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
	if (sn_irq == NULL) {
		free_irq(soft->get_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}
	soft->put_sn_irq = sn_irq;
	putdma->intrHostDest = sn_irq->irq_xtalkaddr;
	putdma->intrVector = sn_irq->irq_irq;
	if (request_irq(sn_irq->irq_irq,
			(void *)mbcs_completion_intr_handler, IRQF_SHARED,
			"MBCS put intr", (void *)soft)) {
		tiocx_irq_free(soft->put_sn_irq);
		free_irq(soft->get_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}

	sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
	if (sn_irq == NULL) {
		free_irq(soft->put_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->put_sn_irq);
		free_irq(soft->get_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}
	soft->algo_sn_irq = sn_irq;
	algo->intrHostDest = sn_irq->irq_xtalkaddr;
	algo->intrVector = sn_irq->irq_irq;
	if (request_irq(sn_irq->irq_irq,
			(void *)mbcs_completion_intr_handler, IRQF_SHARED,
			"MBCS algo intr", (void *)soft)) {
		tiocx_irq_free(soft->algo_sn_irq);
		free_irq(soft->put_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->put_sn_irq);
		free_irq(soft->get_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}

	return 0;
}

/**
 * mbcs_intr_dealloc - Remove interrupts.
 * @dev: device pointer
 *
 */
static void mbcs_intr_dealloc(struct cx_dev *dev)
{
	struct mbcs_soft *soft;

	soft = dev->soft;

	free_irq(soft->get_sn_irq->irq_irq, soft);
	tiocx_irq_free(soft->get_sn_irq);
	free_irq(soft->put_sn_irq->irq_irq, soft);
	tiocx_irq_free(soft->put_sn_irq);
	free_irq(soft->algo_sn_irq->irq_irq, soft);
	tiocx_irq_free(soft->algo_sn_irq);
}

static inline int mbcs_hw_init(struct mbcs_soft *soft)
{
	void *mmr_base = soft->mmr_base;
	union cm_control cm_control;
	union cm_req_timeout cm_req_timeout;
	uint64_t err_stat;

	cm_req_timeout.cm_req_timeout_reg =
	    MBCS_MMR_GET(mmr_base, MBCS_CM_REQ_TOUT);

	cm_req_timeout.time_out = MBCS_CM_CONTROL_REQ_TOUT_MASK;
	MBCS_MMR_SET(mmr_base, MBCS_CM_REQ_TOUT,
		     cm_req_timeout.cm_req_timeout_reg);

	mbcs_gscr_pioaddr_set(soft);
	mbcs_debug_pioaddr_set(soft);

	/* clear errors */
	err_stat = MBCS_MMR_GET(mmr_base, MBCS_CM_ERR_STAT);
	MBCS_MMR_SET(mmr_base, MBCS_CM_CLR_ERR_STAT, err_stat);
	MBCS_MMR_ZERO(mmr_base, MBCS_CM_ERROR_DETAIL1);

	/* enable interrupts */
	/* turn off 2^23 (INT_EN_PIO_REQ_ADDR_INV) */
	MBCS_MMR_SET(mmr_base, MBCS_CM_ERR_INT_EN, 0x3ffffff7e00ffUL);

	/* arm status regs and clear engines */
	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
	cm_control.rearm_stat_regs = 1;
	cm_control.alg_clr = 1;
	cm_control.wr_dma_clr = 1;
	cm_control.rd_dma_clr = 1;

	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

	return 0;
}

static ssize_t show_algo(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct cx_dev *cx_dev = to_cx_dev(dev);
	struct mbcs_soft *soft = cx_dev->soft;
	uint64_t debug0;

	/*
	 * By convention, the first debug register contains the
	 * algorithm number and revision.
	 */
	debug0 = *(uint64_t *) soft->debug_addr;

	return sprintf(buf, "0x%x 0x%x\n",
		       upper_32_bits(debug0), lower_32_bits(debug0));
}

static ssize_t store_algo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	int n;
	struct cx_dev *cx_dev = to_cx_dev(dev);
	struct mbcs_soft *soft = cx_dev->soft;

	if (count <= 0)
		return 0;

	n = simple_strtoul(buf, NULL, 0);

	if (n == 1) {
		mbcs_algo_start(soft);
		if (wait_event_interruptible(soft->algo_queue,
					atomic_read(&soft->algo_done)))
			return -ERESTARTSYS;
	}

	return count;
}

DEVICE_ATTR(algo, 0644, show_algo, store_algo);

/**
 * mbcs_probe - Initialize for device
 * @dev: device pointer
 * @device_id: id table pointer
 *
 */
static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
{
	struct mbcs_soft *soft;

	dev->soft = NULL;

	soft = kzalloc(sizeof(struct mbcs_soft), GFP_KERNEL);
	if (soft == NULL)
		return -ENOMEM;

	soft->nasid = dev->cx_id.nasid;
	list_add(&soft->list, &soft_list);
	soft->mmr_base = (void *)tiocx_swin_base(dev->cx_id.nasid);
	dev->soft = soft;
	soft->cxdev = dev;

	init_waitqueue_head(&soft->dmawrite_queue);
	init_waitqueue_head(&soft->dmaread_queue);
	init_waitqueue_head(&soft->algo_queue);

	mutex_init(&soft->dmawritelock);
	mutex_init(&soft->dmareadlock);
	mutex_init(&soft->algolock);

	mbcs_getdma_init(&soft->getdma);
	mbcs_putdma_init(&soft->putdma);
	mbcs_algo_init(&soft->algo);

	mbcs_hw_init(soft);

	/* Allocate interrupts */
	mbcs_intr_alloc(dev);

	device_create_file(&dev->dev, &dev_attr_algo);

	return 0;
}

static int mbcs_remove(struct cx_dev *dev)
{
	if (dev->soft) {
		mbcs_intr_dealloc(dev);
		kfree(dev->soft);
	}

	device_remove_file(&dev->dev, &dev_attr_algo);

	return 0;
}

static const struct cx_device_id __devinitdata mbcs_id_table[] = {
	{
	 .part_num = MBCS_PART_NUM,
	 .mfg_num = MBCS_MFG_NUM,
	 },
	{
	 .part_num = MBCS_PART_NUM_ALG0,
	 .mfg_num = MBCS_MFG_NUM,
	 },
	{0, 0}
};

MODULE_DEVICE_TABLE(cx, mbcs_id_table);

static struct cx_drv mbcs_driver = {
	.name = DEVICE_NAME,
	.id_table = mbcs_id_table,
	.probe = mbcs_probe,
	.remove = mbcs_remove,
};

static void __exit mbcs_exit(void)
{
	unregister_chrdev(mbcs_major, DEVICE_NAME);
	cx_driver_unregister(&mbcs_driver);
}

static int __init mbcs_init(void)
{
	int rv;

	if (!ia64_platform_is("sn2"))
		return -ENODEV;

	// Put driver into chrdevs[].  Get major number.
	rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops);
	if (rv < 0) {
		DBG(KERN_ALERT "mbcs_init: can't get major number. %d\n", rv);
		return rv;
	}
	mbcs_major = rv;

	return cx_driver_register(&mbcs_driver);
}

module_init(mbcs_init);
module_exit(mbcs_exit);

MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
MODULE_DESCRIPTION("Driver for MOATB Core Services");
MODULE_LICENSE("GPL");
gpl-2.0
figue/raspberry-pi-kernel
drivers/rtc/rtc-pxa.c
4925
11478
/*
 * Real Time Clock interface for XScale PXA27x and PXA3xx
 *
 * Copyright (C) 2008 Robert Jarzmik
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <mach/hardware.h>

#define TIMER_FREQ		CLOCK_TICK_RATE
#define RTC_DEF_DIVIDER		(32768 - 1)
#define RTC_DEF_TRIM		0
#define MAXFREQ_PERIODIC	1000

/*
 * PXA Registers and bits definitions
 */
#define RTSR_PICE	(1 << 15)	/* Periodic interrupt count enable */
#define RTSR_PIALE	(1 << 14)	/* Periodic interrupt Alarm enable */
#define RTSR_PIAL	(1 << 13)	/* Periodic interrupt detected */
#define RTSR_SWALE2	(1 << 11)	/* RTC stopwatch alarm2 enable */
#define RTSR_SWAL2	(1 << 10)	/* RTC stopwatch alarm2 detected */
#define RTSR_SWALE1	(1 << 9)	/* RTC stopwatch alarm1 enable */
#define RTSR_SWAL1	(1 << 8)	/* RTC stopwatch alarm1 detected */
#define RTSR_RDALE2	(1 << 7)	/* RTC alarm2 enable */
#define RTSR_RDAL2	(1 << 6)	/* RTC alarm2 detected */
#define RTSR_RDALE1	(1 << 5)	/* RTC alarm1 enable */
#define RTSR_RDAL1	(1 << 4)	/* RTC alarm1 detected */
#define RTSR_HZE	(1 << 3)	/* HZ interrupt enable */
#define RTSR_ALE	(1 << 2)	/* RTC alarm interrupt enable */
#define RTSR_HZ		(1 << 1)	/* HZ rising-edge detected */
#define RTSR_AL		(1 << 0)	/* RTC alarm detected */
#define RTSR_TRIG_MASK	(RTSR_AL | RTSR_HZ | RTSR_RDAL1 | RTSR_RDAL2\
			 | RTSR_SWAL1 | RTSR_SWAL2)
#define RYxR_YEAR_S	9
#define RYxR_YEAR_MASK	(0xfff << RYxR_YEAR_S)
#define RYxR_MONTH_S	5
#define RYxR_MONTH_MASK	(0xf << RYxR_MONTH_S)
#define RYxR_DAY_MASK	0x1f
#define RDxR_HOUR_S	12
#define RDxR_HOUR_MASK	(0x1f << RDxR_HOUR_S)
#define RDxR_MIN_S	6
#define RDxR_MIN_MASK	(0x3f << RDxR_MIN_S)
#define RDxR_SEC_MASK	0x3f

#define RTSR		0x08
#define RTTR		0x0c
#define RDCR		0x10
#define RYCR		0x14
#define RDAR1		0x18
#define RYAR1		0x1c
#define RTCPICR		0x34
#define PIAR		0x38

#define rtc_readl(pxa_rtc, reg)	\
	__raw_readl((pxa_rtc)->base + (reg))
#define rtc_writel(pxa_rtc, reg, value)	\
	__raw_writel((value), (pxa_rtc)->base + (reg))

struct pxa_rtc {
	struct resource	*ress;
	void __iomem		*base;
	int			irq_1Hz;
	int			irq_Alrm;
	struct rtc_device	*rtc;
	spinlock_t		lock;		/* Protects this structure */
};

/* Pack a broken-down time into the RYxR register layout. */
static u32 ryxr_calc(struct rtc_time *tm)
{
	return ((tm->tm_year + 1900) << RYxR_YEAR_S)
		| ((tm->tm_mon + 1) << RYxR_MONTH_S)
		| tm->tm_mday;
}

/* Pack a broken-down time into the RDxR register layout. */
static u32 rdxr_calc(struct rtc_time *tm)
{
	return (tm->tm_hour << RDxR_HOUR_S)
		| (tm->tm_min << RDxR_MIN_S)
		| tm->tm_sec;
}

/* Unpack the RYxR/RDxR register pair into a struct rtc_time. */
static void tm_calc(u32 rycr, u32 rdcr, struct rtc_time *tm)
{
	tm->tm_year = ((rycr & RYxR_YEAR_MASK) >> RYxR_YEAR_S) - 1900;
	tm->tm_mon = (((rycr & RYxR_MONTH_MASK) >> RYxR_MONTH_S)) - 1;
	tm->tm_mday = (rycr & RYxR_DAY_MASK);
	tm->tm_hour = (rdcr & RDxR_HOUR_MASK) >> RDxR_HOUR_S;
	tm->tm_min = (rdcr & RDxR_MIN_MASK) >> RDxR_MIN_S;
	tm->tm_sec = rdcr & RDxR_SEC_MASK;
}

/* Clear enable bits in RTSR without acknowledging pending triggers. */
static void rtsr_clear_bits(struct pxa_rtc *pxa_rtc, u32 mask)
{
	u32 rtsr;

	rtsr = rtc_readl(pxa_rtc, RTSR);
	rtsr &= ~RTSR_TRIG_MASK;
	rtsr &= ~mask;
	rtc_writel(pxa_rtc, RTSR, rtsr);
}

/* Set enable bits in RTSR without acknowledging pending triggers. */
static void rtsr_set_bits(struct pxa_rtc *pxa_rtc, u32 mask)
{
	u32 rtsr;

	rtsr = rtc_readl(pxa_rtc, RTSR);
	rtsr &= ~RTSR_TRIG_MASK;
	rtsr |= mask;
	rtc_writel(pxa_rtc, RTSR, rtsr);
}

/* Shared handler for the 1Hz and alarm interrupt lines. */
static irqreturn_t pxa_rtc_irq(int irq, void *dev_id)
{
	struct platform_device *pdev = to_platform_device(dev_id);
	struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev);
	u32 rtsr;
	unsigned long events = 0;

	spin_lock(&pxa_rtc->lock);

	/* clear interrupt sources */
	rtsr = rtc_readl(pxa_rtc, RTSR);
	rtc_writel(pxa_rtc, RTSR, rtsr);

	/* temporary disable rtc interrupts */
	rtsr_clear_bits(pxa_rtc, RTSR_RDALE1 | RTSR_PIALE | RTSR_HZE);

	/* clear alarm interrupt if it has occurred */
	if (rtsr & RTSR_RDAL1)
		rtsr &= ~RTSR_RDALE1;

	/* update irq data & counter */
	if (rtsr & RTSR_RDAL1)
		events |= RTC_AF | RTC_IRQF;
	if (rtsr & RTSR_HZ)
		events |= RTC_UF | RTC_IRQF;
	if (rtsr & RTSR_PIAL)
		events |= RTC_PF | RTC_IRQF;
	rtc_update_irq(pxa_rtc->rtc, 1, events);

	/* enable back rtc interrupts */
	rtc_writel(pxa_rtc, RTSR, rtsr & ~RTSR_TRIG_MASK);

	spin_unlock(&pxa_rtc->lock);
	return IRQ_HANDLED;
}

static int pxa_rtc_open(struct device *dev)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
	int ret;

	ret = request_irq(pxa_rtc->irq_1Hz, pxa_rtc_irq, 0,
			  "rtc 1Hz", dev);
	if (ret < 0) {
		dev_err(dev, "can't get irq %i, err %d\n", pxa_rtc->irq_1Hz,
			ret);
		goto err_irq_1Hz;
	}
	ret = request_irq(pxa_rtc->irq_Alrm, pxa_rtc_irq, 0,
			  "rtc Alrm", dev);
	if (ret < 0) {
		dev_err(dev, "can't get irq %i, err %d\n", pxa_rtc->irq_Alrm,
			ret);
		goto err_irq_Alrm;
	}

	return 0;

err_irq_Alrm:
	free_irq(pxa_rtc->irq_1Hz, dev);
err_irq_1Hz:
	return ret;
}

static void pxa_rtc_release(struct device *dev)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);

	spin_lock_irq(&pxa_rtc->lock);
	rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_RDALE1 | RTSR_HZE);
	spin_unlock_irq(&pxa_rtc->lock);

	free_irq(pxa_rtc->irq_Alrm, dev);
	free_irq(pxa_rtc->irq_1Hz, dev);
}

static int pxa_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);

	spin_lock_irq(&pxa_rtc->lock);

	if (enabled)
		rtsr_set_bits(pxa_rtc, RTSR_RDALE1);
	else
		rtsr_clear_bits(pxa_rtc, RTSR_RDALE1);

	spin_unlock_irq(&pxa_rtc->lock);
	return 0;
}

static int pxa_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
	u32 rycr, rdcr;

	rycr = rtc_readl(pxa_rtc, RYCR);
	rdcr = rtc_readl(pxa_rtc, RDCR);

	tm_calc(rycr, rdcr, tm);
	return 0;
}

static int pxa_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);

	rtc_writel(pxa_rtc, RYCR, ryxr_calc(tm));
	rtc_writel(pxa_rtc, RDCR, rdxr_calc(tm));

	return 0;
}

static int pxa_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
	u32 rtsr, ryar, rdar;

	ryar = rtc_readl(pxa_rtc, RYAR1);
	rdar = rtc_readl(pxa_rtc, RDAR1);
	tm_calc(ryar, rdar, &alrm->time);

	rtsr = rtc_readl(pxa_rtc, RTSR);
	alrm->enabled = (rtsr & RTSR_RDALE1) ? 1 : 0;
	alrm->pending = (rtsr & RTSR_RDAL1) ? 1 : 0;
	return 0;
}

static int pxa_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
	u32 rtsr;

	spin_lock_irq(&pxa_rtc->lock);

	rtc_writel(pxa_rtc, RYAR1, ryxr_calc(&alrm->time));
	rtc_writel(pxa_rtc, RDAR1, rdxr_calc(&alrm->time));

	rtsr = rtc_readl(pxa_rtc, RTSR);
	if (alrm->enabled)
		rtsr |= RTSR_RDALE1;
	else
		rtsr &= ~RTSR_RDALE1;
	rtc_writel(pxa_rtc, RTSR, rtsr);

	spin_unlock_irq(&pxa_rtc->lock);

	return 0;
}

static int pxa_rtc_proc(struct device *dev, struct seq_file *seq)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);

	seq_printf(seq, "trim/divider\t: 0x%08x\n", rtc_readl(pxa_rtc, RTTR));
	seq_printf(seq, "update_IRQ\t: %s\n",
		   (rtc_readl(pxa_rtc, RTSR) & RTSR_HZE) ? "yes" : "no");
	seq_printf(seq, "periodic_IRQ\t: %s\n",
		   (rtc_readl(pxa_rtc, RTSR) & RTSR_PIALE) ? "yes" : "no");
	seq_printf(seq, "periodic_freq\t: %u\n", rtc_readl(pxa_rtc, PIAR));

	return 0;
}

static const struct rtc_class_ops pxa_rtc_ops = {
	.open = pxa_rtc_open,
	.release = pxa_rtc_release,
	.read_time = pxa_rtc_read_time,
	.set_time = pxa_rtc_set_time,
	.read_alarm = pxa_rtc_read_alarm,
	.set_alarm = pxa_rtc_set_alarm,
	.alarm_irq_enable = pxa_alarm_irq_enable,
	.proc = pxa_rtc_proc,
};

static int __init pxa_rtc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pxa_rtc *pxa_rtc;
	int ret;
	u32 rttr;

	pxa_rtc = kzalloc(sizeof(struct pxa_rtc), GFP_KERNEL);
	if (!pxa_rtc)
		return -ENOMEM;

	spin_lock_init(&pxa_rtc->lock);
	platform_set_drvdata(pdev, pxa_rtc);

	ret = -ENXIO;
	pxa_rtc->ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!pxa_rtc->ress) {
		dev_err(dev, "No I/O memory resource defined\n");
		goto err_ress;
	}

	pxa_rtc->irq_1Hz = platform_get_irq(pdev, 0);
	if (pxa_rtc->irq_1Hz < 0) {
		dev_err(dev, "No 1Hz IRQ resource defined\n");
		goto err_ress;
	}
	pxa_rtc->irq_Alrm = platform_get_irq(pdev, 1);
	if (pxa_rtc->irq_Alrm < 0) {
		dev_err(dev, "No alarm IRQ resource defined\n");
		goto err_ress;
	}

	ret = -ENOMEM;
	pxa_rtc->base = ioremap(pxa_rtc->ress->start,
				resource_size(pxa_rtc->ress));
	if (!pxa_rtc->base) {
		dev_err(&pdev->dev, "Unable to map pxa RTC I/O memory\n");
		goto err_map;
	}

	/*
	 * If the clock divider is uninitialized then reset it to the
	 * default value to get the 1Hz clock.
	 */
	if (rtc_readl(pxa_rtc, RTTR) == 0) {
		rttr = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
		rtc_writel(pxa_rtc, RTTR, rttr);
		dev_warn(dev, "warning: initializing default clock"
			 " divider/trim value\n");
	}

	rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_RDALE1 | RTSR_HZE);

	pxa_rtc->rtc = rtc_device_register("pxa-rtc", &pdev->dev, &pxa_rtc_ops,
					   THIS_MODULE);
	ret = PTR_ERR(pxa_rtc->rtc);
	if (IS_ERR(pxa_rtc->rtc)) {
		dev_err(dev, "Failed to register RTC device -> %d\n", ret);
		goto err_rtc_reg;
	}

	device_init_wakeup(dev, 1);

	return 0;

err_rtc_reg:
	iounmap(pxa_rtc->base);
err_ress:
err_map:
	kfree(pxa_rtc);
	return ret;
}

static int __exit pxa_rtc_remove(struct platform_device *pdev)
{
	struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev);

	rtc_device_unregister(pxa_rtc->rtc);

	spin_lock_irq(&pxa_rtc->lock);
	iounmap(pxa_rtc->base);
	spin_unlock_irq(&pxa_rtc->lock);

	kfree(pxa_rtc);

	return 0;
}

#ifdef CONFIG_PM
static int pxa_rtc_suspend(struct device *dev)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(pxa_rtc->irq_Alrm);
	return 0;
}

static int pxa_rtc_resume(struct device *dev)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(pxa_rtc->irq_Alrm);
	return 0;
}

static const struct dev_pm_ops pxa_rtc_pm_ops = {
	.suspend	= pxa_rtc_suspend,
	.resume		= pxa_rtc_resume,
};
#endif

static struct platform_driver pxa_rtc_driver = {
	.remove		= __exit_p(pxa_rtc_remove),
	.driver		= {
		.name	= "pxa-rtc",
#ifdef CONFIG_PM
		.pm	= &pxa_rtc_pm_ops,
#endif
	},
};

static int __init pxa_rtc_init(void)
{
	if (cpu_is_pxa27x() || cpu_is_pxa3xx())
		return platform_driver_probe(&pxa_rtc_driver, pxa_rtc_probe);

	return -ENODEV;
}

static void __exit pxa_rtc_exit(void)
{
	platform_driver_unregister(&pxa_rtc_driver);
}

module_init(pxa_rtc_init);
module_exit(pxa_rtc_exit);

MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_DESCRIPTION("PXA27x/PXA3xx Realtime Clock Driver (RTC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa-rtc");
gpl-2.0
nychitman1/android_kernel_htc_flounder
arch/arm/mach-ixp4xx/ixdpg425-pci.c
4925
1318
/* * arch/arm/mach-ixp4xx/ixdpg425-pci.c * * PCI setup routines for Intel IXDPG425 Platform * * Copyright (C) 2004 MontaVista Softwrae, Inc. * * Maintainer: Deepak Saxena <dsaxena@plexity.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/irq.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <asm/mach/pci.h> void __init ixdpg425_pci_preinit(void) { irq_set_irq_type(IRQ_IXP4XX_GPIO6, IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IRQ_IXP4XX_GPIO7, IRQ_TYPE_LEVEL_LOW); ixp4xx_pci_preinit(); } static int __init ixdpg425_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (slot == 12 || slot == 13) return IRQ_IXP4XX_GPIO7; else if (slot == 14) return IRQ_IXP4XX_GPIO6; else return -1; } struct hw_pci ixdpg425_pci __initdata = { .nr_controllers = 1, .ops = &ixp4xx_ops, .preinit = ixdpg425_pci_preinit, .setup = ixp4xx_setup, .map_irq = ixdpg425_map_irq, }; int __init ixdpg425_pci_init(void) { if (machine_is_ixdpg425()) pci_common_init(&ixdpg425_pci); return 0; } subsys_initcall(ixdpg425_pci_init);
gpl-2.0
djvoleur/V_925P_BOF7
drivers/media/dvb-core/dvb_ringbuffer.c
7229
7225
/*
 *
 * dvb_ringbuffer.c: ring buffer implementation for the dvb driver
 *
 * Copyright (C) 2003 Oliver Endriss
 * Copyright (C) 2004 Andrew de Quincey
 *
 * based on code originally found in av7110.c & dvb_ci.c:
 * Copyright (C) 1999-2003 Ralph Metzler
 *                       & Marcus Metzler for convergence integrated media GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <asm/uaccess.h>

#include "dvb_ringbuffer.h"

/* Status byte of the 3-byte packet header written by pkt_write():
 * two length bytes (big-endian) followed by one of these. */
#define PKT_READY 0
#define PKT_DISPOSED 1

/*
 * Initialize @rbuf over caller-owned storage @data of @len bytes.
 * The buffer deliberately keeps one byte unused (see dvb_ringbuffer_free)
 * so that pread == pwrite unambiguously means "empty".
 */
void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
{
	rbuf->pread=rbuf->pwrite=0;
	rbuf->data=data;
	rbuf->size=len;
	rbuf->error=0;

	init_waitqueue_head(&rbuf->queue);

	spin_lock_init(&(rbuf->lock));
}

/* True when the buffer holds no data (read and write cursors coincide). */
int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf)
{
	return (rbuf->pread==rbuf->pwrite);
}

/* Number of bytes that may still be written.  The "-1" keeps one slot
 * free so a full buffer is distinguishable from an empty one. */
ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf)
{
	ssize_t free;
	free = rbuf->pread - rbuf->pwrite;
	if (free <= 0)
		free += rbuf->size;
	return free-1;
}

/* Number of bytes currently available for reading. */
ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
{
	ssize_t avail;
	avail = rbuf->pwrite - rbuf->pread;
	if (avail < 0)
		avail += rbuf->size;
	return avail;
}

/* Discard all buffered data by advancing the read cursor to the write
 * cursor; also clears the sticky error flag. */
void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf)
{
	rbuf->pread = rbuf->pwrite;
	rbuf->error = 0;
}
EXPORT_SYMBOL(dvb_ringbuffer_flush);

/* Reset both cursors to the start of the storage and clear the error flag. */
void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf)
{
	rbuf->pread = rbuf->pwrite = 0;
	rbuf->error = 0;
}

/* Flush under the buffer's spinlock (IRQ-safe) and wake any waiters. */
void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf)
{
	unsigned long flags;

	spin_lock_irqsave(&rbuf->lock, flags);
	dvb_ringbuffer_flush(rbuf);
	spin_unlock_irqrestore(&rbuf->lock, flags);

	wake_up(&rbuf->queue);
}

/*
 * Copy @len bytes out of the ring into a userspace buffer, handling the
 * wrap-around at the end of storage as a two-part copy.  Returns @len on
 * success or -EFAULT if the user copy fails.
 *
 * NOTE(review): pread is only updated after the data is copied, which is
 * what makes concurrent single-producer use safe; on weakly-ordered SMP
 * this presumably also needs memory barriers — confirm against the
 * upstream dvb_ringbuffer barrier fix before relying on it.
 */
ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, size_t len)
{
	size_t todo = len;
	size_t split;

	/* Bytes between pread and the physical end of the buffer, if the
	 * requested span wraps; 0 when no wrap is needed. */
	split = (rbuf->pread + len > rbuf->size) ? rbuf->size - rbuf->pread : 0;
	if (split > 0) {
		if (copy_to_user(buf, rbuf->data+rbuf->pread, split))
			return -EFAULT;
		buf += split;
		todo -= split;
		rbuf->pread = 0;
	}
	if (copy_to_user(buf, rbuf->data+rbuf->pread, todo))
		return -EFAULT;

	rbuf->pread = (rbuf->pread + todo) % rbuf->size;

	return len;
}

/* Kernel-space variant of dvb_ringbuffer_read_user(): same two-part
 * wrap-around copy, but via memcpy and therefore cannot fail. */
void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
{
	size_t todo = len;
	size_t split;

	split = (rbuf->pread + len > rbuf->size) ? rbuf->size - rbuf->pread : 0;
	if (split > 0) {
		memcpy(buf, rbuf->data+rbuf->pread, split);
		buf += split;
		todo -= split;
		rbuf->pread = 0;
	}
	memcpy(buf, rbuf->data+rbuf->pread, todo);

	rbuf->pread = (rbuf->pread + todo) % rbuf->size;
}

/*
 * Copy @len bytes from @buf into the ring (wrapping as needed) and then
 * advance pwrite.  The caller is expected to have checked
 * dvb_ringbuffer_free() first; this function does not check for overflow.
 */
ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t len)
{
	size_t todo = len;
	size_t split;

	split = (rbuf->pwrite + len > rbuf->size) ? rbuf->size - rbuf->pwrite : 0;

	if (split > 0) {
		memcpy(rbuf->data+rbuf->pwrite, buf, split);
		buf += split;
		todo -= split;
		rbuf->pwrite = 0;
	}
	memcpy(rbuf->data+rbuf->pwrite, buf, todo);
	rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size;

	return len;
}

/*
 * Write one framed packet: a 3-byte header (16-bit big-endian length,
 * then a PKT_READY status byte) followed by the payload.  On failure the
 * write cursor is rolled back so the partial header is not exposed.
 *
 * NOTE(review): no free-space check is performed before the three
 * DVB_RINGBUFFER_WRITE_BYTE() header writes — the caller must guarantee
 * room for len + DVB_RINGBUFFER_PKTHDRSIZE bytes; verify callers do.
 */
ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf, size_t len)
{
	int status;
	ssize_t oldpwrite = rbuf->pwrite;

	DVB_RINGBUFFER_WRITE_BYTE(rbuf, len >> 8);
	DVB_RINGBUFFER_WRITE_BYTE(rbuf, len & 0xff);
	DVB_RINGBUFFER_WRITE_BYTE(rbuf, PKT_READY);
	status = dvb_ringbuffer_write(rbuf, buf, len);

	if (status < 0) rbuf->pwrite = oldpwrite;
	return status;
}

/*
 * Copy up to @len payload bytes of the packet whose header starts at
 * ring index @idx into userspace, starting @offset bytes into the
 * payload.  The read cursor is NOT advanced; the packet stays in the
 * ring until dvb_ringbuffer_pkt_dispose() is called.  Returns the number
 * of bytes copied, -EINVAL for a bad offset, or -EFAULT.
 */
ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx,
				int offset, u8 __user *buf, size_t len)
{
	size_t todo;
	size_t split;
	size_t pktlen;

	/* Reassemble the 16-bit big-endian length from the header. */
	pktlen = rbuf->data[idx] << 8;
	pktlen |= rbuf->data[(idx + 1) % rbuf->size];
	if (offset > pktlen) return -EINVAL;
	/* Clamp the request to the remaining payload. */
	if ((offset + len) > pktlen) len = pktlen - offset;

	idx = (idx + DVB_RINGBUFFER_PKTHDRSIZE + offset) % rbuf->size;

	todo = len;
	split = ((idx + len) > rbuf->size) ? rbuf->size - idx : 0;
	if (split > 0) {
		if (copy_to_user(buf, rbuf->data+idx, split))
			return -EFAULT;
		buf += split;
		todo -= split;
		idx = 0;
	}
	if (copy_to_user(buf, rbuf->data+idx, todo))
		return -EFAULT;

	return len;
}

/* Kernel-space variant of dvb_ringbuffer_pkt_read_user(); memcpy-based,
 * so only -EINVAL (bad offset) can be returned as an error. */
ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
				int offset, u8* buf, size_t len)
{
	size_t todo;
	size_t split;
	size_t pktlen;

	pktlen = rbuf->data[idx] << 8;
	pktlen |= rbuf->data[(idx + 1) % rbuf->size];
	if (offset > pktlen) return -EINVAL;
	if ((offset + len) > pktlen) len = pktlen - offset;

	idx = (idx + DVB_RINGBUFFER_PKTHDRSIZE + offset) % rbuf->size;

	todo = len;
	split = ((idx + len) > rbuf->size) ? rbuf->size - idx : 0;
	if (split > 0) {
		memcpy(buf, rbuf->data+idx, split);
		buf += split;
		todo -= split;
		idx = 0;
	}
	memcpy(buf, rbuf->data+idx, todo);

	return len;
}

/*
 * Mark the packet at @idx as consumed, then reclaim space by skipping
 * every DISPOSED packet sitting at the head of the ring.  Reclamation
 * stops at the first still-READY packet, since packets can only be
 * released in ring order.
 */
void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx)
{
	size_t pktlen;

	/* Byte 2 of the header is the status byte. */
	rbuf->data[(idx + 2) % rbuf->size] = PKT_DISPOSED;

	// clean up disposed packets
	while(dvb_ringbuffer_avail(rbuf) > DVB_RINGBUFFER_PKTHDRSIZE) {
		if (DVB_RINGBUFFER_PEEK(rbuf, 2) == PKT_DISPOSED) {
			pktlen = DVB_RINGBUFFER_PEEK(rbuf, 0) << 8;
			pktlen |= DVB_RINGBUFFER_PEEK(rbuf, 1);
			DVB_RINGBUFFER_SKIP(rbuf, pktlen + DVB_RINGBUFFER_PKTHDRSIZE);
		} else {
			// first packet is not disposed, so we stop cleaning now
			break;
		}
	}
}

/*
 * Find the next READY packet after @idx, or the first READY packet when
 * @idx is (size_t)-1.  On success, stores the payload length in *pktlen
 * and returns the ring index of the packet header; returns -1 when no
 * READY packet remains.
 *
 * NOTE(review): "idx == -1" compares a size_t against -1 and relies on
 * the implicit conversion to SIZE_MAX; callers presumably pass
 * (size_t)-1 as the "start from the beginning" sentinel — confirm.
 */
ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* pktlen)
{
	int consumed;
	int curpktlen;
	int curpktstatus;

	if (idx == -1) {
		idx = rbuf->pread;
	} else {
		/* Step past the packet at idx using its header length. */
		curpktlen = rbuf->data[idx] << 8;
		curpktlen |= rbuf->data[(idx + 1) % rbuf->size];
		idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size;
	}

	/* Bytes between the ring's read cursor and our scan position. */
	consumed = (idx - rbuf->pread) % rbuf->size;

	while((dvb_ringbuffer_avail(rbuf) - consumed) > DVB_RINGBUFFER_PKTHDRSIZE) {
		curpktlen = rbuf->data[idx] << 8;
		curpktlen |= rbuf->data[(idx + 1) % rbuf->size];
		curpktstatus = rbuf->data[(idx + 2) % rbuf->size];

		if (curpktstatus == PKT_READY) {
			*pktlen = curpktlen;
			return idx;
		}

		consumed += curpktlen + DVB_RINGBUFFER_PKTHDRSIZE;
		idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size;
	}

	// no packets available
	return -1;
}

EXPORT_SYMBOL(dvb_ringbuffer_init);
EXPORT_SYMBOL(dvb_ringbuffer_empty);
EXPORT_SYMBOL(dvb_ringbuffer_free);
EXPORT_SYMBOL(dvb_ringbuffer_avail);
EXPORT_SYMBOL(dvb_ringbuffer_flush_spinlock_wakeup);
EXPORT_SYMBOL(dvb_ringbuffer_read_user);
EXPORT_SYMBOL(dvb_ringbuffer_read);
EXPORT_SYMBOL(dvb_ringbuffer_write);
gpl-2.0
Jason-Choi/EastSea-Kernel
drivers/staging/go7007/s2250-loader.c
7997
5072
/*
 * Copyright (C) 2008 Sensoray Company Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 */

/*
 * Firmware loader for the Sensoray 2250/2251: on probe it downloads a
 * Cypress FX2 boot-loader image and then the main device firmware via
 * usb_cypress_load_firmware() (from dvb-usb).
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <dvb-usb.h>

#define S2250_LOADER_FIRMWARE "s2250_loader.fw"
#define S2250_FIRMWARE "s2250.fw"

/* Per-device state; lifetime managed by the embedded kref. */
typedef struct device_extension_s {
	struct kref kref;
	int minor;			/* index into s2250_dev_table */
	struct usb_device *usbdev;
} device_extension_t, *pdevice_extension_t;

#define USB_s2250loader_MAJOR 240
#define USB_s2250loader_MINOR_BASE 0
#define MAX_DEVICES 256

/* Bound devices, indexed by minor; writes guarded by the mutex below
 * (except the one in s2250loader_delete — see note there). */
static pdevice_extension_t s2250_dev_table[MAX_DEVICES];
static DEFINE_MUTEX(s2250_dev_table_mutex);

#define to_s2250loader_dev_common(d) container_of(d, device_extension_t, kref)

/* kref release callback: vacate the table slot and free the extension.
 * NOTE(review): called from kref_put() without the table mutex held on
 * the failed2 path — presumably benign here, but verify. */
static void s2250loader_delete(struct kref *kref)
{
	pdevice_extension_t s = to_s2250loader_dev_common(kref);
	s2250_dev_table[s->minor] = NULL;
	kfree(s);
}

/*
 * Probe: claim a table slot, then push the loader image followed by the
 * main firmware into the device.  Returns 0 on success, -1 on failure.
 */
static int s2250loader_probe(struct usb_interface *interface,
			const struct usb_device_id *id)
{
	struct usb_device *usbdev;
	int minor, ret;
	pdevice_extension_t s = NULL;
	const struct firmware *fw;

	usbdev = usb_get_dev(interface_to_usbdev(interface));
	if (!usbdev) {
		printk(KERN_ERR "Enter s2250loader_probe failed\n");
		return -1;
	}
	printk(KERN_INFO "Enter s2250loader_probe 2.6 kernel\n");
	printk(KERN_INFO "vendor id 0x%x, device id 0x%x devnum:%d\n",
		usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
		usbdev->devnum);

	/* NOTE(review): this early return (and the failure paths below)
	 * never call usb_put_dev(usbdev), so the reference taken above
	 * looks leaked on error — confirm against the upstream driver. */
	if (usbdev->descriptor.bNumConfigurations != 1) {
		printk(KERN_ERR "can't handle multiple config\n");
		return -1;
	}
	mutex_lock(&s2250_dev_table_mutex);

	/* Find the first free minor slot. */
	for (minor = 0; minor < MAX_DEVICES; minor++) {
		if (s2250_dev_table[minor] == NULL)
			break;
	}

	/* minor < 0 cannot occur (loop starts at 0); the effective check
	 * is "table full" (minor == MAX_DEVICES). */
	if (minor < 0 || minor >= MAX_DEVICES) {
		printk(KERN_ERR "Invalid minor: %d\n", minor);
		goto failed;
	}

	/* Allocate dev data structure */
	s = kmalloc(sizeof(device_extension_t), GFP_KERNEL);
	if (s == NULL) {
		printk(KERN_ERR "Out of memory\n");
		goto failed;
	}
	s2250_dev_table[minor] = s;

	printk(KERN_INFO "s2250loader_probe: Device %d on Bus %d Minor %d\n",
		usbdev->devnum, usbdev->bus->busnum, minor);

	memset(s, 0, sizeof(device_extension_t));
	s->usbdev = usbdev;
	printk(KERN_INFO "loading 2250 loader\n");

	kref_init(&(s->kref));

	mutex_unlock(&s2250_dev_table_mutex);

	/* Stage 1: Cypress FX2 boot loader. */
	if (request_firmware(&fw, S2250_LOADER_FIRMWARE, &usbdev->dev)) {
		printk(KERN_ERR
			"s2250: unable to load firmware from file \"%s\"\n",
			S2250_LOADER_FIRMWARE);
		goto failed2;
	}
	ret = usb_cypress_load_firmware(usbdev, fw, CYPRESS_FX2);
	release_firmware(fw);
	if (0 != ret) {
		printk(KERN_ERR "loader download failed\n");
		goto failed2;
	}

	/* Stage 2: main device firmware. */
	if (request_firmware(&fw, S2250_FIRMWARE, &usbdev->dev)) {
		printk(KERN_ERR
			"s2250: unable to load firmware from file \"%s\"\n",
			S2250_FIRMWARE);
		goto failed2;
	}
	ret = usb_cypress_load_firmware(usbdev, fw, CYPRESS_FX2);
	release_firmware(fw);
	if (0 != ret) {
		printk(KERN_ERR "firmware_s2250 download failed\n");
		goto failed2;
	}

	usb_set_intfdata(interface, s);
	return 0;

failed:
	mutex_unlock(&s2250_dev_table_mutex);
failed2:
	/* Before kref_init() s is either NULL or freed via the table
	 * path; afterwards kref_put() drops the only reference. */
	if (s)
		kref_put(&(s->kref), s2250loader_delete);

	printk(KERN_ERR "probe failed\n");
	return -1;
}

/* Disconnect: detach the interface data and drop our reference; the
 * kref release callback frees the extension. */
static void s2250loader_disconnect(struct usb_interface *interface)
{
	pdevice_extension_t s;
	printk(KERN_INFO "s2250: disconnect\n");
	s = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);
	kref_put(&(s->kref), s2250loader_delete);
}

static const struct usb_device_id s2250loader_ids[] = {
	{USB_DEVICE(0x1943, 0xa250)},
	{}                          /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, s2250loader_ids);

static struct usb_driver s2250loader_driver = {
	.name = "s2250-loader",
	.probe = s2250loader_probe,
	.disconnect = s2250loader_disconnect,
	.id_table = s2250loader_ids,
};

/* Register the driver.  The table is static storage and therefore
 * already zeroed; the explicit loop is redundant but harmless. */
static int __init s2250loader_init(void)
{
	int r;
	unsigned i = 0;

	for (i = 0; i < MAX_DEVICES; i++)
		s2250_dev_table[i] = NULL;

	r = usb_register(&s2250loader_driver);
	if (r) {
		printk(KERN_ERR "usb_register failed. Error number %d\n", r);
		return -1;
	}
	printk(KERN_INFO "s2250loader_init: driver registered\n");
	return 0;
}
module_init(s2250loader_init);

static void __exit s2250loader_cleanup(void)
{
	printk(KERN_INFO "s2250loader_cleanup\n");
	usb_deregister(&s2250loader_driver);
}
module_exit(s2250loader_cleanup);

MODULE_AUTHOR("");
MODULE_DESCRIPTION("firmware loader for Sensoray 2250/2251");
MODULE_LICENSE("GPL v2");
gpl-2.0
AOKP/kernel_samsung_manta
drivers/staging/rtl8192e/rtl819x_HTProc.c
7997
31703
/****************************************************************************** * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> ******************************************************************************/ #include "rtllib.h" #include "rtl819x_HT.h" u8 MCS_FILTER_ALL[16] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; u8 MCS_FILTER_1SS[16] = { 0xff, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} ; u16 MCS_DATA_RATE[2][2][77] = { {{13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78, 104, 156, 208, 234, 260, 39, 78, 117, 234, 312, 351, 390, 52, 104, 156, 208, 312, 416, 468, 520, 0, 78, 104, 130, 117, 156, 195, 104, 130, 130, 156, 182, 182, 208, 156, 195, 195, 234, 273, 273, 312, 130, 156, 181, 156, 181, 208, 234, 208, 234, 260, 260, 286, 195, 234, 273, 234, 273, 312, 351, 312, 351, 390, 390, 429} , {14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289, 43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520, 578, 0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231, 173, 217, 217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260, 231, 260, 289, 289, 318, 217, 260, 303, 260, 303, 347, 390, 347, 390, 433, 433, 477} } , {{27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486, 540, 81, 162, 
243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648, 864, 972, 1080, 12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324, 378, 378, 432, 324, 405, 405, 486, 567, 567, 648, 270, 324, 378, 324, 378, 432, 486, 432, 486, 540, 540, 594, 405, 486, 567, 486, 567, 648, 729, 648, 729, 810, 810, 891}, {30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540, 600, 90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720, 960, 1080, 1200, 13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360, 420, 420, 480, 360, 450, 450, 540, 630, 630, 720, 300, 360, 420, 360, 420, 480, 540, 480, 540, 600, 600, 660, 450, 540, 630, 540, 630, 720, 810, 720, 810, 900, 900, 990} } }; static u8 UNKNOWN_BORADCOM[3] = {0x00, 0x14, 0xbf}; static u8 LINKSYSWRT330_LINKSYSWRT300_BROADCOM[3] = {0x00, 0x1a, 0x70}; static u8 LINKSYSWRT350_LINKSYSWRT150_BROADCOM[3] = {0x00, 0x1d, 0x7e}; static u8 BELKINF5D8233V1_RALINK[3] = {0x00, 0x17, 0x3f}; static u8 BELKINF5D82334V3_RALINK[3] = {0x00, 0x1c, 0xdf}; static u8 PCI_RALINK[3] = {0x00, 0x90, 0xcc}; static u8 EDIMAX_RALINK[3] = {0x00, 0x0e, 0x2e}; static u8 AIRLINK_RALINK[3] = {0x00, 0x18, 0x02}; static u8 DLINK_ATHEROS_1[3] = {0x00, 0x1c, 0xf0}; static u8 DLINK_ATHEROS_2[3] = {0x00, 0x21, 0x91}; static u8 CISCO_BROADCOM[3] = {0x00, 0x17, 0x94}; static u8 LINKSYS_MARVELL_4400N[3] = {0x00, 0x14, 0xa4}; void HTUpdateDefaultSetting(struct rtllib_device *ieee) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; pHTInfo->bAcceptAddbaReq = 1; pHTInfo->bRegShortGI20MHz = 1; pHTInfo->bRegShortGI40MHz = 1; pHTInfo->bRegBW40MHz = 1; if (pHTInfo->bRegBW40MHz) pHTInfo->bRegSuppCCK = 1; else pHTInfo->bRegSuppCCK = true; pHTInfo->nAMSDU_MaxSize = 7935UL; pHTInfo->bAMSDU_Support = 0; pHTInfo->bAMPDUEnable = 1; pHTInfo->AMPDU_Factor = 2; pHTInfo->MPDU_Density = 0; pHTInfo->SelfMimoPs = 3; if (pHTInfo->SelfMimoPs == 2) pHTInfo->SelfMimoPs = 3; ieee->bTxDisableRateFallBack = 0; ieee->bTxUseDriverAssingedRate = 0; ieee->bTxEnableFwCalcDur = 1; 
pHTInfo->bRegRT2RTAggregation = 1; pHTInfo->bRegRxReorderEnable = 1; pHTInfo->RxReorderWinSize = 64; pHTInfo->RxReorderPendingTime = 30; } void HTDebugHTCapability(u8 *CapIE, u8 *TitleString) { static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; struct ht_capab_ele *pCapELE; if (!memcmp(CapIE, EWC11NHTCap, sizeof(EWC11NHTCap))) { RTLLIB_DEBUG(RTLLIB_DL_HT, "EWC IE in %s()\n", __func__); pCapELE = (struct ht_capab_ele *)(&CapIE[4]); } else pCapELE = (struct ht_capab_ele *)(&CapIE[0]); RTLLIB_DEBUG(RTLLIB_DL_HT, "<Log HT Capability>. Called by %s\n", TitleString); RTLLIB_DEBUG(RTLLIB_DL_HT, "\tSupported Channel Width = %s\n", (pCapELE->ChlWidth) ? "20MHz" : "20/40MHz"); RTLLIB_DEBUG(RTLLIB_DL_HT, "\tSupport Short GI for 20M = %s\n", (pCapELE->ShortGI20Mhz) ? "YES" : "NO"); RTLLIB_DEBUG(RTLLIB_DL_HT, "\tSupport Short GI for 40M = %s\n", (pCapELE->ShortGI40Mhz) ? "YES" : "NO"); RTLLIB_DEBUG(RTLLIB_DL_HT, "\tSupport TX STBC = %s\n", (pCapELE->TxSTBC) ? "YES" : "NO"); RTLLIB_DEBUG(RTLLIB_DL_HT, "\tMax AMSDU Size = %s\n", (pCapELE->MaxAMSDUSize) ? "3839" : "7935"); RTLLIB_DEBUG(RTLLIB_DL_HT, "\tSupport CCK in 20/40 mode = %s\n", (pCapELE->DssCCk) ? "YES" : "NO"); RTLLIB_DEBUG(RTLLIB_DL_HT, "\tMax AMPDU Factor = %d\n", pCapELE->MaxRxAMPDUFactor); RTLLIB_DEBUG(RTLLIB_DL_HT, "\tMPDU Density = %d\n", pCapELE->MPDUDensity); RTLLIB_DEBUG(RTLLIB_DL_HT, "\tMCS Rate Set = [%x][%x][%x][%x][%x]\n", pCapELE->MCS[0], pCapELE->MCS[1], pCapELE->MCS[2], pCapELE->MCS[3], pCapELE->MCS[4]); return; } void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString) { static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; struct ht_info_ele *pHTInfoEle; if (!memcmp(InfoIE, EWC11NHTInfo, sizeof(EWC11NHTInfo))) { RTLLIB_DEBUG(RTLLIB_DL_HT, "EWC IE in %s()\n", __func__); pHTInfoEle = (struct ht_info_ele *)(&InfoIE[4]); } else pHTInfoEle = (struct ht_info_ele *)(&InfoIE[0]); RTLLIB_DEBUG(RTLLIB_DL_HT, "<Log HT Information Element>. 
" "Called by %s\n", TitleString); RTLLIB_DEBUG(RTLLIB_DL_HT, "\tPrimary channel = %d\n", pHTInfoEle->ControlChl); RTLLIB_DEBUG(RTLLIB_DL_HT, "\tSenondary channel ="); switch (pHTInfoEle->ExtChlOffset) { case 0: RTLLIB_DEBUG(RTLLIB_DL_HT, "Not Present\n"); break; case 1: RTLLIB_DEBUG(RTLLIB_DL_HT, "Upper channel\n"); break; case 2: RTLLIB_DEBUG(RTLLIB_DL_HT, "Reserved. Eooro!!!\n"); break; case 3: RTLLIB_DEBUG(RTLLIB_DL_HT, "Lower Channel\n"); break; } RTLLIB_DEBUG(RTLLIB_DL_HT, "\tRecommended channel width = %s\n", (pHTInfoEle->RecommemdedTxWidth) ? "20Mhz" : "40Mhz"); RTLLIB_DEBUG(RTLLIB_DL_HT, "\tOperation mode for protection = "); switch (pHTInfoEle->OptMode) { case 0: RTLLIB_DEBUG(RTLLIB_DL_HT, "No Protection\n"); break; case 1: RTLLIB_DEBUG(RTLLIB_DL_HT, "HT non-member protection mode\n"); break; case 2: RTLLIB_DEBUG(RTLLIB_DL_HT, "Suggest to open protection\n"); break; case 3: RTLLIB_DEBUG(RTLLIB_DL_HT, "HT mixed mode\n"); break; } RTLLIB_DEBUG(RTLLIB_DL_HT, "\tBasic MCS Rate Set = [%x][%x][%x][%x]" "[%x]\n", pHTInfoEle->BasicMSC[0], pHTInfoEle->BasicMSC[1], pHTInfoEle->BasicMSC[2], pHTInfoEle->BasicMSC[3], pHTInfoEle->BasicMSC[4]); return; } static bool IsHTHalfNmode40Bandwidth(struct rtllib_device *ieee) { bool retValue = false; struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; if (pHTInfo->bCurrentHTSupport == false) retValue = false; else if (pHTInfo->bRegBW40MHz == false) retValue = false; else if (!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) retValue = false; else if (((struct ht_capab_ele *)(pHTInfo->PeerHTCapBuf))->ChlWidth) retValue = true; else retValue = false; return retValue; } static bool IsHTHalfNmodeSGI(struct rtllib_device *ieee, bool is40MHz) { bool retValue = false; struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; if (pHTInfo->bCurrentHTSupport == false) retValue = false; else if (!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) retValue = false; else if (is40MHz) { if (((struct ht_capab_ele *) 
(pHTInfo->PeerHTCapBuf))->ShortGI40Mhz) retValue = true; else retValue = false; } else { if (((struct ht_capab_ele *) (pHTInfo->PeerHTCapBuf))->ShortGI20Mhz) retValue = true; else retValue = false; } return retValue; } u16 HTHalfMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate) { u8 is40MHz; u8 isShortGI; is40MHz = (IsHTHalfNmode40Bandwidth(ieee)) ? 1 : 0; isShortGI = (IsHTHalfNmodeSGI(ieee, is40MHz)) ? 1 : 0; return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate & 0x7f)]; } u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; u8 is40MHz = (pHTInfo->bCurBW40MHz) ? 1 : 0; u8 isShortGI = (pHTInfo->bCurBW40MHz) ? ((pHTInfo->bCurShortGI40MHz) ? 1 : 0) : ((pHTInfo->bCurShortGI20MHz) ? 1 : 0); return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate & 0x7f)]; } u16 TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate) { u16 CCKOFDMRate[12] = {0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c}; u8 is40MHz = 0; u8 isShortGI = 0; if (nDataRate < 12) { return CCKOFDMRate[nDataRate]; } else { if (nDataRate >= 0x10 && nDataRate <= 0x1f) { is40MHz = 0; isShortGI = 0; } else if (nDataRate >= 0x20 && nDataRate <= 0x2f) { is40MHz = 1; isShortGI = 0; } else if (nDataRate >= 0x30 && nDataRate <= 0x3f) { is40MHz = 0; isShortGI = 1; } else if (nDataRate >= 0x40 && nDataRate <= 0x4f) { is40MHz = 1; isShortGI = 1; } return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate&0xf]; } } bool IsHTHalfNmodeAPs(struct rtllib_device *ieee) { bool retValue = false; struct rtllib_network *net = &ieee->current_network; if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) || (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) || (memcmp(net->bssid, PCI_RALINK, 3) == 0) || (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) || (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) || (net->ralink_cap_exist)) retValue = true; else if (!memcmp(net->bssid, UNKNOWN_BORADCOM, 3) || !memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) 
|| !memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) || (net->broadcom_cap_exist)) retValue = true; else if (net->bssht.bdRT2RTAggregation) retValue = true; else retValue = false; return retValue; } static void HTIOTPeerDetermine(struct rtllib_device *ieee) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; struct rtllib_network *net = &ieee->current_network; if (net->bssht.bdRT2RTAggregation) { pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK; if (net->bssht.RT2RT_HT_Mode & RT_HT_CAP_USE_92SE) pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK_92SE; if (net->bssht.RT2RT_HT_Mode & RT_HT_CAP_USE_SOFTAP) pHTInfo->IOTPeer = HT_IOT_PEER_92U_SOFTAP; } else if (net->broadcom_cap_exist) pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM; else if (!memcmp(net->bssid, UNKNOWN_BORADCOM, 3) || !memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) || !memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)) pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM; else if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) || (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) || (memcmp(net->bssid, PCI_RALINK, 3) == 0) || (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) || (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) || net->ralink_cap_exist) pHTInfo->IOTPeer = HT_IOT_PEER_RALINK; else if ((net->atheros_cap_exist) || (memcmp(net->bssid, DLINK_ATHEROS_1, 3) == 0) || (memcmp(net->bssid, DLINK_ATHEROS_2, 3) == 0)) pHTInfo->IOTPeer = HT_IOT_PEER_ATHEROS; else if ((memcmp(net->bssid, CISCO_BROADCOM, 3) == 0) || net->cisco_cap_exist) pHTInfo->IOTPeer = HT_IOT_PEER_CISCO; else if ((memcmp(net->bssid, LINKSYS_MARVELL_4400N, 3) == 0) || net->marvell_cap_exist) pHTInfo->IOTPeer = HT_IOT_PEER_MARVELL; else if (net->airgo_cap_exist) pHTInfo->IOTPeer = HT_IOT_PEER_AIRGO; else pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN; RTLLIB_DEBUG(RTLLIB_DL_IOT, "Joseph debug!! 
IOTPEER: %x\n", pHTInfo->IOTPeer); } static u8 HTIOTActIsDisableMCS14(struct rtllib_device *ieee, u8 *PeerMacAddr) { return 0; } static bool HTIOTActIsDisableMCS15(struct rtllib_device *ieee) { bool retValue = false; return retValue; } static bool HTIOTActIsDisableMCSTwoSpatialStream(struct rtllib_device *ieee) { return false; } static u8 HTIOTActIsDisableEDCATurbo(struct rtllib_device *ieee, u8 *PeerMacAddr) { return false; } static u8 HTIOTActIsMgntUseCCK6M(struct rtllib_device *ieee, struct rtllib_network *network) { u8 retValue = 0; if (ieee->pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) retValue = 1; return retValue; } static u8 HTIOTActIsCCDFsync(struct rtllib_device *ieee) { u8 retValue = 0; if (ieee->pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) retValue = 1; return retValue; } static void HTIOTActDetermineRaFunc(struct rtllib_device *ieee, bool bPeerRx2ss) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; pHTInfo->IOTRaFunc &= HT_IOT_RAFUNC_DISABLE_ALL; if (pHTInfo->IOTPeer == HT_IOT_PEER_RALINK && !bPeerRx2ss) pHTInfo->IOTRaFunc |= HT_IOT_RAFUNC_PEER_1R; if (pHTInfo->IOTAction & HT_IOT_ACT_AMSDU_ENABLE) pHTInfo->IOTRaFunc |= HT_IOT_RAFUNC_TX_AMSDU; } void HTResetIOTSetting(struct rt_hi_throughput *pHTInfo) { pHTInfo->IOTAction = 0; pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN; pHTInfo->IOTRaFunc = 0; } void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap, u8 *len, u8 IsEncrypt, bool bAssoc) { struct rt_hi_throughput *pHT = ieee->pHTInfo; struct ht_capab_ele *pCapELE = NULL; if ((posHTCap == NULL) || (pHT == NULL)) { RTLLIB_DEBUG(RTLLIB_DL_ERR, "posHTCap or pHTInfo can't be " "null in HTConstructCapabilityElement()\n"); return; } memset(posHTCap, 0, *len); if ((bAssoc) && (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)) { u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap)); pCapELE = (struct ht_capab_ele *)&(posHTCap[4]); *len = 30 + 2; } else { pCapELE = (struct ht_capab_ele *)posHTCap; *len = 26 + 2; } 
pCapELE->AdvCoding = 0; if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) pCapELE->ChlWidth = 0; else pCapELE->ChlWidth = (pHT->bRegBW40MHz ? 1 : 0); pCapELE->MimoPwrSave = pHT->SelfMimoPs; pCapELE->GreenField = 0; pCapELE->ShortGI20Mhz = 1; pCapELE->ShortGI40Mhz = 1; pCapELE->TxSTBC = 1; pCapELE->RxSTBC = 0; pCapELE->DelayBA = 0; pCapELE->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE >= 7935) ? 1 : 0; pCapELE->DssCCk = ((pHT->bRegBW40MHz) ? (pHT->bRegSuppCCK ? 1 : 0) : 0); pCapELE->PSMP = 0; pCapELE->LSigTxopProtect = 0; RTLLIB_DEBUG(RTLLIB_DL_HT, "TX HT cap/info ele BW=%d MaxAMSDUSize:%d " "DssCCk:%d\n", pCapELE->ChlWidth, pCapELE->MaxAMSDUSize, pCapELE->DssCCk); if (IsEncrypt) { pCapELE->MPDUDensity = 7; pCapELE->MaxRxAMPDUFactor = 2; } else { pCapELE->MaxRxAMPDUFactor = 3; pCapELE->MPDUDensity = 0; } memcpy(pCapELE->MCS, ieee->Regdot11HTOperationalRateSet, 16); memset(&pCapELE->ExtHTCapInfo, 0, 2); memset(pCapELE->TxBFCap, 0, 4); pCapELE->ASCap = 0; if (bAssoc) { if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS15) pCapELE->MCS[1] &= 0x7f; if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS14) pCapELE->MCS[1] &= 0xbf; if (pHT->IOTAction & HT_IOT_ACT_DISABLE_ALL_2SS) pCapELE->MCS[1] &= 0x00; if (pHT->IOTAction & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI) pCapELE->ShortGI40Mhz = 0; if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) { pCapELE->ChlWidth = 0; pCapELE->MCS[1] = 0; } } return; } void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo, u8 *len, u8 IsEncrypt) { struct rt_hi_throughput *pHT = ieee->pHTInfo; struct ht_info_ele *pHTInfoEle = (struct ht_info_ele *)posHTInfo; if ((posHTInfo == NULL) || (pHTInfoEle == NULL)) { RTLLIB_DEBUG(RTLLIB_DL_ERR, "posHTInfo or pHTInfoEle can't be " "null in HTConstructInfoElement()\n"); return; } memset(posHTInfo, 0, *len); if ((ieee->iw_mode == IW_MODE_ADHOC) || (ieee->iw_mode == IW_MODE_MASTER)) { pHTInfoEle->ControlChl = ieee->current_network.channel; pHTInfoEle->ExtChlOffset = ((pHT->bRegBW40MHz == false) ? 
HT_EXTCHNL_OFFSET_NO_EXT : (ieee->current_network.channel <= 6) ? HT_EXTCHNL_OFFSET_UPPER : HT_EXTCHNL_OFFSET_LOWER); pHTInfoEle->RecommemdedTxWidth = pHT->bRegBW40MHz; pHTInfoEle->RIFS = 0; pHTInfoEle->PSMPAccessOnly = 0; pHTInfoEle->SrvIntGranularity = 0; pHTInfoEle->OptMode = pHT->CurrentOpMode; pHTInfoEle->NonGFDevPresent = 0; pHTInfoEle->DualBeacon = 0; pHTInfoEle->SecondaryBeacon = 0; pHTInfoEle->LSigTxopProtectFull = 0; pHTInfoEle->PcoActive = 0; pHTInfoEle->PcoPhase = 0; memset(pHTInfoEle->BasicMSC, 0, 16); *len = 22 + 2; } else { *len = 0; } return; } void HTConstructRT2RTAggElement(struct rtllib_device *ieee, u8 *posRT2RTAgg, u8 *len) { if (posRT2RTAgg == NULL) { RTLLIB_DEBUG(RTLLIB_DL_ERR, "posRT2RTAgg can't be null in " "HTConstructRT2RTAggElement()\n"); return; } memset(posRT2RTAgg, 0, *len); *posRT2RTAgg++ = 0x00; *posRT2RTAgg++ = 0xe0; *posRT2RTAgg++ = 0x4c; *posRT2RTAgg++ = 0x02; *posRT2RTAgg++ = 0x01; *posRT2RTAgg = 0x30; if (ieee->bSupportRemoteWakeUp) *posRT2RTAgg |= RT_HT_CAP_USE_WOW; *len = 6 + 2; return; } static u8 HT_PickMCSRate(struct rtllib_device *ieee, u8 *pOperateMCS) { u8 i; if (pOperateMCS == NULL) { RTLLIB_DEBUG(RTLLIB_DL_ERR, "pOperateMCS can't be null" " in HT_PickMCSRate()\n"); return false; } switch (ieee->mode) { case IEEE_A: case IEEE_B: case IEEE_G: for (i = 0; i <= 15; i++) pOperateMCS[i] = 0; break; case IEEE_N_24G: case IEEE_N_5G: pOperateMCS[0] &= RATE_ADPT_1SS_MASK; pOperateMCS[1] &= RATE_ADPT_2SS_MASK; pOperateMCS[3] &= RATE_ADPT_MCS32_MASK; break; default: break; } return true; } u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet, u8 *pMCSFilter) { u8 i, j; u8 bitMap; u8 mcsRate = 0; u8 availableMcsRate[16]; if (pMCSRateSet == NULL || pMCSFilter == NULL) { RTLLIB_DEBUG(RTLLIB_DL_ERR, "pMCSRateSet or pMCSFilter can't " "be null in HTGetHighestMCSRate()\n"); return false; } for (i = 0; i < 16; i++) availableMcsRate[i] = pMCSRateSet[i] & pMCSFilter[i]; for (i = 0; i < 16; i++) { if (availableMcsRate[i] != 
0) break; } if (i == 16) return false; for (i = 0; i < 16; i++) { if (availableMcsRate[i] != 0) { bitMap = availableMcsRate[i]; for (j = 0; j < 8; j++) { if ((bitMap%2) != 0) { if (HTMcsToDataRate(ieee, (8*i+j)) > HTMcsToDataRate(ieee, mcsRate)) mcsRate = (8*i+j); } bitMap = bitMap>>1; } } } return mcsRate | 0x80; } u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS, u8 *pOperateMCS) { u8 i; for (i = 0; i <= 15; i++) pOperateMCS[i] = ieee->Regdot11TxHTOperationalRateSet[i] & pSupportMCS[i]; HT_PickMCSRate(ieee, pOperateMCS); if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) pOperateMCS[1] = 0; for (i = 2; i <= 15; i++) pOperateMCS[i] = 0; return true; } void HTSetConnectBwMode(struct rtllib_device *ieee, enum ht_channel_width Bandwidth, enum ht_extchnl_offset Offset); void HTOnAssocRsp(struct rtllib_device *ieee) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; struct ht_capab_ele *pPeerHTCap = NULL; struct ht_info_ele *pPeerHTInfo = NULL; u16 nMaxAMSDUSize = 0; u8 *pMcsFilter = NULL; static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; if (pHTInfo->bCurrentHTSupport == false) { RTLLIB_DEBUG(RTLLIB_DL_ERR, "<=== HTOnAssocRsp(): " "HT_DISABLE\n"); return; } RTLLIB_DEBUG(RTLLIB_DL_HT, "===> HTOnAssocRsp_wq(): HT_ENABLE\n"); if (!memcmp(pHTInfo->PeerHTCapBuf, EWC11NHTCap, sizeof(EWC11NHTCap))) pPeerHTCap = (struct ht_capab_ele *)(&pHTInfo->PeerHTCapBuf[4]); else pPeerHTCap = (struct ht_capab_ele *)(pHTInfo->PeerHTCapBuf); if (!memcmp(pHTInfo->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo))) pPeerHTInfo = (struct ht_info_ele *) (&pHTInfo->PeerHTInfoBuf[4]); else pPeerHTInfo = (struct ht_info_ele *)(pHTInfo->PeerHTInfoBuf); RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA | RTLLIB_DL_HT, pPeerHTCap, sizeof(struct ht_capab_ele)); HTSetConnectBwMode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth), (enum ht_extchnl_offset)(pPeerHTInfo->ExtChlOffset)); pHTInfo->bCurTxBW40MHz = ((pPeerHTInfo->RecommemdedTxWidth == 
1) ? true : false); pHTInfo->bCurShortGI20MHz = ((pHTInfo->bRegShortGI20MHz) ? ((pPeerHTCap->ShortGI20Mhz == 1) ? true : false) : false); pHTInfo->bCurShortGI40MHz = ((pHTInfo->bRegShortGI40MHz) ? ((pPeerHTCap->ShortGI40Mhz == 1) ? true : false) : false); pHTInfo->bCurSuppCCK = ((pHTInfo->bRegSuppCCK) ? ((pPeerHTCap->DssCCk == 1) ? true : false) : false); pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support; nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize == 0) ? 3839 : 7935; if (pHTInfo->nAMSDU_MaxSize > nMaxAMSDUSize) pHTInfo->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize; else pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize; pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable; if (ieee->rtllib_ap_sec_type && (ieee->rtllib_ap_sec_type(ieee)&(SEC_ALG_WEP|SEC_ALG_TKIP))) { if ((pHTInfo->IOTPeer == HT_IOT_PEER_ATHEROS) || (pHTInfo->IOTPeer == HT_IOT_PEER_UNKNOWN)) pHTInfo->bCurrentAMPDUEnable = false; } if (!pHTInfo->bRegRT2RTAggregation) { if (pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor) pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor; else pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor; } else { if (ieee->current_network.bssht.bdRT2RTAggregation) { if (ieee->pairwise_key_type != KEY_TYPE_NA) pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor; else pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_64K; } else { if (pPeerHTCap->MaxRxAMPDUFactor < HT_AGG_SIZE_32K) pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor; else pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_32K; } } if (pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity) pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density; else pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity; if (pHTInfo->IOTAction & HT_IOT_ACT_TX_USE_AMSDU_8K) { pHTInfo->bCurrentAMPDUEnable = false; pHTInfo->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE; pHTInfo->ForcedAMSDUMaxSize = 7935; } pHTInfo->bCurRxReorderEnable = pHTInfo->bRegRxReorderEnable; if (pPeerHTCap->MCS[0] == 0) pPeerHTCap->MCS[0] = 0xff; 
HTIOTActDetermineRaFunc(ieee, ((pPeerHTCap->MCS[1]) != 0)); HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11HTOperationalRateSet); pHTInfo->PeerMimoPs = pPeerHTCap->MimoPwrSave; if (pHTInfo->PeerMimoPs == MIMO_PS_STATIC) pMcsFilter = MCS_FILTER_1SS; else pMcsFilter = MCS_FILTER_ALL; ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, pMcsFilter); ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate; pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode; } void HTInitializeHTInfo(struct rtllib_device *ieee) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; RTLLIB_DEBUG(RTLLIB_DL_HT, "===========>%s()\n", __func__); pHTInfo->bCurrentHTSupport = false; pHTInfo->bCurBW40MHz = false; pHTInfo->bCurTxBW40MHz = false; pHTInfo->bCurShortGI20MHz = false; pHTInfo->bCurShortGI40MHz = false; pHTInfo->bForcedShortGI = false; pHTInfo->bCurSuppCCK = true; pHTInfo->bCurrent_AMSDU_Support = false; pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize; pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density; pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor; memset((void *)(&(pHTInfo->SelfHTCap)), 0, sizeof(pHTInfo->SelfHTCap)); memset((void *)(&(pHTInfo->SelfHTInfo)), 0, sizeof(pHTInfo->SelfHTInfo)); memset((void *)(&(pHTInfo->PeerHTCapBuf)), 0, sizeof(pHTInfo->PeerHTCapBuf)); memset((void *)(&(pHTInfo->PeerHTInfoBuf)), 0, sizeof(pHTInfo->PeerHTInfoBuf)); pHTInfo->bSwBwInProgress = false; pHTInfo->ChnlOp = CHNLOP_NONE; pHTInfo->ePeerHTSpecVer = HT_SPEC_VER_IEEE; pHTInfo->bCurrentRT2RTAggregation = false; pHTInfo->bCurrentRT2RTLongSlotTime = false; pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0; pHTInfo->IOTPeer = 0; pHTInfo->IOTAction = 0; pHTInfo->IOTRaFunc = 0; { u8 *RegHTSuppRateSets = &(ieee->RegHTSuppRateSet[0]); RegHTSuppRateSets[0] = 0xFF; RegHTSuppRateSets[1] = 0xFF; RegHTSuppRateSets[4] = 0x01; } } void HTInitializeBssDesc(struct bss_ht *pBssHT) { pBssHT->bdSupportHT = false; memset(pBssHT->bdHTCapBuf, 0, sizeof(pBssHT->bdHTCapBuf)); 
pBssHT->bdHTCapLen = 0; memset(pBssHT->bdHTInfoBuf, 0, sizeof(pBssHT->bdHTInfoBuf)); pBssHT->bdHTInfoLen = 0; pBssHT->bdHTSpecVer = HT_SPEC_VER_IEEE; pBssHT->bdRT2RTAggregation = false; pBssHT->bdRT2RTLongSlotTime = false; pBssHT->RT2RT_HT_Mode = (enum rt_ht_capability)0; } void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee, struct rtllib_network *pNetwork) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; u8 bIOTAction = 0; RTLLIB_DEBUG(RTLLIB_DL_HT, "==============>%s()\n", __func__); /* unmark bEnableHT flag here is the same reason why unmarked in * function rtllib_softmac_new_net. WB 2008.09.10*/ if (pNetwork->bssht.bdSupportHT) { pHTInfo->bCurrentHTSupport = true; pHTInfo->ePeerHTSpecVer = pNetwork->bssht.bdHTSpecVer; if (pNetwork->bssht.bdHTCapLen > 0 && pNetwork->bssht.bdHTCapLen <= sizeof(pHTInfo->PeerHTCapBuf)) memcpy(pHTInfo->PeerHTCapBuf, pNetwork->bssht.bdHTCapBuf, pNetwork->bssht.bdHTCapLen); if (pNetwork->bssht.bdHTInfoLen > 0 && pNetwork->bssht.bdHTInfoLen <= sizeof(pHTInfo->PeerHTInfoBuf)) memcpy(pHTInfo->PeerHTInfoBuf, pNetwork->bssht.bdHTInfoBuf, pNetwork->bssht.bdHTInfoLen); if (pHTInfo->bRegRT2RTAggregation) { pHTInfo->bCurrentRT2RTAggregation = pNetwork->bssht.bdRT2RTAggregation; pHTInfo->bCurrentRT2RTLongSlotTime = pNetwork->bssht.bdRT2RTLongSlotTime; pHTInfo->RT2RT_HT_Mode = pNetwork->bssht.RT2RT_HT_Mode; } else { pHTInfo->bCurrentRT2RTAggregation = false; pHTInfo->bCurrentRT2RTLongSlotTime = false; pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0; } HTIOTPeerDetermine(ieee); pHTInfo->IOTAction = 0; bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid); if (bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS14; bIOTAction = HTIOTActIsDisableMCS15(ieee); if (bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS15; bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee); if (bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_ALL_2SS; bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid); if (bIOTAction) 
pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_EDCA_TURBO; bIOTAction = HTIOTActIsMgntUseCCK6M(ieee, pNetwork); if (bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_MGNT_USE_CCK_6M; bIOTAction = HTIOTActIsCCDFsync(ieee); if (bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_CDD_FSYNC; } else { pHTInfo->bCurrentHTSupport = false; pHTInfo->bCurrentRT2RTAggregation = false; pHTInfo->bCurrentRT2RTLongSlotTime = false; pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0; pHTInfo->IOTAction = 0; pHTInfo->IOTRaFunc = 0; } } void HT_update_self_and_peer_setting(struct rtllib_device *ieee, struct rtllib_network *pNetwork) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; struct ht_info_ele *pPeerHTInfo = (struct ht_info_ele *)pNetwork->bssht.bdHTInfoBuf; if (pHTInfo->bCurrentHTSupport) { if (pNetwork->bssht.bdHTInfoLen != 0) pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode; } } EXPORT_SYMBOL(HT_update_self_and_peer_setting); void HTUseDefaultSetting(struct rtllib_device *ieee) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; if (pHTInfo->bEnableHT) { pHTInfo->bCurrentHTSupport = true; pHTInfo->bCurSuppCCK = pHTInfo->bRegSuppCCK; pHTInfo->bCurBW40MHz = pHTInfo->bRegBW40MHz; pHTInfo->bCurShortGI20MHz = pHTInfo->bRegShortGI20MHz; pHTInfo->bCurShortGI40MHz = pHTInfo->bRegShortGI40MHz; if (ieee->iw_mode == IW_MODE_ADHOC) ieee->current_network.qos_data.active = ieee->current_network.qos_data.supported; pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support; pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize; pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable; pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor; pHTInfo->CurrentMPDUDensity = pHTInfo->CurrentMPDUDensity; HTFilterMCSRate(ieee, ieee->Regdot11TxHTOperationalRateSet, ieee->dot11HTOperationalRateSet); ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, MCS_FILTER_ALL); ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate; } else { pHTInfo->bCurrentHTSupport = false; } return; } u8 
HTCCheck(struct rtllib_device *ieee, u8 *pFrame) { if (ieee->pHTInfo->bCurrentHTSupport) { if ((IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1) { RTLLIB_DEBUG(RTLLIB_DL_HT, "HT CONTROL FILED " "EXIST!!\n"); return true; } } return false; } static void HTSetConnectBwModeCallback(struct rtllib_device *ieee) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; RTLLIB_DEBUG(RTLLIB_DL_HT, "======>%s()\n", __func__); if (pHTInfo->bCurBW40MHz) { if (pHTInfo->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_UPPER) ieee->set_chan(ieee->dev, ieee->current_network.channel + 2); else if (pHTInfo->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_LOWER) ieee->set_chan(ieee->dev, ieee->current_network.channel - 2); else ieee->set_chan(ieee->dev, ieee->current_network.channel); ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20_40, pHTInfo->CurSTAExtChnlOffset); } else { ieee->set_chan(ieee->dev, ieee->current_network.channel); ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT); } pHTInfo->bSwBwInProgress = false; } void HTSetConnectBwMode(struct rtllib_device *ieee, enum ht_channel_width Bandwidth, enum ht_extchnl_offset Offset) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; if (pHTInfo->bRegBW40MHz == false) return; if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) Bandwidth = HT_CHANNEL_WIDTH_20; if (pHTInfo->bSwBwInProgress) { printk(KERN_INFO "%s: bSwBwInProgress!!\n", __func__); return; } if (Bandwidth == HT_CHANNEL_WIDTH_20_40) { if (ieee->current_network.channel < 2 && Offset == HT_EXTCHNL_OFFSET_LOWER) Offset = HT_EXTCHNL_OFFSET_NO_EXT; if (Offset == HT_EXTCHNL_OFFSET_UPPER || Offset == HT_EXTCHNL_OFFSET_LOWER) { pHTInfo->bCurBW40MHz = true; pHTInfo->CurSTAExtChnlOffset = Offset; } else { pHTInfo->bCurBW40MHz = false; pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT; } } else { pHTInfo->bCurBW40MHz = false; pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT; } printk(KERN_INFO "%s():pHTInfo->bCurBW40MHz:%x\n", __func__, 
pHTInfo->bCurBW40MHz); pHTInfo->bSwBwInProgress = true; HTSetConnectBwModeCallback(ieee); }
gpl-2.0
flashalot/android_kernel_samsung_milletwifi
drivers/mtd/ubi/upd.c
9021
13025
/* * Copyright (c) International Business Machines Corp., 2006 * Copyright (c) Nokia Corporation, 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author: Artem Bityutskiy (Битюцкий Артём) * * Jan 2007: Alexander Schmidt, hacked per-volume update. */ /* * This file contains implementation of the volume update and atomic LEB change * functionality. * * The update operation is based on the per-volume update marker which is * stored in the volume table. The update marker is set before the update * starts, and removed after the update has been finished. So if the update was * interrupted by an unclean re-boot or due to some other reasons, the update * marker stays on the flash media and UBI finds it when it attaches the MTD * device next time. If the update marker is set for a volume, the volume is * treated as damaged and most I/O operations are prohibited. Only a new update * operation is allowed. * * Note, in general it is possible to implement the update operation as a * transaction with a roll-back capability. */ #include <linux/err.h> #include <linux/uaccess.h> #include <linux/math64.h> #include "ubi.h" /** * set_update_marker - set update marker. * @ubi: UBI device description object * @vol: volume description object * * This function sets the update marker flag for volume @vol. 
Returns zero * in case of success and a negative error code in case of failure. */ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol) { int err; struct ubi_vtbl_record vtbl_rec; dbg_gen("set update marker for volume %d", vol->vol_id); if (vol->upd_marker) { ubi_assert(ubi->vtbl[vol->vol_id].upd_marker); dbg_gen("already set"); return 0; } memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], sizeof(struct ubi_vtbl_record)); vtbl_rec.upd_marker = 1; mutex_lock(&ubi->device_mutex); err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); vol->upd_marker = 1; mutex_unlock(&ubi->device_mutex); return err; } /** * clear_update_marker - clear update marker. * @ubi: UBI device description object * @vol: volume description object * @bytes: new data size in bytes * * This function clears the update marker for volume @vol, sets new volume * data size and clears the "corrupted" flag (static volumes only). Returns * zero in case of success and a negative error code in case of failure. */ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol, long long bytes) { int err; struct ubi_vtbl_record vtbl_rec; dbg_gen("clear update marker for volume %d", vol->vol_id); memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], sizeof(struct ubi_vtbl_record)); ubi_assert(vol->upd_marker && vtbl_rec.upd_marker); vtbl_rec.upd_marker = 0; if (vol->vol_type == UBI_STATIC_VOLUME) { vol->corrupted = 0; vol->used_bytes = bytes; vol->used_ebs = div_u64_rem(bytes, vol->usable_leb_size, &vol->last_eb_bytes); if (vol->last_eb_bytes) vol->used_ebs += 1; else vol->last_eb_bytes = vol->usable_leb_size; } mutex_lock(&ubi->device_mutex); err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); vol->upd_marker = 0; mutex_unlock(&ubi->device_mutex); return err; } /** * ubi_start_update - start volume update. * @ubi: UBI device description object * @vol: volume description object * @bytes: update bytes * * This function starts volume update operation. 
If @bytes is zero, the volume * is just wiped out. Returns zero in case of success and a negative error code * in case of failure. */ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, long long bytes) { int i, err; dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes); ubi_assert(!vol->updating && !vol->changing_leb); vol->updating = 1; err = set_update_marker(ubi, vol); if (err) return err; /* Before updating - wipe out the volume */ for (i = 0; i < vol->reserved_pebs; i++) { err = ubi_eba_unmap_leb(ubi, vol, i); if (err) return err; } if (bytes == 0) { err = ubi_wl_flush(ubi); if (err) return err; err = clear_update_marker(ubi, vol, 0); if (err) return err; vol->updating = 0; return 0; } vol->upd_buf = vmalloc(ubi->leb_size); if (!vol->upd_buf) return -ENOMEM; vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1, vol->usable_leb_size); vol->upd_bytes = bytes; vol->upd_received = 0; return 0; } /** * ubi_start_leb_change - start atomic LEB change. * @ubi: UBI device description object * @vol: volume description object * @req: operation request * * This function starts atomic LEB change operation. Returns zero in case of * success and a negative error code in case of failure. */ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, const struct ubi_leb_change_req *req) { ubi_assert(!vol->updating && !vol->changing_leb); dbg_gen("start changing LEB %d:%d, %u bytes", vol->vol_id, req->lnum, req->bytes); if (req->bytes == 0) return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0, req->dtype); vol->upd_bytes = req->bytes; vol->upd_received = 0; vol->changing_leb = 1; vol->ch_lnum = req->lnum; vol->ch_dtype = req->dtype; vol->upd_buf = vmalloc(req->bytes); if (!vol->upd_buf) return -ENOMEM; return 0; } /** * write_leb - write update data. 
* @ubi: UBI device description object * @vol: volume description object * @lnum: logical eraseblock number * @buf: data to write * @len: data size * @used_ebs: how many logical eraseblocks will this volume contain (static * volumes only) * * This function writes update data to corresponding logical eraseblock. In * case of dynamic volume, this function checks if the data contains 0xFF bytes * at the end. If yes, the 0xFF bytes are cut and not written. So if the whole * buffer contains only 0xFF bytes, the LEB is left unmapped. * * The reason why we skip the trailing 0xFF bytes in case of dynamic volume is * that we want to make sure that more data may be appended to the logical * eraseblock in future. Indeed, writing 0xFF bytes may have side effects and * this PEB won't be writable anymore. So if one writes the file-system image * to the UBI volume where 0xFFs mean free space - UBI makes sure this free * space is writable after the update. * * We do not do this for static volumes because they are read-only. But this * also cannot be done because we have to store per-LEB CRC and the correct * data length. * * This function returns zero in case of success and a negative error code in * case of failure. */ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, void *buf, int len, int used_ebs) { int err; if (vol->vol_type == UBI_DYNAMIC_VOLUME) { int l = ALIGN(len, ubi->min_io_size); memset(buf + len, 0xFF, l - len); len = ubi_calc_data_len(ubi, buf, l); if (len == 0) { dbg_gen("all %d bytes contain 0xFF - skip", len); return 0; } err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN); } else { /* * When writing static volume, and this is the last logical * eraseblock, the length (@len) does not have to be aligned to * the minimal flash I/O unit. The 'ubi_eba_write_leb_st()' * function accepts exact (unaligned) length and stores it in * the VID header. And it takes care of proper alignment by * padding the buffer. 
Here we just make sure the padding will * contain zeros, not random trash. */ memset(buf + len, 0, vol->usable_leb_size - len); err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, UBI_UNKNOWN, used_ebs); } return err; } /** * ubi_more_update_data - write more update data. * @ubi: UBI device description object * @vol: volume description object * @buf: write data (user-space memory buffer) * @count: how much bytes to write * * This function writes more data to the volume which is being updated. It may * be called arbitrary number of times until all the update data arriveis. This * function returns %0 in case of success, number of bytes written during the * last call if the whole volume update has been successfully finished, and a * negative error code in case of failure. */ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, const void __user *buf, int count) { int lnum, offs, err = 0, len, to_write = count; dbg_gen("write %d of %lld bytes, %lld already passed", count, vol->upd_bytes, vol->upd_received); if (ubi->ro_mode) return -EROFS; lnum = div_u64_rem(vol->upd_received, vol->usable_leb_size, &offs); if (vol->upd_received + count > vol->upd_bytes) to_write = count = vol->upd_bytes - vol->upd_received; /* * When updating volumes, we accumulate whole logical eraseblock of * data and write it at once. */ if (offs != 0) { /* * This is a write to the middle of the logical eraseblock. We * copy the data to our update buffer and wait for more data or * flush it if the whole eraseblock is written or the update * is finished. */ len = vol->usable_leb_size - offs; if (len > count) len = count; err = copy_from_user(vol->upd_buf + offs, buf, len); if (err) return -EFAULT; if (offs + len == vol->usable_leb_size || vol->upd_received + len == vol->upd_bytes) { int flush_len = offs + len; /* * OK, we gathered either the whole eraseblock or this * is the last chunk, it's time to flush the buffer. 
*/ ubi_assert(flush_len <= vol->usable_leb_size); err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len, vol->upd_ebs); if (err) return err; } vol->upd_received += len; count -= len; buf += len; lnum += 1; } /* * If we've got more to write, let's continue. At this point we know we * are starting from the beginning of an eraseblock. */ while (count) { if (count > vol->usable_leb_size) len = vol->usable_leb_size; else len = count; err = copy_from_user(vol->upd_buf, buf, len); if (err) return -EFAULT; if (len == vol->usable_leb_size || vol->upd_received + len == vol->upd_bytes) { err = write_leb(ubi, vol, lnum, vol->upd_buf, len, vol->upd_ebs); if (err) break; } vol->upd_received += len; count -= len; lnum += 1; buf += len; } ubi_assert(vol->upd_received <= vol->upd_bytes); if (vol->upd_received == vol->upd_bytes) { err = ubi_wl_flush(ubi); if (err) return err; /* The update is finished, clear the update marker */ err = clear_update_marker(ubi, vol, vol->upd_bytes); if (err) return err; vol->updating = 0; err = to_write; vfree(vol->upd_buf); } return err; } /** * ubi_more_leb_change_data - accept more data for atomic LEB change. * @ubi: UBI device description object * @vol: volume description object * @buf: write data (user-space memory buffer) * @count: how much bytes to write * * This function accepts more data to the volume which is being under the * "atomic LEB change" operation. It may be called arbitrary number of times * until all data arrives. This function returns %0 in case of success, number * of bytes written during the last call if the whole "atomic LEB change" * operation has been successfully finished, and a negative error code in case * of failure. 
*/ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, const void __user *buf, int count) { int err; dbg_gen("write %d of %lld bytes, %lld already passed", count, vol->upd_bytes, vol->upd_received); if (ubi->ro_mode) return -EROFS; if (vol->upd_received + count > vol->upd_bytes) count = vol->upd_bytes - vol->upd_received; err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count); if (err) return -EFAULT; vol->upd_received += count; if (vol->upd_received == vol->upd_bytes) { int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size); memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes); len = ubi_calc_data_len(ubi, vol->upd_buf, len); err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum, vol->upd_buf, len, UBI_UNKNOWN); if (err) return err; } ubi_assert(vol->upd_received <= vol->upd_bytes); if (vol->upd_received == vol->upd_bytes) { vol->changing_leb = 0; err = count; vfree(vol->upd_buf); } return err; }
gpl-2.0
moddingg33k/deprecated_android_kernel_synopsis
net/ipv6/netfilter/ip6table_security.c
9277
2853
/* * "security" table for IPv6 * * This is for use by Mandatory Access Control (MAC) security models, * which need to be able to manage security policy in separate context * to DAC. * * Based on iptable_mangle.c * * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org> * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/slab.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>"); MODULE_DESCRIPTION("ip6tables security table, for MAC rules"); #define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \ (1 << NF_INET_FORWARD) | \ (1 << NF_INET_LOCAL_OUT) static const struct xt_table security_table = { .name = "security", .valid_hooks = SECURITY_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV6, .priority = NF_IP6_PRI_SECURITY, }; static unsigned int ip6table_security_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { const struct net *net = dev_net((in != NULL) ? 
in : out); return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security); } static struct nf_hook_ops *sectbl_ops __read_mostly; static int __net_init ip6table_security_net_init(struct net *net) { struct ip6t_replace *repl; repl = ip6t_alloc_initial_table(&security_table); if (repl == NULL) return -ENOMEM; net->ipv6.ip6table_security = ip6t_register_table(net, &security_table, repl); kfree(repl); if (IS_ERR(net->ipv6.ip6table_security)) return PTR_ERR(net->ipv6.ip6table_security); return 0; } static void __net_exit ip6table_security_net_exit(struct net *net) { ip6t_unregister_table(net, net->ipv6.ip6table_security); } static struct pernet_operations ip6table_security_net_ops = { .init = ip6table_security_net_init, .exit = ip6table_security_net_exit, }; static int __init ip6table_security_init(void) { int ret; ret = register_pernet_subsys(&ip6table_security_net_ops); if (ret < 0) return ret; sectbl_ops = xt_hook_link(&security_table, ip6table_security_hook); if (IS_ERR(sectbl_ops)) { ret = PTR_ERR(sectbl_ops); goto cleanup_table; } return ret; cleanup_table: unregister_pernet_subsys(&ip6table_security_net_ops); return ret; } static void __exit ip6table_security_fini(void) { xt_hook_unlink(&security_table, sectbl_ops); unregister_pernet_subsys(&ip6table_security_net_ops); } module_init(ip6table_security_init); module_exit(ip6table_security_fini);
gpl-2.0
varigit/VAR-SOM-AM33-Kernel-3-14
drivers/net/wireless/b43legacy/rfkill.c
9277
2661
/* Broadcom B43 wireless driver RFKILL support Copyright (c) 2007 Michael Buesch <m@bues.ch> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "radio.h" #include "b43legacy.h" /* Returns TRUE, if the radio is enabled in hardware. */ bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev) { if (dev->dev->id.revision >= 3) { if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI) & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK)) return 1; } else { /* To prevent CPU fault on PPC, do not read a register * unless the interface is started; however, on resume * for hibernation, this routine is entered early. When * that happens, unconditionally return TRUE. */ if (b43legacy_status(dev) < B43legacy_STAT_STARTED) return 1; if (b43legacy_read16(dev, B43legacy_MMIO_RADIO_HWENABLED_LO) & B43legacy_MMIO_RADIO_HWENABLED_LO_MASK) return 1; } return 0; } /* The poll callback for the hardware button. 
*/ void b43legacy_rfkill_poll(struct ieee80211_hw *hw) { struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); struct b43legacy_wldev *dev = wl->current_dev; struct ssb_bus *bus = dev->dev->bus; bool enabled; bool brought_up = false; mutex_lock(&wl->mutex); if (unlikely(b43legacy_status(dev) < B43legacy_STAT_INITIALIZED)) { if (ssb_bus_powerup(bus, 0)) { mutex_unlock(&wl->mutex); return; } ssb_device_enable(dev->dev, 0); brought_up = true; } enabled = b43legacy_is_hw_radio_enabled(dev); if (unlikely(enabled != dev->radio_hw_enable)) { dev->radio_hw_enable = enabled; b43legacyinfo(wl, "Radio hardware status changed to %s\n", enabled ? "ENABLED" : "DISABLED"); wiphy_rfkill_set_hw_state(hw->wiphy, !enabled); if (enabled != dev->phy.radio_on) { if (enabled) b43legacy_radio_turn_on(dev); else b43legacy_radio_turn_off(dev, 0); } } if (brought_up) { ssb_device_disable(dev->dev, 0); ssb_bus_may_powerdown(bus); } mutex_unlock(&wl->mutex); }
gpl-2.0
ozone999/at91sam
arch/mips/boot/elf2ecoff.c
9277
17012
/* * Copyright (c) 1995 * Ted Lemon (hereinafter referred to as the author) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* elf2ecoff.c This program converts an elf executable to an ECOFF executable. No symbol table is retained. This is useful primarily in building net-bootable kernels for machines (e.g., DECstation and Alpha) which only support the ECOFF object file format. 
*/ #include <stdio.h> #include <string.h> #include <errno.h> #include <sys/types.h> #include <fcntl.h> #include <unistd.h> #include <elf.h> #include <limits.h> #include <netinet/in.h> #include <stdlib.h> #include "ecoff.h" /* * Some extra ELF definitions */ #define PT_MIPS_REGINFO 0x70000000 /* Register usage information */ /* -------------------------------------------------------------------- */ struct sect { unsigned long vaddr; unsigned long len; }; int *symTypeTable; int must_convert_endian; int format_bigendian; static void copy(int out, int in, off_t offset, off_t size) { char ibuf[4096]; int remaining, cur, count; /* Go to the start of the ELF symbol table... */ if (lseek(in, offset, SEEK_SET) < 0) { perror("copy: lseek"); exit(1); } remaining = size; while (remaining) { cur = remaining; if (cur > sizeof ibuf) cur = sizeof ibuf; remaining -= cur; if ((count = read(in, ibuf, cur)) != cur) { fprintf(stderr, "copy: read: %s\n", count ? strerror(errno) : "premature end of file"); exit(1); } if ((count = write(out, ibuf, cur)) != cur) { perror("copy: write"); exit(1); } } } /* * Combine two segments, which must be contiguous. If pad is true, it's * okay for there to be padding between. 
*/ static void combine(struct sect *base, struct sect *new, int pad) { if (!base->len) *base = *new; else if (new->len) { if (base->vaddr + base->len != new->vaddr) { if (pad) base->len = new->vaddr - base->vaddr; else { fprintf(stderr, "Non-contiguous data can't be converted.\n"); exit(1); } } base->len += new->len; } } static int phcmp(const void *v1, const void *v2) { const Elf32_Phdr *h1 = v1; const Elf32_Phdr *h2 = v2; if (h1->p_vaddr > h2->p_vaddr) return 1; else if (h1->p_vaddr < h2->p_vaddr) return -1; else return 0; } static char *saveRead(int file, off_t offset, off_t len, char *name) { char *tmp; int count; off_t off; if ((off = lseek(file, offset, SEEK_SET)) < 0) { fprintf(stderr, "%s: fseek: %s\n", name, strerror(errno)); exit(1); } if (!(tmp = (char *) malloc(len))) { fprintf(stderr, "%s: Can't allocate %ld bytes.\n", name, len); exit(1); } count = read(file, tmp, len); if (count != len) { fprintf(stderr, "%s: read: %s.\n", name, count ? strerror(errno) : "End of file reached"); exit(1); } return tmp; } #define swab16(x) \ ((unsigned short)( \ (((unsigned short)(x) & (unsigned short)0x00ffU) << 8) | \ (((unsigned short)(x) & (unsigned short)0xff00U) >> 8) )) #define swab32(x) \ ((unsigned int)( \ (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \ (((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \ (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \ (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) )) static void convert_elf_hdr(Elf32_Ehdr * e) { e->e_type = swab16(e->e_type); e->e_machine = swab16(e->e_machine); e->e_version = swab32(e->e_version); e->e_entry = swab32(e->e_entry); e->e_phoff = swab32(e->e_phoff); e->e_shoff = swab32(e->e_shoff); e->e_flags = swab32(e->e_flags); e->e_ehsize = swab16(e->e_ehsize); e->e_phentsize = swab16(e->e_phentsize); e->e_phnum = swab16(e->e_phnum); e->e_shentsize = swab16(e->e_shentsize); e->e_shnum = swab16(e->e_shnum); e->e_shstrndx = swab16(e->e_shstrndx); } static void 
/* Byte-swap each field of an array of ELF program headers (in place). */
convert_elf_phdrs(Elf32_Phdr * p, int num)
{
	int i;

	for (i = 0; i < num; i++, p++) {
		p->p_type = swab32(p->p_type);
		p->p_offset = swab32(p->p_offset);
		p->p_vaddr = swab32(p->p_vaddr);
		p->p_paddr = swab32(p->p_paddr);
		p->p_filesz = swab32(p->p_filesz);
		p->p_memsz = swab32(p->p_memsz);
		p->p_flags = swab32(p->p_flags);
		p->p_align = swab32(p->p_align);
	}
}

/* Byte-swap each field of an array of ELF section headers (in place). */
static void convert_elf_shdrs(Elf32_Shdr * s, int num)
{
	int i;

	for (i = 0; i < num; i++, s++) {
		s->sh_name = swab32(s->sh_name);
		s->sh_type = swab32(s->sh_type);
		s->sh_flags = swab32(s->sh_flags);
		s->sh_addr = swab32(s->sh_addr);
		s->sh_offset = swab32(s->sh_offset);
		s->sh_size = swab32(s->sh_size);
		s->sh_link = swab32(s->sh_link);
		s->sh_info = swab32(s->sh_info);
		s->sh_addralign = swab32(s->sh_addralign);
		s->sh_entsize = swab32(s->sh_entsize);
	}
}

/* Byte-swap the fields of an ECOFF file header (in place). */
static void convert_ecoff_filehdr(struct filehdr *f)
{
	f->f_magic = swab16(f->f_magic);
	f->f_nscns = swab16(f->f_nscns);
	f->f_timdat = swab32(f->f_timdat);
	f->f_symptr = swab32(f->f_symptr);
	f->f_nsyms = swab32(f->f_nsyms);
	f->f_opthdr = swab16(f->f_opthdr);
	f->f_flags = swab16(f->f_flags);
}

/* Byte-swap the fields of an ECOFF a.out (optional) header (in place). */
static void convert_ecoff_aouthdr(struct aouthdr *a)
{
	a->magic = swab16(a->magic);
	a->vstamp = swab16(a->vstamp);
	a->tsize = swab32(a->tsize);
	a->dsize = swab32(a->dsize);
	a->bsize = swab32(a->bsize);
	a->entry = swab32(a->entry);
	a->text_start = swab32(a->text_start);
	a->data_start = swab32(a->data_start);
	a->bss_start = swab32(a->bss_start);
	a->gprmask = swab32(a->gprmask);
	a->cprmask[0] = swab32(a->cprmask[0]);
	a->cprmask[1] = swab32(a->cprmask[1]);
	a->cprmask[2] = swab32(a->cprmask[2]);
	a->cprmask[3] = swab32(a->cprmask[3]);
	a->gp_value = swab32(a->gp_value);
}

/* Byte-swap an array of ECOFF section headers (in place). */
static void convert_ecoff_esecs(struct scnhdr *s, int num)
{
	int i;

	for (i = 0; i < num; i++, s++) {
		s->s_paddr = swab32(s->s_paddr);
		s->s_vaddr = swab32(s->s_vaddr);
		s->s_size = swab32(s->s_size);
		s->s_scnptr = swab32(s->s_scnptr);
		s->s_relptr = swab32(s->s_relptr);
		s->s_lnnoptr = swab32(s->s_lnnoptr);
		s->s_nreloc = swab16(s->s_nreloc);
		s->s_nlnno = swab16(s->s_nlnno);
		s->s_flags = swab32(s->s_flags);
	}
}

/*
 * Convert the ELF executable argv[1] into the ECOFF executable argv[2].
 * With the optional "-a" flag, six section headers are emitted instead
 * of three.
 */
int main(int argc, char *argv[])
{
	Elf32_Ehdr ex;			/* ELF file header */
	Elf32_Phdr *ph;			/* ELF program headers */
	Elf32_Shdr *sh;			/* ELF section headers */
	char *shstrtab;			/* section-name string table */
	int i, pad;
	struct sect text, data, bss;	/* merged output segments */
	struct filehdr efh;		/* ECOFF file header */
	struct aouthdr eah;		/* ECOFF a.out header */
	struct scnhdr esecs[6];		/* ECOFF section headers */
	int infile, outfile;
	unsigned long cur_vma = ULONG_MAX;	/* lowest segment address seen */
	int addflag = 0;		/* "-a": emit 6 sections */
	int nosecs;

	text.len = data.len = bss.len = 0;
	text.vaddr = data.vaddr = bss.vaddr = 0;
	/* Check args... */
	if (argc < 3 || argc > 4) {
usage:
		fprintf(stderr,
			"usage: elf2ecoff <elf executable> <ecoff executable> [-a]\n");
		exit(1);
	}
	if (argc == 4) {
		if (strcmp(argv[3], "-a"))
			goto usage;
		addflag = 1;
	}

	/* Try the input file... */
	if ((infile = open(argv[1], O_RDONLY)) < 0) {
		fprintf(stderr, "Can't open %s for read: %s\n",
			argv[1], strerror(errno));
		exit(1);
	}

	/* Read the header, which is at the beginning of the file... */
	i = read(infile, &ex, sizeof ex);
	if (i != sizeof ex) {
		fprintf(stderr, "ex: %s: %s.\n",
			argv[1], i ? strerror(errno) : "End of file reached");
		exit(1);
	}

	if (ex.e_ident[EI_DATA] == ELFDATA2MSB)
		format_bigendian = 1;

	/* Runtime host-endianness probe: ntohs() is the identity on
	 * big-endian hosts. */
	if (ntohs(0xaa55) == 0xaa55) {
		if (!format_bigendian)
			must_convert_endian = 1;
	} else {
		if (format_bigendian)
			must_convert_endian = 1;
	}
	if (must_convert_endian)
		convert_elf_hdr(&ex);

	/* Read the program headers... */
	ph = (Elf32_Phdr *) saveRead(infile, ex.e_phoff,
				     ex.e_phnum * sizeof(Elf32_Phdr), "ph");
	if (must_convert_endian)
		convert_elf_phdrs(ph, ex.e_phnum);
	/* Read the section headers... */
	sh = (Elf32_Shdr *) saveRead(infile, ex.e_shoff,
				     ex.e_shnum * sizeof(Elf32_Shdr), "sh");
	if (must_convert_endian)
		convert_elf_shdrs(sh, ex.e_shnum);
	/* Read in the section string table. */
	shstrtab = saveRead(infile, sh[ex.e_shstrndx].sh_offset,
			    sh[ex.e_shstrndx].sh_size, "shstrtab");

	/* Figure out if we can cram the program header into an ECOFF
	   header...  Basically, we can't handle anything but loadable
	   segments, but we can ignore some kinds of segments.
We can't handle holes in the address space. Segments may be out of order, so we sort them first. */ qsort(ph, ex.e_phnum, sizeof(Elf32_Phdr), phcmp); for (i = 0; i < ex.e_phnum; i++) { /* Section types we can ignore... */ if (ph[i].p_type == PT_NULL || ph[i].p_type == PT_NOTE || ph[i].p_type == PT_PHDR || ph[i].p_type == PT_MIPS_REGINFO) continue; /* Section types we can't handle... */ else if (ph[i].p_type != PT_LOAD) { fprintf(stderr, "Program header %d type %d can't be converted.\n", ex.e_phnum, ph[i].p_type); exit(1); } /* Writable (data) segment? */ if (ph[i].p_flags & PF_W) { struct sect ndata, nbss; ndata.vaddr = ph[i].p_vaddr; ndata.len = ph[i].p_filesz; nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz; nbss.len = ph[i].p_memsz - ph[i].p_filesz; combine(&data, &ndata, 0); combine(&bss, &nbss, 1); } else { struct sect ntxt; ntxt.vaddr = ph[i].p_vaddr; ntxt.len = ph[i].p_filesz; combine(&text, &ntxt, 0); } /* Remember the lowest segment start address. */ if (ph[i].p_vaddr < cur_vma) cur_vma = ph[i].p_vaddr; } /* Sections must be in order to be converted... */ if (text.vaddr > data.vaddr || data.vaddr > bss.vaddr || text.vaddr + text.len > data.vaddr || data.vaddr + data.len > bss.vaddr) { fprintf(stderr, "Sections ordering prevents a.out conversion.\n"); exit(1); } /* If there's a data section but no text section, then the loader combined everything into one section. That needs to be the text section, so just make the data section zero length following text. */ if (data.len && !text.len) { text = data; data.vaddr = text.vaddr + text.len; data.len = 0; } /* If there is a gap between text and data, we'll fill it when we copy the data, so update the length of the text segment as represented in a.out to reflect that, since a.out doesn't allow gaps in the program address space. */ if (text.vaddr + text.len < data.vaddr) text.len = data.vaddr - text.vaddr; /* We now have enough information to cons up an a.out header... 
	 */
	/* Fill in the a.out (optional) header from the merged segments. */
	eah.magic = OMAGIC;
	eah.vstamp = 200;
	eah.tsize = text.len;
	eah.dsize = data.len;
	eah.bsize = bss.len;
	eah.entry = ex.e_entry;
	eah.text_start = text.vaddr;
	eah.data_start = data.vaddr;
	eah.bss_start = bss.vaddr;
	eah.gprmask = 0xf3fffffe;	/* NOTE(review): historical constant — confirm */
	memset(&eah.cprmask, '\0', sizeof eah.cprmask);
	eah.gp_value = 0;	/* unused. */

	/* Fill in the ECOFF file header. */
	if (format_bigendian)
		efh.f_magic = MIPSEBMAGIC;
	else
		efh.f_magic = MIPSELMAGIC;
	if (addflag)
		nosecs = 6;
	else
		nosecs = 3;
	efh.f_nscns = nosecs;
	efh.f_timdat = 0;	/* bogus */
	efh.f_symptr = 0;
	efh.f_nsyms = 0;
	efh.f_opthdr = sizeof eah;
	efh.f_flags = 0x100f;	/* Stripped, not sharable. */

	/* Section headers: .text/.data/.bss always; with -a also empty
	 * .rdata/.sdata/.sbss placeholders. */
	memset(esecs, 0, sizeof esecs);
	strcpy(esecs[0].s_name, ".text");
	strcpy(esecs[1].s_name, ".data");
	strcpy(esecs[2].s_name, ".bss");
	if (addflag) {
		strcpy(esecs[3].s_name, ".rdata");
		strcpy(esecs[4].s_name, ".sdata");
		strcpy(esecs[5].s_name, ".sbss");
	}
	esecs[0].s_paddr = esecs[0].s_vaddr = eah.text_start;
	esecs[1].s_paddr = esecs[1].s_vaddr = eah.data_start;
	esecs[2].s_paddr = esecs[2].s_vaddr = eah.bss_start;
	if (addflag) {
		esecs[3].s_paddr = esecs[3].s_vaddr = 0;
		esecs[4].s_paddr = esecs[4].s_vaddr = 0;
		esecs[5].s_paddr = esecs[5].s_vaddr = 0;
	}
	esecs[0].s_size = eah.tsize;
	esecs[1].s_size = eah.dsize;
	esecs[2].s_size = eah.bsize;
	if (addflag) {
		esecs[3].s_size = 0;
		esecs[4].s_size = 0;
		esecs[5].s_size = 0;
	}
	esecs[0].s_scnptr = N_TXTOFF(efh, eah);
	esecs[1].s_scnptr = N_DATOFF(efh, eah);
#define ECOFF_SEGMENT_ALIGNMENT(a) 0x10
#define ECOFF_ROUND(s, a) (((s)+(a)-1)&~((a)-1))
	/* .bss file offset follows .data rounded up to segment alignment. */
	esecs[2].s_scnptr = esecs[1].s_scnptr +
	    ECOFF_ROUND(esecs[1].s_size, ECOFF_SEGMENT_ALIGNMENT(&eah));
	if (addflag) {
		esecs[3].s_scnptr = 0;
		esecs[4].s_scnptr = 0;
		esecs[5].s_scnptr = 0;
	}
	/* No relocations or line numbers in the stripped output. */
	esecs[0].s_relptr = esecs[1].s_relptr = esecs[2].s_relptr = 0;
	esecs[0].s_lnnoptr = esecs[1].s_lnnoptr = esecs[2].s_lnnoptr = 0;
	esecs[0].s_nreloc = esecs[1].s_nreloc = esecs[2].s_nreloc = 0;
	esecs[0].s_nlnno = esecs[1].s_nlnno = esecs[2].s_nlnno = 0;
	if (addflag) {
		esecs[3].s_relptr = esecs[4].s_relptr =
esecs[5].s_relptr = 0; esecs[3].s_lnnoptr = esecs[4].s_lnnoptr = esecs[5].s_lnnoptr = 0; esecs[3].s_nreloc = esecs[4].s_nreloc = esecs[5].s_nreloc = 0; esecs[3].s_nlnno = esecs[4].s_nlnno = esecs[5].s_nlnno = 0; } esecs[0].s_flags = 0x20; esecs[1].s_flags = 0x40; esecs[2].s_flags = 0x82; if (addflag) { esecs[3].s_flags = 0x100; esecs[4].s_flags = 0x200; esecs[5].s_flags = 0x400; } /* Make the output file... */ if ((outfile = open(argv[2], O_WRONLY | O_CREAT, 0777)) < 0) { fprintf(stderr, "Unable to create %s: %s\n", argv[2], strerror(errno)); exit(1); } if (must_convert_endian) convert_ecoff_filehdr(&efh); /* Write the headers... */ i = write(outfile, &efh, sizeof efh); if (i != sizeof efh) { perror("efh: write"); exit(1); for (i = 0; i < nosecs; i++) { printf ("Section %d: %s phys %lx size %lx file offset %lx\n", i, esecs[i].s_name, esecs[i].s_paddr, esecs[i].s_size, esecs[i].s_scnptr); } } fprintf(stderr, "wrote %d byte file header.\n", i); if (must_convert_endian) convert_ecoff_aouthdr(&eah); i = write(outfile, &eah, sizeof eah); if (i != sizeof eah) { perror("eah: write"); exit(1); } fprintf(stderr, "wrote %d byte a.out header.\n", i); if (must_convert_endian) convert_ecoff_esecs(&esecs[0], nosecs); i = write(outfile, &esecs, nosecs * sizeof(struct scnhdr)); if (i != nosecs * sizeof(struct scnhdr)) { perror("esecs: write"); exit(1); } fprintf(stderr, "wrote %d bytes of section headers.\n", i); pad = (sizeof(efh) + sizeof(eah) + nosecs * sizeof(struct scnhdr)) & 15; if (pad) { pad = 16 - pad; i = write(outfile, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0", pad); if (i < 0) { perror("ipad: write"); exit(1); } fprintf(stderr, "wrote %d byte pad.\n", i); } /* * Copy the loadable sections. Zero-fill any gaps less than 64k; * complain about any zero-filling, and die if we're asked to zero-fill * more than 64k. */ for (i = 0; i < ex.e_phnum; i++) { /* Unprocessable sections were handled above, so just verify that the section can be loaded before copying. 
		 */
		if (ph[i].p_type == PT_LOAD && ph[i].p_filesz) {
			if (cur_vma != ph[i].p_vaddr) {
				/* Zero-fill the hole between the previous
				 * segment's end and this one's start. */
				unsigned long gap = ph[i].p_vaddr - cur_vma;
				char obuf[1024];

				if (gap > 65536) {
					/* NOTE(review): %ld with unsigned
					 * long 'gap' — %lu would be the
					 * matching conversion. */
					fprintf(stderr,
						"Intersegment gap (%ld bytes) too large.\n",
						gap);
					exit(1);
				}
				fprintf(stderr,
					"Warning: %ld byte intersegment gap.\n",
					gap);
				memset(obuf, 0, sizeof obuf);
				while (gap) {
					int count = write(outfile, obuf,
							  (gap > sizeof obuf
							   ? sizeof obuf : gap));
					if (count < 0) {
						fprintf(stderr,
							"Error writing gap: %s\n",
							strerror(errno));
						exit(1);
					}
					gap -= count;
				}
			}
			fprintf(stderr, "writing %d bytes...\n",
				ph[i].p_filesz);
			copy(outfile, infile, ph[i].p_offset,
			     ph[i].p_filesz);
			cur_vma = ph[i].p_vaddr + ph[i].p_filesz;
		}
	}

	/*
	 * Write a page of padding for boot PROMS that read entire pages.
	 * Without this, they may attempt to read past the end of the
	 * data section, incur an error, and refuse to boot.
	 */
	{
		char obuf[4096];

		memset(obuf, 0, sizeof obuf);
		if (write(outfile, obuf, sizeof(obuf)) != sizeof(obuf)) {
			fprintf(stderr, "Error writing PROM padding: %s\n",
				strerror(errno));
			exit(1);
		}
	}

	/* Looks like we won... */
	exit(0);
}
gpl-2.0
SlimRoms/kernel_lge_geeb
arch/mips/txx9/rbtx4927/prom.c
9533
1725
/*
 * rbtx4927 specific prom routines
 *
 * Author: MontaVista Software, Inc.
 *         source@mvista.com
 *
 * Copyright 2001-2002 MontaVista Software Inc.
 *
 * Copyright (C) 2004 MontaVista Software Inc.
 * Author: Manish Lachwani, mlachwani@mvista.com
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <asm/bootinfo.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/rbtx4927.h>

/*
 * Early boot setup for the RBTX4927 board: register the probed RAM
 * size and route early console output through SIO channel 0.
 */
void __init rbtx4927_prom_init(void)
{
	/* All memory from 0 up to the probed size is usable RAM. */
	add_memory_region(0, tx4927_get_mem_size(), BOOT_MEM_RAM);
	/* NOTE(review): the 0xfffffffffULL mask (nine f's, 36 bits)
	 * presumably truncates the register address to the TX4927
	 * 36-bit physical address space — confirm before changing. */
	txx9_sio_putchar_init(TX4927_SIO_REG(0) & 0xfffffffffULL);
}
gpl-2.0
thypon/bowser-kernel
arch/x86/kernel/doublefault_32.c
9789
1695
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>

/* Dedicated stack for the double-fault handler task (in longs). */
#define DOUBLEFAULT_STACKSIZE (1024)
static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
/* Initial stack pointer: top of the dedicated stack. */
#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)

/* Sanity check: address lies inside the kernel direct-mapped range. */
#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)

/*
 * Entry point of the double-fault task: dump the faulting task's TSS
 * registers (located via the GDT) and then spin forever — there is no
 * recovery from a double fault.
 */
static void doublefault_fn(void)
{
	struct desc_ptr gdt_desc = {0, 0};
	unsigned long gdt, tss;

	store_gdt(&gdt_desc);
	gdt = gdt_desc.address;

	printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n",
	       gdt, gdt_desc.size);

	if (ptr_ok(gdt)) {
		/* Locate the TSS descriptor and read its base address. */
		gdt += GDT_ENTRY_TSS << 3;
		tss = get_desc_base((struct desc_struct *)gdt);
		printk(KERN_EMERG "double fault, tss at %08lx\n", tss);

		if (ptr_ok(tss)) {
			struct x86_hw_tss *t = (struct x86_hw_tss *)tss;

			printk(KERN_EMERG "eip = %08lx, esp = %08lx\n",
			       t->ip, t->sp);

			printk(KERN_EMERG "eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n",
			       t->ax, t->bx, t->cx, t->dx);
			printk(KERN_EMERG "esi = %08lx, edi = %08lx\n",
			       t->si, t->di);
		}
	}

	/* Never return; the machine state is unrecoverable. */
	for (;;)
		cpu_relax();
}

/*
 * Hardware task used by the CPU's double-fault task gate.  Preset so
 * that a double fault switches directly into doublefault_fn() on the
 * dedicated stack with kernel segments loaded.
 */
struct tss_struct doublefault_tss __cacheline_aligned = {
	.x86_tss = {
		.sp0		= STACK_START,
		.ss0		= __KERNEL_DS,
		.ldt		= 0,
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,

		.ip		= (unsigned long) doublefault_fn,
		/* 0x2 bit is always set */
		.flags		= X86_EFLAGS_SF | 0x2,
		.sp		= STACK_START,
		.es		= __USER_DS,
		.cs		= __KERNEL_CS,
		.ss		= __KERNEL_DS,
		.ds		= __USER_DS,
		.fs		= __KERNEL_PERCPU,

		.__cr3		= __pa_nodebug(swapper_pg_dir),
	}
};
gpl-2.0
allwinner/linux-2.6.36
net/sctp/command.c
12349
2384
/* SCTP kernel implementation Copyright (C) 1999-2001
 * Cisco, Motorola, and IBM
 * Copyright 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions manipulate sctp command sequences.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Initialize a block of memory as a command sequence.
 * Always returns 1; the operation cannot fail.
 */
int sctp_init_cmd_seq(sctp_cmd_seq_t *seq)
{
	memset(seq, 0, sizeof(sctp_cmd_seq_t));
	return 1;		/* We always succeed.  */
}

/* Add a command to a sctp_cmd_seq_t.
 * BUGs (halts) if the command sequence is already full; the old
 * "Return 0 if full" wording predates the void/BUG_ON interface.
*/ void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj) { BUG_ON(seq->next_free_slot >= SCTP_MAX_NUM_COMMANDS); seq->cmds[seq->next_free_slot].verb = verb; seq->cmds[seq->next_free_slot++].obj = obj; } /* Return the next command structure in a sctp_cmd_seq. * Returns NULL at the end of the sequence. */ sctp_cmd_t *sctp_next_cmd(sctp_cmd_seq_t *seq) { sctp_cmd_t *retval = NULL; if (seq->next_cmd < seq->next_free_slot) retval = &seq->cmds[seq->next_cmd++]; return retval; }
gpl-2.0
akw28888/caf2
arch/mips/cobalt/lcd.c
13885
1549
/* * Registration of Cobalt LCD platform device. * * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/errno.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> static struct resource cobalt_lcd_resource __initdata = { .start = 0x1f000000, .end = 0x1f00001f, .flags = IORESOURCE_MEM, }; static __init int cobalt_lcd_add(void) { struct platform_device *pdev; int retval; pdev = platform_device_alloc("cobalt-lcd", -1); if (!pdev) return -ENOMEM; retval = platform_device_add_resources(pdev, &cobalt_lcd_resource, 1); if (retval) goto err_free_device; retval = platform_device_add(pdev); if (retval) goto err_free_device; return 0; err_free_device: platform_device_put(pdev); return retval; } device_initcall(cobalt_lcd_add);
gpl-2.0
evilwombat/linux-hero4
arch/mips/emma/markeins/led.c
13885
1510
/* * Copyright (C) NEC Electronics Corporation 2004-2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <asm/emma/emma2rh.h> const unsigned long clear = 0x20202020; #define LED_BASE 0xb1400038 void markeins_led_clear(void) { emma2rh_out32(LED_BASE, clear); emma2rh_out32(LED_BASE + 4, clear); } void markeins_led(const char *str) { int i; int len = strlen(str); markeins_led_clear(); if (len > 8) len = 8; if (emma2rh_in32(0xb0000800) & (0x1 << 18)) for (i = 0; i < len; i++) emma2rh_out8(LED_BASE + i, str[i]); else for (i = 0; i < len; i++) emma2rh_out8(LED_BASE + (i & 4) + (3 - (i & 3)), str[i]); } void markeins_led_hex(u32 val) { char str[10]; sprintf(str, "%08x", val); markeins_led(str); }
gpl-2.0
StreamUtils/gst-plugins-bad
sys/vdpau/gstvdputils.c
62
2691
/*
 * gst-plugins-bad
 * Copyright (C) 2012 Edward Hervey <edward@collabora.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "gstvdputils.h"

/* One row per supported VDPAU YCbCr surface format and its GStreamer
 * video-format equivalent. */
typedef struct
{
  VdpChromaType chroma_type;
  VdpYCbCrFormat format;
  GstVideoFormat vformat;
} GstVdpVideoBufferFormats;

static const GstVdpVideoBufferFormats yuv_formats[] = {
  {VDP_CHROMA_TYPE_420, VDP_YCBCR_FORMAT_YV12, GST_VIDEO_FORMAT_YV12},
  {VDP_CHROMA_TYPE_420, VDP_YCBCR_FORMAT_NV12, GST_VIDEO_FORMAT_NV12},
  {VDP_CHROMA_TYPE_422, VDP_YCBCR_FORMAT_UYVY, GST_VIDEO_FORMAT_UYVY},
  {VDP_CHROMA_TYPE_444, VDP_YCBCR_FORMAT_V8U8Y8A8, GST_VIDEO_FORMAT_AYUV},
  /* { */
  /*     VDP_CHROMA_TYPE_444, */
  /*     VDP_YCBCR_FORMAT_Y8U8V8A8, */
  /*     GST_MAKE_FOURCC ('A', 'V', 'U', 'Y') */
  /* }, */
  {VDP_CHROMA_TYPE_422, VDP_YCBCR_FORMAT_YUYV, GST_VIDEO_FORMAT_YUY2}
};

/* Map a GStreamer video format to the matching VDPAU YCbCr format.
 * NOTE(review): returns -1 through an enum return type when there is
 * no match — callers must compare against -1, not a VDP_* value. */
VdpYCbCrFormat
gst_video_format_to_vdp_ycbcr (GstVideoFormat format)
{
  int i;

  for (i = 0; i < G_N_ELEMENTS (yuv_formats); i++) {
    if (yuv_formats[i].vformat == format)
      return yuv_formats[i].format;
  }

  return -1;
}

/* Derive the VDPAU chroma type (4:4:4 / 4:2:2 / 4:2:0) from a video
 * info's per-plane subsampling factors; -1 when unrecognized. */
VdpChromaType
gst_video_info_to_vdp_chroma_type (GstVideoInfo * info)
{
  const GstVideoFormatInfo *finfo = info->finfo;
  VdpChromaType ret = -1;

  /* Check subsampling of second plane (first is always non-subsampled) */
  switch (GST_VIDEO_FORMAT_INFO_W_SUB (finfo, 1)) {
    case 0:
      /* Not subsampled in width for second plane */
      if (GST_VIDEO_FORMAT_INFO_W_SUB (finfo, 2))
        /* Not subsampled at all (4:4:4) */
        ret = VDP_CHROMA_TYPE_444;
      break;
    case 1:
      /* Subsampled horizontally once */
      if (GST_VIDEO_FORMAT_INFO_H_SUB (finfo, 2) == 0)
        /* Not subsampled vertically (4:2:2) */
        ret = VDP_CHROMA_TYPE_422;
      else if (GST_VIDEO_FORMAT_INFO_H_SUB (finfo, 2) == 1)
        /* Subsampled vertically once (4:2:0) */
        ret = VDP_CHROMA_TYPE_420;
      break;
    default:
      break;
  }

  return ret;
}
gpl-2.0
ghbhaha/AK-OnePone
drivers/crypto/msm/qce50.c
62
160354
/* Qualcomm Crypto Engine driver.
 *
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/qcedev.h>
#include <linux/bitops.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <mach/dma.h>
#include <mach/clk.h>
#include <mach/socinfo.h>
#include <mach/qcrypto.h>

#include "qce.h"
#include "qce50.h"
#include "qcryptohw_50.h"
#include "qce_ota.h"

#define CRYPTO_CONFIG_RESET 0xE001F
#define QCE_MAX_NUM_DSCR    0x500
#define QCE_SECTOR_SIZE	    0x200

/* Serializes registration/lookup on qce50_bam_list. */
static DEFINE_MUTEX(bam_register_lock);

/* One refcounted BAM registration, shared between CE engines that use
 * the same BAM hardware block. */
struct bam_registration_info {
	struct list_head qlist;
	uint32_t handle;		/* SPS BAM handle */
	uint32_t cnt;			/* reference count */
	uint32_t bam_mem;		/* BAM physical base */
	void __iomem *bam_iobase;	/* BAM mapped io base */
	bool support_cmd_dscr;
};
static LIST_HEAD(qce50_bam_list);

/*
 * CE HW device structure.
 * Each engine has an instance of the structure.
 * Each engine can only handle one crypto operation at one time. It is up to
 * the sw above to ensure single threading of operation on an engine.
 */
struct qce_device {
	struct device *pdev;        /* Handle to platform_device structure */

	struct bam_registration_info *pbam;

	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
	int memsize;				/* Memory allocated */
	uint32_t bam_mem;		/* bam physical address, from DT */
	uint32_t bam_mem_size;		/* bam io size, from DT */
	int is_shared;			/* CE HW is shared */
	bool support_cmd_dscr;
	bool support_hw_key;
	bool support_clk_mgmt_sus_res;

	void __iomem *iobase;	    /* Virtual io base of CE HW  */
	unsigned int phy_iobase;    /* Physical io base of CE HW    */

	struct clk *ce_core_src_clk;	/* Handle to CE src clk*/
	struct clk *ce_core_clk;	/* Handle to CE clk */
	struct clk *ce_clk;		/* Handle to CE clk */
	struct clk *ce_bus_clk;	/* Handle to CE AXI clk*/

	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */

	int assoc_nents;
	int ivsize;
	int authsize;
	int src_nents;
	int dst_nents;

	dma_addr_t phy_iv_in;
	unsigned char dec_iv[16];
	int dir;
	void *areq;
	enum qce_cipher_mode_enum mode;
	struct qce_ce_cfg_reg_setting reg;
	struct ce_sps_data ce_sps;
	uint32_t engines_avail;
	dma_addr_t phy_ota_src;
	dma_addr_t phy_ota_dst;
	unsigned int ota_size;

	bool use_sw_aes_cbc_ecb_ctr_algo;
	bool use_sw_aead_algo;
	bool use_sw_aes_xts_algo;
	bool use_sw_ahash_algo;
	bool use_sw_hmac_algo;
	bool use_sw_aes_ccm_algo;
};

/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint32_t  _std_init_vector_sha1[] =   {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};

/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C,	0x1F83D9AB, 0x5BE0CD19
};

/* Pack 'len' bytes of 'b' into big-endian 32-bit words at 'iv';
 * a trailing partial word is left-justified and zero-padded. */
static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n;

	n = len  / sizeof(uint32_t);
	for (; n > 0; n--) {
		*iv =  ((*b << 24)      & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000)   |
				(((*(b+2)) << 8) & 0xff00)     |
				(*(b+3)          & 0xff);
		b += sizeof(uint32_t);
		iv++;
	}

	n = len %  sizeof(uint32_t);
	if (n == 3) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000)   |
				(((*(b+2)) << 8) & 0xff00);
	} else if (n == 2) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000);
	} else if (n == 1) {
		*iv = ((*b << 24) & 0xff000000);
	}
}

/* Reverse the byte order of an IV shorter than or equal to
 * AES_IV_LENGTH, right-aligning it, then pack into net words. */
static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned i, j;
	unsigned char swap_iv[AES_IV_LENGTH];

	memset(swap_iv, 0, AES_IV_LENGTH);
	for (i = (AES_IV_LENGTH-len), j = len-1;  i < AES_IV_LENGTH; i++, j--)
		swap_iv[i] = b[j];
	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
}

/* Number of scatterlist entries needed to cover 'nbytes'. */
static int count_sg(struct scatterlist *sg, int nbytes)
{
	int i;

	for (i = 0; nbytes > 0; i++, sg = scatterwalk_sg_next(sg))
		nbytes -= sg->length;
	return i;
}

/* DMA-map each of 'nents' scatterlist entries individually. */
static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; ++i) {
		dma_map_sg(dev, sg, 1, direction);
		sg = scatterwalk_sg_next(sg);
	}

	return nents;
}

/* Undo qce_dma_map_sg(): unmap each entry individually. */
static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; ++i) {
		dma_unmap_sg(dev, sg, 1, direction);
		sg = scatterwalk_sg_next(sg);
	}

	return nents;
}

/* Probe the crypto core's version register; only major revision 5 is
 * supported.  Records minor version, engine availability and burst
 * size in pce_dev.  Returns 0 on success, -EIO for unknown hardware. */
static int _probe_ce_engine(struct qce_device *pce_dev)
{
	unsigned int rev;
	unsigned int maj_rev, min_rev, step_rev;

	rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
	mb();	/* ensure the register read completes before using it */
	maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
	min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
	step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;

	if (maj_rev != 0x05) {
		pr_err("Unknown Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
		return -EIO;
	};
	pce_dev->ce_sps.minor_version = min_rev;

	pce_dev->engines_avail = readl_relaxed(pce_dev->iobase +
					CRYPTO_ENGINES_AVAIL);
	dev_info(pce_dev->pdev, "Qualcomm Crypto %d.%d.%d device found @0x%x\n",
			maj_rev, min_rev, step_rev, pce_dev->phy_iobase);

	pce_dev->ce_sps.ce_burst_size = MAX_CE_BAM_BURST_SIZE;

	dev_info(pce_dev->pdev,
			"CE device = 0x%x\n, "
			"IO base, CE = 0x%x\n, "
			"Consumer (IN) PIPE %d,    "
			"Producer (OUT) PIPE %d\n"
			"IO base BAM = 0x%x\n"
			"BAM IRQ %d\n"
			"Engines Availability = 0x%x\n",
			(uint32_t) pce_dev->ce_sps.ce_device,
			(uint32_t) pce_dev->iobase,
			pce_dev->ce_sps.dest_pipe_index,
			pce_dev->ce_sps.src_pipe_index,
			(uint32_t)pce_dev->ce_sps.bam_iobase,
			pce_dev->ce_sps.bam_irq,
			pce_dev->engines_avail);
	return 0;
};

/* Select the precomputed command list matching the hash request's
 * algorithm (and, for AES-CMAC, its key size).  Always returns 0. */
static int _ce_get_hash_cmdlistinfo(struct qce_device *pce_dev,
				struct qce_sha_req *sreq,
				struct qce_cmdlist_info **cmdplistinfo)
{
	struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
		*cmdplistinfo = &cmdlistptr->auth_sha1;
		break;

	case QCE_HASH_SHA256:
		*cmdplistinfo = &cmdlistptr->auth_sha256;
		break;

	case QCE_HASH_SHA1_HMAC:
		*cmdplistinfo = &cmdlistptr->auth_sha1_hmac;
		break;

	case QCE_HASH_SHA256_HMAC:
		*cmdplistinfo = &cmdlistptr->auth_sha256_hmac;
		break;

	case QCE_HASH_AES_CMAC:
		if (sreq->authklen == AES128_KEY_SIZE)
			*cmdplistinfo = &cmdlistptr->auth_aes_128_cmac;
		else
			*cmdplistinfo = &cmdlistptr->auth_aes_256_cmac;
		break;

	default:
		break;
	}
	return 0;
}

/* Patch the selected command list with the parameters of one hash/HMAC
 * request: key (unless HW/pipe key is used), IV or running digest,
 * byte counts and first/last-block flags.  Returns 0 on success or a
 * negative errno for bad sizes/algorithms. */
static int _ce_setup_hash(struct qce_device *pce_dev,
				struct qce_sha_req *sreq,
				struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	bool sha1 = false;
	struct sps_command_element *pce = NULL;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
	uint32_t auth_cfg;

	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
			(sreq->alg ==  QCE_HASH_AES_CMAC)) {

		/* no more check for null key. use flag */
		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY)
						== QCRYPTO_CTX_USE_HW_KEY)
			use_hw_key = true;
		else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
						QCRYPTO_CTX_USE_PIPE_KEY)
			use_pipe_key = true;
		pce = cmdlistinfo->go_proc;
		if (use_hw_key == true) {
			pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
							pce_dev->phy_iobase);
		} else {
			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
							pce_dev->phy_iobase);
			pce = cmdlistinfo->auth_key;
			if (use_pipe_key == false) {
				_byte_stream_to_net_words(mackey32,
						sreq->authkey,
						sreq->authklen);
				for (i = 0; i < authk_size_in_word; i++, pce++)
					pce->data = mackey32[i];
			}
		}
	}

	if (sreq->alg ==  QCE_HASH_AES_CMAC)
		goto go_proc;

	/* if not the last, the size has to be on the block boundary */
	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
	case QCE_HASH_SHA1_HMAC:
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA256:
	case QCE_HASH_SHA256_HMAC:
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
	if (sreq->first_blk) {
		if (sha1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else {
		/* Continuation: restart from the caller's running digest. */
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
	}

	pce = cmdlistinfo->auth_iv;
	for (i = 0; i < 5; i++, pce++)
		pce->data = auth32[i];

	if ((sreq->alg == QCE_HASH_SHA256) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		for (i = 5; i < 8; i++, pce++)
			pce->data = auth32[i];
	}

	/* write auth_bytecnt 0/1, start with 0 */
	pce = cmdlistinfo->auth_bytecount;
	for (i = 0; i < 2; i++, pce++)
		pce->data = sreq->auth_data[i];

	/* Set/reset  last bit in CFG register  */
	pce = cmdlistinfo->auth_seg_cfg;
	auth_cfg = pce->data & ~(1 << CRYPTO_LAST |
				1 << CRYPTO_FIRST |
				1 << CRYPTO_USE_PIPE_KEY_AUTH |
				1 << CRYPTO_USE_HW_KEY_AUTH);
	if (sreq->last_blk)
		auth_cfg |= 1 << CRYPTO_LAST;
	if (sreq->first_blk)
		auth_cfg |= 1 << CRYPTO_FIRST;
	if (use_hw_key)
		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
	if (use_pipe_key)
		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
	pce->data = auth_cfg;
go_proc:
	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = sreq->size;

	pce = cmdlistinfo->encr_seg_cfg;
	pce->data = 0;

	/* write auth seg size start*/
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;

	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = sreq->size;

	return 0;
}

/* Select the precomputed AEAD (cipher + HMAC-SHA1) command list for
 * the request's cipher algorithm, mode and AES key size; NULL when the
 * combination is unsupported. */
static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo(
			struct qce_device *pce_dev, struct qce_req *creq)
{
	switch (creq->alg) {
	case CIPHER_ALG_DES:
		switch (creq->mode) {
		case QCE_MODE_ECB:
			return &pce_dev->ce_sps.
				cmdlistptr.aead_hmac_sha1_ecb_des;
			break;
		case QCE_MODE_CBC:
			return &pce_dev->ce_sps.
				cmdlistptr.aead_hmac_sha1_cbc_des;
			break;
		default:
			return NULL;
		}
		break;
	case CIPHER_ALG_3DES:
		switch (creq->mode) {
		case QCE_MODE_ECB:
			return &pce_dev->ce_sps.
				cmdlistptr.aead_hmac_sha1_ecb_3des;
			break;
		case QCE_MODE_CBC:
			return &pce_dev->ce_sps.
				cmdlistptr.aead_hmac_sha1_cbc_3des;
			break;
		default:
			return NULL;
		}
		break;
	case CIPHER_ALG_AES:
		switch (creq->mode) {
		case QCE_MODE_ECB:
			if (creq->encklen ==  AES128_KEY_SIZE)
				return &pce_dev->ce_sps.
					cmdlistptr.aead_hmac_sha1_ecb_aes_128;
			else if (creq->encklen ==  AES256_KEY_SIZE)
				return &pce_dev->ce_sps.
					cmdlistptr.aead_hmac_sha1_ecb_aes_256;
			else
				return NULL;
			break;
		case QCE_MODE_CBC:
			if (creq->encklen ==  AES128_KEY_SIZE)
				return &pce_dev->ce_sps.
					cmdlistptr.aead_hmac_sha1_cbc_aes_128;
			else if (creq->encklen ==  AES256_KEY_SIZE)
				return &pce_dev->ce_sps.
cmdlistptr.aead_hmac_sha1_cbc_aes_256; else return NULL; break; default: return NULL; } break; default: return NULL; } return NULL; } static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset, struct qce_cmdlist_info *cmdlistinfo) { int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t); int i; uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0}; struct sps_command_element *pce; uint32_t a_cfg; uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0}; uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0}; uint32_t enck_size_in_word = 0; uint32_t enciv_in_word; uint32_t key_size; uint32_t encr_cfg = 0; uint32_t ivsize = q_req->ivsize; key_size = q_req->encklen; enck_size_in_word = key_size/sizeof(uint32_t); switch (q_req->alg) { case CIPHER_ALG_DES: enciv_in_word = 2; break; case CIPHER_ALG_3DES: enciv_in_word = 2; break; case CIPHER_ALG_AES: if ((key_size != AES128_KEY_SIZE) && (key_size != AES256_KEY_SIZE)) return -EINVAL; enciv_in_word = 4; break; default: return -EINVAL; } switch (q_req->mode) { case QCE_MODE_ECB: case QCE_MODE_CBC: case QCE_MODE_CTR: pce_dev->mode = q_req->mode; break; default: return -EINVAL; } if (q_req->mode != QCE_MODE_ECB) { _byte_stream_to_net_words(enciv32, q_req->iv, ivsize); pce = cmdlistinfo->encr_cntr_iv; for (i = 0; i < enciv_in_word; i++, pce++) pce->data = enciv32[i]; } /* * write encr key * do not use hw key or pipe key */ _byte_stream_to_net_words(enckey32, q_req->enckey, key_size); pce = cmdlistinfo->encr_key; for (i = 0; i < enck_size_in_word; i++, pce++) pce->data = enckey32[i]; /* write encr seg cfg */ pce = cmdlistinfo->encr_seg_cfg; encr_cfg = pce->data; if (q_req->dir == QCE_ENCRYPT) encr_cfg |= (1 << CRYPTO_ENCODE); else encr_cfg &= ~(1 << CRYPTO_ENCODE); pce->data = encr_cfg; /* we only support sha1-hmac at this point */ _byte_stream_to_net_words(mackey32, q_req->authkey, q_req->authklen); pce = cmdlistinfo->auth_key; for (i = 0; i < 
authk_size_in_word; i++, pce++) pce->data = mackey32[i]; pce = cmdlistinfo->auth_iv; for (i = 0; i < 5; i++, pce++) pce->data = _std_init_vector_sha1[i]; /* write auth_bytecnt 0/1, start with 0 */ pce = cmdlistinfo->auth_bytecount; for (i = 0; i < 2; i++, pce++) pce->data = 0; pce = cmdlistinfo->auth_seg_cfg; a_cfg = pce->data; a_cfg &= ~(CRYPTO_AUTH_POS_MASK); if (q_req->dir == QCE_ENCRYPT) a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS); else a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); pce->data = a_cfg; /* write auth seg size */ pce = cmdlistinfo->auth_seg_size; pce->data = totallen_in; /* write auth seg size start*/ pce = cmdlistinfo->auth_seg_start; pce->data = 0; /* write seg size */ pce = cmdlistinfo->seg_size; pce->data = totallen_in; /* write encr seg size */ pce = cmdlistinfo->encr_seg_size; pce->data = q_req->cryptlen; /* write encr seg start */ pce = cmdlistinfo->encr_seg_start; pce->data = (coffset & 0xffff); return 0; }; static int _ce_get_cipher_cmdlistinfo(struct qce_device *pce_dev, struct qce_req *creq, struct qce_cmdlist_info **cmdlistinfo) { struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr; if (creq->alg != CIPHER_ALG_AES) { switch (creq->alg) { case CIPHER_ALG_DES: if (creq->mode == QCE_MODE_ECB) *cmdlistinfo = &cmdlistptr->cipher_des_ecb; else *cmdlistinfo = &cmdlistptr->cipher_des_cbc; break; case CIPHER_ALG_3DES: if (creq->mode == QCE_MODE_ECB) *cmdlistinfo = &cmdlistptr->cipher_3des_ecb; else *cmdlistinfo = &cmdlistptr->cipher_3des_cbc; break; default: break; } } else { switch (creq->mode) { case QCE_MODE_ECB: if (creq->encklen == AES128_KEY_SIZE) *cmdlistinfo = &cmdlistptr->cipher_aes_128_ecb; else *cmdlistinfo = &cmdlistptr->cipher_aes_256_ecb; break; case QCE_MODE_CBC: case QCE_MODE_CTR: if (creq->encklen == AES128_KEY_SIZE) *cmdlistinfo = &cmdlistptr->cipher_aes_128_cbc_ctr; else *cmdlistinfo = &cmdlistptr->cipher_aes_256_cbc_ctr; break; case QCE_MODE_XTS: if (creq->encklen/2 == AES128_KEY_SIZE) 
*cmdlistinfo = &cmdlistptr->cipher_aes_128_xts; else *cmdlistinfo = &cmdlistptr->cipher_aes_256_xts; break; case QCE_MODE_CCM: if (creq->encklen == AES128_KEY_SIZE) *cmdlistinfo = &cmdlistptr->aead_aes_128_ccm; else *cmdlistinfo = &cmdlistptr->aead_aes_256_ccm; break; default: break; } } return 0; } static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, uint32_t totallen_in, uint32_t coffset, struct qce_cmdlist_info *cmdlistinfo) { uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = { 0, 0, 0, 0}; uint32_t enck_size_in_word = 0; uint32_t key_size; bool use_hw_key = false; bool use_pipe_key = false; uint32_t encr_cfg = 0; uint32_t ivsize = creq->ivsize; int i; struct sps_command_element *pce = NULL; if (creq->mode == QCE_MODE_XTS) key_size = creq->encklen/2; else key_size = creq->encklen; pce = cmdlistinfo->go_proc; if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) { use_hw_key = true; } else { if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) == QCRYPTO_CTX_USE_PIPE_KEY) use_pipe_key = true; } pce = cmdlistinfo->go_proc; if (use_hw_key == true) pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG + pce_dev->phy_iobase); else pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase); if ((use_pipe_key == false) && (use_hw_key == false)) { _byte_stream_to_net_words(enckey32, creq->enckey, key_size); enck_size_in_word = key_size/sizeof(uint32_t); } if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) { uint32_t authklen32 = creq->encklen/sizeof(uint32_t); uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t); uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0}; uint32_t auth_cfg = 0; /* write nonce */ _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE); pce = cmdlistinfo->auth_nonce_info; for (i = 0; i < noncelen32; i++, pce++) pce->data = nonce32[i]; if (creq->authklen == AES128_KEY_SIZE) 
auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128; else { if (creq->authklen == AES256_KEY_SIZE) auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256; } if (creq->dir == QCE_ENCRYPT) auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); else auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS); auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE); if (use_hw_key == true) { auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH); } else { auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH); /* write auth key */ pce = cmdlistinfo->auth_key; for (i = 0; i < authklen32; i++, pce++) pce->data = enckey32[i]; } pce = cmdlistinfo->auth_seg_cfg; pce->data = auth_cfg; pce = cmdlistinfo->auth_seg_size; if (creq->dir == QCE_ENCRYPT) pce->data = totallen_in; else pce->data = totallen_in - creq->authsize; pce = cmdlistinfo->auth_seg_start; pce->data = 0; } else { if (creq->op != QCE_REQ_AEAD) { pce = cmdlistinfo->auth_seg_cfg; pce->data = 0; } } switch (creq->mode) { case QCE_MODE_ECB: if (key_size == AES128_KEY_SIZE) encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128; else encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256; break; case QCE_MODE_CBC: if (key_size == AES128_KEY_SIZE) encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128; else encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256; break; case QCE_MODE_XTS: if (key_size == AES128_KEY_SIZE) encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128; else encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256; break; case QCE_MODE_CCM: if (key_size == AES128_KEY_SIZE) encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128; else encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256; encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) | (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM); break; case QCE_MODE_CTR: default: if (key_size == AES128_KEY_SIZE) encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128; else encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256; break; } pce_dev->mode = creq->mode; switch (creq->alg) { case CIPHER_ALG_DES: if (creq->mode != QCE_MODE_ECB) { _byte_stream_to_net_words(enciv32, creq->iv, ivsize); pce = 
cmdlistinfo->encr_cntr_iv; pce->data = enciv32[0]; pce++; pce->data = enciv32[1]; } if (use_hw_key == false) { pce = cmdlistinfo->encr_key; pce->data = enckey32[0]; pce++; pce->data = enckey32[1]; } break; case CIPHER_ALG_3DES: if (creq->mode != QCE_MODE_ECB) { _byte_stream_to_net_words(enciv32, creq->iv, ivsize); pce = cmdlistinfo->encr_cntr_iv; pce->data = enciv32[0]; pce++; pce->data = enciv32[1]; } if (use_hw_key == false) { /* write encr key */ pce = cmdlistinfo->encr_key; for (i = 0; i < 6; i++, pce++) pce->data = enckey32[i]; } break; case CIPHER_ALG_AES: default: if (creq->mode == QCE_MODE_XTS) { uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] = {0, 0, 0, 0, 0, 0, 0, 0}; uint32_t xtsklen = creq->encklen/(2 * sizeof(uint32_t)); if ((use_hw_key == false) && (use_pipe_key == false)) { _byte_stream_to_net_words(xtskey32, (creq->enckey + creq->encklen/2), creq->encklen/2); /* write xts encr key */ pce = cmdlistinfo->encr_xts_key; for (i = 0; i < xtsklen; i++, pce++) pce->data = xtskey32[i]; } /* write xts du size */ pce = cmdlistinfo->encr_xts_du_size; switch (creq->flags & QCRYPTO_CTX_XTS_MASK) { case QCRYPTO_CTX_XTS_DU_SIZE_512B: pce->data = min((unsigned int)QCE_SECTOR_SIZE, creq->cryptlen); break; case QCRYPTO_CTX_XTS_DU_SIZE_1KB: pce->data = min((unsigned int)QCE_SECTOR_SIZE * 2, creq->cryptlen); break; default: pce->data = creq->cryptlen; break; } } if (creq->mode != QCE_MODE_ECB) { if (creq->mode == QCE_MODE_XTS) _byte_stream_swap_to_net_words(enciv32, creq->iv, ivsize); else _byte_stream_to_net_words(enciv32, creq->iv, ivsize); /* write encr cntr iv */ pce = cmdlistinfo->encr_cntr_iv; for (i = 0; i < 4; i++, pce++) pce->data = enciv32[i]; if (creq->mode == QCE_MODE_CCM) { /* write cntr iv for ccm */ pce = cmdlistinfo->encr_ccm_cntr_iv; for (i = 0; i < 4; i++, pce++) pce->data = enciv32[i]; /* update cntr_iv[3] by one */ pce = cmdlistinfo->encr_cntr_iv; pce += 3; pce->data += 1; } } if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) { encr_cfg |= 
(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ); } else { if (use_hw_key == false) { /* write encr key */ pce = cmdlistinfo->encr_key; for (i = 0; i < enck_size_in_word; i++, pce++) pce->data = enckey32[i]; } } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */ break; } /* end of switch (creq->mode) */ if (use_pipe_key) encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED << CRYPTO_USE_PIPE_KEY_ENCR); /* write encr seg cfg */ pce = cmdlistinfo->encr_seg_cfg; if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) { if (creq->dir == QCE_ENCRYPT) pce->data |= (1 << CRYPTO_ENCODE); else pce->data &= ~(1 << CRYPTO_ENCODE); encr_cfg = pce->data; } else { encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE; } if (use_hw_key == true) encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR); else encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR); pce->data = encr_cfg; /* write encr seg size */ pce = cmdlistinfo->encr_seg_size; if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) pce->data = (creq->cryptlen + creq->authsize); else pce->data = creq->cryptlen; /* write encr seg start */ pce = cmdlistinfo->encr_seg_start; pce->data = (coffset & 0xffff); /* write seg size */ pce = cmdlistinfo->seg_size; pce->data = totallen_in; return 0; }; static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req, struct qce_cmdlist_info *cmdlistinfo) { uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)]; uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t); uint32_t cfg; struct sps_command_element *pce; int i; switch (req->algorithm) { case QCE_OTA_ALGO_KASUMI: cfg = pce_dev->reg.auth_cfg_kasumi; break; case QCE_OTA_ALGO_SNOW3G: default: cfg = pce_dev->reg.auth_cfg_snow3g; break; }; /* write key in CRYPTO_AUTH_IV0-3_REG */ _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE); pce = cmdlistinfo->auth_iv; for (i = 0; i < key_size_in_word; i++, pce++) pce->data = ikey32[i]; /* write last bits in CRYPTO_AUTH_IV4_REG */ 
pce->data = req->last_bits; /* write fresh to CRYPTO_AUTH_BYTECNT0_REG */ pce = cmdlistinfo->auth_bytecount; pce->data = req->fresh; /* write count-i to CRYPTO_AUTH_BYTECNT1_REG */ pce++; pce->data = req->count_i; /* write auth seg cfg */ pce = cmdlistinfo->auth_seg_cfg; if (req->direction == QCE_OTA_DIR_DOWNLINK) cfg |= BIT(CRYPTO_F9_DIRECTION); pce->data = cfg; /* write auth seg size */ pce = cmdlistinfo->auth_seg_size; pce->data = req->msize; /* write auth seg start*/ pce = cmdlistinfo->auth_seg_start; pce->data = 0; /* write seg size */ pce = cmdlistinfo->seg_size; pce->data = req->msize; /* write go */ pce = cmdlistinfo->go_proc; pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase); return 0; } static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req, bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size, struct qce_cmdlist_info *cmdlistinfo) { uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)]; uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t); uint32_t cfg; struct sps_command_element *pce; int i; switch (req->algorithm) { case QCE_OTA_ALGO_KASUMI: cfg = pce_dev->reg.encr_cfg_kasumi; break; case QCE_OTA_ALGO_SNOW3G: default: cfg = pce_dev->reg.encr_cfg_snow3g; break; }; /* write key */ _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE); pce = cmdlistinfo->encr_key; for (i = 0; i < key_size_in_word; i++, pce++) pce->data = ckey32[i]; /* write encr seg cfg */ pce = cmdlistinfo->encr_seg_cfg; if (key_stream_mode) cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE); if (req->direction == QCE_OTA_DIR_DOWNLINK) cfg |= BIT(CRYPTO_F8_DIRECTION); pce->data = cfg; /* write encr seg start */ pce = cmdlistinfo->encr_seg_start; pce->data = (cipher_offset & 0xffff); /* write encr seg size */ pce = cmdlistinfo->encr_seg_size; pce->data = cipher_size; /* write seg size */ pce = cmdlistinfo->seg_size; pce->data = req->data_len; /* write cntr0_iv0 for countC */ pce = cmdlistinfo->encr_cntr_iv; pce->data = 
req->count_c; /* write cntr1_iv1 for nPkts, and bearer */ pce++; if (npkts == 1) npkts = 0; pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER | npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT; /* write go */ pce = cmdlistinfo->go_proc; pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase); return 0; } static int _ce_setup_hash_direct(struct qce_device *pce_dev, struct qce_sha_req *sreq) { uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)]; uint32_t diglen; bool use_hw_key = false; bool use_pipe_key = false; int i; uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t); bool sha1 = false; uint32_t auth_cfg = 0; /* clear status */ writel_relaxed(0, pce_dev->iobase + CRYPTO_STATUS_REG); writel_relaxed(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase + CRYPTO_CONFIG_REG)); /* * Ensure previous instructions (setting the CONFIG register) * was completed before issuing starting to set other config register * This is to ensure the configurations are done in correct endian-ness * as set in the CONFIG registers */ mb(); if (sreq->alg == QCE_HASH_AES_CMAC) { /* write seg_cfg */ writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); /* write seg_cfg */ writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG); /* write seg_cfg */ writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG); /* Clear auth_ivn, auth_keyn registers */ for (i = 0; i < 16; i++) { writel_relaxed(0, (pce_dev->iobase + (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)))); writel_relaxed(0, (pce_dev->iobase + (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)))); } /* write auth_bytecnt 0/1/2/3, start with 0 */ for (i = 0; i < 4; i++) writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG + i * sizeof(uint32_t)); if (sreq->authklen == AES128_KEY_SIZE) auth_cfg = pce_dev->reg.auth_cfg_cmac_128; else auth_cfg = pce_dev->reg.auth_cfg_cmac_256; } if ((sreq->alg == 
QCE_HASH_SHA1_HMAC) || (sreq->alg == QCE_HASH_SHA256_HMAC) || (sreq->alg == QCE_HASH_AES_CMAC)) { _byte_stream_to_net_words(mackey32, sreq->authkey, sreq->authklen); /* no more check for null key. use flag to check*/ if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) { use_hw_key = true; } else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) == QCRYPTO_CTX_USE_PIPE_KEY) { use_pipe_key = true; } else { /* setup key */ for (i = 0; i < authk_size_in_word; i++) writel_relaxed(mackey32[i], (pce_dev->iobase + (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)))); } } if (sreq->alg == QCE_HASH_AES_CMAC) goto go_proc; /* if not the last, the size has to be on the block boundary */ if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE)) return -EIO; switch (sreq->alg) { case QCE_HASH_SHA1: auth_cfg = pce_dev->reg.auth_cfg_sha1; diglen = SHA1_DIGEST_SIZE; sha1 = true; break; case QCE_HASH_SHA1_HMAC: auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1; diglen = SHA1_DIGEST_SIZE; sha1 = true; break; case QCE_HASH_SHA256: auth_cfg = pce_dev->reg.auth_cfg_sha256; diglen = SHA256_DIGEST_SIZE; break; case QCE_HASH_SHA256_HMAC: auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256; diglen = SHA256_DIGEST_SIZE; break; default: return -EINVAL; } /* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */ if (sreq->first_blk) { if (sha1) { for (i = 0; i < 5; i++) auth32[i] = _std_init_vector_sha1[i]; } else { for (i = 0; i < 8; i++) auth32[i] = _std_init_vector_sha256[i]; } } else { _byte_stream_to_net_words(auth32, sreq->digest, diglen); } /* Set auth_ivn, auth_keyn registers */ for (i = 0; i < 5; i++) writel_relaxed(auth32[i], (pce_dev->iobase + (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)))); if ((sreq->alg == QCE_HASH_SHA256) || (sreq->alg == QCE_HASH_SHA256_HMAC)) { for (i = 5; i < 8; i++) writel_relaxed(auth32[i], (pce_dev->iobase + (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)))); } /* write auth_bytecnt 0/1/2/3, start with 0 */ for (i = 0; i < 2; i++) 
writel_relaxed(sreq->auth_data[i], pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG + i * sizeof(uint32_t)); /* Set/reset last bit in CFG register */ if (sreq->last_blk) auth_cfg |= 1 << CRYPTO_LAST; else auth_cfg &= ~(1 << CRYPTO_LAST); if (sreq->first_blk) auth_cfg |= 1 << CRYPTO_FIRST; else auth_cfg &= ~(1 << CRYPTO_FIRST); if (use_hw_key) auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH; if (use_pipe_key) auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH; go_proc: /* write seg_cfg */ writel_relaxed(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); /* write auth seg_size */ writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG); /* write auth_seg_start */ writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG); /* reset encr seg_cfg */ writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG); /* write seg_size */ writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG); writel_relaxed(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase + CRYPTO_CONFIG_REG)); /* issue go to crypto */ if (use_hw_key == false) writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)), pce_dev->iobase + CRYPTO_GOPROC_REG); else writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)), pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG); /* * Ensure previous instructions (setting the GO register) * was completed before issuing a DMA transfer request */ mb(); return 0; } static int _ce_setup_aead_direct(struct qce_device *pce_dev, struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset) { int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t); int i; uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0}; uint32_t a_cfg; uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0}; uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0}; uint32_t enck_size_in_word = 0; uint32_t enciv_in_word; uint32_t key_size; uint32_t ivsize = q_req->ivsize; uint32_t encr_cfg; /* clear status */ writel_relaxed(0, pce_dev->iobase + CRYPTO_STATUS_REG); 
writel_relaxed(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase + CRYPTO_CONFIG_REG)); /* * Ensure previous instructions (setting the CONFIG register) * was completed before issuing starting to set other config register * This is to ensure the configurations are done in correct endian-ness * as set in the CONFIG registers */ mb(); key_size = q_req->encklen; enck_size_in_word = key_size/sizeof(uint32_t); switch (q_req->alg) { case CIPHER_ALG_DES: switch (q_req->mode) { case QCE_MODE_ECB: encr_cfg = pce_dev->reg.encr_cfg_des_ecb; break; case QCE_MODE_CBC: encr_cfg = pce_dev->reg.encr_cfg_des_cbc; break; default: return -EINVAL; } enciv_in_word = 2; break; case CIPHER_ALG_3DES: switch (q_req->mode) { case QCE_MODE_ECB: encr_cfg = pce_dev->reg.encr_cfg_3des_ecb; break; case QCE_MODE_CBC: encr_cfg = pce_dev->reg.encr_cfg_3des_cbc; break; default: return -EINVAL; } enciv_in_word = 2; break; case CIPHER_ALG_AES: switch (q_req->mode) { case QCE_MODE_ECB: if (key_size == AES128_KEY_SIZE) encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128; else if (key_size == AES256_KEY_SIZE) encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256; else return -EINVAL; break; case QCE_MODE_CBC: if (key_size == AES128_KEY_SIZE) encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128; else if (key_size == AES256_KEY_SIZE) encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256; else return -EINVAL; break; default: return -EINVAL; } enciv_in_word = 4; break; default: return -EINVAL; } pce_dev->mode = q_req->mode; /* write CNTR0_IV0_REG */ if (q_req->mode != QCE_MODE_ECB) { _byte_stream_to_net_words(enciv32, q_req->iv, ivsize); for (i = 0; i < enciv_in_word; i++) writel_relaxed(enciv32[i], pce_dev->iobase + (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t))); } /* * write encr key * do not use hw key or pipe key */ _byte_stream_to_net_words(enckey32, q_req->enckey, key_size); for (i = 0; i < enck_size_in_word; i++) writel_relaxed(enckey32[i], pce_dev->iobase + (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))); /* write encr seg cfg */ if 
(q_req->dir == QCE_ENCRYPT) encr_cfg |= (1 << CRYPTO_ENCODE); writel_relaxed(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG); /* we only support sha1-hmac at this point */ _byte_stream_to_net_words(mackey32, q_req->authkey, q_req->authklen); for (i = 0; i < authk_size_in_word; i++) writel_relaxed(mackey32[i], pce_dev->iobase + (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t))); for (i = 0; i < 5; i++) writel_relaxed(_std_init_vector_sha1[i], pce_dev->iobase + (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))); /* write auth_bytecnt 0/1, start with 0 */ writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG); writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG); /* write encr seg size */ writel_relaxed(q_req->cryptlen, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG); /* write encr start */ writel_relaxed(coffset & 0xffff, pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG); a_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE) | (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) | (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) | (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG); if (q_req->dir == QCE_ENCRYPT) a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS); else a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); /* write auth seg_cfg */ writel_relaxed(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); /* write auth seg_size */ writel_relaxed(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG); /* write auth_seg_start */ writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG); /* write seg_size */ writel_relaxed(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG); writel_relaxed(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase + CRYPTO_CONFIG_REG)); /* issue go to crypto */ writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)), pce_dev->iobase + CRYPTO_GOPROC_REG); /* * Ensure previous instructions (setting the GO register) * was completed before issuing a DMA transfer request */ mb(); return 0; }; static int _ce_setup_cipher_direct(struct qce_device *pce_dev, struct 
qce_req *creq, uint32_t totallen_in, uint32_t coffset) { uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = { 0, 0, 0, 0}; uint32_t enck_size_in_word = 0; uint32_t key_size; bool use_hw_key = false; bool use_pipe_key = false; uint32_t encr_cfg = 0; uint32_t ivsize = creq->ivsize; int i; /* clear status */ writel_relaxed(0, pce_dev->iobase + CRYPTO_STATUS_REG); writel_relaxed(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase + CRYPTO_CONFIG_REG)); /* * Ensure previous instructions (setting the CONFIG register) * was completed before issuing starting to set other config register * This is to ensure the configurations are done in correct endian-ness * as set in the CONFIG registers */ mb(); if (creq->mode == QCE_MODE_XTS) key_size = creq->encklen/2; else key_size = creq->encklen; if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) { use_hw_key = true; } else { if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) == QCRYPTO_CTX_USE_PIPE_KEY) use_pipe_key = true; } if ((use_pipe_key == false) && (use_hw_key == false)) { _byte_stream_to_net_words(enckey32, creq->enckey, key_size); enck_size_in_word = key_size/sizeof(uint32_t); } if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) { uint32_t authklen32 = creq->encklen/sizeof(uint32_t); uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t); uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0}; uint32_t auth_cfg = 0; /* Clear auth_ivn, auth_keyn registers */ for (i = 0; i < 16; i++) { writel_relaxed(0, (pce_dev->iobase + (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)))); writel_relaxed(0, (pce_dev->iobase + (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)))); } /* write auth_bytecnt 0/1/2/3, start with 0 */ for (i = 0; i < 4; i++) writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG + i * sizeof(uint32_t)); /* write nonce */ _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE); for (i = 
0; i < noncelen32; i++)
			writel_relaxed(nonce32[i], pce_dev->iobase +
				CRYPTO_AUTH_INFO_NONCE0_REG +
				(i*sizeof(uint32_t)));
		/*
		 * Pick the CCM auth config for the auth key size; any other
		 * key length leaves auth_cfg at 0.
		 */
		if (creq->authklen == AES128_KEY_SIZE)
			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
		else {
			if (creq->authklen == AES256_KEY_SIZE)
				auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
		}
		/* auth position relative to the payload depends on direction */
		if (creq->dir == QCE_ENCRYPT)
			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
		else
			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
		if (use_hw_key == true)  {
			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
		} else {
			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
			/*
			 * write auth key - the CCM auth key registers are
			 * loaded from enckey32 (the cipher key words)
			 */
			for (i = 0; i < authklen32; i++)
				writel_relaxed(enckey32[i], pce_dev->iobase +
					CRYPTO_AUTH_KEY0_REG +
					(i*sizeof(uint32_t)));
		}
		writel_relaxed(auth_cfg, pce_dev->iobase +
						CRYPTO_AUTH_SEG_CFG_REG);
		/*
		 * auth segment covers the whole input on encrypt; on decrypt
		 * the trailing MAC bytes are excluded
		 */
		if (creq->dir == QCE_ENCRYPT)
			writel_relaxed(totallen_in, pce_dev->iobase +
						CRYPTO_AUTH_SEG_SIZE_REG);
		else
			writel_relaxed((totallen_in - creq->authsize),
				pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
	} else {
		if (creq->op != QCE_REQ_AEAD)
			writel_relaxed(0, pce_dev->iobase +
						CRYPTO_AUTH_SEG_CFG_REG);
	}
	/*
	 * Ensure previous instructions (write to all AUTH registers)
	 * was completed before accessing a register that is not
	 * in the same 1K range.
*/
	mb();

	/* select AES encr config by mode and key size (default: CTR) */
	switch (creq->mode) {
	case QCE_MODE_ECB:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
		break;
	case QCE_MODE_CBC:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
		break;
	case QCE_MODE_XTS:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
		break;
	case QCE_MODE_CCM:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
		break;
	case QCE_MODE_CTR:
	default:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
		break;
	}
	pce_dev->mode = creq->mode;

	switch (creq->alg) {
	case CIPHER_ALG_DES:
		if (creq->mode !=  QCE_MODE_ECB) {
			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
		} else {
			encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
		}
		if (use_hw_key == false) {
			writel_relaxed(enckey32[0], pce_dev->iobase +
							CRYPTO_ENCR_KEY0_REG);
			writel_relaxed(enckey32[1], pce_dev->iobase +
							CRYPTO_ENCR_KEY1_REG);
		}
		break;
	case CIPHER_ALG_3DES:
		if (creq->mode !=  QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
		} else {
			encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
		}
		if (use_hw_key == false) {
			/*
			 * write encr key
			 * BUG FIX: was enckey32[0] for every register, which
			 * programmed the first key word into all six
			 * CRYPTO_ENCR_KEYn registers and so loaded a wrong
			 * 3DES key.  The cmdlist path (_ce_setup_cipher)
			 * correctly uses enckey32[i]; match it here.
			 */
			for (i = 0; i < 6; i++)
				writel_relaxed(enckey32[i], (pce_dev->iobase +
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
		}
		break;
	case CIPHER_ALG_AES:
	default:
		if (creq->mode ==  QCE_MODE_XTS) {
			uint32_t
xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] = {0, 0, 0, 0, 0, 0, 0, 0}; uint32_t xtsklen = creq->encklen/(2 * sizeof(uint32_t)); if ((use_hw_key == false) && (use_pipe_key == false)) { _byte_stream_to_net_words(xtskey32, (creq->enckey + creq->encklen/2), creq->encklen/2); /* write xts encr key */ for (i = 0; i < xtsklen; i++) writel_relaxed(xtskey32[i], pce_dev->iobase + CRYPTO_ENCR_XTS_KEY0_REG + (i * sizeof(uint32_t))); } /* write xts du size */ switch (creq->flags & QCRYPTO_CTX_XTS_MASK) { case QCRYPTO_CTX_XTS_DU_SIZE_512B: writel_relaxed( min((uint32_t)QCE_SECTOR_SIZE, creq->cryptlen), pce_dev->iobase + CRYPTO_ENCR_XTS_DU_SIZE_REG); break; case QCRYPTO_CTX_XTS_DU_SIZE_1KB: writel_relaxed( min((uint32_t)(QCE_SECTOR_SIZE * 2), creq->cryptlen), pce_dev->iobase + CRYPTO_ENCR_XTS_DU_SIZE_REG); break; default: writel_relaxed(creq->cryptlen, pce_dev->iobase + CRYPTO_ENCR_XTS_DU_SIZE_REG); break; } } if (creq->mode != QCE_MODE_ECB) { if (creq->mode == QCE_MODE_XTS) _byte_stream_swap_to_net_words(enciv32, creq->iv, ivsize); else _byte_stream_to_net_words(enciv32, creq->iv, ivsize); /* write encr cntr iv */ for (i = 0; i <= 3; i++) writel_relaxed(enciv32[i], pce_dev->iobase + CRYPTO_CNTR0_IV0_REG + (i * sizeof(uint32_t))); if (creq->mode == QCE_MODE_CCM) { /* write cntr iv for ccm */ for (i = 0; i <= 3; i++) writel_relaxed(enciv32[i], pce_dev->iobase + CRYPTO_ENCR_CCM_INT_CNTR0_REG + (i * sizeof(uint32_t))); /* update cntr_iv[3] by one */ writel_relaxed((enciv32[3] + 1), pce_dev->iobase + CRYPTO_CNTR0_IV0_REG + (3 * sizeof(uint32_t))); } } if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) { encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ); } else { if ((use_hw_key == false) && (use_pipe_key == false)) { for (i = 0; i < enck_size_in_word; i++) writel_relaxed(enckey32[i], pce_dev->iobase + CRYPTO_ENCR_KEY0_REG + (i * sizeof(uint32_t))); } } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */ break; } /* end of switch (creq->mode) */ if (use_pipe_key) 
		encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
					<< CRYPTO_USE_PIPE_KEY_ENCR);

	/* fold direction (encode/decode) into the encr seg cfg */
	encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;

	if (use_hw_key == true)
		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
	else
		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
	/* write encr seg cfg */
	writel_relaxed(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/*
	 * write encr seg size; for CCM decrypt the MAC trailer is part of
	 * the data handed to the engine, so include authsize
	 */
	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
		writel_relaxed((creq->cryptlen + creq->authsize),
				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
	else
		writel_relaxed(creq->cryptlen,
				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);

	/* write encr seg start */
	writel_relaxed((coffset & 0xffff), pce_dev->iobase +
						CRYPTO_ENCR_SEG_START_REG);
	/* write encr counter mask (comment was a mislabelled duplicate) */
	writel_relaxed(0xffffffff, pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
	/* write seg size */
	writel_relaxed(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* restore little endian configuration before kicking the engine */
	writel_relaxed(pce_dev->reg.crypto_cfg_le,
				(pce_dev->iobase + CRYPTO_CONFIG_REG));
	/* issue go to crypto */
	if (use_hw_key == false)
		writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
				pce_dev->iobase + CRYPTO_GOPROC_REG);
	else
		writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
				pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
};

/*
 * Program the crypto engine registers directly (no command descriptors)
 * for an OTA f9 (integrity) operation: Kasumi or Snow3G keyed MAC over
 * req->msize bytes.  Returns 0.
 */
static int _ce_f9_setup_direct(struct qce_device *pce_dev,
				 struct qce_f9_req *req)
{
	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
	uint32_t auth_cfg;
	int i;

	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		auth_cfg = pce_dev->reg.auth_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		auth_cfg = pce_dev->reg.auth_cfg_snow3g;
		break;
	};

	/* clear status */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_STATUS_REG);

	/* set big endian configuration */
	writel_relaxed(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/*
	 * Ensure previous instructions (setting the CONFIG register)
	 * was completed before issuing starting to set other config register
	 * This is to ensure the configurations are done in correct endian-ness
	 * as set in the CONFIG registers
	 */
	mb();

	/* write enc_seg_cfg */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
	/* write ecn_seg_size */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);

	/* write key in CRYPTO_AUTH_IV0-3_REG */
	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
	for (i = 0; i < key_size_in_word; i++)
		writel_relaxed(ikey32[i], (pce_dev->iobase +
			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));

	/* write last bits in CRYPTO_AUTH_IV4_REG */
	writel_relaxed(req->last_bits, (pce_dev->iobase +
					CRYPTO_AUTH_IV4_REG));

	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
	writel_relaxed(req->fresh, (pce_dev->iobase +
					CRYPTO_AUTH_BYTECNT0_REG));

	/* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
	writel_relaxed(req->count_i, (pce_dev->iobase +
					CRYPTO_AUTH_BYTECNT1_REG));

	/* write auth seg cfg */
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		auth_cfg |= BIT(CRYPTO_F9_DIRECTION);
	writel_relaxed(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write auth seg size */
	writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write auth seg start*/
	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);

	/* write seg size */
	writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* set little endian configuration before go*/
	writel_relaxed(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/* write go */
	writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
				pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
}

/*
 * Program the crypto engine registers directly for an OTA f8 (ciphering)
 * operation: Kasumi or Snow3G stream cipher over cipher_size bytes at
 * cipher_offset within a req->data_len byte segment.
 */
static int _ce_f8_setup_direct(struct qce_device *pce_dev, struct
		qce_f8_req *req, bool key_stream_mode,
		uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size)
{
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);

	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		encr_cfg = pce_dev->reg.encr_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		encr_cfg = pce_dev->reg.encr_cfg_snow3g;
		break;
	};
	/* clear status */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_STATUS_REG);
	/*
	 * set big endian configuration
	 * NOTE(review): unlike _ce_f9_setup_direct() there is no mb() after
	 * this CONFIG write before the register writes below - confirm
	 * whether the barrier is required here as well.
	 */
	writel_relaxed(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/* write auth seg configuration */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
	/* write auth seg size */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write key */
	_byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
	for (i = 0; i < key_size_in_word; i++)
		writel_relaxed(ckey32[i], (pce_dev->iobase +
			(CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t))));

	/* write encr seg cfg */
	if (key_stream_mode)
		encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		encr_cfg |= BIT(CRYPTO_F8_DIRECTION);
	writel_relaxed(encr_cfg, pce_dev->iobase +
						CRYPTO_ENCR_SEG_CFG_REG);

	/* write encr seg start */
	writel_relaxed((cipher_offset & 0xffff), pce_dev->iobase +
						CRYPTO_ENCR_SEG_START_REG);
	/* write encr seg size */
	writel_relaxed(cipher_size, pce_dev->iobase +
						CRYPTO_ENCR_SEG_SIZE_REG);

	/* write seg size */
	writel_relaxed(req->data_len, pce_dev->iobase +
						CRYPTO_SEG_SIZE_REG);

	/* write cntr0_iv0 for countC */
	writel_relaxed(req->count_c, pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
	/* write cntr1_iv1 for nPkts, and bearer; a single packet encodes as 0 */
	if (npkts == 1)
		npkts = 0;
	writel_relaxed(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
			npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
			pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);

	/* set little endian configuration before go*/
	writel_relaxed(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/* write go */
	writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
				pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
}

/*
 * Release the BAM pipe locks held by this context so other execution
 * environments can use the crypto engine.  No-op when command
 * descriptors are not supported.  Returns 0 on success, -EINVAL on
 * sps transfer failure.
 */
static int _qce_unlock_other_pipes(struct qce_device *pce_dev)
{
	int rc = 0;

	if (pce_dev->support_cmd_dscr == false)
		return rc;

	pce_dev->ce_sps.consumer.event.callback = NULL;
	rc = sps_transfer_one(pce_dev->ce_sps.consumer.pipe,
	GET_PHYS_ADDR(pce_dev->ce_sps.cmdlistptr.unlock_all_pipes.cmdlist),
	0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
	if (rc) {
		pr_err("sps_xfr_one() fail rc=%d", rc);
		rc = -EINVAL;
	}
	return rc;
}

/*
 * Completion handler for an AEAD request: unmap DMA buffers, read the
 * engine status, unlock shared pipes, then invoke the client callback
 * with the computed MAC (and, for non-CCM modes, the result IV).
 */
static int _aead_complete(struct qce_device *pce_dev)
{
	struct aead_request *areq;
	unsigned char mac[SHA256_DIGEST_SIZE];
	uint32_t status;
	int32_t result_status;

	areq = (struct aead_request *) pce_dev->areq;
	if (areq->src != areq->dst) {
		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
					DMA_FROM_DEVICE);
	}
	qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	qce_dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
			DMA_TO_DEVICE);
	/* check MAC */
	memcpy(mac, (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
						SHA256_DIGEST_SIZE);

	/* read status before unlock */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (_qce_unlock_other_pipes(pce_dev))
		return -EINVAL;

	/*
	 * Don't use result dump status. The operation may not
	 * be complete.
	 * Instead, use the status we just read of device.
	 * In case, we need to use result_status from result
	 * dump the result_status needs to be byte swapped,
	 * since we set the device to little endian.
	 */
	result_status = 0;
	pce_dev->ce_sps.result->status = 0;

	if (status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
			| (1 << CRYPTO_HSD_ERR))) {
		pr_err("aead operation error. Status %x\n", status);
		result_status = -ENXIO;
	} else if (pce_dev->ce_sps.consumer_status |
			pce_dev->ce_sps.producer_status) {
		pr_err("aead sps operation error. sps status %x %x\n",
				pce_dev->ce_sps.consumer_status,
				pce_dev->ce_sps.producer_status);
		result_status = -ENXIO;
	} else if ((status & (1 << CRYPTO_OPERATION_DONE)) == 0) {
		pr_err("aead operation not done? Status %x, sps status %x %x\n",
			status,
			pce_dev->ce_sps.consumer_status,
			pce_dev->ce_sps.producer_status);
		result_status = -ENXIO;
	}

	if (pce_dev->mode == QCE_MODE_CCM) {
		/* for CCM, a hardware MAC mismatch surfaces as -EBADMSG */
		if (result_status == 0 && (status & (1 << CRYPTO_MAC_FAILED)))
			result_status = -EBADMSG;
		pce_dev->qce_cb(areq, mac, NULL, result_status);
	} else {
		uint32_t ivsize = 0;
		struct crypto_aead *aead;
		unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];

		aead = crypto_aead_reqtfm(areq);
		ivsize = crypto_aead_ivsize(aead);
		if (pce_dev->ce_sps.minor_version != 0)
			dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
				ivsize, DMA_TO_DEVICE);
		memcpy(iv, (char *)(pce_dev->ce_sps.result->encr_cntr_iv),
			sizeof(iv));
		pce_dev->qce_cb(areq, mac, iv, result_status);
	}
	return 0;
};

/*
 * Completion handler for a SHA/HMAC request: unmap the source buffer,
 * extract digest and byte count from the result dump, validate engine
 * status, and invoke the client callback.
 */
static int _sha_complete(struct qce_device *pce_dev)
{
	struct ahash_request *areq;
	unsigned char digest[SHA256_DIGEST_SIZE];
	uint32_t bytecount32[2];
	int32_t result_status = pce_dev->ce_sps.result->status;
	uint32_t status;

	areq = (struct ahash_request *) pce_dev->areq;
	qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				DMA_TO_DEVICE);
	memcpy(digest, (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
						SHA256_DIGEST_SIZE);
	_byte_stream_to_net_words(bytecount32,
		(unsigned char *)pce_dev->ce_sps.result->auth_byte_count,
					2 * CRYPTO_REG_SIZE);

	/* read status before unlock */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (_qce_unlock_other_pipes(pce_dev))
		return -EINVAL;

	/*
	 * Don't use result dump status. The operation may not be complete.
	 * Instead, use the status we just read of device.
	 * In case, we need to use result_status from result
	 * dump the result_status needs to be byte swapped,
	 * since we set the device to little endian.
	 */
	if (status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
			| (1 << CRYPTO_HSD_ERR))) {
		pr_err("sha operation error. Status %x\n", status);
		result_status = -ENXIO;
	} else if (pce_dev->ce_sps.consumer_status) {
		pr_err("sha sps operation error. sps status %x\n",
			pce_dev->ce_sps.consumer_status);
		result_status = -ENXIO;
	} else if ((status & (1 << CRYPTO_OPERATION_DONE)) == 0) {
		pr_err("sha operation not done? Status %x, sps status %x\n",
			status, pce_dev->ce_sps.consumer_status);
		result_status = -ENXIO;
	} else {
		result_status = 0;
	}
	pce_dev->qce_cb(areq, digest, (char *)bytecount32, result_status);
	return 0;
};

/*
 * Completion handler for an OTA f9 (integrity) request: unmap the
 * source, pull the 32-bit MAC-I out of the result dump, validate engine
 * status, and hand the MAC to the client callback.
 */
static int _f9_complete(struct qce_device *pce_dev)
{
	uint32_t mac_i;
	uint32_t status;
	int32_t result_status;

	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				pce_dev->ota_size, DMA_TO_DEVICE);
	_byte_stream_to_net_words(&mac_i,
		(char *)(&pce_dev->ce_sps.result->auth_iv[0]),
		CRYPTO_REG_SIZE);

	/* read status before unlock */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (_qce_unlock_other_pipes(pce_dev)) {
		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
		return -ENXIO;
	}
	if (status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
			| (1 << CRYPTO_HSD_ERR))) {
		pr_err("f9 operation error. Status %x\n", status);
		result_status = -ENXIO;
	} else if (pce_dev->ce_sps.consumer_status |
			pce_dev->ce_sps.producer_status) {
		pr_err("f9 sps operation error. sps status %x %x\n",
			pce_dev->ce_sps.consumer_status,
			pce_dev->ce_sps.producer_status);
		result_status = -ENXIO;
	} else if ((status & (1 << CRYPTO_OPERATION_DONE)) == 0) {
		pr_err("f9 operation not done? Status %x, sps status %x %x\n",
			status,
			pce_dev->ce_sps.consumer_status,
			pce_dev->ce_sps.producer_status);
		result_status = -ENXIO;
	} else {
		result_status = 0;
	}
	pce_dev->qce_cb(pce_dev->areq, (void *) mac_i, NULL, result_status);

	return 0;
}

/*
 * Completion handler for an ablkcipher request.  On legacy hardware
 * (ce_sps.minor_version == 0) the next-request IV must be reconstructed
 * in software: CBC takes it from the last ciphertext block (or the saved
 * decrypt IV), while CTR/XTS advance the 32-bit big-endian counter held
 * in iv[12..15] by the number of blocks processed.
 */
static int _ablk_cipher_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
	uint32_t status;
	int32_t result_status;

	areq = (struct ablkcipher_request *) pce_dev->areq;

	if (areq->src != areq->dst) {
		qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
			pce_dev->dst_nents, DMA_FROM_DEVICE);
	}
	qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);

	/* read status before unlock */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (_qce_unlock_other_pipes(pce_dev))
		return -EINVAL;

	/*
	 * Don't use result dump status. The operation may not be complete.
	 * Instead, use the status we just read of device.
	 * In case, we need to use result_status from result
	 * dump the result_status needs to be byte swapped,
	 * since we set the device to little endian.
	 */
	if (status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
			| (1 << CRYPTO_HSD_ERR))) {
		pr_err("ablk_cipher operation error. Status %x\n", status);
		result_status = -ENXIO;
	} else if (pce_dev->ce_sps.consumer_status |
			pce_dev->ce_sps.producer_status) {
		pr_err("ablk_cipher sps operation error. sps status %x %x\n",
				pce_dev->ce_sps.consumer_status,
				pce_dev->ce_sps.producer_status);
		result_status = -ENXIO;
	} else if ((status & (1 << CRYPTO_OPERATION_DONE)) == 0) {
		pr_err("ablk_cipher operation not done? Status %x, sps status %x %x\n",
			status,
			pce_dev->ce_sps.consumer_status,
			pce_dev->ce_sps.producer_status);
		result_status = -ENXIO;
	} else {
		result_status = 0;
	}

	if (pce_dev->mode == QCE_MODE_ECB) {
		pce_dev->qce_cb(areq, NULL, NULL,
				pce_dev->ce_sps.consumer_status |
				result_status);
	} else {
		if (pce_dev->ce_sps.minor_version == 0) {
			if (pce_dev->mode == QCE_MODE_CBC) {
				if (pce_dev->dir == QCE_DECRYPT)
					memcpy(iv, (char *)pce_dev->dec_iv,
								sizeof(iv));
				else
					memcpy(iv, (unsigned char *)
						(sg_virt(areq->src) +
						areq->src->length - 16),
						sizeof(iv));
			}
			if ((pce_dev->mode == QCE_MODE_CTR) ||
				(pce_dev->mode == QCE_MODE_XTS)) {
				uint32_t num_blk = 0;
				uint32_t cntr_iv3 = 0;
				unsigned long long cntr_iv64 = 0;
				unsigned char *b = (unsigned char *)(&cntr_iv3);

				memcpy(iv, areq->info, sizeof(iv));
				if (pce_dev->mode != QCE_MODE_XTS)
					num_blk = areq->nbytes/16;
				else
					num_blk = 1;
				/* assemble big-endian counter from iv[12..15] */
				cntr_iv3 = ((*(iv + 12) << 24) & 0xff000000) |
					(((*(iv + 13)) << 16) & 0xff0000) |
					(((*(iv + 14)) << 8) & 0xff00) |
					(*(iv + 15) & 0xff);
				/* advance by num_blk modulo 2^32 */
				cntr_iv64 =
					(((unsigned long long)cntr_iv3 &
					(unsigned long long)0xFFFFFFFFULL) +
					(unsigned long long)num_blk) %
					(unsigned long long)(0x100000000ULL);
				cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF);
				/* store back big-endian */
				*(iv + 15) = (char)(*b);
				*(iv + 14) = (char)(*(b + 1));
				*(iv + 13) = (char)(*(b + 2));
				*(iv + 12) = (char)(*(b + 3));
			}
		} else {
			memcpy(iv,
				(char *)(pce_dev->ce_sps.result->encr_cntr_iv),
				sizeof(iv));
		}
		pce_dev->qce_cb(areq, NULL, iv, result_status);
	}
	return 0;
};

/*
 * Completion handler for an OTA f8 (ciphering) request: unmap source and
 * optional destination buffers, validate engine status, and invoke the
 * client callback.
 */
static int _f8_complete(struct qce_device *pce_dev)
{
	uint32_t status;
	int32_t result_status;

	if (pce_dev->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
				pce_dev->ota_size, DMA_FROM_DEVICE);
	if (pce_dev->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				pce_dev->ota_size, (pce_dev->phy_ota_dst) ?
				DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	/* read status before unlock */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (_qce_unlock_other_pipes(pce_dev)) {
		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
		return -ENXIO;
	}
	if (status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
			| (1 << CRYPTO_HSD_ERR))) {
		pr_err("f8 operation error. Status %x\n", status);
		result_status = -ENXIO;
	} else if (pce_dev->ce_sps.consumer_status |
			pce_dev->ce_sps.producer_status) {
		pr_err("f8 sps operation error. sps status %x %x\n",
			pce_dev->ce_sps.consumer_status,
			pce_dev->ce_sps.producer_status);
		result_status = -ENXIO;
	} else if ((status & (1 << CRYPTO_OPERATION_DONE)) == 0) {
		pr_err("f8 operation not done? Status %x, sps status %x %x\n",
			status,
			pce_dev->ce_sps.consumer_status,
			pce_dev->ce_sps.producer_status);
		result_status = -ENXIO;
	} else {
		result_status = 0;
	}
	pce_dev->qce_cb(pce_dev->areq, NULL, NULL, result_status);
	return 0;
}

#ifdef QCE_DEBUG
/*
 * Debug dump of the consumer and producer SPS descriptor FIFOs; command
 * descriptors are expanded into their register/value element pairs.
 */
static void _qce_dump_descr_fifos(struct qce_device *pce_dev)
{
	int i, j, ents;
	struct sps_iovec *iovec = pce_dev->ce_sps.in_transfer.iovec;
	uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;

	printk(KERN_INFO "==============================================\n");
	printk(KERN_INFO "CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
	printk(KERN_INFO "==============================================\n");
	for (i = 0; i < pce_dev->ce_sps.in_transfer.iovec_count; i++) {
		printk(KERN_INFO " [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
				iovec->addr, iovec->size, iovec->flags);
		if (iovec->flags & cmd_flags) {
			struct sps_command_element *pced;

			pced = (struct sps_command_element *)
					(GET_VIRT_ADDR(iovec->addr));
			ents = iovec->size/(sizeof(struct sps_command_element));
			for (j = 0; j < ents; j++) {
				printk(KERN_INFO " [%d] [0x%x] 0x%x\n", j,
					pced->addr, pced->data);
				pced++;
			}
		}
		iovec++;
	}

	printk(KERN_INFO "==============================================\n");
	printk(KERN_INFO "PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
	printk(KERN_INFO
"==============================================\n"); iovec = pce_dev->ce_sps.out_transfer.iovec; for (i = 0; i < pce_dev->ce_sps.out_transfer.iovec_count; i++) { printk(KERN_INFO " [%d] addr=0x%x size=0x%x flags=0x%x\n", i, iovec->addr, iovec->size, iovec->flags); iovec++; } } #else static void _qce_dump_descr_fifos(struct qce_device *pce_dev) { } #endif static void _qce_dump_descr_fifos_fail(struct qce_device *pce_dev) { int i, j, ents; struct sps_iovec *iovec = pce_dev->ce_sps.in_transfer.iovec; uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD; printk(KERN_INFO "==============================================\n"); printk(KERN_INFO "CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n"); printk(KERN_INFO "==============================================\n"); for (i = 0; i < pce_dev->ce_sps.in_transfer.iovec_count; i++) { printk(KERN_INFO " [%d] addr=0x%x size=0x%x flags=0x%x\n", i, iovec->addr, iovec->size, iovec->flags); if (iovec->flags & cmd_flags) { struct sps_command_element *pced; pced = (struct sps_command_element *) (GET_VIRT_ADDR(iovec->addr)); ents = iovec->size/(sizeof(struct sps_command_element)); for (j = 0; j < ents; j++) { printk(KERN_INFO " [%d] [0x%x] 0x%x\n", j, pced->addr, pced->data); pced++; } } iovec++; } printk(KERN_INFO "==============================================\n"); printk(KERN_INFO "PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n"); printk(KERN_INFO "==============================================\n"); iovec = pce_dev->ce_sps.out_transfer.iovec; for (i = 0; i < pce_dev->ce_sps.out_transfer.iovec_count; i++) { printk(KERN_INFO " [%d] addr=0x%x size=0x%x flags=0x%x\n", i, iovec->addr, iovec->size, iovec->flags); iovec++; } } static void _qce_sps_iovec_count_init(struct qce_device *pce_dev) { pce_dev->ce_sps.in_transfer.iovec_count = 0; pce_dev->ce_sps.out_transfer.iovec_count = 0; } static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag) { struct sps_iovec *iovec; if (sps_bam_pipe->iovec_count == 0) return; iovec = sps_bam_pipe->iovec + 
(sps_bam_pipe->iovec_count - 1); iovec->flags |= flag; } static int _qce_sps_add_data(uint32_t addr, uint32_t len, struct sps_transfer *sps_bam_pipe) { struct sps_iovec *iovec = sps_bam_pipe->iovec + sps_bam_pipe->iovec_count; uint32_t data_cnt; while (len > 0) { if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) { pr_err("Num of descrptor %d exceed max (%d)", sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR); return -ENOMEM; } if (len > SPS_MAX_PKT_SIZE) data_cnt = SPS_MAX_PKT_SIZE; else data_cnt = len; iovec->size = data_cnt; iovec->addr = addr; iovec->flags = 0; sps_bam_pipe->iovec_count++; iovec++; addr += data_cnt; len -= data_cnt; } return 0; } static int _qce_sps_add_sg_data(struct qce_device *pce_dev, struct scatterlist *sg_src, uint32_t nbytes, struct sps_transfer *sps_bam_pipe) { uint32_t addr, data_cnt, len; struct sps_iovec *iovec = sps_bam_pipe->iovec + sps_bam_pipe->iovec_count; while (nbytes > 0) { len = min(nbytes, sg_dma_len(sg_src)); nbytes -= len; addr = sg_dma_address(sg_src); if (pce_dev->ce_sps.minor_version == 0) len = ALIGN(len, pce_dev->ce_sps.ce_burst_size); while (len > 0) { if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) { pr_err("Num of descrptor %d exceed max (%d)", sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR); return -ENOMEM; } if (len > SPS_MAX_PKT_SIZE) { data_cnt = SPS_MAX_PKT_SIZE; iovec->size = data_cnt; iovec->addr = addr; iovec->flags = 0; } else { data_cnt = len; iovec->size = data_cnt; iovec->addr = addr; iovec->flags = 0; } iovec++; sps_bam_pipe->iovec_count++; addr += data_cnt; len -= data_cnt; } sg_src = scatterwalk_sg_next(sg_src); } return 0; } static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag, struct qce_cmdlist_info *cmdptr, struct sps_transfer *sps_bam_pipe) { struct sps_iovec *iovec = sps_bam_pipe->iovec + sps_bam_pipe->iovec_count; iovec->size = cmdptr->size; iovec->addr = GET_PHYS_ADDR(cmdptr->cmdlist); iovec->flags = SPS_IOVEC_FLAG_CMD | flag; sps_bam_pipe->iovec_count++; 
	return 0;
}

/*
 * Submit the queued consumer (in) and producer (out) descriptor lists
 * to the SPS driver.  On a consumer-side failure the FIFOs are dumped
 * for diagnosis.  Returns 0 on success or the sps_transfer() error.
 */
static int _qce_sps_transfer(struct qce_device *pce_dev)
{
	int rc = 0;

	_qce_dump_descr_fifos(pce_dev);
	if (pce_dev->ce_sps.in_transfer.iovec_count) {
		rc = sps_transfer(pce_dev->ce_sps.consumer.pipe,
					&pce_dev->ce_sps.in_transfer);
		if (rc) {
			pr_err("sps_xfr() fail (consumer pipe=0x%x) rc = %d,",
				(u32)pce_dev->ce_sps.consumer.pipe, rc);
			_qce_dump_descr_fifos_fail(pce_dev);
			return rc;
		}
	}
	rc = sps_transfer(pce_dev->ce_sps.producer.pipe,
					&pce_dev->ce_sps.out_transfer);
	if (rc) {
		pr_err("sps_xfr() fail (producer pipe=0x%x) rc = %d,",
			(u32)pce_dev->ce_sps.producer.pipe, rc);
		return rc;
	}
	return rc;
}

/**
 * Allocate and Connect a CE peripheral's SPS endpoint
 *
 * This function allocates endpoint context and
 * connect it with memory endpoint by calling
 * appropriate SPS driver APIs.
 *
 * Also registers a SPS callback function with
 * SPS driver
 *
 * This function should only be called once typically
 * during driver probe.
 *
 * @pce_dev - Pointer to qce_device structure
 * @ep - Pointer to sps endpoint data structure
 * @is_produce - 1 means Producer endpoint
 *		 0 means Consumer endpoint
 *
 * @return - 0 if successful else negative value.
 *
 */
static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
				struct qce_sps_ep_conn_data *ep,
				bool is_producer)
{
	int rc = 0;
	struct sps_pipe *sps_pipe_info;
	struct sps_connect *sps_connect_info = &ep->connect;
	struct sps_register_event *sps_event = &ep->event;

	/* Allocate endpoint context */
	sps_pipe_info = sps_alloc_endpoint();
	if (!sps_pipe_info) {
		pr_err("sps_alloc_endpoint() failed!!! is_producer=%d",
			is_producer);
		rc = -ENOMEM;
		goto out;
	}
	/* Now save the sps pipe handle */
	ep->pipe = sps_pipe_info;

	/* Get default connection configuration for an endpoint */
	rc = sps_get_config(sps_pipe_info, sps_connect_info);
	if (rc) {
		pr_err("sps_get_config() fail pipe_handle=0x%x, rc = %d\n",
				(u32)sps_pipe_info, rc);
		goto get_config_err;
	}

	/* Modify the default connection configuration */
	if (is_producer) {
		/*
		 * For CE producer transfer, source should be
		 * CE peripheral where as destination should
		 * be system memory.
		 */
		sps_connect_info->source = pce_dev->ce_sps.bam_handle;
		sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
		/* Producer pipe will handle this connection */
		sps_connect_info->mode = SPS_MODE_SRC;
		sps_connect_info->options =
			SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
	} else {
		/* For CE consumer transfer, source should be
		 * system memory where as destination should
		 * CE peripheral
		 */
		sps_connect_info->source = SPS_DEV_HANDLE_MEM;
		sps_connect_info->destination = pce_dev->ce_sps.bam_handle;
		sps_connect_info->mode = SPS_MODE_DEST;
		sps_connect_info->options =
			SPS_O_AUTO_ENABLE | SPS_O_EOT;
	}

	/* Producer pipe index */
	sps_connect_info->src_pipe_index = pce_dev->ce_sps.src_pipe_index;
	/* Consumer pipe index */
	sps_connect_info->dest_pipe_index = pce_dev->ce_sps.dest_pipe_index;
	/* Set pipe group */
	sps_connect_info->lock_group = pce_dev->ce_sps.pipe_pair_index;
	sps_connect_info->event_thresh = 0x10;
	/*
	 * Max. no of scatter/gather buffers that can
	 * be passed by block layer = 32 (NR_SG).
	 * Each BAM descritor needs 64 bits (8 bytes).
	 * One BAM descriptor is required per buffer transfer.
	 * So we would require total 256 (32 * 8) bytes of descriptor FIFO.
	 * But due to HW limitation we need to allocate atleast one extra
	 * descriptor memory (256 bytes + 8 bytes). But in order to be
	 * in power of 2, we are allocating 512 bytes of memory.
	 */
	sps_connect_info->desc.size = QCE_MAX_NUM_DSCR *
					sizeof(struct sps_iovec);
	sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
					sps_connect_info->desc.size,
					&sps_connect_info->desc.phys_base,
					GFP_KERNEL);
	if (sps_connect_info->desc.base == NULL) {
		rc = -ENOMEM;
		pr_err("Can not allocate coherent memory for sps data\n");
		goto get_config_err;
	}

	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);

	/* Establish connection between peripheral and memory endpoint */
	rc = sps_connect(sps_pipe_info, sps_connect_info);
	if (rc) {
		pr_err("sps_connect() fail pipe_handle=0x%x, rc = %d\n",
				(u32)sps_pipe_info, rc);
		goto sps_connect_err;
	}

	sps_event->mode = SPS_TRIGGER_CALLBACK;
	if (is_producer)
		sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE;
	else
		sps_event->options = SPS_O_EOT;
	sps_event->xfer_done = NULL;
	sps_event->user = (void *)pce_dev;

	pr_debug("success, %s : pipe_handle=0x%x, desc fifo base (phy) = 0x%x\n",
		is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
		(u32)sps_pipe_info, sps_connect_info->desc.phys_base);
	goto out;

sps_connect_err:
	dma_free_coherent(pce_dev->pdev,
			sps_connect_info->desc.size,
			sps_connect_info->desc.base,
			sps_connect_info->desc.phys_base);
get_config_err:
	sps_free_endpoint(sps_pipe_info);
out:
	return rc;
}

/**
 * Disconnect and Deallocate a CE peripheral's SPS endpoint
 *
 * This function disconnect endpoint and deallocates
 * endpoint context.
 *
 * This function should only be called once typically
 * during driver remove.
 *
 * @pce_dev - Pointer to qce_device structure
 * @ep - Pointer to sps endpoint data structure
 *
 */
static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
				struct qce_sps_ep_conn_data *ep)
{
	struct sps_pipe *sps_pipe_info = ep->pipe;
	struct sps_connect *sps_connect_info = &ep->connect;

	sps_disconnect(sps_pipe_info);
	dma_free_coherent(pce_dev->pdev,
			sps_connect_info->desc.size,
			sps_connect_info->desc.base,
			sps_connect_info->desc.phys_base);
	sps_free_endpoint(sps_pipe_info);
}

/*
 * Drop this device's reference on the shared BAM registration; when the
 * last reference goes away, deregister the BAM from the SPS driver,
 * unmap its registers, and free the registration record.  Protected by
 * bam_register_lock.
 */
static void qce_sps_release_bam(struct qce_device *pce_dev)
{
	struct bam_registration_info *pbam;

	mutex_lock(&bam_register_lock);
	pbam = pce_dev->pbam;
	if (pbam == NULL)
		goto ret;

	pbam->cnt--;
	if (pbam->cnt > 0)
		goto ret;

	if (pce_dev->ce_sps.bam_handle) {
		sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
		pr_debug("deregister bam handle %x\n",
					pce_dev->ce_sps.bam_handle);
		pce_dev->ce_sps.bam_handle = 0;
	}
	iounmap(pbam->bam_iobase);
	pr_debug("delete bam 0x%x\n", pbam->bam_mem);
	list_del(&pbam->qlist);
	kfree(pbam);

ret:
	pce_dev->pbam = NULL;
	mutex_unlock(&bam_register_lock);
}

/*
 * Look up (or create) the BAM registration shared by all qce devices on
 * the same BAM block, register the BAM with the SPS driver on first use,
 * and cache the handle/iobase in pce_dev.  Returns 0 or a negative errno.
 */
static int qce_sps_get_bam(struct qce_device *pce_dev)
{
	int rc = 0;
	struct sps_bam_props bam = {0};
	struct bam_registration_info *pbam = NULL;
	struct bam_registration_info *p;
	uint32_t bam_cfg = 0 ;

	mutex_lock(&bam_register_lock);

	list_for_each_entry(p, &qce50_bam_list, qlist) {
		if (p->bam_mem == pce_dev->bam_mem) {
			pbam = p; /* found */
			break;
		}
	}

	if (pbam) {
		/* reuse the existing registration; just bump its refcount */
		pr_debug("found bam 0x%x\n", pbam->bam_mem);
		pbam->cnt++;
		pce_dev->ce_sps.bam_handle = pbam->handle;
		pce_dev->ce_sps.bam_mem = pbam->bam_mem;
		pce_dev->ce_sps.bam_iobase = pbam->bam_iobase;
		pce_dev->pbam = pbam;
		pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
		goto ret;
	}

	pbam = kzalloc(sizeof(struct bam_registration_info), GFP_KERNEL);
	if (!pbam) {
		/*
		 * NOTE(review): PTR_ERR(pbam) is always 0 here since pbam
		 * is NULL, so the printed error code is meaningless.
		 */
		pr_err("qce50 Memory allocation of bam FAIL, error %ld\n",
						PTR_ERR(pbam));
		rc = -ENOMEM;
		goto ret;
	}
	pbam->cnt = 1;
	pbam->bam_mem = pce_dev->bam_mem;
	pbam->bam_iobase = ioremap_nocache(pce_dev->bam_mem,
					pce_dev->bam_mem_size);
	if (!pbam->bam_iobase) {
		kfree(pbam);
		rc = -ENOMEM;
		pr_err("Can not map BAM io memory\n");
		goto ret;
	}
	pce_dev->ce_sps.bam_mem = pbam->bam_mem;
	pce_dev->ce_sps.bam_iobase = pbam->bam_iobase;
	pbam->handle = 0;
	pr_debug("allocate bam 0x%x\n", pbam->bam_mem);
	/* probe the HW for command-descriptor support */
	bam_cfg = readl_relaxed(pce_dev->ce_sps.bam_iobase +
					CRYPTO_BAM_CNFG_BITS_REG);
	pbam->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
					true : false;
	if (pbam->support_cmd_dscr == false) {
		pr_info("qce50 don't support command descriptor. bam_cfg%x\n",
							bam_cfg);
	}
	pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;

	bam.phys_addr = pce_dev->ce_sps.bam_mem;
	bam.virt_addr = pce_dev->ce_sps.bam_iobase;

	/*
	 * This event thresold value is only significant for BAM-to-BAM
	 * transfer. It's ignored for BAM-to-System mode transfer.
	 */
	bam.event_threshold = 0x10;	/* Pipe event threshold */
	/*
	 * This threshold controls when the BAM publish
	 * the descriptor size on the sideband interface.
	 * SPS HW will only be used when
	 * data transfer size > 64 bytes.
	 */
	bam.summing_threshold = 64;
	/* SPS driver wll handle the crypto BAM IRQ */
	bam.irq = (u32)pce_dev->ce_sps.bam_irq;
	/*
	 * Set flag to indicate BAM global device control is managed
	 * remotely.
	 */
	if ((pce_dev->support_cmd_dscr == false) || (pce_dev->is_shared))
		bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
	else
		bam.manage = SPS_BAM_MGR_LOCAL;

	bam.ee = 1;

	pr_debug("bam physical base=0x%x\n", (u32)bam.phys_addr);
	pr_debug("bam virtual base=0x%x\n", (u32)bam.virt_addr);

	/* Register CE Peripheral BAM device to SPS driver */
	rc = sps_register_bam_device(&bam, &pbam->handle);
	if (rc) {
		pr_err("sps_register_bam_device() failed! err=%d", rc);
		rc = -EIO;
		iounmap(pbam->bam_iobase);
		kfree(pbam);
		goto ret;
	}

	pce_dev->pbam = pbam;
	list_add_tail(&pbam->qlist, &qce50_bam_list);
	pce_dev->ce_sps.bam_handle = pbam->handle;

ret:
	mutex_unlock(&bam_register_lock);

	return rc;
}

/**
 * Initialize SPS HW connected with CE core
 *
 * This function register BAM HW resources with
 * SPS driver and then initialize 2 SPS endpoints
 *
 * This function should only be called once typically
 * during driver probe.
 *
 * @pce_dev - Pointer to qce_device structure
 *
 * @return - 0 if successful else negative value.
 *
 */
static int qce_sps_init(struct qce_device *pce_dev)
{
	int rc = 0;

	rc = qce_sps_get_bam(pce_dev);
	if (rc)
		return rc;
	pr_debug("BAM device registered. bam_handle=0x%x",
		pce_dev->ce_sps.bam_handle);

	rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.producer, true);
	if (rc)
		goto sps_connect_producer_err;
	rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.consumer, false);
	if (rc)
		goto sps_connect_consumer_err;

	pce_dev->ce_sps.out_transfer.user = pce_dev->ce_sps.producer.pipe;
	pce_dev->ce_sps.in_transfer.user = pce_dev->ce_sps.consumer.pipe;
	pr_info(" Qualcomm MSM CE-BAM at 0x%016llx irq %d\n",
		(unsigned long long)pce_dev->ce_sps.bam_mem,
		(unsigned int)pce_dev->ce_sps.bam_irq);
	return rc;

sps_connect_consumer_err:
	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
sps_connect_producer_err:
	qce_sps_release_bam(pce_dev);
	return rc;
}

/**
 * De-initialize SPS HW connected with CE core
 *
 * This function deinitialize SPS endpoints and then
 * deregisters BAM resources from SPS driver.
 *
 * This function should only be called once typically
 * during driver remove.
* * @pce_dev - Pointer to qce_device structure * */ static void qce_sps_exit(struct qce_device *pce_dev) { qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.consumer); qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer); qce_sps_release_bam(pce_dev); } static void _aead_sps_producer_callback(struct sps_event_notify *notify) { struct qce_device *pce_dev = (struct qce_device *) ((struct sps_event_notify *)notify)->user; pce_dev->ce_sps.notify = *notify; pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n", notify->event_id, notify->data.transfer.iovec.addr, notify->data.transfer.iovec.size, notify->data.transfer.iovec.flags); if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) { pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; /* done */ _aead_complete(pce_dev); } else { int rc = 0; pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; pce_dev->ce_sps.out_transfer.iovec_count = 0; _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer); _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); rc = sps_transfer(pce_dev->ce_sps.producer.pipe, &pce_dev->ce_sps.out_transfer); if (rc) { pr_err("sps_xfr() fail (producer pipe=0x%x) rc = %d,", (u32)pce_dev->ce_sps.producer.pipe, rc); } } }; static void _sha_sps_producer_callback(struct sps_event_notify *notify) { struct qce_device *pce_dev = (struct qce_device *) ((struct sps_event_notify *)notify)->user; pce_dev->ce_sps.notify = *notify; pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n", notify->event_id, notify->data.transfer.iovec.addr, notify->data.transfer.iovec.size, notify->data.transfer.iovec.flags); /* done */ _sha_complete(pce_dev); }; static void _f9_sps_producer_callback(struct sps_event_notify *notify) { struct qce_device *pce_dev = (struct qce_device *) ((struct sps_event_notify *)notify)->user; pce_dev->ce_sps.notify = *notify; pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n", notify->event_id, 
notify->data.transfer.iovec.addr, notify->data.transfer.iovec.size, notify->data.transfer.iovec.flags); /* done */ _f9_complete(pce_dev); } static void _f8_sps_producer_callback(struct sps_event_notify *notify) { struct qce_device *pce_dev = (struct qce_device *) ((struct sps_event_notify *)notify)->user; pce_dev->ce_sps.notify = *notify; pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n", notify->event_id, notify->data.transfer.iovec.addr, notify->data.transfer.iovec.size, notify->data.transfer.iovec.flags); if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) { pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; /* done */ _f8_complete(pce_dev); } else { int rc = 0; pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; pce_dev->ce_sps.out_transfer.iovec_count = 0; _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer); _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_INT); rc = sps_transfer(pce_dev->ce_sps.producer.pipe, &pce_dev->ce_sps.out_transfer); if (rc) { pr_err("sps_xfr() fail (producer pipe=0x%x) rc = %d,", (u32)pce_dev->ce_sps.producer.pipe, rc); } } } static void _ablk_cipher_sps_producer_callback(struct sps_event_notify *notify) { struct qce_device *pce_dev = (struct qce_device *) ((struct sps_event_notify *)notify)->user; pce_dev->ce_sps.notify = *notify; pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n", notify->event_id, notify->data.transfer.iovec.addr, notify->data.transfer.iovec.size, notify->data.transfer.iovec.flags); if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) { pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; /* done */ _ablk_cipher_complete(pce_dev); } else { int rc = 0; pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; pce_dev->ce_sps.out_transfer.iovec_count = 0; _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer); 
_qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); rc = sps_transfer(pce_dev->ce_sps.producer.pipe, &pce_dev->ce_sps.out_transfer); if (rc) { pr_err("sps_xfr() fail (producer pipe=0x%x) rc = %d,", (u32)pce_dev->ce_sps.producer.pipe, rc); } } }; static void qce_add_cmd_element(struct qce_device *pdev, struct sps_command_element **cmd_ptr, u32 addr, u32 data, struct sps_command_element **populate) { (*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase); (*cmd_ptr)->data = data; (*cmd_ptr)->mask = 0xFFFFFFFF; if (populate != NULL) *populate = *cmd_ptr; (*cmd_ptr)++ ; } static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, unsigned char **pvaddr, enum qce_cipher_mode_enum mode, bool key_128) { struct sps_command_element *ce_vaddr; uint32_t ce_vaddr_start; struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr; struct qce_cmdlist_info *pcl_info = NULL; int i = 0; uint32_t encr_cfg = 0; uint32_t key_reg = 0; uint32_t xts_key_reg = 0; uint32_t iv_reg = 0; *pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)), pdev->ce_sps.ce_burst_size); ce_vaddr = (struct sps_command_element *)(*pvaddr); ce_vaddr_start = (uint32_t)(*pvaddr); /* * Designate chunks of the allocated memory to various * command list pointers related to AES cipher operations defined * in ce_cmdlistptrs_ops structure. 
*/ switch (mode) { case QCE_MODE_CBC: case QCE_MODE_CTR: if (key_128 == true) { cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist = (uint32_t)ce_vaddr; pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr); if (mode == QCE_MODE_CBC) encr_cfg = pdev->reg.encr_cfg_aes_cbc_128; else encr_cfg = pdev->reg.encr_cfg_aes_ctr_128; iv_reg = 4; key_reg = 4; xts_key_reg = 0; } else { cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist = (uint32_t)ce_vaddr; pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr); if (mode == QCE_MODE_CBC) encr_cfg = pdev->reg.encr_cfg_aes_cbc_256; else encr_cfg = pdev->reg.encr_cfg_aes_ctr_256; iv_reg = 4; key_reg = 8; xts_key_reg = 0; } break; case QCE_MODE_ECB: if (key_128 == true) { cmdlistptr->cipher_aes_128_ecb.cmdlist = (uint32_t)ce_vaddr; pcl_info = &(cmdlistptr->cipher_aes_128_ecb); encr_cfg = pdev->reg.encr_cfg_aes_ecb_128; iv_reg = 0; key_reg = 4; xts_key_reg = 0; } else { cmdlistptr->cipher_aes_256_ecb.cmdlist = (uint32_t)ce_vaddr; pcl_info = &(cmdlistptr->cipher_aes_256_ecb); encr_cfg = pdev->reg.encr_cfg_aes_ecb_256; iv_reg = 0; key_reg = 8; xts_key_reg = 0; } break; case QCE_MODE_XTS: if (key_128 == true) { cmdlistptr->cipher_aes_128_xts.cmdlist = (uint32_t)ce_vaddr; pcl_info = &(cmdlistptr->cipher_aes_128_xts); encr_cfg = pdev->reg.encr_cfg_aes_xts_128; iv_reg = 4; key_reg = 4; xts_key_reg = 4; } else { cmdlistptr->cipher_aes_256_xts.cmdlist = (uint32_t)ce_vaddr; pcl_info = &(cmdlistptr->cipher_aes_256_xts); encr_cfg = pdev->reg.encr_cfg_aes_xts_256; iv_reg = 4; key_reg = 8; xts_key_reg = 8; } break; default: pr_err("Unknown mode of operation %d received, exiting now\n", mode); return -EINVAL; break; } /* clear status register */ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0, &pcl_info->seg_size); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg, 
			&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
			&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
			&pcl_info->encr_seg_start);
	/* open the counter mask fully; patched per request when needed */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
			(uint32_t)0xffffffff, &pcl_info->encr_mask);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
			&pcl_info->auth_seg_cfg);
	/* encryption key; remaining key words are cleared in the loop */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
			&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
			0, NULL);
	if (xts_key_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
			0, &pcl_info->encr_xts_key);
		for (i = 1; i < xts_key_reg; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_ENCR_XTS_KEY0_REG +
					i * sizeof(uint32_t)), 0, NULL);
		qce_add_cmd_element(pdev, &ce_vaddr,
				CRYPTO_ENCR_XTS_DU_SIZE_REG, 0,
					&pcl_info->encr_xts_du_size);
	}
	if (iv_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
				&pcl_info->encr_cntr_iv);
		for (i = 1; i < iv_reg; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
	}
	/* Add dummy to align size to burst-size multiple */
	if (mode == QCE_MODE_XTS) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
						0, &pcl_info->auth_seg_size);
	} else {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
						0, &pcl_info->auth_seg_size);
		/*
		 * NOTE(review): this AUTH_SEG_START element is recorded in
		 * auth_seg_size (not auth_seg_start) — presumably fine since
		 * it is only a dummy write for alignment; confirm.
		 */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
						0, &pcl_info->auth_seg_size);
	}
	/* back to little-endian config, then kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
			&pcl_info->go_proc);

	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}

/*
 * Build the command list for DES/3DES cipher requests (@alg) in ECB or
 * CBC mode (@mode_cbc).  Returns 0 on success, -EINVAL for an unknown
 * algorithm.
 */
static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev,
		unsigned char **pvaddr, enum
qce_cipher_alg_enum alg, bool mode_cbc)
{
	struct sps_command_element *ce_vaddr;
	uint32_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t key_reg = 0;
	uint32_t iv_reg = 0;

	*pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
					pdev->ce_sps.ce_burst_size);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);
	ce_vaddr_start = (uint32_t)(*pvaddr);

	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to cipher operations defined
	 * in ce_cmdlistptrs_ops structure.
	 */
	switch (alg) {
	case CIPHER_ALG_DES:
		if (mode_cbc) {
			cmdlistptr->cipher_des_cbc.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_des_cbc);

			encr_cfg = pdev->reg.encr_cfg_des_cbc;
			iv_reg = 2;
			key_reg = 2;
		} else {
			cmdlistptr->cipher_des_ecb.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_des_ecb);

			encr_cfg = pdev->reg.encr_cfg_des_ecb;
			iv_reg = 0;
			key_reg = 2;
		}
		break;
	case CIPHER_ALG_3DES:
		if (mode_cbc) {
			cmdlistptr->cipher_3des_cbc.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_3des_cbc);

			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
			iv_reg = 2;
			key_reg = 6;
		} else {
			cmdlistptr->cipher_3des_ecb.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_3des_ecb);

			encr_cfg = pdev->reg.encr_cfg_3des_ecb;
			iv_reg = 0;
			key_reg = 6;
		}
		break;
	default:
		pr_err("Unknown algorithms %d received, exiting now\n", alg);
		return -EINVAL;
		break;
	}

	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
			&pcl_info->seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
			&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
			&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
			&pcl_info->encr_seg_start);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
			&pcl_info->auth_seg_cfg);
	/* encryption key; remaining key words are cleared in the loop */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
			&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
			0, NULL);
	if (iv_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
				&pcl_info->encr_cntr_iv);
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
				NULL);
	}
	/* back to little-endian config, then kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
			&pcl_info->go_proc);

	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}

/*
 * Build the command list for a hash/HMAC/CMAC operation (@alg); for
 * AES-CMAC, @key_128 selects the 128- vs 256-bit key variant.
 * Returns 0 on success, -EINVAL for an unknown algorithm.
 */
static int _setup_auth_cmdlistptrs(struct qce_device *pdev,
		unsigned char **pvaddr, enum qce_hash_alg_enum alg,
		bool key_128)
{
	struct sps_command_element *ce_vaddr;
	uint32_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t key_reg = 0;
	uint32_t auth_cfg = 0;
	uint32_t iv_reg = 0;

	*pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
					pdev->ce_sps.ce_burst_size);
	ce_vaddr_start = (uint32_t)(*pvaddr);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);

	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to authentication operations
	 * defined in ce_cmdlistptrs_ops structure.
	 */
	switch (alg) {
	case QCE_HASH_SHA1:
		cmdlistptr->auth_sha1.cmdlist = (uint32_t)ce_vaddr;
		pcl_info = &(cmdlistptr->auth_sha1);

		auth_cfg = pdev->reg.auth_cfg_sha1;
		iv_reg = 5;

		/* clear status register */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
					0, NULL);

		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
		break;
	case QCE_HASH_SHA256:
		cmdlistptr->auth_sha256.cmdlist = (uint32_t)ce_vaddr;
		pcl_info = &(cmdlistptr->auth_sha256);

		auth_cfg = pdev->reg.auth_cfg_sha256;
		iv_reg = 8;

		/* clear status register */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
					0, NULL);

		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
		/* 1 dummy write */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
								0, NULL);
		break;
	case QCE_HASH_SHA1_HMAC:
		cmdlistptr->auth_sha1_hmac.cmdlist = (uint32_t)ce_vaddr;
		pcl_info = &(cmdlistptr->auth_sha1_hmac);

		auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
		key_reg = 16;
		iv_reg = 5;

		/* clear status register */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
					0, NULL);

		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
		break;
	case QCE_HASH_SHA256_HMAC:
		cmdlistptr->auth_sha256_hmac.cmdlist = (uint32_t)ce_vaddr;
		pcl_info = &(cmdlistptr->auth_sha256_hmac);

		auth_cfg = pdev->reg.auth_cfg_hmac_sha256;
		key_reg = 16;
		iv_reg = 8;

		/* clear status register */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
					NULL);

		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
		/* 1 dummy write */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
								0, NULL);
		break;
	case QCE_HASH_AES_CMAC:
		if (key_128 == true) {
			cmdlistptr->auth_aes_128_cmac.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->auth_aes_128_cmac);

			auth_cfg = pdev->reg.auth_cfg_cmac_128;
			key_reg = 4;
		} else {
			cmdlistptr->auth_aes_256_cmac.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->auth_aes_256_cmac);

			auth_cfg = pdev->reg.auth_cfg_cmac_256;
			key_reg = 8;
		}

		/* clear status register */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
					NULL);

		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
		/* 1 dummy write */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
								0, NULL);
		break;
	default:
		pr_err("Unknown algorithms %d received, exiting now\n", alg);
		return -EINVAL;
		break;
	}

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
			&pcl_info->seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
			&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
			auth_cfg, &pcl_info->auth_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
			&pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
			&pcl_info->auth_seg_start);

	if (alg == QCE_HASH_AES_CMAC) {
		/* reset auth iv, bytecount and key registers */
		for (i = 0; i < 16; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
		for (i = 0; i < 16; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
				0, NULL);
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
						0, NULL);
	} else {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
				&pcl_info->auth_iv);
		for (i = 1; i < iv_reg; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
				0, NULL);
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
				0, &pcl_info->auth_bytecount);
	}
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);

	if (key_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG,
				0, &pcl_info->auth_key);
		for (i = 1; i < key_reg; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
				0, NULL);
	}
	qce_add_cmd_element(pdev,
			&ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
			&pcl_info->go_proc);

	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}

/*
 * Build the command list for an AEAD (cipher + HMAC-SHA1) operation.
 * @alg selects DES/3DES/AES, @mode ECB or CBC, @key_size the cipher key
 * length in bytes.  Returns 0 on success, -EINVAL for an unsupported
 * combination.
 */
static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
				unsigned char **pvaddr,
				uint32_t alg,
				uint32_t mode,
				uint32_t key_size)
{
	struct sps_command_element *ce_vaddr;
	uint32_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	uint32_t key_reg;
	uint32_t iv_reg;
	uint32_t i;
	uint32_t enciv_in_word;
	uint32_t encr_cfg;

	*pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
					pdev->ce_sps.ce_burst_size);
	ce_vaddr_start = (uint32_t)(*pvaddr);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);

	switch (alg) {

	case CIPHER_ALG_DES:

		switch (mode) {

		case QCE_MODE_ECB:
			cmdlistptr->aead_hmac_sha1_ecb_des.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->aead_hmac_sha1_ecb_des);

			encr_cfg = pdev->reg.encr_cfg_des_ecb;
			break;

		case QCE_MODE_CBC:
			cmdlistptr->aead_hmac_sha1_cbc_des.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->aead_hmac_sha1_cbc_des);

			encr_cfg = pdev->reg.encr_cfg_des_cbc;
			break;
		default:
			return -EINVAL;
		};

		enciv_in_word = 2;

		break;

	case CIPHER_ALG_3DES:
		switch (mode) {

		case QCE_MODE_ECB:
			cmdlistptr->aead_hmac_sha1_ecb_3des.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->aead_hmac_sha1_ecb_3des);

			encr_cfg = pdev->reg.encr_cfg_3des_ecb;
			break;

		case QCE_MODE_CBC:
			cmdlistptr->aead_hmac_sha1_cbc_3des.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->aead_hmac_sha1_cbc_3des);

			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
			break;
		default:
			return -EINVAL;
		};

		enciv_in_word = 2;

		break;

	case CIPHER_ALG_AES:
		switch (mode) {

		case QCE_MODE_ECB:
			if (key_size == AES128_KEY_SIZE) {
				cmdlistptr->aead_hmac_sha1_ecb_aes_128.cmdlist =
						(uint32_t)ce_vaddr;
				pcl_info = &(cmdlistptr->
						aead_hmac_sha1_ecb_aes_128);

				encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
			} else if (key_size == AES256_KEY_SIZE) {
				cmdlistptr->aead_hmac_sha1_ecb_aes_256.cmdlist =
						(uint32_t)ce_vaddr;
				pcl_info = &(cmdlistptr->
						aead_hmac_sha1_ecb_aes_256);

				encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
			} else {
				return -EINVAL;
			}
			break;

		case QCE_MODE_CBC:
			if (key_size == AES128_KEY_SIZE) {
				cmdlistptr->aead_hmac_sha1_cbc_aes_128.cmdlist =
						(uint32_t)ce_vaddr;
				pcl_info = &(cmdlistptr->
						aead_hmac_sha1_cbc_aes_128);

				encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
			} else if (key_size == AES256_KEY_SIZE) {
				cmdlistptr->aead_hmac_sha1_cbc_aes_256.cmdlist =
						(uint32_t)ce_vaddr;
				pcl_info = &(cmdlistptr->
						aead_hmac_sha1_cbc_aes_256);

				encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
			} else {
				return -EINVAL;
			}
			break;
		default:
			return -EINVAL;
		};

		enciv_in_word = 4;

		break;

	default:
		return -EINVAL;
	};

	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	/* cipher key words, then clear the remainder of the key registers */
	key_reg = key_size/sizeof(uint32_t);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
			&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
			0, NULL);

	/* ECB has no IV; other modes patch enciv_in_word IV words per request */
	if (mode != QCE_MODE_ECB) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
			&pcl_info->encr_cntr_iv);
		for (i = 1; i < enciv_in_word; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
	};

	/* SHA1 auth IV is 5 words */
	iv_reg = 5;
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
			&pcl_info->auth_iv);
	for (i = 1; i < iv_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
			0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
			0, &pcl_info->auth_bytecount);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);

	/* HMAC key registers */
	key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
	qce_add_cmd_element(pdev, &ce_vaddr,
		CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
			0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
			&pcl_info->seg_size);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
			encr_cfg, &pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
			&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
			&pcl_info->encr_seg_start);

	qce_add_cmd_element(
			pdev,
			&ce_vaddr,
			CRYPTO_AUTH_SEG_CFG_REG,
			pdev->reg.auth_cfg_aead_sha1_hmac,
			&pcl_info->auth_seg_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
			&pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
			&pcl_info->auth_seg_start);

	/* back to little-endian config, then kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
			&pcl_info->go_proc);

	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;
	return 0;
}

/*
 * Build the command list for AES-CCM AEAD operations; @key_128 selects
 * the 128- vs 256-bit key variant.  Always returns 0.
 */
static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev,
				unsigned char **pvaddr, bool key_128)
{
	struct sps_command_element *ce_vaddr;
	uint32_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t auth_cfg = 0;
	uint32_t key_reg = 0;

	*pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
					pdev->ce_sps.ce_burst_size);
	ce_vaddr_start = (uint32_t)(*pvaddr);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);

	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to aead operations
	 * defined in ce_cmdlistptrs_ops structure.
	 */
	if (key_128 == true) {
		cmdlistptr->aead_aes_128_ccm.cmdlist = (uint32_t)ce_vaddr;
		pcl_info = &(cmdlistptr->aead_aes_128_ccm);

		auth_cfg = pdev->reg.auth_cfg_aes_ccm_128;
		encr_cfg = pdev->reg.encr_cfg_aes_ccm_128;
		key_reg = 4;
	} else {
		cmdlistptr->aead_aes_256_ccm.cmdlist = (uint32_t)ce_vaddr;
		pcl_info = &(cmdlistptr->aead_aes_256_ccm);

		auth_cfg = pdev->reg.auth_cfg_aes_ccm_256;
		encr_cfg = pdev->reg.encr_cfg_aes_ccm_256;
		key_reg = 8;
	}

	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
									NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
			&pcl_info->seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
					encr_cfg, &pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
			&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
			&pcl_info->encr_seg_start);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
				(uint32_t)0xffffffff, &pcl_info->encr_mask);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
					auth_cfg, &pcl_info->auth_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
			&pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
			&pcl_info->auth_seg_start);
	/* reset auth iv, bytecount and key registers */
	for (i = 0; i < 8; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
					0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
					0, NULL);
	for (i = 0; i < 16; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);
	/* set auth key */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
					&pcl_info->auth_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);
	/* set NONCE info */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
						&pcl_info->auth_nonce_info);
	for (i = 1; i < 4; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_AUTH_INFO_NONCE0_REG +
				i * sizeof(uint32_t)), 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
						&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
						&pcl_info->encr_cntr_iv);
	for (i = 1; i < 4; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
						&pcl_info->encr_ccm_cntr_iv);
	for (i = 1; i < 4; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
			0, NULL);
	/* back to little-endian config, then kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
			&pcl_info->go_proc);

	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}

/*
 * Build the command list for an OTA f8 (cipher) operation using the
 * Kasumi or Snow3G core (@alg; anything else falls back to Snow3G).
 * Always returns 0.
 */
static int _setup_f8_cmdlistptrs(struct qce_device *pdev,
	unsigned char **pvaddr, enum qce_ota_algo_enum alg)
{
	struct sps_command_element *ce_vaddr;
	uint32_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t key_reg = 4;

	*pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
					pdev->ce_sps.ce_burst_size);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);
	ce_vaddr_start = (uint32_t)(*pvaddr);

	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to f8 cipher algorithm defined
	 * in ce_cmdlistptrs_ops structure.
	 */

	switch (alg) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistptr->f8_kasumi.cmdlist = (uint32_t)ce_vaddr;
		pcl_info = &(cmdlistptr->f8_kasumi);
		encr_cfg = pdev->reg.encr_cfg_kasumi;
		break;

	case QCE_OTA_ALGO_SNOW3G:
	default:
		cmdlistptr->f8_snow3g.cmdlist = (uint32_t)ce_vaddr;
		pcl_info = &(cmdlistptr->f8_snow3g);
		encr_cfg = pdev->reg.encr_cfg_snow3g;
		break;
	}
	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
							0, NULL);
	/* set config to big endian */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
			&pcl_info->seg_size);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
			&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
			&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
			&pcl_info->encr_seg_start);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
			&pcl_info->auth_seg_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
			0, &pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
			0, &pcl_info->auth_seg_start);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
			&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
			0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
			&pcl_info->encr_cntr_iv);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
			NULL);
	/* back to little-endian config, then kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
			&pcl_info->go_proc);

	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
	*pvaddr = (unsigned char *)
			ce_vaddr;

	return 0;
}

/*
 * Build the command list for an OTA f9 (integrity) operation using the
 * Kasumi or Snow3G core (@alg; anything else falls back to Snow3G).
 * Always returns 0.
 */
static int _setup_f9_cmdlistptrs(struct qce_device *pdev,
	unsigned char **pvaddr, enum qce_ota_algo_enum alg)
{
	struct sps_command_element *ce_vaddr;
	uint32_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t auth_cfg = 0;
	uint32_t iv_reg = 0;

	*pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
					pdev->ce_sps.ce_burst_size);
	ce_vaddr_start = (uint32_t)(*pvaddr);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);

	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to authentication operations
	 * defined in ce_cmdlistptrs_ops structure.
	 */
	switch (alg) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistptr->f9_kasumi.cmdlist = (uint32_t)ce_vaddr;
		pcl_info = &(cmdlistptr->f9_kasumi);
		auth_cfg = pdev->reg.auth_cfg_kasumi;
		break;

	case QCE_OTA_ALGO_SNOW3G:
	default:
		cmdlistptr->f9_snow3g.cmdlist = (uint32_t)ce_vaddr;
		pcl_info = &(cmdlistptr->f9_snow3g);
		auth_cfg = pdev->reg.auth_cfg_snow3g;
	};

	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
							0, NULL);
	/* set config to big endian */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	iv_reg = 5;

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
			&pcl_info->seg_size);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
			&pcl_info->encr_seg_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
			auth_cfg, &pcl_info->auth_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
			&pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
			&pcl_info->auth_seg_start);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
			&pcl_info->auth_iv);
	for (i = 1; i < iv_reg; i++) {
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
				0, NULL);
	}
	qce_add_cmd_element(pdev, &ce_vaddr,
			CRYPTO_AUTH_BYTECNT0_REG, 0, &pcl_info->auth_bytecount);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);

	/* back to little-endian config, then kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
			&pcl_info->go_proc);

	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}

/*
 * Build the one-element command list that resets CRYPTO_CONFIG_REG,
 * releasing (unlocking) all pipe locks.  Always returns 0.
 */
static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
		unsigned char **pvaddr)
{
	struct sps_command_element *ce_vaddr;
	/*
	 * NOTE(review): ce_vaddr_start is captured before *pvaddr is
	 * aligned below, so pcl_info->size also counts the alignment
	 * padding — confirm this is intended.
	 */
	uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;

	*pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
					pdev->ce_sps.ce_burst_size);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);
	cmdlistptr->unlock_all_pipes.cmdlist = (uint32_t)ce_vaddr;
	pcl_info = &(cmdlistptr->unlock_all_pipes);

	/*
	 * Designate chunks of the allocated memory to command list
	 * to unlock pipes.
	 */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
					CRYPTO_CONFIG_RESET, NULL);
	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}

/*
 * Populate every command list used by the driver from the coherent
 * memory region at *pvaddr, advancing *pvaddr past the carved lists.
 * Always returns 0.
 */
static int qce_setup_cmdlistptrs(struct qce_device *pdev,
					unsigned char **pvaddr)
{
	struct sps_command_element *ce_vaddr =
				(struct sps_command_element *)(*pvaddr);
	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to operations defined
	 * in ce_cmdlistptrs_ops structure.
*/ ce_vaddr = (struct sps_command_element *) ALIGN(((unsigned int) ce_vaddr), pdev->ce_sps.ce_burst_size); *pvaddr = (unsigned char *) ce_vaddr; _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, true); _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, true); _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, true); _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, true); _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, false); _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, false); _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, false); _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, false); _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, true); _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, false); _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, true); _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, false); _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1, false); _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256, false); _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1_HMAC, false); _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256_HMAC, false); _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, true); _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, false); _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, QCE_MODE_CBC, DES_KEY_SIZE); _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, QCE_MODE_ECB, DES_KEY_SIZE); _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, QCE_MODE_CBC, DES3_EDE_KEY_SIZE); _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, QCE_MODE_ECB, DES3_EDE_KEY_SIZE); _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_AES, QCE_MODE_CBC, AES128_KEY_SIZE); _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_AES, QCE_MODE_ECB, AES128_KEY_SIZE); _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_AES, QCE_MODE_CBC, AES256_KEY_SIZE); _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_AES, QCE_MODE_ECB, 
AES256_KEY_SIZE); _setup_aead_ccm_cmdlistptrs(pdev, pvaddr, true); _setup_aead_ccm_cmdlistptrs(pdev, pvaddr, false); _setup_f8_cmdlistptrs(pdev, pvaddr, QCE_OTA_ALGO_KASUMI); _setup_f8_cmdlistptrs(pdev, pvaddr, QCE_OTA_ALGO_SNOW3G); _setup_f9_cmdlistptrs(pdev, pvaddr, QCE_OTA_ALGO_KASUMI); _setup_f9_cmdlistptrs(pdev, pvaddr, QCE_OTA_ALGO_SNOW3G); _setup_unlock_pipe_cmdlistptrs(pdev, pvaddr); return 0; } static int qce_setup_ce_sps_data(struct qce_device *pce_dev) { unsigned char *vaddr; vaddr = pce_dev->coh_vmem; vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), pce_dev->ce_sps.ce_burst_size); /* Allow for 256 descriptor (cmd and data) entries per pipe */ pce_dev->ce_sps.in_transfer.iovec = (struct sps_iovec *)vaddr; pce_dev->ce_sps.in_transfer.iovec_phys = (uint32_t)GET_PHYS_ADDR(vaddr); vaddr += QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec); pce_dev->ce_sps.out_transfer.iovec = (struct sps_iovec *)vaddr; pce_dev->ce_sps.out_transfer.iovec_phys = (uint32_t)GET_PHYS_ADDR(vaddr); vaddr += QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec); if (pce_dev->support_cmd_dscr) qce_setup_cmdlistptrs(pce_dev, &vaddr); vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), pce_dev->ce_sps.ce_burst_size); pce_dev->ce_sps.result_dump = (uint32_t)vaddr; pce_dev->ce_sps.result = (struct ce_result_dump_format *)vaddr; vaddr += CRYPTO_RESULT_DUMP_SIZE; pce_dev->ce_sps.ignore_buffer = (uint32_t)vaddr; vaddr += pce_dev->ce_sps.ce_burst_size * 2; if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize) panic("qce50: Not enough coherent memory. 
Allocate %x , need %x", pce_dev->memsize, vaddr - pce_dev->coh_vmem); return 0; } static int qce_init_ce_cfg_val(struct qce_device *pce_dev) { uint32_t beats = (pce_dev->ce_sps.ce_burst_size >> 3) - 1; uint32_t pipe_pair = pce_dev->ce_sps.pipe_pair_index; pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) | BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) | BIT(CRYPTO_MASK_OP_DONE_INTR) | (0 << CRYPTO_HIGH_SPD_EN_N) | (pipe_pair << CRYPTO_PIPE_SET_SELECT); pce_dev->reg.crypto_cfg_le = (pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK); /* Initialize encr_cfg register for AES alg */ pce_dev->reg.encr_cfg_aes_cbc_128 = (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE); pce_dev->reg.encr_cfg_aes_cbc_256 = (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE); pce_dev->reg.encr_cfg_aes_ctr_128 = (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE); pce_dev->reg.encr_cfg_aes_ctr_256 = (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE); pce_dev->reg.encr_cfg_aes_xts_128 = (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE); pce_dev->reg.encr_cfg_aes_xts_256 = (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE); pce_dev->reg.encr_cfg_aes_ecb_128 = (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE); pce_dev->reg.encr_cfg_aes_ecb_256 = (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_ECB << 
CRYPTO_ENCR_MODE); pce_dev->reg.encr_cfg_aes_ccm_128 = (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)| (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM); pce_dev->reg.encr_cfg_aes_ccm_256 = (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) | (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM); /* Initialize encr_cfg register for DES alg */ pce_dev->reg.encr_cfg_des_ecb = (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE); pce_dev->reg.encr_cfg_des_cbc = (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE); pce_dev->reg.encr_cfg_3des_ecb = (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE); pce_dev->reg.encr_cfg_3des_cbc = (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) | (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) | (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE); /* Initialize encr_cfg register for kasumi/snow3g alg */ pce_dev->reg.encr_cfg_kasumi = (CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG); pce_dev->reg.encr_cfg_snow3g = (CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG); /* Initialize auth_cfg register for CMAC alg */ pce_dev->reg.auth_cfg_cmac_128 = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) | (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)| (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) | (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) | (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE); pce_dev->reg.auth_cfg_cmac_256 = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) | (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)| (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) | (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) | (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE); /* Initialize 
auth_cfg register for HMAC alg */ pce_dev->reg.auth_cfg_hmac_sha1 = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)| (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) | (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) | (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); pce_dev->reg.auth_cfg_hmac_sha256 = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)| (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) | (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) | (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); /* Initialize auth_cfg register for SHA1/256 alg */ pce_dev->reg.auth_cfg_sha1 = (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)| (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) | (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) | (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); pce_dev->reg.auth_cfg_sha256 = (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)| (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) | (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) | (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); /* Initialize auth_cfg register for AEAD alg */ pce_dev->reg.auth_cfg_aead_sha1_hmac = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)| (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) | (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) | (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST); pce_dev->reg.auth_cfg_aead_sha256_hmac = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)| (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) | (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) | (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST); pce_dev->reg.auth_cfg_aes_ccm_128 = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) | (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)| (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) | (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) | ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS); pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH); pce_dev->reg.auth_cfg_aes_ccm_256 = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) | (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)| (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) | (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) | 
((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS); pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH); /* Initialize auth_cfg register for kasumi/snow3g */ pce_dev->reg.auth_cfg_kasumi = (CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) | BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST); pce_dev->reg.auth_cfg_snow3g = (CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) | BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST); return 0; } static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req) { struct qce_device *pce_dev = (struct qce_device *) handle; struct aead_request *areq = (struct aead_request *) q_req->areq; uint32_t authsize = q_req->authsize; uint32_t totallen_in, out_len; uint32_t hw_pad_out = 0; int rc = 0; int ce_burst_size; struct qce_cmdlist_info *cmdlistinfo = NULL; ce_burst_size = pce_dev->ce_sps.ce_burst_size; totallen_in = areq->cryptlen + areq->assoclen; if (q_req->dir == QCE_ENCRYPT) { q_req->cryptlen = areq->cryptlen; out_len = areq->cryptlen + authsize; hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize; } else { q_req->cryptlen = areq->cryptlen - authsize; out_len = q_req->cryptlen; hw_pad_out = authsize; } if (pce_dev->ce_sps.minor_version == 0) { /* * For crypto 5.0 that has burst size alignment requirement * for data descritpor, * the agent above(qcrypto) prepares the src scatter list with * memory starting with associated data, followed by * data stream to be ciphered. * The destination scatter list is pointing to the same * data area as source. */ pce_dev->src_nents = count_sg(areq->src, totallen_in); } else { pce_dev->src_nents = count_sg(areq->src, areq->cryptlen); } pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen); pce_dev->authsize = q_req->authsize; /* associated data input */ qce_dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, DMA_TO_DEVICE); /* cipher input */ qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); /* cipher + mac output for encryption */ if (areq->src != areq->dst) { if (pce_dev->ce_sps.minor_version == 0) /* * The destination scatter list is pointing to the same * data area as src. * Note, the associated data will be pass-through * at the begining of destination area. */ pce_dev->dst_nents = count_sg(areq->dst, out_len + areq->assoclen); else pce_dev->dst_nents = count_sg(areq->dst, out_len); qce_dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); } else { pce_dev->dst_nents = pce_dev->src_nents; } if (pce_dev->support_cmd_dscr) { _ce_get_cipher_cmdlistinfo(pce_dev, q_req, &cmdlistinfo); /* set up crypto device */ rc = _ce_setup_cipher(pce_dev, q_req, totallen_in, areq->assoclen, cmdlistinfo); } else { /* set up crypto device */ rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in, areq->assoclen); } if (rc < 0) goto bad; /* setup for callback, and issue command to bam */ pce_dev->areq = q_req->areq; pce_dev->qce_cb = q_req->qce_cb; /* Register callback event for EOT (End of transfer) event. */ pce_dev->ce_sps.producer.event.callback = _aead_sps_producer_callback; pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; rc = sps_register_event(pce_dev->ce_sps.producer.pipe, &pce_dev->ce_sps.producer.event); if (rc) { pr_err("Producer callback registration failed rc = %d\n", rc); goto bad; } _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, &pce_dev->ce_sps.in_transfer); if (pce_dev->ce_sps.minor_version == 0) { if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen_in, &pce_dev->ce_sps.in_transfer)) goto bad; _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); /* * The destination data should be big enough to * include CCM padding. 
*/ if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len + areq->assoclen + hw_pad_out, &pce_dev->ce_sps.out_transfer)) goto bad; if (totallen_in > SPS_MAX_PKT_SIZE) { _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; } else { if (_qce_sps_add_data(GET_PHYS_ADDR( pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer)) goto bad; _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; } } else { if (_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen, &pce_dev->ce_sps.in_transfer)) goto bad; if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen, &pce_dev->ce_sps.in_transfer)) goto bad; _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); /* Pass through to ignore associated data*/ if (_qce_sps_add_data( GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer), areq->assoclen, &pce_dev->ce_sps.out_transfer)) goto bad; if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len, &pce_dev->ce_sps.out_transfer)) goto bad; /* Pass through to ignore hw_pad (padding of the MAC data) */ if (_qce_sps_add_data( GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer), hw_pad_out, &pce_dev->ce_sps.out_transfer)) goto bad; if (totallen_in > SPS_MAX_PKT_SIZE) { _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; } else { if (_qce_sps_add_data( GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer)) goto bad; _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; } } rc = _qce_sps_transfer(pce_dev); if (rc) goto bad; return 0; bad: if (pce_dev->assoc_nents) { qce_dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, DMA_TO_DEVICE); } if (pce_dev->src_nents) { 
qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); } if (areq->src != areq->dst) { qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); } return rc; } int qce_aead_req(void *handle, struct qce_req *q_req) { struct qce_device *pce_dev; struct aead_request *areq; uint32_t authsize; struct crypto_aead *aead; uint32_t ivsize; uint32_t totallen; int rc; struct qce_cmdlist_info *cmdlistinfo = NULL; if (q_req->mode == QCE_MODE_CCM) return _qce_aead_ccm_req(handle, q_req); pce_dev = (struct qce_device *) handle; areq = (struct aead_request *) q_req->areq; aead = crypto_aead_reqtfm(areq); ivsize = crypto_aead_ivsize(aead); q_req->ivsize = ivsize; authsize = q_req->authsize; if (q_req->dir == QCE_ENCRYPT) q_req->cryptlen = areq->cryptlen; else q_req->cryptlen = areq->cryptlen - authsize; totallen = q_req->cryptlen + areq->assoclen + ivsize; if (pce_dev->support_cmd_dscr) { cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev, q_req); if (cmdlistinfo == NULL) { pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n", q_req->alg, q_req->mode, q_req->encklen, q_req->authsize); return -EINVAL; } /* set up crypto device */ rc = _ce_setup_aead(pce_dev, q_req, totallen, areq->assoclen + ivsize, cmdlistinfo); if (rc < 0) return -EINVAL; }; pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen); if (pce_dev->ce_sps.minor_version == 0) { /* * For crypto 5.0 that has burst size alignment requirement * for data descritpor, * the agent above(qcrypto) prepares the src scatter list with * memory starting with associated data, followed by * iv, and data stream to be ciphered. 
*/ pce_dev->src_nents = count_sg(areq->src, totallen); } else { pce_dev->src_nents = count_sg(areq->src, q_req->cryptlen); }; pce_dev->ivsize = q_req->ivsize; pce_dev->authsize = q_req->authsize; pce_dev->phy_iv_in = 0; /* associated data input */ qce_dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, DMA_TO_DEVICE); /* cipher input */ qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); /* cipher output for encryption */ if (areq->src != areq->dst) { if (pce_dev->ce_sps.minor_version == 0) /* * The destination scatter list is pointing to the same * data area as source. */ pce_dev->dst_nents = count_sg(areq->dst, totallen); else pce_dev->dst_nents = count_sg(areq->dst, q_req->cryptlen); qce_dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); } /* cipher iv for input */ if (pce_dev->ce_sps.minor_version != 0) pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv, ivsize, DMA_TO_DEVICE); /* setup for callback, and issue command to bam */ pce_dev->areq = q_req->areq; pce_dev->qce_cb = q_req->qce_cb; /* Register callback event for EOT (End of transfer) event. 
*/ pce_dev->ce_sps.producer.event.callback = _aead_sps_producer_callback; pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; rc = sps_register_event(pce_dev->ce_sps.producer.pipe, &pce_dev->ce_sps.producer.event); if (rc) { pr_err("Producer callback registration failed rc = %d\n", rc); goto bad; } _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) { _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, &pce_dev->ce_sps.in_transfer); } else { rc = _ce_setup_aead_direct(pce_dev, q_req, totallen, areq->assoclen + ivsize); if (rc) goto bad; } if (pce_dev->ce_sps.minor_version == 0) { if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen, &pce_dev->ce_sps.in_transfer)) goto bad; _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen, &pce_dev->ce_sps.out_transfer)) goto bad; if (totallen > SPS_MAX_PKT_SIZE) { _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; } else { if (_qce_sps_add_data(GET_PHYS_ADDR( pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer)) goto bad; _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; } } else { if (_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen, &pce_dev->ce_sps.in_transfer)) goto bad; if (_qce_sps_add_data((uint32_t)pce_dev->phy_iv_in, ivsize, &pce_dev->ce_sps.in_transfer)) goto bad; if (_qce_sps_add_sg_data(pce_dev, areq->src, q_req->cryptlen, &pce_dev->ce_sps.in_transfer)) goto bad; _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); /* Pass through to ignore associated + iv data*/ if (_qce_sps_add_data( GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer), (ivsize + areq->assoclen), &pce_dev->ce_sps.out_transfer)) goto bad; if (_qce_sps_add_sg_data(pce_dev, 
areq->dst, q_req->cryptlen, &pce_dev->ce_sps.out_transfer)) goto bad; if (totallen > SPS_MAX_PKT_SIZE) { _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; } else { if (_qce_sps_add_data( GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer)) goto bad; _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; } } rc = _qce_sps_transfer(pce_dev); if (rc) goto bad; return 0; bad: if (pce_dev->assoc_nents) { qce_dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, DMA_TO_DEVICE); } if (pce_dev->src_nents) { qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); } if (areq->src != areq->dst) { qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); } if (pce_dev->phy_iv_in) { dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in, ivsize, DMA_TO_DEVICE); } return rc; } EXPORT_SYMBOL(qce_aead_req); int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) { int rc = 0; struct qce_device *pce_dev = (struct qce_device *) handle; struct ablkcipher_request *areq = (struct ablkcipher_request *) c_req->areq; struct qce_cmdlist_info *cmdlistinfo = NULL; pce_dev->src_nents = 0; pce_dev->dst_nents = 0; /* cipher input */ pce_dev->src_nents = count_sg(areq->src, areq->nbytes); qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); /* cipher output */ if (areq->src != areq->dst) { pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes); qce_dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); } else { pce_dev->dst_nents = pce_dev->src_nents; } pce_dev->dir = c_req->dir; if ((pce_dev->ce_sps.minor_version == 0) && (c_req->dir == QCE_DECRYPT) && (c_req->mode == QCE_MODE_CBC)) { memcpy(pce_dev->dec_iv, (unsigned char *)sg_virt(areq->src) + areq->src->length - 16, NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE); } /* set up crypto device */ if (pce_dev->support_cmd_dscr) { _ce_get_cipher_cmdlistinfo(pce_dev, c_req, &cmdlistinfo); rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0, cmdlistinfo); } else { rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->nbytes, 0); } if (rc < 0) goto bad; /* setup for client callback, and issue command to BAM */ pce_dev->areq = areq; pce_dev->qce_cb = c_req->qce_cb; /* Register callback event for EOT (End of transfer) event. */ pce_dev->ce_sps.producer.event.callback = _ablk_cipher_sps_producer_callback; pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; rc = sps_register_event(pce_dev->ce_sps.producer.pipe, &pce_dev->ce_sps.producer.event); if (rc) { pr_err("Producer callback registration failed rc = %d\n", rc); goto bad; } _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, &pce_dev->ce_sps.in_transfer); if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes, &pce_dev->ce_sps.in_transfer)) goto bad; _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes, &pce_dev->ce_sps.out_transfer)) goto bad; if (areq->nbytes > SPS_MAX_PKT_SIZE) { _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; } else { pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; if (_qce_sps_add_data( 
GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer)) goto bad; _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); } rc = _qce_sps_transfer(pce_dev); if (rc) goto bad; return 0; bad: if (areq->src != areq->dst) { if (pce_dev->dst_nents) { qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); } } if (pce_dev->src_nents) { qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); } return rc; } EXPORT_SYMBOL(qce_ablk_cipher_req); int qce_process_sha_req(void *handle, struct qce_sha_req *sreq) { struct qce_device *pce_dev = (struct qce_device *) handle; int rc; struct ahash_request *areq = (struct ahash_request *)sreq->areq; struct qce_cmdlist_info *cmdlistinfo = NULL; pce_dev->src_nents = count_sg(sreq->src, sreq->size); qce_dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents, DMA_TO_DEVICE); if (pce_dev->support_cmd_dscr) { _ce_get_hash_cmdlistinfo(pce_dev, sreq, &cmdlistinfo); rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo); } else { rc = _ce_setup_hash_direct(pce_dev, sreq); } if (rc < 0) goto bad; pce_dev->areq = areq; pce_dev->qce_cb = sreq->qce_cb; /* Register callback event for EOT (End of transfer) event. 
*/ pce_dev->ce_sps.producer.event.callback = _sha_sps_producer_callback; pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; rc = sps_register_event(pce_dev->ce_sps.producer.pipe, &pce_dev->ce_sps.producer.event); if (rc) { pr_err("Producer callback registration failed rc = %d\n", rc); goto bad; } _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, &pce_dev->ce_sps.in_transfer); if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes, &pce_dev->ce_sps.in_transfer)) goto bad; if (areq->nbytes) _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); if (_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer)) goto bad; _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); rc = _qce_sps_transfer(pce_dev); if (rc) goto bad; return 0; bad: if (pce_dev->src_nents) { qce_dma_unmap_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents, DMA_TO_DEVICE); } return rc; } EXPORT_SYMBOL(qce_process_sha_req); int qce_f8_req(void *handle, struct qce_f8_req *req, void *cookie, qce_comp_func_ptr_t qce_cb) { struct qce_device *pce_dev = (struct qce_device *) handle; bool key_stream_mode; dma_addr_t dst; int rc; struct qce_cmdlist_info *cmdlistinfo; switch (req->algorithm) { case QCE_OTA_ALGO_KASUMI: cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f8_kasumi; break; case QCE_OTA_ALGO_SNOW3G: cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f8_snow3g; break; default: return -EINVAL; }; key_stream_mode = (req->data_in == NULL); if ((key_stream_mode && (req->data_len & 0xf)) || (req->bearer >= QCE_OTA_MAX_BEARER)) return -EINVAL; /* F8 cipher input */ if (key_stream_mode) pce_dev->phy_ota_src = 0; else { pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->data_in, req->data_len, (req->data_in == req->data_out) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); } /* F8 cipher output */ if (req->data_in != req->data_out) { dst = dma_map_single(pce_dev->pdev, req->data_out, req->data_len, DMA_FROM_DEVICE); pce_dev->phy_ota_dst = dst; } else { /* in place ciphering */ dst = pce_dev->phy_ota_src; pce_dev->phy_ota_dst = 0; } pce_dev->ota_size = req->data_len; /* set up crypto device */ if (pce_dev->support_cmd_dscr) rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0, req->data_len, cmdlistinfo); else rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0, req->data_len); if (rc < 0) goto bad; /* setup for callback, and issue command to sps */ pce_dev->areq = cookie; pce_dev->qce_cb = qce_cb; /* Register producer callback event for DESC_DONE event. */ pce_dev->ce_sps.producer.event.callback = _f8_sps_producer_callback; pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; rc = sps_register_event(pce_dev->ce_sps.producer.pipe, &pce_dev->ce_sps.producer.event); if (rc) { pr_err("Producer callback registration failed rc = %d\n", rc); goto bad; } _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, &pce_dev->ce_sps.in_transfer); if (!key_stream_mode) { _qce_sps_add_data((uint32_t)pce_dev->phy_ota_src, req->data_len, &pce_dev->ce_sps.in_transfer); _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); } _qce_sps_add_data((uint32_t)dst, req->data_len, &pce_dev->ce_sps.out_transfer); if (req->data_len > SPS_MAX_PKT_SIZE) { _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; } else { pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer); _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); } rc = _qce_sps_transfer(pce_dev); if (rc) goto bad; return 0; bad: if (pce_dev->phy_ota_dst != 0) 
dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, req->data_len, DMA_FROM_DEVICE); if (pce_dev->phy_ota_src != 0) dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, req->data_len, (req->data_in == req->data_out) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); return rc; } EXPORT_SYMBOL(qce_f8_req); int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq, void *cookie, qce_comp_func_ptr_t qce_cb) { struct qce_device *pce_dev = (struct qce_device *) handle; uint16_t num_pkt = mreq->num_pkt; uint16_t cipher_start = mreq->cipher_start; uint16_t cipher_size = mreq->cipher_size; struct qce_f8_req *req = &mreq->qce_f8_req; uint32_t total; dma_addr_t dst = 0; int rc = 0; struct qce_cmdlist_info *cmdlistinfo; switch (req->algorithm) { case QCE_OTA_ALGO_KASUMI: cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f8_kasumi; break; case QCE_OTA_ALGO_SNOW3G: cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f8_snow3g; break; default: return -EINVAL; }; total = num_pkt * req->data_len; /* F8 cipher input */ pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->data_in, total, (req->data_in == req->data_out) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); /* F8 cipher output */ if (req->data_in != req->data_out) { dst = dma_map_single(pce_dev->pdev, req->data_out, total, DMA_FROM_DEVICE); pce_dev->phy_ota_dst = dst; } else { /* in place ciphering */ dst = pce_dev->phy_ota_src; pce_dev->phy_ota_dst = 0; } pce_dev->ota_size = total; /* set up crypto device */ if (pce_dev->support_cmd_dscr) rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start, cipher_size, cmdlistinfo); else rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt, cipher_start, cipher_size); if (rc) goto bad; /* setup for callback, and issue command to sps */ pce_dev->areq = cookie; pce_dev->qce_cb = qce_cb; /* Register producer callback event for DESC_DONE event. 
*/ pce_dev->ce_sps.producer.event.callback = _f8_sps_producer_callback; pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; rc = sps_register_event(pce_dev->ce_sps.producer.pipe, &pce_dev->ce_sps.producer.event); if (rc) { pr_err("Producer callback registration failed rc = %d\n", rc); goto bad; } _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, &pce_dev->ce_sps.in_transfer); _qce_sps_add_data((uint32_t)pce_dev->phy_ota_src, total, &pce_dev->ce_sps.in_transfer); _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); _qce_sps_add_data((uint32_t)dst, total, &pce_dev->ce_sps.out_transfer); if (total > SPS_MAX_PKT_SIZE) { _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; } else { pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer); _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); } rc = _qce_sps_transfer(pce_dev); if (rc == 0) return 0; bad: if (pce_dev->phy_ota_dst) dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, total, DMA_FROM_DEVICE); dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, total, (req->data_in == req->data_out) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); return rc; } EXPORT_SYMBOL(qce_f8_multi_pkt_req); int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie, qce_comp_func_ptr_t qce_cb) { struct qce_device *pce_dev = (struct qce_device *) handle; int rc; struct qce_cmdlist_info *cmdlistinfo; switch (req->algorithm) { case QCE_OTA_ALGO_KASUMI: cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f9_kasumi; break; case QCE_OTA_ALGO_SNOW3G: cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f9_snow3g; break; default: return -EINVAL; }; pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->message, req->msize, DMA_TO_DEVICE); pce_dev->ota_size = req->msize; if (pce_dev->support_cmd_dscr) rc = _ce_f9_setup(pce_dev, req, cmdlistinfo); else rc = _ce_f9_setup_direct(pce_dev, req); if (rc < 0) goto bad; /* setup for callback, and issue command to sps */ pce_dev->areq = cookie; pce_dev->qce_cb = qce_cb; /* Register producer callback event for DESC_DONE event. */ pce_dev->ce_sps.producer.event.callback = _f9_sps_producer_callback; pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; rc = sps_register_event(pce_dev->ce_sps.producer.pipe, &pce_dev->ce_sps.producer.event); if (rc) { pr_err("Producer callback registration failed rc = %d\n", rc); goto bad; } _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, &pce_dev->ce_sps.in_transfer); _qce_sps_add_data((uint32_t)pce_dev->phy_ota_src, req->msize, &pce_dev->ce_sps.in_transfer); _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer); _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); rc = _qce_sps_transfer(pce_dev); if (rc) goto bad; return 0; bad: dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, req->msize, DMA_TO_DEVICE); return rc; } EXPORT_SYMBOL(qce_f9_req); static int __qce_get_device_tree_data(struct 
platform_device *pdev, struct qce_device *pce_dev) { struct resource *resource; int rc = 0; pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node, "qcom,ce-hw-shared"); pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node, "qcom,ce-hw-key"); pce_dev->use_sw_aes_cbc_ecb_ctr_algo = of_property_read_bool((&pdev->dev)->of_node, "qcom,use-sw-aes-cbc-ecb-ctr-algo"); pce_dev->use_sw_aead_algo = of_property_read_bool((&pdev->dev)->of_node, "qcom,use-sw-aead-algo"); pce_dev->use_sw_aes_xts_algo = of_property_read_bool((&pdev->dev)->of_node, "qcom,use-sw-aes-xts-algo"); pce_dev->use_sw_ahash_algo = of_property_read_bool((&pdev->dev)->of_node, "qcom,use-sw-ahash-algo"); pce_dev->use_sw_hmac_algo = of_property_read_bool((&pdev->dev)->of_node, "qcom,use-sw-hmac-algo"); pce_dev->use_sw_aes_ccm_algo = of_property_read_bool((&pdev->dev)->of_node, "qcom,use-sw-aes-ccm-algo"); pce_dev->support_clk_mgmt_sus_res = of_property_read_bool( (&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res"); if (of_property_read_u32((&pdev->dev)->of_node, "qcom,bam-pipe-pair", &pce_dev->ce_sps.pipe_pair_index)) { pr_err("Fail to get bam pipe pair information.\n"); return -EINVAL; } else { pr_warn("bam_pipe_pair=0x%x", pce_dev->ce_sps.pipe_pair_index); } if (of_property_read_u32((&pdev->dev)->of_node, "qcom,ce-device", &pce_dev->ce_sps.ce_device)) { pr_err("Fail to get CE device information.\n"); return -EINVAL; } else { pr_warn("ce-device =0x%x", pce_dev->ce_sps.ce_device); } pce_dev->ce_sps.dest_pipe_index = 2 * pce_dev->ce_sps.pipe_pair_index; pce_dev->ce_sps.src_pipe_index = pce_dev->ce_sps.dest_pipe_index + 1; resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crypto-base"); if (resource) { pce_dev->phy_iobase = resource->start; pce_dev->iobase = ioremap_nocache(resource->start, resource_size(resource)); if (!pce_dev->iobase) { pr_err("Can not map CRYPTO io memory\n"); return -ENOMEM; } } else { pr_err("CRYPTO HW mem unavailable.\n"); return -ENODEV; } 
pr_warn("ce_phy_reg_base=0x%x ", pce_dev->phy_iobase); pr_warn("ce_virt_reg_base=0x%x\n", (uint32_t)pce_dev->iobase); resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crypto-bam-base"); if (resource) { pce_dev->bam_mem = resource->start; pce_dev->bam_mem_size = resource_size(resource); } else { pr_err("CRYPTO BAM mem unavailable.\n"); rc = -ENODEV; goto err_getting_bam_info; } pr_warn("ce_bam_phy_reg_base=0x%x ", pce_dev->bam_mem); resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (resource) { pce_dev->ce_sps.bam_irq = resource->start; pr_warn("CRYPTO BAM IRQ = %d.\n", pce_dev->ce_sps.bam_irq); } else { pr_err("CRYPTO BAM IRQ unavailable.\n"); goto err_dev; } return rc; err_dev: if (pce_dev->ce_sps.bam_iobase) iounmap(pce_dev->ce_sps.bam_iobase); err_getting_bam_info: if (pce_dev->iobase) iounmap(pce_dev->iobase); return rc; } static int __qce_init_clk(struct qce_device *pce_dev) { int rc = 0; struct clk *ce_core_clk; struct clk *ce_clk; struct clk *ce_core_src_clk; struct clk *ce_bus_clk; /* Get CE3 src core clk. 
*/ ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src"); if (!IS_ERR(ce_core_src_clk)) { pce_dev->ce_core_src_clk = ce_core_src_clk; /* Set the core src clk @100Mhz */ rc = clk_set_rate(pce_dev->ce_core_src_clk, 100000000); if (rc) { clk_put(pce_dev->ce_core_src_clk); pce_dev->ce_core_src_clk = NULL; pr_err("Unable to set the core src clk @100Mhz.\n"); goto err_clk; } } else { pr_warn("Unable to get CE core src clk, set to NULL\n"); pce_dev->ce_core_src_clk = NULL; } /* Get CE core clk */ ce_core_clk = clk_get(pce_dev->pdev, "core_clk"); if (IS_ERR(ce_core_clk)) { rc = PTR_ERR(ce_core_clk); pr_err("Unable to get CE core clk\n"); if (pce_dev->ce_core_src_clk != NULL) clk_put(pce_dev->ce_core_src_clk); goto err_clk; } pce_dev->ce_core_clk = ce_core_clk; /* Get CE Interface clk */ ce_clk = clk_get(pce_dev->pdev, "iface_clk"); if (IS_ERR(ce_clk)) { rc = PTR_ERR(ce_clk); pr_err("Unable to get CE interface clk\n"); if (pce_dev->ce_core_src_clk != NULL) clk_put(pce_dev->ce_core_src_clk); clk_put(pce_dev->ce_core_clk); goto err_clk; } pce_dev->ce_clk = ce_clk; /* Get CE AXI clk */ ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk"); if (IS_ERR(ce_bus_clk)) { rc = PTR_ERR(ce_bus_clk); pr_err("Unable to get CE BUS interface clk\n"); if (pce_dev->ce_core_src_clk != NULL) clk_put(pce_dev->ce_core_src_clk); clk_put(pce_dev->ce_core_clk); clk_put(pce_dev->ce_clk); goto err_clk; } pce_dev->ce_bus_clk = ce_bus_clk; err_clk: if (rc) pr_err("Unable to init CE clks, rc = %d\n", rc); return rc; } static void __qce_deinit_clk(struct qce_device *pce_dev) { if (pce_dev->ce_clk != NULL) { clk_put(pce_dev->ce_clk); pce_dev->ce_clk = NULL; } if (pce_dev->ce_core_clk != NULL) { clk_put(pce_dev->ce_core_clk); pce_dev->ce_core_clk = NULL; } if (pce_dev->ce_bus_clk != NULL) { clk_put(pce_dev->ce_bus_clk); pce_dev->ce_bus_clk = NULL; } if (pce_dev->ce_core_src_clk != NULL) { clk_put(pce_dev->ce_core_src_clk); pce_dev->ce_core_src_clk = NULL; } } int qce_enable_clk(void *handle) { struct 
qce_device *pce_dev = (struct qce_device *) handle; int rc = 0; /* Enable CE core clk */ if (pce_dev->ce_core_clk != NULL) { rc = clk_prepare_enable(pce_dev->ce_core_clk); if (rc) { pr_err("Unable to enable/prepare CE core clk\n"); return rc; } } /* Enable CE clk */ if (pce_dev->ce_clk != NULL) { rc = clk_prepare_enable(pce_dev->ce_clk); if (rc) { pr_err("Unable to enable/prepare CE iface clk\n"); clk_disable_unprepare(pce_dev->ce_core_clk); return rc; } } /* Enable AXI clk */ if (pce_dev->ce_bus_clk != NULL) { rc = clk_prepare_enable(pce_dev->ce_bus_clk); if (rc) { pr_err("Unable to enable/prepare CE BUS clk\n"); clk_disable_unprepare(pce_dev->ce_clk); clk_disable_unprepare(pce_dev->ce_core_clk); return rc; } } return rc; } EXPORT_SYMBOL(qce_enable_clk); int qce_disable_clk(void *handle) { struct qce_device *pce_dev = (struct qce_device *) handle; int rc = 0; if (pce_dev->ce_clk != NULL) clk_disable_unprepare(pce_dev->ce_clk); if (pce_dev->ce_core_clk != NULL) clk_disable_unprepare(pce_dev->ce_core_clk); if (pce_dev->ce_bus_clk != NULL) clk_disable_unprepare(pce_dev->ce_bus_clk); return rc; } EXPORT_SYMBOL(qce_disable_clk); /* crypto engine open function. 
*/ void *qce_open(struct platform_device *pdev, int *rc) { struct qce_device *pce_dev; pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL); if (!pce_dev) { *rc = -ENOMEM; pr_err("Can not allocate memory: %d\n", *rc); return NULL; } pce_dev->pdev = &pdev->dev; if (pdev->dev.of_node) { *rc = __qce_get_device_tree_data(pdev, pce_dev); if (*rc) goto err_pce_dev; } else { *rc = -EINVAL; pr_err("Device Node not found.\n"); goto err_pce_dev; } pce_dev->memsize = 10 * PAGE_SIZE; pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev, pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL); if (pce_dev->coh_vmem == NULL) { *rc = -ENOMEM; pr_err("Can not allocate coherent memory for sps data\n"); goto err_iobase; } *rc = __qce_init_clk(pce_dev); if (*rc) goto err_mem; *rc = qce_enable_clk(pce_dev); if (*rc) goto err_enable_clk; if (_probe_ce_engine(pce_dev)) { *rc = -ENXIO; goto err; } *rc = 0; qce_init_ce_cfg_val(pce_dev); *rc = qce_sps_init(pce_dev); if (*rc) goto err; qce_setup_ce_sps_data(pce_dev); qce_disable_clk(pce_dev); return pce_dev; err: qce_disable_clk(pce_dev); err_enable_clk: __qce_deinit_clk(pce_dev); err_mem: if (pce_dev->coh_vmem) dma_free_coherent(pce_dev->pdev, pce_dev->memsize, pce_dev->coh_vmem, pce_dev->coh_pmem); err_iobase: if (pce_dev->iobase) iounmap(pce_dev->iobase); err_pce_dev: kfree(pce_dev); return NULL; } EXPORT_SYMBOL(qce_open); /* crypto engine close function. 
*/ int qce_close(void *handle) { struct qce_device *pce_dev = (struct qce_device *) handle; if (handle == NULL) return -ENODEV; qce_enable_clk(pce_dev); qce_sps_exit(pce_dev); if (pce_dev->iobase) iounmap(pce_dev->iobase); if (pce_dev->coh_vmem) dma_free_coherent(pce_dev->pdev, pce_dev->memsize, pce_dev->coh_vmem, pce_dev->coh_pmem); qce_disable_clk(pce_dev); __qce_deinit_clk(pce_dev); kfree(handle); return 0; } EXPORT_SYMBOL(qce_close); #define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\ 1 << CRYPTO_ENCR_KASUMI_SEL |\ 1 << CRYPTO_AUTH_SNOW3G_SEL |\ 1 << CRYPTO_AUTH_KASUMI_SEL) int qce_hw_support(void *handle, struct ce_hw_support *ce_support) { struct qce_device *pce_dev = (struct qce_device *)handle; if (ce_support == NULL) return -EINVAL; ce_support->sha1_hmac_20 = false; ce_support->sha1_hmac = false; ce_support->sha256_hmac = false; ce_support->sha_hmac = true; ce_support->cmac = true; ce_support->aes_key_192 = false; ce_support->aes_xts = true; if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK) ce_support->ota = true; else ce_support->ota = false; ce_support->bam = true; ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false; ce_support->hw_key = pce_dev->support_hw_key; ce_support->aes_ccm = true; ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res; if (pce_dev->ce_sps.minor_version) ce_support->aligned_only = false; else ce_support->aligned_only = true; ce_support->use_sw_aes_cbc_ecb_ctr_algo = pce_dev->use_sw_aes_cbc_ecb_ctr_algo; ce_support->use_sw_aead_algo = pce_dev->use_sw_aead_algo; ce_support->use_sw_aes_xts_algo = pce_dev->use_sw_aes_xts_algo; ce_support->use_sw_ahash_algo = pce_dev->use_sw_ahash_algo; ce_support->use_sw_hmac_algo = pce_dev->use_sw_hmac_algo; ce_support->use_sw_aes_ccm_algo = pce_dev->use_sw_aes_ccm_algo; ce_support->ce_device = pce_dev->ce_sps.ce_device; return 0; } EXPORT_SYMBOL(qce_hw_support); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Crypto Engine driver");
gpl-2.0
raumfeld/linux-am33xx
drivers/regulator/da9055-regulator.c
62
17857
/* * Regulator driver for DA9055 PMIC * * Copyright(c) 2012 Dialog Semiconductor Ltd. * * Author: David Dajun Chen <dchen@diasemi.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/of.h> #include <linux/regulator/of_regulator.h> #include <linux/mfd/da9055/core.h> #include <linux/mfd/da9055/reg.h> #include <linux/mfd/da9055/pdata.h> #define DA9055_MIN_UA 0 #define DA9055_MAX_UA 3 #define DA9055_LDO_MODE_SYNC 0 #define DA9055_LDO_MODE_SLEEP 1 #define DA9055_BUCK_MODE_SLEEP 1 #define DA9055_BUCK_MODE_SYNC 2 #define DA9055_BUCK_MODE_AUTO 3 /* DA9055 REGULATOR IDs */ #define DA9055_ID_BUCK1 0 #define DA9055_ID_BUCK2 1 #define DA9055_ID_LDO1 2 #define DA9055_ID_LDO2 3 #define DA9055_ID_LDO3 4 #define DA9055_ID_LDO4 5 #define DA9055_ID_LDO5 6 #define DA9055_ID_LDO6 7 /* DA9055 BUCK current limit */ static const int da9055_current_limits[] = { 500000, 600000, 700000, 800000 }; struct da9055_conf_reg { int reg; int sel_mask; int en_mask; }; struct da9055_volt_reg { int reg_a; int reg_b; int sl_shift; int v_mask; }; struct da9055_mode_reg { int reg; int mask; int shift; }; struct da9055_regulator_info { struct regulator_desc reg_desc; struct da9055_conf_reg conf; struct da9055_volt_reg volt; struct da9055_mode_reg mode; }; struct da9055_regulator { struct da9055 *da9055; struct da9055_regulator_info *info; struct regulator_dev *rdev; enum gpio_select reg_rselect; }; static unsigned int da9055_buck_get_mode(struct regulator_dev *rdev) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); struct 
da9055_regulator_info *info = regulator->info; int ret, mode = 0; ret = da9055_reg_read(regulator->da9055, info->mode.reg); if (ret < 0) return ret; switch ((ret & info->mode.mask) >> info->mode.shift) { case DA9055_BUCK_MODE_SYNC: mode = REGULATOR_MODE_FAST; break; case DA9055_BUCK_MODE_AUTO: mode = REGULATOR_MODE_NORMAL; break; case DA9055_BUCK_MODE_SLEEP: mode = REGULATOR_MODE_STANDBY; break; } return mode; } static int da9055_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); struct da9055_regulator_info *info = regulator->info; int val = 0; switch (mode) { case REGULATOR_MODE_FAST: val = DA9055_BUCK_MODE_SYNC << info->mode.shift; break; case REGULATOR_MODE_NORMAL: val = DA9055_BUCK_MODE_AUTO << info->mode.shift; break; case REGULATOR_MODE_STANDBY: val = DA9055_BUCK_MODE_SLEEP << info->mode.shift; break; } return da9055_reg_update(regulator->da9055, info->mode.reg, info->mode.mask, val); } static unsigned int da9055_ldo_get_mode(struct regulator_dev *rdev) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); struct da9055_regulator_info *info = regulator->info; int ret; ret = da9055_reg_read(regulator->da9055, info->volt.reg_b); if (ret < 0) return ret; if (ret >> info->volt.sl_shift) return REGULATOR_MODE_STANDBY; else return REGULATOR_MODE_NORMAL; } static int da9055_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); struct da9055_regulator_info *info = regulator->info; struct da9055_volt_reg volt = info->volt; int val = 0; switch (mode) { case REGULATOR_MODE_NORMAL: case REGULATOR_MODE_FAST: val = DA9055_LDO_MODE_SYNC; break; case REGULATOR_MODE_STANDBY: val = DA9055_LDO_MODE_SLEEP; break; } return da9055_reg_update(regulator->da9055, volt.reg_b, 1 << volt.sl_shift, val << volt.sl_shift); } static int da9055_buck_get_current_limit(struct regulator_dev *rdev) { struct da9055_regulator *regulator = 
rdev_get_drvdata(rdev); struct da9055_regulator_info *info = regulator->info; int ret; ret = da9055_reg_read(regulator->da9055, DA9055_REG_BUCK_LIM); if (ret < 0) return ret; ret &= info->mode.mask; return da9055_current_limits[ret >> info->mode.shift]; } static int da9055_buck_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); struct da9055_regulator_info *info = regulator->info; int i; for (i = ARRAY_SIZE(da9055_current_limits) - 1; i >= 0; i--) { if ((min_uA <= da9055_current_limits[i]) && (da9055_current_limits[i] <= max_uA)) return da9055_reg_update(regulator->da9055, DA9055_REG_BUCK_LIM, info->mode.mask, i << info->mode.shift); } return -EINVAL; } static int da9055_regulator_get_voltage_sel(struct regulator_dev *rdev) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); struct da9055_regulator_info *info = regulator->info; struct da9055_volt_reg volt = info->volt; int ret, sel; /* * There are two voltage register set A & B for voltage ramping but * either one of then can be active therefore we first determine * the active register set. */ ret = da9055_reg_read(regulator->da9055, info->conf.reg); if (ret < 0) return ret; ret &= info->conf.sel_mask; /* Get the voltage for the active register set A/B */ if (ret == DA9055_REGUALTOR_SET_A) ret = da9055_reg_read(regulator->da9055, volt.reg_a); else ret = da9055_reg_read(regulator->da9055, volt.reg_b); if (ret < 0) return ret; sel = (ret & volt.v_mask); return sel; } static int da9055_regulator_set_voltage_sel(struct regulator_dev *rdev, unsigned int selector) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); struct da9055_regulator_info *info = regulator->info; int ret; /* * Regulator register set A/B is not selected through GPIO therefore * we use default register set A for voltage ramping. 
*/ if (regulator->reg_rselect == NO_GPIO) { /* Select register set A */ ret = da9055_reg_update(regulator->da9055, info->conf.reg, info->conf.sel_mask, DA9055_SEL_REG_A); if (ret < 0) return ret; /* Set the voltage */ return da9055_reg_update(regulator->da9055, info->volt.reg_a, info->volt.v_mask, selector); } /* * Here regulator register set A/B is selected through GPIO. * Therefore we first determine the selected register set A/B and * then set the desired voltage for that register set A/B. */ ret = da9055_reg_read(regulator->da9055, info->conf.reg); if (ret < 0) return ret; ret &= info->conf.sel_mask; /* Set the voltage */ if (ret == DA9055_REGUALTOR_SET_A) return da9055_reg_update(regulator->da9055, info->volt.reg_a, info->volt.v_mask, selector); else return da9055_reg_update(regulator->da9055, info->volt.reg_b, info->volt.v_mask, selector); } static int da9055_regulator_set_suspend_voltage(struct regulator_dev *rdev, int uV) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); struct da9055_regulator_info *info = regulator->info; int ret; /* Select register set B for suspend voltage ramping. */ if (regulator->reg_rselect == NO_GPIO) { ret = da9055_reg_update(regulator->da9055, info->conf.reg, info->conf.sel_mask, DA9055_SEL_REG_B); if (ret < 0) return ret; } ret = regulator_map_voltage_linear(rdev, uV, uV); if (ret < 0) return ret; return da9055_reg_update(regulator->da9055, info->volt.reg_b, info->volt.v_mask, ret); } static int da9055_suspend_enable(struct regulator_dev *rdev) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); struct da9055_regulator_info *info = regulator->info; /* Select register set B for voltage ramping. 
*/ if (regulator->reg_rselect == NO_GPIO) return da9055_reg_update(regulator->da9055, info->conf.reg, info->conf.sel_mask, DA9055_SEL_REG_B); else return 0; } static int da9055_suspend_disable(struct regulator_dev *rdev) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); struct da9055_regulator_info *info = regulator->info; /* Diselect register set B. */ if (regulator->reg_rselect == NO_GPIO) return da9055_reg_update(regulator->da9055, info->conf.reg, info->conf.sel_mask, DA9055_SEL_REG_A); else return 0; } static const struct regulator_ops da9055_buck_ops = { .get_mode = da9055_buck_get_mode, .set_mode = da9055_buck_set_mode, .get_current_limit = da9055_buck_get_current_limit, .set_current_limit = da9055_buck_set_current_limit, .get_voltage_sel = da9055_regulator_get_voltage_sel, .set_voltage_sel = da9055_regulator_set_voltage_sel, .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .set_suspend_voltage = da9055_regulator_set_suspend_voltage, .set_suspend_enable = da9055_suspend_enable, .set_suspend_disable = da9055_suspend_disable, .set_suspend_mode = da9055_buck_set_mode, }; static const struct regulator_ops da9055_ldo_ops = { .get_mode = da9055_ldo_get_mode, .set_mode = da9055_ldo_set_mode, .get_voltage_sel = da9055_regulator_get_voltage_sel, .set_voltage_sel = da9055_regulator_set_voltage_sel, .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .set_suspend_voltage = da9055_regulator_set_suspend_voltage, .set_suspend_enable = da9055_suspend_enable, .set_suspend_disable = da9055_suspend_disable, .set_suspend_mode = da9055_ldo_set_mode, }; #define DA9055_LDO(_id, step, min, max, vbits, voffset) \ {\ .reg_desc = {\ .name = #_id,\ .ops = 
&da9055_ldo_ops,\ .type = REGULATOR_VOLTAGE,\ .id = DA9055_ID_##_id,\ .n_voltages = (max - min) / step + 1 + (voffset), \ .enable_reg = DA9055_REG_BCORE_CONT + DA9055_ID_##_id, \ .enable_mask = 1, \ .min_uV = (min) * 1000,\ .uV_step = (step) * 1000,\ .linear_min_sel = (voffset),\ .owner = THIS_MODULE,\ },\ .conf = {\ .reg = DA9055_REG_BCORE_CONT + DA9055_ID_##_id, \ .sel_mask = (1 << 4),\ .en_mask = 1,\ },\ .volt = {\ .reg_a = DA9055_REG_VBCORE_A + DA9055_ID_##_id, \ .reg_b = DA9055_REG_VBCORE_B + DA9055_ID_##_id, \ .sl_shift = 7,\ .v_mask = (1 << (vbits)) - 1,\ },\ } #define DA9055_BUCK(_id, step, min, max, vbits, voffset, mbits, sbits) \ {\ .reg_desc = {\ .name = #_id,\ .ops = &da9055_buck_ops,\ .type = REGULATOR_VOLTAGE,\ .id = DA9055_ID_##_id,\ .n_voltages = (max - min) / step + 1 + (voffset), \ .enable_reg = DA9055_REG_BCORE_CONT + DA9055_ID_##_id, \ .enable_mask = 1,\ .min_uV = (min) * 1000,\ .uV_step = (step) * 1000,\ .linear_min_sel = (voffset),\ .owner = THIS_MODULE,\ },\ .conf = {\ .reg = DA9055_REG_BCORE_CONT + DA9055_ID_##_id, \ .sel_mask = (1 << 4),\ .en_mask = 1,\ },\ .volt = {\ .reg_a = DA9055_REG_VBCORE_A + DA9055_ID_##_id, \ .reg_b = DA9055_REG_VBCORE_B + DA9055_ID_##_id, \ .sl_shift = 7,\ .v_mask = (1 << (vbits)) - 1,\ },\ .mode = {\ .reg = DA9055_REG_BCORE_MODE,\ .mask = (mbits),\ .shift = (sbits),\ },\ } static struct da9055_regulator_info da9055_regulator_info[] = { DA9055_BUCK(BUCK1, 25, 725, 2075, 6, 9, 0xc, 2), DA9055_BUCK(BUCK2, 25, 925, 2500, 6, 0, 3, 0), DA9055_LDO(LDO1, 50, 900, 3300, 6, 2), DA9055_LDO(LDO2, 50, 900, 3300, 6, 3), DA9055_LDO(LDO3, 50, 900, 3300, 6, 2), DA9055_LDO(LDO4, 50, 900, 3300, 6, 2), DA9055_LDO(LDO5, 50, 900, 2750, 6, 2), DA9055_LDO(LDO6, 20, 900, 3300, 7, 0), }; /* * Configures regulator to be controlled either through GPIO 1 or 2. * GPIO can control regulator state and/or select the regulator register * set A/B for voltage ramping. 
*/ static int da9055_gpio_init(struct da9055_regulator *regulator, struct regulator_config *config, struct da9055_pdata *pdata, int id) { struct da9055_regulator_info *info = regulator->info; int ret = 0; if (!pdata) return 0; if (pdata->gpio_ren && pdata->gpio_ren[id]) { char name[18]; int gpio_mux = pdata->gpio_ren[id]; config->ena_gpiod = pdata->ena_gpiods[id]; config->ena_gpio_invert = 1; /* * GPI pin is muxed with regulator to control the * regulator state. */ sprintf(name, "DA9055 GPI %d", gpio_mux); ret = devm_gpio_request_one(config->dev, gpio_mux, GPIOF_DIR_IN, name); if (ret < 0) goto err; /* * Let the regulator know that its state is controlled * through GPI. */ ret = da9055_reg_update(regulator->da9055, info->conf.reg, DA9055_E_GPI_MASK, pdata->reg_ren[id] << DA9055_E_GPI_SHIFT); if (ret < 0) goto err; } if (pdata->gpio_rsel && pdata->gpio_rsel[id]) { char name[18]; int gpio_mux = pdata->gpio_rsel[id]; regulator->reg_rselect = pdata->reg_rsel[id]; /* * GPI pin is muxed with regulator to select the * regulator register set A/B for voltage ramping. */ sprintf(name, "DA9055 GPI %d", gpio_mux); ret = devm_gpio_request_one(config->dev, gpio_mux, GPIOF_DIR_IN, name); if (ret < 0) goto err; /* * Let the regulator know that its register set A/B * will be selected through GPI for voltage ramping. 
*/ ret = da9055_reg_update(regulator->da9055, info->conf.reg, DA9055_V_GPI_MASK, pdata->reg_rsel[id] << DA9055_V_GPI_SHIFT); } err: return ret; } static irqreturn_t da9055_ldo5_6_oc_irq(int irq, void *data) { struct da9055_regulator *regulator = data; regulator_notifier_call_chain(regulator->rdev, REGULATOR_EVENT_OVER_CURRENT, NULL); return IRQ_HANDLED; } static inline struct da9055_regulator_info *find_regulator_info(int id) { struct da9055_regulator_info *info; int i; for (i = 0; i < ARRAY_SIZE(da9055_regulator_info); i++) { info = &da9055_regulator_info[i]; if (info->reg_desc.id == id) return info; } return NULL; } #ifdef CONFIG_OF static struct of_regulator_match da9055_reg_matches[] = { { .name = "BUCK1", }, { .name = "BUCK2", }, { .name = "LDO1", }, { .name = "LDO2", }, { .name = "LDO3", }, { .name = "LDO4", }, { .name = "LDO5", }, { .name = "LDO6", }, }; static int da9055_regulator_dt_init(struct platform_device *pdev, struct da9055_regulator *regulator, struct regulator_config *config, int regid) { struct device_node *nproot, *np; int ret; nproot = of_node_get(pdev->dev.parent->of_node); if (!nproot) return -ENODEV; np = of_get_child_by_name(nproot, "regulators"); if (!np) return -ENODEV; ret = of_regulator_match(&pdev->dev, np, &da9055_reg_matches[regid], 1); of_node_put(nproot); if (ret < 0) { dev_err(&pdev->dev, "Error matching regulator: %d\n", ret); return ret; } config->init_data = da9055_reg_matches[regid].init_data; config->of_node = da9055_reg_matches[regid].of_node; if (!config->of_node) return -ENODEV; return 0; } #else static inline int da9055_regulator_dt_init(struct platform_device *pdev, struct da9055_regulator *regulator, struct regulator_config *config, int regid) { return -ENODEV; } #endif /* CONFIG_OF */ static int da9055_regulator_probe(struct platform_device *pdev) { struct regulator_config config = { }; struct da9055_regulator *regulator; struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent); struct da9055_pdata *pdata = 
dev_get_platdata(da9055->dev); int ret, irq; regulator = devm_kzalloc(&pdev->dev, sizeof(struct da9055_regulator), GFP_KERNEL); if (!regulator) return -ENOMEM; regulator->info = find_regulator_info(pdev->id); if (regulator->info == NULL) { dev_err(&pdev->dev, "invalid regulator ID specified\n"); return -EINVAL; } regulator->da9055 = da9055; config.dev = &pdev->dev; config.driver_data = regulator; config.regmap = da9055->regmap; if (pdata && pdata->regulators) { config.init_data = pdata->regulators[pdev->id]; } else { ret = da9055_regulator_dt_init(pdev, regulator, &config, pdev->id); if (ret < 0) return ret; } ret = da9055_gpio_init(regulator, &config, pdata, pdev->id); if (ret < 0) return ret; regulator->rdev = devm_regulator_register(&pdev->dev, &regulator->info->reg_desc, &config); if (IS_ERR(regulator->rdev)) { dev_err(&pdev->dev, "Failed to register regulator %s\n", regulator->info->reg_desc.name); return PTR_ERR(regulator->rdev); } /* Only LDO 5 and 6 has got the over current interrupt */ if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) { irq = platform_get_irq_byname(pdev, "REGULATOR"); if (irq < 0) return irq; ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, da9055_ldo5_6_oc_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_PROBE_SHARED, pdev->name, regulator); if (ret != 0) { if (ret != -EBUSY) { dev_err(&pdev->dev, "Failed to request Regulator IRQ %d: %d\n", irq, ret); return ret; } } } platform_set_drvdata(pdev, regulator); return 0; } static struct platform_driver da9055_regulator_driver = { .probe = da9055_regulator_probe, .driver = { .name = "da9055-regulator", }, }; static int __init da9055_regulator_init(void) { return platform_driver_register(&da9055_regulator_driver); } subsys_initcall(da9055_regulator_init); static void __exit da9055_regulator_exit(void) { platform_driver_unregister(&da9055_regulator_driver); } module_exit(da9055_regulator_exit); MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>"); MODULE_DESCRIPTION("Power 
Regulator driver for Dialog DA9055 PMIC"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:da9055-regulator");
gpl-2.0
ambikadash/linux-fqt
drivers/video/backlight/ili922x.c
318
14878
/* * (C) Copyright 2008 * Stefano Babic, DENX Software Engineering, sbabic@denx.de. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This driver implements a lcd device for the ILITEK 922x display * controller. The interface to the display is SPI and the display's * memory is cyclically updated over the RGB interface. */ #include <linux/fb.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/lcd.h> #include <linux/module.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/string.h> /* Register offset, see manual section 8.2 */ #define REG_START_OSCILLATION 0x00 #define REG_DRIVER_CODE_READ 0x00 #define REG_DRIVER_OUTPUT_CONTROL 0x01 #define REG_LCD_AC_DRIVEING_CONTROL 0x02 #define REG_ENTRY_MODE 0x03 #define REG_COMPARE_1 0x04 #define REG_COMPARE_2 0x05 #define REG_DISPLAY_CONTROL_1 0x07 #define REG_DISPLAY_CONTROL_2 0x08 #define REG_DISPLAY_CONTROL_3 0x09 #define REG_FRAME_CYCLE_CONTROL 0x0B #define REG_EXT_INTF_CONTROL 0x0C #define REG_POWER_CONTROL_1 0x10 #define REG_POWER_CONTROL_2 0x11 #define REG_POWER_CONTROL_3 0x12 #define REG_POWER_CONTROL_4 0x13 #define REG_RAM_ADDRESS_SET 0x21 #define REG_WRITE_DATA_TO_GRAM 0x22 #define REG_RAM_WRITE_MASK1 0x23 #define REG_RAM_WRITE_MASK2 0x24 #define REG_GAMMA_CONTROL_1 0x30 #define REG_GAMMA_CONTROL_2 0x31 #define REG_GAMMA_CONTROL_3 0x32 #define REG_GAMMA_CONTROL_4 0x33 #define REG_GAMMA_CONTROL_5 0x34 #define REG_GAMMA_CONTROL_6 0x35 #define REG_GAMMA_CONTROL_7 0x36 #define REG_GAMMA_CONTROL_8 0x37 #define REG_GAMMA_CONTROL_9 0x38 #define REG_GAMMA_CONTROL_10 0x39 #define REG_GATE_SCAN_CONTROL 0x40 #define REG_VERT_SCROLL_CONTROL 0x41 #define REG_FIRST_SCREEN_DRIVE_POS 0x42 #define REG_SECOND_SCREEN_DRIVE_POS 
0x43 #define REG_RAM_ADDR_POS_H 0x44 #define REG_RAM_ADDR_POS_V 0x45 #define REG_OSCILLATOR_CONTROL 0x4F #define REG_GPIO 0x60 #define REG_OTP_VCM_PROGRAMMING 0x61 #define REG_OTP_VCM_STATUS_ENABLE 0x62 #define REG_OTP_PROGRAMMING_ID_KEY 0x65 /* * maximum frequency for register access * (not for the GRAM access) */ #define ILITEK_MAX_FREQ_REG 4000000 /* * Device ID as found in the datasheet (supports 9221 and 9222) */ #define ILITEK_DEVICE_ID 0x9220 #define ILITEK_DEVICE_ID_MASK 0xFFF0 /* Last two bits in the START BYTE */ #define START_RS_INDEX 0 #define START_RS_REG 1 #define START_RW_WRITE 0 #define START_RW_READ 1 /** * START_BYTE(id, rs, rw) * * Set the start byte according to the required operation. * The start byte is defined as: * ---------------------------------- * | 0 | 1 | 1 | 1 | 0 | ID | RS | RW | * ---------------------------------- * @id: display's id as set by the manufacturer * @rs: operation type bit, one of: * - START_RS_INDEX set the index register * - START_RS_REG write/read registers/GRAM * @rw: read/write operation * - START_RW_WRITE write * - START_RW_READ read */ #define START_BYTE(id, rs, rw) \ (0x70 | (((id) & 0x01) << 2) | (((rs) & 0x01) << 1) | ((rw) & 0x01)) /** * CHECK_FREQ_REG(spi_device s, spi_transfer x) - Check the frequency * for the SPI transfer. According to the datasheet, the controller * accept higher frequency for the GRAM transfer, but it requires * lower frequency when the registers are read/written. * The macro sets the frequency in the spi_transfer structure if * the frequency exceeds the maximum value. */ #define CHECK_FREQ_REG(s, x) \ do { \ if (s->max_speed_hz > ILITEK_MAX_FREQ_REG) \ ((struct spi_transfer *)x)->speed_hz = \ ILITEK_MAX_FREQ_REG; \ } while (0) #define CMD_BUFSIZE 16 #define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) #define set_tx_byte(b) (tx_invert ? 
~(b) : b) /** * ili922x_id - id as set by manufacturer */ static int ili922x_id = 1; module_param(ili922x_id, int, 0); static int tx_invert; module_param(tx_invert, int, 0); /** * driver's private structure */ struct ili922x { struct spi_device *spi; struct lcd_device *ld; int power; }; /** * ili922x_read_status - read status register from display * @spi: spi device * @rs: output value */ static int ili922x_read_status(struct spi_device *spi, u16 *rs) { struct spi_message msg; struct spi_transfer xfer; unsigned char tbuf[CMD_BUFSIZE]; unsigned char rbuf[CMD_BUFSIZE]; int ret, i; memset(&xfer, 0, sizeof(struct spi_transfer)); spi_message_init(&msg); xfer.tx_buf = tbuf; xfer.rx_buf = rbuf; xfer.cs_change = 1; CHECK_FREQ_REG(spi, &xfer); tbuf[0] = set_tx_byte(START_BYTE(ili922x_id, START_RS_INDEX, START_RW_READ)); /* * we need 4-byte xfer here due to invalid dummy byte * received after start byte */ for (i = 1; i < 4; i++) tbuf[i] = set_tx_byte(0); /* dummy */ xfer.bits_per_word = 8; xfer.len = 4; spi_message_add_tail(&xfer, &msg); ret = spi_sync(spi, &msg); if (ret < 0) { dev_dbg(&spi->dev, "Error sending SPI message 0x%x", ret); return ret; } *rs = (rbuf[2] << 8) + rbuf[3]; return 0; } /** * ili922x_read - read register from display * @spi: spi device * @reg: offset of the register to be read * @rx: output value */ static int ili922x_read(struct spi_device *spi, u8 reg, u16 *rx) { struct spi_message msg; struct spi_transfer xfer_regindex, xfer_regvalue; unsigned char tbuf[CMD_BUFSIZE]; unsigned char rbuf[CMD_BUFSIZE]; int ret, len = 0, send_bytes; memset(&xfer_regindex, 0, sizeof(struct spi_transfer)); memset(&xfer_regvalue, 0, sizeof(struct spi_transfer)); spi_message_init(&msg); xfer_regindex.tx_buf = tbuf; xfer_regindex.rx_buf = rbuf; xfer_regindex.cs_change = 1; CHECK_FREQ_REG(spi, &xfer_regindex); tbuf[0] = set_tx_byte(START_BYTE(ili922x_id, START_RS_INDEX, START_RW_WRITE)); tbuf[1] = set_tx_byte(0); tbuf[2] = set_tx_byte(reg); xfer_regindex.bits_per_word = 8; 
len = xfer_regindex.len = 3; spi_message_add_tail(&xfer_regindex, &msg); send_bytes = len; tbuf[len++] = set_tx_byte(START_BYTE(ili922x_id, START_RS_REG, START_RW_READ)); tbuf[len++] = set_tx_byte(0); tbuf[len] = set_tx_byte(0); xfer_regvalue.cs_change = 1; xfer_regvalue.len = 3; xfer_regvalue.tx_buf = &tbuf[send_bytes]; xfer_regvalue.rx_buf = &rbuf[send_bytes]; CHECK_FREQ_REG(spi, &xfer_regvalue); spi_message_add_tail(&xfer_regvalue, &msg); ret = spi_sync(spi, &msg); if (ret < 0) { dev_dbg(&spi->dev, "Error sending SPI message 0x%x", ret); return ret; } *rx = (rbuf[1 + send_bytes] << 8) + rbuf[2 + send_bytes]; return 0; } /** * ili922x_write - write a controller register * @spi: struct spi_device * * @reg: offset of the register to be written * @value: value to be written */ static int ili922x_write(struct spi_device *spi, u8 reg, u16 value) { struct spi_message msg; struct spi_transfer xfer_regindex, xfer_regvalue; unsigned char tbuf[CMD_BUFSIZE]; unsigned char rbuf[CMD_BUFSIZE]; int ret, len = 0; memset(&xfer_regindex, 0, sizeof(struct spi_transfer)); memset(&xfer_regvalue, 0, sizeof(struct spi_transfer)); spi_message_init(&msg); xfer_regindex.tx_buf = tbuf; xfer_regindex.rx_buf = rbuf; xfer_regindex.cs_change = 1; CHECK_FREQ_REG(spi, &xfer_regindex); tbuf[0] = set_tx_byte(START_BYTE(ili922x_id, START_RS_INDEX, START_RW_WRITE)); tbuf[1] = set_tx_byte(0); tbuf[2] = set_tx_byte(reg); xfer_regindex.bits_per_word = 8; xfer_regindex.len = 3; spi_message_add_tail(&xfer_regindex, &msg); ret = spi_sync(spi, &msg); spi_message_init(&msg); len = 0; tbuf[0] = set_tx_byte(START_BYTE(ili922x_id, START_RS_REG, START_RW_WRITE)); tbuf[1] = set_tx_byte((value & 0xFF00) >> 8); tbuf[2] = set_tx_byte(value & 0x00FF); xfer_regvalue.cs_change = 1; xfer_regvalue.len = 3; xfer_regvalue.tx_buf = tbuf; xfer_regvalue.rx_buf = rbuf; CHECK_FREQ_REG(spi, &xfer_regvalue); spi_message_add_tail(&xfer_regvalue, &msg); ret = spi_sync(spi, &msg); if (ret < 0) { dev_err(&spi->dev, "Error sending 
SPI message 0x%x", ret); return ret; } return 0; } #ifdef DEBUG /** * ili922x_reg_dump - dump all registers */ static void ili922x_reg_dump(struct spi_device *spi) { u8 reg; u16 rx; dev_dbg(&spi->dev, "ILI922x configuration registers:\n"); for (reg = REG_START_OSCILLATION; reg <= REG_OTP_PROGRAMMING_ID_KEY; reg++) { ili922x_read(spi, reg, &rx); dev_dbg(&spi->dev, "reg @ 0x%02X: 0x%04X\n", reg, rx); } } #else static inline void ili922x_reg_dump(struct spi_device *spi) {} #endif /** * set_write_to_gram_reg - initialize the display to write the GRAM * @spi: spi device */ static void set_write_to_gram_reg(struct spi_device *spi) { struct spi_message msg; struct spi_transfer xfer; unsigned char tbuf[CMD_BUFSIZE]; memset(&xfer, 0, sizeof(struct spi_transfer)); spi_message_init(&msg); xfer.tx_buf = tbuf; xfer.rx_buf = NULL; xfer.cs_change = 1; tbuf[0] = START_BYTE(ili922x_id, START_RS_INDEX, START_RW_WRITE); tbuf[1] = 0; tbuf[2] = REG_WRITE_DATA_TO_GRAM; xfer.bits_per_word = 8; xfer.len = 3; spi_message_add_tail(&xfer, &msg); spi_sync(spi, &msg); } /** * ili922x_poweron - turn the display on * @spi: spi device * * The sequence to turn on the display is taken from * the datasheet and/or the example code provided by the * manufacturer. 
*/ static int ili922x_poweron(struct spi_device *spi) { int ret; /* Power on */ ret = ili922x_write(spi, REG_POWER_CONTROL_1, 0x0000); usleep_range(10000, 10500); ret += ili922x_write(spi, REG_POWER_CONTROL_2, 0x0000); ret += ili922x_write(spi, REG_POWER_CONTROL_3, 0x0000); msleep(40); ret += ili922x_write(spi, REG_POWER_CONTROL_4, 0x0000); msleep(40); /* register 0x56 is not documented in the datasheet */ ret += ili922x_write(spi, 0x56, 0x080F); ret += ili922x_write(spi, REG_POWER_CONTROL_1, 0x4240); usleep_range(10000, 10500); ret += ili922x_write(spi, REG_POWER_CONTROL_2, 0x0000); ret += ili922x_write(spi, REG_POWER_CONTROL_3, 0x0014); msleep(40); ret += ili922x_write(spi, REG_POWER_CONTROL_4, 0x1319); msleep(40); return ret; } /** * ili922x_poweroff - turn the display off * @spi: spi device */ static int ili922x_poweroff(struct spi_device *spi) { int ret; /* Power off */ ret = ili922x_write(spi, REG_POWER_CONTROL_1, 0x0000); usleep_range(10000, 10500); ret += ili922x_write(spi, REG_POWER_CONTROL_2, 0x0000); ret += ili922x_write(spi, REG_POWER_CONTROL_3, 0x0000); msleep(40); ret += ili922x_write(spi, REG_POWER_CONTROL_4, 0x0000); msleep(40); return ret; } /** * ili922x_display_init - initialize the display by setting * the configuration registers * @spi: spi device */ static void ili922x_display_init(struct spi_device *spi) { ili922x_write(spi, REG_START_OSCILLATION, 1); usleep_range(10000, 10500); ili922x_write(spi, REG_DRIVER_OUTPUT_CONTROL, 0x691B); ili922x_write(spi, REG_LCD_AC_DRIVEING_CONTROL, 0x0700); ili922x_write(spi, REG_ENTRY_MODE, 0x1030); ili922x_write(spi, REG_COMPARE_1, 0x0000); ili922x_write(spi, REG_COMPARE_2, 0x0000); ili922x_write(spi, REG_DISPLAY_CONTROL_1, 0x0037); ili922x_write(spi, REG_DISPLAY_CONTROL_2, 0x0202); ili922x_write(spi, REG_DISPLAY_CONTROL_3, 0x0000); ili922x_write(spi, REG_FRAME_CYCLE_CONTROL, 0x0000); /* Set RGB interface */ ili922x_write(spi, REG_EXT_INTF_CONTROL, 0x0110); ili922x_poweron(spi); ili922x_write(spi, 
REG_GAMMA_CONTROL_1, 0x0302); ili922x_write(spi, REG_GAMMA_CONTROL_2, 0x0407); ili922x_write(spi, REG_GAMMA_CONTROL_3, 0x0304); ili922x_write(spi, REG_GAMMA_CONTROL_4, 0x0203); ili922x_write(spi, REG_GAMMA_CONTROL_5, 0x0706); ili922x_write(spi, REG_GAMMA_CONTROL_6, 0x0407); ili922x_write(spi, REG_GAMMA_CONTROL_7, 0x0706); ili922x_write(spi, REG_GAMMA_CONTROL_8, 0x0000); ili922x_write(spi, REG_GAMMA_CONTROL_9, 0x0C06); ili922x_write(spi, REG_GAMMA_CONTROL_10, 0x0F00); ili922x_write(spi, REG_RAM_ADDRESS_SET, 0x0000); ili922x_write(spi, REG_GATE_SCAN_CONTROL, 0x0000); ili922x_write(spi, REG_VERT_SCROLL_CONTROL, 0x0000); ili922x_write(spi, REG_FIRST_SCREEN_DRIVE_POS, 0xDB00); ili922x_write(spi, REG_SECOND_SCREEN_DRIVE_POS, 0xDB00); ili922x_write(spi, REG_RAM_ADDR_POS_H, 0xAF00); ili922x_write(spi, REG_RAM_ADDR_POS_V, 0xDB00); ili922x_reg_dump(spi); set_write_to_gram_reg(spi); } static int ili922x_lcd_power(struct ili922x *lcd, int power) { int ret = 0; if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power)) ret = ili922x_poweron(lcd->spi); else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power)) ret = ili922x_poweroff(lcd->spi); if (!ret) lcd->power = power; return ret; } static int ili922x_set_power(struct lcd_device *ld, int power) { struct ili922x *ili = lcd_get_data(ld); return ili922x_lcd_power(ili, power); } static int ili922x_get_power(struct lcd_device *ld) { struct ili922x *ili = lcd_get_data(ld); return ili->power; } static struct lcd_ops ili922x_ops = { .get_power = ili922x_get_power, .set_power = ili922x_set_power, }; static int ili922x_probe(struct spi_device *spi) { struct ili922x *ili; struct lcd_device *lcd; int ret; u16 reg = 0; ili = devm_kzalloc(&spi->dev, sizeof(*ili), GFP_KERNEL); if (!ili) { dev_err(&spi->dev, "cannot alloc priv data\n"); return -ENOMEM; } ili->spi = spi; spi_set_drvdata(spi, ili); /* check if the device is connected */ ret = ili922x_read(spi, REG_DRIVER_CODE_READ, &reg); if (ret || ((reg & ILITEK_DEVICE_ID_MASK) != ILITEK_DEVICE_ID)) 
{ dev_err(&spi->dev, "no LCD found: Chip ID 0x%x, ret %d\n", reg, ret); return -ENODEV; } else { dev_info(&spi->dev, "ILI%x found, SPI freq %d, mode %d\n", reg, spi->max_speed_hz, spi->mode); } ret = ili922x_read_status(spi, &reg); if (ret) { dev_err(&spi->dev, "reading RS failed...\n"); return ret; } else dev_dbg(&spi->dev, "status: 0x%x\n", reg); ili922x_display_init(spi); ili->power = FB_BLANK_POWERDOWN; lcd = devm_lcd_device_register(&spi->dev, "ili922xlcd", &spi->dev, ili, &ili922x_ops); if (IS_ERR(lcd)) { dev_err(&spi->dev, "cannot register LCD\n"); return PTR_ERR(lcd); } ili->ld = lcd; spi_set_drvdata(spi, ili); ili922x_lcd_power(ili, FB_BLANK_UNBLANK); return 0; } static int ili922x_remove(struct spi_device *spi) { ili922x_poweroff(spi); return 0; } static struct spi_driver ili922x_driver = { .driver = { .name = "ili922x", .owner = THIS_MODULE, }, .probe = ili922x_probe, .remove = ili922x_remove, }; module_spi_driver(ili922x_driver); MODULE_AUTHOR("Stefano Babic <sbabic@denx.de>"); MODULE_DESCRIPTION("ILI9221/9222 LCD driver"); MODULE_LICENSE("GPL"); MODULE_PARM_DESC(ili922x_id, "set controller identifier (default=1)"); MODULE_PARM_DESC(tx_invert, "invert bytes before sending");
gpl-2.0
liwentao0705/openwrt-build
target/linux/generic/files/drivers/net/phy/ip17xx.c
574
35090
/* * ip17xx.c: Swconfig configuration for IC+ IP17xx switch family * * Copyright (C) 2008 Patrick Horn <patrick.horn@gmail.com> * Copyright (C) 2008, 2010 Martin Mares <mj@ucw.cz> * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/list.h> #include <linux/skbuff.h> #include <linux/mii.h> #include <linux/phy.h> #include <linux/delay.h> #include <linux/switch.h> #include <linux/device.h> #define MAX_VLANS 16 #define MAX_PORTS 9 #undef DUMP_MII_IO typedef struct ip17xx_reg { u16 p; // phy u16 m; // mii } reg; typedef char bitnum; #define NOTSUPPORTED {-1,-1} #define REG_SUPP(x) (((x).m != ((u16)-1)) && ((x).p != (u16)-1)) struct ip17xx_state; /*********** CONSTANTS ***********/ struct register_mappings { char *NAME; u16 MODEL_NO; // Compare to bits 4-9 of MII register 0,3. bitnum NUM_PORTS; bitnum CPU_PORT; /* The default VLAN for each port. Default: 0x0001 for Ports 0,1,2,3 0x0002 for Ports 4,5 */ reg VLAN_DEFAULT_TAG_REG[MAX_PORTS]; /* These ports are tagged. Default: 0x00 */ reg ADD_TAG_REG; reg REMOVE_TAG_REG; bitnum ADD_TAG_BIT[MAX_PORTS]; /* These ports are untagged. Default: 0x00 (i.e. do not alter any VLAN tags...) Maybe set to 0 if user disables VLANs. */ bitnum REMOVE_TAG_BIT[MAX_PORTS]; /* Port M and Port N are on the same VLAN. Default: All ports on all VLANs. */ // Use register {29, 19+N/2} reg VLAN_LOOKUP_REG; // Port 5 uses register {30, 18} but same as odd bits. 
reg VLAN_LOOKUP_REG_5; // in a different register on IP175C. bitnum VLAN_LOOKUP_EVEN_BIT[MAX_PORTS]; bitnum VLAN_LOOKUP_ODD_BIT[MAX_PORTS]; /* This VLAN corresponds to which ports. Default: 0x2f,0x30,0x3f,0x3f... */ reg TAG_VLAN_MASK_REG; bitnum TAG_VLAN_MASK_EVEN_BIT[MAX_PORTS]; bitnum TAG_VLAN_MASK_ODD_BIT[MAX_PORTS]; int RESET_VAL; reg RESET_REG; reg MODE_REG; int MODE_VAL; /* General flags */ reg ROUTER_CONTROL_REG; reg VLAN_CONTROL_REG; bitnum TAG_VLAN_BIT; bitnum ROUTER_EN_BIT; bitnum NUMLAN_GROUPS_MAX; bitnum NUMLAN_GROUPS_BIT; reg MII_REGISTER_EN; bitnum MII_REGISTER_EN_BIT; // set to 1 for 178C, 0 for 175C. bitnum SIMPLE_VLAN_REGISTERS; // 175C has two vlans per register but 178C has only one. // Pointers to functions which manipulate hardware state int (*update_state)(struct ip17xx_state *state); int (*set_vlan_mode)(struct ip17xx_state *state); int (*reset)(struct ip17xx_state *state); }; static int ip175c_update_state(struct ip17xx_state *state); static int ip175c_set_vlan_mode(struct ip17xx_state *state); static int ip175c_reset(struct ip17xx_state *state); static const struct register_mappings IP178C = { .NAME = "IP178C", .MODEL_NO = 0x18, .VLAN_DEFAULT_TAG_REG = { {30,3},{30,4},{30,5},{30,6},{30,7},{30,8}, {30,9},{30,10},{30,11}, }, .ADD_TAG_REG = {30,12}, .ADD_TAG_BIT = {0,1,2,3,4,5,6,7,8}, .REMOVE_TAG_REG = {30,13}, .REMOVE_TAG_BIT = {4,5,6,7,8,9,10,11,12}, .SIMPLE_VLAN_REGISTERS = 1, .VLAN_LOOKUP_REG = {31,0},// +N .VLAN_LOOKUP_REG_5 = NOTSUPPORTED, // not used with SIMPLE_VLAN_REGISTERS .VLAN_LOOKUP_EVEN_BIT = {0,1,2,3,4,5,6,7,8}, .VLAN_LOOKUP_ODD_BIT = {0,1,2,3,4,5,6,7,8}, .TAG_VLAN_MASK_REG = {30,14}, // +N .TAG_VLAN_MASK_EVEN_BIT = {0,1,2,3,4,5,6,7,8}, .TAG_VLAN_MASK_ODD_BIT = {0,1,2,3,4,5,6,7,8}, .RESET_VAL = 0x55AA, .RESET_REG = {30,0}, .MODE_VAL = 0, .MODE_REG = NOTSUPPORTED, .ROUTER_CONTROL_REG = {30,30}, .ROUTER_EN_BIT = 11, .NUMLAN_GROUPS_MAX = 8, .NUMLAN_GROUPS_BIT = 8, // {0-2} .VLAN_CONTROL_REG = {30,13}, .TAG_VLAN_BIT = 3, .CPU_PORT 
= 8, .NUM_PORTS = 9, .MII_REGISTER_EN = NOTSUPPORTED, .update_state = ip175c_update_state, .set_vlan_mode = ip175c_set_vlan_mode, .reset = ip175c_reset, }; static const struct register_mappings IP175C = { .NAME = "IP175C", .MODEL_NO = 0x18, .VLAN_DEFAULT_TAG_REG = { {29,24},{29,25},{29,26},{29,27},{29,28},{29,30}, NOTSUPPORTED,NOTSUPPORTED,NOTSUPPORTED }, .ADD_TAG_REG = {29,23}, .REMOVE_TAG_REG = {29,23}, .ADD_TAG_BIT = {11,12,13,14,15,1,-1,-1,-1}, .REMOVE_TAG_BIT = {6,7,8,9,10,0,-1,-1,-1}, .SIMPLE_VLAN_REGISTERS = 0, .VLAN_LOOKUP_REG = {29,19},// +N/2 .VLAN_LOOKUP_REG_5 = {30,18}, .VLAN_LOOKUP_EVEN_BIT = {8,9,10,11,12,15,-1,-1,-1}, .VLAN_LOOKUP_ODD_BIT = {0,1,2,3,4,7,-1,-1,-1}, .TAG_VLAN_MASK_REG = {30,1}, // +N/2 .TAG_VLAN_MASK_EVEN_BIT = {0,1,2,3,4,5,-1,-1,-1}, .TAG_VLAN_MASK_ODD_BIT = {8,9,10,11,12,13,-1,-1,-1}, .RESET_VAL = 0x175C, .RESET_REG = {30,0}, .MODE_VAL = 0x175C, .MODE_REG = {29,31}, .ROUTER_CONTROL_REG = {30,9}, .ROUTER_EN_BIT = 3, .NUMLAN_GROUPS_MAX = 8, .NUMLAN_GROUPS_BIT = 0, // {0-2} .VLAN_CONTROL_REG = {30,9}, .TAG_VLAN_BIT = 7, .NUM_PORTS = 6, .CPU_PORT = 5, .MII_REGISTER_EN = NOTSUPPORTED, .update_state = ip175c_update_state, .set_vlan_mode = ip175c_set_vlan_mode, .reset = ip175c_reset, }; static const struct register_mappings IP175A = { .NAME = "IP175A", .MODEL_NO = 0x05, .VLAN_DEFAULT_TAG_REG = { {0,24},{0,25},{0,26},{0,27},{0,28},NOTSUPPORTED, NOTSUPPORTED,NOTSUPPORTED,NOTSUPPORTED }, .ADD_TAG_REG = {0,23}, .REMOVE_TAG_REG = {0,23}, .ADD_TAG_BIT = {11,12,13,14,15,-1,-1,-1,-1}, .REMOVE_TAG_BIT = {6,7,8,9,10,-1,-1,-1,-1}, .SIMPLE_VLAN_REGISTERS = 0, // Only programmable via EEPROM .VLAN_LOOKUP_REG = NOTSUPPORTED,// +N/2 .VLAN_LOOKUP_REG_5 = NOTSUPPORTED, .VLAN_LOOKUP_EVEN_BIT = {8,9,10,11,12,-1,-1,-1,-1}, .VLAN_LOOKUP_ODD_BIT = {0,1,2,3,4,-1,-1,-1,-1}, .TAG_VLAN_MASK_REG = NOTSUPPORTED, // +N/2, .TAG_VLAN_MASK_EVEN_BIT = {-1,-1,-1,-1,-1,-1,-1,-1,-1}, .TAG_VLAN_MASK_ODD_BIT = {-1,-1,-1,-1,-1,-1,-1,-1,-1}, .RESET_VAL = -1, .RESET_REG = 
NOTSUPPORTED, .MODE_VAL = 0, .MODE_REG = NOTSUPPORTED, .ROUTER_CONTROL_REG = NOTSUPPORTED, .VLAN_CONTROL_REG = NOTSUPPORTED, .TAG_VLAN_BIT = -1, .ROUTER_EN_BIT = -1, .NUMLAN_GROUPS_MAX = -1, .NUMLAN_GROUPS_BIT = -1, // {0-2} .NUM_PORTS = 5, .CPU_PORT = 4, .MII_REGISTER_EN = {0, 18}, .MII_REGISTER_EN_BIT = 7, .update_state = ip175c_update_state, .set_vlan_mode = ip175c_set_vlan_mode, .reset = ip175c_reset, }; static int ip175d_update_state(struct ip17xx_state *state); static int ip175d_set_vlan_mode(struct ip17xx_state *state); static int ip175d_reset(struct ip17xx_state *state); static const struct register_mappings IP175D = { .NAME = "IP175D", .MODEL_NO = 0x18, // The IP175D has a completely different interface, so we leave most // of the registers undefined and switch to different code paths. .VLAN_DEFAULT_TAG_REG = { NOTSUPPORTED,NOTSUPPORTED,NOTSUPPORTED,NOTSUPPORTED, NOTSUPPORTED,NOTSUPPORTED,NOTSUPPORTED,NOTSUPPORTED, }, .ADD_TAG_REG = NOTSUPPORTED, .REMOVE_TAG_REG = NOTSUPPORTED, .SIMPLE_VLAN_REGISTERS = 0, .VLAN_LOOKUP_REG = NOTSUPPORTED, .VLAN_LOOKUP_REG_5 = NOTSUPPORTED, .TAG_VLAN_MASK_REG = NOTSUPPORTED, .RESET_VAL = 0x175D, .RESET_REG = {20,2}, .MODE_REG = NOTSUPPORTED, .ROUTER_CONTROL_REG = NOTSUPPORTED, .ROUTER_EN_BIT = -1, .NUMLAN_GROUPS_BIT = -1, .VLAN_CONTROL_REG = NOTSUPPORTED, .TAG_VLAN_BIT = -1, .NUM_PORTS = 6, .CPU_PORT = 5, .MII_REGISTER_EN = NOTSUPPORTED, .update_state = ip175d_update_state, .set_vlan_mode = ip175d_set_vlan_mode, .reset = ip175d_reset, }; struct ip17xx_state { struct switch_dev dev; struct mii_bus *mii_bus; bool registered; int router_mode; // ROUTER_EN int vlan_enabled; // TAG_VLAN_EN struct port_state { u16 pvid; unsigned int shareports; } ports[MAX_PORTS]; unsigned int add_tag; unsigned int remove_tag; int num_vlans; struct vlan_state { unsigned int ports; unsigned int tag; // VLAN tag (IP175D only) } vlans[MAX_VLANS]; const struct register_mappings *regs; reg proc_mii; // phy/reg for the low level register access via 
swconfig char buf[80]; }; #define get_state(_dev) container_of((_dev), struct ip17xx_state, dev) static int ip_phy_read(struct ip17xx_state *state, int port, int reg) { int val = mdiobus_read(state->mii_bus, port, reg); if (val < 0) pr_warning("IP17xx: Unable to get MII register %d,%d: error %d\n", port, reg, -val); #ifdef DUMP_MII_IO else pr_debug("IP17xx: Read MII(%d,%d) -> %04x\n", port, reg, val); #endif return val; } static int ip_phy_write(struct ip17xx_state *state, int port, int reg, u16 val) { int err; #ifdef DUMP_MII_IO pr_debug("IP17xx: Write MII(%d,%d) <- %04x\n", port, reg, val); #endif err = mdiobus_write(state->mii_bus, port, reg, val); if (err < 0) pr_warning("IP17xx: Unable to write MII register %d,%d: error %d\n", port, reg, -err); return err; } static int ip_phy_write_masked(struct ip17xx_state *state, int port, int reg, unsigned int mask, unsigned int data) { int val = ip_phy_read(state, port, reg); if (val < 0) return 0; return ip_phy_write(state, port, reg, (val & ~mask) | data); } static int getPhy(struct ip17xx_state *state, reg mii) { if (!REG_SUPP(mii)) return -EFAULT; return ip_phy_read(state, mii.p, mii.m); } static int setPhy(struct ip17xx_state *state, reg mii, u16 value) { int err; if (!REG_SUPP(mii)) return -EFAULT; err = ip_phy_write(state, mii.p, mii.m, value); if (err < 0) return err; mdelay(2); getPhy(state, mii); return 0; } /** * These two macros are to simplify the mapping of logical bits to the bits in hardware. * NOTE: these macros will return if there is an error! 
*/ #define GET_PORT_BITS(state, bits, addr, bit_lookup) \ do { \ int i, val = getPhy((state), (addr)); \ if (val < 0) \ return val; \ (bits) = 0; \ for (i = 0; i < MAX_PORTS; i++) { \ if ((bit_lookup)[i] == -1) continue; \ if (val & (1<<(bit_lookup)[i])) \ (bits) |= (1<<i); \ } \ } while (0) #define SET_PORT_BITS(state, bits, addr, bit_lookup) \ do { \ int i, val = getPhy((state), (addr)); \ if (val < 0) \ return val; \ for (i = 0; i < MAX_PORTS; i++) { \ unsigned int newmask = ((bits)&(1<<i)); \ if ((bit_lookup)[i] == -1) continue; \ val &= ~(1<<(bit_lookup)[i]); \ val |= ((newmask>>i)<<(bit_lookup)[i]); \ } \ val = setPhy((state), (addr), val); \ if (val < 0) \ return val; \ } while (0) static int get_model(struct ip17xx_state *state) { int id1, id2; int oui_id, model_no, rev_no, chip_no; id1 = ip_phy_read(state, 0, 2); id2 = ip_phy_read(state, 0, 3); oui_id = (id1 << 6) | ((id2 >> 10) & 0x3f); model_no = (id2 >> 4) & 0x3f; rev_no = id2 & 0xf; pr_debug("IP17xx: Identified oui=%06x model=%02x rev=%X\n", oui_id, model_no, rev_no); if (oui_id != 0x0090c3) // No other oui_id should have reached us anyway return -ENODEV; if (model_no == IP175A.MODEL_NO) { state->regs = &IP175A; } else if (model_no == IP175C.MODEL_NO) { /* * Several models share the same model_no: * 178C has more PHYs, so we try whether the device responds to a read from PHY5 * 175D has a new chip ID register * 175C has neither */ if (ip_phy_read(state, 5, 2) == 0x0243) { state->regs = &IP178C; } else { chip_no = ip_phy_read(state, 20, 0); pr_debug("IP17xx: Chip ID register reads %04x\n", chip_no); if (chip_no == 0x175d) { state->regs = &IP175D; } else { state->regs = &IP175C; } } } else { pr_warning("IP17xx: Found an unknown IC+ switch with model number %02x, revision %X.\n", model_no, rev_no); return -EPERM; } return 0; } /*** Low-level functions for the older models ***/ /** Only set vlan and router flags in the switch **/ static int ip175c_set_flags(struct ip17xx_state *state) { int val; if 
(!REG_SUPP(state->regs->ROUTER_CONTROL_REG)) { return 0; } val = getPhy(state, state->regs->ROUTER_CONTROL_REG); if (val < 0) { return val; } if (state->regs->ROUTER_EN_BIT >= 0) { if (state->router_mode) { val |= (1<<state->regs->ROUTER_EN_BIT); } else { val &= (~(1<<state->regs->ROUTER_EN_BIT)); } } if (state->regs->TAG_VLAN_BIT >= 0) { if (state->vlan_enabled) { val |= (1<<state->regs->TAG_VLAN_BIT); } else { val &= (~(1<<state->regs->TAG_VLAN_BIT)); } } if (state->regs->NUMLAN_GROUPS_BIT >= 0) { val &= (~((state->regs->NUMLAN_GROUPS_MAX-1)<<state->regs->NUMLAN_GROUPS_BIT)); if (state->num_vlans > state->regs->NUMLAN_GROUPS_MAX) { val |= state->regs->NUMLAN_GROUPS_MAX << state->regs->NUMLAN_GROUPS_BIT; } else if (state->num_vlans >= 1) { val |= (state->num_vlans-1) << state->regs->NUMLAN_GROUPS_BIT; } } return setPhy(state, state->regs->ROUTER_CONTROL_REG, val); } /** Set all VLAN and port state. Usually you should call "correct_vlan_state" first. **/ static int ip175c_set_state(struct ip17xx_state *state) { int j; int i; SET_PORT_BITS(state, state->add_tag, state->regs->ADD_TAG_REG, state->regs->ADD_TAG_BIT); SET_PORT_BITS(state, state->remove_tag, state->regs->REMOVE_TAG_REG, state->regs->REMOVE_TAG_BIT); if (REG_SUPP(state->regs->VLAN_LOOKUP_REG)) { for (j=0; j<state->regs->NUM_PORTS; j++) { reg addr; const bitnum *bit_lookup = (j%2==0)? state->regs->VLAN_LOOKUP_EVEN_BIT: state->regs->VLAN_LOOKUP_ODD_BIT; addr = state->regs->VLAN_LOOKUP_REG; if (state->regs->SIMPLE_VLAN_REGISTERS) { addr.m += j; } else { switch (j) { case 0: case 1: break; case 2: case 3: addr.m+=1; break; case 4: addr.m+=2; break; case 5: addr = state->regs->VLAN_LOOKUP_REG_5; break; default: addr.m = -1; // shouldn't get here, but... 
break; } } //printf("shareports for %d is %02X\n",j,state->ports[j].shareports); if (REG_SUPP(addr)) { SET_PORT_BITS(state, state->ports[j].shareports, addr, bit_lookup); } } } if (REG_SUPP(state->regs->TAG_VLAN_MASK_REG)) { for (j=0; j<MAX_VLANS; j++) { reg addr = state->regs->TAG_VLAN_MASK_REG; const bitnum *bit_lookup = (j%2==0)? state->regs->TAG_VLAN_MASK_EVEN_BIT: state->regs->TAG_VLAN_MASK_ODD_BIT; unsigned int vlan_mask; if (state->regs->SIMPLE_VLAN_REGISTERS) { addr.m += j; } else { addr.m += j/2; } vlan_mask = state->vlans[j].ports; SET_PORT_BITS(state, vlan_mask, addr, bit_lookup); } } for (i=0; i<MAX_PORTS; i++) { if (REG_SUPP(state->regs->VLAN_DEFAULT_TAG_REG[i])) { int err = setPhy(state, state->regs->VLAN_DEFAULT_TAG_REG[i], state->ports[i].pvid); if (err < 0) { return err; } } } return ip175c_set_flags(state); } /** * Uses only the VLAN port mask and the add tag mask to generate the other fields: * which ports are part of the same VLAN, removing vlan tags, and VLAN tag ids. */ static void ip175c_correct_vlan_state(struct ip17xx_state *state) { int i, j; state->num_vlans = 0; for (i=0; i<MAX_VLANS; i++) { if (state->vlans[i].ports != 0) { state->num_vlans = i+1; // Hack -- we need to store the "set" vlans somewhere... } } for (i=0; i<state->regs->NUM_PORTS; i++) { unsigned int portmask = (1<<i); if (!state->vlan_enabled) { // Share with everybody! 
state->ports[i].shareports = (1<<state->regs->NUM_PORTS)-1; continue; } state->ports[i].shareports = portmask; for (j=0; j<MAX_VLANS; j++) { if (state->vlans[j].ports & portmask) state->ports[i].shareports |= state->vlans[j].ports; } } } static int ip175c_update_state(struct ip17xx_state *state) { ip175c_correct_vlan_state(state); return ip175c_set_state(state); } static int ip175c_set_vlan_mode(struct ip17xx_state *state) { return ip175c_update_state(state); } static int ip175c_reset(struct ip17xx_state *state) { int err; if (REG_SUPP(state->regs->MODE_REG)) { err = setPhy(state, state->regs->MODE_REG, state->regs->MODE_VAL); if (err < 0) return err; err = getPhy(state, state->regs->MODE_REG); if (err < 0) return err; } return ip175c_update_state(state); } /*** Low-level functions for IP175D ***/ static int ip175d_update_state(struct ip17xx_state *state) { unsigned int filter_mask = 0; unsigned int ports[16], add[16], rem[16]; int i, j; int err = 0; for (i = 0; i < 16; i++) { ports[i] = 0; add[i] = 0; rem[i] = 0; if (!state->vlan_enabled) { err |= ip_phy_write(state, 22, 14+i, i+1); // default tags ports[i] = 0x3f; continue; } if (!state->vlans[i].tag) { // Reset the filter err |= ip_phy_write(state, 22, 14+i, 0); // tag continue; } filter_mask |= 1 << i; err |= ip_phy_write(state, 22, 14+i, state->vlans[i].tag); ports[i] = state->vlans[i].ports; for (j = 0; j < 6; j++) { if (ports[i] & (1 << j)) { if (state->add_tag & (1 << j)) add[i] |= 1 << j; if (state->remove_tag & (1 << j)) rem[i] |= 1 << j; } } } // Port masks, tag adds and removals for (i = 0; i < 8; i++) { err |= ip_phy_write(state, 23, i, ports[2*i] | (ports[2*i+1] << 8)); err |= ip_phy_write(state, 23, 8+i, add[2*i] | (add[2*i+1] << 8)); err |= ip_phy_write(state, 23, 16+i, rem[2*i] | (rem[2*i+1] << 8)); } err |= ip_phy_write(state, 22, 10, filter_mask); // Default VLAN tag for each port for (i = 0; i < 6; i++) err |= ip_phy_write(state, 22, 4+i, state->vlans[state->ports[i].pvid].tag); return (err ? 
-EIO : 0); } static int ip175d_set_vlan_mode(struct ip17xx_state *state) { int i; int err = 0; if (state->vlan_enabled) { // VLAN classification rules: tag-based VLANs, use VID to classify, // drop packets that cannot be classified. err |= ip_phy_write_masked(state, 22, 0, 0x3fff, 0x003f); // Ingress rules: CFI=1 dropped, null VID is untagged, VID=1 passed, // VID=0xfff discarded, admin both tagged and untagged, ingress // filters enabled. err |= ip_phy_write_masked(state, 22, 1, 0x0fff, 0x0c3f); // Egress rules: IGMP processing off, keep VLAN header off err |= ip_phy_write_masked(state, 22, 2, 0x0fff, 0x0000); } else { // VLAN classification rules: everything off & clear table err |= ip_phy_write_masked(state, 22, 0, 0xbfff, 0x8000); // Ingress and egress rules: set to defaults err |= ip_phy_write_masked(state, 22, 1, 0x0fff, 0x0c3f); err |= ip_phy_write_masked(state, 22, 2, 0x0fff, 0x0000); } // Reset default VLAN for each port to 0 for (i = 0; i < 6; i++) state->ports[i].pvid = 0; err |= ip175d_update_state(state); return (err ? -EIO : 0); } static int ip175d_reset(struct ip17xx_state *state) { int err = 0; // Disable the special tagging mode err |= ip_phy_write_masked(state, 21, 22, 0x0003, 0x0000); // Set 802.1q protocol type err |= ip_phy_write(state, 22, 3, 0x8100); state->vlan_enabled = 0; err |= ip175d_set_vlan_mode(state); return (err ? -EIO : 0); } /*** High-level functions ***/ static int ip17xx_get_enable_vlan(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); val->value.i = state->vlan_enabled; return 0; } static void ip17xx_reset_vlan_config(struct ip17xx_state *state) { int i; state->remove_tag = (state->vlan_enabled ? ((1<<state->regs->NUM_PORTS)-1) : 0x0000); state->add_tag = 0x0000; for (i = 0; i < MAX_VLANS; i++) { state->vlans[i].ports = 0x0000; state->vlans[i].tag = (i ? 
i : 16); } for (i = 0; i < MAX_PORTS; i++) state->ports[i].pvid = 0; } static int ip17xx_set_enable_vlan(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); int enable; enable = val->value.i; if (state->vlan_enabled == enable) { // Do not change any state. return 0; } state->vlan_enabled = enable; // Otherwise, if we are switching state, set fields to a known default. ip17xx_reset_vlan_config(state); return state->regs->set_vlan_mode(state); } static int ip17xx_get_ports(struct switch_dev *dev, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); int b; int ind; unsigned int ports; if (val->port_vlan >= dev->vlans || val->port_vlan < 0) return -EINVAL; ports = state->vlans[val->port_vlan].ports; b = 0; ind = 0; while (b < MAX_PORTS) { if (ports&1) { int istagged = ((state->add_tag >> b) & 1); val->value.ports[ind].id = b; val->value.ports[ind].flags = (istagged << SWITCH_PORT_FLAG_TAGGED); ind++; } b++; ports >>= 1; } val->len = ind; return 0; } static int ip17xx_set_ports(struct switch_dev *dev, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); int i; if (val->port_vlan >= dev->vlans || val->port_vlan < 0) return -EINVAL; state->vlans[val->port_vlan].ports = 0; for (i = 0; i < val->len; i++) { unsigned int bitmask = (1<<val->value.ports[i].id); state->vlans[val->port_vlan].ports |= bitmask; if (val->value.ports[i].flags & (1<<SWITCH_PORT_FLAG_TAGGED)) { state->add_tag |= bitmask; state->remove_tag &= (~bitmask); } else { state->add_tag &= (~bitmask); state->remove_tag |= bitmask; } } return state->regs->update_state(state); } static int ip17xx_apply(struct switch_dev *dev) { struct ip17xx_state *state = get_state(dev); if (REG_SUPP(state->regs->MII_REGISTER_EN)) { int val = getPhy(state, state->regs->MII_REGISTER_EN); if (val < 0) { return val; } val |= (1<<state->regs->MII_REGISTER_EN_BIT); return setPhy(state, state->regs->MII_REGISTER_EN, val); } 
return 0; } static int ip17xx_reset(struct switch_dev *dev) { struct ip17xx_state *state = get_state(dev); int i, err; if (REG_SUPP(state->regs->RESET_REG)) { err = setPhy(state, state->regs->RESET_REG, state->regs->RESET_VAL); if (err < 0) return err; err = getPhy(state, state->regs->RESET_REG); /* * Data sheet specifies reset period to be 2 msec. * (I don't see any mention of the 2ms delay in the IP178C spec, only * in IP175C, but it can't hurt.) */ mdelay(2); } /* reset switch ports */ for (i = 0; i < state->regs->NUM_PORTS-1; i++) { err = ip_phy_write(state, i, MII_BMCR, BMCR_RESET); if (err < 0) return err; } state->router_mode = 0; state->vlan_enabled = 0; ip17xx_reset_vlan_config(state); return state->regs->reset(state); } static int ip17xx_get_tagged(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); if (state->add_tag & (1<<val->port_vlan)) { if (state->remove_tag & (1<<val->port_vlan)) val->value.i = 3; // shouldn't ever happen. 
else val->value.i = 1; } else { if (state->remove_tag & (1<<val->port_vlan)) val->value.i = 0; else val->value.i = 2; } return 0; } static int ip17xx_set_tagged(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); state->add_tag &= ~(1<<val->port_vlan); state->remove_tag &= ~(1<<val->port_vlan); if (val->value.i == 0) state->remove_tag |= (1<<val->port_vlan); if (val->value.i == 1) state->add_tag |= (1<<val->port_vlan); return state->regs->update_state(state); } /** Get the current phy address */ static int ip17xx_get_phy(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); val->value.i = state->proc_mii.p; return 0; } /** Set a new phy address for low level access to registers */ static int ip17xx_set_phy(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); int new_reg = val->value.i; if (new_reg < 0 || new_reg > 31) state->proc_mii.p = (u16)-1; else state->proc_mii.p = (u16)new_reg; return 0; } /** Get the current register number */ static int ip17xx_get_reg(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); val->value.i = state->proc_mii.m; return 0; } /** Set a new register address for low level access to registers */ static int ip17xx_set_reg(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); int new_reg = val->value.i; if (new_reg < 0 || new_reg > 31) state->proc_mii.m = (u16)-1; else state->proc_mii.m = (u16)new_reg; return 0; } /** Get the register content of state->proc_mii */ static int ip17xx_get_val(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); int retval = -EINVAL; if (REG_SUPP(state->proc_mii)) retval = 
getPhy(state, state->proc_mii); if (retval < 0) { return retval; } else { val->value.i = retval; return 0; } } /** Write a value to the register defined by phy/reg above */ static int ip17xx_set_val(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); int myval, err = -EINVAL; myval = val->value.i; if (myval <= 0xffff && myval >= 0 && REG_SUPP(state->proc_mii)) { err = setPhy(state, state->proc_mii, (u16)myval); } return err; } static int ip17xx_read_name(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); val->value.s = state->regs->NAME; // Just a const pointer, won't be freed by swconfig. return 0; } static int ip17xx_get_tag(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); int vlan = val->port_vlan; if (vlan < 0 || vlan >= MAX_VLANS) return -EINVAL; val->value.i = state->vlans[vlan].tag; return 0; } static int ip17xx_set_tag(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); int vlan = val->port_vlan; int tag = val->value.i; if (vlan < 0 || vlan >= MAX_VLANS) return -EINVAL; if (tag < 0 || tag > 4095) return -EINVAL; state->vlans[vlan].tag = tag; return state->regs->update_state(state); } static int ip17xx_set_port_speed(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); int nr = val->port_vlan; int ctrl; int autoneg; int speed; if (val->value.i == 100) { speed = 1; autoneg = 0; } else if (val->value.i == 10) { speed = 0; autoneg = 0; } else { autoneg = 1; speed = 1; } /* Can't set speed for cpu port */ if (nr == state->regs->CPU_PORT) return -EINVAL; if (nr >= dev->ports || nr < 0) return -EINVAL; ctrl = ip_phy_read(state, nr, 0); if (ctrl < 0) return -EIO; ctrl &= (~(1<<12)); ctrl &= 
(~(1<<13)); ctrl |= (autoneg<<12); ctrl |= (speed<<13); return ip_phy_write(state, nr, 0, ctrl); } static int ip17xx_get_port_speed(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); int nr = val->port_vlan; int speed, status; if (nr == state->regs->CPU_PORT) { val->value.i = 100; return 0; } if (nr >= dev->ports || nr < 0) return -EINVAL; status = ip_phy_read(state, nr, 1); speed = ip_phy_read(state, nr, 18); if (status < 0 || speed < 0) return -EIO; if (status & 4) val->value.i = ((speed & (1<<11)) ? 100 : 10); else val->value.i = 0; return 0; } static int ip17xx_get_port_status(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val) { struct ip17xx_state *state = get_state(dev); int ctrl, speed, status; int nr = val->port_vlan; int len; char *buf = state->buf; // fixed-length at 80. if (nr == state->regs->CPU_PORT) { sprintf(buf, "up, 100 Mbps, cpu port"); val->value.s = buf; return 0; } if (nr >= dev->ports || nr < 0) return -EINVAL; ctrl = ip_phy_read(state, nr, 0); status = ip_phy_read(state, nr, 1); speed = ip_phy_read(state, nr, 18); if (ctrl < 0 || status < 0 || speed < 0) return -EIO; if (status & 4) len = sprintf(buf, "up, %d Mbps, %s duplex", ((speed & (1<<11)) ? 100 : 10), ((speed & (1<<10)) ? "full" : "half")); else len = sprintf(buf, "down"); if (ctrl & (1<<12)) { len += sprintf(buf+len, ", auto-negotiate"); if (!(status & (1<<5))) len += sprintf(buf+len, " (in progress)"); } else { len += sprintf(buf+len, ", fixed speed (%d)", ((ctrl & (1<<13)) ? 
100 : 10)); } buf[len] = '\0'; val->value.s = buf; return 0; } static int ip17xx_get_pvid(struct switch_dev *dev, int port, int *val) { struct ip17xx_state *state = get_state(dev); *val = state->ports[port].pvid; return 0; } static int ip17xx_set_pvid(struct switch_dev *dev, int port, int val) { struct ip17xx_state *state = get_state(dev); if (val < 0 || val >= MAX_VLANS) return -EINVAL; state->ports[port].pvid = val; return state->regs->update_state(state); } enum Ports { IP17XX_PORT_STATUS, IP17XX_PORT_LINK, IP17XX_PORT_TAGGED, IP17XX_PORT_PVID, }; enum Globals { IP17XX_ENABLE_VLAN, IP17XX_GET_NAME, IP17XX_REGISTER_PHY, IP17XX_REGISTER_MII, IP17XX_REGISTER_VALUE, IP17XX_REGISTER_ERRNO, }; enum Vlans { IP17XX_VLAN_TAG, }; static const struct switch_attr ip17xx_global[] = { [IP17XX_ENABLE_VLAN] = { .id = IP17XX_ENABLE_VLAN, .type = SWITCH_TYPE_INT, .name = "enable_vlan", .description = "Flag to enable or disable VLANs and tagging", .get = ip17xx_get_enable_vlan, .set = ip17xx_set_enable_vlan, }, [IP17XX_GET_NAME] = { .id = IP17XX_GET_NAME, .type = SWITCH_TYPE_STRING, .description = "Returns the type of IC+ chip.", .name = "name", .get = ip17xx_read_name, .set = NULL, }, /* jal: added for low level debugging etc. 
*/ [IP17XX_REGISTER_PHY] = { .id = IP17XX_REGISTER_PHY, .type = SWITCH_TYPE_INT, .description = "Direct register access: set PHY (0-4, or 29,30,31)", .name = "phy", .get = ip17xx_get_phy, .set = ip17xx_set_phy, }, [IP17XX_REGISTER_MII] = { .id = IP17XX_REGISTER_MII, .type = SWITCH_TYPE_INT, .description = "Direct register access: set MII register number (0-31)", .name = "reg", .get = ip17xx_get_reg, .set = ip17xx_set_reg, }, [IP17XX_REGISTER_VALUE] = { .id = IP17XX_REGISTER_VALUE, .type = SWITCH_TYPE_INT, .description = "Direct register access: read/write to register (0-65535)", .name = "val", .get = ip17xx_get_val, .set = ip17xx_set_val, }, }; static const struct switch_attr ip17xx_vlan[] = { [IP17XX_VLAN_TAG] = { .id = IP17XX_VLAN_TAG, .type = SWITCH_TYPE_INT, .description = "VLAN ID (0-4095) [IP175D only]", .name = "vid", .get = ip17xx_get_tag, .set = ip17xx_set_tag, } }; static const struct switch_attr ip17xx_port[] = { [IP17XX_PORT_STATUS] = { .id = IP17XX_PORT_STATUS, .type = SWITCH_TYPE_STRING, .description = "Returns Detailed port status", .name = "status", .get = ip17xx_get_port_status, .set = NULL, }, [IP17XX_PORT_LINK] = { .id = IP17XX_PORT_LINK, .type = SWITCH_TYPE_INT, .description = "Link speed. 
Can write 0 for auto-negotiate, or 10 or 100", .name = "link", .get = ip17xx_get_port_speed, .set = ip17xx_set_port_speed, }, [IP17XX_PORT_TAGGED] = { .id = IP17XX_PORT_LINK, .type = SWITCH_TYPE_INT, .description = "0 = untag, 1 = add tags, 2 = do not alter (This value is reset if vlans are altered)", .name = "tagged", .get = ip17xx_get_tagged, .set = ip17xx_set_tagged, }, }; static const struct switch_dev_ops ip17xx_ops = { .attr_global = { .attr = ip17xx_global, .n_attr = ARRAY_SIZE(ip17xx_global), }, .attr_port = { .attr = ip17xx_port, .n_attr = ARRAY_SIZE(ip17xx_port), }, .attr_vlan = { .attr = ip17xx_vlan, .n_attr = ARRAY_SIZE(ip17xx_vlan), }, .get_port_pvid = ip17xx_get_pvid, .set_port_pvid = ip17xx_set_pvid, .get_vlan_ports = ip17xx_get_ports, .set_vlan_ports = ip17xx_set_ports, .apply_config = ip17xx_apply, .reset_switch = ip17xx_reset, }; static int ip17xx_probe(struct phy_device *pdev) { struct ip17xx_state *state; struct switch_dev *dev; int err; /* We only attach to PHY 0, but use all available PHYs */ if (pdev->addr != 0) return -ENODEV; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return -ENOMEM; dev = &state->dev; pdev->priv = state; state->mii_bus = pdev->bus; err = get_model(state); if (err < 0) goto error; dev->vlans = MAX_VLANS; dev->cpu_port = state->regs->CPU_PORT; dev->ports = state->regs->NUM_PORTS; dev->name = state->regs->NAME; dev->ops = &ip17xx_ops; pr_info("IP17xx: Found %s at %s\n", dev->name, dev_name(&pdev->dev)); return 0; error: kfree(state); return err; } static int ip17xx_config_init(struct phy_device *pdev) { struct ip17xx_state *state = pdev->priv; struct net_device *dev = pdev->attached_dev; int err; err = register_switch(&state->dev, dev); if (err < 0) return err; state->registered = true; ip17xx_reset(&state->dev); return 0; } static void ip17xx_remove(struct phy_device *pdev) { struct ip17xx_state *state = pdev->priv; if (state->registered) unregister_switch(&state->dev); kfree(state); } static int 
ip17xx_config_aneg(struct phy_device *pdev) { return 0; } static int ip17xx_aneg_done(struct phy_device *pdev) { return BMSR_ANEGCOMPLETE; } static int ip17xx_update_link(struct phy_device *pdev) { pdev->link = 1; return 0; } static int ip17xx_read_status(struct phy_device *pdev) { pdev->speed = SPEED_100; pdev->duplex = DUPLEX_FULL; pdev->pause = pdev->asym_pause = 0; pdev->link = 1; return 0; } static struct phy_driver ip17xx_driver = { .name = "IC+ IP17xx", .phy_id = 0x02430c00, .phy_id_mask = 0x0ffffc00, .features = PHY_BASIC_FEATURES, .probe = ip17xx_probe, .remove = ip17xx_remove, .config_init = ip17xx_config_init, .config_aneg = ip17xx_config_aneg, .aneg_done = ip17xx_aneg_done, .update_link = ip17xx_update_link, .read_status = ip17xx_read_status, .driver = { .owner = THIS_MODULE }, }; static struct phy_driver ip175a_driver = { .name = "IC+ IP175A", .phy_id = 0x02430c50, .phy_id_mask = 0x0ffffff0, .features = PHY_BASIC_FEATURES, .probe = ip17xx_probe, .remove = ip17xx_remove, .config_init = ip17xx_config_init, .config_aneg = ip17xx_config_aneg, .aneg_done = ip17xx_aneg_done, .update_link = ip17xx_update_link, .read_status = ip17xx_read_status, .driver = { .owner = THIS_MODULE }, }; int __init ip17xx_init(void) { int ret; ret = phy_driver_register(&ip175a_driver); if (ret < 0) return ret; return phy_driver_register(&ip17xx_driver); } void __exit ip17xx_exit(void) { phy_driver_unregister(&ip17xx_driver); phy_driver_unregister(&ip175a_driver); } MODULE_AUTHOR("Patrick Horn <patrick.horn@gmail.com>"); MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>"); MODULE_AUTHOR("Martin Mares <mj@ucw.cz>"); MODULE_LICENSE("GPL"); module_init(ip17xx_init); module_exit(ip17xx_exit);
gpl-2.0
vfalico/popcorn
drivers/scsi/bfa/bfad.c
574
41854
/* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * bfad.c Linux driver PCI interface module. */ #include <linux/module.h> #include <linux/kthread.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/pci.h> #include <linux/firmware.h> #include <asm/uaccess.h> #include <asm/fcntl.h> #include "bfad_drv.h" #include "bfad_im.h" #include "bfa_fcs.h" #include "bfa_defs.h" #include "bfa.h" BFA_TRC_FILE(LDRV, BFAD); DEFINE_MUTEX(bfad_mutex); LIST_HEAD(bfad_list); static int bfad_inst; static int num_sgpgs_parm; int supported_fc4s; char *host_name, *os_name, *os_patch; int num_rports, num_ios, num_tms; int num_fcxps, num_ufbufs; int reqq_size, rspq_size, num_sgpgs; int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT; int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH; int bfa_io_max_sge = BFAD_IO_MAX_SGE; int bfa_log_level = 3; /* WARNING log level */ int ioc_auto_recover = BFA_TRUE; int bfa_linkup_delay = -1; int fdmi_enable = BFA_TRUE; int pcie_max_read_reqsz; int bfa_debugfs_enable = 1; int msix_disable_cb = 0, msix_disable_ct = 0; int max_xfer_size = BFAD_MAX_SECTORS >> 1; /* Firmware releated */ u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; #define BFAD_FW_FILE_CB "cbfw.bin" #define BFAD_FW_FILE_CT "ctfw.bin" #define BFAD_FW_FILE_CT2 "ct2fw.bin" static u32 *bfad_load_fwimg(struct pci_dev *pdev); 
static void bfad_free_fwimg(void); static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, u32 *bfi_image_size, char *fw_name); static const char *msix_name_ct[] = { "ctrl", "cpe0", "cpe1", "cpe2", "cpe3", "rme0", "rme1", "rme2", "rme3" }; static const char *msix_name_cb[] = { "cpe0", "cpe1", "cpe2", "cpe3", "rme0", "rme1", "rme2", "rme3", "eemc", "elpu0", "elpu1", "epss", "mlpu" }; MODULE_FIRMWARE(BFAD_FW_FILE_CB); MODULE_FIRMWARE(BFAD_FW_FILE_CT); MODULE_FIRMWARE(BFAD_FW_FILE_CT2); module_param(os_name, charp, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(os_name, "OS name of the hba host machine"); module_param(os_patch, charp, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine"); module_param(host_name, charp, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(host_name, "Hostname of the hba host machine"); module_param(num_rports, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_rports, "Max number of rports supported per port " "(physical/logical), default=1024"); module_param(num_ios, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000"); module_param(num_tms, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128"); module_param(num_fcxps, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64"); module_param(num_ufbufs, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame " "buffers, default=64"); module_param(reqq_size, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, " "default=256"); module_param(rspq_size, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, " "default=64"); module_param(num_sgpgs, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048"); module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(rport_del_timeout, 
"Rport delete timeout, default=90 secs, " "Range[>0]"); module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]"); module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255"); module_param(bfa_log_level, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, " "Range[Critical:1|Error:2|Warning:3|Info:4]"); module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, " "Range[off:0|on:1]"); module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for " "boot port. Otherwise 10 secs in RHEL4 & 0 for " "[RHEL5, SLES10, ESX40] Range[>0]"); module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts " "for Brocade-415/425/815/825 cards, default=0, " " Range[false:0|true:1]"); module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts " "if possible for Brocade-1010/1020/804/1007/902/1741 " "cards, default=0, Range[false:0|true:1]"); module_param(fdmi_enable, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, " "Range[false:0|true:1]"); module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 " "(use system setting), Range[128|256|512|1024|2048|4096]"); module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1," " Range[false:0|true:1]"); module_param(max_xfer_size, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(max_xfer_size, "default=32MB," " Range[64k|128k|256k|512k|1024k|2048k]"); static void bfad_sm_uninit(struct bfad_s *bfad, enum 
bfad_sm_event event); static void bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event); /* * Beginning state for the driver instance, awaiting the pci_probe event */ static void bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event) { bfa_trc(bfad, event); switch (event) { case BFAD_E_CREATE: bfa_sm_set_state(bfad, bfad_sm_created); bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s", "bfad_worker"); if (IS_ERR(bfad->bfad_tsk)) { printk(KERN_INFO "bfad[%d]: Kernel thread " "creation failed!\n", bfad->inst_no); bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED); } bfa_sm_send_event(bfad, BFAD_E_INIT); break; case BFAD_E_STOP: /* Ignore stop; already in uninit */ break; default: bfa_sm_fault(bfad, event); } } /* * Driver Instance is created, awaiting event INIT to initialize the bfad */ static void bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event) { unsigned long flags; bfa_trc(bfad, event); switch (event) { case BFAD_E_INIT: bfa_sm_set_state(bfad, bfad_sm_initializing); init_completion(&bfad->comp); /* Enable Interrupt and wait bfa_init completion */ if (bfad_setup_intr(bfad)) { printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n", bfad->inst_no); bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED); break; } spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_iocfc_init(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* Set up interrupt handler for each vectors */ if ((bfad->bfad_flags & BFAD_MSIX_ON) && bfad_install_msix_handler(bfad)) { printk(KERN_WARNING "%s: install_msix failed, bfad%d\n", __func__, bfad->inst_no); } 
bfad_init_timer(bfad); wait_for_completion(&bfad->comp); if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); } else { printk(KERN_WARNING "bfa %s: bfa init failed\n", bfad->pci_name); bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); } break; case BFAD_E_KTHREAD_CREATE_FAILED: bfa_sm_set_state(bfad, bfad_sm_uninit); break; default: bfa_sm_fault(bfad, event); } } static void bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event) { int retval; unsigned long flags; bfa_trc(bfad, event); switch (event) { case BFAD_E_INIT_SUCCESS: kthread_stop(bfad->bfad_tsk); spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_tsk = NULL; spin_unlock_irqrestore(&bfad->bfad_lock, flags); retval = bfad_start_ops(bfad); if (retval != BFA_STATUS_OK) break; bfa_sm_set_state(bfad, bfad_sm_operational); break; case BFAD_E_INTR_INIT_FAILED: bfa_sm_set_state(bfad, bfad_sm_uninit); kthread_stop(bfad->bfad_tsk); spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_tsk = NULL; spin_unlock_irqrestore(&bfad->bfad_lock, flags); break; case BFAD_E_INIT_FAILED: bfa_sm_set_state(bfad, bfad_sm_failed); break; default: bfa_sm_fault(bfad, event); } } static void bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event) { int retval; bfa_trc(bfad, event); switch (event) { case BFAD_E_INIT_SUCCESS: retval = bfad_start_ops(bfad); if (retval != BFA_STATUS_OK) break; bfa_sm_set_state(bfad, bfad_sm_operational); break; case BFAD_E_STOP: if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) bfad_uncfg_pport(bfad); if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) { bfad_im_probe_undo(bfad); bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; } bfad_stop(bfad); break; case BFAD_E_EXIT_COMP: bfa_sm_set_state(bfad, bfad_sm_uninit); bfad_remove_intr(bfad); del_timer_sync(&bfad->hal_tmo); break; default: bfa_sm_fault(bfad, event); } } static void bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event) { bfa_trc(bfad, event); switch 
(event) { case BFAD_E_STOP: bfa_sm_set_state(bfad, bfad_sm_fcs_exit); bfad_fcs_stop(bfad); break; default: bfa_sm_fault(bfad, event); } } static void bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event) { bfa_trc(bfad, event); switch (event) { case BFAD_E_FCS_EXIT_COMP: bfa_sm_set_state(bfad, bfad_sm_stopping); bfad_stop(bfad); break; default: bfa_sm_fault(bfad, event); } } static void bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event) { bfa_trc(bfad, event); switch (event) { case BFAD_E_EXIT_COMP: bfa_sm_set_state(bfad, bfad_sm_uninit); bfad_remove_intr(bfad); del_timer_sync(&bfad->hal_tmo); bfad_im_probe_undo(bfad); bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; bfad_uncfg_pport(bfad); break; default: bfa_sm_fault(bfad, event); break; } } /* * BFA callbacks */ void bfad_hcb_comp(void *arg, bfa_status_t status) { struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg; fcomp->status = status; complete(&fcomp->comp); } /* * bfa_init callback */ void bfa_cb_init(void *drv, bfa_status_t init_status) { struct bfad_s *bfad = drv; if (init_status == BFA_STATUS_OK) { bfad->bfad_flags |= BFAD_HAL_INIT_DONE; /* * If BFAD_HAL_INIT_FAIL flag is set: * Wake up the kernel thread to start * the bfad operations after HAL init done */ if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) { bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL; wake_up_process(bfad->bfad_tsk); } } complete(&bfad->comp); } /* * BFA_FCS callbacks */ struct bfad_port_s * bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port, enum bfa_lport_role roles, struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) { bfa_status_t rc; struct bfad_port_s *port_drv; if (!vp_drv && !vf_drv) { port_drv = &bfad->pport; port_drv->pvb_type = BFAD_PORT_PHYS_BASE; } else if (!vp_drv && vf_drv) { port_drv = &vf_drv->base_port; port_drv->pvb_type = BFAD_PORT_VF_BASE; } else if (vp_drv && !vf_drv) { port_drv = &vp_drv->drv_port; port_drv->pvb_type = BFAD_PORT_PHYS_VPORT; } else { port_drv = &vp_drv->drv_port; 
port_drv->pvb_type = BFAD_PORT_VF_VPORT; } port_drv->fcs_port = port; port_drv->roles = roles; if (roles & BFA_LPORT_ROLE_FCP_IM) { rc = bfad_im_port_new(bfad, port_drv); if (rc != BFA_STATUS_OK) { bfad_im_port_delete(bfad, port_drv); port_drv = NULL; } } return port_drv; } void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles, struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) { struct bfad_port_s *port_drv; /* this will be only called from rmmod context */ if (vp_drv && !vp_drv->comp_del) { port_drv = (vp_drv) ? (&(vp_drv)->drv_port) : ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport)); bfa_trc(bfad, roles); if (roles & BFA_LPORT_ROLE_FCP_IM) bfad_im_port_delete(bfad, port_drv); } } /* * FCS RPORT alloc callback, after successful PLOGI by FCS */ bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport, struct bfad_rport_s **rport_drv) { bfa_status_t rc = BFA_STATUS_OK; *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC); if (*rport_drv == NULL) { rc = BFA_STATUS_ENOMEM; goto ext; } *rport = &(*rport_drv)->fcs_rport; ext: return rc; } /* * FCS PBC VPORT Create */ void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport) { struct bfa_lport_cfg_s port_cfg = {0}; struct bfad_vport_s *vport; int rc; vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); if (!vport) { bfa_trc(bfad, 0); return; } vport->drv_port.bfad = bfad; port_cfg.roles = BFA_LPORT_ROLE_FCP_IM; port_cfg.pwwn = pbc_vport.vp_pwwn; port_cfg.nwwn = pbc_vport.vp_nwwn; port_cfg.preboot_vp = BFA_TRUE; rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0, &port_cfg, vport); if (rc != BFA_STATUS_OK) { bfa_trc(bfad, 0); return; } list_add_tail(&vport->list_entry, &bfad->pbc_vport_list); } void bfad_hal_mem_release(struct bfad_s *bfad) { struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; struct bfa_mem_dma_s *dma_info, *dma_elem; struct bfa_mem_kva_s *kva_info, *kva_elem; struct list_head *dm_qe, 
*km_qe; dma_info = &hal_meminfo->dma_info; kva_info = &hal_meminfo->kva_info; /* Iterate through the KVA meminfo queue */ list_for_each(km_qe, &kva_info->qe) { kva_elem = (struct bfa_mem_kva_s *) km_qe; vfree(kva_elem->kva); } /* Iterate through the DMA meminfo queue */ list_for_each(dm_qe, &dma_info->qe) { dma_elem = (struct bfa_mem_dma_s *) dm_qe; dma_free_coherent(&bfad->pcidev->dev, dma_elem->mem_len, dma_elem->kva, (dma_addr_t) dma_elem->dma); } memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s)); } void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg) { if (num_rports > 0) bfa_cfg->fwcfg.num_rports = num_rports; if (num_ios > 0) bfa_cfg->fwcfg.num_ioim_reqs = num_ios; if (num_tms > 0) bfa_cfg->fwcfg.num_tskim_reqs = num_tms; if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX) bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps; if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX) bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs; if (reqq_size > 0) bfa_cfg->drvcfg.num_reqq_elems = reqq_size; if (rspq_size > 0) bfa_cfg->drvcfg.num_rspq_elems = rspq_size; if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX) bfa_cfg->drvcfg.num_sgpgs = num_sgpgs; /* * populate the hal values back to the driver for sysfs use. 
* otherwise, the default values will be shown as 0 in sysfs */ num_rports = bfa_cfg->fwcfg.num_rports; num_ios = bfa_cfg->fwcfg.num_ioim_reqs; num_tms = bfa_cfg->fwcfg.num_tskim_reqs; num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs; num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs; reqq_size = bfa_cfg->drvcfg.num_reqq_elems; rspq_size = bfa_cfg->drvcfg.num_rspq_elems; num_sgpgs = bfa_cfg->drvcfg.num_sgpgs; } bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad) { struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; struct bfa_mem_dma_s *dma_info, *dma_elem; struct bfa_mem_kva_s *kva_info, *kva_elem; struct list_head *dm_qe, *km_qe; bfa_status_t rc = BFA_STATUS_OK; dma_addr_t phys_addr; bfa_cfg_get_default(&bfad->ioc_cfg); bfad_update_hal_cfg(&bfad->ioc_cfg); bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs; bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa); dma_info = &hal_meminfo->dma_info; kva_info = &hal_meminfo->kva_info; /* Iterate through the KVA meminfo queue */ list_for_each(km_qe, &kva_info->qe) { kva_elem = (struct bfa_mem_kva_s *) km_qe; kva_elem->kva = vmalloc(kva_elem->mem_len); if (kva_elem->kva == NULL) { bfad_hal_mem_release(bfad); rc = BFA_STATUS_ENOMEM; goto ext; } memset(kva_elem->kva, 0, kva_elem->mem_len); } /* Iterate through the DMA meminfo queue */ list_for_each(dm_qe, &dma_info->qe) { dma_elem = (struct bfa_mem_dma_s *) dm_qe; dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev, dma_elem->mem_len, &phys_addr, GFP_KERNEL); if (dma_elem->kva == NULL) { bfad_hal_mem_release(bfad); rc = BFA_STATUS_ENOMEM; goto ext; } dma_elem->dma = phys_addr; memset(dma_elem->kva, 0, dma_elem->mem_len); } ext: return rc; } /* * Create a vport under a vf. 
*/ bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id, struct bfa_lport_cfg_s *port_cfg, struct device *dev) { struct bfad_vport_s *vport; int rc = BFA_STATUS_OK; unsigned long flags; struct completion fcomp; vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); if (!vport) { rc = BFA_STATUS_ENOMEM; goto ext; } vport->drv_port.bfad = bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id, port_cfg, vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) goto ext_free_vport; if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) { rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port, dev); if (rc != BFA_STATUS_OK) goto ext_free_fcs_vport; } spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_vport_start(&vport->fcs_vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return BFA_STATUS_OK; ext_free_fcs_vport: spin_lock_irqsave(&bfad->bfad_lock, flags); vport->comp_del = &fcomp; init_completion(vport->comp_del); bfa_fcs_vport_delete(&vport->fcs_vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(vport->comp_del); ext_free_vport: kfree(vport); ext: return rc; } void bfad_bfa_tmo(unsigned long data) { struct bfad_s *bfad = (struct bfad_s *) data; unsigned long flags; struct list_head doneq; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_timer_beat(&bfad->bfa.timer_mod); bfa_comp_deq(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (!list_empty(&doneq)) { bfa_comp_process(&bfad->bfa, &doneq); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_comp_free(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); } void bfad_init_timer(struct bfad_s *bfad) { init_timer(&bfad->hal_tmo); bfad->hal_tmo.function = bfad_bfa_tmo; bfad->hal_tmo.data = (unsigned long)bfad; mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); } int 
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) { int rc = -ENODEV; if (pci_enable_device(pdev)) { printk(KERN_ERR "pci_enable_device fail %p\n", pdev); goto out; } if (pci_request_regions(pdev, BFAD_DRIVER_NAME)) goto out_disable_device; pci_set_master(pdev); if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) || (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) { if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev); goto out_release_region; } } bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2)); if (bfad->pci_bar0_kva == NULL) { printk(KERN_ERR "Fail to map bar0\n"); goto out_release_region; } bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn); bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn); bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva; bfad->hal_pcidev.device_id = pdev->device; bfad->hal_pcidev.ssid = pdev->subsystem_device; bfad->pci_name = pci_name(pdev); bfad->pci_attr.vendor_id = pdev->vendor; bfad->pci_attr.device_id = pdev->device; bfad->pci_attr.ssid = pdev->subsystem_device; bfad->pci_attr.ssvid = pdev->subsystem_vendor; bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn); bfad->pcidev = pdev; /* Adjust PCIe Maximum Read Request Size */ if (pcie_max_read_reqsz > 0) { int pcie_cap_reg; u16 pcie_dev_ctl; u16 mask = 0xffff; switch (pcie_max_read_reqsz) { case 128: mask = 0x0; break; case 256: mask = 0x1000; break; case 512: mask = 0x2000; break; case 1024: mask = 0x3000; break; case 2048: mask = 0x4000; break; case 4096: mask = 0x5000; break; default: break; } pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (mask != 0xffff && pcie_cap_reg) { pcie_cap_reg += 0x08; pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl); if ((pcie_dev_ctl & 0x7000) != mask) { printk(KERN_WARNING "BFA[%s]: " "pcie_max_read_request_size is %d, " 
"reset to %d\n", bfad->pci_name, (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7, pcie_max_read_reqsz); pcie_dev_ctl &= ~0x7000; pci_write_config_word(pdev, pcie_cap_reg, pcie_dev_ctl | mask); } } } return 0; out_release_region: pci_release_regions(pdev); out_disable_device: pci_disable_device(pdev); out: return rc; } void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad) { pci_iounmap(pdev, bfad->pci_bar0_kva); pci_iounmap(pdev, bfad->pci_bar2_kva); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } bfa_status_t bfad_drv_init(struct bfad_s *bfad) { bfa_status_t rc; unsigned long flags; bfad->cfg_data.rport_del_timeout = rport_del_timeout; bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth; bfad->cfg_data.io_max_sge = bfa_io_max_sge; bfad->cfg_data.binding_method = FCP_PWWN_BINDING; rc = bfad_hal_mem_alloc(bfad); if (rc != BFA_STATUS_OK) { printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n", bfad->inst_no); printk(KERN_WARNING "Not enough memory to attach all Brocade HBA ports, %s", "System may need more memory.\n"); goto out_hal_mem_alloc_failure; } bfad->bfa.trcmod = bfad->trcmod; bfad->bfa.plog = &bfad->plog_buf; bfa_plog_init(&bfad->plog_buf); bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, 0, "Driver Attach"); bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo, &bfad->hal_pcidev); /* FCS INIT */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfa_fcs.trcmod = bfad->trcmod; bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); bfad->bfa_fcs.fdmi_enabled = fdmi_enable; bfa_fcs_init(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfad->bfad_flags |= BFAD_DRV_INIT_DONE; /* configure base port */ rc = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); if (rc != BFA_STATUS_OK) goto out_cfg_pport_fail; return BFA_STATUS_OK; out_cfg_pport_fail: /* fcs exit - on cfg pport failure */ spin_lock_irqsave(&bfad->bfad_lock, flags); init_completion(&bfad->comp); 
bfad->pport.flags |= BFAD_PORT_DELETE; bfa_fcs_exit(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); /* bfa detach - free hal memory */ bfa_detach(&bfad->bfa); bfad_hal_mem_release(bfad); out_hal_mem_alloc_failure: return BFA_STATUS_FAILED; } void bfad_drv_uninit(struct bfad_s *bfad) { unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); init_completion(&bfad->comp); bfa_iocfc_stop(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); del_timer_sync(&bfad->hal_tmo); bfa_isr_disable(&bfad->bfa); bfa_detach(&bfad->bfa); bfad_remove_intr(bfad); bfad_hal_mem_release(bfad); bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE; } void bfad_drv_start(struct bfad_s *bfad) { unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_iocfc_start(&bfad->bfa); bfa_fcs_pbc_vport_init(&bfad->bfa_fcs); bfa_fcs_fabric_modstart(&bfad->bfa_fcs); bfad->bfad_flags |= BFAD_HAL_START_DONE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (bfad->im) flush_workqueue(bfad->im->drv_workq); } void bfad_fcs_stop(struct bfad_s *bfad) { unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); init_completion(&bfad->comp); bfad->pport.flags |= BFAD_PORT_DELETE; bfa_fcs_exit(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP); } void bfad_stop(struct bfad_s *bfad) { unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); init_completion(&bfad->comp); bfa_iocfc_stop(&bfad->bfa); bfad->bfad_flags &= ~BFAD_HAL_START_DONE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP); } bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role) { int rc = BFA_STATUS_OK; /* Allocate scsi_host for the physical port */ if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && (role & BFA_LPORT_ROLE_FCP_IM)) { if 
(bfad->pport.im_port == NULL) { rc = BFA_STATUS_FAILED; goto out; } rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port, &bfad->pcidev->dev); if (rc != BFA_STATUS_OK) goto out; bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM; } bfad->bfad_flags |= BFAD_CFG_PPORT_DONE; out: return rc; } void bfad_uncfg_pport(struct bfad_s *bfad) { if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) { bfad_im_scsi_host_free(bfad, bfad->pport.im_port); bfad_im_port_clean(bfad->pport.im_port); kfree(bfad->pport.im_port); bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM; } bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE; } bfa_status_t bfad_start_ops(struct bfad_s *bfad) { int retval; unsigned long flags; struct bfad_vport_s *vport, *vport_new; struct bfa_fcs_driver_info_s driver_info; /* Limit min/max. xfer size to [64k-32MB] */ if (max_xfer_size < BFAD_MIN_SECTORS >> 1) max_xfer_size = BFAD_MIN_SECTORS >> 1; if (max_xfer_size > BFAD_MAX_SECTORS >> 1) max_xfer_size = BFAD_MAX_SECTORS >> 1; /* Fill the driver_info info to fcs*/ memset(&driver_info, 0, sizeof(driver_info)); strncpy(driver_info.version, BFAD_DRIVER_VERSION, sizeof(driver_info.version) - 1); if (host_name) strncpy(driver_info.host_machine_name, host_name, sizeof(driver_info.host_machine_name) - 1); if (os_name) strncpy(driver_info.host_os_name, os_name, sizeof(driver_info.host_os_name) - 1); if (os_patch) strncpy(driver_info.host_os_patch, os_patch, sizeof(driver_info.host_os_patch) - 1); strncpy(driver_info.os_device_name, bfad->pci_name, sizeof(driver_info.os_device_name - 1)); /* FCS driver info init */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* * FCS update cfg - reset the pwwn/nwwn of fabric base logical port * with values learned during bfa_init firmware GETATTR REQ. 
*/ bfa_fcs_update_cfg(&bfad->bfa_fcs); /* Setup fc host fixed attribute if the lk supports */ bfad_fc_host_init(bfad->pport.im_port); /* BFAD level FC4 IM specific resource allocation */ retval = bfad_im_probe(bfad); if (retval != BFA_STATUS_OK) { printk(KERN_WARNING "bfad_im_probe failed\n"); if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) bfa_sm_set_state(bfad, bfad_sm_failed); bfad_im_probe_undo(bfad); bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; bfad_uncfg_pport(bfad); bfad_stop(bfad); return BFA_STATUS_FAILED; } else bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; bfad_drv_start(bfad); /* Complete pbc vport create */ list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list, list_entry) { struct fc_vport_identifiers vid; struct fc_vport *fc_vport; char pwwn_buf[BFA_STRING_32]; memset(&vid, 0, sizeof(vid)); vid.roles = FC_PORT_ROLE_FCP_INITIATOR; vid.vport_type = FC_PORTTYPE_NPIV; vid.disable = false; vid.node_name = wwn_to_u64((u8 *) (&((vport->fcs_vport).lport.port_cfg.nwwn))); vid.port_name = wwn_to_u64((u8 *) (&((vport->fcs_vport).lport.port_cfg.pwwn))); fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid); if (!fc_vport) { wwn2str(pwwn_buf, vid.port_name); printk(KERN_WARNING "bfad%d: failed to create pbc vport" " %s\n", bfad->inst_no, pwwn_buf); } list_del(&vport->list_entry); kfree(vport); } /* * If bfa_linkup_delay is set to -1 default; try to retrive the * value using the bfad_get_linkup_delay(); else use the * passed in module param value as the bfa_linkup_delay. 
*/ if (bfa_linkup_delay < 0) { bfa_linkup_delay = bfad_get_linkup_delay(bfad); bfad_rport_online_wait(bfad); bfa_linkup_delay = -1; } else bfad_rport_online_wait(bfad); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n"); return BFA_STATUS_OK; } int bfad_worker(void *ptr) { struct bfad_s *bfad; unsigned long flags; bfad = (struct bfad_s *)ptr; while (!kthread_should_stop()) { /* Send event BFAD_E_INIT_SUCCESS */ bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_tsk = NULL; spin_unlock_irqrestore(&bfad->bfad_lock, flags); break; } return 0; } /* * BFA driver interrupt functions */ irqreturn_t bfad_intx(int irq, void *dev_id) { struct bfad_s *bfad = dev_id; struct list_head doneq; unsigned long flags; bfa_boolean_t rc; spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_intx(&bfad->bfa); if (!rc) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); return IRQ_NONE; } bfa_comp_deq(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (!list_empty(&doneq)) { bfa_comp_process(&bfad->bfa, &doneq); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_comp_free(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } return IRQ_HANDLED; } static irqreturn_t bfad_msix(int irq, void *dev_id) { struct bfad_msix_s *vec = dev_id; struct bfad_s *bfad = vec->bfad; struct list_head doneq; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_msix(&bfad->bfa, vec->msix.entry); bfa_comp_deq(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (!list_empty(&doneq)) { bfa_comp_process(&bfad->bfa, &doneq); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_comp_free(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } return IRQ_HANDLED; } /* * Initialize the MSIX entry table. 
 */
/*
 * Populate bfad->msix_tab[] and msix_entries[] from the vector bit
 * mask returned by bfa_msix_getvecs(); sets bfad->nvec to the count
 * of bits set in @mask (max_bit is currently unused here).
 */
static void
bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
		     int mask, int max_bit)
{
	int	i;
	int	match = 0x00000001;

	for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
		if (mask & match) {
			bfad->msix_tab[bfad->nvec].msix.entry = i;
			bfad->msix_tab[bfad->nvec].bfad = bfad;
			msix_entries[bfad->nvec].entry = i;
			bfad->nvec++;
		}
		match <<= 1;
	}
}

/*
 * Request an IRQ for each enabled MSI-X vector. On any failure, all
 * previously requested vectors are freed, MSI-X is disabled and 1 is
 * returned; returns 0 on success.
 */
int
bfad_install_msix_handler(struct bfad_s *bfad)
{
	int i, error = 0;

	for (i = 0; i < bfad->nvec; i++) {
		/* Vector names differ between CB and CT ASIC families. */
		sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
				bfad->pci_name,
				((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
				msix_name_cb[i] : msix_name_ct[i]));

		error = request_irq(bfad->msix_tab[i].msix.vector,
				    (irq_handler_t) bfad_msix, 0,
				    bfad->msix_tab[i].name, &bfad->msix_tab[i]);
		bfa_trc(bfad, i);
		bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
		if (error) {
			int	j;

			/* Unwind the vectors already requested. */
			for (j = 0; j < i; j++)
				free_irq(bfad->msix_tab[j].msix.vector,
						&bfad->msix_tab[j]);

			bfad->bfad_flags &= ~BFAD_MSIX_ON;
			pci_disable_msix(bfad->pcidev);

			return 1;
		}
	}

	return 0;
}

/*
 * Setup MSIX based interrupt.
 *
 * Tries MSI-X when the ASIC family allows it and the corresponding
 * msix_disable_* module parameter is not set; otherwise (or on
 * pci_enable_msix failure) falls back to a legacy INTx handler.
 * Returns 0 on success, 1 when no interrupt could be set up.
 */
int
bfad_setup_intr(struct bfad_s *bfad)
{
	int error = 0;
	u32 mask = 0, i, num_bit = 0, max_bit = 0;
	struct msix_entry msix_entries[MAX_MSIX_ENTRY];
	struct pci_dev *pdev = bfad->pcidev;
	u16	reg;

	/* Call BFA to get the msix map for this PCI function.  */
	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);

	/* Set up the msix entry table */
	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);

	if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
	    (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {

		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
		if (error) {
			/*
			 * Only error number of vector is available.
			 * We don't have a mechanism to map multiple
			 * interrupts into one vector, so even if we
			 * can try to request less vectors, we don't
			 * know how to associate interrupt events to
			 *  vectors. Linux doesn't duplicate vectors
			 * in the MSIX table for this case.
			 */

			printk(KERN_WARNING "bfad%d: "
				"pci_enable_msix failed (%d),"
				" use line based.\n", bfad->inst_no, error);

			goto line_based;
		}

		/* Disable INTX in MSI-X mode */
		pci_read_config_word(pdev, PCI_COMMAND, &reg);

		if (!(reg & PCI_COMMAND_INTX_DISABLE))
			pci_write_config_word(pdev, PCI_COMMAND,
				reg | PCI_COMMAND_INTX_DISABLE);

		/* Save the vectors */
		for (i = 0; i < bfad->nvec; i++) {
			bfa_trc(bfad, msix_entries[i].vector);
			bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
		}

		bfa_msix_init(&bfad->bfa, bfad->nvec);

		bfad->bfad_flags |= BFAD_MSIX_ON;

		return error;
	}

line_based:
	error = 0;
	if (request_irq
	    (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
	     BFAD_DRIVER_NAME, bfad) != 0) {
		/* Enable interrupt handler failed */
		return 1;
	}
	bfad->bfad_flags |= BFAD_INTX_ON;

	return error;
}

/* Free whichever interrupt mode (MSI-X or INTx) was installed. */
void
bfad_remove_intr(struct bfad_s *bfad)
{
	int	i;

	if (bfad->bfad_flags & BFAD_MSIX_ON) {
		for (i = 0; i < bfad->nvec; i++)
			free_irq(bfad->msix_tab[i].msix.vector,
					&bfad->msix_tab[i]);

		pci_disable_msix(bfad->pcidev);
		bfad->bfad_flags &= ~BFAD_MSIX_ON;
	} else if (bfad->bfad_flags & BFAD_INTX_ON) {
		free_irq(bfad->pcidev->irq, bfad);
	}
}

/*
 * PCI probe entry.
 */
/*
 * PCI probe: allocate and initialize one bfad instance, register it on
 * the global list, run driver init and kick the state machine with
 * BFAD_E_CREATE. Errors unwind in strict reverse order via gotos.
 */
int
bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct bfad_s	*bfad;
	int		error = -ENODEV, retval, i;

	/* For single port cards - only claim function 0 */
	if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
		(PCI_FUNC(pdev->devfn) != 0))
		return -ENODEV;

	bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
	if (!bfad) {
		error = -ENOMEM;
		goto out;
	}

	bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
	if (!bfad->trcmod) {
		printk(KERN_WARNING "Error alloc trace buffer!\n");
		error = -ENOMEM;
		goto out_alloc_trace_failure;
	}

	/* TRACE INIT */
	bfa_trc_init(bfad->trcmod);
	bfa_trc(bfad, bfad_inst);

	/* AEN INIT */
	INIT_LIST_HEAD(&bfad->free_aen_q);
	INIT_LIST_HEAD(&bfad->active_aen_q);
	for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
		list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);

	/* Firmware image is cached globally; NULL means load failure. */
	if (!(bfad_load_fwimg(pdev))) {
		kfree(bfad->trcmod);
		goto out_alloc_trace_failure;
	}

	retval = bfad_pci_init(pdev, bfad);
	if (retval) {
		printk(KERN_WARNING "bfad_pci_init failure!\n");
		error = retval;
		goto out_pci_init_failure;
	}

	mutex_lock(&bfad_mutex);
	bfad->inst_no = bfad_inst++;
	list_add_tail(&bfad->list_entry, &bfad_list);
	mutex_unlock(&bfad_mutex);

	/* Initializing the state machine: State set to uninit */
	bfa_sm_set_state(bfad, bfad_sm_uninit);

	spin_lock_init(&bfad->bfad_lock);
	pci_set_drvdata(pdev, bfad);

	bfad->ref_count = 0;
	bfad->pport.bfad = bfad;
	INIT_LIST_HEAD(&bfad->pbc_vport_list);

	/* Setup the debugfs node for this bfad */
	if (bfa_debugfs_enable)
		bfad_debugfs_init(&bfad->pport);

	retval = bfad_drv_init(bfad);
	if (retval != BFA_STATUS_OK)
		goto out_drv_init_failure;

	bfa_sm_send_event(bfad, BFAD_E_CREATE);

	/* Still in uninit state after E_CREATE means the SM rejected it. */
	if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
		goto out_bfad_sm_failure;

	return 0;

out_bfad_sm_failure:
	bfa_detach(&bfad->bfa);
	bfad_hal_mem_release(bfad);
out_drv_init_failure:
	/* Remove the debugfs node for this bfad */
	kfree(bfad->regdata);
	bfad_debugfs_exit(&bfad->pport);
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);
out_pci_init_failure:
	kfree(bfad->trcmod);
out_alloc_trace_failure:
	kfree(bfad);
out:
	return error;
}

/*
 * PCI remove entry.
 *
 * Stops the worker thread (if still running), drives the state
 * machine through BFAD_E_STOP, detaches BFA and releases all
 * per-instance resources in reverse order of probe.
 */
void
bfad_pci_remove(struct pci_dev *pdev)
{
	struct bfad_s	      *bfad = pci_get_drvdata(pdev);
	unsigned long	flags;

	bfa_trc(bfad, bfad->inst_no);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfad->bfad_tsk != NULL) {
		/* Drop the lock before blocking in kthread_stop(). */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		kthread_stop(bfad->bfad_tsk);
	} else {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	/* Send Event BFAD_E_STOP */
	bfa_sm_send_event(bfad, BFAD_E_STOP);

	/* Driver detach and dealloc mem */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_detach(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfad_hal_mem_release(bfad);

	/* Remove the debugfs node for this bfad */
	kfree(bfad->regdata);
	bfad_debugfs_exit(&bfad->pport);

	/* Cleaning the BFAD instance */
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);

	kfree(bfad->trcmod);
	kfree(bfad);
}

/* PCI IDs claimed by this driver (Brocade 8G/CT/CT2 HBAs). */
struct pci_device_id bfad_id_table[] = {
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G2P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G1P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT_FC,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT2,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},

	{0, 0},
};

MODULE_DEVICE_TABLE(pci, bfad_id_table);

static struct pci_driver bfad_pci_driver = {
	.name = BFAD_DRIVER_NAME,
	.id_table = bfad_id_table,
	.probe = bfad_pci_probe,
	.remove = __devexit_p(bfad_pci_remove),
};

/*
 * Driver module init.
 */
static int __init
bfad_init(void)
{
	int		error = 0;

	printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
			BFAD_DRIVER_VERSION);

	if (num_sgpgs > 0)
		num_sgpgs_parm = num_sgpgs;

	error = bfad_im_module_init();
	if (error) {
		error = -ENOMEM;
		printk(KERN_WARNING "bfad_im_module_init failure\n");
		/*
		 * NOTE(review): this goto also runs bfad_im_module_exit()
		 * even though init failed — presumably safe, but verify.
		 */
		goto ext;
	}

	if (strcmp(FCPI_NAME, " fcpim") == 0)
		supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;

	bfa_auto_recover = ioc_auto_recover;
	bfa_fcs_rport_set_del_timeout(rport_del_timeout);

	error = pci_register_driver(&bfad_pci_driver);
	if (error) {
		printk(KERN_WARNING "pci_register_driver failure\n");
		goto ext;
	}

	return 0;

ext:
	bfad_im_module_exit();
	return error;
}

/*
 * Driver module exit.
 */
static void __exit
bfad_exit(void)
{
	pci_unregister_driver(&bfad_pci_driver);
	bfad_im_module_exit();
	bfad_free_fwimg();
}

/* Firmware handling */
/*
 * Load @fw_name into a vmalloc'd buffer; on success *bfi_image is the
 * buffer and *bfi_image_size its length in 32-bit words. On failure
 * *bfi_image is set to NULL.
 */
static void
bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
		u32 *bfi_image_size, char *fw_name)
{
	const struct firmware *fw;

	if (request_firmware(&fw, fw_name, &pdev->dev)) {
		printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
		*bfi_image = NULL;
		goto out;
	}

	*bfi_image = vmalloc(fw->size);
	if (NULL == *bfi_image) {
		printk(KERN_ALERT "Fail to allocate buffer for fw image "
			"size=%x!\n", (u32) fw->size);
		goto out;
	}

	memcpy(*bfi_image, fw->data, fw->size);
	*bfi_image_size = fw->size/sizeof(u32);
out:
	release_firmware(fw);
}

/*
 * Return the (lazily loaded, globally cached) firmware image matching
 * this device's ASIC family; NULL if loading failed.
 */
static u32 *
bfad_load_fwimg(struct pci_dev *pdev)
{
	if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
		if (bfi_image_ct2_size == 0)
			bfad_read_firmware(pdev, &bfi_image_ct2,
				&bfi_image_ct2_size, BFAD_FW_FILE_CT2);
		return bfi_image_ct2;
	} else if (bfa_asic_id_ct(pdev->device)) {
		if (bfi_image_ct_size == 0)
			bfad_read_firmware(pdev, &bfi_image_ct,
				&bfi_image_ct_size, BFAD_FW_FILE_CT);
		return bfi_image_ct;
	} else {
		if (bfi_image_cb_size == 0)
			bfad_read_firmware(pdev, &bfi_image_cb,
				&bfi_image_cb_size, BFAD_FW_FILE_CB);
		return bfi_image_cb;
	}
}

/* Free all cached firmware images (module exit). */
static void
bfad_free_fwimg(void)
{
	if (bfi_image_ct2_size && bfi_image_ct2)
		vfree(bfi_image_ct2);
	if (bfi_image_ct_size && bfi_image_ct)
		vfree(bfi_image_ct);
	if (bfi_image_cb_size && bfi_image_cb)
		vfree(bfi_image_cb);
}

module_init(bfad_init);
module_exit(bfad_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
MODULE_AUTHOR("Brocade Communications Systems, Inc.");
MODULE_VERSION(BFAD_DRIVER_VERSION);
gpl-2.0
morixhub/linux-am335x
drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
830
28877
/****************************************************************************** * * Copyright(c) 2009-2010 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../pci.h" #include "../base.h" #include "../stats.h" #include "reg.h" #include "def.h" #include "phy.h" #include "trx.h" #include "led.h" #include "dm.h" #include "phy.h" #include "fw.h" static u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) { __le16 fc = rtl_get_fc(skb); if (unlikely(ieee80211_is_beacon(fc))) return QSLT_BEACON; if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) return QSLT_MGNT; return skb->priority; } static u16 odm_cfo(char value) { int ret_val; if (value < 0) { ret_val = 0 - value; ret_val = (ret_val << 1) + (ret_val >> 1); /* set bit12 as 1 for negative cfo */ ret_val = ret_val | BIT(12); } else { ret_val = value; ret_val = (ret_val << 1) + (ret_val >> 1); } return ret_val; } static u8 _rtl8821ae_evm_dbm_jaguar(char value) { char ret_val = value; /* -33dB~0dB to 33dB ~ 0dB*/ if (ret_val == -128) ret_val = 127; else if (ret_val < 0) ret_val = 0 - ret_val; ret_val = ret_val >> 1; return ret_val; } static void query_rxphystatus(struct ieee80211_hw *hw, struct rtl_stats 
*pstatus, u8 *pdesc, struct rx_fwinfo_8821ae *p_drvinfo, bool bpacket_match_bssid, bool bpacket_toself, bool packet_beacon) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo; struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct rtl_phy *rtlphy = &rtlpriv->phy; char rx_pwr_all = 0, rx_pwr[4]; u8 rf_rx_num = 0, evm, evmdbm, pwdb_all; u8 i, max_spatial_stream; u32 rssi, total_rssi = 0; bool is_cck = pstatus->is_cck; u8 lan_idx, vga_idx; /* Record it for next packet processing */ pstatus->packet_matchbssid = bpacket_match_bssid; pstatus->packet_toself = bpacket_toself; pstatus->packet_beacon = packet_beacon; pstatus->rx_mimo_signalquality[0] = -1; pstatus->rx_mimo_signalquality[1] = -1; if (is_cck) { u8 cck_highpwr; u8 cck_agc_rpt; cck_agc_rpt = p_phystrpt->cfosho[0]; /* (1)Hardware does not provide RSSI for CCK * (2)PWDB, Average PWDB cacluated by * hardware (for rate adaptive) */ cck_highpwr = (u8)rtlphy->cck_high_power; lan_idx = ((cck_agc_rpt & 0xE0) >> 5); vga_idx = (cck_agc_rpt & 0x1f); if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE) { switch (lan_idx) { case 7: if (vga_idx <= 27) /*VGA_idx = 27~2*/ rx_pwr_all = -100 + 2*(27-vga_idx); else rx_pwr_all = -100; break; case 6: /*VGA_idx = 2~0*/ rx_pwr_all = -48 + 2*(2-vga_idx); break; case 5: /*VGA_idx = 7~5*/ rx_pwr_all = -42 + 2*(7-vga_idx); break; case 4: /*VGA_idx = 7~4*/ rx_pwr_all = -36 + 2*(7-vga_idx); break; case 3: /*VGA_idx = 7~0*/ rx_pwr_all = -24 + 2*(7-vga_idx); break; case 2: if (cck_highpwr) /*VGA_idx = 5~0*/ rx_pwr_all = -12 + 2*(5-vga_idx); else rx_pwr_all = -6 + 2*(5-vga_idx); break; case 1: rx_pwr_all = 8-2*vga_idx; break; case 0: rx_pwr_all = 14-2*vga_idx; break; default: break; } rx_pwr_all += 6; pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); if (!cck_highpwr) { if (pwdb_all >= 80) pwdb_all = ((pwdb_all - 80)<<1) + ((pwdb_all - 80)>>1) + 80; else if ((pwdb_all <= 78) && (pwdb_all >= 20)) pwdb_all += 3; if (pwdb_all > 100) 
pwdb_all = 100; } } else { /* 8821 */ char pout = -6; switch (lan_idx) { case 5: rx_pwr_all = pout - 32 - (2*vga_idx); break; case 4: rx_pwr_all = pout - 24 - (2*vga_idx); break; case 2: rx_pwr_all = pout - 11 - (2*vga_idx); break; case 1: rx_pwr_all = pout + 5 - (2*vga_idx); break; case 0: rx_pwr_all = pout + 21 - (2*vga_idx); break; } pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); } pstatus->rx_pwdb_all = pwdb_all; pstatus->recvsignalpower = rx_pwr_all; /* (3) Get Signal Quality (EVM) */ if (bpacket_match_bssid) { u8 sq; if (pstatus->rx_pwdb_all > 40) { sq = 100; } else { sq = p_phystrpt->pwdb_all; if (sq > 64) sq = 0; else if (sq < 20) sq = 100; else sq = ((64 - sq) * 100) / 44; } pstatus->signalquality = sq; pstatus->rx_mimo_signalquality[0] = sq; pstatus->rx_mimo_signalquality[1] = -1; } } else { /* (1)Get RSSI for HT rate */ for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) { /* we will judge RF RX path now. */ if (rtlpriv->dm.rfpath_rxenable[i]) rf_rx_num++; rx_pwr[i] = (p_phystrpt->gain_trsw[i] & 0x7f) - 110; /* Translate DBM to percentage. 
*/ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]); total_rssi += rssi; /* Get Rx snr value in DB */ pstatus->rx_snr[i] = p_phystrpt->rxsnr[i] / 2; rtlpriv->stats.rx_snr_db[i] = p_phystrpt->rxsnr[i] / 2; pstatus->cfo_short[i] = odm_cfo(p_phystrpt->cfosho[i]); pstatus->cfo_tail[i] = odm_cfo(p_phystrpt->cfotail[i]); /* Record Signal Strength for next packet */ pstatus->rx_mimo_signalstrength[i] = (u8)rssi; } /* (2)PWDB, Average PWDB cacluated by * hardware (for rate adaptive) */ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110; pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); pstatus->rx_pwdb_all = pwdb_all; pstatus->rxpower = rx_pwr_all; pstatus->recvsignalpower = rx_pwr_all; /* (3)EVM of HT rate */ if ((pstatus->is_ht && pstatus->rate >= DESC_RATEMCS8 && pstatus->rate <= DESC_RATEMCS15) || (pstatus->is_vht && pstatus->rate >= DESC_RATEVHT2SS_MCS0 && pstatus->rate <= DESC_RATEVHT2SS_MCS9)) max_spatial_stream = 2; else max_spatial_stream = 1; for (i = 0; i < max_spatial_stream; i++) { evm = rtl_evm_db_to_percentage(p_phystrpt->rxevm[i]); evmdbm = _rtl8821ae_evm_dbm_jaguar(p_phystrpt->rxevm[i]); if (bpacket_match_bssid) { /* Fill value in RFD, Get the first * spatial stream only */ if (i == 0) pstatus->signalquality = evm; pstatus->rx_mimo_signalquality[i] = evm; pstatus->rx_mimo_evm_dbm[i] = evmdbm; } } if (bpacket_match_bssid) { for (i = RF90_PATH_A; i <= RF90_PATH_B; i++) rtl_priv(hw)->dm.cfo_tail[i] = (char)p_phystrpt->cfotail[i]; rtl_priv(hw)->dm.packet_count++; } } /* UI BSS List signal strength(in percentage), * make it good looking, from 0~100. 
*/ if (is_cck) pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw, pwdb_all)); else if (rf_rx_num != 0) pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw, total_rssi /= rf_rx_num)); /*HW antenna diversity*/ rtldm->fat_table.antsel_rx_keep_0 = p_phystrpt->antidx_anta; rtldm->fat_table.antsel_rx_keep_1 = p_phystrpt->antidx_antb; } static void translate_rx_signal_stuff(struct ieee80211_hw *hw, struct sk_buff *skb, struct rtl_stats *pstatus, u8 *pdesc, struct rx_fwinfo_8821ae *p_drvinfo) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct ieee80211_hdr *hdr; u8 *tmp_buf; u8 *praddr; u8 *psaddr; __le16 fc; u16 type; bool packet_matchbssid, packet_toself, packet_beacon; tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; hdr = (struct ieee80211_hdr *)tmp_buf; fc = hdr->frame_control; type = WLAN_FC_GET_TYPE(hdr->frame_control); praddr = hdr->addr1; psaddr = ieee80211_get_SA(hdr); ether_addr_copy(pstatus->psaddr, psaddr); packet_matchbssid = (!ieee80211_is_ctl(fc) && (ether_addr_equal(mac->bssid, ieee80211_has_tods(fc) ? hdr->addr1 : ieee80211_has_fromds(fc) ? 
hdr->addr2 : hdr->addr3)) && (!pstatus->hwerror) && (!pstatus->crc) && (!pstatus->icv)); packet_toself = packet_matchbssid && (ether_addr_equal(praddr, rtlefuse->dev_addr)); if (ieee80211_is_beacon(hdr->frame_control)) packet_beacon = true; else packet_beacon = false; if (packet_beacon && packet_matchbssid) rtl_priv(hw)->dm.dbginfo.num_qry_beacon_pkt++; if (packet_matchbssid && ieee80211_is_data_qos(hdr->frame_control) && !is_multicast_ether_addr(ieee80211_get_DA(hdr))) { struct ieee80211_qos_hdr *hdr_qos = (struct ieee80211_qos_hdr *)tmp_buf; u16 tid = le16_to_cpu(hdr_qos->qos_ctrl) & 0xf; if (tid != 0 && tid != 3) rtl_priv(hw)->dm.dbginfo.num_non_be_pkt++; } query_rxphystatus(hw, pstatus, pdesc, p_drvinfo, packet_matchbssid, packet_toself, packet_beacon); /*_rtl8821ae_smart_antenna(hw, pstatus); */ rtl_process_phyinfo(hw, tmp_buf, pstatus); } static void _rtl8821ae_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, u8 *virtualaddress) { u32 dwtmp = 0; memset(virtualaddress, 0, 8); SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num); if (ptcb_desc->empkt_num == 1) { dwtmp = ptcb_desc->empkt_len[0]; } else { dwtmp = ptcb_desc->empkt_len[0]; dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0)+4; dwtmp += ptcb_desc->empkt_len[1]; } SET_EARLYMODE_LEN0(virtualaddress, dwtmp); if (ptcb_desc->empkt_num <= 3) { dwtmp = ptcb_desc->empkt_len[2]; } else { dwtmp = ptcb_desc->empkt_len[2]; dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0)+4; dwtmp += ptcb_desc->empkt_len[3]; } SET_EARLYMODE_LEN1(virtualaddress, dwtmp); if (ptcb_desc->empkt_num <= 5) { dwtmp = ptcb_desc->empkt_len[4]; } else { dwtmp = ptcb_desc->empkt_len[4]; dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0)+4; dwtmp += ptcb_desc->empkt_len[5]; } SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF); SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4); if (ptcb_desc->empkt_num <= 7) { dwtmp = ptcb_desc->empkt_len[6]; } else { dwtmp = ptcb_desc->empkt_len[6]; dwtmp += ((dwtmp % 4) ? 
(4 - dwtmp % 4) : 0)+4; dwtmp += ptcb_desc->empkt_len[7]; } SET_EARLYMODE_LEN3(virtualaddress, dwtmp); if (ptcb_desc->empkt_num <= 9) { dwtmp = ptcb_desc->empkt_len[8]; } else { dwtmp = ptcb_desc->empkt_len[8]; dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0)+4; dwtmp += ptcb_desc->empkt_len[9]; } SET_EARLYMODE_LEN4(virtualaddress, dwtmp); } static bool rtl8821ae_get_rxdesc_is_ht(struct ieee80211_hw *hw, u8 *pdesc) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 rx_rate = 0; rx_rate = GET_RX_DESC_RXMCS(pdesc); RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate); if ((rx_rate >= DESC_RATEMCS0) && (rx_rate <= DESC_RATEMCS15)) return true; return false; } static bool rtl8821ae_get_rxdesc_is_vht(struct ieee80211_hw *hw, u8 *pdesc) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 rx_rate = 0; rx_rate = GET_RX_DESC_RXMCS(pdesc); RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate); if (rx_rate >= DESC_RATEVHT1SS_MCS0) return true; return false; } static u8 rtl8821ae_get_rx_vht_nss(struct ieee80211_hw *hw, u8 *pdesc) { u8 rx_rate = 0; u8 vht_nss = 0; rx_rate = GET_RX_DESC_RXMCS(pdesc); if ((rx_rate >= DESC_RATEVHT1SS_MCS0) && (rx_rate <= DESC_RATEVHT1SS_MCS9)) vht_nss = 1; else if ((rx_rate >= DESC_RATEVHT2SS_MCS0) && (rx_rate <= DESC_RATEVHT2SS_MCS9)) vht_nss = 2; return vht_nss; } bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *status, struct ieee80211_rx_status *rx_status, u8 *pdesc, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rx_fwinfo_8821ae *p_drvinfo; struct ieee80211_hdr *hdr; u32 phystatus = GET_RX_DESC_PHYST(pdesc); status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc); status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) * RX_DRV_INFO_SIZE_UNIT; status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03); status->icv = (u16)GET_RX_DESC_ICV(pdesc); status->crc = (u16)GET_RX_DESC_CRC32(pdesc); status->hwerror = (status->crc | status->icv); status->decrypted = 
!GET_RX_DESC_SWDEC(pdesc); status->rate = (u8)GET_RX_DESC_RXMCS(pdesc); status->shortpreamble = (u16)GET_RX_DESC_SPLCP(pdesc); status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); status->isfirst_ampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); status->timestamp_low = GET_RX_DESC_TSFL(pdesc); status->rx_packet_bw = GET_RX_DESC_BW(pdesc); status->macid = GET_RX_DESC_MACID(pdesc); status->is_short_gi = !(bool)GET_RX_DESC_SPLCP(pdesc); status->is_ht = rtl8821ae_get_rxdesc_is_ht(hw, pdesc); status->is_vht = rtl8821ae_get_rxdesc_is_vht(hw, pdesc); status->vht_nss = rtl8821ae_get_rx_vht_nss(hw, pdesc); status->is_cck = RTL8821AE_RX_HAL_IS_CCK_RATE(status->rate); RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_packet_bw=%s,is_ht %d, is_vht %d, vht_nss=%d,is_short_gi %d.\n", (status->rx_packet_bw == 2) ? "80M" : (status->rx_packet_bw == 1) ? "40M" : "20M", status->is_ht, status->is_vht, status->vht_nss, status->is_short_gi); if (GET_RX_STATUS_DESC_RPT_SEL(pdesc)) status->packet_report_type = C2H_PACKET; else status->packet_report_type = NORMAL_RX; if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc)) status->wake_match = BIT(2); else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) status->wake_match = BIT(1); else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc)) status->wake_match = BIT(0); else status->wake_match = 0; if (status->wake_match) RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "GGGGGGGGGGGGGet Wakeup Packet!! 
WakeMatch=%d\n", status->wake_match); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size + status->rx_bufshift); if (status->crc) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (status->rx_packet_bw == HT_CHANNEL_WIDTH_20_40) rx_status->flag |= RX_FLAG_40MHZ; else if (status->rx_packet_bw == HT_CHANNEL_WIDTH_80) rx_status->vht_flag |= RX_VHT_FLAG_80MHZ; if (status->is_ht) rx_status->flag |= RX_FLAG_HT; if (status->is_vht) rx_status->flag |= RX_FLAG_VHT; if (status->is_short_gi) rx_status->flag |= RX_FLAG_SHORT_GI; rx_status->vht_nss = status->vht_nss; rx_status->flag |= RX_FLAG_MACTIME_START; /* hw will set status->decrypted true, if it finds the * frame is open data frame or mgmt frame. * So hw will not decryption robust managment frame * for IEEE80211w but still set status->decrypted * true, so here we should set it back to undecrypted * for IEEE80211w frame, and mac80211 sw will help * to decrypt it */ if (status->decrypted) { if ((!_ieee80211_is_robust_mgmt_frame(hdr)) && (ieee80211_has_protected(hdr->frame_control))) rx_status->flag |= RX_FLAG_DECRYPTED; else rx_status->flag &= ~RX_FLAG_DECRYPTED; } /* rate_idx: index of data rate into band's * supported rates or MCS index if HT rates * are use (RX_FLAG_HT) */ rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht, status->is_vht, status->rate); rx_status->mactime = status->timestamp_low; if (phystatus) { p_drvinfo = (struct rx_fwinfo_8821ae *)(skb->data + status->rx_bufshift); translate_rx_signal_stuff(hw, skb, status, pdesc, p_drvinfo); } rx_status->signal = status->recvsignalpower + 10; if (status->packet_report_type == TX_REPORT2) { status->macid_valid_entry[0] = GET_RX_RPT2_DESC_MACID_VALID_1(pdesc); status->macid_valid_entry[1] = GET_RX_RPT2_DESC_MACID_VALID_2(pdesc); } return true; } static u8 rtl8821ae_bw_mapping(struct ieee80211_hw *hw, struct rtl_tcb_desc *ptcb_desc) { struct 
rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; u8 bw_setting_of_desc = 0; RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "rtl8821ae_bw_mapping, current_chan_bw %d, packet_bw %d\n", rtlphy->current_chan_bw, ptcb_desc->packet_bw); if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) { if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80) bw_setting_of_desc = 2; else if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) bw_setting_of_desc = 1; else bw_setting_of_desc = 0; } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { if ((ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) || (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80)) bw_setting_of_desc = 1; else bw_setting_of_desc = 0; } else { bw_setting_of_desc = 0; } return bw_setting_of_desc; } static u8 rtl8821ae_sc_mapping(struct ieee80211_hw *hw, struct rtl_tcb_desc *ptcb_desc) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_mac *mac = rtl_mac(rtlpriv); u8 sc_setting_of_desc = 0; if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) { if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80) { sc_setting_of_desc = VHT_DATA_SC_DONOT_CARE; } else if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) { if (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER) sc_setting_of_desc = VHT_DATA_SC_40_LOWER_OF_80MHZ; else if (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER) sc_setting_of_desc = VHT_DATA_SC_40_UPPER_OF_80MHZ; else RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD, "rtl8821ae_sc_mapping: Not Correct Primary40MHz Setting\n"); } else { if ((mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER) && (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER)) sc_setting_of_desc = VHT_DATA_SC_20_LOWEST_OF_80MHZ; else if ((mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER) && (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER)) sc_setting_of_desc = VHT_DATA_SC_20_LOWER_OF_80MHZ; else if ((mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER) && (mac->cur_80_prime_sc == 
HAL_PRIME_CHNL_OFFSET_UPPER)) sc_setting_of_desc = VHT_DATA_SC_20_UPPER_OF_80MHZ; else if ((mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER) && (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER)) sc_setting_of_desc = VHT_DATA_SC_20_UPPERST_OF_80MHZ; else RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD, "rtl8821ae_sc_mapping: Not Correct Primary40MHz Setting\n"); } } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) { sc_setting_of_desc = VHT_DATA_SC_DONOT_CARE; } else if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20) { if (mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER) { sc_setting_of_desc = VHT_DATA_SC_20_UPPER_OF_80MHZ; } else if (mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER){ sc_setting_of_desc = VHT_DATA_SC_20_LOWER_OF_80MHZ; } else { sc_setting_of_desc = VHT_DATA_SC_DONOT_CARE; } } } else { sc_setting_of_desc = VHT_DATA_SC_DONOT_CARE; } return sc_setting_of_desc; } void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); u8 *pdesc = (u8 *)pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; unsigned int buf_len = 0; unsigned int skb_len = skb->len; u8 fw_qsel = _rtl8821ae_map_hwqueue_to_fwqueue(skb, hw_queue); bool firstseg = ((hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); bool lastseg = ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0); dma_addr_t mapping; u8 short_gi = 0; seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc); /* reserve 8 byte for AMPDU early mode */ if (rtlhal->earlymode_enable) { skb_push(skb, EM_HDR_LEN); 
memset(skb->data, 0, EM_HDR_LEN); } buf_len = skb->len; mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error"); return; } CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8821ae)); if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) { firstseg = true; lastseg = true; } if (firstseg) { if (rtlhal->earlymode_enable) { SET_TX_DESC_PKT_OFFSET(pdesc, 1); SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN + EM_HDR_LEN); if (ptcb_desc->empkt_num) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "Insert 8 byte.pTcb->EMPktNum:%d\n", ptcb_desc->empkt_num); _rtl8821ae_insert_emcontent(ptcb_desc, (u8 *)(skb->data)); } } else { SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); } /* ptcb_desc->use_driver_rate = true; */ SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); if (ptcb_desc->hw_rate > DESC_RATEMCS0) short_gi = (ptcb_desc->use_shortgi) ? 1 : 0; else short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0; SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi); if (info->flags & IEEE80211_TX_CTL_AMPDU) { SET_TX_DESC_AGG_ENABLE(pdesc, 1); SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x1f); } SET_TX_DESC_SEQ(pdesc, seq_number); SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable && !ptcb_desc->cts_enable) ? 1 : 0)); SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0); SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0)); SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate); SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc); SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <= DESC_RATE54M) ? (ptcb_desc->rts_use_shortpreamble ? 1 : 0) : (ptcb_desc->rts_use_shortgi ? 
1 : 0))); if (ptcb_desc->tx_enable_sw_calc_duration) SET_TX_DESC_NAV_USE_HDR(pdesc, 1); SET_TX_DESC_DATA_BW(pdesc, rtl8821ae_bw_mapping(hw, ptcb_desc)); SET_TX_DESC_TX_SUB_CARRIER(pdesc, rtl8821ae_sc_mapping(hw, ptcb_desc)); SET_TX_DESC_LINIP(pdesc, 0); SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb_len); if (sta) { u8 ampdu_density = sta->ht_cap.ampdu_density; SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); } if (info->control.hw_key) { struct ieee80211_key_conf *keyconf = info->control.hw_key; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: SET_TX_DESC_SEC_TYPE(pdesc, 0x1); break; case WLAN_CIPHER_SUITE_CCMP: SET_TX_DESC_SEC_TYPE(pdesc, 0x3); break; default: SET_TX_DESC_SEC_TYPE(pdesc, 0x0); break; } } SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel); SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F); SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF); SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ? 1 : 0); SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0); if (ieee80211_is_data_qos(fc)) { if (mac->rdg_en) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "Enable RDG function.\n"); SET_TX_DESC_RDG_ENABLE(pdesc, 1); SET_TX_DESC_HTC(pdesc, 1); } } } SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0)); SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)buf_len); SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); /* if (rtlpriv->dm.useramask) { */ if (1) { SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index); SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); } else { SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index); SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); } if (!ieee80211_is_data_qos(fc)) { SET_TX_DESC_HWSEQ_EN(pdesc, 1); SET_TX_DESC_HWSEQ_SEL(pdesc, 0); } SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 
0 : 1)); if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { SET_TX_DESC_BMC(pdesc, 1); } rtl8821ae_dm_set_tx_ant_by_tx_info(hw, pdesc, ptcb_desc->mac_id); RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); u8 fw_queue = QSLT_BEACON; dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error"); return; } CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); SET_TX_DESC_FIRST_SEG(pdesc, 1); SET_TX_DESC_LAST_SEG(pdesc, 1); SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len)); SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); SET_TX_DESC_USE_RATE(pdesc, 1); SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M); SET_TX_DESC_DISABLE_FB(pdesc, 1); SET_TX_DESC_DATA_BW(pdesc, 0); SET_TX_DESC_HWSEQ_EN(pdesc, 1); SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len)); SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); SET_TX_DESC_MACID(pdesc, 0); SET_TX_DESC_OWN(pdesc, 1); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n", pdesc, TX_DESC_SIZE); } void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, u8 desc_name, u8 *val) { if (istx) { switch (desc_name) { case HW_DESC_OWN: SET_TX_DESC_OWN(pdesc, 1); break; case HW_DESC_TX_NEXTDESC_ADDR: SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); break; default: RT_ASSERT(false, "ERR txdesc :%d not process\n", desc_name); break; } } else { switch (desc_name) { case HW_DESC_RXOWN: SET_RX_DESC_OWN(pdesc, 1); break; case HW_DESC_RXBUFF_ADDR: SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val); break; case HW_DESC_RXPKT_LEN: SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val); break; case HW_DESC_RXERO: 
SET_RX_DESC_EOR(pdesc, 1);
			break;
		default:
			RT_ASSERT(false,
				  "ERR rxdesc :%d not process\n",
				  desc_name);
			break;
		}
	}
}

/*
 * Read one named field back from a TX or RX descriptor.
 * @istx selects the TX field set; unknown names trigger RT_ASSERT and
 * leave the return value at 0.
 */
u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name)
{
	u32 ret = 0;

	if (istx) {
		switch (desc_name) {
		case HW_DESC_OWN:
			ret = GET_TX_DESC_OWN(pdesc);
			break;
		case HW_DESC_TXBUFF_ADDR:
			ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
			break;
		default:
			RT_ASSERT(false,
				  "ERR txdesc :%d not process\n",
				  desc_name);
			break;
		}
	} else {
		switch (desc_name) {
		case HW_DESC_OWN:
			ret = GET_RX_DESC_OWN(pdesc);
			break;
		case HW_DESC_RXPKT_LEN:
			ret = GET_RX_DESC_PKT_LEN(pdesc);
			break;
		case HW_DESC_RXBUFF_ADDR:
			ret = GET_RX_DESC_BUFF_ADDR(pdesc);
			break;
		default:
			RT_ASSERT(false,
				  "ERR rxdesc :%d not process\n",
				  desc_name);
			break;
		}
	}
	return ret;
}

/*
 * Report whether the descriptor at the ring's current index has been
 * released by the hardware (OWN bit cleared).  Note: checks ring->idx,
 * not @index.
 */
bool rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw,
				 u8 hw_queue, u16 index)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
	u8 *entry = (u8 *)(&ring->desc[ring->idx]);
	u8 own = (u8)rtl8821ae_get_desc(entry, true, HW_DESC_OWN);

	/*
	 * A beacon packet will only use the first descriptor by default,
	 * and the OWN bit may not be cleared by the hardware.
	 */
	if (own)
		return false;
	return true;
}

/*
 * Kick the hardware to poll a TX queue: BIT(4) for the beacon queue,
 * otherwise one bit per queue index in REG_PCIE_CTRL_REG.
 */
void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (hw_queue == BEACON_QUEUE) {
		rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
	} else {
		rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
			       BIT(0) << (hw_queue));
	}
}

/*
 * Dispatch a received command packet.  Returns 1 when the skb was a
 * C2H packet consumed by the handler, 0 for a normal RX frame (or an
 * unrecognized report type).
 */
u32 rtl8821ae_rx_command_packet(struct ieee80211_hw *hw,
				struct rtl_stats status,
				struct sk_buff *skb)
{
	u32 result = 0;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	switch (status.packet_report_type) {
	case NORMAL_RX:
		result = 0;
		break;
	case C2H_PACKET:
		rtl8821ae_c2h_packet_handler(hw, skb->data, (u8)skb->len);
		result = 1;
		RT_TRACE(rtlpriv, COMP_RECV, DBG_LOUD,
			 "skb->len=%d\n\n", skb->len);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_RECV, DBG_LOUD,
			 "No this packet type!!\n");
		break;
	}
	return result;
}
gpl-2.0
pio-masaki/android_kernel_lge_v510
drivers/mfd/pm8821-core.c
1086
9561
/* * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/msm_ssbi.h> #include <linux/mfd/core.h> #include <linux/mfd/pm8xxx/pm8821.h> #include <linux/mfd/pm8xxx/core.h> #define REG_HWREV 0x002 /* PMIC4 revision */ #define REG_HWREV_2 0x0E8 /* PMIC4 revision 2 */ #define REG_MPP_BASE 0x050 #define REG_IRQ_BASE 0x100 #define REG_TEMP_ALARM_CTRL 0x01B #define REG_TEMP_ALARM_PWM 0x09B #define PM8821_VERSION_MASK 0xFFF0 #define PM8821_VERSION_VALUE 0x0BF0 #define PM8821_REVISION_MASK 0x000F #define SINGLE_IRQ_RESOURCE(_name, _irq) \ { \ .name = _name, \ .start = _irq, \ .end = _irq, \ .flags = IORESOURCE_IRQ, \ } struct pm8821 { struct device *dev; struct pm_irq_chip *irq_chip; u32 rev_registers; }; static int pm8821_readb(const struct device *dev, u16 addr, u8 *val) { const struct pm8xxx_drvdata *pm8821_drvdata = dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; return msm_ssbi_read(pmic->dev->parent, addr, val, 1); } static int pm8821_writeb(const struct device *dev, u16 addr, u8 val) { const struct pm8xxx_drvdata *pm8821_drvdata = dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; return msm_ssbi_write(pmic->dev->parent, addr, &val, 1); } static int pm8821_read_buf(const struct device *dev, u16 addr, u8 *buf, int cnt) { const struct pm8xxx_drvdata *pm8821_drvdata 
= dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; return msm_ssbi_read(pmic->dev->parent, addr, buf, cnt); } static int pm8821_write_buf(const struct device *dev, u16 addr, u8 *buf, int cnt) { const struct pm8xxx_drvdata *pm8821_drvdata = dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; return msm_ssbi_write(pmic->dev->parent, addr, buf, cnt); } static int pm8821_read_irq_stat(const struct device *dev, int irq) { const struct pm8xxx_drvdata *pm8821_drvdata = dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; return pm8821_get_irq_stat(pmic->irq_chip, irq); } static enum pm8xxx_version pm8821_get_version(const struct device *dev) { const struct pm8xxx_drvdata *pm8821_drvdata = dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; enum pm8xxx_version version = -ENODEV; if ((pmic->rev_registers & PM8821_VERSION_MASK) == PM8821_VERSION_VALUE) version = PM8XXX_VERSION_8821; return version; } static int pm8821_get_revision(const struct device *dev) { const struct pm8xxx_drvdata *pm8821_drvdata = dev_get_drvdata(dev); const struct pm8821 *pmic = pm8821_drvdata->pm_chip_data; return pmic->rev_registers & PM8821_REVISION_MASK; } static struct pm8xxx_drvdata pm8821_drvdata = { .pmic_readb = pm8821_readb, .pmic_writeb = pm8821_writeb, .pmic_read_buf = pm8821_read_buf, .pmic_write_buf = pm8821_write_buf, .pmic_read_irq_stat = pm8821_read_irq_stat, .pmic_get_version = pm8821_get_version, .pmic_get_revision = pm8821_get_revision, }; static const struct resource mpp_cell_resources[] __devinitconst = { { .start = PM8821_IRQ_BLOCK_BIT(PM8821_MPP_BLOCK_START, 0), .end = PM8821_IRQ_BLOCK_BIT(PM8821_MPP_BLOCK_START, 0) + PM8821_NR_MPPS - 1, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell mpp_cell __devinitdata = { .name = PM8XXX_MPP_DEV_NAME, .id = 1, .resources = mpp_cell_resources, .num_resources = ARRAY_SIZE(mpp_cell_resources), }; static struct mfd_cell 
debugfs_cell __devinitdata = { .name = "pm8xxx-debug", .id = 1, .platform_data = "pm8821-dbg", .pdata_size = sizeof("pm8821-dbg"), }; static const struct resource thermal_alarm_cell_resources[] __devinitconst = { SINGLE_IRQ_RESOURCE("pm8821_tempstat_irq", PM8821_TEMPSTAT_IRQ), SINGLE_IRQ_RESOURCE("pm8821_overtemp_irq", PM8821_OVERTEMP_IRQ), }; static struct pm8xxx_tm_core_data thermal_alarm_cdata = { .adc_type = PM8XXX_TM_ADC_NONE, .reg_addr_temp_alarm_ctrl = REG_TEMP_ALARM_CTRL, .reg_addr_temp_alarm_pwm = REG_TEMP_ALARM_PWM, .tm_name = "pm8821_tz", .irq_name_temp_stat = "pm8821_tempstat_irq", .irq_name_over_temp = "pm8821_overtemp_irq", .default_no_adc_temp = 37000, }; static struct mfd_cell thermal_alarm_cell __devinitdata = { .name = PM8XXX_TM_DEV_NAME, .id = 1, .resources = thermal_alarm_cell_resources, .num_resources = ARRAY_SIZE(thermal_alarm_cell_resources), .platform_data = &thermal_alarm_cdata, .pdata_size = sizeof(struct pm8xxx_tm_core_data), }; static int __devinit pm8821_add_subdevices(const struct pm8821_platform_data *pdata, struct pm8821 *pmic) { int ret = 0, irq_base = 0; struct pm_irq_chip *irq_chip; if (pdata->irq_pdata) { pdata->irq_pdata->irq_cdata.nirqs = PM8821_NR_IRQS; pdata->irq_pdata->irq_cdata.base_addr = REG_IRQ_BASE; irq_base = pdata->irq_pdata->irq_base; irq_chip = pm8821_irq_init(pmic->dev, pdata->irq_pdata); if (IS_ERR(irq_chip)) { pr_err("Failed to init interrupts ret=%ld\n", PTR_ERR(irq_chip)); return PTR_ERR(irq_chip); } pmic->irq_chip = irq_chip; } if (pdata->mpp_pdata) { pdata->mpp_pdata->core_data.nmpps = PM8821_NR_MPPS; pdata->mpp_pdata->core_data.base_addr = REG_MPP_BASE; mpp_cell.platform_data = pdata->mpp_pdata; mpp_cell.pdata_size = sizeof(struct pm8xxx_mpp_platform_data); ret = mfd_add_devices(pmic->dev, 0, &mpp_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add mpp subdevice ret=%d\n", ret); goto bail; } } ret = mfd_add_devices(pmic->dev, 0, &debugfs_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add 
debugfs subdevice ret=%d\n", ret); goto bail; } ret = mfd_add_devices(pmic->dev, 0, &thermal_alarm_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add thermal alarm subdevice ret=%d\n", ret); goto bail; } return 0; bail: if (pmic->irq_chip) { pm8821_irq_exit(pmic->irq_chip); pmic->irq_chip = NULL; } return ret; } static const char * const pm8821_rev_names[] = { [PM8XXX_REVISION_8821_TEST] = "test", [PM8XXX_REVISION_8821_1p0] = "1.0", [PM8XXX_REVISION_8821_2p0] = "2.0", [PM8XXX_REVISION_8821_2p1] = "2.1", }; static int __devinit pm8821_probe(struct platform_device *pdev) { const struct pm8821_platform_data *pdata = pdev->dev.platform_data; const char *revision_name = "unknown"; struct pm8821 *pmic; enum pm8xxx_version version; int revision; int rc; u8 val; if (!pdata) { pr_err("missing platform data\n"); return -EINVAL; } pmic = kzalloc(sizeof(struct pm8821), GFP_KERNEL); if (!pmic) { pr_err("Cannot alloc pm8821 struct\n"); return -ENOMEM; } /* Read PMIC chip revision */ rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val)); if (rc) { pr_err("Failed to read hw rev reg %d:rc=%d\n", REG_HWREV, rc); goto err_read_rev; } pr_info("PMIC revision 1: PM8821 rev %02X\n", val); pmic->rev_registers = val; /* Read PMIC chip revision 2 */ rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV_2, &val, sizeof(val)); if (rc) { pr_err("Failed to read hw rev 2 reg %d:rc=%d\n", REG_HWREV_2, rc); goto err_read_rev; } pr_info("PMIC revision 2: PM8821 rev %02X\n", val); pmic->rev_registers |= val << BITS_PER_BYTE; pmic->dev = &pdev->dev; pm8821_drvdata.pm_chip_data = pmic; platform_set_drvdata(pdev, &pm8821_drvdata); /* Print out human readable version and revision names. 
*/ version = pm8xxx_get_version(pmic->dev); if (version == PM8XXX_VERSION_8821) { revision = pm8xxx_get_revision(pmic->dev); if (revision >= 0 && revision < ARRAY_SIZE(pm8821_rev_names)) revision_name = pm8821_rev_names[revision]; pr_info("PMIC version: PM8821 ver %s\n", revision_name); } else { WARN_ON(version != PM8XXX_VERSION_8821); } rc = pm8821_add_subdevices(pdata, pmic); if (rc) { pr_err("Cannot add subdevices rc=%d\n", rc); goto err; } return 0; err: mfd_remove_devices(pmic->dev); platform_set_drvdata(pdev, NULL); err_read_rev: kfree(pmic); return rc; } static int __devexit pm8821_remove(struct platform_device *pdev) { struct pm8xxx_drvdata *drvdata; struct pm8821 *pmic = NULL; drvdata = platform_get_drvdata(pdev); if (drvdata) pmic = drvdata->pm_chip_data; if (pmic) mfd_remove_devices(pmic->dev); if (pmic->irq_chip) { pm8821_irq_exit(pmic->irq_chip); pmic->irq_chip = NULL; } platform_set_drvdata(pdev, NULL); kfree(pmic); return 0; } static struct platform_driver pm8821_driver = { .probe = pm8821_probe, .remove = __devexit_p(pm8821_remove), .driver = { .name = "pm8821-core", .owner = THIS_MODULE, }, }; static int __init pm8821_init(void) { return platform_driver_register(&pm8821_driver); } postcore_initcall(pm8821_init); static void __exit pm8821_exit(void) { platform_driver_unregister(&pm8821_driver); } module_exit(pm8821_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PMIC 8821 core driver"); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:pm8821-core");
gpl-2.0
PlatinumMaster/SM-G360T1_kernel
drivers/uwb/rsv.c
2110
27567
/* * UWB reservation management. * * Copyright (C) 2008 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/uwb.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/export.h> #include "uwb-internal.h" static void uwb_rsv_timer(unsigned long arg); static const char *rsv_states[] = { [UWB_RSV_STATE_NONE] = "none ", [UWB_RSV_STATE_O_INITIATED] = "o initiated ", [UWB_RSV_STATE_O_PENDING] = "o pending ", [UWB_RSV_STATE_O_MODIFIED] = "o modified ", [UWB_RSV_STATE_O_ESTABLISHED] = "o established ", [UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ", [UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding", [UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining", [UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ", [UWB_RSV_STATE_T_ACCEPTED] = "t accepted ", [UWB_RSV_STATE_T_CONFLICT] = "t conflict ", [UWB_RSV_STATE_T_PENDING] = "t pending ", [UWB_RSV_STATE_T_DENIED] = "t denied ", [UWB_RSV_STATE_T_RESIZED] = "t resized ", [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ", [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf", [UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend", [UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ", }; static const char *rsv_types[] = { [UWB_DRP_TYPE_ALIEN_BP] = "alien-bp", [UWB_DRP_TYPE_HARD] = "hard", [UWB_DRP_TYPE_SOFT] = "soft", [UWB_DRP_TYPE_PRIVATE] = "private", [UWB_DRP_TYPE_PCA] = "pca", }; bool 
uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv) { static const bool has_two_drp_ies[] = { [UWB_RSV_STATE_O_INITIATED] = false, [UWB_RSV_STATE_O_PENDING] = false, [UWB_RSV_STATE_O_MODIFIED] = false, [UWB_RSV_STATE_O_ESTABLISHED] = false, [UWB_RSV_STATE_O_TO_BE_MOVED] = false, [UWB_RSV_STATE_O_MOVE_COMBINING] = false, [UWB_RSV_STATE_O_MOVE_REDUCING] = false, [UWB_RSV_STATE_O_MOVE_EXPANDING] = true, [UWB_RSV_STATE_T_ACCEPTED] = false, [UWB_RSV_STATE_T_CONFLICT] = false, [UWB_RSV_STATE_T_PENDING] = false, [UWB_RSV_STATE_T_DENIED] = false, [UWB_RSV_STATE_T_RESIZED] = false, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true, [UWB_RSV_STATE_T_EXPANDING_PENDING] = true, [UWB_RSV_STATE_T_EXPANDING_DENIED] = true, }; return has_two_drp_ies[rsv->state]; } /** * uwb_rsv_state_str - return a string for a reservation state * @state: the reservation state. */ const char *uwb_rsv_state_str(enum uwb_rsv_state state) { if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST) return "unknown"; return rsv_states[state]; } EXPORT_SYMBOL_GPL(uwb_rsv_state_str); /** * uwb_rsv_type_str - return a string for a reservation type * @type: the reservation type */ const char *uwb_rsv_type_str(enum uwb_drp_type type) { if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA) return "invalid"; return rsv_types[type]; } EXPORT_SYMBOL_GPL(uwb_rsv_type_str); void uwb_rsv_dump(char *text, struct uwb_rsv *rsv) { struct device *dev = &rsv->rc->uwb_dev.dev; struct uwb_dev_addr devaddr; char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); if (rsv->target.type == UWB_RSV_TARGET_DEV) devaddr = rsv->target.dev->dev_addr; else devaddr = rsv->target.devaddr; uwb_dev_addr_print(target, sizeof(target), &devaddr); dev_dbg(dev, "rsv %s %s -> %s: %s\n", text, owner, target, uwb_rsv_state_str(rsv->state)); } static void uwb_rsv_release(struct kref *kref) { struct uwb_rsv *rsv = container_of(kref, struct 
uwb_rsv, kref); kfree(rsv); } void uwb_rsv_get(struct uwb_rsv *rsv) { kref_get(&rsv->kref); } void uwb_rsv_put(struct uwb_rsv *rsv) { kref_put(&rsv->kref, uwb_rsv_release); } /* * Get a free stream index for a reservation. * * If the target is a DevAddr (e.g., a WUSB cluster reservation) then * the stream is allocated from a pool of per-RC stream indexes, * otherwise a unique stream index for the target is selected. */ static int uwb_rsv_get_stream(struct uwb_rsv *rsv) { struct uwb_rc *rc = rsv->rc; struct device *dev = &rc->uwb_dev.dev; unsigned long *streams_bm; int stream; switch (rsv->target.type) { case UWB_RSV_TARGET_DEV: streams_bm = rsv->target.dev->streams; break; case UWB_RSV_TARGET_DEVADDR: streams_bm = rc->uwb_dev.streams; break; default: return -EINVAL; } stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS); if (stream >= UWB_NUM_STREAMS) return -EBUSY; rsv->stream = stream; set_bit(stream, streams_bm); dev_dbg(dev, "get stream %d\n", rsv->stream); return 0; } static void uwb_rsv_put_stream(struct uwb_rsv *rsv) { struct uwb_rc *rc = rsv->rc; struct device *dev = &rc->uwb_dev.dev; unsigned long *streams_bm; switch (rsv->target.type) { case UWB_RSV_TARGET_DEV: streams_bm = rsv->target.dev->streams; break; case UWB_RSV_TARGET_DEVADDR: streams_bm = rc->uwb_dev.streams; break; default: return; } clear_bit(rsv->stream, streams_bm); dev_dbg(dev, "put stream %d\n", rsv->stream); } void uwb_rsv_backoff_win_timer(unsigned long arg) { struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg; struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow); struct device *dev = &rc->uwb_dev.dev; bow->can_reserve_extra_mases = true; if (bow->total_expired <= 4) { bow->total_expired++; } else { /* after 4 backoff window has expired we can exit from * the backoff procedure */ bow->total_expired = 0; bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1; } dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n: ", bow->total_expired, bow->n); /* try to relocate all 
the "to be moved" relocations */ uwb_rsv_handle_drp_avail_change(rc); } void uwb_rsv_backoff_win_increment(struct uwb_rc *rc) { struct uwb_drp_backoff_win *bow = &rc->bow; struct device *dev = &rc->uwb_dev.dev; unsigned timeout_us; dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window); bow->can_reserve_extra_mases = false; if((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX) return; bow->window <<= 1; bow->n = prandom_u32() & (bow->window - 1); dev_dbg(dev, "new_window=%d, n=%d\n: ", bow->window, bow->n); /* reset the timer associated variables */ timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US; bow->total_expired = 0; mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us)); } static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) { int sframes = UWB_MAX_LOST_BEACONS; /* * Multicast reservations can become established within 1 * super frame and should not be terminated if no response is * received. */ if (rsv->is_multicast) { if (rsv->state == UWB_RSV_STATE_O_INITIATED || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) sframes = 1; if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) sframes = 0; } if (sframes > 0) { /* * Add an additional 2 superframes to account for the * time to send the SET DRP IE command. */ unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US; mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us)); } else del_timer(&rsv->timer); } /* * Update a reservations state, and schedule an update of the * transmitted DRP IEs. 
*/ static void uwb_rsv_state_update(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) { rsv->state = new_state; rsv->ie_valid = false; uwb_rsv_dump("SU", rsv); uwb_rsv_stroke_timer(rsv); uwb_rsv_sched_update(rsv->rc); } static void uwb_rsv_callback(struct uwb_rsv *rsv) { if (rsv->callback) rsv->callback(rsv); } void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) { struct uwb_rsv_move *mv = &rsv->mv; if (rsv->state == new_state) { switch (rsv->state) { case UWB_RSV_STATE_O_ESTABLISHED: case UWB_RSV_STATE_O_MOVE_EXPANDING: case UWB_RSV_STATE_O_MOVE_COMBINING: case UWB_RSV_STATE_O_MOVE_REDUCING: case UWB_RSV_STATE_T_ACCEPTED: case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: case UWB_RSV_STATE_T_RESIZED: case UWB_RSV_STATE_NONE: uwb_rsv_stroke_timer(rsv); break; default: /* Expecting a state transition so leave timer as-is. */ break; } return; } uwb_rsv_dump("SC", rsv); switch (new_state) { case UWB_RSV_STATE_NONE: uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); uwb_rsv_callback(rsv); break; case UWB_RSV_STATE_O_INITIATED: uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED); break; case UWB_RSV_STATE_O_PENDING: uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); break; case UWB_RSV_STATE_O_MODIFIED: /* in the companion there are the MASes to drop */ bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED); break; case UWB_RSV_STATE_O_ESTABLISHED: if (rsv->state == UWB_RSV_STATE_O_MODIFIED || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) { uwb_drp_avail_release(rsv->rc, &mv->companion_mas); rsv->needs_release_companion_mas = false; } uwb_drp_avail_reserve(rsv->rc, &rsv->mas); uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); uwb_rsv_callback(rsv); break; case UWB_RSV_STATE_O_MOVE_EXPANDING: rsv->needs_release_companion_mas = true; uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); break; case UWB_RSV_STATE_O_MOVE_COMBINING: rsv->needs_release_companion_mas = false; 
uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); rsv->mas.safe += mv->companion_mas.safe; rsv->mas.unsafe += mv->companion_mas.unsafe; uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); break; case UWB_RSV_STATE_O_MOVE_REDUCING: bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); rsv->needs_release_companion_mas = true; rsv->mas.safe = mv->final_mas.safe; rsv->mas.unsafe = mv->final_mas.unsafe; bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS); uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); break; case UWB_RSV_STATE_T_ACCEPTED: case UWB_RSV_STATE_T_RESIZED: rsv->needs_release_companion_mas = false; uwb_drp_avail_reserve(rsv->rc, &rsv->mas); uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); uwb_rsv_callback(rsv); break; case UWB_RSV_STATE_T_DENIED: uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); break; case UWB_RSV_STATE_T_CONFLICT: uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT); break; case UWB_RSV_STATE_T_PENDING: uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING); break; case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: rsv->needs_release_companion_mas = true; uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); break; default: dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", uwb_rsv_state_str(new_state), new_state); } } static void uwb_rsv_handle_timeout_work(struct work_struct *work) { struct uwb_rsv *rsv = container_of(work, struct uwb_rsv, handle_timeout_work); struct uwb_rc *rc = rsv->rc; mutex_lock(&rc->rsvs_mutex); uwb_rsv_dump("TO", rsv); switch (rsv->state) { case UWB_RSV_STATE_O_INITIATED: if (rsv->is_multicast) { uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); goto unlock; } break; case UWB_RSV_STATE_O_MOVE_EXPANDING: if (rsv->is_multicast) { uwb_rsv_set_state(rsv, 
UWB_RSV_STATE_O_MOVE_COMBINING); goto unlock; } break; case UWB_RSV_STATE_O_MOVE_COMBINING: if (rsv->is_multicast) { uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); goto unlock; } break; case UWB_RSV_STATE_O_MOVE_REDUCING: if (rsv->is_multicast) { uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); goto unlock; } break; case UWB_RSV_STATE_O_ESTABLISHED: if (rsv->is_multicast) goto unlock; break; case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: /* * The time out could be for the main or of the * companion DRP, assume it's for the companion and * drop that first. A further time out is required to * drop the main. */ uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); goto unlock; default: break; } uwb_rsv_remove(rsv); unlock: mutex_unlock(&rc->rsvs_mutex); } static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) { struct uwb_rsv *rsv; rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL); if (!rsv) return NULL; INIT_LIST_HEAD(&rsv->rc_node); INIT_LIST_HEAD(&rsv->pal_node); kref_init(&rsv->kref); init_timer(&rsv->timer); rsv->timer.function = uwb_rsv_timer; rsv->timer.data = (unsigned long)rsv; rsv->rc = rc; INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work); return rsv; } /** * uwb_rsv_create - allocate and initialize a UWB reservation structure * @rc: the radio controller * @cb: callback to use when the reservation completes or terminates * @pal_priv: data private to the PAL to be passed in the callback * * The callback is called when the state of the reservation changes from: * * - pending to accepted * - pending to denined * - accepted to terminated * - pending to terminated */ struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv) { struct uwb_rsv *rsv; rsv = uwb_rsv_alloc(rc); if (!rsv) return NULL; rsv->callback = cb; rsv->pal_priv = pal_priv; return rsv; } EXPORT_SYMBOL_GPL(uwb_rsv_create); void uwb_rsv_remove(struct uwb_rsv *rsv) { uwb_rsv_dump("RM", rsv); if 
(rsv->state != UWB_RSV_STATE_NONE) uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); if (rsv->needs_release_companion_mas) uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); uwb_drp_avail_release(rsv->rc, &rsv->mas); if (uwb_rsv_is_owner(rsv)) uwb_rsv_put_stream(rsv); uwb_dev_put(rsv->owner); if (rsv->target.type == UWB_RSV_TARGET_DEV) uwb_dev_put(rsv->target.dev); list_del_init(&rsv->rc_node); uwb_rsv_put(rsv); } /** * uwb_rsv_destroy - free a UWB reservation structure * @rsv: the reservation to free * * The reservation must already be terminated. */ void uwb_rsv_destroy(struct uwb_rsv *rsv) { uwb_rsv_put(rsv); } EXPORT_SYMBOL_GPL(uwb_rsv_destroy); /** * usb_rsv_establish - start a reservation establishment * @rsv: the reservation * * The PAL should fill in @rsv's owner, target, type, max_mas, * min_mas, max_interval and is_multicast fields. If the target is a * uwb_dev it must be referenced. * * The reservation's callback will be called when the reservation is * accepted, denied or times out. 
*/ int uwb_rsv_establish(struct uwb_rsv *rsv) { struct uwb_rc *rc = rsv->rc; struct uwb_mas_bm available; int ret; mutex_lock(&rc->rsvs_mutex); ret = uwb_rsv_get_stream(rsv); if (ret) goto out; rsv->tiebreaker = prandom_u32() & 1; /* get available mas bitmap */ uwb_drp_available(rc, &available); ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas); if (ret == UWB_RSV_ALLOC_NOT_FOUND) { ret = -EBUSY; uwb_rsv_put_stream(rsv); goto out; } ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas); if (ret != 0) { uwb_rsv_put_stream(rsv); goto out; } uwb_rsv_get(rsv); list_add_tail(&rsv->rc_node, &rc->reservations); rsv->owner = &rc->uwb_dev; uwb_dev_get(rsv->owner); uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED); out: mutex_unlock(&rc->rsvs_mutex); return ret; } EXPORT_SYMBOL_GPL(uwb_rsv_establish); /** * uwb_rsv_modify - modify an already established reservation * @rsv: the reservation to modify * @max_mas: new maximum MAS to reserve * @min_mas: new minimum MAS to reserve * @max_interval: new max_interval to use * * FIXME: implement this once there are PALs that use it. 
*/ int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval) { return -ENOSYS; } EXPORT_SYMBOL_GPL(uwb_rsv_modify); /* * move an already established reservation (rc->rsvs_mutex must to be * taken when tis function is called) */ int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available) { struct uwb_rc *rc = rsv->rc; struct uwb_drp_backoff_win *bow = &rc->bow; struct device *dev = &rc->uwb_dev.dev; struct uwb_rsv_move *mv; int ret = 0; if (bow->can_reserve_extra_mases == false) return -EBUSY; mv = &rsv->mv; if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) { if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) { /* We want to move the reservation */ bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS); uwb_drp_avail_reserve_pending(rc, &mv->companion_mas); uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); } } else { dev_dbg(dev, "new allocation not found\n"); } return ret; } /* It will try to move every reservation in state O_ESTABLISHED giving * to the MAS allocator algorithm an availability that is the real one * plus the allocation already established from the reservation. */ void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc) { struct uwb_drp_backoff_win *bow = &rc->bow; struct uwb_rsv *rsv; struct uwb_mas_bm mas; if (bow->can_reserve_extra_mases == false) return; list_for_each_entry(rsv, &rc->reservations, rc_node) { if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED || rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) { uwb_drp_available(rc, &mas); bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS); uwb_rsv_try_move(rsv, &mas); } } } /** * uwb_rsv_terminate - terminate an established reservation * @rsv: the reservation to terminate * * A reservation is terminated by removing the DRP IE from the beacon, * the other end will consider the reservation to be terminated when * it does not see the DRP IE for at least mMaxLostBeacons. 
* * If applicable, the reference to the target uwb_dev will be released. */ void uwb_rsv_terminate(struct uwb_rsv *rsv) { struct uwb_rc *rc = rsv->rc; mutex_lock(&rc->rsvs_mutex); if (rsv->state != UWB_RSV_STATE_NONE) uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); mutex_unlock(&rc->rsvs_mutex); } EXPORT_SYMBOL_GPL(uwb_rsv_terminate); /** * uwb_rsv_accept - accept a new reservation from a peer * @rsv: the reservation * @cb: call back for reservation changes * @pal_priv: data to be passed in the above call back * * Reservation requests from peers are denied unless a PAL accepts it * by calling this function. * * The PAL call uwb_rsv_destroy() for all accepted reservations before * calling uwb_pal_unregister(). */ void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv) { uwb_rsv_get(rsv); rsv->callback = cb; rsv->pal_priv = pal_priv; rsv->state = UWB_RSV_STATE_T_ACCEPTED; } EXPORT_SYMBOL_GPL(uwb_rsv_accept); /* * Is a received DRP IE for this reservation? */ static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src, struct uwb_ie_drp *drp_ie) { struct uwb_dev_addr *rsv_src; int stream; stream = uwb_ie_drp_stream_index(drp_ie); if (rsv->stream != stream) return false; switch (rsv->target.type) { case UWB_RSV_TARGET_DEVADDR: return rsv->stream == stream; case UWB_RSV_TARGET_DEV: if (uwb_ie_drp_owner(drp_ie)) rsv_src = &rsv->owner->dev_addr; else rsv_src = &rsv->target.dev->dev_addr; return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0; } return false; } static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc, struct uwb_dev *src, struct uwb_ie_drp *drp_ie) { struct uwb_rsv *rsv; struct uwb_pal *pal; enum uwb_rsv_state state; rsv = uwb_rsv_alloc(rc); if (!rsv) return NULL; rsv->rc = rc; rsv->owner = src; uwb_dev_get(rsv->owner); rsv->target.type = UWB_RSV_TARGET_DEV; rsv->target.dev = &rc->uwb_dev; uwb_dev_get(&rc->uwb_dev); rsv->type = uwb_ie_drp_type(drp_ie); rsv->stream = uwb_ie_drp_stream_index(drp_ie); uwb_drp_ie_to_bm(&rsv->mas, 
drp_ie); /* * See if any PALs are interested in this reservation. If not, * deny the request. */ rsv->state = UWB_RSV_STATE_T_DENIED; mutex_lock(&rc->uwb_dev.mutex); list_for_each_entry(pal, &rc->pals, node) { if (pal->new_rsv) pal->new_rsv(pal, rsv); if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) break; } mutex_unlock(&rc->uwb_dev.mutex); list_add_tail(&rsv->rc_node, &rc->reservations); state = rsv->state; rsv->state = UWB_RSV_STATE_NONE; /* FIXME: do something sensible here */ if (state == UWB_RSV_STATE_T_ACCEPTED && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) { /* FIXME: do something sensible here */ } else { uwb_rsv_set_state(rsv, state); } return rsv; } /** * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservations * @rsv: the reservation. * @mas: returns the available MAS. * * The usable MAS of a reservation may be less than the negotiated MAS * if alien BPs are present. */ void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas) { bitmap_zero(mas->bm, UWB_NUM_MAS); bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); } EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas); /** * uwb_rsv_find - find a reservation for a received DRP IE. * @rc: the radio controller * @src: source of the DRP IE * @drp_ie: the DRP IE * * If the reservation cannot be found and the DRP IE is from a peer * attempting to establish a new reservation, create a new reservation * and add it to the list. */ struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, struct uwb_ie_drp *drp_ie) { struct uwb_rsv *rsv; list_for_each_entry(rsv, &rc->reservations, rc_node) { if (uwb_rsv_match(rsv, src, drp_ie)) return rsv; } if (uwb_ie_drp_owner(drp_ie)) return uwb_rsv_new_target(rc, src, drp_ie); return NULL; } /* * Go through all the reservations and check for timeouts and (if * necessary) update their DRP IEs. 
* * FIXME: look at building the SET_DRP_IE command here rather than * having to rescan the list in uwb_rc_send_all_drp_ie(). */ static bool uwb_rsv_update_all(struct uwb_rc *rc) { struct uwb_rsv *rsv, *t; bool ie_updated = false; list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { if (!rsv->ie_valid) { uwb_drp_ie_update(rsv); ie_updated = true; } } return ie_updated; } void uwb_rsv_queue_update(struct uwb_rc *rc) { unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us)); } /** * uwb_rsv_sched_update - schedule an update of the DRP IEs * @rc: the radio controller. * * To improve performance and ensure correctness with [ECMA-368] the * number of SET-DRP-IE commands that are done are limited. * * DRP IEs update come from two sources: DRP events from the hardware * which all occur at the beginning of the superframe ('syncronous' * events) and reservation establishment/termination requests from * PALs or timers ('asynchronous' events). * * A delayed work ensures that all the synchronous events result in * one SET-DRP-IE command. * * Additional logic (the set_drp_ie_pending and rsv_updated_postponed * flags) will prevent an asynchrous event starting a SET-DRP-IE * command if one is currently awaiting a response. * * FIXME: this does leave a window where an asynchrous event can delay * the SET-DRP-IE for a synchronous event by one superframe. */ void uwb_rsv_sched_update(struct uwb_rc *rc) { spin_lock_bh(&rc->rsvs_lock); if (!delayed_work_pending(&rc->rsv_update_work)) { if (rc->set_drp_ie_pending > 0) { rc->set_drp_ie_pending++; goto unlock; } uwb_rsv_queue_update(rc); } unlock: spin_unlock_bh(&rc->rsvs_lock); } /* * Update DRP IEs and, if necessary, the DRP Availability IE and send * the updated IEs to the radio controller. 
*/ static void uwb_rsv_update_work(struct work_struct *work) { struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work.work); bool ie_updated; mutex_lock(&rc->rsvs_mutex); ie_updated = uwb_rsv_update_all(rc); if (!rc->drp_avail.ie_valid) { uwb_drp_avail_ie_update(rc); ie_updated = true; } if (ie_updated && (rc->set_drp_ie_pending == 0)) uwb_rc_send_all_drp_ie(rc); mutex_unlock(&rc->rsvs_mutex); } static void uwb_rsv_alien_bp_work(struct work_struct *work) { struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_alien_bp_work.work); struct uwb_rsv *rsv; mutex_lock(&rc->rsvs_mutex); list_for_each_entry(rsv, &rc->reservations, rc_node) { if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) { rsv->callback(rsv); } } mutex_unlock(&rc->rsvs_mutex); } static void uwb_rsv_timer(unsigned long arg) { struct uwb_rsv *rsv = (struct uwb_rsv *)arg; queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work); } /** * uwb_rsv_remove_all - remove all reservations * @rc: the radio controller * * A DRP IE update is not done. */ void uwb_rsv_remove_all(struct uwb_rc *rc) { struct uwb_rsv *rsv, *t; mutex_lock(&rc->rsvs_mutex); list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { if (rsv->state != UWB_RSV_STATE_NONE) uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); del_timer_sync(&rsv->timer); } /* Cancel any postponed update. 
*/ rc->set_drp_ie_pending = 0; mutex_unlock(&rc->rsvs_mutex); cancel_delayed_work_sync(&rc->rsv_update_work); flush_workqueue(rc->rsv_workq); mutex_lock(&rc->rsvs_mutex); list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { uwb_rsv_remove(rsv); } mutex_unlock(&rc->rsvs_mutex); } void uwb_rsv_init(struct uwb_rc *rc) { INIT_LIST_HEAD(&rc->reservations); INIT_LIST_HEAD(&rc->cnflt_alien_list); mutex_init(&rc->rsvs_mutex); spin_lock_init(&rc->rsvs_lock); INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work); INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work); rc->bow.can_reserve_extra_mases = true; rc->bow.total_expired = 0; rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1; init_timer(&rc->bow.timer); rc->bow.timer.function = uwb_rsv_backoff_win_timer; rc->bow.timer.data = (unsigned long)&rc->bow; bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); } int uwb_rsv_setup(struct uwb_rc *rc) { char name[16]; snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev)); rc->rsv_workq = create_singlethread_workqueue(name); if (rc->rsv_workq == NULL) return -ENOMEM; return 0; } void uwb_rsv_cleanup(struct uwb_rc *rc) { uwb_rsv_remove_all(rc); destroy_workqueue(rc->rsv_workq); }
gpl-2.0
TeamGlade-Devices/android_kernel_htc_pico
drivers/net/atlx/atl2.c
2366
82165
/* * Copyright(c) 2006 - 2007 Atheros Corporation. All rights reserved. * Copyright(c) 2007 - 2008 Chris Snook <csnook@redhat.com> * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <asm/atomic.h> #include <linux/crc32.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/hardirq.h> #include <linux/if_vlan.h> #include <linux/in.h> #include <linux/interrupt.h> #include <linux/ip.h> #include <linux/irqflags.h> #include <linux/irqreturn.h> #include <linux/mii.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/pm.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/tcp.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/workqueue.h> #include "atl2.h" #define ATL2_DRV_VERSION "2.2.3" static const char atl2_driver_name[] = "atl2"; static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver"; static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation."; static const char atl2_driver_version[] = ATL2_DRV_VERSION; MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook 
<csnook@redhat.com>"); MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(ATL2_DRV_VERSION); /* * atl2_pci_tbl - PCI Device ID Table */ static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)}, /* required last entry */ {0,} }; MODULE_DEVICE_TABLE(pci, atl2_pci_tbl); static void atl2_set_ethtool_ops(struct net_device *netdev); static void atl2_check_options(struct atl2_adapter *adapter); /* * atl2_sw_init - Initialize general software structures (struct atl2_adapter) * @adapter: board private structure to initialize * * atl2_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). */ static int __devinit atl2_sw_init(struct atl2_adapter *adapter) { struct atl2_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_id = pdev->subsystem_device; hw->revision_id = pdev->revision; pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); adapter->wol = 0; adapter->ict = 50000; /* ~100ms */ adapter->link_speed = SPEED_0; /* hardware init */ adapter->link_duplex = FULL_DUPLEX; hw->phy_configured = false; hw->preamble_len = 7; hw->ipgt = 0x60; hw->min_ifg = 0x50; hw->ipgr1 = 0x40; hw->ipgr2 = 0x60; hw->retry_buf = 2; hw->max_retry = 0xf; hw->lcol = 0x37; hw->jam_ipg = 7; hw->fc_rxd_hi = 0; hw->fc_rxd_lo = 0; hw->max_frame_size = adapter->netdev->mtu; spin_lock_init(&adapter->stats_lock); set_bit(__ATL2_DOWN, &adapter->flags); return 0; } /* * atl2_set_multi - Multicast and Promiscuous mode set * @netdev: network interface device structure * * The set_multi entry point is called whenever the multicast address * list or the network interface flags are updated. 
This routine is * responsible for configuring the hardware for proper multicast, * promiscuous mode, and all-multi behavior. */ static void atl2_set_multi(struct net_device *netdev) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; u32 rctl; u32 hash_value; /* Check for Promiscuous and All Multicast modes */ rctl = ATL2_READ_REG(hw, REG_MAC_CTRL); if (netdev->flags & IFF_PROMISC) { rctl |= MAC_CTRL_PROMIS_EN; } else if (netdev->flags & IFF_ALLMULTI) { rctl |= MAC_CTRL_MC_ALL_EN; rctl &= ~MAC_CTRL_PROMIS_EN; } else rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); ATL2_WRITE_REG(hw, REG_MAC_CTRL, rctl); /* clear the old settings from the multicast hash table */ ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); /* comoute mc addresses' hash value ,and put it into hash table */ netdev_for_each_mc_addr(ha, netdev) { hash_value = atl2_hash_mc_addr(hw, ha->addr); atl2_hash_set(hw, hash_value); } } static void init_ring_ptrs(struct atl2_adapter *adapter) { /* Read / Write Ptr Initialize: */ adapter->txd_write_ptr = 0; atomic_set(&adapter->txd_read_ptr, 0); adapter->rxd_read_ptr = 0; adapter->rxd_write_ptr = 0; atomic_set(&adapter->txs_write_ptr, 0); adapter->txs_next_clear = 0; } /* * atl2_configure - Configure Transmit&Receive Unit after Reset * @adapter: board private structure * * Configure the Tx /Rx unit of the MAC after a reset. 
*/ static int atl2_configure(struct atl2_adapter *adapter) { struct atl2_hw *hw = &adapter->hw; u32 value; /* clear interrupt status */ ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0xffffffff); /* set MAC Address */ value = (((u32)hw->mac_addr[2]) << 24) | (((u32)hw->mac_addr[3]) << 16) | (((u32)hw->mac_addr[4]) << 8) | (((u32)hw->mac_addr[5])); ATL2_WRITE_REG(hw, REG_MAC_STA_ADDR, value); value = (((u32)hw->mac_addr[0]) << 8) | (((u32)hw->mac_addr[1])); ATL2_WRITE_REG(hw, (REG_MAC_STA_ADDR+4), value); /* HI base address */ ATL2_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI, (u32)((adapter->ring_dma & 0xffffffff00000000ULL) >> 32)); /* LO base address */ ATL2_WRITE_REG(hw, REG_TXD_BASE_ADDR_LO, (u32)(adapter->txd_dma & 0x00000000ffffffffULL)); ATL2_WRITE_REG(hw, REG_TXS_BASE_ADDR_LO, (u32)(adapter->txs_dma & 0x00000000ffffffffULL)); ATL2_WRITE_REG(hw, REG_RXD_BASE_ADDR_LO, (u32)(adapter->rxd_dma & 0x00000000ffffffffULL)); /* element count */ ATL2_WRITE_REGW(hw, REG_TXD_MEM_SIZE, (u16)(adapter->txd_ring_size/4)); ATL2_WRITE_REGW(hw, REG_TXS_MEM_SIZE, (u16)adapter->txs_ring_size); ATL2_WRITE_REGW(hw, REG_RXD_BUF_NUM, (u16)adapter->rxd_ring_size); /* config Internal SRAM */ /* ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_tx_end); ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_rx_end); */ /* config IPG/IFG */ value = (((u32)hw->ipgt & MAC_IPG_IFG_IPGT_MASK) << MAC_IPG_IFG_IPGT_SHIFT) | (((u32)hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) << MAC_IPG_IFG_MIFG_SHIFT) | (((u32)hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) << MAC_IPG_IFG_IPGR1_SHIFT)| (((u32)hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) << MAC_IPG_IFG_IPGR2_SHIFT); ATL2_WRITE_REG(hw, REG_MAC_IPG_IFG, value); /* config Half-Duplex Control */ value = ((u32)hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | (((u32)hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | (((u32)hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); 
ATL2_WRITE_REG(hw, REG_MAC_HALF_DUPLX_CTRL, value); /* set Interrupt Moderator Timer */ ATL2_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, adapter->imt); ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_ITIMER_EN); /* set Interrupt Clear Timer */ ATL2_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, adapter->ict); /* set MTU */ ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu + ENET_HEADER_SIZE + VLAN_SIZE + ETHERNET_FCS_SIZE); /* 1590 */ ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177); /* flow control */ ATL2_WRITE_REGW(hw, REG_PAUSE_ON_TH, hw->fc_rxd_hi); ATL2_WRITE_REGW(hw, REG_PAUSE_OFF_TH, hw->fc_rxd_lo); /* Init mailbox */ ATL2_WRITE_REGW(hw, REG_MB_TXD_WR_IDX, (u16)adapter->txd_write_ptr); ATL2_WRITE_REGW(hw, REG_MB_RXD_RD_IDX, (u16)adapter->rxd_read_ptr); /* enable DMA read/write */ ATL2_WRITE_REGB(hw, REG_DMAR, DMAR_EN); ATL2_WRITE_REGB(hw, REG_DMAW, DMAW_EN); value = ATL2_READ_REG(&adapter->hw, REG_ISR); if ((value & ISR_PHY_LINKDOWN) != 0) value = 1; /* config failed */ else value = 0; /* clear all interrupt status */ ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0x3fffffff); ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0); return value; } /* * atl2_setup_ring_resources - allocate Tx / RX descriptor resources * @adapter: board private structure * * Return 0 on success, negative on failure */ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int size; u8 offset = 0; /* real ring DMA buffer */ adapter->ring_size = size = adapter->txd_ring_size * 1 + 7 + /* dword align */ adapter->txs_ring_size * 4 + 7 + /* dword align */ adapter->rxd_ring_size * 1536 + 127; /* 128bytes align */ adapter->ring_vir_addr = pci_alloc_consistent(pdev, size, &adapter->ring_dma); if (!adapter->ring_vir_addr) return -ENOMEM; memset(adapter->ring_vir_addr, 0, adapter->ring_size); /* Init TXD Ring */ adapter->txd_dma = adapter->ring_dma ; offset = (adapter->txd_dma & 0x7) ? 
(8 - (adapter->txd_dma & 0x7)) : 0; adapter->txd_dma += offset; adapter->txd_ring = (struct tx_pkt_header *) (adapter->ring_vir_addr + offset); /* Init TXS Ring */ adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size; offset = (adapter->txs_dma & 0x7) ? (8 - (adapter->txs_dma & 0x7)) : 0; adapter->txs_dma += offset; adapter->txs_ring = (struct tx_pkt_status *) (((u8 *)adapter->txd_ring) + (adapter->txd_ring_size + offset)); /* Init RXD Ring */ adapter->rxd_dma = adapter->txs_dma + adapter->txs_ring_size * 4; offset = (adapter->rxd_dma & 127) ? (128 - (adapter->rxd_dma & 127)) : 0; if (offset > 7) offset -= 8; else offset += (128 - 8); adapter->rxd_dma += offset; adapter->rxd_ring = (struct rx_desc *) (((u8 *)adapter->txs_ring) + (adapter->txs_ring_size * 4 + offset)); /* * Read / Write Ptr Initialize: * init_ring_ptrs(adapter); */ return 0; } /* * atl2_irq_enable - Enable default interrupt generation settings * @adapter: board private structure */ static inline void atl2_irq_enable(struct atl2_adapter *adapter) { ATL2_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK); ATL2_WRITE_FLUSH(&adapter->hw); } /* * atl2_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure */ static inline void atl2_irq_disable(struct atl2_adapter *adapter) { ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0); ATL2_WRITE_FLUSH(&adapter->hw); synchronize_irq(adapter->pdev->irq); } #ifdef NETIF_F_HW_VLAN_TX static void atl2_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) { struct atl2_adapter *adapter = netdev_priv(netdev); u32 ctrl; atl2_irq_disable(adapter); adapter->vlgrp = grp; if (grp) { /* enable VLAN tag insert/strip */ ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL); ctrl |= MAC_CTRL_RMV_VLAN; ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl); } else { /* disable VLAN tag insert/strip */ ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL); ctrl &= ~MAC_CTRL_RMV_VLAN; ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl); } 
atl2_irq_enable(adapter); } static void atl2_restore_vlan(struct atl2_adapter *adapter) { atl2_vlan_rx_register(adapter->netdev, adapter->vlgrp); } #endif static void atl2_intr_rx(struct atl2_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct rx_desc *rxd; struct sk_buff *skb; do { rxd = adapter->rxd_ring+adapter->rxd_write_ptr; if (!rxd->status.update) break; /* end of tx */ /* clear this flag at once */ rxd->status.update = 0; if (rxd->status.ok && rxd->status.pkt_size >= 60) { int rx_size = (int)(rxd->status.pkt_size - 4); /* alloc new buffer */ skb = netdev_alloc_skb_ip_align(netdev, rx_size); if (NULL == skb) { printk(KERN_WARNING "%s: Mem squeeze, deferring packet.\n", netdev->name); /* * Check that some rx space is free. If not, * free one and mark stats->rx_dropped++. */ netdev->stats.rx_dropped++; break; } memcpy(skb->data, rxd->packet, rx_size); skb_put(skb, rx_size); skb->protocol = eth_type_trans(skb, netdev); #ifdef NETIF_F_HW_VLAN_TX if (adapter->vlgrp && (rxd->status.vlan)) { u16 vlan_tag = (rxd->status.vtag>>4) | ((rxd->status.vtag&7) << 13) | ((rxd->status.vtag&8) << 9); vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); } else #endif netif_rx(skb); netdev->stats.rx_bytes += rx_size; netdev->stats.rx_packets++; } else { netdev->stats.rx_errors++; if (rxd->status.ok && rxd->status.pkt_size <= 60) netdev->stats.rx_length_errors++; if (rxd->status.mcast) netdev->stats.multicast++; if (rxd->status.crc) netdev->stats.rx_crc_errors++; if (rxd->status.align) netdev->stats.rx_frame_errors++; } /* advance write ptr */ if (++adapter->rxd_write_ptr == adapter->rxd_ring_size) adapter->rxd_write_ptr = 0; } while (1); /* update mailbox? 
*/
	/* Tail of atl2_intr_rx: hand all consumed RX descriptors back to the
	 * NIC by advancing the mailbox read index to the write index. */
	adapter->rxd_read_ptr = adapter->rxd_write_ptr;
	ATL2_WRITE_REGW(&adapter->hw, REG_MB_RXD_RD_IDX, adapter->rxd_read_ptr);
}

/*
 * atl2_intr_tx - reclaim completed TX descriptors
 * @adapter: board private structure
 *
 * Walks the TX status ring from txs_write_ptr, consuming every entry the
 * hardware has marked ->update, advancing the matching TX descriptor
 * read pointer and accumulating netdev stats.  Wakes the queue if at
 * least one slot was freed and the link is up.
 */
static void atl2_intr_tx(struct atl2_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 txd_read_ptr;
	u32 txs_write_ptr;
	struct tx_pkt_status *txs;
	struct tx_pkt_header *txph;
	int free_hole = 0;	/* set once any TX slot is reclaimed */

	do {
		txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
		txs = adapter->txs_ring + txs_write_ptr;
		if (!txs->update)
			break; /* tx stop here */

		free_hole = 1;
		txs->update = 0;

		if (++txs_write_ptr == adapter->txs_ring_size)
			txs_write_ptr = 0;
		atomic_set(&adapter->txs_write_ptr, (int)txs_write_ptr);

		/* Locate the TX packet header this status entry refers to. */
		txd_read_ptr = (u32) atomic_read(&adapter->txd_read_ptr);
		txph = (struct tx_pkt_header *)
			(((u8 *)adapter->txd_ring) + txd_read_ptr);

		if (txph->pkt_size != txs->pkt_size) {
			/* Status/descriptor mismatch: dump the surrounding
			 * status entries for diagnosis, then carry on using
			 * the original entry. */
			struct tx_pkt_status *old_txs = txs;
			printk(KERN_WARNING
				"%s: txs packet size not consistent with txd"
				" txd_:0x%08x, txs_:0x%08x!\n",
				adapter->netdev->name,
				*(u32 *)txph, *(u32 *)txs);
			printk(KERN_WARNING
				"txd read ptr: 0x%x\n",
				txd_read_ptr);
			txs = adapter->txs_ring + txs_write_ptr;
			printk(KERN_WARNING
				"txs-behind:0x%08x\n",
				*(u32 *)txs);
			if (txs_write_ptr < 2) {
				txs = adapter->txs_ring +
					(adapter->txs_ring_size +
					txs_write_ptr - 2);
			} else {
				txs = adapter->txs_ring + (txs_write_ptr - 2);
			}
			printk(KERN_WARNING
				"txs-before:0x%08x\n",
				*(u32 *)txs);
			txs = old_txs;
		}

		/* 4for TPH: advance past the 4-byte header plus the packet,
		 * rounded up to a 4-byte boundary ((size + 4 + 3) & ~3). */
		txd_read_ptr += (((u32)(txph->pkt_size) + 7) & ~3);
		if (txd_read_ptr >= adapter->txd_ring_size)
			txd_read_ptr -= adapter->txd_ring_size;
		atomic_set(&adapter->txd_read_ptr, (int)txd_read_ptr);

		/* tx statistics: */
		if (txs->ok) {
			netdev->stats.tx_bytes += txs->pkt_size;
			netdev->stats.tx_packets++;
		} else
			netdev->stats.tx_errors++;

		if (txs->defer)
			netdev->stats.collisions++;
		if (txs->abort_col)
			netdev->stats.tx_aborted_errors++;
		if (txs->late_col)
			netdev->stats.tx_window_errors++;
		if (txs->underun)
			netdev->stats.tx_fifo_errors++;
	} while (1);

	if (free_hole) {
		if (netif_queue_stopped(adapter->netdev) &&
			netif_carrier_ok(adapter->netdev))
			netif_wake_queue(adapter->netdev);
	}
}

/*
 * atl2_check_for_link - propagate a PHY link-down event
 * @adapter: board private structure
 *
 * Reads BMSR twice (link status is latched-low, so the first read
 * returns stale state) and, if the link is gone, marks the carrier off
 * immediately; full reconfiguration is deferred to link_chg_task.
 */
static void atl2_check_for_link(struct atl2_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 phy_data = 0;

	spin_lock(&adapter->stats_lock);
	atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	spin_unlock(&adapter->stats_lock);

	/* notify upper layer link down ASAP */
	if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */
		if (netif_carrier_ok(netdev)) { /* old link state: Up */
			printk(KERN_INFO "%s: %s NIC Link is Down\n",
				atl2_driver_name, netdev->name);
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}
	schedule_work(&adapter->link_chg_task);
}

/*
 * atl2_clear_phy_int - acknowledge a PHY interrupt
 *
 * Reading PHY register 19 clears the pending PHY interrupt condition;
 * the value itself is discarded.
 */
static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
{
	u16 phy_data;
	spin_lock(&adapter->stats_lock);
	atl2_read_phy_reg(&adapter->hw, 19, &phy_data);
	spin_unlock(&adapter->stats_lock);
}

/*
 * atl2_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t atl2_intr(int irq, void *data)
{
	struct atl2_adapter *adapter = netdev_priv(data);
	struct atl2_hw *hw = &adapter->hw;
	u32 status;

	status = ATL2_READ_REG(hw, REG_ISR);
	if (0 == status)
		return IRQ_NONE;	/* shared IRQ: not ours */

	/* link event */
	if (status & ISR_PHY)
		atl2_clear_phy_int(adapter);

	/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
	ATL2_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);

	/* check if PCIE PHY Link down */
	if (status & ISR_PHY_LINKDOWN) {
		if (netif_running(adapter->netdev)) { /* reset MAC */
			ATL2_WRITE_REG(hw, REG_ISR, 0);
			ATL2_WRITE_REG(hw, REG_IMR, 0);
			ATL2_WRITE_FLUSH(hw);
			schedule_work(&adapter->reset_task);
			return IRQ_HANDLED;
		}
	}

	/* check if DMA read/write error? */
	if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
		/* mask and quiesce the device, then reset from process
		 * context */
		ATL2_WRITE_REG(hw, REG_ISR, 0);
		ATL2_WRITE_REG(hw, REG_IMR, 0);
		ATL2_WRITE_FLUSH(hw);
		schedule_work(&adapter->reset_task);
		return IRQ_HANDLED;
	}

	/* link event */
	if (status & (ISR_PHY | ISR_MANUAL)) {
		adapter->netdev->stats.tx_carrier_errors++;
		atl2_check_for_link(adapter);
	}

	/* transmit event */
	if (status & ISR_TX_EVENT)
		atl2_intr_tx(adapter);

	/* rx exception */
	if (status & ISR_RX_EVENT)
		atl2_intr_rx(adapter);

	/* re-enable Interrupt */
	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
	return IRQ_HANDLED;
}

/*
 * atl2_request_irq - install the interrupt handler
 *
 * Tries MSI first, falling back to a shared legacy IRQ.
 * NOTE(review): if request_irq() fails after pci_enable_msi()
 * succeeded, MSI is left enabled here; atl2_free_irq() is the only
 * place that disables it — confirm callers' error paths cover this.
 */
static int atl2_request_irq(struct atl2_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int flags, err = 0;

	flags = IRQF_SHARED;
	adapter->have_msi = true;
	err = pci_enable_msi(adapter->pdev);
	if (err)
		adapter->have_msi = false;

	if (adapter->have_msi)
		flags &= ~IRQF_SHARED;	/* MSI is never shared */

	return request_irq(adapter->pdev->irq, atl2_intr, flags, netdev->name,
		netdev);
}

/*
 * atl2_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources.  TXD, TXS and RXD rings live in
 * one coherent allocation, so a single free releases them all.
 */
static void atl2_free_ring_resources(struct atl2_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr,
		adapter->ring_dma);
}

/*
 * atl2_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
*/ static int atl2_open(struct net_device *netdev) { struct atl2_adapter *adapter = netdev_priv(netdev); int err; u32 val; /* disallow open during test */ if (test_bit(__ATL2_TESTING, &adapter->flags)) return -EBUSY; /* allocate transmit descriptors */ err = atl2_setup_ring_resources(adapter); if (err) return err; err = atl2_init_hw(&adapter->hw); if (err) { err = -EIO; goto err_init_hw; } /* hardware has been reset, we need to reload some things */ atl2_set_multi(netdev); init_ring_ptrs(adapter); #ifdef NETIF_F_HW_VLAN_TX atl2_restore_vlan(adapter); #endif if (atl2_configure(adapter)) { err = -EIO; goto err_config; } err = atl2_request_irq(adapter); if (err) goto err_req_irq; clear_bit(__ATL2_DOWN, &adapter->flags); mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 4*HZ)); val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL); ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val | MASTER_CTRL_MANUAL_INT); atl2_irq_enable(adapter); return 0; err_init_hw: err_req_irq: err_config: atl2_free_ring_resources(adapter); atl2_reset_hw(&adapter->hw); return err; } static void atl2_down(struct atl2_adapter *adapter) { struct net_device *netdev = adapter->netdev; /* signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ set_bit(__ATL2_DOWN, &adapter->flags); netif_tx_disable(netdev); /* reset MAC to disable all RX/TX */ atl2_reset_hw(&adapter->hw); msleep(1); atl2_irq_disable(adapter); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_config_timer); clear_bit(0, &adapter->cfg_phy); netif_carrier_off(netdev); adapter->link_speed = SPEED_0; adapter->link_duplex = -1; } static void atl2_free_irq(struct atl2_adapter *adapter) { struct net_device *netdev = adapter->netdev; free_irq(adapter->pdev->irq, netdev); #ifdef CONFIG_PCI_MSI if (adapter->have_msi) pci_disable_msi(adapter->pdev); #endif } /* * atl2_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed 
to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int atl2_close(struct net_device *netdev)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));

	atl2_down(adapter);
	atl2_free_irq(adapter);
	atl2_free_ring_resources(adapter);

	return 0;
}

/*
 * TxsFreeUnit - number of free TX status slots
 *
 * Classic ring-buffer arithmetic: one slot is always kept unused so a
 * full ring is distinguishable from an empty one.
 */
static inline int TxsFreeUnit(struct atl2_adapter *adapter)
{
	u32 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);

	return (adapter->txs_next_clear >= txs_write_ptr) ?
		(int) (adapter->txs_ring_size - adapter->txs_next_clear +
		txs_write_ptr - 1) :
		(int) (txs_write_ptr - adapter->txs_next_clear - 1);
}

/*
 * TxdFreeBytes - free byte count in the TX descriptor/data ring
 *
 * Same one-slot-reserved ring arithmetic as TxsFreeUnit, in bytes.
 */
static inline int TxdFreeBytes(struct atl2_adapter *adapter)
{
	u32 txd_read_ptr = (u32)atomic_read(&adapter->txd_read_ptr);

	return (adapter->txd_write_ptr >= txd_read_ptr) ?
		(int) (adapter->txd_ring_size - adapter->txd_write_ptr +
		txd_read_ptr - 1) :
		(int) (txd_read_ptr - adapter->txd_write_ptr - 1);
}

/*
 * atl2_xmit_frame - copy-based transmit path
 *
 * The L2 hardware transmits from a byte ring rather than from
 * scatter/gather buffers: each packet is a 4-byte tx_pkt_header
 * followed by the frame data, 4-byte aligned.  The skb is copied into
 * the ring (wrapping if needed), the write pointer advanced, and the
 * new index posted to the mailbox register.  The skb is always
 * consumed on the NETDEV_TX_OK paths.
 */
static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
					 struct net_device *netdev)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct tx_pkt_header *txph;
	u32 offset, copy_len;
	int txs_unused;
	int txbuf_unused;

	if (test_bit(__ATL2_DOWN, &adapter->flags)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txs_unused = TxsFreeUnit(adapter);
	txbuf_unused = TxdFreeBytes(adapter);

	/* need header + data + up to 4 bytes of alignment padding, plus
	 * one free status slot */
	if (skb->len + sizeof(struct tx_pkt_header) + 4 > txbuf_unused ||
		txs_unused < 1) {
		/* not enough resources */
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	offset = adapter->txd_write_ptr;

	txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + offset);

	*(u32 *)txph = 0;	/* clear all header flags first */
	txph->pkt_size = skb->len;

	offset += 4;	/* step over the tx_pkt_header */
	if (offset >= adapter->txd_ring_size)
		offset -= adapter->txd_ring_size;
	copy_len = adapter->txd_ring_size - offset;
	if (copy_len >= skb->len) {
		/* contiguous copy */
		memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len);
		offset += ((u32)(skb->len + 3) & ~3);
	} else {
		/* wraps: split the copy at the end of the ring */
		memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len);
		memcpy((u8 *)adapter->txd_ring, skb->data+copy_len,
			skb->len-copy_len);
		offset = ((u32)(skb->len-copy_len + 3) & ~3);
	}
#ifdef NETIF_F_HW_VLAN_TX
	if (vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		/* swizzle the tag into the hardware's byte/bit layout */
		vlan_tag = (vlan_tag << 4) |
			(vlan_tag >> 13) |
			((vlan_tag >> 9) & 0x8);
		txph->ins_vlan = 1;
		txph->vlan = vlan_tag;
	}
#endif
	if (offset >= adapter->txd_ring_size)
		offset -= adapter->txd_ring_size;
	adapter->txd_write_ptr = offset;

	/* clear txs before send */
	adapter->txs_ring[adapter->txs_next_clear].update = 0;
	if (++adapter->txs_next_clear == adapter->txs_ring_size)
		adapter->txs_next_clear = 0;

	/* doorbell: mailbox index is in dword units, hence >> 2 */
	ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
		(adapter->txd_write_ptr >> 2));

	mmiowb();
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/*
 *
atl2_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct atl2_hw *hw = &adapter->hw;

	/* hardware supports at most a standard Ethernet frame + VLAN tag */
	if ((new_mtu < 40) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
		return -EINVAL;

	/* set MTU */
	if (hw->max_frame_size != new_mtu) {
		/* NOTE(review): hw->max_frame_size is never updated here,
		 * so this branch re-runs on every call — confirm intended. */
		netdev->mtu = new_mtu;
		ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ENET_HEADER_SIZE +
			VLAN_SIZE + ETHERNET_FCS_SIZE);
	}

	return 0;
}

/*
 * atl2_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int atl2_set_mac(struct net_device *netdev, void *p)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* address can only change while the interface is down */
	if (netif_running(netdev))
		return -EBUSY;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

	atl2_set_mac_addr(&adapter->hw);

	return 0;
}

/*
 * atl2_mii_ioctl - MII register access for SIOC[GS]MII* ioctls
 * @netdev: network interface device structure
 * @ifr: ioctl request (MII data in if_mii(ifr))
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 */
static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
	int cmd)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = 0;	/* single internal PHY at address 0 */
		break;
	case SIOCGMIIREG:
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (atl2_read_phy_reg(&adapter->hw,
			data->reg_num & 0x1F, &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (atl2_write_phy_reg(&adapter->hw, data->reg_num,
			data->val_in)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/*
 * atl2_ioctl - dispatch device ioctls to the MII helper
 * @netdev: network interface device structure
 * @ifr: ioctl request
 * @cmd: ioctl command code
 */
static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return atl2_mii_ioctl(netdev, ifr, cmd);
#ifdef ETHTOOL_OPS_COMPAT
	case SIOCETHTOOL:
		return ethtool_ioctl(ifr);
#endif
	default:
		return -EOPNOTSUPP;
	}
}

/*
 * atl2_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 */
static void atl2_tx_timeout(struct net_device *netdev)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

/*
 * atl2_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 *
 * Periodically folds the hardware RX-overflow drop counters into the
 * netdev stats and re-arms itself every 4 seconds while the interface
 * is up.
 */
static void atl2_watchdog(unsigned long data)
{
	struct atl2_adapter *adapter = (struct atl2_adapter *) data;

	if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
		u32 drop_rxd, drop_rxs;
		unsigned long flags;

		spin_lock_irqsave(&adapter->stats_lock, flags);
		drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV);
		drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV);
		spin_unlock_irqrestore(&adapter->stats_lock, flags);

		adapter->netdev->stats.rx_over_errors += drop_rxd + drop_rxs;

		/* Reset the timer */
		mod_timer(&adapter->watchdog_timer,
			round_jiffies(jiffies + 4 * HZ));
	}
}

/*
 * atl2_phy_config - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 *
 * Restarts PHY autonegotiation with the stored advertisement, then
 * clears cfg_phy so atl2_check_link may schedule another attempt.
 */
static void atl2_phy_config(unsigned long data)
{
	struct atl2_adapter *adapter = (struct atl2_adapter *) data;
	struct atl2_hw *hw = &adapter->hw;
	unsigned long flags;

	spin_lock_irqsave(&adapter->stats_lock, flags);
	atl2_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
	atl2_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN |
		MII_CR_RESTART_AUTO_NEG);
	spin_unlock_irqrestore(&adapter->stats_lock, flags);
	clear_bit(0, &adapter->cfg_phy);
}

/*
 * atl2_up - mirror of atl2_open without ring allocation / IRQ request,
 * used when re-initializing after a reset.
 */
static int atl2_up(struct atl2_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	u32 val;

	/* hardware has been reset, we need to reload some things */

	err = atl2_init_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		return err;
	}

	atl2_set_multi(netdev);
	init_ring_ptrs(adapter);

#ifdef NETIF_F_HW_VLAN_TX
	atl2_restore_vlan(adapter);
#endif

	if (atl2_configure(adapter)) {
		err = -EIO;
		goto err_up;
	}

	clear_bit(__ATL2_DOWN, &adapter->flags);

	/* kick a manual interrupt to resume event processing */
	val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
	ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val |
		MASTER_CTRL_MANUAL_INT);

	atl2_irq_enable(adapter);

err_up:
	return err;
}

/*
 * atl2_reinit_locked - full down/up cycle, serialized by the
 * __ATL2_RESETTING bit.  Must not be called from interrupt context.
 */
static void atl2_reinit_locked(struct atl2_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
		msleep(1);
	atl2_down(adapter);
	atl2_up(adapter);
	clear_bit(__ATL2_RESETTING, &adapter->flags);
}

/* work handler behind schedule_work(&adapter->reset_task) */
static void atl2_reset_task(struct work_struct *work)
{
	struct atl2_adapter *adapter;
	adapter = container_of(work, struct atl2_adapter, reset_task);

	atl2_reinit_locked(adapter);
}

/*
 * atl2_setup_mac_ctrl - program REG_MAC_CTRL from current link state,
 * VLAN settings and interface flags (promisc / allmulti).
 */
static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter)
{
	u32 value;
	struct atl2_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* Config MAC CTRL Register */
	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;

	/* duplex */
	if (FULL_DUPLEX == adapter->link_duplex)
		value |= MAC_CTRL_DUPLX;

	/* flow control */
	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);

	/* PAD & CRC */
	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);

	/* preamble length */
	value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) <<
		MAC_CTRL_PRMLEN_SHIFT);

	/* vlan */
	if (adapter->vlgrp)
		value |= MAC_CTRL_RMV_VLAN;

	/* filter mode */
	value |= MAC_CTRL_BC_EN;
	if (netdev->flags & IFF_PROMISC)
		value |= MAC_CTRL_PROMIS_EN;
	else if (netdev->flags & IFF_ALLMULTI)
		value |= MAC_CTRL_MC_ALL_EN;

	/* half retry buffer */
	value |= (((u32)(adapter->hw.retry_buf &
		MAC_CTRL_HALF_LEFT_BUF_MASK)) <<
		MAC_CTRL_HALF_LEFT_BUF_SHIFT);

	ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
}

/*
 * atl2_check_link - reconcile PHY link state with the configured media
 * type
 *
 * If the negotiated speed/duplex matches the configured MediaType the
 * MAC is (re)programmed and carrier turned on; otherwise RX is disabled
 * and the phy_config timer is armed to re-negotiate (cfg_phy guards
 * against arming it twice).  Caller holds stats_lock.
 */
static int atl2_check_link(struct atl2_adapter *adapter)
{
	struct atl2_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int ret_val;
	u16 speed, duplex, phy_data;
	int reconfig = 0;

	/* MII_BMSR must read twise */
	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
	if (!(phy_data&BMSR_LSTATUS)) { /* link down */
		if (netif_carrier_ok(netdev)) { /* old link state: Up */
			u32 value;
			/* disable rx */
			value = ATL2_READ_REG(hw, REG_MAC_CTRL);
			value &= ~MAC_CTRL_RX_EN;
			ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
		return 0;
	}

	/* Link Up */
	ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
	if (ret_val)
		return ret_val;
	switch (hw->MediaType) {
	case MEDIA_TYPE_100M_FULL:
		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_HALF:
		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_FULL:
		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_HALF:
		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	}
	/* link result is our setting */
	if (reconfig == 0) {
		if (adapter->link_speed != speed ||
			adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl2_setup_mac_ctrl(adapter);
			printk(KERN_INFO "%s: %s NIC Link is Up<%d Mbps %s>\n",
				atl2_driver_name, netdev->name,
				adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
					"Full Duplex" : "Half Duplex");
		}

		if (!netif_carrier_ok(netdev)) { /* Link down -> Up */
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
		return 0;
	}

	/* change original link status */
	if (netif_carrier_ok(netdev)) {
		u32 value;
		/* disable rx */
		value = ATL2_READ_REG(hw, REG_MAC_CTRL);
		value &= ~MAC_CTRL_RX_EN;
		ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);

		adapter->link_speed = SPEED_0;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	/* auto-neg, insert timer to re-config phy
	 * (if interval smaller than 5 seconds, something strange) */
	if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
		if (!test_and_set_bit(0, &adapter->cfg_phy))
			mod_timer(&adapter->phy_config_timer,
				round_jiffies(jiffies + 5 * HZ));
	}

	return 0;
}

/*
 * atl2_link_chg_task - deal with link change event Out of interrupt
 * context
 * @work: work struct embedded in atl2_adapter
 */
static void atl2_link_chg_task(struct work_struct *work)
{
	struct atl2_adapter *adapter;
	unsigned long flags;

	adapter = container_of(work, struct atl2_adapter, link_chg_task);

	spin_lock_irqsave(&adapter->stats_lock, flags);
	atl2_check_link(adapter);
	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

/*
 * atl2_setup_pcicmd - sanitize the PCI command register
 *
 * Forces INTx enabled, I/O space off, memory space and bus mastering
 * on, then clears any PME state left over from the BIOS.
 */
static void atl2_setup_pcicmd(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);

	if (cmd & PCI_COMMAND_INTX_DISABLE)
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
	if (cmd & PCI_COMMAND_IO)
		cmd &= ~PCI_COMMAND_IO;
	if (0 == (cmd & PCI_COMMAND_MEMORY))
		cmd |= PCI_COMMAND_MEMORY;
	if (0 == (cmd & PCI_COMMAND_MASTER))
		cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(pdev, PCI_COMMAND, cmd);

	/*
	 * some motherboards BIOS(PXE/EFI) driver may set PME
	 * while they transfer control to OS (Windows/Linux)
	 * so we should clear this bit before NIC work normally
	 */
	pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* run the interrupt handler with the IRQ masked (netconsole etc.) */
static void atl2_poll_controller(struct net_device *netdev)
{
	disable_irq(netdev->irq);
	atl2_intr(netdev->irq, netdev);
	enable_irq(netdev->irq);
}
#endif

static const struct
net_device_ops atl2_netdev_ops = { .ndo_open = atl2_open, .ndo_stop = atl2_close, .ndo_start_xmit = atl2_xmit_frame, .ndo_set_multicast_list = atl2_set_multi, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = atl2_set_mac, .ndo_change_mtu = atl2_change_mtu, .ndo_do_ioctl = atl2_ioctl, .ndo_tx_timeout = atl2_tx_timeout, .ndo_vlan_rx_register = atl2_vlan_rx_register, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = atl2_poll_controller, #endif }; /* * atl2_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in atl2_pci_tbl * * Returns 0 on success, negative on failure * * atl2_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. */ static int __devinit atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct atl2_adapter *adapter; static int cards_found; unsigned long mmio_start; int mmio_len; int err; cards_found = 0; err = pci_enable_device(pdev); if (err) return err; /* * atl2 is a shared-high-32-bit device, so we're stuck with 32-bit DMA * until the kernel has the proper infrastructure to support 64-bit DMA * on these devices. 
*/ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n"); goto err_dma; } /* Mark all PCI regions associated with PCI device * pdev as being reserved by owner atl2_driver_name */ err = pci_request_regions(pdev, atl2_driver_name); if (err) goto err_pci_reg; /* Enables bus-mastering on the device and calls * pcibios_set_master to do the needed arch specific settings */ pci_set_master(pdev); err = -ENOMEM; netdev = alloc_etherdev(sizeof(struct atl2_adapter)); if (!netdev) goto err_alloc_etherdev; SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; adapter->hw.back = adapter; mmio_start = pci_resource_start(pdev, 0x0); mmio_len = pci_resource_len(pdev, 0x0); adapter->hw.mem_rang = (u32)mmio_len; adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); if (!adapter->hw.hw_addr) { err = -EIO; goto err_ioremap; } atl2_setup_pcicmd(pdev); netdev->netdev_ops = &atl2_netdev_ops; atl2_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len; adapter->bd_number = cards_found; adapter->pci_using_64 = false; /* setup the private structure */ err = atl2_sw_init(adapter); if (err) goto err_sw_init; err = -EIO; netdev->hw_features = NETIF_F_SG; netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); /* Init PHY as early as possible due to power saving issue */ atl2_phy_init(&adapter->hw); /* reset the controller to * put the device in a known good starting state */ if (atl2_reset_hw(&adapter->hw)) { err = -EIO; goto err_reset; } /* copy the MAC address out of the EEPROM */ atl2_read_mac_addr(&adapter->hw); memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); /* FIXME: do we still need this? 
*/ #ifdef ETHTOOL_GPERMADDR memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->perm_addr)) { #else if (!is_valid_ether_addr(netdev->dev_addr)) { #endif err = -EIO; goto err_eeprom; } atl2_check_options(adapter); init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = atl2_watchdog; adapter->watchdog_timer.data = (unsigned long) adapter; init_timer(&adapter->phy_config_timer); adapter->phy_config_timer.function = atl2_phy_config; adapter->phy_config_timer.data = (unsigned long) adapter; INIT_WORK(&adapter->reset_task, atl2_reset_task); INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task); strcpy(netdev->name, "eth%d"); /* ?? */ err = register_netdev(netdev); if (err) goto err_register; /* assume we have no link for now */ netif_carrier_off(netdev); netif_stop_queue(netdev); cards_found++; return 0; err_reset: err_register: err_sw_init: err_eeprom: iounmap(adapter->hw.hw_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /* * atl2_remove - Device Removal Routine * @pdev: PCI device information struct * * atl2_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. 
*/
/* FIXME: write the original MAC address back in case it was changed from a
 * BIOS-set value, as in atl1 -- CHS */
static void __devexit atl2_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl2_adapter *adapter = netdev_priv(netdev);

	/* flush_scheduled work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__ATL2_DOWN, &adapter->flags);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_config_timer);
	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->link_chg_task);

	unregister_netdev(netdev);

	atl2_force_ps(&adapter->hw);	/* put PHY into power-save */

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/*
 * atl2_suspend - PCI suspend hook (also used by atl2_shutdown)
 * @pdev: PCI device
 * @state: target power state
 *
 * Takes the interface down, then programs one of three Wake-on-LAN
 * configurations: magic-packet wake (link currently up), link-change
 * wake (link down), or WOL fully disabled.
 */
static int atl2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct atl2_hw *hw = &adapter->hw;
	u16 speed, duplex;
	u32 ctrl = 0;
	u32 wufc = adapter->wol;

#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
		atl2_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	/* BMSR link bit is latched-low: read twice for current state.
	 * Only the low 16 bits of ctrl are written here. */
	atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
	atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
	if (ctrl & BMSR_LSTATUS)
		wufc &= ~ATLX_WUFC_LNKC;	/* link up: no link-change wake */

	/* Link is up and some wake event requested: magic-packet WOL. */
	if (0 != (ctrl & BMSR_LSTATUS) && 0 != wufc) {
		u32 ret_val;
		/* get current link speed & duplex */
		ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			printk(KERN_DEBUG
				"%s: get speed&duplex error while suspend\n",
				atl2_driver_name);
			goto wol_dis;
		}

		ctrl = 0;

		/* turn on magic packet wol */
		if (wufc & ATLX_WUFC_MAG)
			ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);

		/* ignore Link Chg event when Link is up */
		ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);

		/* Config MAC CTRL Register: keep RX alive so magic packets
		 * can be seen */
		ctrl = MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
		if (FULL_DUPLEX == adapter->link_duplex)
			ctrl |= MAC_CTRL_DUPLX;
		ctrl |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
		ctrl |= (((u32)adapter->hw.preamble_len &
			MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
		ctrl |= (((u32)(adapter->hw.retry_buf &
			MAC_CTRL_HALF_LEFT_BUF_MASK)) <<
			MAC_CTRL_HALF_LEFT_BUF_SHIFT);
		if (wufc & ATLX_WUFC_MAG) {
			/* magic packet maybe Broadcast&multicast&Unicast */
			ctrl |= MAC_CTRL_BC_EN;
		}

		ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl);

		/* pcie patch */
		ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
		ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
		ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
		ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
		ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);

		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
		goto suspend_exit;
	}

	/* Link down and link-change wake requested. */
	if (0 == (ctrl&BMSR_LSTATUS) && 0 != (wufc&ATLX_WUFC_LNKC)) {
		/* link is down, so only LINK CHG WOL event enable */
		ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
		ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
		ATL2_WRITE_REG(hw, REG_MAC_CTRL, 0);

		/* pcie patch */
		ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
		ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
		ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
		ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
		ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);

		hw->phy_configured = false; /* re-init PHY when resume */

		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);

		goto suspend_exit;
	}

wol_dis:
	/* WOL disabled */
	ATL2_WRITE_REG(hw, REG_WOL_CTRL, 0);

	/* pcie patch */
	ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
	ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
	ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
	ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
	ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);

	atl2_force_ps(hw);
	hw->phy_configured = false; /* re-init PHY when resume */

	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);

suspend_exit:
	if (netif_running(netdev))
		atl2_free_irq(adapter);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
/*
 * atl2_resume - PCI resume hook: restore power/config state, clear WOL
 * status, re-request the IRQ and bring the interface back up.
 */
static int atl2_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl2_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR
			"atl2: Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);

	if (netif_running(netdev)) {
		err = atl2_request_irq(adapter);
		if (err)
			return err;
	}

	atl2_reset_hw(&adapter->hw);

	if (netif_running(netdev))
		atl2_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

/* shutdown == suspend without a resume to follow */
static void atl2_shutdown(struct pci_dev *pdev)
{
	atl2_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver atl2_driver = {
	.name		= atl2_driver_name,
	.id_table	= atl2_pci_tbl,
	.probe		= atl2_probe,
	.remove		= __devexit_p(atl2_remove),
	/* Power Management Hooks */
	.suspend	= atl2_suspend,
#ifdef CONFIG_PM
	.resume		= atl2_resume,
#endif
	.shutdown	= atl2_shutdown,
};

/*
 * atl2_init_module - Driver Registration Routine
 *
 * atl2_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init atl2_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n", atl2_driver_string,
		atl2_driver_version);
	printk(KERN_INFO "%s\n", atl2_copyright);
	return pci_register_driver(&atl2_driver);
}
module_init(atl2_init_module);

/*
 * atl2_exit_module - Driver Exit Cleanup Routine
 *
 * atl2_exit_module is called just before the driver is removed
 * from memory.
*/ static void __exit atl2_exit_module(void) { pci_unregister_driver(&atl2_driver); } module_exit(atl2_exit_module); static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value) { struct atl2_adapter *adapter = hw->back; pci_read_config_word(adapter->pdev, reg, value); } static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value) { struct atl2_adapter *adapter = hw->back; pci_write_config_word(adapter->pdev, reg, *value); } static int atl2_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP); ecmd->advertising = ADVERTISED_TP; ecmd->advertising |= ADVERTISED_Autoneg; ecmd->advertising |= hw->autoneg_advertised; ecmd->port = PORT_TP; ecmd->phy_address = 0; ecmd->transceiver = XCVR_INTERNAL; if (adapter->link_speed != SPEED_0) { ethtool_cmd_speed_set(ecmd, adapter->link_speed); if (adapter->link_duplex == FULL_DUPLEX) ecmd->duplex = DUPLEX_FULL; else ecmd->duplex = DUPLEX_HALF; } else { ethtool_cmd_speed_set(ecmd, -1); ecmd->duplex = -1; } ecmd->autoneg = AUTONEG_ENABLE; return 0; } static int atl2_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags)) msleep(1); if (ecmd->autoneg == AUTONEG_ENABLE) { #define MY_ADV_MASK (ADVERTISE_10_HALF | \ ADVERTISE_10_FULL | \ ADVERTISE_100_HALF| \ ADVERTISE_100_FULL) if ((ecmd->advertising & MY_ADV_MASK) == MY_ADV_MASK) { hw->MediaType = MEDIA_TYPE_AUTO_SENSOR; hw->autoneg_advertised = MY_ADV_MASK; } else if ((ecmd->advertising & MY_ADV_MASK) == ADVERTISE_100_FULL) { hw->MediaType = MEDIA_TYPE_100M_FULL; hw->autoneg_advertised = ADVERTISE_100_FULL; } else if ((ecmd->advertising & MY_ADV_MASK) == 
ADVERTISE_100_HALF) { hw->MediaType = MEDIA_TYPE_100M_HALF; hw->autoneg_advertised = ADVERTISE_100_HALF; } else if ((ecmd->advertising & MY_ADV_MASK) == ADVERTISE_10_FULL) { hw->MediaType = MEDIA_TYPE_10M_FULL; hw->autoneg_advertised = ADVERTISE_10_FULL; } else if ((ecmd->advertising & MY_ADV_MASK) == ADVERTISE_10_HALF) { hw->MediaType = MEDIA_TYPE_10M_HALF; hw->autoneg_advertised = ADVERTISE_10_HALF; } else { clear_bit(__ATL2_RESETTING, &adapter->flags); return -EINVAL; } ecmd->advertising = hw->autoneg_advertised | ADVERTISED_TP | ADVERTISED_Autoneg; } else { clear_bit(__ATL2_RESETTING, &adapter->flags); return -EINVAL; } /* reset the link */ if (netif_running(adapter->netdev)) { atl2_down(adapter); atl2_up(adapter); } else atl2_reset_hw(&adapter->hw); clear_bit(__ATL2_RESETTING, &adapter->flags); return 0; } static u32 atl2_get_msglevel(struct net_device *netdev) { return 0; } /* * It's sane for this to be empty, but we might want to take advantage of this. */ static void atl2_set_msglevel(struct net_device *netdev, u32 data) { } static int atl2_get_regs_len(struct net_device *netdev) { #define ATL2_REGS_LEN 42 return sizeof(u32) * ATL2_REGS_LEN; } static void atl2_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; u32 *regs_buff = p; u16 phy_data; memset(p, 0, sizeof(u32) * ATL2_REGS_LEN); regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; regs_buff[0] = ATL2_READ_REG(hw, REG_VPD_CAP); regs_buff[1] = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); regs_buff[2] = ATL2_READ_REG(hw, REG_SPI_FLASH_CONFIG); regs_buff[3] = ATL2_READ_REG(hw, REG_TWSI_CTRL); regs_buff[4] = ATL2_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL); regs_buff[5] = ATL2_READ_REG(hw, REG_MASTER_CTRL); regs_buff[6] = ATL2_READ_REG(hw, REG_MANUAL_TIMER_INIT); regs_buff[7] = ATL2_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT); regs_buff[8] = ATL2_READ_REG(hw, REG_PHY_ENABLE); regs_buff[9] = 
ATL2_READ_REG(hw, REG_CMBDISDMA_TIMER); regs_buff[10] = ATL2_READ_REG(hw, REG_IDLE_STATUS); regs_buff[11] = ATL2_READ_REG(hw, REG_MDIO_CTRL); regs_buff[12] = ATL2_READ_REG(hw, REG_SERDES_LOCK); regs_buff[13] = ATL2_READ_REG(hw, REG_MAC_CTRL); regs_buff[14] = ATL2_READ_REG(hw, REG_MAC_IPG_IFG); regs_buff[15] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR); regs_buff[16] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR+4); regs_buff[17] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE); regs_buff[18] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE+4); regs_buff[19] = ATL2_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL); regs_buff[20] = ATL2_READ_REG(hw, REG_MTU); regs_buff[21] = ATL2_READ_REG(hw, REG_WOL_CTRL); regs_buff[22] = ATL2_READ_REG(hw, REG_SRAM_TXRAM_END); regs_buff[23] = ATL2_READ_REG(hw, REG_DESC_BASE_ADDR_HI); regs_buff[24] = ATL2_READ_REG(hw, REG_TXD_BASE_ADDR_LO); regs_buff[25] = ATL2_READ_REG(hw, REG_TXD_MEM_SIZE); regs_buff[26] = ATL2_READ_REG(hw, REG_TXS_BASE_ADDR_LO); regs_buff[27] = ATL2_READ_REG(hw, REG_TXS_MEM_SIZE); regs_buff[28] = ATL2_READ_REG(hw, REG_RXD_BASE_ADDR_LO); regs_buff[29] = ATL2_READ_REG(hw, REG_RXD_BUF_NUM); regs_buff[30] = ATL2_READ_REG(hw, REG_DMAR); regs_buff[31] = ATL2_READ_REG(hw, REG_TX_CUT_THRESH); regs_buff[32] = ATL2_READ_REG(hw, REG_DMAW); regs_buff[33] = ATL2_READ_REG(hw, REG_PAUSE_ON_TH); regs_buff[34] = ATL2_READ_REG(hw, REG_PAUSE_OFF_TH); regs_buff[35] = ATL2_READ_REG(hw, REG_MB_TXD_WR_IDX); regs_buff[36] = ATL2_READ_REG(hw, REG_MB_RXD_RD_IDX); regs_buff[38] = ATL2_READ_REG(hw, REG_ISR); regs_buff[39] = ATL2_READ_REG(hw, REG_IMR); atl2_read_phy_reg(hw, MII_BMCR, &phy_data); regs_buff[40] = (u32)phy_data; atl2_read_phy_reg(hw, MII_BMSR, &phy_data); regs_buff[41] = (u32)phy_data; } static int atl2_get_eeprom_len(struct net_device *netdev) { struct atl2_adapter *adapter = netdev_priv(netdev); if (!atl2_check_eeprom_exist(&adapter->hw)) return 512; else return 0; } static int atl2_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct 
atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; u32 *eeprom_buff; int first_dword, last_dword; int ret_val = 0; int i; if (eeprom->len == 0) return -EINVAL; if (atl2_check_eeprom_exist(hw)) return -EINVAL; eeprom->magic = hw->vendor_id | (hw->device_id << 16); first_dword = eeprom->offset >> 2; last_dword = (eeprom->offset + eeprom->len - 1) >> 2; eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1), GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; for (i = first_dword; i < last_dword; i++) { if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) { ret_val = -EIO; goto free; } } memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), eeprom->len); free: kfree(eeprom_buff); return ret_val; } static int atl2_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; u32 *eeprom_buff; u32 *ptr; int max_len, first_dword, last_dword, ret_val = 0; int i; if (eeprom->len == 0) return -EOPNOTSUPP; if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) return -EFAULT; max_len = 512; first_dword = eeprom->offset >> 2; last_dword = (eeprom->offset + eeprom->len - 1) >> 2; eeprom_buff = kmalloc(max_len, GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; ptr = eeprom_buff; if (eeprom->offset & 3) { /* need read/modify/write of first changed EEPROM word */ /* only the second byte of the word is being modified */ if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) { ret_val = -EIO; goto out; } ptr++; } if (((eeprom->offset + eeprom->len) & 3)) { /* * need read/modify/write of last changed EEPROM word * only the first byte of the word is being modified */ if (!atl2_read_eeprom(hw, last_dword * 4, &(eeprom_buff[last_dword - first_dword]))) { ret_val = -EIO; goto out; } } /* Device's eeprom is always little-endian, word addressable */ memcpy(ptr, bytes, eeprom->len); for (i = 0; i < last_dword - first_dword + 1; 
i++) { if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) { ret_val = -EIO; goto out; } } out: kfree(eeprom_buff); return ret_val; } static void atl2_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct atl2_adapter *adapter = netdev_priv(netdev); strncpy(drvinfo->driver, atl2_driver_name, 32); strncpy(drvinfo->version, atl2_driver_version, 32); strncpy(drvinfo->fw_version, "L2", 32); strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); drvinfo->n_stats = 0; drvinfo->testinfo_len = 0; drvinfo->regdump_len = atl2_get_regs_len(netdev); drvinfo->eedump_len = atl2_get_eeprom_len(netdev); } static void atl2_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl2_adapter *adapter = netdev_priv(netdev); wol->supported = WAKE_MAGIC; wol->wolopts = 0; if (adapter->wol & ATLX_WUFC_EX) wol->wolopts |= WAKE_UCAST; if (adapter->wol & ATLX_WUFC_MC) wol->wolopts |= WAKE_MCAST; if (adapter->wol & ATLX_WUFC_BC) wol->wolopts |= WAKE_BCAST; if (adapter->wol & ATLX_WUFC_MAG) wol->wolopts |= WAKE_MAGIC; if (adapter->wol & ATLX_WUFC_LNKC) wol->wolopts |= WAKE_PHY; } static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl2_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) return -EOPNOTSUPP; if (wol->wolopts & (WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) return -EOPNOTSUPP; /* these settings will always override what we currently have */ adapter->wol = 0; if (wol->wolopts & WAKE_MAGIC) adapter->wol |= ATLX_WUFC_MAG; if (wol->wolopts & WAKE_PHY) adapter->wol |= ATLX_WUFC_LNKC; return 0; } static int atl2_nway_reset(struct net_device *netdev) { struct atl2_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) atl2_reinit_locked(adapter); return 0; } static const struct ethtool_ops atl2_ethtool_ops = { .get_settings = atl2_get_settings, .set_settings = atl2_set_settings, .get_drvinfo = atl2_get_drvinfo, .get_regs_len = atl2_get_regs_len, 
.get_regs = atl2_get_regs, .get_wol = atl2_get_wol, .set_wol = atl2_set_wol, .get_msglevel = atl2_get_msglevel, .set_msglevel = atl2_set_msglevel, .nway_reset = atl2_nway_reset, .get_link = ethtool_op_get_link, .get_eeprom_len = atl2_get_eeprom_len, .get_eeprom = atl2_get_eeprom, .set_eeprom = atl2_set_eeprom, }; static void atl2_set_ethtool_ops(struct net_device *netdev) { SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops); } #define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \ (((a) & 0xff00ff00) >> 8)) #define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16)) #define SHORTSWAP(a) (((a) << 8) | ((a) >> 8)) /* * Reset the transmit and receive units; mask and clear all interrupts. * * hw - Struct containing variables accessed by shared code * return : 0 or idle status (if error) */ static s32 atl2_reset_hw(struct atl2_hw *hw) { u32 icr; u16 pci_cfg_cmd_word; int i; /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */ atl2_read_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word); if ((pci_cfg_cmd_word & (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) != (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) { pci_cfg_cmd_word |= (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER); atl2_write_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word); } /* Clear Interrupt mask to stop board from generating * interrupts & Clear any pending interrupt events */ /* FIXME */ /* ATL2_WRITE_REG(hw, REG_IMR, 0); */ /* ATL2_WRITE_REG(hw, REG_ISR, 0xffffffff); */ /* Issue Soft Reset to the MAC. This will reset the chip's * transmit, receive, DMA. It will not effect * the current PCI configuration. The global reset bit is self- * clearing, and should clear within a microsecond. 
*/ ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST); wmb(); msleep(1); /* delay about 1ms */ /* Wait at least 10ms for All module to be Idle */ for (i = 0; i < 10; i++) { icr = ATL2_READ_REG(hw, REG_IDLE_STATUS); if (!icr) break; msleep(1); /* delay 1 ms */ cpu_relax(); } if (icr) return icr; return 0; } #define CUSTOM_SPI_CS_SETUP 2 #define CUSTOM_SPI_CLK_HI 2 #define CUSTOM_SPI_CLK_LO 2 #define CUSTOM_SPI_CS_HOLD 2 #define CUSTOM_SPI_CS_HI 3 static struct atl2_spi_flash_dev flash_table[] = { /* MFR WRSR READ PROGRAM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */ {"Atmel", 0x0, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62 }, {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60 }, {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7 }, }; static bool atl2_spi_read(struct atl2_hw *hw, u32 addr, u32 *buf) { int i; u32 value; ATL2_WRITE_REG(hw, REG_SPI_DATA, 0); ATL2_WRITE_REG(hw, REG_SPI_ADDR, addr); value = SPI_FLASH_CTRL_WAIT_READY | (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) << SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) << SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) << SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) << SPI_FLASH_CTRL_CS_HI_SHIFT | (0x1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT; ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); value |= SPI_FLASH_CTRL_START; ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); for (i = 0; i < 10; i++) { msleep(1); value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); if (!(value & SPI_FLASH_CTRL_START)) break; } if (value & SPI_FLASH_CTRL_START) return false; *buf = ATL2_READ_REG(hw, REG_SPI_DATA); return true; } /* * get_permanent_address * return 0 if get valid mac address, */ static int get_permanent_address(struct atl2_hw *hw) { u32 Addr[2]; u32 i, Control; u16 Register; u8 
EthAddr[NODE_ADDRESS_SIZE]; bool KeyValid; if (is_valid_ether_addr(hw->perm_mac_addr)) return 0; Addr[0] = 0; Addr[1] = 0; if (!atl2_check_eeprom_exist(hw)) { /* eeprom exists */ Register = 0; KeyValid = false; /* Read out all EEPROM content */ i = 0; while (1) { if (atl2_read_eeprom(hw, i + 0x100, &Control)) { if (KeyValid) { if (Register == REG_MAC_STA_ADDR) Addr[0] = Control; else if (Register == (REG_MAC_STA_ADDR + 4)) Addr[1] = Control; KeyValid = false; } else if ((Control & 0xff) == 0x5A) { KeyValid = true; Register = (u16) (Control >> 16); } else { /* assume data end while encount an invalid KEYWORD */ break; } } else { break; /* read error */ } i += 4; } *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]); if (is_valid_ether_addr(EthAddr)) { memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE); return 0; } return 1; } /* see if SPI flash exists? */ Addr[0] = 0; Addr[1] = 0; Register = 0; KeyValid = false; i = 0; while (1) { if (atl2_spi_read(hw, i + 0x1f000, &Control)) { if (KeyValid) { if (Register == REG_MAC_STA_ADDR) Addr[0] = Control; else if (Register == (REG_MAC_STA_ADDR + 4)) Addr[1] = Control; KeyValid = false; } else if ((Control & 0xff) == 0x5A) { KeyValid = true; Register = (u16) (Control >> 16); } else { break; /* data end */ } } else { break; /* read error */ } i += 4; } *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]); if (is_valid_ether_addr(EthAddr)) { memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE); return 0; } /* maybe MAC-address is from BIOS */ Addr[0] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR); Addr[1] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR + 4); *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]); if (is_valid_ether_addr(EthAddr)) { memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE); return 0; } return 1; } /* * Reads the adapter's MAC address from the EEPROM * * hw - Struct containing variables accessed 
by shared code */ static s32 atl2_read_mac_addr(struct atl2_hw *hw) { u16 i; if (get_permanent_address(hw)) { /* for test */ /* FIXME: shouldn't we use random_ether_addr() here? */ hw->perm_mac_addr[0] = 0x00; hw->perm_mac_addr[1] = 0x13; hw->perm_mac_addr[2] = 0x74; hw->perm_mac_addr[3] = 0x00; hw->perm_mac_addr[4] = 0x5c; hw->perm_mac_addr[5] = 0x38; } for (i = 0; i < NODE_ADDRESS_SIZE; i++) hw->mac_addr[i] = hw->perm_mac_addr[i]; return 0; } /* * Hashes an address to determine its location in the multicast table * * hw - Struct containing variables accessed by shared code * mc_addr - the multicast address to hash * * atl2_hash_mc_addr * purpose * set hash value for a multicast address * hash calcu processing : * 1. calcu 32bit CRC for multicast address * 2. reverse crc with MSB to LSB */ static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr) { u32 crc32, value; int i; value = 0; crc32 = ether_crc_le(6, mc_addr); for (i = 0; i < 32; i++) value |= (((crc32 >> i) & 1) << (31 - i)); return value; } /* * Sets the bit in the multicast table corresponding to the hash value. * * hw - Struct containing variables accessed by shared code * hash_value - Multicast address hash value */ static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value) { u32 hash_bit, hash_reg; u32 mta; /* The HASH Table is a register array of 2 32-bit registers. * It is treated like an array of 64 bits. We want to set * bit BitArray[hash_value]. So we figure out what register * the bit is in, read it, OR in the new bit, then write * back the new value. The register is determined by the * upper 7 bits of the hash value and the bit within that * register are determined by the lower 5 bits of the value. 
*/ hash_reg = (hash_value >> 31) & 0x1; hash_bit = (hash_value >> 26) & 0x1F; mta = ATL2_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); mta |= (1 << hash_bit); ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); } /* * atl2_init_pcie - init PCIE module */ static void atl2_init_pcie(struct atl2_hw *hw) { u32 value; value = LTSSM_TEST_MODE_DEF; ATL2_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value); value = PCIE_DLL_TX_CTRL1_DEF; ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, value); } static void atl2_init_flash_opcode(struct atl2_hw *hw) { if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) hw->flash_vendor = 0; /* ATMEL */ /* Init OP table */ ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_PROGRAM, flash_table[hw->flash_vendor].cmdPROGRAM); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_SC_ERASE, flash_table[hw->flash_vendor].cmdSECTOR_ERASE); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_CHIP_ERASE, flash_table[hw->flash_vendor].cmdCHIP_ERASE); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDID, flash_table[hw->flash_vendor].cmdRDID); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WREN, flash_table[hw->flash_vendor].cmdWREN); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDSR, flash_table[hw->flash_vendor].cmdRDSR); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WRSR, flash_table[hw->flash_vendor].cmdWRSR); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_READ, flash_table[hw->flash_vendor].cmdREAD); } /******************************************************************** * Performs basic configuration of the adapter. * * hw - Struct containing variables accessed by shared code * Assumes that the controller has previously been reset and is in a * post-reset uninitialized state. Initializes multicast table, * and Calls routines to setup link * Leaves the transmit and receive units disabled and uninitialized. 
********************************************************************/ static s32 atl2_init_hw(struct atl2_hw *hw) { u32 ret_val = 0; atl2_init_pcie(hw); /* Zero out the Multicast HASH table */ /* clear the old settings from the multicast hash table */ ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); atl2_init_flash_opcode(hw); ret_val = atl2_phy_init(hw); return ret_val; } /* * Detects the current speed and duplex settings of the hardware. * * hw - Struct containing variables accessed by shared code * speed - Speed of the connection * duplex - Duplex setting of the connection */ static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed, u16 *duplex) { s32 ret_val; u16 phy_data; /* Read PHY Specific Status Register (17) */ ret_val = atl2_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data); if (ret_val) return ret_val; if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED)) return ATLX_ERR_PHY_RES; switch (phy_data & MII_ATLX_PSSR_SPEED) { case MII_ATLX_PSSR_100MBS: *speed = SPEED_100; break; case MII_ATLX_PSSR_10MBS: *speed = SPEED_10; break; default: return ATLX_ERR_PHY_SPEED; break; } if (phy_data & MII_ATLX_PSSR_DPLX) *duplex = FULL_DUPLEX; else *duplex = HALF_DUPLEX; return 0; } /* * Reads the value from a PHY register * hw - Struct containing variables accessed by shared code * reg_addr - address of the PHY register to read */ static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data) { u32 val; int i; val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val); wmb(); for (i = 0; i < MDIO_WAIT_TIMES; i++) { udelay(2); val = ATL2_READ_REG(hw, REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; wmb(); } if (!(val & (MDIO_START | MDIO_BUSY))) { *phy_data = (u16)val; return 0; } return ATLX_ERR_PHY; } /* * Writes a value to a PHY register * hw - Struct containing 
variables accessed by shared code * reg_addr - address of the PHY register to write * data - data to write to the PHY */ static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data) { int i; u32 val; val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | MDIO_SUP_PREAMBLE | MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val); wmb(); for (i = 0; i < MDIO_WAIT_TIMES; i++) { udelay(2); val = ATL2_READ_REG(hw, REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; wmb(); } if (!(val & (MDIO_START | MDIO_BUSY))) return 0; return ATLX_ERR_PHY; } /* * Configures PHY autoneg and flow control advertisement settings * * hw - Struct containing variables accessed by shared code */ static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw) { s32 ret_val; s16 mii_autoneg_adv_reg; /* Read the MII Auto-Neg Advertisement Register (Address 4). */ mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; /* Need to parse autoneg_advertised and set up * the appropriate PHY registers. First we will parse for * autoneg_advertised software override. Since we can advertise * a plethora of combinations, we need to check each bit * individually. */ /* First we clear all the 10/100 mb speed bits in the Auto-Neg * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T Control Register (Address 9). */ mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; /* Need to parse MediaType and setup the * appropriate PHY registers. 
*/ switch (hw->MediaType) { case MEDIA_TYPE_AUTO_SENSOR: mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | MII_AR_10T_FD_CAPS | MII_AR_100TX_HD_CAPS| MII_AR_100TX_FD_CAPS); hw->autoneg_advertised = ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF| ADVERTISE_100_FULL; break; case MEDIA_TYPE_100M_FULL: mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; hw->autoneg_advertised = ADVERTISE_100_FULL; break; case MEDIA_TYPE_100M_HALF: mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; hw->autoneg_advertised = ADVERTISE_100_HALF; break; case MEDIA_TYPE_10M_FULL: mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; hw->autoneg_advertised = ADVERTISE_10_FULL; break; default: mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; hw->autoneg_advertised = ADVERTISE_10_HALF; break; } /* flow control fixed to enable all */ mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); if (ret_val) return ret_val; return 0; } /* * Resets the PHY and make all config validate * * hw - Struct containing variables accessed by shared code * * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) */ static s32 atl2_phy_commit(struct atl2_hw *hw) { s32 ret_val; u16 phy_data; phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; ret_val = atl2_write_phy_reg(hw, MII_BMCR, phy_data); if (ret_val) { u32 val; int i; /* pcie serdes link may be down ! 
*/ for (i = 0; i < 25; i++) { msleep(1); val = ATL2_READ_REG(hw, REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; } if (0 != (val & (MDIO_START | MDIO_BUSY))) { printk(KERN_ERR "atl2: PCIe link down for at least 25ms !\n"); return ret_val; } } return 0; } static s32 atl2_phy_init(struct atl2_hw *hw) { s32 ret_val; u16 phy_val; if (hw->phy_configured) return 0; /* Enable PHY */ ATL2_WRITE_REGW(hw, REG_PHY_ENABLE, 1); ATL2_WRITE_FLUSH(hw); msleep(1); /* check if the PHY is in powersaving mode */ atl2_write_phy_reg(hw, MII_DBG_ADDR, 0); atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val); /* 024E / 124E 0r 0274 / 1274 ? */ if (phy_val & 0x1000) { phy_val &= ~0x1000; atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val); } msleep(1); /*Enable PHY LinkChange Interrupt */ ret_val = atl2_write_phy_reg(hw, 18, 0xC00); if (ret_val) return ret_val; /* setup AutoNeg parameters */ ret_val = atl2_phy_setup_autoneg_adv(hw); if (ret_val) return ret_val; /* SW.Reset & En-Auto-Neg to restart Auto-Neg */ ret_val = atl2_phy_commit(hw); if (ret_val) return ret_val; hw->phy_configured = true; return ret_val; } static void atl2_set_mac_addr(struct atl2_hw *hw) { u32 value; /* 00-0B-6A-F6-00-DC * 0: 6AF600DC 1: 000B * low dword */ value = (((u32)hw->mac_addr[2]) << 24) | (((u32)hw->mac_addr[3]) << 16) | (((u32)hw->mac_addr[4]) << 8) | (((u32)hw->mac_addr[5])); ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); /* hight dword */ value = (((u32)hw->mac_addr[0]) << 8) | (((u32)hw->mac_addr[1])); ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); } /* * check_eeprom_exist * return 0 if eeprom exist */ static int atl2_check_eeprom_exist(struct atl2_hw *hw) { u32 value; value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); if (value & SPI_FLASH_CTRL_EN_VPD) { value &= ~SPI_FLASH_CTRL_EN_VPD; ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); } value = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST); return ((value & 0xFF00) == 0x6C00) ? 0 : 1; } /* FIXME: This doesn't look right. 
-- CHS */ static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value) { return true; } static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue) { int i; u32 Control; if (Offset & 0x3) return false; /* address do not align */ ATL2_WRITE_REG(hw, REG_VPD_DATA, 0); Control = (Offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; ATL2_WRITE_REG(hw, REG_VPD_CAP, Control); for (i = 0; i < 10; i++) { msleep(2); Control = ATL2_READ_REG(hw, REG_VPD_CAP); if (Control & VPD_CAP_VPD_FLAG) break; } if (Control & VPD_CAP_VPD_FLAG) { *pValue = ATL2_READ_REG(hw, REG_VPD_DATA); return true; } return false; /* timeout */ } static void atl2_force_ps(struct atl2_hw *hw) { u16 phy_val; atl2_write_phy_reg(hw, MII_DBG_ADDR, 0); atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val); atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val | 0x1000); atl2_write_phy_reg(hw, MII_DBG_ADDR, 2); atl2_write_phy_reg(hw, MII_DBG_DATA, 0x3000); atl2_write_phy_reg(hw, MII_DBG_ADDR, 3); atl2_write_phy_reg(hw, MII_DBG_DATA, 0); } /* This is the only thing that needs to be changed to adjust the * maximum number of ports that the driver can manage. */ #define ATL2_MAX_NIC 4 #define OPTION_UNSET -1 #define OPTION_DISABLED 0 #define OPTION_ENABLED 1 /* All parameters are treated the same, as an integer array of values. * This macro just reduces the need to repeat the same declaration code * over and over (plus this helps to avoid typo bugs). */ #define ATL2_PARAM_INIT {[0 ... ATL2_MAX_NIC] = OPTION_UNSET} #ifndef module_param_array /* Module Parameters are always initialized to -1, so that the driver * can tell the difference between no user specified value or the * user asking for the default value. * The true default values are loaded in when atl2_check_options is called. * * This is a GCC extension to ANSI C. * See the item "Labeled Elements in Initializers" in the section * "Extensions to the C Language Family" of the GCC documentation. 
*/ #define ATL2_PARAM(X, desc) \ static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \ MODULE_PARM_DESC(X, desc); #else #define ATL2_PARAM(X, desc) \ static int __devinitdata X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \ static unsigned int num_##X; \ module_param_array_named(X, X, int, &num_##X, 0); \ MODULE_PARM_DESC(X, desc); #endif /* * Transmit Memory Size * Valid Range: 64-2048 * Default Value: 128 */ #define ATL2_MIN_TX_MEMSIZE 4 /* 4KB */ #define ATL2_MAX_TX_MEMSIZE 64 /* 64KB */ #define ATL2_DEFAULT_TX_MEMSIZE 8 /* 8KB */ ATL2_PARAM(TxMemSize, "Bytes of Transmit Memory"); /* * Receive Memory Block Count * Valid Range: 16-512 * Default Value: 128 */ #define ATL2_MIN_RXD_COUNT 16 #define ATL2_MAX_RXD_COUNT 512 #define ATL2_DEFAULT_RXD_COUNT 64 ATL2_PARAM(RxMemBlock, "Number of receive memory block"); /* * User Specified MediaType Override * * Valid Range: 0-5 * - 0 - auto-negotiate at all supported speeds * - 1 - only link at 1000Mbps Full Duplex * - 2 - only link at 100Mbps Full Duplex * - 3 - only link at 100Mbps Half Duplex * - 4 - only link at 10Mbps Full Duplex * - 5 - only link at 10Mbps Half Duplex * Default Value: 0 */ ATL2_PARAM(MediaType, "MediaType Select"); /* * Interrupt Moderate Timer in units of 2048 ns (~2 us) * Valid Range: 10-65535 * Default Value: 45000(90ms) */ #define INT_MOD_DEFAULT_CNT 100 /* 200us */ #define INT_MOD_MAX_CNT 65000 #define INT_MOD_MIN_CNT 50 ATL2_PARAM(IntModTimer, "Interrupt Moderator Timer"); /* * FlashVendor * Valid Range: 0-2 * 0 - Atmel * 1 - SST * 2 - ST */ ATL2_PARAM(FlashVendor, "SPI Flash Vendor"); #define AUTONEG_ADV_DEFAULT 0x2F #define AUTONEG_ADV_MASK 0x2F #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL #define FLASH_VENDOR_DEFAULT 0 #define FLASH_VENDOR_MIN 0 #define FLASH_VENDOR_MAX 2 struct atl2_option { enum { enable_option, range_option, list_option } type; char *name; char *err; int def; union { struct { /* range_option info */ int 
min; int max; } r; struct { /* list_option info */ int nr; struct atl2_opt_list { int i; char *str; } *p; } l; } arg; }; static int __devinit atl2_validate_option(int *value, struct atl2_option *opt) { int i; struct atl2_opt_list *ent; if (*value == OPTION_UNSET) { *value = opt->def; return 0; } switch (opt->type) { case enable_option: switch (*value) { case OPTION_ENABLED: printk(KERN_INFO "%s Enabled\n", opt->name); return 0; break; case OPTION_DISABLED: printk(KERN_INFO "%s Disabled\n", opt->name); return 0; break; } break; case range_option: if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { printk(KERN_INFO "%s set to %i\n", opt->name, *value); return 0; } break; case list_option: for (i = 0; i < opt->arg.l.nr; i++) { ent = &opt->arg.l.p[i]; if (*value == ent->i) { if (ent->str[0] != '\0') printk(KERN_INFO "%s\n", ent->str); return 0; } } break; default: BUG(); } printk(KERN_INFO "Invalid %s specified (%i) %s\n", opt->name, *value, opt->err); *value = opt->def; return -1; } /* * atl2_check_options - Range Checking for Command Line Parameters * @adapter: board private structure * * This routine checks all command line parameters for valid user * input. If an invalid value is given, or if no user specified * value exists, a default value is used. The final value is stored * in a variable in the adapter structure. 
*/ static void __devinit atl2_check_options(struct atl2_adapter *adapter) { int val; struct atl2_option opt; int bd = adapter->bd_number; if (bd >= ATL2_MAX_NIC) { printk(KERN_NOTICE "Warning: no configuration for board #%i\n", bd); printk(KERN_NOTICE "Using defaults for all values\n"); #ifndef module_param_array bd = ATL2_MAX_NIC; #endif } /* Bytes of Transmit Memory */ opt.type = range_option; opt.name = "Bytes of Transmit Memory"; opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_TX_MEMSIZE); opt.def = ATL2_DEFAULT_TX_MEMSIZE; opt.arg.r.min = ATL2_MIN_TX_MEMSIZE; opt.arg.r.max = ATL2_MAX_TX_MEMSIZE; #ifdef module_param_array if (num_TxMemSize > bd) { #endif val = TxMemSize[bd]; atl2_validate_option(&val, &opt); adapter->txd_ring_size = ((u32) val) * 1024; #ifdef module_param_array } else adapter->txd_ring_size = ((u32)opt.def) * 1024; #endif /* txs ring size: */ adapter->txs_ring_size = adapter->txd_ring_size / 128; if (adapter->txs_ring_size > 160) adapter->txs_ring_size = 160; /* Receive Memory Block Count */ opt.type = range_option; opt.name = "Number of receive memory block"; opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_RXD_COUNT); opt.def = ATL2_DEFAULT_RXD_COUNT; opt.arg.r.min = ATL2_MIN_RXD_COUNT; opt.arg.r.max = ATL2_MAX_RXD_COUNT; #ifdef module_param_array if (num_RxMemBlock > bd) { #endif val = RxMemBlock[bd]; atl2_validate_option(&val, &opt); adapter->rxd_ring_size = (u32)val; /* FIXME */ /* ((u16)val)&~1; */ /* even number */ #ifdef module_param_array } else adapter->rxd_ring_size = (u32)opt.def; #endif /* init RXD Flow control value */ adapter->hw.fc_rxd_hi = (adapter->rxd_ring_size / 8) * 7; adapter->hw.fc_rxd_lo = (ATL2_MIN_RXD_COUNT / 8) > (adapter->rxd_ring_size / 12) ? 
(ATL2_MIN_RXD_COUNT / 8) : (adapter->rxd_ring_size / 12); /* Interrupt Moderate Timer */ opt.type = range_option; opt.name = "Interrupt Moderate Timer"; opt.err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT); opt.def = INT_MOD_DEFAULT_CNT; opt.arg.r.min = INT_MOD_MIN_CNT; opt.arg.r.max = INT_MOD_MAX_CNT; #ifdef module_param_array if (num_IntModTimer > bd) { #endif val = IntModTimer[bd]; atl2_validate_option(&val, &opt); adapter->imt = (u16) val; #ifdef module_param_array } else adapter->imt = (u16)(opt.def); #endif /* Flash Vendor */ opt.type = range_option; opt.name = "SPI Flash Vendor"; opt.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT); opt.def = FLASH_VENDOR_DEFAULT; opt.arg.r.min = FLASH_VENDOR_MIN; opt.arg.r.max = FLASH_VENDOR_MAX; #ifdef module_param_array if (num_FlashVendor > bd) { #endif val = FlashVendor[bd]; atl2_validate_option(&val, &opt); adapter->hw.flash_vendor = (u8) val; #ifdef module_param_array } else adapter->hw.flash_vendor = (u8)(opt.def); #endif /* MediaType */ opt.type = range_option; opt.name = "Speed/Duplex Selection"; opt.err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR); opt.def = MEDIA_TYPE_AUTO_SENSOR; opt.arg.r.min = MEDIA_TYPE_AUTO_SENSOR; opt.arg.r.max = MEDIA_TYPE_10M_HALF; #ifdef module_param_array if (num_MediaType > bd) { #endif val = MediaType[bd]; atl2_validate_option(&val, &opt); adapter->hw.MediaType = (u16) val; #ifdef module_param_array } else adapter->hw.MediaType = (u16)(opt.def); #endif }
gpl-2.0
ElysiumRom/android_kernel_samsung_msm8660-common
arch/arm/mach-imx/dma-v1.c
2366
24000
/* * linux/arch/arm/plat-mxc/dma-v1.c * * i.MX DMA registration and IRQ dispatching * * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz> * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de> * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/scatterlist.h> #include <linux/io.h> #include <asm/system.h> #include <asm/irq.h> #include <mach/hardware.h> #include <mach/dma-v1.h> #define DMA_DCR 0x00 /* Control Register */ #define DMA_DISR 0x04 /* Interrupt status Register */ #define DMA_DIMR 0x08 /* Interrupt mask Register */ #define DMA_DBTOSR 0x0c /* Burst timeout status Register */ #define DMA_DRTOSR 0x10 /* Request timeout Register */ #define DMA_DSESR 0x14 /* Transfer Error Status Register */ #define DMA_DBOSR 0x18 /* Buffer overflow status Register */ #define DMA_DBTOCR 0x1c /* Burst timeout control Register */ #define DMA_WSRA 0x40 /* W-Size Register A */ #define DMA_XSRA 0x44 /* X-Size Register A */ #define DMA_YSRA 0x48 /* Y-Size Register A */ #define DMA_WSRB 0x4c /* W-Size Register B */ #define DMA_XSRB 0x50 /* X-Size Register B */ #define DMA_YSRB 0x54 /* Y-Size Register B */ #define 
DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */ #define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */ #define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */ #define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */ #define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */ #define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */ #define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */ #define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */ #define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */ #define DCR_DRST (1<<1) #define DCR_DEN (1<<0) #define DBTOCR_EN (1<<15) #define DBTOCR_CNT(x) ((x) & 0x7fff) #define CNTR_CNT(x) ((x) & 0xffffff) #define CCR_ACRPT (1<<14) #define CCR_DMOD_LINEAR (0x0 << 12) #define CCR_DMOD_2D (0x1 << 12) #define CCR_DMOD_FIFO (0x2 << 12) #define CCR_DMOD_EOBFIFO (0x3 << 12) #define CCR_SMOD_LINEAR (0x0 << 10) #define CCR_SMOD_2D (0x1 << 10) #define CCR_SMOD_FIFO (0x2 << 10) #define CCR_SMOD_EOBFIFO (0x3 << 10) #define CCR_MDIR_DEC (1<<9) #define CCR_MSEL_B (1<<8) #define CCR_DSIZ_32 (0x0 << 6) #define CCR_DSIZ_8 (0x1 << 6) #define CCR_DSIZ_16 (0x2 << 6) #define CCR_SSIZ_32 (0x0 << 4) #define CCR_SSIZ_8 (0x1 << 4) #define CCR_SSIZ_16 (0x2 << 4) #define CCR_REN (1<<3) #define CCR_RPT (1<<2) #define CCR_FRC (1<<1) #define CCR_CEN (1<<0) #define RTOR_EN (1<<15) #define RTOR_CLK (1<<14) #define RTOR_PSC (1<<13) /* * struct imx_dma_channel - i.MX specific DMA extension * @name: name specified by DMA client * @irq_handler: client callback for end of transfer * @err_handler: client callback for error condition * @data: clients context data for callbacks * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE * @sg: pointer to the actual read/written chunk for scatter-gather emulation * @resbytes: total residual number of bytes to transfer * (it can be lower or same as sum of SG mapped chunk sizes) * 
@sgcount: number of chunks to be read/written * * Structure is used for IMX DMA processing. It would be probably good * @struct dma_struct in the future for external interfacing and use * @struct imx_dma_channel only as extension to it. */ struct imx_dma_channel { const char *name; void (*irq_handler) (int, void *); void (*err_handler) (int, void *, int errcode); void (*prog_handler) (int, void *, struct scatterlist *); void *data; unsigned int dma_mode; struct scatterlist *sg; unsigned int resbytes; int dma_num; int in_use; u32 ccr_from_device; u32 ccr_to_device; struct timer_list watchdog; int hw_chaining; }; static void __iomem *imx_dmav1_baseaddr; static void imx_dmav1_writel(unsigned val, unsigned offset) { __raw_writel(val, imx_dmav1_baseaddr + offset); } static unsigned imx_dmav1_readl(unsigned offset) { return __raw_readl(imx_dmav1_baseaddr + offset); } static struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS]; static struct clk *dma_clk; static int imx_dma_hw_chain(struct imx_dma_channel *imxdma) { if (cpu_is_mx27()) return imxdma->hw_chaining; else return 0; } /* * imx_dma_sg_next - prepare next chunk for scatter-gather DMA emulation */ static inline int imx_dma_sg_next(int channel, struct scatterlist *sg) { struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; unsigned long now; if (!imxdma->name) { printk(KERN_CRIT "%s: called for not allocated channel %d\n", __func__, channel); return 0; } now = min(imxdma->resbytes, sg->length); if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP) imxdma->resbytes -= now; if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ) imx_dmav1_writel(sg->dma_address, DMA_DAR(channel)); else imx_dmav1_writel(sg->dma_address, DMA_SAR(channel)); imx_dmav1_writel(now, DMA_CNTR(channel)); pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, " "size 0x%08x\n", channel, imx_dmav1_readl(DMA_DAR(channel)), imx_dmav1_readl(DMA_SAR(channel)), imx_dmav1_readl(DMA_CNTR(channel))); return now; } /** * imx_dma_setup_single - 
setup i.MX DMA channel for linear memory to/from * device transfer * * @channel: i.MX DMA channel number * @dma_address: the DMA/physical memory address of the linear data block * to transfer * @dma_length: length of the data block in bytes * @dev_addr: physical device port address * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory * or %DMA_MODE_WRITE from memory to the device * * Return value: if incorrect parameters are provided -%EINVAL. * Zero indicates success. */ int imx_dma_setup_single(int channel, dma_addr_t dma_address, unsigned int dma_length, unsigned int dev_addr, unsigned int dmamode) { struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; imxdma->sg = NULL; imxdma->dma_mode = dmamode; if (!dma_address) { printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n", channel); return -EINVAL; } if (!dma_length) { printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n", channel); return -EINVAL; } if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) { pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d " "dev_addr=0x%08x for read\n", channel, __func__, (unsigned int)dma_address, dma_length, dev_addr); imx_dmav1_writel(dev_addr, DMA_SAR(channel)); imx_dmav1_writel(dma_address, DMA_DAR(channel)); imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel)); } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) { pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d " "dev_addr=0x%08x for write\n", channel, __func__, (unsigned int)dma_address, dma_length, dev_addr); imx_dmav1_writel(dma_address, DMA_SAR(channel)); imx_dmav1_writel(dev_addr, DMA_DAR(channel)); imx_dmav1_writel(imxdma->ccr_to_device, DMA_CCR(channel)); } else { printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n", channel); return -EINVAL; } imx_dmav1_writel(dma_length, DMA_CNTR(channel)); return 0; } EXPORT_SYMBOL(imx_dma_setup_single); /** * imx_dma_setup_sg - setup i.MX DMA channel SG list to/from device transfer * @channel: i.MX 
DMA channel number * @sg: pointer to the scatter-gather list/vector * @sgcount: scatter-gather list hungs count * @dma_length: total length of the transfer request in bytes * @dev_addr: physical device port address * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory * or %DMA_MODE_WRITE from memory to the device * * The function sets up DMA channel state and registers to be ready for * transfer specified by provided parameters. The scatter-gather emulation * is set up according to the parameters. * * The full preparation of the transfer requires setup of more register * by the caller before imx_dma_enable() can be called. * * %BLR(channel) holds transfer burst length in bytes, 0 means 64 bytes * * %RSSR(channel) has to be set to the DMA request line source %DMA_REQ_xxx * * %CCR(channel) has to specify transfer parameters, the next settings is * typical for linear or simple scatter-gather transfers if %DMA_MODE_READ is * specified * * %CCR_DMOD_LINEAR | %CCR_DSIZ_32 | %CCR_SMOD_FIFO | %CCR_SSIZ_x * * The typical setup for %DMA_MODE_WRITE is specified by next options * combination * * %CCR_SMOD_LINEAR | %CCR_SSIZ_32 | %CCR_DMOD_FIFO | %CCR_DSIZ_x * * Be careful here and do not mistakenly mix source and target device * port sizes constants, they are really different: * %CCR_SSIZ_8, %CCR_SSIZ_16, %CCR_SSIZ_32, * %CCR_DSIZ_8, %CCR_DSIZ_16, %CCR_DSIZ_32 * * Return value: if incorrect parameters are provided -%EINVAL. * Zero indicates success. 
*/ int imx_dma_setup_sg(int channel, struct scatterlist *sg, unsigned int sgcount, unsigned int dma_length, unsigned int dev_addr, unsigned int dmamode) { struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; if (imxdma->in_use) return -EBUSY; imxdma->sg = sg; imxdma->dma_mode = dmamode; imxdma->resbytes = dma_length; if (!sg || !sgcount) { printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n", channel); return -EINVAL; } if (!sg->length) { printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n", channel); return -EINVAL; } if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) { pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d " "dev_addr=0x%08x for read\n", channel, __func__, sg, sgcount, dma_length, dev_addr); imx_dmav1_writel(dev_addr, DMA_SAR(channel)); imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel)); } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) { pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d " "dev_addr=0x%08x for write\n", channel, __func__, sg, sgcount, dma_length, dev_addr); imx_dmav1_writel(dev_addr, DMA_DAR(channel)); imx_dmav1_writel(imxdma->ccr_to_device, DMA_CCR(channel)); } else { printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n", channel); return -EINVAL; } imx_dma_sg_next(channel, sg); return 0; } EXPORT_SYMBOL(imx_dma_setup_sg); int imx_dma_config_channel(int channel, unsigned int config_port, unsigned int config_mem, unsigned int dmareq, int hw_chaining) { struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; u32 dreq = 0; imxdma->hw_chaining = 0; if (hw_chaining) { imxdma->hw_chaining = 1; if (!imx_dma_hw_chain(imxdma)) return -EINVAL; } if (dmareq) dreq = CCR_REN; imxdma->ccr_from_device = config_port | (config_mem << 2) | dreq; imxdma->ccr_to_device = config_mem | (config_port << 2) | dreq; imx_dmav1_writel(dmareq, DMA_RSSR(channel)); return 0; } EXPORT_SYMBOL(imx_dma_config_channel); void imx_dma_config_burstlen(int channel, unsigned int burstlen) { imx_dmav1_writel(burstlen, 
DMA_BLR(channel)); } EXPORT_SYMBOL(imx_dma_config_burstlen); /** * imx_dma_setup_handlers - setup i.MX DMA channel end and error notification * handlers * @channel: i.MX DMA channel number * @irq_handler: the pointer to the function called if the transfer * ends successfully * @err_handler: the pointer to the function called if the premature * end caused by error occurs * @data: user specified value to be passed to the handlers */ int imx_dma_setup_handlers(int channel, void (*irq_handler) (int, void *), void (*err_handler) (int, void *, int), void *data) { struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; unsigned long flags; if (!imxdma->name) { printk(KERN_CRIT "%s: called for not allocated channel %d\n", __func__, channel); return -ENODEV; } local_irq_save(flags); imx_dmav1_writel(1 << channel, DMA_DISR); imxdma->irq_handler = irq_handler; imxdma->err_handler = err_handler; imxdma->data = data; local_irq_restore(flags); return 0; } EXPORT_SYMBOL(imx_dma_setup_handlers); /** * imx_dma_setup_progression_handler - setup i.MX DMA channel progression * handlers * @channel: i.MX DMA channel number * @prog_handler: the pointer to the function called if the transfer progresses */ int imx_dma_setup_progression_handler(int channel, void (*prog_handler) (int, void*, struct scatterlist*)) { struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; unsigned long flags; if (!imxdma->name) { printk(KERN_CRIT "%s: called for not allocated channel %d\n", __func__, channel); return -ENODEV; } local_irq_save(flags); imxdma->prog_handler = prog_handler; local_irq_restore(flags); return 0; } EXPORT_SYMBOL(imx_dma_setup_progression_handler); /** * imx_dma_enable - function to start i.MX DMA channel operation * @channel: i.MX DMA channel number * * The channel has to be allocated by driver through imx_dma_request() * or imx_dma_request_by_prio() function. 
* The transfer parameters has to be set to the channel registers through * call of the imx_dma_setup_single() or imx_dma_setup_sg() function * and registers %BLR(channel), %RSSR(channel) and %CCR(channel) has to * be set prior this function call by the channel user. */ void imx_dma_enable(int channel) { struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; unsigned long flags; pr_debug("imxdma%d: imx_dma_enable\n", channel); if (!imxdma->name) { printk(KERN_CRIT "%s: called for not allocated channel %d\n", __func__, channel); return; } if (imxdma->in_use) return; local_irq_save(flags); imx_dmav1_writel(1 << channel, DMA_DISR); imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR); imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN | CCR_ACRPT, DMA_CCR(channel)); #ifdef CONFIG_ARCH_MX2 if ((cpu_is_mx21() || cpu_is_mx27()) && imxdma->sg && imx_dma_hw_chain(imxdma)) { imxdma->sg = sg_next(imxdma->sg); if (imxdma->sg) { u32 tmp; imx_dma_sg_next(channel, imxdma->sg); tmp = imx_dmav1_readl(DMA_CCR(channel)); imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT, DMA_CCR(channel)); } } #endif imxdma->in_use = 1; local_irq_restore(flags); } EXPORT_SYMBOL(imx_dma_enable); /** * imx_dma_disable - stop, finish i.MX DMA channel operatin * @channel: i.MX DMA channel number */ void imx_dma_disable(int channel) { struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; unsigned long flags; pr_debug("imxdma%d: imx_dma_disable\n", channel); if (imx_dma_hw_chain(imxdma)) del_timer(&imxdma->watchdog); local_irq_save(flags); imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR); imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN, DMA_CCR(channel)); imx_dmav1_writel(1 << channel, DMA_DISR); imxdma->in_use = 0; local_irq_restore(flags); } EXPORT_SYMBOL(imx_dma_disable); #ifdef CONFIG_ARCH_MX2 static void imx_dma_watchdog(unsigned long chno) { struct imx_dma_channel *imxdma = &imx_dma_channels[chno]; imx_dmav1_writel(0, 
DMA_CCR(chno)); imxdma->in_use = 0; imxdma->sg = NULL; if (imxdma->err_handler) imxdma->err_handler(chno, imxdma->data, IMX_DMA_ERR_TIMEOUT); } #endif static irqreturn_t dma_err_handler(int irq, void *dev_id) { int i, disr; struct imx_dma_channel *imxdma; unsigned int err_mask; int errcode; disr = imx_dmav1_readl(DMA_DISR); err_mask = imx_dmav1_readl(DMA_DBTOSR) | imx_dmav1_readl(DMA_DRTOSR) | imx_dmav1_readl(DMA_DSESR) | imx_dmav1_readl(DMA_DBOSR); if (!err_mask) return IRQ_HANDLED; imx_dmav1_writel(disr & err_mask, DMA_DISR); for (i = 0; i < IMX_DMA_CHANNELS; i++) { if (!(err_mask & (1 << i))) continue; imxdma = &imx_dma_channels[i]; errcode = 0; if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) { imx_dmav1_writel(1 << i, DMA_DBTOSR); errcode |= IMX_DMA_ERR_BURST; } if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) { imx_dmav1_writel(1 << i, DMA_DRTOSR); errcode |= IMX_DMA_ERR_REQUEST; } if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) { imx_dmav1_writel(1 << i, DMA_DSESR); errcode |= IMX_DMA_ERR_TRANSFER; } if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) { imx_dmav1_writel(1 << i, DMA_DBOSR); errcode |= IMX_DMA_ERR_BUFFER; } if (imxdma->name && imxdma->err_handler) { imxdma->err_handler(i, imxdma->data, errcode); continue; } imx_dma_channels[i].sg = NULL; printk(KERN_WARNING "DMA timeout on channel %d (%s) -%s%s%s%s\n", i, imxdma->name, errcode & IMX_DMA_ERR_BURST ? " burst" : "", errcode & IMX_DMA_ERR_REQUEST ? " request" : "", errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "", errcode & IMX_DMA_ERR_BUFFER ? " buffer" : ""); } return IRQ_HANDLED; } static void dma_irq_handle_channel(int chno) { struct imx_dma_channel *imxdma = &imx_dma_channels[chno]; if (!imxdma->name) { /* * IRQ for an unregistered DMA channel: * let's clear the interrupts and disable it. 
*/ printk(KERN_WARNING "spurious IRQ for DMA channel %d\n", chno); return; } if (imxdma->sg) { u32 tmp; struct scatterlist *current_sg = imxdma->sg; imxdma->sg = sg_next(imxdma->sg); if (imxdma->sg) { imx_dma_sg_next(chno, imxdma->sg); tmp = imx_dmav1_readl(DMA_CCR(chno)); if (imx_dma_hw_chain(imxdma)) { /* FIXME: The timeout should probably be * configurable */ mod_timer(&imxdma->watchdog, jiffies + msecs_to_jiffies(500)); tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT; imx_dmav1_writel(tmp, DMA_CCR(chno)); } else { imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno)); tmp |= CCR_CEN; } imx_dmav1_writel(tmp, DMA_CCR(chno)); if (imxdma->prog_handler) imxdma->prog_handler(chno, imxdma->data, current_sg); return; } if (imx_dma_hw_chain(imxdma)) { del_timer(&imxdma->watchdog); return; } } imx_dmav1_writel(0, DMA_CCR(chno)); imxdma->in_use = 0; if (imxdma->irq_handler) imxdma->irq_handler(chno, imxdma->data); } static irqreturn_t dma_irq_handler(int irq, void *dev_id) { int i, disr; #ifdef CONFIG_ARCH_MX2 if (cpu_is_mx21() || cpu_is_mx27()) dma_err_handler(irq, dev_id); #endif disr = imx_dmav1_readl(DMA_DISR); pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n", disr); imx_dmav1_writel(disr, DMA_DISR); for (i = 0; i < IMX_DMA_CHANNELS; i++) { if (disr & (1 << i)) dma_irq_handle_channel(i); } return IRQ_HANDLED; } /** * imx_dma_request - request/allocate specified channel number * @channel: i.MX DMA channel number * @name: the driver/caller own non-%NULL identification */ int imx_dma_request(int channel, const char *name) { struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; unsigned long flags; int ret = 0; /* basic sanity checks */ if (!name) return -EINVAL; if (channel >= IMX_DMA_CHANNELS) { printk(KERN_CRIT "%s: called for non-existed channel %d\n", __func__, channel); return -EINVAL; } local_irq_save(flags); if (imxdma->name) { local_irq_restore(flags); return -EBUSY; } memset(imxdma, 0, sizeof(*imxdma)); imxdma->name = name; local_irq_restore(flags); /* 
request_irq() can block */ #ifdef CONFIG_ARCH_MX2 if (cpu_is_mx21() || cpu_is_mx27()) { ret = request_irq(MX2x_INT_DMACH0 + channel, dma_irq_handler, 0, "DMA", NULL); if (ret) { imxdma->name = NULL; pr_crit("Can't register IRQ %d for DMA channel %d\n", MX2x_INT_DMACH0 + channel, channel); return ret; } init_timer(&imxdma->watchdog); imxdma->watchdog.function = &imx_dma_watchdog; imxdma->watchdog.data = channel; } #endif return ret; } EXPORT_SYMBOL(imx_dma_request); /** * imx_dma_free - release previously acquired channel * @channel: i.MX DMA channel number */ void imx_dma_free(int channel) { unsigned long flags; struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; if (!imxdma->name) { printk(KERN_CRIT "%s: trying to free free channel %d\n", __func__, channel); return; } local_irq_save(flags); /* Disable interrupts */ imx_dma_disable(channel); imxdma->name = NULL; #ifdef CONFIG_ARCH_MX2 if (cpu_is_mx21() || cpu_is_mx27()) free_irq(MX2x_INT_DMACH0 + channel, NULL); #endif local_irq_restore(flags); } EXPORT_SYMBOL(imx_dma_free); /** * imx_dma_request_by_prio - find and request some of free channels best * suiting requested priority * @channel: i.MX DMA channel number * @name: the driver/caller own non-%NULL identification * * This function tries to find a free channel in the specified priority group * if the priority cannot be achieved it tries to look for free channel * in the higher and then even lower priority groups. * * Return value: If there is no free channel to allocate, -%ENODEV is returned. * On successful allocation channel is returned. 
*/ int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio) { int i; int best; switch (prio) { case (DMA_PRIO_HIGH): best = 8; break; case (DMA_PRIO_MEDIUM): best = 4; break; case (DMA_PRIO_LOW): default: best = 0; break; } for (i = best; i < IMX_DMA_CHANNELS; i++) if (!imx_dma_request(i, name)) return i; for (i = best - 1; i >= 0; i--) if (!imx_dma_request(i, name)) return i; printk(KERN_ERR "%s: no free DMA channel found\n", __func__); return -ENODEV; } EXPORT_SYMBOL(imx_dma_request_by_prio); static int __init imx_dma_init(void) { int ret = 0; int i; #ifdef CONFIG_ARCH_MX1 if (cpu_is_mx1()) imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR); else #endif #ifdef CONFIG_MACH_MX21 if (cpu_is_mx21()) imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR); else #endif #ifdef CONFIG_MACH_MX27 if (cpu_is_mx27()) imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR); else #endif return 0; dma_clk = clk_get(NULL, "dma"); if (IS_ERR(dma_clk)) return PTR_ERR(dma_clk); clk_enable(dma_clk); /* reset DMA module */ imx_dmav1_writel(DCR_DRST, DMA_DCR); #ifdef CONFIG_ARCH_MX1 if (cpu_is_mx1()) { ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", NULL); if (ret) { pr_crit("Wow! Can't register IRQ for DMA\n"); return ret; } ret = request_irq(MX1_DMA_ERR, dma_err_handler, 0, "DMA", NULL); if (ret) { pr_crit("Wow! Can't register ERRIRQ for DMA\n"); free_irq(MX1_DMA_INT, NULL); return ret; } } #endif /* enable DMA module */ imx_dmav1_writel(DCR_DEN, DMA_DCR); /* clear all interrupts */ imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR); /* disable interrupts */ imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR); for (i = 0; i < IMX_DMA_CHANNELS; i++) { imx_dma_channels[i].sg = NULL; imx_dma_channels[i].dma_num = i; } return ret; } arch_initcall(imx_dma_init);
gpl-2.0
neumeika/kernel_hws10101l
kernel/trace/trace_kprobe.c
2366
49346
/* * Kprobes-based tracing events * * Created by Masami Hiramatsu <mhiramat@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/uaccess.h> #include <linux/kprobes.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/debugfs.h> #include <linux/types.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/ptrace.h> #include <linux/perf_event.h> #include <linux/stringify.h> #include <linux/limits.h> #include <asm/bitsperlong.h> #include "trace.h" #include "trace_output.h" #define MAX_TRACE_ARGS 128 #define MAX_ARGSTR_LEN 63 #define MAX_EVENT_NAME_LEN 64 #define MAX_STRING_SIZE PATH_MAX #define KPROBE_EVENT_SYSTEM "kprobes" /* Reserved field names */ #define FIELD_STRING_IP "__probe_ip" #define FIELD_STRING_RETIP "__probe_ret_ip" #define FIELD_STRING_FUNC "__probe_func" const char *reserved_field_names[] = { "common_type", "common_flags", "common_preempt_count", "common_pid", "common_tgid", FIELD_STRING_IP, FIELD_STRING_RETIP, FIELD_STRING_FUNC, }; /* Printing function type */ typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *, void *); #define PRINT_TYPE_FUNC_NAME(type) print_type_##type #define PRINT_TYPE_FMT_NAME(type) print_type_format_##type /* Printing in basic type function template */ #define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast) \ static __kprobes 
int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ const char *name, \ void *data, void *ent)\ { \ return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\ } \ static const char PRINT_TYPE_FMT_NAME(type)[] = fmt; DEFINE_BASIC_PRINT_TYPE_FUNC(u8, "%x", unsigned int) DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "%x", unsigned int) DEFINE_BASIC_PRINT_TYPE_FUNC(u32, "%lx", unsigned long) DEFINE_BASIC_PRINT_TYPE_FUNC(u64, "%llx", unsigned long long) DEFINE_BASIC_PRINT_TYPE_FUNC(s8, "%d", int) DEFINE_BASIC_PRINT_TYPE_FUNC(s16, "%d", int) DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long) DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long) /* data_rloc: data relative location, compatible with u32 */ #define make_data_rloc(len, roffs) \ (((u32)(len) << 16) | ((u32)(roffs) & 0xffff)) #define get_rloc_len(dl) ((u32)(dl) >> 16) #define get_rloc_offs(dl) ((u32)(dl) & 0xffff) static inline void *get_rloc_data(u32 *dl) { return (u8 *)dl + get_rloc_offs(*dl); } /* For data_loc conversion */ static inline void *get_loc_data(u32 *dl, void *ent) { return (u8 *)ent + get_rloc_offs(*dl); } /* * Convert data_rloc to data_loc: * data_rloc stores the offset from data_rloc itself, but data_loc * stores the offset from event entry. 
*/ #define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs)) /* For defining macros, define string/string_size types */ typedef u32 string; typedef u32 string_size; /* Print type function for string type */ static __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, const char *name, void *data, void *ent) { int len = *(u32 *)data >> 16; if (!len) return trace_seq_printf(s, " %s=(fault)", name); else return trace_seq_printf(s, " %s=\"%s\"", name, (const char *)get_loc_data(data, ent)); } static const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; /* Data fetch function type */ typedef void (*fetch_func_t)(struct pt_regs *, void *, void *); struct fetch_param { fetch_func_t fn; void *data; }; static __kprobes void call_fetch(struct fetch_param *fprm, struct pt_regs *regs, void *dest) { return fprm->fn(regs, fprm->data, dest); } #define FETCH_FUNC_NAME(method, type) fetch_##method##_##type /* * Define macro for basic types - we don't need to define s* types, because * we have to care only about bitwidth at recording time. 
*/ #define DEFINE_BASIC_FETCH_FUNCS(method) \ DEFINE_FETCH_##method(u8) \ DEFINE_FETCH_##method(u16) \ DEFINE_FETCH_##method(u32) \ DEFINE_FETCH_##method(u64) #define CHECK_FETCH_FUNCS(method, fn) \ (((FETCH_FUNC_NAME(method, u8) == fn) || \ (FETCH_FUNC_NAME(method, u16) == fn) || \ (FETCH_FUNC_NAME(method, u32) == fn) || \ (FETCH_FUNC_NAME(method, u64) == fn) || \ (FETCH_FUNC_NAME(method, string) == fn) || \ (FETCH_FUNC_NAME(method, string_size) == fn)) \ && (fn != NULL)) /* Data fetch function templates */ #define DEFINE_FETCH_reg(type) \ static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \ void *offset, void *dest) \ { \ *(type *)dest = (type)regs_get_register(regs, \ (unsigned int)((unsigned long)offset)); \ } DEFINE_BASIC_FETCH_FUNCS(reg) /* No string on the register */ #define fetch_reg_string NULL #define fetch_reg_string_size NULL #define DEFINE_FETCH_stack(type) \ static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ void *offset, void *dest) \ { \ *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \ (unsigned int)((unsigned long)offset)); \ } DEFINE_BASIC_FETCH_FUNCS(stack) /* No string on the stack entry */ #define fetch_stack_string NULL #define fetch_stack_string_size NULL #define DEFINE_FETCH_retval(type) \ static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\ void *dummy, void *dest) \ { \ *(type *)dest = (type)regs_return_value(regs); \ } DEFINE_BASIC_FETCH_FUNCS(retval) /* No string on the retval */ #define fetch_retval_string NULL #define fetch_retval_string_size NULL #define DEFINE_FETCH_memory(type) \ static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ void *addr, void *dest) \ { \ type retval; \ if (probe_kernel_address(addr, retval)) \ *(type *)dest = 0; \ else \ *(type *)dest = retval; \ } DEFINE_BASIC_FETCH_FUNCS(memory) /* * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max * length and relative data location. 
*/ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, void *addr, void *dest) { long ret; int maxlen = get_rloc_len(*(u32 *)dest); u8 *dst = get_rloc_data(dest); u8 *src = addr; mm_segment_t old_fs = get_fs(); if (!maxlen) return; /* * Try to get string again, since the string can be changed while * probing. */ set_fs(KERNEL_DS); pagefault_disable(); do ret = __copy_from_user_inatomic(dst++, src++, 1); while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen); dst[-1] = '\0'; pagefault_enable(); set_fs(old_fs); if (ret < 0) { /* Failed to fetch string */ ((u8 *)get_rloc_data(dest))[0] = '\0'; *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest)); } else *(u32 *)dest = make_data_rloc(src - (u8 *)addr, get_rloc_offs(*(u32 *)dest)); } /* Return the length of string -- including null terminal byte */ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, void *addr, void *dest) { int ret, len = 0; u8 c; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); pagefault_disable(); do { ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); len++; } while (c && ret == 0 && len < MAX_STRING_SIZE); pagefault_enable(); set_fs(old_fs); if (ret < 0) /* Failed to check the length */ *(u32 *)dest = 0; else *(u32 *)dest = len; } /* Memory fetching by symbol */ struct symbol_cache { char *symbol; long offset; unsigned long addr; }; static unsigned long update_symbol_cache(struct symbol_cache *sc) { sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol); if (sc->addr) sc->addr += sc->offset; return sc->addr; } static void free_symbol_cache(struct symbol_cache *sc) { kfree(sc->symbol); kfree(sc); } static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset) { struct symbol_cache *sc; if (!sym || strlen(sym) == 0) return NULL; sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL); if (!sc) return NULL; sc->symbol = kstrdup(sym, GFP_KERNEL); if (!sc->symbol) { kfree(sc); return NULL; } sc->offset = offset; 
update_symbol_cache(sc); return sc; } #define DEFINE_FETCH_symbol(type) \ static __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,\ void *data, void *dest) \ { \ struct symbol_cache *sc = data; \ if (sc->addr) \ fetch_memory_##type(regs, (void *)sc->addr, dest); \ else \ *(type *)dest = 0; \ } DEFINE_BASIC_FETCH_FUNCS(symbol) DEFINE_FETCH_symbol(string) DEFINE_FETCH_symbol(string_size) /* Dereference memory access function */ struct deref_fetch_param { struct fetch_param orig; long offset; }; #define DEFINE_FETCH_deref(type) \ static __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs,\ void *data, void *dest) \ { \ struct deref_fetch_param *dprm = data; \ unsigned long addr; \ call_fetch(&dprm->orig, regs, &addr); \ if (addr) { \ addr += dprm->offset; \ fetch_memory_##type(regs, (void *)addr, dest); \ } else \ *(type *)dest = 0; \ } DEFINE_BASIC_FETCH_FUNCS(deref) DEFINE_FETCH_deref(string) DEFINE_FETCH_deref(string_size) static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) { if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) free_deref_fetch_param(data->orig.data); else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) free_symbol_cache(data->orig.data); kfree(data); } /* Bitfield fetch function */ struct bitfield_fetch_param { struct fetch_param orig; unsigned char hi_shift; unsigned char low_shift; }; #define DEFINE_FETCH_bitfield(type) \ static __kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs,\ void *data, void *dest) \ { \ struct bitfield_fetch_param *bprm = data; \ type buf = 0; \ call_fetch(&bprm->orig, regs, &buf); \ if (buf) { \ buf <<= bprm->hi_shift; \ buf >>= bprm->low_shift; \ } \ *(type *)dest = buf; \ } DEFINE_BASIC_FETCH_FUNCS(bitfield) #define fetch_bitfield_string NULL #define fetch_bitfield_string_size NULL static __kprobes void free_bitfield_fetch_param(struct bitfield_fetch_param *data) { /* * Don't check the bitfield itself, because this must be the * last fetch function. 
*/ if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) free_deref_fetch_param(data->orig.data); else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) free_symbol_cache(data->orig.data); kfree(data); } /* Default (unsigned long) fetch type */ #define __DEFAULT_FETCH_TYPE(t) u##t #define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t) #define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG) #define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE) /* Fetch types */ enum { FETCH_MTD_reg = 0, FETCH_MTD_stack, FETCH_MTD_retval, FETCH_MTD_memory, FETCH_MTD_symbol, FETCH_MTD_deref, FETCH_MTD_bitfield, FETCH_MTD_END, }; #define ASSIGN_FETCH_FUNC(method, type) \ [FETCH_MTD_##method] = FETCH_FUNC_NAME(method, type) #define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \ {.name = _name, \ .size = _size, \ .is_signed = sign, \ .print = PRINT_TYPE_FUNC_NAME(ptype), \ .fmt = PRINT_TYPE_FMT_NAME(ptype), \ .fmttype = _fmttype, \ .fetch = { \ ASSIGN_FETCH_FUNC(reg, ftype), \ ASSIGN_FETCH_FUNC(stack, ftype), \ ASSIGN_FETCH_FUNC(retval, ftype), \ ASSIGN_FETCH_FUNC(memory, ftype), \ ASSIGN_FETCH_FUNC(symbol, ftype), \ ASSIGN_FETCH_FUNC(deref, ftype), \ ASSIGN_FETCH_FUNC(bitfield, ftype), \ } \ } #define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \ __ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #ptype) #define FETCH_TYPE_STRING 0 #define FETCH_TYPE_STRSIZE 1 /* Fetch type information table */ static const struct fetch_type { const char *name; /* Name of type */ size_t size; /* Byte size of type */ int is_signed; /* Signed flag */ print_type_func_t print; /* Print functions */ const char *fmt; /* Fromat string */ const char *fmttype; /* Name in format file */ /* Fetch functions */ fetch_func_t fetch[FETCH_MTD_END]; } fetch_type_table[] = { /* Special types */ [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, sizeof(u32), 1, "__data_loc char[]"), [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32, string_size, sizeof(u32), 0, 
"u32"), /* Basic types */ ASSIGN_FETCH_TYPE(u8, u8, 0), ASSIGN_FETCH_TYPE(u16, u16, 0), ASSIGN_FETCH_TYPE(u32, u32, 0), ASSIGN_FETCH_TYPE(u64, u64, 0), ASSIGN_FETCH_TYPE(s8, u8, 1), ASSIGN_FETCH_TYPE(s16, u16, 1), ASSIGN_FETCH_TYPE(s32, u32, 1), ASSIGN_FETCH_TYPE(s64, u64, 1), }; static const struct fetch_type *find_fetch_type(const char *type) { int i; if (!type) type = DEFAULT_FETCH_TYPE_STR; /* Special case: bitfield */ if (*type == 'b') { unsigned long bs; type = strchr(type, '/'); if (!type) goto fail; type++; if (strict_strtoul(type, 0, &bs)) goto fail; switch (bs) { case 8: return find_fetch_type("u8"); case 16: return find_fetch_type("u16"); case 32: return find_fetch_type("u32"); case 64: return find_fetch_type("u64"); default: goto fail; } } for (i = 0; i < ARRAY_SIZE(fetch_type_table); i++) if (strcmp(type, fetch_type_table[i].name) == 0) return &fetch_type_table[i]; fail: return NULL; } /* Special function : only accept unsigned long */ static __kprobes void fetch_stack_address(struct pt_regs *regs, void *dummy, void *dest) { *(unsigned long *)dest = kernel_stack_pointer(regs); } static fetch_func_t get_fetch_size_function(const struct fetch_type *type, fetch_func_t orig_fn) { int i; if (type != &fetch_type_table[FETCH_TYPE_STRING]) return NULL; /* Only string type needs size function */ for (i = 0; i < FETCH_MTD_END; i++) if (type->fetch[i] == orig_fn) return fetch_type_table[FETCH_TYPE_STRSIZE].fetch[i]; WARN_ON(1); /* This should not happen */ return NULL; } /** * Kprobe event core functions */ struct probe_arg { struct fetch_param fetch; struct fetch_param fetch_size; unsigned int offset; /* Offset from argument entry */ const char *name; /* Name of this argument */ const char *comm; /* Command of this argument */ const struct fetch_type *type; /* Type of this argument */ }; /* Flags for trace_probe */ #define TP_FLAG_TRACE 1 #define TP_FLAG_PROFILE 2 struct trace_probe { struct list_head list; struct kretprobe rp; /* Use rp.kp for kprobe use */ 
unsigned long nhit; unsigned int flags; /* For TP_FLAG_* */ const char *symbol; /* symbol name */ struct ftrace_event_class class; struct ftrace_event_call call; ssize_t size; /* trace entry size */ unsigned int nr_args; struct probe_arg args[]; }; #define SIZEOF_TRACE_PROBE(n) \ (offsetof(struct trace_probe, args) + \ (sizeof(struct probe_arg) * (n))) static __kprobes int probe_is_return(struct trace_probe *tp) { return tp->rp.handler != NULL; } static __kprobes const char *probe_symbol(struct trace_probe *tp) { return tp->symbol ? tp->symbol : "unknown"; } static int register_probe_event(struct trace_probe *tp); static void unregister_probe_event(struct trace_probe *tp); static DEFINE_MUTEX(probe_lock); static LIST_HEAD(probe_list); static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); static int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs); /* Check the name is good for event/group/fields */ static int is_good_name(const char *name) { if (!isalpha(*name) && *name != '_') return 0; while (*++name != '\0') { if (!isalpha(*name) && !isdigit(*name) && *name != '_') return 0; } return 1; } /* * Allocate new trace_probe and initialize it (including kprobes). 
*/ static struct trace_probe *alloc_trace_probe(const char *group, const char *event, void *addr, const char *symbol, unsigned long offs, int nargs, int is_return) { struct trace_probe *tp; int ret = -ENOMEM; tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL); if (!tp) return ERR_PTR(ret); if (symbol) { tp->symbol = kstrdup(symbol, GFP_KERNEL); if (!tp->symbol) goto error; tp->rp.kp.symbol_name = tp->symbol; tp->rp.kp.offset = offs; } else tp->rp.kp.addr = addr; if (is_return) tp->rp.handler = kretprobe_dispatcher; else tp->rp.kp.pre_handler = kprobe_dispatcher; if (!event || !is_good_name(event)) { ret = -EINVAL; goto error; } tp->call.class = &tp->class; tp->call.name = kstrdup(event, GFP_KERNEL); if (!tp->call.name) goto error; if (!group || !is_good_name(group)) { ret = -EINVAL; goto error; } tp->class.system = kstrdup(group, GFP_KERNEL); if (!tp->class.system) goto error; INIT_LIST_HEAD(&tp->list); return tp; error: kfree(tp->call.name); kfree(tp->symbol); kfree(tp); return ERR_PTR(ret); } static void free_probe_arg(struct probe_arg *arg) { if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn)) free_bitfield_fetch_param(arg->fetch.data); else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn)) free_deref_fetch_param(arg->fetch.data); else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn)) free_symbol_cache(arg->fetch.data); kfree(arg->name); kfree(arg->comm); } static void free_trace_probe(struct trace_probe *tp) { int i; for (i = 0; i < tp->nr_args; i++) free_probe_arg(&tp->args[i]); kfree(tp->call.class->system); kfree(tp->call.name); kfree(tp->symbol); kfree(tp); } static struct trace_probe *find_probe_event(const char *event, const char *group) { struct trace_probe *tp; list_for_each_entry(tp, &probe_list, list) if (strcmp(tp->call.name, event) == 0 && strcmp(tp->call.class->system, group) == 0) return tp; return NULL; } /* Unregister a trace_probe and probe_event: call with locking probe_lock */ static void unregister_trace_probe(struct trace_probe *tp) { if 
(probe_is_return(tp)) unregister_kretprobe(&tp->rp); else unregister_kprobe(&tp->rp.kp); list_del(&tp->list); unregister_probe_event(tp); } /* Register a trace_probe and probe_event */ static int register_trace_probe(struct trace_probe *tp) { struct trace_probe *old_tp; int ret; mutex_lock(&probe_lock); /* register as an event */ old_tp = find_probe_event(tp->call.name, tp->call.class->system); if (old_tp) { /* delete old event */ unregister_trace_probe(old_tp); free_trace_probe(old_tp); } ret = register_probe_event(tp); if (ret) { pr_warning("Failed to register probe event(%d)\n", ret); goto end; } tp->rp.kp.flags |= KPROBE_FLAG_DISABLED; if (probe_is_return(tp)) ret = register_kretprobe(&tp->rp); else ret = register_kprobe(&tp->rp.kp); if (ret) { pr_warning("Could not insert probe(%d)\n", ret); if (ret == -EILSEQ) { pr_warning("Probing address(0x%p) is not an " "instruction boundary.\n", tp->rp.kp.addr); ret = -EINVAL; } unregister_probe_event(tp); } else list_add_tail(&tp->list, &probe_list); end: mutex_unlock(&probe_lock); return ret; } /* Split symbol and offset. 
*/ static int split_symbol_offset(char *symbol, unsigned long *offset) { char *tmp; int ret; if (!offset) return -EINVAL; tmp = strchr(symbol, '+'); if (tmp) { /* skip sign because strict_strtol doesn't accept '+' */ ret = strict_strtoul(tmp + 1, 0, offset); if (ret) return ret; *tmp = '\0'; } else *offset = 0; return 0; } #define PARAM_MAX_ARGS 16 #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) static int parse_probe_vars(char *arg, const struct fetch_type *t, struct fetch_param *f, int is_return) { int ret = 0; unsigned long param; if (strcmp(arg, "retval") == 0) { if (is_return) f->fn = t->fetch[FETCH_MTD_retval]; else ret = -EINVAL; } else if (strncmp(arg, "stack", 5) == 0) { if (arg[5] == '\0') { if (strcmp(t->name, DEFAULT_FETCH_TYPE_STR) == 0) f->fn = fetch_stack_address; else ret = -EINVAL; } else if (isdigit(arg[5])) { ret = strict_strtoul(arg + 5, 10, &param); if (ret || param > PARAM_MAX_STACK) ret = -EINVAL; else { f->fn = t->fetch[FETCH_MTD_stack]; f->data = (void *)param; } } else ret = -EINVAL; } else ret = -EINVAL; return ret; } /* Recursive argument parser */ static int __parse_probe_arg(char *arg, const struct fetch_type *t, struct fetch_param *f, int is_return) { int ret = 0; unsigned long param; long offset; char *tmp; switch (arg[0]) { case '$': ret = parse_probe_vars(arg + 1, t, f, is_return); break; case '%': /* named register */ ret = regs_query_register_offset(arg + 1); if (ret >= 0) { f->fn = t->fetch[FETCH_MTD_reg]; f->data = (void *)(unsigned long)ret; ret = 0; } break; case '@': /* memory or symbol */ if (isdigit(arg[1])) { ret = strict_strtoul(arg + 1, 0, &param); if (ret) break; f->fn = t->fetch[FETCH_MTD_memory]; f->data = (void *)param; } else { ret = split_symbol_offset(arg + 1, &offset); if (ret) break; f->data = alloc_symbol_cache(arg + 1, offset); if (f->data) f->fn = t->fetch[FETCH_MTD_symbol]; } break; case '+': /* deref memory */ arg++; /* Skip '+', because strict_strtol() rejects it. 
*/ case '-': tmp = strchr(arg, '('); if (!tmp) break; *tmp = '\0'; ret = strict_strtol(arg, 0, &offset); if (ret) break; arg = tmp + 1; tmp = strrchr(arg, ')'); if (tmp) { struct deref_fetch_param *dprm; const struct fetch_type *t2 = find_fetch_type(NULL); *tmp = '\0'; dprm = kzalloc(sizeof(struct deref_fetch_param), GFP_KERNEL); if (!dprm) return -ENOMEM; dprm->offset = offset; ret = __parse_probe_arg(arg, t2, &dprm->orig, is_return); if (ret) kfree(dprm); else { f->fn = t->fetch[FETCH_MTD_deref]; f->data = (void *)dprm; } } break; } if (!ret && !f->fn) { /* Parsed, but do not find fetch method */ pr_info("%s type has no corresponding fetch method.\n", t->name); ret = -EINVAL; } return ret; } #define BYTES_TO_BITS(nb) ((BITS_PER_LONG * (nb)) / sizeof(long)) /* Bitfield type needs to be parsed into a fetch function */ static int __parse_bitfield_probe_arg(const char *bf, const struct fetch_type *t, struct fetch_param *f) { struct bitfield_fetch_param *bprm; unsigned long bw, bo; char *tail; if (*bf != 'b') return 0; bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); if (!bprm) return -ENOMEM; bprm->orig = *f; f->fn = t->fetch[FETCH_MTD_bitfield]; f->data = (void *)bprm; bw = simple_strtoul(bf + 1, &tail, 0); /* Use simple one */ if (bw == 0 || *tail != '@') return -EINVAL; bf = tail + 1; bo = simple_strtoul(bf, &tail, 0); if (tail == bf || *tail != '/') return -EINVAL; bprm->hi_shift = BYTES_TO_BITS(t->size) - (bw + bo); bprm->low_shift = bprm->hi_shift + bo; return (BYTES_TO_BITS(t->size) < (bw + bo)) ? 
-EINVAL : 0; } /* String length checking wrapper */ static int parse_probe_arg(char *arg, struct trace_probe *tp, struct probe_arg *parg, int is_return) { const char *t; int ret; if (strlen(arg) > MAX_ARGSTR_LEN) { pr_info("Argument is too long.: %s\n", arg); return -ENOSPC; } parg->comm = kstrdup(arg, GFP_KERNEL); if (!parg->comm) { pr_info("Failed to allocate memory for command '%s'.\n", arg); return -ENOMEM; } t = strchr(parg->comm, ':'); if (t) { arg[t - parg->comm] = '\0'; t++; } parg->type = find_fetch_type(t); if (!parg->type) { pr_info("Unsupported type: %s\n", t); return -EINVAL; } parg->offset = tp->size; tp->size += parg->type->size; ret = __parse_probe_arg(arg, parg->type, &parg->fetch, is_return); if (ret >= 0 && t != NULL) ret = __parse_bitfield_probe_arg(t, parg->type, &parg->fetch); if (ret >= 0) { parg->fetch_size.fn = get_fetch_size_function(parg->type, parg->fetch.fn); parg->fetch_size.data = parg->fetch.data; } return ret; } /* Return 1 if name is reserved or already used by another argument */ static int conflict_field_name(const char *name, struct probe_arg *args, int narg) { int i; for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++) if (strcmp(reserved_field_names[i], name) == 0) return 1; for (i = 0; i < narg; i++) if (strcmp(args[i].name, name) == 0) return 1; return 0; } static int create_trace_probe(int argc, char **argv) { /* * Argument syntax: * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS] * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS] * Fetch args: * $retval : fetch return value * $stack : fetch stack address * $stackN : fetch Nth of stack (N:0-) * @ADDR : fetch memory at ADDR (ADDR should be in kernel) * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) * %REG : fetch register REG * Dereferencing memory fetch: * +|-offs(ARG) : fetch memory at ARG +|- offs address. * Alias name of args: * NAME=FETCHARG : set NAME as alias of FETCHARG. 
* Type of args: * FETCHARG:TYPE : use TYPE instead of unsigned long. */ struct trace_probe *tp; int i, ret = 0; int is_return = 0, is_delete = 0; char *symbol = NULL, *event = NULL, *group = NULL; char *arg; unsigned long offset = 0; void *addr = NULL; char buf[MAX_EVENT_NAME_LEN]; /* argc must be >= 1 */ if (argv[0][0] == 'p') is_return = 0; else if (argv[0][0] == 'r') is_return = 1; else if (argv[0][0] == '-') is_delete = 1; else { pr_info("Probe definition must be started with 'p', 'r' or" " '-'.\n"); return -EINVAL; } if (argv[0][1] == ':') { event = &argv[0][2]; if (strchr(event, '/')) { group = event; event = strchr(group, '/') + 1; event[-1] = '\0'; if (strlen(group) == 0) { pr_info("Group name is not specified\n"); return -EINVAL; } } if (strlen(event) == 0) { pr_info("Event name is not specified\n"); return -EINVAL; } } if (!group) group = KPROBE_EVENT_SYSTEM; if (is_delete) { if (!event) { pr_info("Delete command needs an event name.\n"); return -EINVAL; } mutex_lock(&probe_lock); tp = find_probe_event(event, group); if (!tp) { mutex_unlock(&probe_lock); pr_info("Event %s/%s doesn't exist.\n", group, event); return -ENOENT; } /* delete an event */ unregister_trace_probe(tp); free_trace_probe(tp); mutex_unlock(&probe_lock); return 0; } if (argc < 2) { pr_info("Probe point is not specified.\n"); return -EINVAL; } if (isdigit(argv[1][0])) { if (is_return) { pr_info("Return probe point must be a symbol.\n"); return -EINVAL; } /* an address specified */ ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr); if (ret) { pr_info("Failed to parse address.\n"); return ret; } } else { /* a symbol specified */ symbol = argv[1]; /* TODO: support .init module functions */ ret = split_symbol_offset(symbol, &offset); if (ret) { pr_info("Failed to parse symbol.\n"); return ret; } if (offset && is_return) { pr_info("Return probe must be used without offset.\n"); return -EINVAL; } } argc -= 2; argv += 2; /* setup a probe */ if (!event) { /* Make a new event name */ 
if (symbol) snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld", is_return ? 'r' : 'p', symbol, offset); else snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p", is_return ? 'r' : 'p', addr); event = buf; } tp = alloc_trace_probe(group, event, addr, symbol, offset, argc, is_return); if (IS_ERR(tp)) { pr_info("Failed to allocate trace_probe.(%d)\n", (int)PTR_ERR(tp)); return PTR_ERR(tp); } /* parse arguments */ ret = 0; for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { /* Increment count for freeing args in error case */ tp->nr_args++; /* Parse argument name */ arg = strchr(argv[i], '='); if (arg) { *arg++ = '\0'; tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); } else { arg = argv[i]; /* If argument name is omitted, set "argN" */ snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); tp->args[i].name = kstrdup(buf, GFP_KERNEL); } if (!tp->args[i].name) { pr_info("Failed to allocate argument[%d] name.\n", i); ret = -ENOMEM; goto error; } if (!is_good_name(tp->args[i].name)) { pr_info("Invalid argument[%d] name: %s\n", i, tp->args[i].name); ret = -EINVAL; goto error; } if (conflict_field_name(tp->args[i].name, tp->args, i)) { pr_info("Argument[%d] name '%s' conflicts with " "another field.\n", i, argv[i]); ret = -EINVAL; goto error; } /* Parse fetch argument */ ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); if (ret) { pr_info("Parse error at argument[%d]. 
(%d)\n", i, ret); goto error; } } ret = register_trace_probe(tp); if (ret) goto error; return 0; error: free_trace_probe(tp); return ret; } static void cleanup_all_probes(void) { struct trace_probe *tp; mutex_lock(&probe_lock); /* TODO: Use batch unregistration */ while (!list_empty(&probe_list)) { tp = list_entry(probe_list.next, struct trace_probe, list); unregister_trace_probe(tp); free_trace_probe(tp); } mutex_unlock(&probe_lock); } /* Probes listing interfaces */ static void *probes_seq_start(struct seq_file *m, loff_t *pos) { mutex_lock(&probe_lock); return seq_list_start(&probe_list, *pos); } static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos) { return seq_list_next(v, &probe_list, pos); } static void probes_seq_stop(struct seq_file *m, void *v) { mutex_unlock(&probe_lock); } static int probes_seq_show(struct seq_file *m, void *v) { struct trace_probe *tp = v; int i; seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p'); seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name); if (!tp->symbol) seq_printf(m, " 0x%p", tp->rp.kp.addr); else if (tp->rp.kp.offset) seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset); else seq_printf(m, " %s", probe_symbol(tp)); for (i = 0; i < tp->nr_args; i++) seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm); seq_printf(m, "\n"); return 0; } static const struct seq_operations probes_seq_op = { .start = probes_seq_start, .next = probes_seq_next, .stop = probes_seq_stop, .show = probes_seq_show }; static int probes_open(struct inode *inode, struct file *file) { if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) cleanup_all_probes(); return seq_open(file, &probes_seq_op); } static int command_trace_probe(const char *buf) { char **argv; int argc = 0, ret = 0; argv = argv_split(GFP_KERNEL, buf, &argc); if (!argv) return -ENOMEM; if (argc) ret = create_trace_probe(argc, argv); argv_free(argv); return ret; } #define WRITE_BUFSIZE 4096 static ssize_t probes_write(struct file *file, 
const char __user *buffer, size_t count, loff_t *ppos) { char *kbuf, *tmp; int ret; size_t done; size_t size; kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL); if (!kbuf) return -ENOMEM; ret = done = 0; while (done < count) { size = count - done; if (size >= WRITE_BUFSIZE) size = WRITE_BUFSIZE - 1; if (copy_from_user(kbuf, buffer + done, size)) { ret = -EFAULT; goto out; } kbuf[size] = '\0'; tmp = strchr(kbuf, '\n'); if (tmp) { *tmp = '\0'; size = tmp - kbuf + 1; } else if (done + size < count) { pr_warning("Line length is too long: " "Should be less than %d.", WRITE_BUFSIZE); ret = -EINVAL; goto out; } done += size; /* Remove comments */ tmp = strchr(kbuf, '#'); if (tmp) *tmp = '\0'; ret = command_trace_probe(kbuf); if (ret) goto out; } ret = done; out: kfree(kbuf); return ret; } static const struct file_operations kprobe_events_ops = { .owner = THIS_MODULE, .open = probes_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, .write = probes_write, }; /* Probes profiling interfaces */ static int probes_profile_seq_show(struct seq_file *m, void *v) { struct trace_probe *tp = v; seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit, tp->rp.kp.nmissed); return 0; } static const struct seq_operations profile_seq_op = { .start = probes_seq_start, .next = probes_seq_next, .stop = probes_seq_stop, .show = probes_profile_seq_show }; static int profile_open(struct inode *inode, struct file *file) { return seq_open(file, &profile_seq_op); } static const struct file_operations kprobe_profile_ops = { .owner = THIS_MODULE, .open = profile_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* Sum up total data length for dynamic arraies (strings) */ static __kprobes int __get_data_size(struct trace_probe *tp, struct pt_regs *regs) { int i, ret = 0; u32 len; for (i = 0; i < tp->nr_args; i++) if (unlikely(tp->args[i].fetch_size.fn)) { call_fetch(&tp->args[i].fetch_size, regs, &len); ret += len; } return ret; } /* Store the value of each 
argument */ static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs, u8 *data, int maxlen) { int i; u32 end = tp->size; u32 *dl; /* Data (relative) location */ for (i = 0; i < tp->nr_args; i++) { if (unlikely(tp->args[i].fetch_size.fn)) { /* * First, we set the relative location and * maximum data length to *dl */ dl = (u32 *)(data + tp->args[i].offset); *dl = make_data_rloc(maxlen, end - tp->args[i].offset); /* Then try to fetch string or dynamic array data */ call_fetch(&tp->args[i].fetch, regs, dl); /* Reduce maximum length */ end += get_rloc_len(*dl); maxlen -= get_rloc_len(*dl); /* Trick here, convert data_rloc to data_loc */ *dl = convert_rloc_to_loc(*dl, ent_size + tp->args[i].offset); } else /* Just fetching data normally */ call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); } } /* Kprobe handler */ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) { struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); struct kprobe_trace_entry_head *entry; struct ring_buffer_event *event; struct ring_buffer *buffer; int size, dsize, pc; unsigned long irq_flags; struct ftrace_event_call *call = &tp->call; tp->nhit++; local_save_flags(irq_flags); pc = preempt_count(); dsize = __get_data_size(tp, regs); size = sizeof(*entry) + tp->size + dsize; event = trace_current_buffer_lock_reserve(&buffer, call->event.type, size, irq_flags, pc); if (!event) return; entry = ring_buffer_event_data(event); entry->ip = (unsigned long)kp->addr; store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); if (!filter_current_check_discard(buffer, call, entry, event)) trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); } /* Kretprobe handler */ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, struct pt_regs *regs) { struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); struct kretprobe_trace_entry_head *entry; struct 
ring_buffer_event *event; struct ring_buffer *buffer; int size, pc, dsize; unsigned long irq_flags; struct ftrace_event_call *call = &tp->call; local_save_flags(irq_flags); pc = preempt_count(); dsize = __get_data_size(tp, regs); size = sizeof(*entry) + tp->size + dsize; event = trace_current_buffer_lock_reserve(&buffer, call->event.type, size, irq_flags, pc); if (!event) return; entry = ring_buffer_event_data(event); entry->func = (unsigned long)tp->rp.kp.addr; entry->ret_ip = (unsigned long)ri->ret_addr; store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); if (!filter_current_check_discard(buffer, call, entry, event)) trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); } /* Event entry printers */ enum print_line_t print_kprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event) { struct kprobe_trace_entry_head *field; struct trace_seq *s = &iter->seq; struct trace_probe *tp; u8 *data; int i; field = (struct kprobe_trace_entry_head *)iter->ent; tp = container_of(event, struct trace_probe, call.event); if (!trace_seq_printf(s, "%s: (", tp->call.name)) goto partial; if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) goto partial; if (!trace_seq_puts(s, ")")) goto partial; data = (u8 *)&field[1]; for (i = 0; i < tp->nr_args; i++) if (!tp->args[i].type->print(s, tp->args[i].name, data + tp->args[i].offset, field)) goto partial; if (!trace_seq_puts(s, "\n")) goto partial; return TRACE_TYPE_HANDLED; partial: return TRACE_TYPE_PARTIAL_LINE; } enum print_line_t print_kretprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event) { struct kretprobe_trace_entry_head *field; struct trace_seq *s = &iter->seq; struct trace_probe *tp; u8 *data; int i; field = (struct kretprobe_trace_entry_head *)iter->ent; tp = container_of(event, struct trace_probe, call.event); if (!trace_seq_printf(s, "%s: (", tp->call.name)) goto partial; if (!seq_print_ip_sym(s, field->ret_ip, flags | 
TRACE_ITER_SYM_OFFSET)) goto partial; if (!trace_seq_puts(s, " <- ")) goto partial; if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET)) goto partial; if (!trace_seq_puts(s, ")")) goto partial; data = (u8 *)&field[1]; for (i = 0; i < tp->nr_args; i++) if (!tp->args[i].type->print(s, tp->args[i].name, data + tp->args[i].offset, field)) goto partial; if (!trace_seq_puts(s, "\n")) goto partial; return TRACE_TYPE_HANDLED; partial: return TRACE_TYPE_PARTIAL_LINE; } static int probe_event_enable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; tp->flags |= TP_FLAG_TRACE; if (probe_is_return(tp)) return enable_kretprobe(&tp->rp); else return enable_kprobe(&tp->rp.kp); } static void probe_event_disable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; tp->flags &= ~TP_FLAG_TRACE; if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) { if (probe_is_return(tp)) disable_kretprobe(&tp->rp); else disable_kprobe(&tp->rp.kp); } } #undef DEFINE_FIELD #define DEFINE_FIELD(type, item, name, is_signed) \ do { \ ret = trace_define_field(event_call, #type, name, \ offsetof(typeof(field), item), \ sizeof(field.item), is_signed, \ FILTER_OTHER); \ if (ret) \ return ret; \ } while (0) static int kprobe_event_define_fields(struct ftrace_event_call *event_call) { int ret, i; struct kprobe_trace_entry_head field; struct trace_probe *tp = (struct trace_probe *)event_call->data; DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); /* Set argument names as fields */ for (i = 0; i < tp->nr_args; i++) { ret = trace_define_field(event_call, tp->args[i].type->fmttype, tp->args[i].name, sizeof(field) + tp->args[i].offset, tp->args[i].type->size, tp->args[i].type->is_signed, FILTER_OTHER); if (ret) return ret; } return 0; } static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) { int ret, i; struct kretprobe_trace_entry_head field; struct trace_probe *tp = (struct 
trace_probe *)event_call->data; DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); /* Set argument names as fields */ for (i = 0; i < tp->nr_args; i++) { ret = trace_define_field(event_call, tp->args[i].type->fmttype, tp->args[i].name, sizeof(field) + tp->args[i].offset, tp->args[i].type->size, tp->args[i].type->is_signed, FILTER_OTHER); if (ret) return ret; } return 0; } static int __set_print_fmt(struct trace_probe *tp, char *buf, int len) { int i; int pos = 0; const char *fmt, *arg; if (!probe_is_return(tp)) { fmt = "(%lx)"; arg = "REC->" FIELD_STRING_IP; } else { fmt = "(%lx <- %lx)"; arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP; } /* When len=0, we just calculate the needed length */ #define LEN_OR_ZERO (len ? len - pos : 0) pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt); for (i = 0; i < tp->nr_args; i++) { pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s", tp->args[i].name, tp->args[i].type->fmt); } pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); for (i = 0; i < tp->nr_args; i++) { if (strcmp(tp->args[i].type->name, "string") == 0) pos += snprintf(buf + pos, LEN_OR_ZERO, ", __get_str(%s)", tp->args[i].name); else pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", tp->args[i].name); } #undef LEN_OR_ZERO /* return the length of print_fmt */ return pos; } static int set_print_fmt(struct trace_probe *tp) { int len; char *print_fmt; /* First: called with 0 length to calculate the needed length */ len = __set_print_fmt(tp, NULL, 0); print_fmt = kmalloc(len + 1, GFP_KERNEL); if (!print_fmt) return -ENOMEM; /* Second: actually write the @print_fmt */ __set_print_fmt(tp, print_fmt, len + 1); tp->call.print_fmt = print_fmt; return 0; } #ifdef CONFIG_PERF_EVENTS /* Kprobe profile handler */ static __kprobes void kprobe_perf_func(struct kprobe *kp, struct pt_regs *regs) { struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); struct ftrace_event_call *call = 
&tp->call; struct kprobe_trace_entry_head *entry; struct hlist_head *head; int size, __size, dsize; int rctx; dsize = __get_data_size(tp, regs); __size = sizeof(*entry) + tp->size + dsize; size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) return; entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); if (!entry) return; entry->ip = (unsigned long)kp->addr; memset(&entry[1], 0, dsize); store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); head = this_cpu_ptr(call->perf_events); perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); } /* Kretprobe profile handler */ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, struct pt_regs *regs) { struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); struct ftrace_event_call *call = &tp->call; struct kretprobe_trace_entry_head *entry; struct hlist_head *head; int size, __size, dsize; int rctx; dsize = __get_data_size(tp, regs); __size = sizeof(*entry) + tp->size + dsize; size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) return; entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); if (!entry) return; entry->func = (unsigned long)tp->rp.kp.addr; entry->ret_ip = (unsigned long)ri->ret_addr; store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); head = this_cpu_ptr(call->perf_events); perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); } static int probe_perf_enable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; tp->flags |= TP_FLAG_PROFILE; if (probe_is_return(tp)) return enable_kretprobe(&tp->rp); else return enable_kprobe(&tp->rp.kp); } static void probe_perf_disable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; 
tp->flags &= ~TP_FLAG_PROFILE; if (!(tp->flags & TP_FLAG_TRACE)) { if (probe_is_return(tp)) disable_kretprobe(&tp->rp); else disable_kprobe(&tp->rp.kp); } } #endif /* CONFIG_PERF_EVENTS */ static __kprobes int kprobe_register(struct ftrace_event_call *event, enum trace_reg type) { switch (type) { case TRACE_REG_REGISTER: return probe_event_enable(event); case TRACE_REG_UNREGISTER: probe_event_disable(event); return 0; #ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: return probe_perf_enable(event); case TRACE_REG_PERF_UNREGISTER: probe_perf_disable(event); return 0; #endif } return 0; } static __kprobes int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) { struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); if (tp->flags & TP_FLAG_TRACE) kprobe_trace_func(kp, regs); #ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) kprobe_perf_func(kp, regs); #endif return 0; /* We don't tweek kernel, so just return 0 */ } static __kprobes int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) { struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); if (tp->flags & TP_FLAG_TRACE) kretprobe_trace_func(ri, regs); #ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) kretprobe_perf_func(ri, regs); #endif return 0; /* We don't tweek kernel, so just return 0 */ } static struct trace_event_functions kretprobe_funcs = { .trace = print_kretprobe_event }; static struct trace_event_functions kprobe_funcs = { .trace = print_kprobe_event }; static int register_probe_event(struct trace_probe *tp) { struct ftrace_event_call *call = &tp->call; int ret; /* Initialize ftrace_event_call */ INIT_LIST_HEAD(&call->class->fields); if (probe_is_return(tp)) { call->event.funcs = &kretprobe_funcs; call->class->define_fields = kretprobe_event_define_fields; } else { call->event.funcs = &kprobe_funcs; call->class->define_fields = kprobe_event_define_fields; } if (set_print_fmt(tp) < 0) return -ENOMEM; ret = 
register_ftrace_event(&call->event); if (!ret) { kfree(call->print_fmt); return -ENODEV; } call->flags = 0; call->class->reg = kprobe_register; call->data = tp; ret = trace_add_event_call(call); if (ret) { pr_info("Failed to register kprobe event: %s\n", call->name); kfree(call->print_fmt); unregister_ftrace_event(&call->event); } return ret; } static void unregister_probe_event(struct trace_probe *tp) { /* tp->event is unregistered in trace_remove_event_call() */ trace_remove_event_call(&tp->call); kfree(tp->call.print_fmt); } /* Make a debugfs interface for controlling probe points */ static __init int init_kprobe_trace(void) { struct dentry *d_tracer; struct dentry *entry; d_tracer = tracing_init_dentry(); if (!d_tracer) return 0; entry = debugfs_create_file("kprobe_events", 0644, d_tracer, NULL, &kprobe_events_ops); /* Event list interface */ if (!entry) pr_warning("Could not create debugfs " "'kprobe_events' entry\n"); /* Profile interface */ entry = debugfs_create_file("kprobe_profile", 0444, d_tracer, NULL, &kprobe_profile_ops); if (!entry) pr_warning("Could not create debugfs " "'kprobe_profile' entry\n"); return 0; } fs_initcall(init_kprobe_trace); #ifdef CONFIG_FTRACE_STARTUP_TEST /* * The "__used" keeps gcc from removing the function symbol * from the kallsyms table. 
*/ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6) { return a1 + a2 + a3 + a4 + a5 + a6; } static __init int kprobe_trace_self_tests_init(void) { int ret, warn = 0; int (*target)(int, int, int, int, int, int); struct trace_probe *tp; target = kprobe_trace_selftest_target; pr_info("Testing kprobe tracing: "); ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target " "$stack $stack0 +0($stack)"); if (WARN_ON_ONCE(ret)) { pr_warning("error on probing function entry.\n"); warn++; } else { /* Enable trace point */ tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM); if (WARN_ON_ONCE(tp == NULL)) { pr_warning("error on getting new probe.\n"); warn++; } else probe_event_enable(&tp->call); } ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target " "$retval"); if (WARN_ON_ONCE(ret)) { pr_warning("error on probing function return.\n"); warn++; } else { /* Enable trace point */ tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM); if (WARN_ON_ONCE(tp == NULL)) { pr_warning("error on getting new probe.\n"); warn++; } else probe_event_enable(&tp->call); } if (warn) goto end; ret = target(1, 2, 3, 4, 5, 6); ret = command_trace_probe("-:testprobe"); if (WARN_ON_ONCE(ret)) { pr_warning("error on deleting a probe.\n"); warn++; } ret = command_trace_probe("-:testprobe2"); if (WARN_ON_ONCE(ret)) { pr_warning("error on deleting a probe.\n"); warn++; } end: cleanup_all_probes(); if (warn) pr_cont("NG: Some tests are failed. Please check them.\n"); else pr_cont("OK\n"); return 0; } late_initcall(kprobe_trace_self_tests_init); #endif
gpl-2.0
shuiziliuBUPT/linuxkernel
drivers/net/wireless/brcm80211/brcmutil/d11.c
2622
4037
/* * Copyright (c) 2013 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /*********************channel spec common functions*********************/ #include <linux/module.h> #include <brcmu_utils.h> #include <brcmu_wifi.h> #include <brcmu_d11.h> static void brcmu_d11n_encchspec(struct brcmu_chan *ch) { ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK; switch (ch->bw) { case BRCMU_CHAN_BW_20: ch->chspec |= BRCMU_CHSPEC_D11N_BW_20 | BRCMU_CHSPEC_D11N_SB_N; break; case BRCMU_CHAN_BW_40: default: WARN_ON_ONCE(1); break; } if (ch->chnum <= CH_MAX_2G_CHANNEL) ch->chspec |= BRCMU_CHSPEC_D11N_BND_2G; else ch->chspec |= BRCMU_CHSPEC_D11N_BND_5G; } static void brcmu_d11ac_encchspec(struct brcmu_chan *ch) { ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK; switch (ch->bw) { case BRCMU_CHAN_BW_20: ch->chspec |= BRCMU_CHSPEC_D11AC_BW_20; break; case BRCMU_CHAN_BW_40: case BRCMU_CHAN_BW_80: case BRCMU_CHAN_BW_80P80: case BRCMU_CHAN_BW_160: default: WARN_ON_ONCE(1); break; } if (ch->chnum <= CH_MAX_2G_CHANNEL) ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G; else ch->chspec |= BRCMU_CHSPEC_D11AC_BND_5G; } static void brcmu_d11n_decchspec(struct brcmu_chan *ch) { u16 val; ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK); switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) { case BRCMU_CHSPEC_D11N_BW_20: ch->bw = 
BRCMU_CHAN_BW_20; break; case BRCMU_CHSPEC_D11N_BW_40: ch->bw = BRCMU_CHAN_BW_40; val = ch->chspec & BRCMU_CHSPEC_D11N_SB_MASK; if (val == BRCMU_CHSPEC_D11N_SB_L) { ch->sb = BRCMU_CHAN_SB_L; ch->chnum -= CH_10MHZ_APART; } else { ch->sb = BRCMU_CHAN_SB_U; ch->chnum += CH_10MHZ_APART; } break; default: WARN_ON_ONCE(1); break; } switch (ch->chspec & BRCMU_CHSPEC_D11N_BND_MASK) { case BRCMU_CHSPEC_D11N_BND_5G: ch->band = BRCMU_CHAN_BAND_5G; break; case BRCMU_CHSPEC_D11N_BND_2G: ch->band = BRCMU_CHAN_BAND_2G; break; default: WARN_ON_ONCE(1); break; } } static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) { u16 val; ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK); switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) { case BRCMU_CHSPEC_D11AC_BW_20: ch->bw = BRCMU_CHAN_BW_20; break; case BRCMU_CHSPEC_D11AC_BW_40: ch->bw = BRCMU_CHAN_BW_40; val = ch->chspec & BRCMU_CHSPEC_D11AC_SB_MASK; if (val == BRCMU_CHSPEC_D11AC_SB_L) { ch->sb = BRCMU_CHAN_SB_L; ch->chnum -= CH_10MHZ_APART; } else if (val == BRCMU_CHSPEC_D11AC_SB_U) { ch->sb = BRCMU_CHAN_SB_U; ch->chnum += CH_10MHZ_APART; } else { WARN_ON_ONCE(1); } break; case BRCMU_CHSPEC_D11AC_BW_80: ch->bw = BRCMU_CHAN_BW_80; break; case BRCMU_CHSPEC_D11AC_BW_8080: case BRCMU_CHSPEC_D11AC_BW_160: default: WARN_ON_ONCE(1); break; } switch (ch->chspec & BRCMU_CHSPEC_D11AC_BND_MASK) { case BRCMU_CHSPEC_D11AC_BND_5G: ch->band = BRCMU_CHAN_BAND_5G; break; case BRCMU_CHSPEC_D11AC_BND_2G: ch->band = BRCMU_CHAN_BAND_2G; break; default: WARN_ON_ONCE(1); break; } } void brcmu_d11_attach(struct brcmu_d11inf *d11inf) { if (d11inf->io_type == BRCMU_D11N_IOTYPE) { d11inf->encchspec = brcmu_d11n_encchspec; d11inf->decchspec = brcmu_d11n_decchspec; } else { d11inf->encchspec = brcmu_d11ac_encchspec; d11inf->decchspec = brcmu_d11ac_decchspec; } } EXPORT_SYMBOL(brcmu_d11_attach);
gpl-2.0
buck101/superlinux
drivers/net/phy/fixed.c
2878
5693
/* * Fixed MDIO bus (MDIO bus emulation with fixed PHYs) * * Author: Vitaly Bordug <vbordug@ru.mvista.com> * Anton Vorontsov <avorontsov@ru.mvista.com> * * Copyright (c) 2006-2007 MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/list.h> #include <linux/mii.h> #include <linux/phy.h> #include <linux/phy_fixed.h> #include <linux/err.h> #include <linux/slab.h> #define MII_REGS_NUM 29 struct fixed_mdio_bus { int irqs[PHY_MAX_ADDR]; struct mii_bus *mii_bus; struct list_head phys; }; struct fixed_phy { int id; u16 regs[MII_REGS_NUM]; struct phy_device *phydev; struct fixed_phy_status status; int (*link_update)(struct net_device *, struct fixed_phy_status *); struct list_head node; }; static struct platform_device *pdev; static struct fixed_mdio_bus platform_fmb = { .phys = LIST_HEAD_INIT(platform_fmb.phys), }; static int fixed_phy_update_regs(struct fixed_phy *fp) { u16 bmsr = BMSR_ANEGCAPABLE; u16 bmcr = 0; u16 lpagb = 0; u16 lpa = 0; if (fp->status.duplex) { bmcr |= BMCR_FULLDPLX; switch (fp->status.speed) { case 1000: bmsr |= BMSR_ESTATEN; bmcr |= BMCR_SPEED1000; lpagb |= LPA_1000FULL; break; case 100: bmsr |= BMSR_100FULL; bmcr |= BMCR_SPEED100; lpa |= LPA_100FULL; break; case 10: bmsr |= BMSR_10FULL; lpa |= LPA_10FULL; break; default: pr_warn("fixed phy: unknown speed\n"); return -EINVAL; } } else { switch (fp->status.speed) { case 1000: bmsr |= BMSR_ESTATEN; bmcr |= BMCR_SPEED1000; lpagb |= LPA_1000HALF; break; case 100: bmsr |= BMSR_100HALF; bmcr |= BMCR_SPEED100; lpa |= LPA_100HALF; break; case 10: bmsr |= BMSR_10HALF; lpa |= LPA_10HALF; break; default: pr_warn("fixed phy: unknown speed\n"); return -EINVAL; } } if (fp->status.link) bmsr 
|= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; if (fp->status.pause) lpa |= LPA_PAUSE_CAP; if (fp->status.asym_pause) lpa |= LPA_PAUSE_ASYM; fp->regs[MII_PHYSID1] = fp->id >> 16; fp->regs[MII_PHYSID2] = fp->id; fp->regs[MII_BMSR] = bmsr; fp->regs[MII_BMCR] = bmcr; fp->regs[MII_LPA] = lpa; fp->regs[MII_STAT1000] = lpagb; return 0; } static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num) { struct fixed_mdio_bus *fmb = bus->priv; struct fixed_phy *fp; if (reg_num >= MII_REGS_NUM) return -1; list_for_each_entry(fp, &fmb->phys, node) { if (fp->id == phy_id) { /* Issue callback if user registered it. */ if (fp->link_update) { fp->link_update(fp->phydev->attached_dev, &fp->status); fixed_phy_update_regs(fp); } return fp->regs[reg_num]; } } return 0xFFFF; } static int fixed_mdio_write(struct mii_bus *bus, int phy_id, int reg_num, u16 val) { return 0; } /* * If something weird is required to be done with link/speed, * network driver is able to assign a function to implement this. * May be useful for PHY's that need to be software-driven. 
*/ int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, struct fixed_phy_status *)) { struct fixed_mdio_bus *fmb = &platform_fmb; struct fixed_phy *fp; if (!link_update || !phydev || !phydev->bus) return -EINVAL; list_for_each_entry(fp, &fmb->phys, node) { if (fp->id == phydev->phy_id) { fp->link_update = link_update; fp->phydev = phydev; return 0; } } return -ENOENT; } EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status) { int ret; struct fixed_mdio_bus *fmb = &platform_fmb; struct fixed_phy *fp; fp = kzalloc(sizeof(*fp), GFP_KERNEL); if (!fp) return -ENOMEM; memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); fmb->irqs[phy_id] = irq; fp->id = phy_id; fp->status = *status; ret = fixed_phy_update_regs(fp); if (ret) goto err_regs; list_add_tail(&fp->node, &fmb->phys); return 0; err_regs: kfree(fp); return ret; } EXPORT_SYMBOL_GPL(fixed_phy_add); static int __init fixed_mdio_bus_init(void) { struct fixed_mdio_bus *fmb = &platform_fmb; int ret; pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); if (IS_ERR(pdev)) { ret = PTR_ERR(pdev); goto err_pdev; } fmb->mii_bus = mdiobus_alloc(); if (fmb->mii_bus == NULL) { ret = -ENOMEM; goto err_mdiobus_reg; } snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0"); fmb->mii_bus->name = "Fixed MDIO Bus"; fmb->mii_bus->priv = fmb; fmb->mii_bus->parent = &pdev->dev; fmb->mii_bus->read = &fixed_mdio_read; fmb->mii_bus->write = &fixed_mdio_write; fmb->mii_bus->irq = fmb->irqs; ret = mdiobus_register(fmb->mii_bus); if (ret) goto err_mdiobus_alloc; return 0; err_mdiobus_alloc: mdiobus_free(fmb->mii_bus); err_mdiobus_reg: platform_device_unregister(pdev); err_pdev: return ret; } module_init(fixed_mdio_bus_init); static void __exit fixed_mdio_bus_exit(void) { struct fixed_mdio_bus *fmb = &platform_fmb; struct fixed_phy *fp, *tmp; mdiobus_unregister(fmb->mii_bus); mdiobus_free(fmb->mii_bus); 
platform_device_unregister(pdev); list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { list_del(&fp->node); kfree(fp); } } module_exit(fixed_mdio_bus_exit); MODULE_DESCRIPTION("Fixed MDIO bus (MDIO bus emulation with fixed PHYs)"); MODULE_AUTHOR("Vitaly Bordug"); MODULE_LICENSE("GPL");
gpl-2.0
emwno/android_kernel_N7100
arch/parisc/lib/bitops.c
3390
1841
/* * bitops.c: atomic operations which got too long to be inlined all over * the place. * * Copyright 1999 Philipp Rumpf (prumpf@tux.org) * Copyright 2000 Grant Grundler (grundler@cup.hp.com) */ #include <linux/kernel.h> #include <linux/spinlock.h> #include <asm/system.h> #include <asm/atomic.h> #ifdef CONFIG_SMP arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED }; #endif #ifdef CONFIG_64BIT unsigned long __xchg64(unsigned long x, unsigned long *ptr) { unsigned long temp, flags; _atomic_spin_lock_irqsave(ptr, flags); temp = *ptr; *ptr = x; _atomic_spin_unlock_irqrestore(ptr, flags); return temp; } #endif unsigned long __xchg32(int x, int *ptr) { unsigned long flags; long temp; _atomic_spin_lock_irqsave(ptr, flags); temp = (long) *ptr; /* XXX - sign extension wanted? */ *ptr = x; _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)temp; } unsigned long __xchg8(char x, char *ptr) { unsigned long flags; long temp; _atomic_spin_lock_irqsave(ptr, flags); temp = (long) *ptr; /* XXX - sign extension wanted? */ *ptr = x; _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)temp; } #ifdef CONFIG_64BIT unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new) { unsigned long flags; unsigned long prev; _atomic_spin_lock_irqsave(ptr, flags); if ((prev = *ptr) == old) *ptr = new; _atomic_spin_unlock_irqrestore(ptr, flags); return prev; } #endif unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new) { unsigned long flags; unsigned int prev; _atomic_spin_lock_irqsave(ptr, flags); if ((prev = *ptr) == old) *ptr = new; _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)prev; }
gpl-2.0
Team-Hydra/android_kernel_samsung_klte
drivers/usb/gadget/f_eem.c
3646
15318
/*
 * f_eem.c -- USB CDC Ethernet (EEM) link function driver
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2008 Nokia Corporation
 * Copyright (C) 2009 EF Johnson Technologies
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/slab.h>

#include "u_ether.h"

/* every EEM packet starts with a 2-byte length/type header */
#define EEM_HLEN 2

/*
 * This function is a "CDC Ethernet Emulation Model" (CDC EEM)
 * Ethernet link.
 */

struct f_eem {
    struct gether port;
    u8 ctrl_id;     /* interface number assigned at bind time */
};

static inline struct f_eem *func_to_eem(struct usb_function *f)
{
    return container_of(f, struct f_eem, port.func);
}

/*-------------------------------------------------------------------------*/

/* interface descriptor: */
static struct usb_interface_descriptor eem_intf __initdata = {
    .bLength = sizeof eem_intf,
    .bDescriptorType = USB_DT_INTERFACE,
    /* .bInterfaceNumber = DYNAMIC */
    .bNumEndpoints = 2,
    .bInterfaceClass = USB_CLASS_COMM,
    .bInterfaceSubClass = USB_CDC_SUBCLASS_EEM,
    .bInterfaceProtocol = USB_CDC_PROTO_EEM,
    /* .iInterface = DYNAMIC */
};

/* full speed support: */

static struct usb_endpoint_descriptor eem_fs_in_desc __initdata = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,
    .bEndpointAddress = USB_DIR_IN,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor eem_fs_out_desc __initdata = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,
    .bEndpointAddress = USB_DIR_OUT,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *eem_fs_function[] __initdata = {
    /* CDC EEM control descriptors */
    (struct usb_descriptor_header *) &eem_intf,
    (struct usb_descriptor_header *) &eem_fs_in_desc,
    (struct usb_descriptor_header *) &eem_fs_out_desc,
    NULL,
};

/* high speed support: */

static struct usb_endpoint_descriptor eem_hs_in_desc __initdata = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,
    .bEndpointAddress = USB_DIR_IN,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
    .wMaxPacketSize = cpu_to_le16(512),
};

static struct usb_endpoint_descriptor eem_hs_out_desc __initdata = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,
    .bEndpointAddress = USB_DIR_OUT,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
    .wMaxPacketSize = cpu_to_le16(512),
};

static struct usb_descriptor_header *eem_hs_function[] __initdata = {
    /* CDC EEM control descriptors */
    (struct usb_descriptor_header *) &eem_intf,
    (struct usb_descriptor_header *) &eem_hs_in_desc,
    (struct usb_descriptor_header *) &eem_hs_out_desc,
    NULL,
};

/* super speed support: */

static struct usb_endpoint_descriptor eem_ss_in_desc __initdata = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,
    .bEndpointAddress = USB_DIR_IN,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
    .wMaxPacketSize = cpu_to_le16(1024),
};

static struct usb_endpoint_descriptor eem_ss_out_desc __initdata = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,
    .bEndpointAddress = USB_DIR_OUT,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
    .wMaxPacketSize = cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor eem_ss_bulk_comp_desc __initdata = {
    .bLength = sizeof eem_ss_bulk_comp_desc,
    .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,

    /* the following 2 values can be tweaked if necessary */
    /* .bMaxBurst = 0, */
    /* .bmAttributes = 0, */
};

static struct usb_descriptor_header *eem_ss_function[] __initdata = {
    /* CDC EEM control descriptors */
    (struct usb_descriptor_header *) &eem_intf,
    (struct usb_descriptor_header *) &eem_ss_in_desc,
    (struct usb_descriptor_header *) &eem_ss_bulk_comp_desc,
    (struct usb_descriptor_header *) &eem_ss_out_desc,
    (struct usb_descriptor_header *) &eem_ss_bulk_comp_desc,
    NULL,
};

/* string descriptors: */

static struct usb_string eem_string_defs[] = {
    [0].s = "CDC Ethernet Emulation Model (EEM)",
    {  } /* end of list */
};

static struct usb_gadget_strings eem_string_table = {
    .language = 0x0409, /* en-us */
    .strings = eem_string_defs,
};

static struct usb_gadget_strings *eem_strings[] = {
    &eem_string_table,
    NULL,
};

/*-------------------------------------------------------------------------*/

/* EEM defines no class-specific control requests; everything that lands
 * here is logged and stalled (value stays negative).
 */
static int eem_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
    struct usb_composite_dev *cdev = f->config->cdev;
    int value = -EOPNOTSUPP;
    u16 w_index = le16_to_cpu(ctrl->wIndex);
    u16 w_value = le16_to_cpu(ctrl->wValue);
    u16 w_length = le16_to_cpu(ctrl->wLength);

    DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
        ctrl->bRequestType, ctrl->bRequest,
        w_value, w_index, w_length);

    /* device either stalls (value < 0) or reports success */
    return value;
}

/* Activate (or reset) the single data interface; only alt setting 0 and
 * our own interface number are accepted.
 */
static int eem_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
    struct f_eem *eem = func_to_eem(f);
    struct usb_composite_dev *cdev = f->config->cdev;
    struct net_device *net;

    /* we know alt == 0, so this is an activation or a reset */
    if (alt != 0)
        goto fail;

    if (intf == eem->ctrl_id) {
        if (eem->port.in_ep->driver_data) {
            DBG(cdev, "reset eem\n");
            gether_disconnect(&eem->port);
        }

        if (!eem->port.in_ep->desc || !eem->port.out_ep->desc) {
            DBG(cdev, "init eem\n");
            if (config_ep_by_speed(cdev->gadget, f,
                                   eem->port.in_ep) ||
                config_ep_by_speed(cdev->gadget, f,
                                   eem->port.out_ep)) {
                eem->port.in_ep->desc = NULL;
                eem->port.out_ep->desc = NULL;
                goto fail;
            }
        }

        /* zlps should not occur because zero-length EEM packets
         * will be inserted in those cases where they would occur
         */
        eem->port.is_zlp_ok = 1;
        eem->port.cdc_filter = DEFAULT_FILTER;
        DBG(cdev, "activate eem\n");
        net = gether_connect(&eem->port);
        if (IS_ERR(net))
            return PTR_ERR(net);
    } else
        goto fail;

    return 0;
fail:
    return -EINVAL;
}

/* Drop the Ethernet link when the host deconfigures us. */
static void eem_disable(struct usb_function *f)
{
    struct f_eem *eem = func_to_eem(f);
    struct usb_composite_dev *cdev = f->config->cdev;

    DBG(cdev, "eem deactivated\n");

    if (eem->port.in_ep->driver_data)
        gether_disconnect(&eem->port);
}

/*-------------------------------------------------------------------------*/

/* EEM function driver setup/binding */

/* Allocate interface id and endpoints, then build per-speed descriptor
 * copies.  On any failure, releases whatever was claimed so far.
 */
static int __init eem_bind(struct usb_configuration *c, struct usb_function *f)
{
    struct usb_composite_dev *cdev = c->cdev;
    struct f_eem *eem = func_to_eem(f);
    int status;
    struct usb_ep *ep;

    /* allocate instance-specific interface IDs */
    status = usb_interface_id(c, f);
    if (status < 0)
        goto fail;
    eem->ctrl_id = status;
    eem_intf.bInterfaceNumber = status;

    status = -ENODEV;

    /* allocate instance-specific endpoints */
    ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_in_desc);
    if (!ep)
        goto fail;
    eem->port.in_ep = ep;
    ep->driver_data = cdev; /* claim */

    ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_out_desc);
    if (!ep)
        goto fail;
    eem->port.out_ep = ep;
    ep->driver_data = cdev; /* claim */

    status = -ENOMEM;

    /* copy descriptors, and track endpoint copies */
    f->descriptors = usb_copy_descriptors(eem_fs_function);
    if (!f->descriptors)
        goto fail;

    /* support all relevant hardware speeds... we expect that when
     * hardware is dual speed, all bulk-capable endpoints work at
     * both speeds
     */
    if (gadget_is_dualspeed(c->cdev->gadget)) {
        eem_hs_in_desc.bEndpointAddress =
                eem_fs_in_desc.bEndpointAddress;
        eem_hs_out_desc.bEndpointAddress =
                eem_fs_out_desc.bEndpointAddress;

        /* copy descriptors, and track endpoint copies */
        f->hs_descriptors = usb_copy_descriptors(eem_hs_function);
        if (!f->hs_descriptors)
            goto fail;
    }

    if (gadget_is_superspeed(c->cdev->gadget)) {
        eem_ss_in_desc.bEndpointAddress =
                eem_fs_in_desc.bEndpointAddress;
        eem_ss_out_desc.bEndpointAddress =
                eem_fs_out_desc.bEndpointAddress;

        /* copy descriptors, and track endpoint copies */
        f->ss_descriptors = usb_copy_descriptors(eem_ss_function);
        if (!f->ss_descriptors)
            goto fail;
    }

    DBG(cdev, "CDC Ethernet (EEM): %s speed IN/%s OUT/%s\n",
        gadget_is_superspeed(c->cdev->gadget) ? "super" :
        gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
        eem->port.in_ep->name, eem->port.out_ep->name);
    return 0;

fail:
    if (f->descriptors)
        usb_free_descriptors(f->descriptors);
    if (f->hs_descriptors)
        usb_free_descriptors(f->hs_descriptors);

    /* we might as well release our claims on endpoints */
    if (eem->port.out_ep->desc)
        eem->port.out_ep->driver_data = NULL;
    if (eem->port.in_ep->desc)
        eem->port.in_ep->driver_data = NULL;

    ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);

    return status;
}

/* Free the per-speed descriptor copies and the instance itself. */
static void eem_unbind(struct usb_configuration *c, struct usb_function *f)
{
    struct f_eem *eem = func_to_eem(f);

    DBG(c->cdev, "eem unbind\n");

    if (gadget_is_superspeed(c->cdev->gadget))
        usb_free_descriptors(f->ss_descriptors);
    if (gadget_is_dualspeed(c->cdev->gadget))
        usb_free_descriptors(f->hs_descriptors);
    usb_free_descriptors(f->descriptors);
    kfree(eem);
}

/* Completion for the echo response queued in eem_unwrap(); frees the
 * cloned skb stashed in req->context.
 */
static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
    struct sk_buff *skb = (struct sk_buff *)req->context;

    dev_kfree_skb_any(skb);
}

/*
 * Add the EEM header and ethernet checksum.
 * We currently do not attempt to put multiple ethernet frames
 * into a single USB transfer
 */
static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb)
{
    struct sk_buff *skb2 = NULL;
    struct usb_ep *in = port->in_ep;
    int padlen = 0;
    u16 len = skb->len;

    if (!skb_cloned(skb)) {
        int headroom = skb_headroom(skb);
        int tailroom = skb_tailroom(skb);

        /* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0,
         * stick two bytes of zero-length EEM packet on the end.
         */
        if (((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) == 0)
            padlen += 2;

        if ((tailroom >= (ETH_FCS_LEN + padlen)) &&
                (headroom >= EEM_HLEN))
            goto done;
    }

    /* cloned or cramped: reallocate with room for header + FCS + pad */
    skb2 = skb_copy_expand(skb, EEM_HLEN, ETH_FCS_LEN + padlen, GFP_ATOMIC);
    dev_kfree_skb_any(skb);
    skb = skb2;
    if (!skb)
        return skb;

done:
    /* use the "no CRC" option */
    put_unaligned_be32(0xdeadbeef, skb_put(skb, 4));

    /* EEM packet header format:
     * b0..13:  length of ethernet frame
     * b14:     bmCRC (0 == sentinel CRC)
     * b15:     bmType (0 == data)
     */
    len = skb->len;
    put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2));

    /* add a zero-length EEM packet, if needed */
    if (padlen)
        put_unaligned_le16(0, skb_put(skb, 2));

    return skb;
}

/*
 * Remove the EEM header.  Note that there can be many EEM packets in a single
 * USB transfer, so we need to break them out and handle them independently.
 *
 * NOTE: the input skb is always consumed here — on the normal exit the
 * loop falls through the "error" label, which frees it.
 */
static int eem_unwrap(struct gether *port,
                      struct sk_buff *skb,
                      struct sk_buff_head *list)
{
    struct usb_composite_dev *cdev = port->func.config->cdev;
    int status = 0;

    do {
        struct sk_buff *skb2;
        u16 header;
        u16 len = 0;

        if (skb->len < EEM_HLEN) {
            status = -EINVAL;
            DBG(cdev, "invalid EEM header\n");
            goto error;
        }

        /* remove the EEM header */
        header = get_unaligned_le16(skb->data);
        skb_pull(skb, EEM_HLEN);

        /* EEM packet header format:
         * b0..14:  EEM type dependent (data or command)
         * b15:     bmType (0 == data, 1 == command)
         */
        if (header & BIT(15)) {
            struct usb_request *req = cdev->req;
            u16 bmEEMCmd;

            /* EEM command packet format:
             * b0..10:  bmEEMCmdParam
             * b11..13: bmEEMCmd
             * b14:     reserved (must be zero)
             * b15:     bmType (1 == command)
             */
            if (header & BIT(14))
                continue;

            bmEEMCmd = (header >> 11) & 0x7;
            switch (bmEEMCmd) {
            case 0: /* echo */
                len = header & 0x7FF;
                if (skb->len < len) {
                    status = -EOVERFLOW;
                    goto error;
                }

                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (unlikely(!skb2)) {
                    DBG(cdev, "EEM echo response error\n");
                    goto next;
                }
                skb_trim(skb2, len);
                put_unaligned_le16(BIT(15) | BIT(11) | len,
                                   skb_push(skb2, 2));
                skb_copy_bits(skb2, 0, req->buf, skb2->len);
                req->length = skb2->len;
                req->complete = eem_cmd_complete;
                req->zero = 1;
                req->context = skb2;
                if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
                    DBG(cdev, "echo response queue fail\n");
                break;

            case 1:  /* echo response */
            case 2:  /* suspend hint */
            case 3:  /* response hint */
            case 4:  /* response complete hint */
            case 5:  /* tickle */
            default: /* reserved */
                continue;
            }
        } else {
            u32 crc, crc2;
            struct sk_buff *skb3;

            /* check for zero-length EEM packet */
            if (header == 0)
                continue;

            /* EEM data packet format:
             * b0..13:  length of ethernet frame
             * b14:     bmCRC (0 == sentinel, 1 == calculated)
             * b15:     bmType (0 == data)
             */
            len = header & 0x3FFF;
            if ((skb->len < len)
                    || (len < (ETH_HLEN + ETH_FCS_LEN))) {
                status = -EINVAL;
                goto error;
            }

            /* validate CRC */
            if (header & BIT(14)) {
                crc = get_unaligned_le32(skb->data + len
                                         - ETH_FCS_LEN);
                crc2 = ~crc32_le(~0,
                                 skb->data,
                                 len - ETH_FCS_LEN);
            } else {
                /* sentinel FCS must match the 0xdeadbeef marker */
                crc = get_unaligned_be32(skb->data + len
                                         - ETH_FCS_LEN);
                crc2 = 0xdeadbeef;
            }
            if (crc != crc2) {
                DBG(cdev, "invalid EEM CRC\n");
                goto next;
            }

            skb2 = skb_clone(skb, GFP_ATOMIC);
            if (unlikely(!skb2)) {
                DBG(cdev, "unable to unframe EEM packet\n");
                continue;
            }
            skb_trim(skb2, len - ETH_FCS_LEN);

            /* realign for the network stack's IP header expectations */
            skb3 = skb_copy_expand(skb2,
                                   NET_IP_ALIGN,
                                   0,
                                   GFP_ATOMIC);
            if (unlikely(!skb3)) {
                DBG(cdev, "unable to realign EEM packet\n");
                dev_kfree_skb_any(skb2);
                continue;
            }
            dev_kfree_skb_any(skb2);
            skb_queue_tail(list, skb3);
        }
next:
        /* consume this EEM packet's payload and loop for the next one */
        skb_pull(skb, len);
    } while (skb->len);

error:
    dev_kfree_skb_any(skb);
    return status;
}

/**
 * eem_bind_config - add CDC Ethernet (EEM) network link to a configuration
 * @c: the configuration to support the network link
 * Context: single threaded during gadget setup
 *
 * Returns zero on success, else negative errno.
 *
 * Caller must have called @gether_setup().  Caller is also responsible
 * for calling @gether_cleanup() before module unload.
 */
int __init eem_bind_config(struct usb_configuration *c)
{
    struct f_eem *eem;
    int status;

    /* maybe allocate device-global string IDs */
    if (eem_string_defs[0].id == 0) {

        /* control interface label */
        status = usb_string_id(c->cdev);
        if (status < 0)
            return status;
        eem_string_defs[0].id = status;
        eem_intf.iInterface = status;
    }

    /* allocate and initialize one new instance */
    eem = kzalloc(sizeof *eem, GFP_KERNEL);
    if (!eem)
        return -ENOMEM;

    eem->port.cdc_filter = DEFAULT_FILTER;

    eem->port.func.name = "cdc_eem";
    eem->port.func.strings = eem_strings;
    /* descriptors are per-instance copies */
    eem->port.func.bind = eem_bind;
    eem->port.func.unbind = eem_unbind;
    eem->port.func.set_alt = eem_set_alt;
    eem->port.func.setup = eem_setup;
    eem->port.func.disable = eem_disable;
    eem->port.wrap = eem_wrap;
    eem->port.unwrap = eem_unwrap;
    eem->port.header_len = EEM_HLEN;

    status = usb_add_function(c, &eem->port.func);
    if (status)
        kfree(eem);
    return status;
}
gpl-2.0
flar2/ElementalX-evita-8.0
fs/hostfs/hostfs_kern.c
4414
21069
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL * * Ported the filesystem routines to 2.5. * 2003-02-10 Petr Baudis <pasky@ucw.cz> */ #include <linux/fs.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/statfs.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/mount.h> #include <linux/namei.h> #include "hostfs.h" #include "init.h" #include "kern.h" struct hostfs_inode_info { int fd; fmode_t mode; struct inode vfs_inode; }; static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode) { return list_entry(inode, struct hostfs_inode_info, vfs_inode); } #define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode) static int hostfs_d_delete(const struct dentry *dentry) { return 1; } static const struct dentry_operations hostfs_dentry_ops = { .d_delete = hostfs_d_delete, }; /* Changed in hostfs_args before the kernel starts running */ static char *root_ino = ""; static int append = 0; #define HOSTFS_SUPER_MAGIC 0x00c0ffee static const struct inode_operations hostfs_iops; static const struct inode_operations hostfs_dir_iops; static const struct inode_operations hostfs_link_iops; #ifndef MODULE static int __init hostfs_args(char *options, int *add) { char *ptr; ptr = strchr(options, ','); if (ptr != NULL) *ptr++ = '\0'; if (*options != '\0') root_ino = options; options = ptr; while (options) { ptr = strchr(options, ','); if (ptr != NULL) *ptr++ = '\0'; if (*options != '\0') { if (!strcmp(options, "append")) append = 1; else printf("hostfs_args - unsupported option - %s\n", options); } options = ptr; } return 0; } __uml_setup("hostfs=", hostfs_args, "hostfs=<root dir>,<flags>,...\n" " This is used to set hostfs parameters. The root directory argument\n" " is used to confine all hostfs mounts to within the specified directory\n" " tree on the host. 
If this isn't specified, then a user inside UML can\n" " mount anything on the host that's accessible to the user that's running\n" " it.\n" " The only flag currently supported is 'append', which specifies that all\n" " files opened by hostfs will be opened in append mode.\n\n" ); #endif static char *__dentry_name(struct dentry *dentry, char *name) { char *p = dentry_path_raw(dentry, name, PATH_MAX); char *root; size_t len; root = dentry->d_sb->s_fs_info; len = strlen(root); if (IS_ERR(p)) { __putname(name); return NULL; } strlcpy(name, root, PATH_MAX); if (len > p - name) { __putname(name); return NULL; } if (p > name + len) { char *s = name + len; while ((*s++ = *p++) != '\0') ; } return name; } static char *dentry_name(struct dentry *dentry) { char *name = __getname(); if (!name) return NULL; return __dentry_name(dentry, name); /* will unlock */ } static char *inode_name(struct inode *ino) { struct dentry *dentry; char *name; dentry = d_find_alias(ino); if (!dentry) return NULL; name = dentry_name(dentry); dput(dentry); return name; } static char *follow_link(char *link) { int len, n; char *name, *resolved, *end; len = 64; while (1) { n = -ENOMEM; name = kmalloc(len, GFP_KERNEL); if (name == NULL) goto out; n = hostfs_do_readlink(link, name, len); if (n < len) break; len *= 2; kfree(name); } if (n < 0) goto out_free; if (*name == '/') return name; end = strrchr(link, '/'); if (end == NULL) return name; *(end + 1) = '\0'; len = strlen(link) + strlen(name) + 1; resolved = kmalloc(len, GFP_KERNEL); if (resolved == NULL) { n = -ENOMEM; goto out_free; } sprintf(resolved, "%s%s", link, name); kfree(name); kfree(link); return resolved; out_free: kfree(name); out: return ERR_PTR(n); } static struct inode *hostfs_iget(struct super_block *sb) { struct inode *inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); return inode; } int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf) { /* * do_statfs uses struct statfs64 internally, but the linux kernel * struct 
statfs still has 32-bit versions for most of these fields, * so we convert them here */ int err; long long f_blocks; long long f_bfree; long long f_bavail; long long f_files; long long f_ffree; err = do_statfs(dentry->d_sb->s_fs_info, &sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files, &f_ffree, &sf->f_fsid, sizeof(sf->f_fsid), &sf->f_namelen); if (err) return err; sf->f_blocks = f_blocks; sf->f_bfree = f_bfree; sf->f_bavail = f_bavail; sf->f_files = f_files; sf->f_ffree = f_ffree; sf->f_type = HOSTFS_SUPER_MAGIC; return 0; } static struct inode *hostfs_alloc_inode(struct super_block *sb) { struct hostfs_inode_info *hi; hi = kzalloc(sizeof(*hi), GFP_KERNEL); if (hi == NULL) return NULL; hi->fd = -1; inode_init_once(&hi->vfs_inode); return &hi->vfs_inode; } static void hostfs_evict_inode(struct inode *inode) { truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); if (HOSTFS_I(inode)->fd != -1) { close_file(&HOSTFS_I(inode)->fd); HOSTFS_I(inode)->fd = -1; } } static void hostfs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kfree(HOSTFS_I(inode)); } static void hostfs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, hostfs_i_callback); } static int hostfs_show_options(struct seq_file *seq, struct dentry *root) { const char *root_path = root->d_sb->s_fs_info; size_t offset = strlen(root_ino) + 1; if (strlen(root_path) > offset) seq_printf(seq, ",%s", root_path + offset); return 0; } static const struct super_operations hostfs_sbops = { .alloc_inode = hostfs_alloc_inode, .destroy_inode = hostfs_destroy_inode, .evict_inode = hostfs_evict_inode, .statfs = hostfs_statfs, .show_options = hostfs_show_options, }; int hostfs_readdir(struct file *file, void *ent, filldir_t filldir) { void *dir; char *name; unsigned long long next, ino; int error, len; unsigned int type; name = dentry_name(file->f_path.dentry); if (name == NULL) return -ENOMEM; dir = open_dir(name, &error); __putname(name); if (dir == 
NULL) return -error; next = file->f_pos; while ((name = read_dir(dir, &next, &ino, &len, &type)) != NULL) { error = (*filldir)(ent, name, len, file->f_pos, ino, type); if (error) break; file->f_pos = next; } close_dir(dir); return 0; } int hostfs_file_open(struct inode *ino, struct file *file) { static DEFINE_MUTEX(open_mutex); char *name; fmode_t mode = 0; int err; int r = 0, w = 0, fd; mode = file->f_mode & (FMODE_READ | FMODE_WRITE); if ((mode & HOSTFS_I(ino)->mode) == mode) return 0; mode |= HOSTFS_I(ino)->mode; retry: if (mode & FMODE_READ) r = 1; if (mode & FMODE_WRITE) w = 1; if (w) r = 1; name = dentry_name(file->f_path.dentry); if (name == NULL) return -ENOMEM; fd = open_file(name, r, w, append); __putname(name); if (fd < 0) return fd; mutex_lock(&open_mutex); /* somebody else had handled it first? */ if ((mode & HOSTFS_I(ino)->mode) == mode) { mutex_unlock(&open_mutex); return 0; } if ((mode | HOSTFS_I(ino)->mode) != mode) { mode |= HOSTFS_I(ino)->mode; mutex_unlock(&open_mutex); close_file(&fd); goto retry; } if (HOSTFS_I(ino)->fd == -1) { HOSTFS_I(ino)->fd = fd; } else { err = replace_file(fd, HOSTFS_I(ino)->fd); close_file(&fd); if (err < 0) { mutex_unlock(&open_mutex); return err; } } HOSTFS_I(ino)->mode = mode; mutex_unlock(&open_mutex); return 0; } int hostfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; int ret; ret = filemap_write_and_wait_range(inode->i_mapping, start, end); if (ret) return ret; mutex_lock(&inode->i_mutex); ret = fsync_file(HOSTFS_I(inode)->fd, datasync); mutex_unlock(&inode->i_mutex); return ret; } static const struct file_operations hostfs_file_fops = { .llseek = generic_file_llseek, .read = do_sync_read, .splice_read = generic_file_splice_read, .aio_read = generic_file_aio_read, .aio_write = generic_file_aio_write, .write = do_sync_write, .mmap = generic_file_mmap, .open = hostfs_file_open, .release = NULL, .fsync = hostfs_fsync, }; static const struct 
file_operations hostfs_dir_fops = { .llseek = generic_file_llseek, .readdir = hostfs_readdir, .read = generic_read_dir, }; int hostfs_writepage(struct page *page, struct writeback_control *wbc) { struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; char *buffer; unsigned long long base; int count = PAGE_CACHE_SIZE; int end_index = inode->i_size >> PAGE_CACHE_SHIFT; int err; if (page->index >= end_index) count = inode->i_size & (PAGE_CACHE_SIZE-1); buffer = kmap(page); base = ((unsigned long long) page->index) << PAGE_CACHE_SHIFT; err = write_file(HOSTFS_I(inode)->fd, &base, buffer, count); if (err != count) { ClearPageUptodate(page); goto out; } if (base > inode->i_size) inode->i_size = base; if (PageError(page)) ClearPageError(page); err = 0; out: kunmap(page); unlock_page(page); return err; } int hostfs_readpage(struct file *file, struct page *page) { char *buffer; long long start; int err = 0; start = (long long) page->index << PAGE_CACHE_SHIFT; buffer = kmap(page); err = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer, PAGE_CACHE_SIZE); if (err < 0) goto out; memset(&buffer[err], 0, PAGE_CACHE_SIZE - err); flush_dcache_page(page); SetPageUptodate(page); if (PageError(page)) ClearPageError(page); err = 0; out: kunmap(page); unlock_page(page); return err; } int hostfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { pgoff_t index = pos >> PAGE_CACHE_SHIFT; *pagep = grab_cache_page_write_begin(mapping, index, flags); if (!*pagep) return -ENOMEM; return 0; } int hostfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = mapping->host; void *buffer; unsigned from = pos & (PAGE_CACHE_SIZE - 1); int err; buffer = kmap(page); err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied); kunmap(page); if (!PageUptodate(page) && 
err == PAGE_CACHE_SIZE) SetPageUptodate(page); /* * If err > 0, write_file has added err to pos, so we are comparing * i_size against the last byte written. */ if (err > 0 && (pos > inode->i_size)) inode->i_size = pos; unlock_page(page); page_cache_release(page); return err; } static const struct address_space_operations hostfs_aops = { .writepage = hostfs_writepage, .readpage = hostfs_readpage, .set_page_dirty = __set_page_dirty_nobuffers, .write_begin = hostfs_write_begin, .write_end = hostfs_write_end, }; static int read_name(struct inode *ino, char *name) { dev_t rdev; struct hostfs_stat st; int err = stat_file(name, &st, -1); if (err) return err; /* Reencode maj and min with the kernel encoding.*/ rdev = MKDEV(st.maj, st.min); switch (st.mode & S_IFMT) { case S_IFLNK: ino->i_op = &hostfs_link_iops; break; case S_IFDIR: ino->i_op = &hostfs_dir_iops; ino->i_fop = &hostfs_dir_fops; break; case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: init_special_inode(ino, st.mode & S_IFMT, rdev); ino->i_op = &hostfs_iops; break; default: ino->i_op = &hostfs_iops; ino->i_fop = &hostfs_file_fops; ino->i_mapping->a_ops = &hostfs_aops; } ino->i_ino = st.ino; ino->i_mode = st.mode; set_nlink(ino, st.nlink); ino->i_uid = st.uid; ino->i_gid = st.gid; ino->i_atime = st.atime; ino->i_mtime = st.mtime; ino->i_ctime = st.ctime; ino->i_size = st.size; ino->i_blocks = st.blocks; return 0; } int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { struct inode *inode; char *name; int error, fd; inode = hostfs_iget(dir->i_sb); if (IS_ERR(inode)) { error = PTR_ERR(inode); goto out; } error = -ENOMEM; name = dentry_name(dentry); if (name == NULL) goto out_put; fd = file_create(name, mode & S_IRUSR, mode & S_IWUSR, mode & S_IXUSR, mode & S_IRGRP, mode & S_IWGRP, mode & S_IXGRP, mode & S_IROTH, mode & S_IWOTH, mode & S_IXOTH); if (fd < 0) error = fd; else error = read_name(inode, name); __putname(name); if (error) goto out_put; 
HOSTFS_I(inode)->fd = fd; HOSTFS_I(inode)->mode = FMODE_READ | FMODE_WRITE; d_instantiate(dentry, inode); return 0; out_put: iput(inode); out: return error; } struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry, struct nameidata *nd) { struct inode *inode; char *name; int err; inode = hostfs_iget(ino->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out; } err = -ENOMEM; name = dentry_name(dentry); if (name == NULL) goto out_put; err = read_name(inode, name); __putname(name); if (err == -ENOENT) { iput(inode); inode = NULL; } else if (err) goto out_put; d_add(dentry, inode); return NULL; out_put: iput(inode); out: return ERR_PTR(err); } int hostfs_link(struct dentry *to, struct inode *ino, struct dentry *from) { char *from_name, *to_name; int err; if ((from_name = dentry_name(from)) == NULL) return -ENOMEM; to_name = dentry_name(to); if (to_name == NULL) { __putname(from_name); return -ENOMEM; } err = link_file(to_name, from_name); __putname(from_name); __putname(to_name); return err; } int hostfs_unlink(struct inode *ino, struct dentry *dentry) { char *file; int err; if (append) return -EPERM; if ((file = dentry_name(dentry)) == NULL) return -ENOMEM; err = unlink_file(file); __putname(file); return err; } int hostfs_symlink(struct inode *ino, struct dentry *dentry, const char *to) { char *file; int err; if ((file = dentry_name(dentry)) == NULL) return -ENOMEM; err = make_symlink(file, to); __putname(file); return err; } int hostfs_mkdir(struct inode *ino, struct dentry *dentry, umode_t mode) { char *file; int err; if ((file = dentry_name(dentry)) == NULL) return -ENOMEM; err = do_mkdir(file, mode); __putname(file); return err; } int hostfs_rmdir(struct inode *ino, struct dentry *dentry) { char *file; int err; if ((file = dentry_name(dentry)) == NULL) return -ENOMEM; err = do_rmdir(file); __putname(file); return err; } static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { struct inode *inode; char 
*name; int err; inode = hostfs_iget(dir->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out; } err = -ENOMEM; name = dentry_name(dentry); if (name == NULL) goto out_put; init_special_inode(inode, mode, dev); err = do_mknod(name, mode, MAJOR(dev), MINOR(dev)); if (!err) goto out_free; err = read_name(inode, name); __putname(name); if (err) goto out_put; if (err) goto out_put; d_instantiate(dentry, inode); return 0; out_free: __putname(name); out_put: iput(inode); out: return err; } int hostfs_rename(struct inode *from_ino, struct dentry *from, struct inode *to_ino, struct dentry *to) { char *from_name, *to_name; int err; if ((from_name = dentry_name(from)) == NULL) return -ENOMEM; if ((to_name = dentry_name(to)) == NULL) { __putname(from_name); return -ENOMEM; } err = rename_file(from_name, to_name); __putname(from_name); __putname(to_name); return err; } int hostfs_permission(struct inode *ino, int desired) { char *name; int r = 0, w = 0, x = 0, err; if (desired & MAY_NOT_BLOCK) return -ECHILD; if (desired & MAY_READ) r = 1; if (desired & MAY_WRITE) w = 1; if (desired & MAY_EXEC) x = 1; name = inode_name(ino); if (name == NULL) return -ENOMEM; if (S_ISCHR(ino->i_mode) || S_ISBLK(ino->i_mode) || S_ISFIFO(ino->i_mode) || S_ISSOCK(ino->i_mode)) err = 0; else err = access_file(name, r, w, x); __putname(name); if (!err) err = generic_permission(ino, desired); return err; } int hostfs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; struct hostfs_iattr attrs; char *name; int err; int fd = HOSTFS_I(inode)->fd; err = inode_change_ok(inode, attr); if (err) return err; if (append) attr->ia_valid &= ~ATTR_SIZE; attrs.ia_valid = 0; if (attr->ia_valid & ATTR_MODE) { attrs.ia_valid |= HOSTFS_ATTR_MODE; attrs.ia_mode = attr->ia_mode; } if (attr->ia_valid & ATTR_UID) { attrs.ia_valid |= HOSTFS_ATTR_UID; attrs.ia_uid = attr->ia_uid; } if (attr->ia_valid & ATTR_GID) { attrs.ia_valid |= HOSTFS_ATTR_GID; attrs.ia_gid = attr->ia_gid; } 
if (attr->ia_valid & ATTR_SIZE) { attrs.ia_valid |= HOSTFS_ATTR_SIZE; attrs.ia_size = attr->ia_size; } if (attr->ia_valid & ATTR_ATIME) { attrs.ia_valid |= HOSTFS_ATTR_ATIME; attrs.ia_atime = attr->ia_atime; } if (attr->ia_valid & ATTR_MTIME) { attrs.ia_valid |= HOSTFS_ATTR_MTIME; attrs.ia_mtime = attr->ia_mtime; } if (attr->ia_valid & ATTR_CTIME) { attrs.ia_valid |= HOSTFS_ATTR_CTIME; attrs.ia_ctime = attr->ia_ctime; } if (attr->ia_valid & ATTR_ATIME_SET) { attrs.ia_valid |= HOSTFS_ATTR_ATIME_SET; } if (attr->ia_valid & ATTR_MTIME_SET) { attrs.ia_valid |= HOSTFS_ATTR_MTIME_SET; } name = dentry_name(dentry); if (name == NULL) return -ENOMEM; err = set_attr(name, &attrs, fd); __putname(name); if (err) return err; if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { int error; error = vmtruncate(inode, attr->ia_size); if (err) return err; } setattr_copy(inode, attr); mark_inode_dirty(inode); return 0; } static const struct inode_operations hostfs_iops = { .create = hostfs_create, .link = hostfs_link, .unlink = hostfs_unlink, .symlink = hostfs_symlink, .mkdir = hostfs_mkdir, .rmdir = hostfs_rmdir, .mknod = hostfs_mknod, .rename = hostfs_rename, .permission = hostfs_permission, .setattr = hostfs_setattr, }; static const struct inode_operations hostfs_dir_iops = { .create = hostfs_create, .lookup = hostfs_lookup, .link = hostfs_link, .unlink = hostfs_unlink, .symlink = hostfs_symlink, .mkdir = hostfs_mkdir, .rmdir = hostfs_rmdir, .mknod = hostfs_mknod, .rename = hostfs_rename, .permission = hostfs_permission, .setattr = hostfs_setattr, }; static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd) { char *link = __getname(); if (link) { char *path = dentry_name(dentry); int err = -ENOMEM; if (path) { err = hostfs_do_readlink(path, link, PATH_MAX); if (err == PATH_MAX) err = -E2BIG; __putname(path); } if (err < 0) { __putname(link); link = ERR_PTR(err); } } else { link = ERR_PTR(-ENOMEM); } nd_set_link(nd, link); return NULL; } 
static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) { char *s = nd_get_link(nd); if (!IS_ERR(s)) __putname(s); } static const struct inode_operations hostfs_link_iops = { .readlink = generic_readlink, .follow_link = hostfs_follow_link, .put_link = hostfs_put_link, }; static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) { struct inode *root_inode; char *host_root_path, *req_root = d; int err; sb->s_blocksize = 1024; sb->s_blocksize_bits = 10; sb->s_magic = HOSTFS_SUPER_MAGIC; sb->s_op = &hostfs_sbops; sb->s_d_op = &hostfs_dentry_ops; sb->s_maxbytes = MAX_LFS_FILESIZE; /* NULL is printed as <NULL> by sprintf: avoid that. */ if (req_root == NULL) req_root = ""; err = -ENOMEM; sb->s_fs_info = host_root_path = kmalloc(strlen(root_ino) + strlen(req_root) + 2, GFP_KERNEL); if (host_root_path == NULL) goto out; sprintf(host_root_path, "%s/%s", root_ino, req_root); root_inode = new_inode(sb); if (!root_inode) goto out; err = read_name(root_inode, host_root_path); if (err) goto out_put; if (S_ISLNK(root_inode->i_mode)) { char *name = follow_link(host_root_path); if (IS_ERR(name)) err = PTR_ERR(name); else err = read_name(root_inode, name); kfree(name); if (err) goto out_put; } err = -ENOMEM; sb->s_root = d_make_root(root_inode); if (sb->s_root == NULL) goto out; return 0; out_put: iput(root_inode); out: return err; } static struct dentry *hostfs_read_sb(struct file_system_type *type, int flags, const char *dev_name, void *data) { return mount_nodev(type, flags, data, hostfs_fill_sb_common); } static void hostfs_kill_sb(struct super_block *s) { kill_anon_super(s); kfree(s->s_fs_info); } static struct file_system_type hostfs_type = { .owner = THIS_MODULE, .name = "hostfs", .mount = hostfs_read_sb, .kill_sb = hostfs_kill_sb, .fs_flags = 0, }; static int __init init_hostfs(void) { return register_filesystem(&hostfs_type); } static void __exit exit_hostfs(void) { unregister_filesystem(&hostfs_type); } 
module_init(init_hostfs) module_exit(exit_hostfs) MODULE_LICENSE("GPL");
gpl-2.0
friedrich420/Note4-TMO-AELKernel
drivers/net/ethernet/atheros/atlx/atlx.c
4670
8108
/* atlx.c -- common functions for Attansic network drivers * * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Including this file like a header is a temporary hack, I promise. 
-- CHS */ #ifndef ATLX_C #define ATLX_C #include <linux/device.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/if.h> #include <linux/netdevice.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/workqueue.h> #include "atlx.h" static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data); static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr); static void atlx_set_mac_addr(struct atl1_hw *hw); static struct atlx_spi_flash_dev flash_table[] = { /* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */ {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62}, {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60}, {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7}, }; static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return atlx_mii_ioctl(netdev, ifr, cmd); default: return -EOPNOTSUPP; } } /** * atlx_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure */ static int atlx_set_mac(struct net_device *netdev, void *p) { struct atlx_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; if (netif_running(netdev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atlx_set_mac_addr(&adapter->hw); return 0; } static void atlx_check_for_link(struct atlx_adapter *adapter) { struct net_device *netdev = adapter->netdev; u16 phy_data = 0; spin_lock(&adapter->lock); adapter->phy_timer_pending = false; atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); 
spin_unlock(&adapter->lock); /* notify upper layer link down ASAP */ if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ if (netif_carrier_ok(netdev)) { /* old link state: Up */ dev_info(&adapter->pdev->dev, "%s link is down\n", netdev->name); adapter->link_speed = SPEED_0; netif_carrier_off(netdev); } } schedule_work(&adapter->link_chg_task); } /** * atlx_set_multi - Multicast and Promiscuous mode set * @netdev: network interface device structure * * The set_multi entry point is called whenever the multicast address * list or the network interface flags are updated. This routine is * responsible for configuring the hardware for proper multicast, * promiscuous mode, and all-multi behavior. */ static void atlx_set_multi(struct net_device *netdev) { struct atlx_adapter *adapter = netdev_priv(netdev); struct atlx_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; u32 rctl; u32 hash_value; /* Check for Promiscuous and All Multicast modes */ rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); if (netdev->flags & IFF_PROMISC) rctl |= MAC_CTRL_PROMIS_EN; else if (netdev->flags & IFF_ALLMULTI) { rctl |= MAC_CTRL_MC_ALL_EN; rctl &= ~MAC_CTRL_PROMIS_EN; } else rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); /* clear the old settings from the multicast hash table */ iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); /* compute mc addresses' hash value ,and put it into hash table */ netdev_for_each_mc_addr(ha, netdev) { hash_value = atlx_hash_mc_addr(hw, ha->addr); atlx_hash_set(hw, hash_value); } } static inline void atlx_imr_set(struct atlx_adapter *adapter, unsigned int imr) { iowrite32(imr, adapter->hw.hw_addr + REG_IMR); ioread32(adapter->hw.hw_addr + REG_IMR); } /** * atlx_irq_enable - Enable default interrupt generation settings * @adapter: board private structure */ static void atlx_irq_enable(struct atlx_adapter *adapter) { atlx_imr_set(adapter, IMR_NORMAL_MASK); 
adapter->int_enabled = true; } /** * atlx_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure */ static void atlx_irq_disable(struct atlx_adapter *adapter) { adapter->int_enabled = false; atlx_imr_set(adapter, 0); synchronize_irq(adapter->pdev->irq); } static void atlx_clear_phy_int(struct atlx_adapter *adapter) { u16 phy_data; unsigned long flags; spin_lock_irqsave(&adapter->lock, flags); atlx_read_phy_reg(&adapter->hw, 19, &phy_data); spin_unlock_irqrestore(&adapter->lock, flags); } /** * atlx_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure */ static void atlx_tx_timeout(struct net_device *netdev) { struct atlx_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ schedule_work(&adapter->reset_dev_task); } /* * atlx_link_chg_task - deal with link change event Out of interrupt context */ static void atlx_link_chg_task(struct work_struct *work) { struct atlx_adapter *adapter; unsigned long flags; adapter = container_of(work, struct atlx_adapter, link_chg_task); spin_lock_irqsave(&adapter->lock, flags); atlx_check_link(adapter); spin_unlock_irqrestore(&adapter->lock, flags); } static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl) { if (features & NETIF_F_HW_VLAN_CTAG_RX) { /* enable VLAN tag insert/strip */ *ctrl |= MAC_CTRL_RMV_VLAN; } else { /* disable VLAN tag insert/strip */ *ctrl &= ~MAC_CTRL_RMV_VLAN; } } static void atlx_vlan_mode(struct net_device *netdev, netdev_features_t features) { struct atlx_adapter *adapter = netdev_priv(netdev); unsigned long flags; u32 ctrl; spin_lock_irqsave(&adapter->lock, flags); /* atlx_irq_disable(adapter); FIXME: confirm/remove */ ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); __atlx_vlan_mode(features, &ctrl); iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); /* atlx_irq_enable(adapter); FIXME */ spin_unlock_irqrestore(&adapter->lock, flags); } static void atlx_restore_vlan(struct atlx_adapter 
*adapter) { atlx_vlan_mode(adapter->netdev, adapter->netdev->features); } static netdev_features_t atlx_fix_features(struct net_device *netdev, netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. */ if (features & NETIF_F_HW_VLAN_CTAG_RX) features |= NETIF_F_HW_VLAN_CTAG_TX; else features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } static int atlx_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_CTAG_RX) atlx_vlan_mode(netdev, features); return 0; } #endif /* ATLX_C */
gpl-2.0
fear130986/GT-I9195_EUR_KK_Opensource_kernel
drivers/gpu/drm/nouveau/nouveau_pm.c
4926
24832
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_pm.h" #include "nouveau_gpio.h" #ifdef CONFIG_ACPI #include <linux/acpi.h> #endif #include <linux/power_supply.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> static int nouveau_pwmfan_get(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct gpio_func gpio; u32 divs, duty; int ret; if (!pm->pwm_get) return -ENODEV; ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio); if (ret == 0) { ret = pm->pwm_get(dev, gpio.line, &divs, &duty); if (ret == 0 && divs) { divs = max(divs, duty); if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1)) duty = divs - duty; return (duty * 100) / divs; } return nouveau_gpio_func_get(dev, gpio.func) * 100; } return -ENODEV; } static int nouveau_pwmfan_set(struct drm_device *dev, int percent) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct gpio_func gpio; u32 divs, duty; int ret; if (!pm->pwm_set) return -ENODEV; ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio); if (ret == 0) { divs = pm->fan.pwm_divisor; if (pm->fan.pwm_freq) { /*XXX: PNVIO clock more than likely... */ divs = 135000 / pm->fan.pwm_freq; if (dev_priv->chipset < 0xa3) divs /= 4; } duty = ((divs * percent) + 99) / 100; if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1)) duty = divs - duty; ret = pm->pwm_set(dev, gpio.line, divs, duty); if (!ret) pm->fan.percent = percent; return ret; } return -ENODEV; } static int nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl, struct nouveau_pm_level *a, struct nouveau_pm_level *b) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; int ret; /*XXX: not on all boards, we should control based on temperature * on recent boards.. 
or maybe on some other factor we don't * know about? */ if (a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) { ret = nouveau_pwmfan_set(dev, perflvl->fanspeed); if (ret && ret != -ENODEV) { NV_ERROR(dev, "fanspeed set failed: %d\n", ret); return ret; } } if (pm->voltage.supported && pm->voltage_set) { if (perflvl->volt_min && b->volt_min > a->volt_min) { ret = pm->voltage_set(dev, perflvl->volt_min); if (ret) { NV_ERROR(dev, "voltage set failed: %d\n", ret); return ret; } } } return 0; } static int nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; void *state; int ret; if (perflvl == pm->cur) return 0; ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl); if (ret) return ret; state = pm->clocks_pre(dev, perflvl); if (IS_ERR(state)) { ret = PTR_ERR(state); goto error; } ret = pm->clocks_set(dev, state); if (ret) goto error; ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur); if (ret) return ret; pm->cur = perflvl; return 0; error: /* restore the fan speed and voltage before leaving */ nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur); return ret; } void nouveau_pm_trigger(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_profile *profile = NULL; struct nouveau_pm_level *perflvl = NULL; int ret; /* select power profile based on current power source */ if (power_supply_is_system_supplied()) profile = pm->profile_ac; else profile = pm->profile_dc; if (profile != pm->profile) { pm->profile->func->fini(pm->profile); pm->profile = profile; pm->profile->func->init(pm->profile); } /* select performance level based on profile */ perflvl = profile->func->select(profile); /* change perflvl, if necessary */ if (perflvl != pm->cur) { struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; u64 time0 
= ptimer->read(dev); NV_INFO(dev, "setting performance level: %d", perflvl->id); ret = nouveau_pm_perflvl_set(dev, perflvl); if (ret) NV_INFO(dev, "> reclocking failed: %d\n\n", ret); NV_INFO(dev, "> reclocking took %lluns\n\n", ptimer->read(dev) - time0); } } static struct nouveau_pm_profile * profile_find(struct drm_device *dev, const char *string) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_profile *profile; list_for_each_entry(profile, &pm->profiles, head) { if (!strncmp(profile->name, string, sizeof(profile->name))) return profile; } return NULL; } static int nouveau_pm_profile_set(struct drm_device *dev, const char *profile) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_profile *ac = NULL, *dc = NULL; char string[16], *cur = string, *ptr; /* safety precaution, for now */ if (nouveau_perflvl_wr != 7777) return -EPERM; strncpy(string, profile, sizeof(string)); string[sizeof(string) - 1] = 0; if ((ptr = strchr(string, '\n'))) *ptr = '\0'; ptr = strsep(&cur, ","); if (ptr) ac = profile_find(dev, ptr); ptr = strsep(&cur, ","); if (ptr) dc = profile_find(dev, ptr); else dc = ac; if (ac == NULL || dc == NULL) return -EINVAL; pm->profile_ac = ac; pm->profile_dc = dc; nouveau_pm_trigger(dev); return 0; } static void nouveau_pm_static_dummy(struct nouveau_pm_profile *profile) { } static struct nouveau_pm_level * nouveau_pm_static_select(struct nouveau_pm_profile *profile) { return container_of(profile, struct nouveau_pm_level, profile); } const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = { .destroy = nouveau_pm_static_dummy, .init = nouveau_pm_static_dummy, .fini = nouveau_pm_static_dummy, .select = nouveau_pm_static_select, }; static int nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct 
nouveau_pm_engine *pm = &dev_priv->engine.pm; int ret; memset(perflvl, 0, sizeof(*perflvl)); if (pm->clocks_get) { ret = pm->clocks_get(dev, perflvl); if (ret) return ret; } if (pm->voltage.supported && pm->voltage_get) { ret = pm->voltage_get(dev); if (ret > 0) { perflvl->volt_min = ret; perflvl->volt_max = ret; } } ret = nouveau_pwmfan_get(dev); if (ret > 0) perflvl->fanspeed = ret; nouveau_mem_timing_read(dev, &perflvl->timing); return 0; } static void nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) { char c[16], s[16], v[32], f[16], m[16]; c[0] = '\0'; if (perflvl->core) snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000); s[0] = '\0'; if (perflvl->shader) snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000); m[0] = '\0'; if (perflvl->memory) snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000); v[0] = '\0'; if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) { snprintf(v, sizeof(v), " voltage %dmV-%dmV", perflvl->volt_min / 1000, perflvl->volt_max / 1000); } else if (perflvl->volt_min) { snprintf(v, sizeof(v), " voltage %dmV", perflvl->volt_min / 1000); } f[0] = '\0'; if (perflvl->fanspeed) snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed); snprintf(ptr, len, "%s%s%s%s%s\n", c, s, m, v, f); } static ssize_t nouveau_pm_get_perflvl_info(struct device *d, struct device_attribute *a, char *buf) { struct nouveau_pm_level *perflvl = container_of(a, struct nouveau_pm_level, dev_attr); char *ptr = buf; int len = PAGE_SIZE; snprintf(ptr, len, "%d:", perflvl->id); ptr += strlen(buf); len -= strlen(buf); nouveau_pm_perflvl_info(perflvl, ptr, len); return strlen(buf); } static ssize_t nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_level cur; int len = PAGE_SIZE, ret; char 
*ptr = buf; snprintf(ptr, len, "profile: %s, %s\nc:", pm->profile_ac->name, pm->profile_dc->name); ptr += strlen(buf); len -= strlen(buf); ret = nouveau_pm_perflvl_get(dev, &cur); if (ret == 0) nouveau_pm_perflvl_info(&cur, ptr, len); return strlen(buf); } static ssize_t nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a, const char *buf, size_t count) { struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); int ret; ret = nouveau_pm_profile_set(dev, buf); if (ret) return ret; return strlen(buf); } static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR, nouveau_pm_get_perflvl, nouveau_pm_set_perflvl); static int nouveau_sysfs_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct device *d = &dev->pdev->dev; int ret, i; ret = device_create_file(d, &dev_attr_performance_level); if (ret) return ret; for (i = 0; i < pm->nr_perflvl; i++) { struct nouveau_pm_level *perflvl = &pm->perflvl[i]; perflvl->dev_attr.attr.name = perflvl->name; perflvl->dev_attr.attr.mode = S_IRUGO; perflvl->dev_attr.show = nouveau_pm_get_perflvl_info; perflvl->dev_attr.store = NULL; sysfs_attr_init(&perflvl->dev_attr.attr); ret = device_create_file(d, &perflvl->dev_attr); if (ret) { NV_ERROR(dev, "failed pervlvl %d sysfs: %d\n", perflvl->id, i); perflvl->dev_attr.attr.name = NULL; nouveau_pm_fini(dev); return ret; } } return 0; } static void nouveau_sysfs_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct device *d = &dev->pdev->dev; int i; device_remove_file(d, &dev_attr_performance_level); for (i = 0; i < pm->nr_perflvl; i++) { struct nouveau_pm_level *pl = &pm->perflvl[i]; if (!pl->dev_attr.attr.name) break; device_remove_file(d, &pl->dev_attr); } } #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) static ssize_t nouveau_hwmon_show_temp(struct device *d, 
struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; return snprintf(buf, PAGE_SIZE, "%d\n", pm->temp_get(dev)*1000); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, NULL, 0); static ssize_t nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; return snprintf(buf, PAGE_SIZE, "%d\n", temp->down_clock*1000); } static ssize_t nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a, const char *buf, size_t count) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; long value; if (kstrtol(buf, 10, &value) == -EINVAL) return count; temp->down_clock = value/1000; nouveau_temp_safety_checks(dev); return count; } static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp, nouveau_hwmon_set_max_temp, 0); static ssize_t nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; return snprintf(buf, PAGE_SIZE, "%d\n", temp->critical*1000); } static ssize_t nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a, const char *buf, size_t count) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct 
nouveau_pm_threshold_temp *temp = &pm->threshold_temp; long value; if (kstrtol(buf, 10, &value) == -EINVAL) return count; temp->critical = value/1000; nouveau_temp_safety_checks(dev); return count; } static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, nouveau_hwmon_critical_temp, nouveau_hwmon_set_critical_temp, 0); static ssize_t nouveau_hwmon_show_name(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "nouveau\n"); } static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0); static ssize_t nouveau_hwmon_show_update_rate(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "1000\n"); } static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO, nouveau_hwmon_show_update_rate, NULL, 0); static ssize_t nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; struct gpio_func gpio; u32 cycles, cur, prev; u64 start; int ret; ret = nouveau_gpio_find(dev, 0, DCB_GPIO_FAN_SENSE, 0xff, &gpio); if (ret) return ret; /* Monitor the GPIO input 0x3b for 250ms. * When the fan spins, it changes the value of GPIO FAN_SENSE. * We get 4 changes (0 -> 1 -> 0 -> 1 -> [...]) per complete rotation. 
*/ start = ptimer->read(dev); prev = nouveau_gpio_sense(dev, 0, gpio.line); cycles = 0; do { cur = nouveau_gpio_sense(dev, 0, gpio.line); if (prev != cur) { cycles++; prev = cur; } usleep_range(500, 1000); /* supports 0 < rpm < 7500 */ } while (ptimer->read(dev) - start < 250000000); /* interpolate to get rpm */ return sprintf(buf, "%i\n", cycles / 4 * 4 * 60); } static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input, NULL, 0); static ssize_t nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); int ret; ret = nouveau_pwmfan_get(dev); if (ret < 0) return ret; return sprintf(buf, "%i\n", ret); } static ssize_t nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a, const char *buf, size_t count) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; int ret = -ENODEV; long value; if (nouveau_perflvl_wr != 7777) return -EPERM; if (kstrtol(buf, 10, &value) == -EINVAL) return -EINVAL; if (value < pm->fan.min_duty) value = pm->fan.min_duty; if (value > pm->fan.max_duty) value = pm->fan.max_duty; ret = nouveau_pwmfan_set(dev, value); if (ret) return ret; return count; } static SENSOR_DEVICE_ATTR(pwm0, S_IRUGO | S_IWUSR, nouveau_hwmon_get_pwm0, nouveau_hwmon_set_pwm0, 0); static ssize_t nouveau_hwmon_get_pwm0_min(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; return sprintf(buf, "%i\n", pm->fan.min_duty); } static ssize_t nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a, const char *buf, size_t count) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; long value; if 
(kstrtol(buf, 10, &value) == -EINVAL) return -EINVAL; if (value < 0) value = 0; if (pm->fan.max_duty - value < 10) value = pm->fan.max_duty - 10; if (value < 10) pm->fan.min_duty = 10; else pm->fan.min_duty = value; return count; } static SENSOR_DEVICE_ATTR(pwm0_min, S_IRUGO | S_IWUSR, nouveau_hwmon_get_pwm0_min, nouveau_hwmon_set_pwm0_min, 0); static ssize_t nouveau_hwmon_get_pwm0_max(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; return sprintf(buf, "%i\n", pm->fan.max_duty); } static ssize_t nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a, const char *buf, size_t count) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; long value; if (kstrtol(buf, 10, &value) == -EINVAL) return -EINVAL; if (value < 0) value = 0; if (value - pm->fan.min_duty < 10) value = pm->fan.min_duty + 10; if (value > 100) pm->fan.max_duty = 100; else pm->fan.max_duty = value; return count; } static SENSOR_DEVICE_ATTR(pwm0_max, S_IRUGO | S_IWUSR, nouveau_hwmon_get_pwm0_max, nouveau_hwmon_set_pwm0_max, 0); static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_name.dev_attr.attr, &sensor_dev_attr_update_rate.dev_attr.attr, NULL }; static struct attribute *hwmon_fan_rpm_attributes[] = { &sensor_dev_attr_fan0_input.dev_attr.attr, NULL }; static struct attribute *hwmon_pwm_fan_attributes[] = { &sensor_dev_attr_pwm0.dev_attr.attr, &sensor_dev_attr_pwm0_min.dev_attr.attr, &sensor_dev_attr_pwm0_max.dev_attr.attr, NULL }; static const struct attribute_group hwmon_attrgroup = { .attrs = hwmon_attributes, }; static const struct attribute_group hwmon_fan_rpm_attrgroup = { 
.attrs = hwmon_fan_rpm_attributes, }; static const struct attribute_group hwmon_pwm_fan_attrgroup = { .attrs = hwmon_pwm_fan_attributes, }; #endif static int nouveau_hwmon_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) struct device *hwmon_dev; int ret = 0; if (!pm->temp_get) return -ENODEV; hwmon_dev = hwmon_device_register(&dev->pdev->dev); if (IS_ERR(hwmon_dev)) { ret = PTR_ERR(hwmon_dev); NV_ERROR(dev, "Unable to register hwmon device: %d\n", ret); return ret; } dev_set_drvdata(hwmon_dev, dev); /* default sysfs entries */ ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); if (ret) { if (ret) goto error; } /* if the card has a pwm fan */ /*XXX: incorrect, need better detection for this, some boards have * the gpio entries for pwm fan control even when there's no * actual fan connected to it... therm table? 
*/ if (nouveau_pwmfan_get(dev) >= 0) { ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_pwm_fan_attrgroup); if (ret) goto error; } /* if the card can read the fan rpm */ if (nouveau_gpio_func_valid(dev, DCB_GPIO_FAN_SENSE)) { ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_fan_rpm_attrgroup); if (ret) goto error; } pm->hwmon = hwmon_dev; return 0; error: NV_ERROR(dev, "Unable to create some hwmon sysfs files: %d\n", ret); hwmon_device_unregister(hwmon_dev); pm->hwmon = NULL; return ret; #else pm->hwmon = NULL; return 0; #endif } static void nouveau_hwmon_fini(struct drm_device *dev) { #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; if (pm->hwmon) { sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_pwm_fan_attrgroup); sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_fan_rpm_attrgroup); hwmon_device_unregister(pm->hwmon); } #endif } #if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) static int nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data) { struct drm_nouveau_private *dev_priv = container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb); struct drm_device *dev = dev_priv->dev; struct acpi_bus_event *entry = (struct acpi_bus_event *)data; if (strcmp(entry->device_class, "ac_adapter") == 0) { bool ac = power_supply_is_system_supplied(); NV_DEBUG(dev, "power supply changed: %s\n", ac ? 
"AC" : "DC"); nouveau_pm_trigger(dev); } return NOTIFY_OK; } #endif int nouveau_pm_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; char info[256]; int ret, i; /* parse aux tables from vbios */ nouveau_volt_init(dev); nouveau_temp_init(dev); /* determine current ("boot") performance level */ ret = nouveau_pm_perflvl_get(dev, &pm->boot); if (ret) { NV_ERROR(dev, "failed to determine boot perflvl\n"); return ret; } strncpy(pm->boot.name, "boot", 4); strncpy(pm->boot.profile.name, "boot", 4); pm->boot.profile.func = &nouveau_pm_static_profile_func; INIT_LIST_HEAD(&pm->profiles); list_add(&pm->boot.profile.head, &pm->profiles); pm->profile_ac = &pm->boot.profile; pm->profile_dc = &pm->boot.profile; pm->profile = &pm->boot.profile; pm->cur = &pm->boot; /* add performance levels from vbios */ nouveau_perf_init(dev); /* display available performance levels */ NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); for (i = 0; i < pm->nr_perflvl; i++) { nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info); } nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); NV_INFO(dev, "c:%s", info); /* switch performance levels now if requested */ if (nouveau_perflvl != NULL) nouveau_pm_profile_set(dev, nouveau_perflvl); /* determine the current fan speed */ pm->fan.percent = nouveau_pwmfan_get(dev); nouveau_sysfs_init(dev); nouveau_hwmon_init(dev); #if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) pm->acpi_nb.notifier_call = nouveau_pm_acpi_event; register_acpi_notifier(&pm->acpi_nb); #endif return 0; } void nouveau_pm_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_profile *profile, *tmp; list_for_each_entry_safe(profile, tmp, &pm->profiles, head) { list_del(&profile->head); 
profile->func->destroy(profile); } if (pm->cur != &pm->boot) nouveau_pm_perflvl_set(dev, &pm->boot); nouveau_temp_fini(dev); nouveau_perf_fini(dev); nouveau_volt_fini(dev); #if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) unregister_acpi_notifier(&pm->acpi_nb); #endif nouveau_hwmon_fini(dev); nouveau_sysfs_fini(dev); } void nouveau_pm_resume(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_level *perflvl; if (!pm->cur || pm->cur == &pm->boot) return; perflvl = pm->cur; pm->cur = &pm->boot; nouveau_pm_perflvl_set(dev, perflvl); nouveau_pwmfan_set(dev, pm->fan.percent); }
gpl-2.0
AndroPlus-org/sony_sources-H2_2014
drivers/gpu/drm/nouveau/nouveau_pm.c
4926
24832
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_pm.h" #include "nouveau_gpio.h" #ifdef CONFIG_ACPI #include <linux/acpi.h> #endif #include <linux/power_supply.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> static int nouveau_pwmfan_get(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct gpio_func gpio; u32 divs, duty; int ret; if (!pm->pwm_get) return -ENODEV; ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio); if (ret == 0) { ret = pm->pwm_get(dev, gpio.line, &divs, &duty); if (ret == 0 && divs) { divs = max(divs, duty); if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1)) duty = divs - duty; return (duty * 100) / divs; } return nouveau_gpio_func_get(dev, gpio.func) * 100; } return -ENODEV; } static int nouveau_pwmfan_set(struct drm_device *dev, int percent) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct gpio_func gpio; u32 divs, duty; int ret; if (!pm->pwm_set) return -ENODEV; ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio); if (ret == 0) { divs = pm->fan.pwm_divisor; if (pm->fan.pwm_freq) { /*XXX: PNVIO clock more than likely... */ divs = 135000 / pm->fan.pwm_freq; if (dev_priv->chipset < 0xa3) divs /= 4; } duty = ((divs * percent) + 99) / 100; if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1)) duty = divs - duty; ret = pm->pwm_set(dev, gpio.line, divs, duty); if (!ret) pm->fan.percent = percent; return ret; } return -ENODEV; } static int nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl, struct nouveau_pm_level *a, struct nouveau_pm_level *b) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; int ret; /*XXX: not on all boards, we should control based on temperature * on recent boards.. 
or maybe on some other factor we don't * know about? */ if (a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) { ret = nouveau_pwmfan_set(dev, perflvl->fanspeed); if (ret && ret != -ENODEV) { NV_ERROR(dev, "fanspeed set failed: %d\n", ret); return ret; } } if (pm->voltage.supported && pm->voltage_set) { if (perflvl->volt_min && b->volt_min > a->volt_min) { ret = pm->voltage_set(dev, perflvl->volt_min); if (ret) { NV_ERROR(dev, "voltage set failed: %d\n", ret); return ret; } } } return 0; } static int nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; void *state; int ret; if (perflvl == pm->cur) return 0; ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl); if (ret) return ret; state = pm->clocks_pre(dev, perflvl); if (IS_ERR(state)) { ret = PTR_ERR(state); goto error; } ret = pm->clocks_set(dev, state); if (ret) goto error; ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur); if (ret) return ret; pm->cur = perflvl; return 0; error: /* restore the fan speed and voltage before leaving */ nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur); return ret; } void nouveau_pm_trigger(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_profile *profile = NULL; struct nouveau_pm_level *perflvl = NULL; int ret; /* select power profile based on current power source */ if (power_supply_is_system_supplied()) profile = pm->profile_ac; else profile = pm->profile_dc; if (profile != pm->profile) { pm->profile->func->fini(pm->profile); pm->profile = profile; pm->profile->func->init(pm->profile); } /* select performance level based on profile */ perflvl = profile->func->select(profile); /* change perflvl, if necessary */ if (perflvl != pm->cur) { struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; u64 time0 
= ptimer->read(dev); NV_INFO(dev, "setting performance level: %d", perflvl->id); ret = nouveau_pm_perflvl_set(dev, perflvl); if (ret) NV_INFO(dev, "> reclocking failed: %d\n\n", ret); NV_INFO(dev, "> reclocking took %lluns\n\n", ptimer->read(dev) - time0); } } static struct nouveau_pm_profile * profile_find(struct drm_device *dev, const char *string) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_profile *profile; list_for_each_entry(profile, &pm->profiles, head) { if (!strncmp(profile->name, string, sizeof(profile->name))) return profile; } return NULL; } static int nouveau_pm_profile_set(struct drm_device *dev, const char *profile) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_profile *ac = NULL, *dc = NULL; char string[16], *cur = string, *ptr; /* safety precaution, for now */ if (nouveau_perflvl_wr != 7777) return -EPERM; strncpy(string, profile, sizeof(string)); string[sizeof(string) - 1] = 0; if ((ptr = strchr(string, '\n'))) *ptr = '\0'; ptr = strsep(&cur, ","); if (ptr) ac = profile_find(dev, ptr); ptr = strsep(&cur, ","); if (ptr) dc = profile_find(dev, ptr); else dc = ac; if (ac == NULL || dc == NULL) return -EINVAL; pm->profile_ac = ac; pm->profile_dc = dc; nouveau_pm_trigger(dev); return 0; } static void nouveau_pm_static_dummy(struct nouveau_pm_profile *profile) { } static struct nouveau_pm_level * nouveau_pm_static_select(struct nouveau_pm_profile *profile) { return container_of(profile, struct nouveau_pm_level, profile); } const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = { .destroy = nouveau_pm_static_dummy, .init = nouveau_pm_static_dummy, .fini = nouveau_pm_static_dummy, .select = nouveau_pm_static_select, }; static int nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct 
nouveau_pm_engine *pm = &dev_priv->engine.pm; int ret; memset(perflvl, 0, sizeof(*perflvl)); if (pm->clocks_get) { ret = pm->clocks_get(dev, perflvl); if (ret) return ret; } if (pm->voltage.supported && pm->voltage_get) { ret = pm->voltage_get(dev); if (ret > 0) { perflvl->volt_min = ret; perflvl->volt_max = ret; } } ret = nouveau_pwmfan_get(dev); if (ret > 0) perflvl->fanspeed = ret; nouveau_mem_timing_read(dev, &perflvl->timing); return 0; } static void nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) { char c[16], s[16], v[32], f[16], m[16]; c[0] = '\0'; if (perflvl->core) snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000); s[0] = '\0'; if (perflvl->shader) snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000); m[0] = '\0'; if (perflvl->memory) snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000); v[0] = '\0'; if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) { snprintf(v, sizeof(v), " voltage %dmV-%dmV", perflvl->volt_min / 1000, perflvl->volt_max / 1000); } else if (perflvl->volt_min) { snprintf(v, sizeof(v), " voltage %dmV", perflvl->volt_min / 1000); } f[0] = '\0'; if (perflvl->fanspeed) snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed); snprintf(ptr, len, "%s%s%s%s%s\n", c, s, m, v, f); } static ssize_t nouveau_pm_get_perflvl_info(struct device *d, struct device_attribute *a, char *buf) { struct nouveau_pm_level *perflvl = container_of(a, struct nouveau_pm_level, dev_attr); char *ptr = buf; int len = PAGE_SIZE; snprintf(ptr, len, "%d:", perflvl->id); ptr += strlen(buf); len -= strlen(buf); nouveau_pm_perflvl_info(perflvl, ptr, len); return strlen(buf); } static ssize_t nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_level cur; int len = PAGE_SIZE, ret; char 
*ptr = buf; snprintf(ptr, len, "profile: %s, %s\nc:", pm->profile_ac->name, pm->profile_dc->name); ptr += strlen(buf); len -= strlen(buf); ret = nouveau_pm_perflvl_get(dev, &cur); if (ret == 0) nouveau_pm_perflvl_info(&cur, ptr, len); return strlen(buf); } static ssize_t nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a, const char *buf, size_t count) { struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); int ret; ret = nouveau_pm_profile_set(dev, buf); if (ret) return ret; return strlen(buf); } static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR, nouveau_pm_get_perflvl, nouveau_pm_set_perflvl); static int nouveau_sysfs_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct device *d = &dev->pdev->dev; int ret, i; ret = device_create_file(d, &dev_attr_performance_level); if (ret) return ret; for (i = 0; i < pm->nr_perflvl; i++) { struct nouveau_pm_level *perflvl = &pm->perflvl[i]; perflvl->dev_attr.attr.name = perflvl->name; perflvl->dev_attr.attr.mode = S_IRUGO; perflvl->dev_attr.show = nouveau_pm_get_perflvl_info; perflvl->dev_attr.store = NULL; sysfs_attr_init(&perflvl->dev_attr.attr); ret = device_create_file(d, &perflvl->dev_attr); if (ret) { NV_ERROR(dev, "failed pervlvl %d sysfs: %d\n", perflvl->id, i); perflvl->dev_attr.attr.name = NULL; nouveau_pm_fini(dev); return ret; } } return 0; } static void nouveau_sysfs_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct device *d = &dev->pdev->dev; int i; device_remove_file(d, &dev_attr_performance_level); for (i = 0; i < pm->nr_perflvl; i++) { struct nouveau_pm_level *pl = &pm->perflvl[i]; if (!pl->dev_attr.attr.name) break; device_remove_file(d, &pl->dev_attr); } } #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) static ssize_t nouveau_hwmon_show_temp(struct device *d, 
struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; return snprintf(buf, PAGE_SIZE, "%d\n", pm->temp_get(dev)*1000); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, NULL, 0); static ssize_t nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; return snprintf(buf, PAGE_SIZE, "%d\n", temp->down_clock*1000); } static ssize_t nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a, const char *buf, size_t count) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; long value; if (kstrtol(buf, 10, &value) == -EINVAL) return count; temp->down_clock = value/1000; nouveau_temp_safety_checks(dev); return count; } static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp, nouveau_hwmon_set_max_temp, 0); static ssize_t nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; return snprintf(buf, PAGE_SIZE, "%d\n", temp->critical*1000); } static ssize_t nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a, const char *buf, size_t count) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct 
nouveau_pm_threshold_temp *temp = &pm->threshold_temp; long value; if (kstrtol(buf, 10, &value) == -EINVAL) return count; temp->critical = value/1000; nouveau_temp_safety_checks(dev); return count; } static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, nouveau_hwmon_critical_temp, nouveau_hwmon_set_critical_temp, 0); static ssize_t nouveau_hwmon_show_name(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "nouveau\n"); } static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0); static ssize_t nouveau_hwmon_show_update_rate(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "1000\n"); } static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO, nouveau_hwmon_show_update_rate, NULL, 0); static ssize_t nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; struct gpio_func gpio; u32 cycles, cur, prev; u64 start; int ret; ret = nouveau_gpio_find(dev, 0, DCB_GPIO_FAN_SENSE, 0xff, &gpio); if (ret) return ret; /* Monitor the GPIO input 0x3b for 250ms. * When the fan spins, it changes the value of GPIO FAN_SENSE. * We get 4 changes (0 -> 1 -> 0 -> 1 -> [...]) per complete rotation. 
*/ start = ptimer->read(dev); prev = nouveau_gpio_sense(dev, 0, gpio.line); cycles = 0; do { cur = nouveau_gpio_sense(dev, 0, gpio.line); if (prev != cur) { cycles++; prev = cur; } usleep_range(500, 1000); /* supports 0 < rpm < 7500 */ } while (ptimer->read(dev) - start < 250000000); /* interpolate to get rpm */ return sprintf(buf, "%i\n", cycles / 4 * 4 * 60); } static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input, NULL, 0); static ssize_t nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); int ret; ret = nouveau_pwmfan_get(dev); if (ret < 0) return ret; return sprintf(buf, "%i\n", ret); } static ssize_t nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a, const char *buf, size_t count) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; int ret = -ENODEV; long value; if (nouveau_perflvl_wr != 7777) return -EPERM; if (kstrtol(buf, 10, &value) == -EINVAL) return -EINVAL; if (value < pm->fan.min_duty) value = pm->fan.min_duty; if (value > pm->fan.max_duty) value = pm->fan.max_duty; ret = nouveau_pwmfan_set(dev, value); if (ret) return ret; return count; } static SENSOR_DEVICE_ATTR(pwm0, S_IRUGO | S_IWUSR, nouveau_hwmon_get_pwm0, nouveau_hwmon_set_pwm0, 0); static ssize_t nouveau_hwmon_get_pwm0_min(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; return sprintf(buf, "%i\n", pm->fan.min_duty); } static ssize_t nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a, const char *buf, size_t count) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; long value; if 
(kstrtol(buf, 10, &value) == -EINVAL) return -EINVAL; if (value < 0) value = 0; if (pm->fan.max_duty - value < 10) value = pm->fan.max_duty - 10; if (value < 10) pm->fan.min_duty = 10; else pm->fan.min_duty = value; return count; } static SENSOR_DEVICE_ATTR(pwm0_min, S_IRUGO | S_IWUSR, nouveau_hwmon_get_pwm0_min, nouveau_hwmon_set_pwm0_min, 0); static ssize_t nouveau_hwmon_get_pwm0_max(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; return sprintf(buf, "%i\n", pm->fan.max_duty); } static ssize_t nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a, const char *buf, size_t count) { struct drm_device *dev = dev_get_drvdata(d); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; long value; if (kstrtol(buf, 10, &value) == -EINVAL) return -EINVAL; if (value < 0) value = 0; if (value - pm->fan.min_duty < 10) value = pm->fan.min_duty + 10; if (value > 100) pm->fan.max_duty = 100; else pm->fan.max_duty = value; return count; } static SENSOR_DEVICE_ATTR(pwm0_max, S_IRUGO | S_IWUSR, nouveau_hwmon_get_pwm0_max, nouveau_hwmon_set_pwm0_max, 0); static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_name.dev_attr.attr, &sensor_dev_attr_update_rate.dev_attr.attr, NULL }; static struct attribute *hwmon_fan_rpm_attributes[] = { &sensor_dev_attr_fan0_input.dev_attr.attr, NULL }; static struct attribute *hwmon_pwm_fan_attributes[] = { &sensor_dev_attr_pwm0.dev_attr.attr, &sensor_dev_attr_pwm0_min.dev_attr.attr, &sensor_dev_attr_pwm0_max.dev_attr.attr, NULL }; static const struct attribute_group hwmon_attrgroup = { .attrs = hwmon_attributes, }; static const struct attribute_group hwmon_fan_rpm_attrgroup = { 
.attrs = hwmon_fan_rpm_attributes, }; static const struct attribute_group hwmon_pwm_fan_attrgroup = { .attrs = hwmon_pwm_fan_attributes, }; #endif static int nouveau_hwmon_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) struct device *hwmon_dev; int ret = 0; if (!pm->temp_get) return -ENODEV; hwmon_dev = hwmon_device_register(&dev->pdev->dev); if (IS_ERR(hwmon_dev)) { ret = PTR_ERR(hwmon_dev); NV_ERROR(dev, "Unable to register hwmon device: %d\n", ret); return ret; } dev_set_drvdata(hwmon_dev, dev); /* default sysfs entries */ ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); if (ret) { if (ret) goto error; } /* if the card has a pwm fan */ /*XXX: incorrect, need better detection for this, some boards have * the gpio entries for pwm fan control even when there's no * actual fan connected to it... therm table? 
*/ if (nouveau_pwmfan_get(dev) >= 0) { ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_pwm_fan_attrgroup); if (ret) goto error; } /* if the card can read the fan rpm */ if (nouveau_gpio_func_valid(dev, DCB_GPIO_FAN_SENSE)) { ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_fan_rpm_attrgroup); if (ret) goto error; } pm->hwmon = hwmon_dev; return 0; error: NV_ERROR(dev, "Unable to create some hwmon sysfs files: %d\n", ret); hwmon_device_unregister(hwmon_dev); pm->hwmon = NULL; return ret; #else pm->hwmon = NULL; return 0; #endif } static void nouveau_hwmon_fini(struct drm_device *dev) { #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; if (pm->hwmon) { sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_pwm_fan_attrgroup); sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_fan_rpm_attrgroup); hwmon_device_unregister(pm->hwmon); } #endif } #if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) static int nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data) { struct drm_nouveau_private *dev_priv = container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb); struct drm_device *dev = dev_priv->dev; struct acpi_bus_event *entry = (struct acpi_bus_event *)data; if (strcmp(entry->device_class, "ac_adapter") == 0) { bool ac = power_supply_is_system_supplied(); NV_DEBUG(dev, "power supply changed: %s\n", ac ? 
"AC" : "DC"); nouveau_pm_trigger(dev); } return NOTIFY_OK; } #endif int nouveau_pm_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; char info[256]; int ret, i; /* parse aux tables from vbios */ nouveau_volt_init(dev); nouveau_temp_init(dev); /* determine current ("boot") performance level */ ret = nouveau_pm_perflvl_get(dev, &pm->boot); if (ret) { NV_ERROR(dev, "failed to determine boot perflvl\n"); return ret; } strncpy(pm->boot.name, "boot", 4); strncpy(pm->boot.profile.name, "boot", 4); pm->boot.profile.func = &nouveau_pm_static_profile_func; INIT_LIST_HEAD(&pm->profiles); list_add(&pm->boot.profile.head, &pm->profiles); pm->profile_ac = &pm->boot.profile; pm->profile_dc = &pm->boot.profile; pm->profile = &pm->boot.profile; pm->cur = &pm->boot; /* add performance levels from vbios */ nouveau_perf_init(dev); /* display available performance levels */ NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); for (i = 0; i < pm->nr_perflvl; i++) { nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info); } nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); NV_INFO(dev, "c:%s", info); /* switch performance levels now if requested */ if (nouveau_perflvl != NULL) nouveau_pm_profile_set(dev, nouveau_perflvl); /* determine the current fan speed */ pm->fan.percent = nouveau_pwmfan_get(dev); nouveau_sysfs_init(dev); nouveau_hwmon_init(dev); #if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) pm->acpi_nb.notifier_call = nouveau_pm_acpi_event; register_acpi_notifier(&pm->acpi_nb); #endif return 0; } void nouveau_pm_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_profile *profile, *tmp; list_for_each_entry_safe(profile, tmp, &pm->profiles, head) { list_del(&profile->head); 
profile->func->destroy(profile); } if (pm->cur != &pm->boot) nouveau_pm_perflvl_set(dev, &pm->boot); nouveau_temp_fini(dev); nouveau_perf_fini(dev); nouveau_volt_fini(dev); #if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) unregister_acpi_notifier(&pm->acpi_nb); #endif nouveau_hwmon_fini(dev); nouveau_sysfs_fini(dev); } void nouveau_pm_resume(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_level *perflvl; if (!pm->cur || pm->cur == &pm->boot) return; perflvl = pm->cur; pm->cur = &pm->boot; nouveau_pm_perflvl_set(dev, perflvl); nouveau_pwmfan_set(dev, pm->fan.percent); }
gpl-2.0
cholokei/msm8660_test_kernel-1
drivers/pnp/support.c
13886
4919
/* * support.c - standard functions for the use of pnp protocol drivers * * Copyright 2003 Adam Belay <ambx1@neo.rr.com> * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <bjorn.helgaas@hp.com> */ #include <linux/module.h> #include <linux/ctype.h> #include <linux/pnp.h> #include "base.h" /** * pnp_is_active - Determines if a device is active based on its current * resources * @dev: pointer to the desired PnP device */ int pnp_is_active(struct pnp_dev *dev) { /* * I don't think this is very reliable because pnp_disable_dev() * only clears out auto-assigned resources. */ if (!pnp_port_start(dev, 0) && pnp_port_len(dev, 0) <= 1 && !pnp_mem_start(dev, 0) && pnp_mem_len(dev, 0) <= 1 && pnp_irq(dev, 0) == -1 && pnp_dma(dev, 0) == -1) return 0; else return 1; } EXPORT_SYMBOL(pnp_is_active); /* * Functionally similar to acpi_ex_eisa_id_to_string(), but that's * buried in the ACPI CA, and we can't depend on it being present. */ void pnp_eisa_id_to_string(u32 id, char *str) { id = be32_to_cpu(id); /* * According to the specs, the first three characters are five-bit * compressed ASCII, and the left-over high order bit should be zero. * However, the Linux ISAPNP code historically used six bits for the * first character, and there seem to be IDs that depend on that, * e.g., "nEC8241" in the Linux 8250_pnp serial driver and the * FreeBSD sys/pc98/cbus/sio_cbus.c driver. 
*/ str[0] = 'A' + ((id >> 26) & 0x3f) - 1; str[1] = 'A' + ((id >> 21) & 0x1f) - 1; str[2] = 'A' + ((id >> 16) & 0x1f) - 1; str[3] = hex_asc_hi(id >> 8); str[4] = hex_asc_lo(id >> 8); str[5] = hex_asc_hi(id); str[6] = hex_asc_lo(id); str[7] = '\0'; } char *pnp_resource_type_name(struct resource *res) { switch (pnp_resource_type(res)) { case IORESOURCE_IO: return "io"; case IORESOURCE_MEM: return "mem"; case IORESOURCE_IRQ: return "irq"; case IORESOURCE_DMA: return "dma"; case IORESOURCE_BUS: return "bus"; } return "unknown"; } void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc) { struct pnp_resource *pnp_res; if (list_empty(&dev->resources)) pnp_dbg(&dev->dev, "%s: no current resources\n", desc); else { pnp_dbg(&dev->dev, "%s: current resources:\n", desc); list_for_each_entry(pnp_res, &dev->resources, list) pnp_dbg(&dev->dev, "%pr\n", &pnp_res->res); } } char *pnp_option_priority_name(struct pnp_option *option) { switch (pnp_option_priority(option)) { case PNP_RES_PRIORITY_PREFERRED: return "preferred"; case PNP_RES_PRIORITY_ACCEPTABLE: return "acceptable"; case PNP_RES_PRIORITY_FUNCTIONAL: return "functional"; } return "invalid"; } void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option) { char buf[128]; int len = 0, i; struct pnp_port *port; struct pnp_mem *mem; struct pnp_irq *irq; struct pnp_dma *dma; if (pnp_option_is_dependent(option)) len += scnprintf(buf + len, sizeof(buf) - len, " dependent set %d (%s) ", pnp_option_set(option), pnp_option_priority_name(option)); else len += scnprintf(buf + len, sizeof(buf) - len, " independent "); switch (option->type) { case IORESOURCE_IO: port = &option->u.port; len += scnprintf(buf + len, sizeof(buf) - len, "io min %#llx " "max %#llx align %lld size %lld flags %#x", (unsigned long long) port->min, (unsigned long long) port->max, (unsigned long long) port->align, (unsigned long long) port->size, port->flags); break; case IORESOURCE_MEM: mem = &option->u.mem; len += scnprintf(buf + len, 
sizeof(buf) - len, "mem min %#llx " "max %#llx align %lld size %lld flags %#x", (unsigned long long) mem->min, (unsigned long long) mem->max, (unsigned long long) mem->align, (unsigned long long) mem->size, mem->flags); break; case IORESOURCE_IRQ: irq = &option->u.irq; len += scnprintf(buf + len, sizeof(buf) - len, "irq"); if (bitmap_empty(irq->map.bits, PNP_IRQ_NR)) len += scnprintf(buf + len, sizeof(buf) - len, " <none>"); else { for (i = 0; i < PNP_IRQ_NR; i++) if (test_bit(i, irq->map.bits)) len += scnprintf(buf + len, sizeof(buf) - len, " %d", i); } len += scnprintf(buf + len, sizeof(buf) - len, " flags %#x", irq->flags); if (irq->flags & IORESOURCE_IRQ_OPTIONAL) len += scnprintf(buf + len, sizeof(buf) - len, " (optional)"); break; case IORESOURCE_DMA: dma = &option->u.dma; len += scnprintf(buf + len, sizeof(buf) - len, "dma"); if (!dma->map) len += scnprintf(buf + len, sizeof(buf) - len, " <none>"); else { for (i = 0; i < 8; i++) if (dma->map & (1 << i)) len += scnprintf(buf + len, sizeof(buf) - len, " %d", i); } len += scnprintf(buf + len, sizeof(buf) - len, " (bitmask %#x) " "flags %#x", dma->map, dma->flags); break; } pnp_dbg(&dev->dev, "%s\n", buf); }
gpl-2.0
jacobbarsoe/linux
drivers/net/ethernet/intel/i40e/i40e_common.c
63
82807
/******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * Contact Information: * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * ******************************************************************************/ #include "i40e_type.h" #include "i40e_adminq.h" #include "i40e_prototype.h" #include "i40e_virtchnl.h" /** * i40e_set_mac_type - Sets MAC type * @hw: pointer to the HW structure * * This function sets the mac type of the adapter based on the * vendor ID and device ID stored in the hw structure. 
**/ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) { i40e_status status = 0; if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { switch (hw->device_id) { case I40E_DEV_ID_SFP_XL710: case I40E_DEV_ID_SFP_X710: case I40E_DEV_ID_QEMU: case I40E_DEV_ID_KX_A: case I40E_DEV_ID_KX_B: case I40E_DEV_ID_KX_C: case I40E_DEV_ID_KX_D: case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_VF: case I40E_DEV_ID_VF_HV: hw->mac.type = I40E_MAC_VF; break; default: hw->mac.type = I40E_MAC_GENERIC; break; } } else { status = I40E_ERR_DEVICE_NOT_SUPPORTED; } hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n", hw->mac.type, status); return status; } /** * i40e_debug_aq * @hw: debug mask related to admin queue * @mask: debug mask * @desc: pointer to admin queue descriptor * @buffer: pointer to command buffer * * Dumps debug log about adminq command with descriptor contents. **/ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, void *buffer) { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u8 *aq_buffer = (u8 *)buffer; u32 data[4]; u32 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", aq_desc->opcode, aq_desc->flags, aq_desc->datalen, aq_desc->retval); i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", aq_desc->cookie_high, aq_desc->cookie_low); i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", aq_desc->params.internal.param0, aq_desc->params.internal.param1); i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", aq_desc->params.external.addr_high, aq_desc->params.external.addr_low); if ((buffer != NULL) && (aq_desc->datalen != 0)) { memset(data, 0, sizeof(data)); i40e_debug(hw, mask, "AQ CMD Buffer:\n"); for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) { data[((i % 16) / 4)] |= ((u32)aq_buffer[i]) << (8 * (i % 4)); if ((i % 16) == 15) { 
i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", i - 15, data[0], data[1], data[2], data[3]); memset(data, 0, sizeof(data)); } } if ((i % 16) != 0) i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", i - (i % 16), data[0], data[1], data[2], data[3]); } } /** * i40e_check_asq_alive * @hw: pointer to the hw struct * * Returns true if Queue is enabled else false. **/ bool i40e_check_asq_alive(struct i40e_hw *hw) { return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK); } /** * i40e_aq_queue_shutdown * @hw: pointer to the hw struct * @unloading: is the driver unloading itself * * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well. **/ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading) { struct i40e_aq_desc desc; struct i40e_aqc_queue_shutdown *cmd = (struct i40e_aqc_queue_shutdown *)&desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown); if (unloading) cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the * hardware to a bit-field that can be used by SW to more easily determine the * packet type. * * Macros are used to shorten the table lines and make this table human * readable. * * We store the PTYPE in the top byte of the bit field - this is just so that * we can check that the table doesn't have a row missing, as the index into * the table should be the PTYPE. 
* * Typical work flow: * * IF NOT i40e_ptype_lookup[ptype].known * THEN * Packet is unknown * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP * Use the rest of the fields to look at the tunnels, inner protocols, etc * ELSE * Use the enum i40e_rx_l2_ptype to decode the packet type * ENDIF */ /* macro to make the table lines short */ #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ { PTYPE, \ 1, \ I40E_RX_PTYPE_OUTER_##OUTER_IP, \ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ I40E_RX_PTYPE_##OUTER_FRAG, \ I40E_RX_PTYPE_TUNNEL_##T, \ I40E_RX_PTYPE_TUNNEL_END_##TE, \ I40E_RX_PTYPE_##TEF, \ I40E_RX_PTYPE_INNER_PROT_##I, \ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } #define I40E_PTT_UNUSED_ENTRY(PTYPE) \ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* shorter macros makes the table fit but are terse */ #define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG #define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG #define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC /* Lookup table mapping the HW PTYPE to the bit field for decoding */ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { /* L2 Packet types */ I40E_PTT_UNUSED_ENTRY(0), I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT_UNUSED_ENTRY(4), I40E_PTT_UNUSED_ENTRY(5), I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT_UNUSED_ENTRY(8), I40E_PTT_UNUSED_ENTRY(9), I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(17, L2, NONE, NOF, NONE, 
NONE, NOF, NONE, PAY3), I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), /* Non Tunneled IPv4 */ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(25), I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv4 --> IPv4 */ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(32), I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv4 --> IPv6 */ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(39), I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT */ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT --> IPv4 */ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(47), I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> 
IPv6 */ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(54), I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC */ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(62), I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(69), I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC/VLAN */ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(77), I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), I40E_PTT(80, IP, 
IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(84), I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* Non Tunneled IPv6 */ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), I40E_PTT_UNUSED_ENTRY(91), I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv6 --> IPv4 */ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(98), I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv6 --> IPv6 */ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(105), I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT */ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> IPv4 */ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, 
PAY3), I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(113), I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> IPv6 */ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(120), I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC */ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(128), I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(135), I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN */ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), 
I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(143), I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(150), I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* unused entries */ I40E_PTT_UNUSED_ENTRY(154), I40E_PTT_UNUSED_ENTRY(155), I40E_PTT_UNUSED_ENTRY(156), I40E_PTT_UNUSED_ENTRY(157), I40E_PTT_UNUSED_ENTRY(158), I40E_PTT_UNUSED_ENTRY(159), I40E_PTT_UNUSED_ENTRY(160), I40E_PTT_UNUSED_ENTRY(161), I40E_PTT_UNUSED_ENTRY(162), I40E_PTT_UNUSED_ENTRY(163), I40E_PTT_UNUSED_ENTRY(164), I40E_PTT_UNUSED_ENTRY(165), I40E_PTT_UNUSED_ENTRY(166), I40E_PTT_UNUSED_ENTRY(167), I40E_PTT_UNUSED_ENTRY(168), I40E_PTT_UNUSED_ENTRY(169), I40E_PTT_UNUSED_ENTRY(170), I40E_PTT_UNUSED_ENTRY(171), I40E_PTT_UNUSED_ENTRY(172), I40E_PTT_UNUSED_ENTRY(173), I40E_PTT_UNUSED_ENTRY(174), I40E_PTT_UNUSED_ENTRY(175), I40E_PTT_UNUSED_ENTRY(176), I40E_PTT_UNUSED_ENTRY(177), I40E_PTT_UNUSED_ENTRY(178), I40E_PTT_UNUSED_ENTRY(179), I40E_PTT_UNUSED_ENTRY(180), I40E_PTT_UNUSED_ENTRY(181), I40E_PTT_UNUSED_ENTRY(182), I40E_PTT_UNUSED_ENTRY(183), I40E_PTT_UNUSED_ENTRY(184), I40E_PTT_UNUSED_ENTRY(185), I40E_PTT_UNUSED_ENTRY(186), I40E_PTT_UNUSED_ENTRY(187), I40E_PTT_UNUSED_ENTRY(188), I40E_PTT_UNUSED_ENTRY(189), I40E_PTT_UNUSED_ENTRY(190), I40E_PTT_UNUSED_ENTRY(191), 
I40E_PTT_UNUSED_ENTRY(192), I40E_PTT_UNUSED_ENTRY(193), I40E_PTT_UNUSED_ENTRY(194), I40E_PTT_UNUSED_ENTRY(195), I40E_PTT_UNUSED_ENTRY(196), I40E_PTT_UNUSED_ENTRY(197), I40E_PTT_UNUSED_ENTRY(198), I40E_PTT_UNUSED_ENTRY(199), I40E_PTT_UNUSED_ENTRY(200), I40E_PTT_UNUSED_ENTRY(201), I40E_PTT_UNUSED_ENTRY(202), I40E_PTT_UNUSED_ENTRY(203), I40E_PTT_UNUSED_ENTRY(204), I40E_PTT_UNUSED_ENTRY(205), I40E_PTT_UNUSED_ENTRY(206), I40E_PTT_UNUSED_ENTRY(207), I40E_PTT_UNUSED_ENTRY(208), I40E_PTT_UNUSED_ENTRY(209), I40E_PTT_UNUSED_ENTRY(210), I40E_PTT_UNUSED_ENTRY(211), I40E_PTT_UNUSED_ENTRY(212), I40E_PTT_UNUSED_ENTRY(213), I40E_PTT_UNUSED_ENTRY(214), I40E_PTT_UNUSED_ENTRY(215), I40E_PTT_UNUSED_ENTRY(216), I40E_PTT_UNUSED_ENTRY(217), I40E_PTT_UNUSED_ENTRY(218), I40E_PTT_UNUSED_ENTRY(219), I40E_PTT_UNUSED_ENTRY(220), I40E_PTT_UNUSED_ENTRY(221), I40E_PTT_UNUSED_ENTRY(222), I40E_PTT_UNUSED_ENTRY(223), I40E_PTT_UNUSED_ENTRY(224), I40E_PTT_UNUSED_ENTRY(225), I40E_PTT_UNUSED_ENTRY(226), I40E_PTT_UNUSED_ENTRY(227), I40E_PTT_UNUSED_ENTRY(228), I40E_PTT_UNUSED_ENTRY(229), I40E_PTT_UNUSED_ENTRY(230), I40E_PTT_UNUSED_ENTRY(231), I40E_PTT_UNUSED_ENTRY(232), I40E_PTT_UNUSED_ENTRY(233), I40E_PTT_UNUSED_ENTRY(234), I40E_PTT_UNUSED_ENTRY(235), I40E_PTT_UNUSED_ENTRY(236), I40E_PTT_UNUSED_ENTRY(237), I40E_PTT_UNUSED_ENTRY(238), I40E_PTT_UNUSED_ENTRY(239), I40E_PTT_UNUSED_ENTRY(240), I40E_PTT_UNUSED_ENTRY(241), I40E_PTT_UNUSED_ENTRY(242), I40E_PTT_UNUSED_ENTRY(243), I40E_PTT_UNUSED_ENTRY(244), I40E_PTT_UNUSED_ENTRY(245), I40E_PTT_UNUSED_ENTRY(246), I40E_PTT_UNUSED_ENTRY(247), I40E_PTT_UNUSED_ENTRY(248), I40E_PTT_UNUSED_ENTRY(249), I40E_PTT_UNUSED_ENTRY(250), I40E_PTT_UNUSED_ENTRY(251), I40E_PTT_UNUSED_ENTRY(252), I40E_PTT_UNUSED_ENTRY(253), I40E_PTT_UNUSED_ENTRY(254), I40E_PTT_UNUSED_ENTRY(255) }; /** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure * * This assigns the MAC type and PHY code and inits the NVM. * Does not touch the hardware. 
This function must be called prior to any * other function in the shared code. The i40e_hw structure should be * memset to 0 prior to calling this function. The following fields in * hw structure should be filled in prior to calling this function: * hw_addr, back, device_id, vendor_id, subsystem_device_id, * subsystem_vendor_id, and revision_id **/ i40e_status i40e_init_shared_code(struct i40e_hw *hw) { i40e_status status = 0; u32 reg; i40e_set_mac_type(hw); switch (hw->mac.type) { case I40E_MAC_XL710: break; default: return I40E_ERR_DEVICE_NOT_SUPPORTED; break; } hw->phy.get_link_info = true; /* Determine port number */ reg = rd32(hw, I40E_PFGEN_PORTNUM); reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT); hw->port = (u8)reg; /* Determine the PF number based on the PCI fn */ reg = rd32(hw, I40E_GLPCI_CAPSUP); if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK) hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func); else hw->pf_id = (u8)hw->bus.func; status = i40e_init_nvm(hw); return status; } /** * i40e_aq_mac_address_read - Retrieve the MAC addresses * @hw: pointer to the hw struct * @flags: a return indicator of what addresses were added to the addr store * @addrs: the requestor's mac addr store * @cmd_details: pointer to command details structure or NULL **/ static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw, u16 *flags, struct i40e_aqc_mac_address_read_data *addrs, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_mac_address_read *cmd_data = (struct i40e_aqc_mac_address_read *)&desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, addrs, sizeof(*addrs), cmd_details); *flags = le16_to_cpu(cmd_data->command_flags); return status; } /** * i40e_aq_mac_address_write - Change the MAC addresses * @hw: pointer to the hw struct * @flags: indicates which 
MAC to be written * @mac_addr: address to write * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, u16 flags, u8 *mac_addr, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_mac_address_write *cmd_data = (struct i40e_aqc_mac_address_write *)&desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_write); cmd_data->command_flags = cpu_to_le16(flags); cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]); cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) | ((u32)mac_addr[3] << 16) | ((u32)mac_addr[4] << 8) | mac_addr[5]); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_get_mac_addr - get MAC address * @hw: pointer to the HW structure * @mac_addr: pointer to MAC address * * Reads the adapter's MAC address from register **/ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) { struct i40e_aqc_mac_address_read_data addrs; i40e_status status; u16 flags = 0; status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); if (flags & I40E_AQC_LAN_ADDR_VALID) memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac)); return status; } /** * i40e_get_media_type - Gets media type * @hw: pointer to the hardware structure **/ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) { enum i40e_media_type media; switch (hw->phy.link_info.phy_type) { case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_40GBASE_SR4: case I40E_PHY_TYPE_40GBASE_LR4: media = I40E_MEDIA_TYPE_FIBER; break; case I40E_PHY_TYPE_100BASE_TX: case I40E_PHY_TYPE_1000BASE_T: case I40E_PHY_TYPE_10GBASE_T: media = I40E_MEDIA_TYPE_BASET; break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_40GBASE_CR4_CU: case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_10GBASE_SFPP_CU: media = I40E_MEDIA_TYPE_DA; break; 
case I40E_PHY_TYPE_1000BASE_KX: case I40E_PHY_TYPE_10GBASE_KX4: case I40E_PHY_TYPE_10GBASE_KR: case I40E_PHY_TYPE_40GBASE_KR4: media = I40E_MEDIA_TYPE_BACKPLANE; break; case I40E_PHY_TYPE_SGMII: case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: case I40E_PHY_TYPE_XLAUI: case I40E_PHY_TYPE_XLPPI: default: media = I40E_MEDIA_TYPE_UNKNOWN; break; } return media; } #define I40E_PF_RESET_WAIT_COUNT_A0 200 #define I40E_PF_RESET_WAIT_COUNT 10 /** * i40e_pf_reset - Reset the PF * @hw: pointer to the hardware structure * * Assuming someone else has triggered a global reset, * assure the global reset is complete and then reset the PF **/ i40e_status i40e_pf_reset(struct i40e_hw *hw) { u32 cnt = 0; u32 cnt1 = 0; u32 reg = 0; u32 grst_del; /* Poll for Global Reset steady state in case of recent GRST. * The grst delay value is in 100ms units, and we'll wait a * couple counts longer to be sure we don't just miss the end. */ grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; for (cnt = 0; cnt < grst_del + 2; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) break; msleep(100); } if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { hw_dbg(hw, "Global reset polling failed to complete.\n"); return I40E_ERR_RESET_FAILED; } /* Now Wait for the FW to be ready */ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { reg = rd32(hw, I40E_GLNVM_ULD); reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) { hw_dbg(hw, "Core and Global modules ready %d\n", cnt1); break; } usleep_range(10000, 20000); } if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { hw_dbg(hw, "wait for FW Reset complete timedout\n"); hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg); return I40E_ERR_RESET_FAILED; } /* If there was a Global Reset in progress when we got here, * we don't 
need to do the PF Reset */ if (!cnt) { if (hw->revision_id == 0) cnt = I40E_PF_RESET_WAIT_COUNT_A0; else cnt = I40E_PF_RESET_WAIT_COUNT; reg = rd32(hw, I40E_PFGEN_CTRL); wr32(hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); for (; cnt; cnt--) { reg = rd32(hw, I40E_PFGEN_CTRL); if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) break; usleep_range(1000, 2000); } if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { hw_dbg(hw, "PF reset polling failed to complete.\n"); return I40E_ERR_RESET_FAILED; } } i40e_clear_pxe_mode(hw); return 0; } /** * i40e_clear_pxe_mode - clear pxe operations mode * @hw: pointer to the hw struct * * Make sure all PXE mode settings are cleared, including things * like descriptor fetch/write-back mode. **/ void i40e_clear_pxe_mode(struct i40e_hw *hw) { u32 reg; /* Clear single descriptor fetch/write-back mode */ reg = rd32(hw, I40E_GLLAN_RCTL_0); if (hw->revision_id == 0) { /* As a work around clear PXE_MODE instead of setting it */ wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK))); } else { wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK)); } } /** * i40e_led_is_mine - helper to find matching led * @hw: pointer to the hw struct * @idx: index into GPIO registers * * returns: 0 if no match, otherwise the value of the GPIO_CTL register */ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) { u32 gpio_val = 0; u32 port; if (!hw->func_caps.led[idx]) return 0; gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; /* if PRT_NUM_NA is 1 then this LED is not port specific, OR * if it is not our port then ignore */ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) || (port != hw->port)) return 0; return gpio_val; } #define I40E_LED0 22 #define I40E_LINK_ACTIVITY 0xC /** * i40e_led_get - return current on/off mode * @hw: pointer to the hw struct * * The value returned is the 'mode' field as defined in the * GPIO register definitions: 
0x0 = off, 0xf = on, and other * values are variations of possible behaviors relating to * blink, link, and wire. **/ u32 i40e_led_get(struct i40e_hw *hw) { u32 mode = 0; int i; /* as per the documentation GPIO 22-29 are the LED * GPIO pins named LED0..LED7 */ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { u32 gpio_val = i40e_led_is_mine(hw, i); if (!gpio_val) continue; mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; break; } return mode; } /** * i40e_led_set - set new on/off mode * @hw: pointer to the hw struct * @mode: 0=off, 0xf=on (else see manual for mode details) * @blink: true if the LED should blink when on, false if steady * * if this function is used to turn on the blink it should * be used to disable the blink when restoring the original state. **/ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) { int i; if (mode & 0xfffffff0) hw_dbg(hw, "invalid mode passed in %X\n", mode); /* as per the documentation GPIO 22-29 are the LED * GPIO pins named LED0..LED7 */ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { u32 gpio_val = i40e_led_is_mine(hw, i); if (!gpio_val) continue; gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; /* this & is a bit of paranoia, but serves as a range check */ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); if (mode == I40E_LINK_ACTIVITY) blink = false; gpio_val |= (blink ? 1 : 0) << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT; wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); break; } } /* Admin command wrappers */ /** * i40e_aq_set_link_restart_an * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * Sets up the link and restarts the Auto-Negotiation over the link. 
**/
i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_link_restart_an *cmd =
		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_link_restart_an);

	/* request the PHY to restart auto-negotiation */
	cmd->command = I40E_AQ_PHY_RESTART_AN;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_link_info
 * @hw: pointer to the hw struct
 * @enable_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the link status of the adapter.
 **/
i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
				bool enable_lse, struct i40e_link_status *link,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_link_status *resp =
		(struct i40e_aqc_get_link_status *)&desc.params.raw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	i40e_status status;
	u16 command_flags;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);

	/* tell firmware whether to report Link Status Events */
	if (enable_lse)
		command_flags = I40E_AQ_LSE_ENABLE;
	else
		command_flags = I40E_AQ_LSE_DISABLE;
	resp->command_flags = cpu_to_le16(command_flags);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (status)
		goto aq_get_link_info_exit;

	/* save off old link status information */
	hw->phy.link_info_old = *hw_link_info;

	/* update link status */
	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
	hw->phy.media_type = i40e_get_media_type(hw);
	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
	hw_link_info->link_info = resp->link_info;
	hw_link_info->an_info = resp->an_info;
	hw_link_info->ext_info = resp->ext_info;
	hw_link_info->loopback = resp->loopback;

	/* remember whether firmware acknowledged LSE as enabled */
	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
		hw_link_info->lse_enable = true;
	else
		hw_link_info->lse_enable = false;

	/*
save link status information */ if (link) *link = *hw_link_info; /* flag cleared so helper functions don't call AQ again */ hw->phy.get_link_info = false; aq_get_link_info_exit: return status; } /** * i40e_aq_add_vsi * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL * * Add a VSI context to the hardware. **/ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vsi); cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); cmd->connection_type = vsi_ctx->connection_type; cmd->vf_id = vsi_ctx->vf_num; cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details); if (status) goto aq_add_vsi_exit; vsi_ctx->seid = le16_to_cpu(resp->seid); vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); aq_add_vsi_exit: return status; } /** * i40e_aq_set_vsi_unicast_promiscuous * @hw: pointer to the hw struct * @seid: vsi number * @set: set unicast promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct 
i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; i40e_status status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set) flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_multicast_promiscuous * @hw: pointer to the hw struct * @seid: vsi number * @set: set multicast promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; i40e_status status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set) flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_broadcast * @hw: pointer to the hw struct * @seid: vsi number * @set_filter: true to set filter, false to clear filter * @cmd_details: pointer to command details structure or NULL * * Set or clear the broadcast promiscuous flag (filter) for a given VSI. 
**/ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, u16 seid, bool set_filter, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set_filter) cmd->promiscuous_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); else cmd->promiscuous_flags &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_get_vsi_params - get VSI configuration info * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_vsi_parameters); cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), NULL); if (status) goto aq_get_vsi_params_exit; vsi_ctx->seid = le16_to_cpu(resp->seid); vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); aq_get_vsi_params_exit: return status; } /** * i40e_aq_update_vsi_params * 
@hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Update a VSI context.
 **/
i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
				struct i40e_vsi_context *vsi_ctx,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_get_update_vsi *cmd =
		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					i40e_aqc_opc_update_vsi_parameters);
	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);

	/* indirect write: BUF marks a buffer, RD marks driver->FW data */
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
				    sizeof(vsi_ctx->info), cmd_details);

	return status;
}

/**
 * i40e_aq_get_switch_config
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of input buffer
 * @start_seid: seid to start for the report, 0 == beginning
 * @cmd_details: pointer to command details structure or NULL
 *
 * Fill the buf with switch configuration returned from AdminQ command
 **/
i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
				struct i40e_aqc_get_switch_config_resp *buf,
				u16 buf_size, u16 *start_seid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_switch_seid *scfg =
		(struct i40e_aqc_switch_seid *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					i40e_aqc_opc_get_switch_config);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buf_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	scfg->seid = cpu_to_le16(*start_seid);

	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
	/* FW echoes back the seid to continue from on the next call */
	*start_seid = le16_to_cpu(scfg->seid);

	return status;
}

/**
 * i40e_aq_get_firmware_version
 * @hw: pointer to the hw struct
 * @fw_major_version: firmware major version
 * @fw_minor_version: firmware minor version
 * @api_major_version: major API (queue) version
 * @api_minor_version: minor API (queue) version
 * @cmd_details: pointer to command details structure or NULL
 *
 * Get the firmware version from the admin queue commands.
 * Any of the out-pointers may be NULL if the caller does not need
 * that particular value.
 **/
i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
				u16 *fw_major_version, u16 *fw_minor_version,
				u16 *api_major_version, u16 *api_minor_version,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_version *resp =
		(struct i40e_aqc_get_version *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (!status) {
		if (fw_major_version != NULL)
			*fw_major_version = le16_to_cpu(resp->fw_major);
		if (fw_minor_version != NULL)
			*fw_minor_version = le16_to_cpu(resp->fw_minor);
		if (api_major_version != NULL)
			*api_major_version = le16_to_cpu(resp->api_major);
		if (api_minor_version != NULL)
			*api_minor_version = le16_to_cpu(resp->api_minor);
	}

	return status;
}

/**
 * i40e_aq_send_driver_version
 * @hw: pointer to the hw struct
 * @dv: driver's major, minor version
 * @cmd_details: pointer to command details structure or NULL
 *
 * Send the driver version to the firmware
 **/
i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
				struct i40e_driver_version *dv,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_driver_version *cmd =
		(struct i40e_aqc_driver_version *)&desc.params.raw;
	i40e_status status;

	if (dv == NULL)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);

	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI);
	cmd->driver_major_ver = dv->major_version;
	cmd->driver_minor_ver = dv->minor_version;
	cmd->driver_build_ver = dv->build_version;
	cmd->driver_subbuild_ver = dv->subbuild_version;
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_get_link_status - get
status of the HW network link
 * @hw: pointer to the hw struct
 *
 * Returns true if link is up, false if link is down.
 *
 * Side effect: LinkStatusEvent reporting becomes enabled
 **/
bool i40e_get_link_status(struct i40e_hw *hw)
{
	/* Refresh the cached link info from firmware when the PHY layer
	 * provides a query hook; a failed query is reported as link-down.
	 */
	if (hw->phy.get_link_info) {
		if (i40e_aq_get_link_info(hw, true, NULL, NULL))
			return false;
	}

	return (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) != 0;
}

/**
 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
 * @hw: pointer to the hw struct
 * @uplink_seid: the MAC or other gizmo SEID
 * @downlink_seid: the VSI SEID
 * @enabled_tc: bitmap of TCs to be enabled
 * @default_port: true for default port VSI, false for control port
 * @enable_l2_filtering: true to add L2 filter table rules to regular
 * forwarding rules for cloud support
 * @veb_seid: pointer to where to put the resulting VEB SEID
 * @cmd_details: pointer to command details structure or NULL
 *
 * This asks the FW to add a VEB between the uplink and downlink
 * elements.  If the uplink SEID is 0, this will be a floating VEB.
 **/
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
				u16 downlink_seid, u8 enabled_tc,
				bool default_port, bool enable_l2_filtering,
				u16 *veb_seid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_veb *cmd =
		(struct i40e_aqc_add_veb *)&desc.params.raw;
	struct i40e_aqc_add_veb_completion *resp =
		(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
	i40e_status status;
	u16 veb_flags = 0;

	/* SEIDs need to either both be set or both be 0 for floating VEB */
	if (!!uplink_seid != !!downlink_seid)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);

	cmd->uplink_seid = cpu_to_le16(uplink_seid);
	cmd->downlink_seid = cpu_to_le16(downlink_seid);
	cmd->enable_tcs = enabled_tc;
	/* uplink_seid == 0 (and hence downlink too) means a floating VEB */
	if (!uplink_seid)
		veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
	if (default_port)
		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
	else
		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
	if (enable_l2_filtering)
		veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
	cmd->veb_flags = cpu_to_le16(veb_flags);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	/* the new VEB's SEID comes back in the completion descriptor */
	if (!status && veb_seid)
		*veb_seid = le16_to_cpu(resp->veb_seid);

	return status;
}

/**
 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
 * @hw: pointer to the hw struct
 * @veb_seid: the SEID of the VEB to query
 * @switch_id: the uplink switch id
 * @floating: set to true if the VEB is floating
 * @statistic_index: index of the stats counter block for this VEB
 * @vebs_used: number of VEB's used by function
 * @vebs_free: total VEB's not reserved by any function
 * @cmd_details: pointer to command details structure or NULL
 *
 * This retrieves the parameters for a particular VEB, specified by
 * uplink_seid, and returns them to the caller.
 **/
i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
				u16 veb_seid, u16 *switch_id,
				bool *floating, u16 *statistic_index,
				u16 *vebs_used, u16 *vebs_free,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
		(struct i40e_aqc_get_veb_parameters_completion *)
		&desc.params.raw;
	i40e_status status;

	if (veb_seid == 0)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc,
					i40e_aqc_opc_get_veb_parameters);
	cmd_resp->seid = cpu_to_le16(veb_seid);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
	if (status)
		goto get_veb_exit;

	/* each out-pointer is optional; fill only those supplied */
	if (switch_id)
		*switch_id = le16_to_cpu(cmd_resp->switch_id);
	if (statistic_index)
		*statistic_index = le16_to_cpu(cmd_resp->statistic_index);
	if (vebs_used)
		*vebs_used = le16_to_cpu(cmd_resp->vebs_used);
	if (vebs_free)
		*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
	if (floating) {
		u16 flags = le16_to_cpu(cmd_resp->veb_flags);
		if (flags & I40E_AQC_ADD_VEB_FLOATING)
			*floating = true;
		else
			*floating = false;
	}

get_veb_exit:
	return status;
}

/**
 * i40e_aq_add_macvlan
 * @hw: pointer to the hw struct
 * @seid: VSI for the mac address
 * @mv_list: list of macvlans to be added
 * @count: length of the list
 * @cmd_details: pointer to command details structure or NULL
 *
 * Add MAC/VLAN addresses to the HW filtering
 **/
i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
			struct i40e_aqc_add_macvlan_element_data *mv_list,
			u16 count, struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_macvlan *cmd =
		(struct i40e_aqc_macvlan *)&desc.params.raw;
	i40e_status status;
	u16 buf_size;

	if (count == 0 || !mv_list || !hw)
		return I40E_ERR_PARAM;

	buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data);

	/* prep the rest of the request */
	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
	cmd->num_addresses = cpu_to_le16(count);
	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
	cmd->seid[1] = 0;
	cmd->seid[2] = 0;

	/* indirect write: the list travels in the command buffer */
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	if (buf_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
				    cmd_details);

	return status;
}

/**
 * i40e_aq_remove_macvlan
 * @hw: pointer to the hw struct
 * @seid: VSI for the mac address
 * @mv_list: list of macvlans to be removed
 * @count: length of the list
 * @cmd_details: pointer to command details structure or NULL
 *
 * Remove MAC/VLAN addresses from the HW filtering
 **/
i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
			struct i40e_aqc_remove_macvlan_element_data *mv_list,
			u16 count, struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_macvlan *cmd =
		(struct i40e_aqc_macvlan *)&desc.params.raw;
	i40e_status status;
	u16 buf_size;

	if (count == 0 || !mv_list || !hw)
		return I40E_ERR_PARAM;

	buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data);

	/* prep the rest of the request */
	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
	cmd->num_addresses = cpu_to_le16(count);
	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
	cmd->seid[1] = 0;
	cmd->seid[2] = 0;

	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	if (buf_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
				    cmd_details);

	return status;
}

/**
 * i40e_aq_send_msg_to_vf
 * @hw: pointer to the hardware structure
 * @vfid: vf id to send msg
 * @v_opcode: opcodes for VF-PF communication
 * @v_retval: return error code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @cmd_details: pointer to command details
 *
 * send msg to vf
 **/
i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
				u32 v_opcode, u32 v_retval, u8 *msg,
				u16 msglen,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_pf_vf_message *cmd =
		(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
	cmd->id = cpu_to_le32(vfid);
	/* opcode and retval ride in the descriptor cookie fields */
	desc.cookie_high = cpu_to_le32(v_opcode);
	desc.cookie_low = cpu_to_le32(v_retval);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
	if (msglen) {
		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
						I40E_AQ_FLAG_RD));
		if (msglen > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
		desc.datalen = cpu_to_le16(msglen);
	}
	status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);

	return status;
}

/**
 * i40e_aq_set_hmc_resource_profile
 * @hw: pointer to the hw struct
 * @profile: type of profile the HMC is to be set as
 * @pe_vf_enabled_count: the number of PE enabled VFs the system has
 * @cmd_details: pointer to command details structure or NULL
 *
 * set the HMC profile of the device.
 **/
i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
				enum i40e_aq_hmc_profile profile,
				u8 pe_vf_enabled_count,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_get_set_hmc_resource_profile *cmd =
		(struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					i40e_aqc_opc_set_hmc_resource_profile);

	cmd->pm_profile = (u8)profile;
	cmd->pe_vf_enabled = pe_vf_enabled_count;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_request_resource
 * @hw: pointer to the hw struct
 * @resource: resource id
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cmd_details: pointer to command details structure or NULL
 *
 * requests common resource using the admin queue commands
 **/
i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
				enum i40e_aq_resources_ids resource,
				enum i40e_aq_resource_access_type access,
				u8 sdp_number, u64 *timeout,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_request_resource *cmd_resp =
		(struct i40e_aqc_request_resource *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					i40e_aqc_opc_request_resource);

	cmd_resp->resource_id = cpu_to_le16(resource);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->resource_number = cpu_to_le32(sdp_number);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 * If the resource is held by someone else, the command completes with
	 * busy return value and the timeout field indicates the maximum time
	 * the current owner of the resource has to free it.
	 */
	if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * i40e_aq_release_resource
 * @hw: pointer to the hw struct
 * @resource: resource id
 * @sdp_number: resource number
 * @cmd_details: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands
 **/
i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
				enum i40e_aq_resources_ids resource,
				u8 sdp_number,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_request_resource *cmd =
		(struct i40e_aqc_request_resource *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					i40e_aqc_opc_release_resource);

	cmd->resource_id = cpu_to_le16(resource);
	cmd->resource_number = cpu_to_le32(sdp_number);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_read_nvm
 * @hw: pointer to the hw struct
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be read (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if
this is the last command in a series
 * @cmd_details: pointer to command details structure or NULL
 *
 * Read the NVM using the admin queue commands
 **/
i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
				u32 offset, u16 length, void *data,
				bool last_command,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_nvm_update *cmd =
		(struct i40e_aqc_nvm_update *)&desc.params.raw;
	i40e_status status;

	/* In offset the highest byte must be zeroed. */
	if (offset & 0xFF000000) {
		status = I40E_ERR_PARAM;
		goto i40e_aq_read_nvm_exit;
	}

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);

	/* If this is the last command in a series, set the proper flag. */
	if (last_command)
		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
	cmd->module_pointer = module_pointer;
	cmd->offset = cpu_to_le32(offset);
	cmd->length = cpu_to_le16(length);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (length > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);

i40e_aq_read_nvm_exit:
	return status;
}

/* capability record IDs returned by the list_capabilities AQ commands */
#define I40E_DEV_FUNC_CAP_SWITCH_MODE	0x01
#define I40E_DEV_FUNC_CAP_MGMT_MODE	0x02
#define I40E_DEV_FUNC_CAP_NPAR		0x03
#define I40E_DEV_FUNC_CAP_OS2BMC	0x04
#define I40E_DEV_FUNC_CAP_VALID_FUNC	0x05
#define I40E_DEV_FUNC_CAP_SRIOV_1_1	0x12
#define I40E_DEV_FUNC_CAP_VF		0x13
#define I40E_DEV_FUNC_CAP_VMDQ		0x14
#define I40E_DEV_FUNC_CAP_802_1_QBG	0x15
#define I40E_DEV_FUNC_CAP_802_1_QBH	0x16
#define I40E_DEV_FUNC_CAP_VSI		0x17
#define I40E_DEV_FUNC_CAP_DCB		0x18
#define I40E_DEV_FUNC_CAP_FCOE		0x21
#define I40E_DEV_FUNC_CAP_RSS		0x40
#define I40E_DEV_FUNC_CAP_RX_QUEUES	0x41
#define I40E_DEV_FUNC_CAP_TX_QUEUES	0x42
#define I40E_DEV_FUNC_CAP_MSIX		0x43
#define I40E_DEV_FUNC_CAP_MSIX_VF	0x44
#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR	0x45
#define I40E_DEV_FUNC_CAP_IEEE_1588	0x46
#define I40E_DEV_FUNC_CAP_MFP_MODE_1	0xF1
#define I40E_DEV_FUNC_CAP_CEM		0xF2
#define I40E_DEV_FUNC_CAP_IWARP	0x51
#define I40E_DEV_FUNC_CAP_LED		0x61
#define I40E_DEV_FUNC_CAP_SDP		0x62
#define I40E_DEV_FUNC_CAP_MDIO		0x63

/**
 * i40e_parse_discover_capabilities
 * @hw: pointer to the hw struct
 * @buff: pointer to a buffer containing device/function capability records
 * @cap_count: number of capability records in the list
 * @list_type_opc: type of capabilities list to parse
 *
 * Parse the device/function capabilities list.
 **/
static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
				u32 cap_count,
				enum i40e_admin_queue_opc list_type_opc)
{
	struct i40e_aqc_list_capabilities_element_resp *cap;
	u32 number, logical_id, phys_id;
	struct i40e_hw_capabilities *p;
	u32 reg_val;
	u32 i = 0;
	u16 id;

	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;

	/* select the dev or func capability struct per the opcode */
	if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
		p = &hw->dev_caps;
	else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
		p = &hw->func_caps;
	else
		return;

	for (i = 0; i < cap_count; i++, cap++) {
		id = le16_to_cpu(cap->id);
		number = le32_to_cpu(cap->number);
		logical_id = le32_to_cpu(cap->logical_id);
		phys_id = le32_to_cpu(cap->phys_id);

		switch (id) {
		case I40E_DEV_FUNC_CAP_SWITCH_MODE:
			p->switch_mode = number;
			break;
		case I40E_DEV_FUNC_CAP_MGMT_MODE:
			p->management_mode = number;
			break;
		case I40E_DEV_FUNC_CAP_NPAR:
			p->npar_enable = number;
			break;
		case I40E_DEV_FUNC_CAP_OS2BMC:
			p->os2bmc = number;
			break;
		case I40E_DEV_FUNC_CAP_VALID_FUNC:
			p->valid_functions = number;
			break;
		case I40E_DEV_FUNC_CAP_SRIOV_1_1:
			if (number == 1)
				p->sr_iov_1_1 = true;
			break;
		case I40E_DEV_FUNC_CAP_VF:
			p->num_vfs = number;
			p->vf_base_id = logical_id;
			break;
		case I40E_DEV_FUNC_CAP_VMDQ:
			if (number == 1)
				p->vmdq = true;
			break;
		case I40E_DEV_FUNC_CAP_802_1_QBG:
			if (number == 1)
				p->evb_802_1_qbg = true;
			break;
		case I40E_DEV_FUNC_CAP_802_1_QBH:
			if (number == 1)
				p->evb_802_1_qbh = true;
			break;
		case I40E_DEV_FUNC_CAP_VSI:
			p->num_vsis = number;
			break;
		case I40E_DEV_FUNC_CAP_DCB:
			if (number == 1) {
				p->dcb = true;
				p->enabled_tcmap = logical_id;
				p->maxtc = phys_id;
			}
			break;
		case I40E_DEV_FUNC_CAP_FCOE:
			if (number == 1)
				p->fcoe = true;
			break;
		case I40E_DEV_FUNC_CAP_RSS:
			p->rss = true;
			/* table size depends on the HASHLUTSIZE register bit */
			reg_val = rd32(hw, I40E_PFQF_CTL_0);
			if (reg_val & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK)
				p->rss_table_size = number;
			else
				p->rss_table_size = 128;
			p->rss_table_entry_width = logical_id;
			break;
		case I40E_DEV_FUNC_CAP_RX_QUEUES:
			p->num_rx_qp = number;
			p->base_queue = phys_id;
			break;
		case I40E_DEV_FUNC_CAP_TX_QUEUES:
			p->num_tx_qp = number;
			p->base_queue = phys_id;
			break;
		case I40E_DEV_FUNC_CAP_MSIX:
			p->num_msix_vectors = number;
			break;
		case I40E_DEV_FUNC_CAP_MSIX_VF:
			p->num_msix_vectors_vf = number;
			break;
		case I40E_DEV_FUNC_CAP_MFP_MODE_1:
			if (number == 1)
				p->mfp_mode_1 = true;
			break;
		case I40E_DEV_FUNC_CAP_CEM:
			if (number == 1)
				p->mgmt_cem = true;
			break;
		case I40E_DEV_FUNC_CAP_IWARP:
			if (number == 1)
				p->iwarp = true;
			break;
		case I40E_DEV_FUNC_CAP_LED:
			if (phys_id < I40E_HW_CAP_MAX_GPIO)
				p->led[phys_id] = true;
			break;
		case I40E_DEV_FUNC_CAP_SDP:
			if (phys_id < I40E_HW_CAP_MAX_GPIO)
				p->sdp[phys_id] = true;
			break;
		case I40E_DEV_FUNC_CAP_MDIO:
			if (number == 1) {
				p->mdio_port_num = phys_id;
				p->mdio_port_mode = logical_id;
			}
			break;
		case I40E_DEV_FUNC_CAP_IEEE_1588:
			if (number == 1)
				p->ieee_1588 = true;
			break;
		case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
			p->fd = true;
			p->fd_filters_guaranteed = number;
			p->fd_filters_best_effort = logical_id;
			break;
		default:
			break;
		}
	}

	/* additional HW specific goodies that might
	 * someday be HW version specific
	 */
	p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
}

/**
 * i40e_aq_discover_capabilities
 * @hw: pointer to the hw struct
 * @buff: a virtual buffer to hold the capabilities
 * @buff_size: Size of the virtual buffer
 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
 * @list_type_opc: capabilities type to discover - pass in the command opcode
 * @cmd_details: pointer to command details structure or NULL
 *
 * Get the device capabilities
 descriptions from the firmware
 **/
i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
				void *buff, u16 buff_size, u16 *data_size,
				enum i40e_admin_queue_opc list_type_opc,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aqc_list_capabilites *cmd;
	struct i40e_aq_desc desc;
	i40e_status status = 0;

	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;

	if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
	    list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
		status = I40E_ERR_PARAM;
		goto exit;
	}

	i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
	/* report returned size (or needed size on ENOMEM) even on failure */
	*data_size = le16_to_cpu(desc.datalen);

	if (status)
		goto exit;

	i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
					 list_type_opc);

exit:
	return status;
}

/**
 * i40e_aq_get_lldp_mib
 * @hw: pointer to the hw struct
 * @bridge_type: type of bridge requested
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buff: pointer to a user supplied buffer to store the MIB block
 * @buff_size: size of the buffer (in bytes)
 * @local_len : length of the returned Local LLDP MIB
 * @remote_len: length of the returned Remote LLDP MIB
 * @cmd_details: pointer to command details structure or NULL
 *
 * Requests the complete LLDP MIB (entire packet).
**/ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, u8 mib_type, void *buff, u16 buff_size, u16 *local_len, u16 *remote_len, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_get_mib *cmd = (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; struct i40e_aqc_lldp_get_mib *resp = (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; i40e_status status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); /* Indirect Command */ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); desc.datalen = cpu_to_le16(buff_size); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { if (local_len != NULL) *local_len = le16_to_cpu(resp->local_len); if (remote_len != NULL) *remote_len = le16_to_cpu(resp->remote_len); } return status; } /** * i40e_aq_cfg_lldp_mib_change_event * @hw: pointer to the hw struct * @enable_update: Enable or Disable event posting * @cmd_details: pointer to command details structure or NULL * * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes **/ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, bool enable_update, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_update_mib *cmd = (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); if (!enable_update) cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_stop_lldp * @hw: pointer to the hw 
struct * @shutdown_agent: True if LLDP Agent needs to be Shutdown * @cmd_details: pointer to command details structure or NULL * * Stop or Shutdown the embedded LLDP Agent **/ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_stop *cmd = (struct i40e_aqc_lldp_stop *)&desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); if (shutdown_agent) cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_start_lldp * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * Start the embedded LLDP Agent on all ports. **/ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_start *cmd = (struct i40e_aqc_lldp_start *)&desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); cmd->command = I40E_AQ_LLDP_AGENT_START; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_udp_tunnel * @hw: pointer to the hw struct * @udp_port: the UDP port to add * @header_len: length of the tunneling header length in DWords * @protocol_index: protocol index type * @filter_index: pointer to filter index * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, u16 udp_port, u8 header_len, u8 protocol_index, u8 *filter_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_udp_tunnel *cmd = (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; struct i40e_aqc_del_udp_tunnel_completion *resp = (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 
cmd->udp_port = cpu_to_le16(udp_port); cmd->protocol_type = protocol_index; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) *filter_index = resp->index; return status; } /** * i40e_aq_del_udp_tunnel * @hw: pointer to the hw struct * @index: filter index * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_remove_udp_tunnel *cmd = (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); cmd->index = index; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_delete_element - Delete switch element * @hw: pointer to the hw struct * @seid: the SEID to delete from the switch * @cmd_details: pointer to command details structure or NULL * * This deletes a switch element from the switch. **/ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_switch_seid *cmd = (struct i40e_aqc_switch_seid *)&desc.params.raw; i40e_status status; if (seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_dcb_updated - DCB Updated Command * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * EMP will return when the shared RPB settings have been * recomputed and modified. The retval field in the descriptor * will be set to 0 when RPB is modified. 
**/ i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler * @hw: pointer to the hw struct * @seid: seid for the physical port/switching component/vsi * @buff: Indirect buffer to hold data parameters and response * @buff_size: Indirect buffer size * @opcode: Tx scheduler AQ command opcode * @cmd_details: pointer to command details structure or NULL * * Generic command handler for Tx scheduler AQ commands **/ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, void *buff, u16 buff_size, enum i40e_admin_queue_opc opcode, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_tx_sched_ind *cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; i40e_status status; bool cmd_param_flag = false; switch (opcode) { case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: case i40e_aqc_opc_configure_vsi_tc_bw: case i40e_aqc_opc_enable_switching_comp_ets: case i40e_aqc_opc_modify_switching_comp_ets: case i40e_aqc_opc_disable_switching_comp_ets: case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: case i40e_aqc_opc_configure_switching_comp_bw_config: cmd_param_flag = true; break; case i40e_aqc_opc_query_vsi_bw_config: case i40e_aqc_opc_query_vsi_ets_sla_config: case i40e_aqc_opc_query_switching_comp_ets_config: case i40e_aqc_opc_query_port_ets_config: case i40e_aqc_opc_query_switching_comp_bw_config: cmd_param_flag = false; break; default: return I40E_ERR_PARAM; } i40e_fill_default_direct_cmd_desc(&desc, opcode); /* Indirect command */ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); if (cmd_param_flag) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= 
cpu_to_le16((u16)I40E_AQ_FLAG_LB); desc.datalen = cpu_to_le16(buff_size); cmd->vsi_seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); return status; } /** * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC * @hw: pointer to the hw struct * @seid: VSI seid * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_vsi_tc_bw, cmd_details); } /** * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port * @hw: pointer to the hw struct * @seid: seid of the switching component connected to Physical Port * @ets_data: Buffer holding ETS parameters * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_switching_comp_ets_data *ets_data, enum i40e_admin_queue_opc opcode, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, sizeof(*ets_data), opcode, cmd_details); } /** * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_switching_comp_bw_config, cmd_details); } /** * i40e_aq_query_vsi_bw_config - Query VSI 
BW configuration * @hw: pointer to the hw struct * @seid: seid of the VSI * @bw_data: Buffer to hold VSI BW configuration * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_vsi_bw_config, cmd_details); } /** * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC * @hw: pointer to the hw struct * @seid: seid of the VSI * @bw_data: Buffer to hold VSI BW configuration per TC * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_vsi_ets_sla_config, cmd_details); } /** * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer to hold switching component's per TC BW config * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_switching_comp_ets_config, cmd_details); } /** * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration * @hw: pointer to the hw struct * @seid: seid of the VSI or switching component connected to Physical Port * @bw_data: Buffer to hold current ETS configuration for the Physical Port * @cmd_details: pointer to command details structure or NULL **/ i40e_status 
i40e_aq_query_port_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_port_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_port_ets_config, cmd_details); } /** * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer to hold switching component's BW configuration * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_switching_comp_bw_config, cmd_details); } /** * i40e_validate_filter_settings * @hw: pointer to the hardware structure * @settings: Filter control settings * * Check and validate the filter control settings passed. * The function checks for the valid filter/context sizes being * passed for FCoE and PE. * * Returns 0 if the values passed are valid and within * range else returns an error. 
**/ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, struct i40e_filter_control_settings *settings) { u32 fcoe_cntx_size, fcoe_filt_size; u32 pe_cntx_size, pe_filt_size; u32 fcoe_fmax, pe_fmax; u32 val; /* Validate FCoE settings passed */ switch (settings->fcoe_filt_num) { case I40E_HASH_FILTER_SIZE_1K: case I40E_HASH_FILTER_SIZE_2K: case I40E_HASH_FILTER_SIZE_4K: case I40E_HASH_FILTER_SIZE_8K: case I40E_HASH_FILTER_SIZE_16K: case I40E_HASH_FILTER_SIZE_32K: fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; fcoe_filt_size <<= (u32)settings->fcoe_filt_num; break; default: return I40E_ERR_PARAM; } switch (settings->fcoe_cntx_num) { case I40E_DMA_CNTX_SIZE_512: case I40E_DMA_CNTX_SIZE_1K: case I40E_DMA_CNTX_SIZE_2K: case I40E_DMA_CNTX_SIZE_4K: fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; break; default: return I40E_ERR_PARAM; } /* Validate PE settings passed */ switch (settings->pe_filt_num) { case I40E_HASH_FILTER_SIZE_1K: case I40E_HASH_FILTER_SIZE_2K: case I40E_HASH_FILTER_SIZE_4K: case I40E_HASH_FILTER_SIZE_8K: case I40E_HASH_FILTER_SIZE_16K: case I40E_HASH_FILTER_SIZE_32K: case I40E_HASH_FILTER_SIZE_64K: case I40E_HASH_FILTER_SIZE_128K: case I40E_HASH_FILTER_SIZE_256K: case I40E_HASH_FILTER_SIZE_512K: case I40E_HASH_FILTER_SIZE_1M: pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; pe_filt_size <<= (u32)settings->pe_filt_num; break; default: return I40E_ERR_PARAM; } switch (settings->pe_cntx_num) { case I40E_DMA_CNTX_SIZE_512: case I40E_DMA_CNTX_SIZE_1K: case I40E_DMA_CNTX_SIZE_2K: case I40E_DMA_CNTX_SIZE_4K: case I40E_DMA_CNTX_SIZE_8K: case I40E_DMA_CNTX_SIZE_16K: case I40E_DMA_CNTX_SIZE_32K: case I40E_DMA_CNTX_SIZE_64K: case I40E_DMA_CNTX_SIZE_128K: case I40E_DMA_CNTX_SIZE_256K: pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; pe_cntx_size <<= (u32)settings->pe_cntx_num; break; default: return I40E_ERR_PARAM; } /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ val = rd32(hw, I40E_GLHMC_FCOEFMAX); fcoe_fmax 
= (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) return I40E_ERR_INVALID_SIZE; /* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */ val = rd32(hw, I40E_GLHMC_PEXFMAX); pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK) >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT; if (pe_filt_size + pe_cntx_size > pe_fmax) return I40E_ERR_INVALID_SIZE; return 0; } /** * i40e_set_filter_control * @hw: pointer to the hardware structure * @settings: Filter control settings * * Set the Queue Filters for PE/FCoE and enable filters required * for a single PF. It is expected that these settings are programmed * at the driver initialization time. **/ i40e_status i40e_set_filter_control(struct i40e_hw *hw, struct i40e_filter_control_settings *settings) { i40e_status ret = 0; u32 hash_lut_size = 0; u32 val; if (!settings) return I40E_ERR_PARAM; /* Validate the input settings */ ret = i40e_validate_filter_settings(hw, settings); if (ret) return ret; /* Read the PF Queue Filter control register */ val = rd32(hw, I40E_PFQF_CTL_0); /* Program required PE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & I40E_PFQF_CTL_0_PEHSIZE_MASK; /* Program required PE contexts for the PF */ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & I40E_PFQF_CTL_0_PEDSIZE_MASK; /* Program required FCoE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; val |= ((u32)settings->fcoe_filt_num << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & I40E_PFQF_CTL_0_PFFCHSIZE_MASK; /* Program required FCoE DDP contexts for the PF */ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; val |= ((u32)settings->fcoe_cntx_num << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & I40E_PFQF_CTL_0_PFFCDSIZE_MASK; /* Program Hash LUT size for the PF */ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; if (settings->hash_lut_size == 
I40E_HASH_LUT_SIZE_512) hash_lut_size = 1; val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ if (settings->enable_fdir) val |= I40E_PFQF_CTL_0_FD_ENA_MASK; if (settings->enable_ethtype) val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; if (settings->enable_macvlan) val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; wr32(hw, I40E_PFQF_CTL_0, val); return 0; } /** * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter * @hw: pointer to the hw struct * @mac_addr: MAC address to use in the filter * @ethtype: Ethertype to use in the filter * @flags: Flags that needs to be applied to the filter * @vsi_seid: seid of the control VSI * @queue: VSI queue number to send the packet to * @is_add: Add control packet filter if True else remove * @stats: Structure to hold information on control filter counts * @cmd_details: pointer to command details structure or NULL * * This command will Add or Remove control packet filter for a control VSI. * In return it will update the total number of perfect filter count in * the stats member. 
**/ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, u8 *mac_addr, u16 ethtype, u16 flags, u16 vsi_seid, u16 queue, bool is_add, struct i40e_control_filter_stats *stats, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_control_packet_filter *cmd = (struct i40e_aqc_add_remove_control_packet_filter *) &desc.params.raw; struct i40e_aqc_add_remove_control_packet_filter_completion *resp = (struct i40e_aqc_add_remove_control_packet_filter_completion *) &desc.params.raw; i40e_status status; if (vsi_seid == 0) return I40E_ERR_PARAM; if (is_add) { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_control_packet_filter); cmd->queue = cpu_to_le16(queue); } else { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_control_packet_filter); } if (mac_addr) memcpy(cmd->mac, mac_addr, ETH_ALEN); cmd->etype = cpu_to_le16(ethtype); cmd->flags = cpu_to_le16(flags); cmd->seid = cpu_to_le16(vsi_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && stats) { stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); stats->etype_used = le16_to_cpu(resp->etype_used); stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); stats->etype_free = le16_to_cpu(resp->etype_free); } return status; } /** * i40e_set_pci_config_data - store PCI bus info * @hw: pointer to hardware structure * @link_status: the link status word from PCI config space * * Stores the PCI bus info (speed, width, type) within the i40e_hw structure **/ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) { hw->bus.type = i40e_bus_type_pci_express; switch (link_status & PCI_EXP_LNKSTA_NLW) { case PCI_EXP_LNKSTA_NLW_X1: hw->bus.width = i40e_bus_width_pcie_x1; break; case PCI_EXP_LNKSTA_NLW_X2: hw->bus.width = i40e_bus_width_pcie_x2; break; case PCI_EXP_LNKSTA_NLW_X4: hw->bus.width = i40e_bus_width_pcie_x4; break; case PCI_EXP_LNKSTA_NLW_X8: hw->bus.width = i40e_bus_width_pcie_x8; break; 
default: hw->bus.width = i40e_bus_width_unknown; break; } switch (link_status & PCI_EXP_LNKSTA_CLS) { case PCI_EXP_LNKSTA_CLS_2_5GB: hw->bus.speed = i40e_bus_speed_2500; break; case PCI_EXP_LNKSTA_CLS_5_0GB: hw->bus.speed = i40e_bus_speed_5000; break; case PCI_EXP_LNKSTA_CLS_8_0GB: hw->bus.speed = i40e_bus_speed_8000; break; default: hw->bus.speed = i40e_bus_speed_unknown; break; } }
gpl-2.0
KaSt/Kappa34
arch/arm/mach-msm/qdsp5/audio_ac3.c
63
47166
/* arch/arm/mach-msm/audio_ac3.c * * Copyright (c) 2008-2009, 2011-2012 Code Aurora Forum. All rights reserved. * * This code also borrows from audio_aac.c, which is * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, you can find it at http://www.fsf.org. */ #include <asm/atomic.h> #include <asm/ioctls.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/dma-mapping.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/earlysuspend.h> #include <linux/slab.h> #include <linux/msm_audio.h> #include <linux/memory_alloc.h> #include <linux/msm_audio_ac3.h> #include <linux/msm_ion.h> #include <mach/msm_adsp.h> #include <mach/iommu.h> #include <mach/iommu_domains.h> #include <mach/qdsp5/qdsp5audppcmdi.h> #include <mach/qdsp5/qdsp5audppmsg.h> #include <mach/qdsp5/qdsp5audpp.h> #include <mach/qdsp5/qdsp5audplaycmdi.h> #include <mach/qdsp5/qdsp5audplaymsg.h> #include <mach/qdsp5/qdsp5rmtcmdi.h> #include <mach/debug_mm.h> #include <mach/msm_memtypes.h> #include "audmgr.h" #define BUFSZ 4096 #define DMASZ (BUFSZ * 2) #define AUDDEC_DEC_AC3 23 #define PCM_BUFSZ 6168 /* maximum frame size is 512 * 6 samples */ #define PCM_BUF_MAX_COUNT 5 /* DSP only accepts 5 buffers at most * but support 2 buffers currently */ #define ROUTING_MODE_FTRT 1 #define 
ROUTING_MODE_RT 2 /* Decoder status received from AUDPPTASK */ #define AUDPP_DEC_STATUS_SLEEP 0 #define AUDPP_DEC_STATUS_INIT 1 #define AUDPP_DEC_STATUS_CFG 2 #define AUDPP_DEC_STATUS_PLAY 3 #define AUDAC3_METAFIELD_MASK 0xFFFF0000 #define AUDAC3_EOS_FLG_OFFSET 0x0A /* Offset from beginning of buffer */ #define AUDAC3_EOS_FLG_MASK 0x01 #define AUDAC3_EOS_NONE 0x0 /* No EOS detected */ #define AUDAC3_EOS_SET 0x1 /* EOS set in meta field */ #define AUDAC3_EVENT_NUM 10 /* Default number of pre-allocated event packets */ struct buffer { void *data; unsigned size; unsigned used; /* Input usage actual DSP produced PCM size */ unsigned addr; unsigned short mfield_sz; /* only useful for data has meta field */ }; #ifdef CONFIG_HAS_EARLYSUSPEND struct audac3_suspend_ctl { struct early_suspend node; struct audio *audio; }; #endif struct audac3_event { struct list_head list; int event_type; union msm_audio_event_payload payload; }; struct audio { struct buffer out[2]; spinlock_t dsp_lock; uint8_t out_head; uint8_t out_tail; uint8_t out_needed; /* number of buffers the dsp is waiting for */ atomic_t out_bytes; struct mutex lock; struct mutex write_lock; wait_queue_head_t write_wait; /* Host PCM section */ struct buffer in[PCM_BUF_MAX_COUNT]; struct mutex read_lock; wait_queue_head_t read_wait; /* Wait queue for read */ char *read_data; /* pointer to reader buffer */ int32_t read_phys; /* physical address of reader buffer */ uint8_t read_next; /* index to input buffers to be read next */ uint8_t fill_next; /* index to buffer that DSP should be filling */ uint8_t pcm_buf_count; /* number of pcm buffer allocated */ /* ---- End of Host PCM section */ struct msm_adsp_module *audplay; struct audmgr audmgr; struct msm_audio_ac3_config ac3_config; /* data allocated for various buffers */ char *data; int32_t phys; /* physical address of write buffer */ void *map_v_read; void *map_v_write; int mfield; /* meta field embedded in data */ int rflush; /* Read flush */ int wflush; /* Write 
flush */ uint8_t opened; uint8_t enabled; uint8_t running; uint8_t stopped; /* set when stopped, cleared on flush */ uint8_t pcm_feedback; uint8_t buf_refresh; int teos; /* valid only if tunnel mode & no data left for decoder */ enum msm_aud_decoder_state dec_state; /* Represents decoder state */ int rmt_resource_released; const char *module_name; unsigned queue_id; uint16_t dec_id; uint32_t read_ptr_offset; #ifdef CONFIG_HAS_EARLYSUSPEND struct audac3_suspend_ctl suspend_ctl; #endif #ifdef CONFIG_DEBUG_FS struct dentry *dentry; #endif wait_queue_head_t wait; struct list_head free_event_queue; struct list_head event_queue; wait_queue_head_t event_wait; spinlock_t event_queue_lock; struct mutex get_event_lock; int event_abort; int eq_enable; int eq_needs_commit; audpp_cmd_cfg_object_params_eqalizer eq; audpp_cmd_cfg_object_params_volume vol_pan; struct ion_client *client; struct ion_handle *input_buff_handle; struct ion_handle *output_buff_handle; }; static int auddec_dsp_config(struct audio *audio, int enable); static void audpp_cmd_cfg_adec_params(struct audio *audio); static void audpp_cmd_cfg_routing_mode(struct audio *audio); static void audac3_send_data(struct audio *audio, unsigned needed); static void audac3_dsp_event(void *private, unsigned id, uint16_t *msg); static void audac3_config_hostpcm(struct audio *audio); static void audac3_buffer_refresh(struct audio *audio); static void audac3_post_event(struct audio *audio, int type, union msm_audio_event_payload payload); static int rmt_put_resource(struct audio *audio) { struct aud_codec_config_cmd cmd; unsigned short client_idx; cmd.cmd_id = RM_CMD_AUD_CODEC_CFG; cmd.client_id = RM_AUD_CLIENT_ID; cmd.task_id = audio->dec_id; cmd.enable = RMT_DISABLE; cmd.dec_type = AUDDEC_DEC_AC3; client_idx = ((cmd.client_id << 8) | cmd.task_id); return put_adsp_resource(client_idx, &cmd, sizeof(cmd)); } static int rmt_get_resource(struct audio *audio) { struct aud_codec_config_cmd cmd; unsigned short client_idx; cmd.cmd_id 
= RM_CMD_AUD_CODEC_CFG; cmd.client_id = RM_AUD_CLIENT_ID; cmd.task_id = audio->dec_id; cmd.enable = RMT_ENABLE; cmd.dec_type = AUDDEC_DEC_AC3; client_idx = ((cmd.client_id << 8) | cmd.task_id); return get_adsp_resource(client_idx, &cmd, sizeof(cmd)); } /* must be called with audio->lock held */ static int audac3_enable(struct audio *audio) { struct audmgr_config cfg; int rc; MM_DBG("\n"); /* Macro prints the file name and function */ if (audio->enabled) return 0; if (audio->rmt_resource_released == 1) { audio->rmt_resource_released = 0; rc = rmt_get_resource(audio); if (rc) { MM_ERR("ADSP resources are not available for AC3"\ " session 0x%08x on decoder: %d\n Ignoring"\ " error and going ahead with the playback\n", (int)audio, audio->dec_id); } } audio->dec_state = MSM_AUD_DECODER_STATE_NONE; audio->out_tail = 0; audio->out_needed = 0; if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) { cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE; cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000; cfg.def_method = RPC_AUD_DEF_METHOD_PLAYBACK; cfg.codec = RPC_AUD_DEF_CODEC_AC3; cfg.snd_method = RPC_SND_METHOD_MIDI; rc = audmgr_enable(&audio->audmgr, &cfg); if (rc < 0) return rc; } if (msm_adsp_enable(audio->audplay)) { MM_ERR("msm_adsp_enable(audplay) failed\n"); if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) audmgr_disable(&audio->audmgr); return -ENODEV; } if (audpp_enable(audio->dec_id, audac3_dsp_event, audio)) { MM_ERR("audpp_enable() failed\n"); msm_adsp_disable(audio->audplay); if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) audmgr_disable(&audio->audmgr); return -ENODEV; } audio->enabled = 1; return 0; } /* must be called with audio->lock held */ static int audac3_disable(struct audio *audio) { int rc = 0; if (audio->enabled) { audio->enabled = 0; audio->dec_state = MSM_AUD_DECODER_STATE_NONE; auddec_dsp_config(audio, 0); rc = wait_event_interruptible_timeout(audio->wait, audio->dec_state != MSM_AUD_DECODER_STATE_NONE, msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS)); if (rc == 0) rc = 
-ETIMEDOUT; else if (audio->dec_state != MSM_AUD_DECODER_STATE_CLOSE) rc = -EFAULT; else rc = 0; audio->stopped = 1; wake_up(&audio->write_wait); wake_up(&audio->read_wait); msm_adsp_disable(audio->audplay); audpp_disable(audio->dec_id, audio); if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) audmgr_disable(&audio->audmgr); audio->out_needed = 0; rmt_put_resource(audio); audio->rmt_resource_released = 1; } return rc; } /* ------------------- dsp --------------------- */ static void audac3_update_pcm_buf_entry(struct audio *audio, uint32_t *payload) { uint8_t index; unsigned long flags; if (audio->rflush) return; spin_lock_irqsave(&audio->dsp_lock, flags); for (index = 0; index < payload[1]; index++) { if (audio->in[audio->fill_next].addr == payload[2 + index * 2]) { MM_DBG("in[%d] ready\n", audio->fill_next); audio->in[audio->fill_next].used = payload[3 + index * 2]; if ((++audio->fill_next) == audio->pcm_buf_count) audio->fill_next = 0; } else { MM_ERR("expected=%x ret=%x\n", audio->in[audio->fill_next].addr, payload[1 + index * 2]); break; } } if (audio->in[audio->fill_next].used == 0) { audac3_buffer_refresh(audio); } else { MM_DBG("read cannot keep up\n"); audio->buf_refresh = 1; } wake_up(&audio->read_wait); spin_unlock_irqrestore(&audio->dsp_lock, flags); } static void audplay_dsp_event(void *data, unsigned id, size_t len, void (*getevent) (void *ptr, size_t len)) { struct audio *audio = data; uint32_t msg[28]; getevent(msg, sizeof(msg)); MM_DBG("msg_id=%x\n", id); switch (id) { case AUDPLAY_MSG_DEC_NEEDS_DATA: audac3_send_data(audio, 1); break; case AUDPLAY_MSG_BUFFER_UPDATE: MM_DBG("\n"); /* Macro prints the file name and function */ audac3_update_pcm_buf_entry(audio, msg); break; case ADSP_MESSAGE_ID: MM_DBG("Received ADSP event: module enable(audplaytask)\n"); break; default: MM_ERR("unexpected message from decoder\n"); } } static void audac3_dsp_event(void *private, unsigned id, uint16_t *msg) { struct audio *audio = private; switch (id) { case 
AUDPP_MSG_STATUS_MSG:{ unsigned status = msg[1]; switch (status) { case AUDPP_DEC_STATUS_SLEEP: { uint16_t reason = msg[2]; MM_DBG("decoder status:sleep reason =0x%04x\n", reason); if ((reason == AUDPP_MSG_REASON_MEM) || (reason == AUDPP_MSG_REASON_NODECODER)) { audio->dec_state = MSM_AUD_DECODER_STATE_FAILURE; wake_up(&audio->wait); } else if (reason == AUDPP_MSG_REASON_NONE) { /* decoder is in disable state */ audio->dec_state = MSM_AUD_DECODER_STATE_CLOSE; wake_up(&audio->wait); } break; } case AUDPP_DEC_STATUS_INIT: MM_DBG("decoder status: init\n"); if (audio->pcm_feedback) audpp_cmd_cfg_routing_mode(audio); else audpp_cmd_cfg_adec_params(audio); break; case AUDPP_DEC_STATUS_CFG: MM_DBG("decoder status: cfg\n"); break; case AUDPP_DEC_STATUS_PLAY: MM_DBG("decoder status: play\n"); if (audio->pcm_feedback) { audac3_config_hostpcm(audio); audac3_buffer_refresh(audio); } audio->dec_state = MSM_AUD_DECODER_STATE_SUCCESS; wake_up(&audio->wait); break; default: MM_ERR("unknown decoder status\n"); } break; } case AUDPP_MSG_CFG_MSG: if (msg[0] == AUDPP_MSG_ENA_ENA) { MM_DBG("CFG_MSG ENABLE\n"); auddec_dsp_config(audio, 1); audio->out_needed = 0; audio->running = 1; audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan); audpp_dsp_set_eq(audio->dec_id, audio->eq_enable, &audio->eq); audpp_avsync(audio->dec_id, 22050); } else if (msg[0] == AUDPP_MSG_ENA_DIS) { MM_DBG("CFG_MSG DISABLE\n"); audpp_avsync(audio->dec_id, 0); audio->running = 0; } else { MM_DBG("CFG_MSG %d?\n", msg[0]); } break; case AUDPP_MSG_ROUTING_ACK: MM_DBG("ROUTING_ACK\n"); audpp_cmd_cfg_adec_params(audio); break; case AUDPP_MSG_FLUSH_ACK: MM_DBG("FLUSH_ACK\n"); audio->wflush = 0; audio->rflush = 0; wake_up(&audio->write_wait); if (audio->pcm_feedback) audac3_buffer_refresh(audio); break; case AUDPP_MSG_PCMDMAMISSED: MM_DBG("PCMDMAMISSED\n"); audio->teos = 1; wake_up(&audio->write_wait); break; default: MM_ERR("UNKNOWN (%d)\n", id); } } struct msm_adsp_ops audplay_adsp_ops_ac3 = { .event = 
audplay_dsp_event, }; #define audplay_send_queue0(audio, cmd, len) \ msm_adsp_write(audio->audplay, audio->queue_id, \ cmd, len) static int auddec_dsp_config(struct audio *audio, int enable) { u16 cfg_dec_cmd[AUDPP_CMD_CFG_DEC_TYPE_LEN / sizeof(unsigned short)]; memset(cfg_dec_cmd, 0, sizeof(cfg_dec_cmd)); cfg_dec_cmd[0] = AUDPP_CMD_CFG_DEC_TYPE; if (enable) cfg_dec_cmd[1 + audio->dec_id] = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_AC3; else cfg_dec_cmd[1 + audio->dec_id] = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_DIS_DEC_V; return audpp_send_queue1(&cfg_dec_cmd, sizeof(cfg_dec_cmd)); } static int get_frequency_index(unsigned short frequency) { switch (frequency) { case 48000: return 0; case 44100: return 1; case 32000: return 2; default: return -EINVAL; } } static void audpp_cmd_cfg_adec_params(struct audio *audio) { struct audpp_cmd_cfg_adec_params_ac3 cmd; memset(&cmd, 0, sizeof(cmd)); cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS; /* dsp needs word size */ cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_AC3_LEN >> 1; cmd.common.dec_id = audio->dec_id; cmd.common.input_sampling_frequency = (audio->ac3_config).fsCod; cmd.index[0] = (((audio->ac3_config).numChans << 8) & 0xFF00) | ((audio->ac3_config).wordSize & 0x00FF); cmd.index[1] = (((audio->ac3_config).kCapableMode << 12) & 0xF000) | (((audio->ac3_config).compMode << 8) & 0x0F00) | (((audio->ac3_config).outLfeOn << 4) & 0x00F0) | ((audio->ac3_config).outputMode & 0x000F); cmd.index[2] = ((((audio->ac3_config).stereoMode << 12) & 0xF000) | (((audio->ac3_config).dualMonoMode << 8) & 0x0F00) | ((get_frequency_index((audio->ac3_config).fsCod) << 4) & 0x00F0)) & 0xFFF0; /* last 4 bytes are reserved */ cmd.index[3] = (audio->ac3_config).pcmScaleFac; cmd.index[4] = (audio->ac3_config).dynRngScaleHi; cmd.index[5] = (audio->ac3_config).dynRngScaleLow; cmd.index[6] = (((audio->ac3_config).user_downmix_flag << 8) & 0xFF00)| ((audio->ac3_config).user_karaoke_flag & 0x00FF); cmd.index[7] = 
(audio->ac3_config).dm_address_high; cmd.index[8] = (audio->ac3_config).dm_address_low; cmd.index[9] = (audio->ac3_config).ko_address_high; cmd.index[10] = (audio->ac3_config).ko_address_high; cmd.index[11] = (((audio->ac3_config).max_rep_count << 1) & 0xFFFE) | ((audio->ac3_config).error_concealment & 0x0001); cmd.index[12] = (((audio->ac3_config).channel_routing_mode[3] << 12) & 0xF000) | (((audio->ac3_config).channel_routing_mode[2] << 8) & 0x0F00) | (((audio->ac3_config).channel_routing_mode[1] << 4) & 0x00F0) | ((audio->ac3_config).channel_routing_mode[0] & 0x000F); cmd.index[13] = ((((audio->ac3_config).channel_routing_mode[5] << 12) & 0xF000) | (((audio->ac3_config).channel_routing_mode[4] << 8) & 0x0F00)) & 0xFF00; /* last 8 bytes are reserved */ audpp_send_queue2(&cmd, sizeof(cmd)); } static void audpp_cmd_cfg_routing_mode(struct audio *audio) { struct audpp_cmd_routing_mode cmd; MM_DBG("\n"); /* Macro prints the file name and function */ memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDPP_CMD_ROUTING_MODE; cmd.object_number = audio->dec_id; if (audio->pcm_feedback) cmd.routing_mode = ROUTING_MODE_FTRT; else cmd.routing_mode = ROUTING_MODE_RT; audpp_send_queue1(&cmd, sizeof(cmd)); } static int audplay_dsp_send_data_avail(struct audio *audio, unsigned idx, unsigned len) { struct audplay_cmd_bitstream_data_avail_nt2 cmd; cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL_NT2; if (audio->mfield) cmd.decoder_id = AUDAC3_METAFIELD_MASK | (audio->out[idx].mfield_sz >> 1); else cmd.decoder_id = audio->dec_id; cmd.buf_ptr = audio->out[idx].addr; cmd.buf_size = len / 2; cmd.partition_number = 0; /* complete writes to the input buffer */ wmb(); return audplay_send_queue0(audio, &cmd, sizeof(cmd)); } static void audac3_buffer_refresh(struct audio *audio) { struct audplay_cmd_buffer_refresh refresh_cmd; refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH; refresh_cmd.num_buffers = 1; refresh_cmd.buf0_address = audio->in[audio->fill_next].addr; refresh_cmd.buf0_length = 
		audio->in[audio->fill_next].size;
	refresh_cmd.buf_read_count = 0;
	MM_DBG("buf0_addr=%x buf0_len=%d\n", refresh_cmd.buf0_address,
			refresh_cmd.buf0_length);
	(void)audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd));
}

/*
 * Configure host-PCM feedback: a single buffer, no byte swap, feedback
 * after every buffer.  0x8000|0x4000 are host-PCM config bits whose
 * meaning is not visible here — defined by the DSP interface.
 */
static void audac3_config_hostpcm(struct audio *audio)
{
	struct audplay_cmd_hpcm_buf_cfg cfg_cmd;

	MM_DBG("\n"); /* Macro prints the file name and function */
	cfg_cmd.cmd_id = AUDPLAY_CMD_HPCM_BUF_CFG;
	cfg_cmd.max_buffers = 1;
	cfg_cmd.byte_swap = 0;
	cfg_cmd.hostpcm_config = (0x8000) | (0x4000);
	cfg_cmd.feedback_frequency = 1;
	cfg_cmd.partition_number = 0;
	(void)audplay_send_queue0(audio, &cfg_cmd, sizeof(cfg_cmd));
}

/*
 * Pump the double-buffered output path under dsp_lock.  @needed is
 * nonzero when called from the DSP callback, meaning the in-flight
 * buffer (used == 0xffffffff) can be reclaimed and writers woken.
 */
static void audac3_send_data(struct audio *audio, unsigned needed)
{
	struct buffer *frame;
	unsigned long flags;

	spin_lock_irqsave(&audio->dsp_lock, flags);
	if (!audio->running)
		goto done;
	if (needed && !audio->wflush) {
		/* We were called from the callback because the DSP
		 * requested more data.  Note that the DSP does want
		 * more data, and if a buffer was in-flight, mark it
		 * as available (since the DSP must now be done with
		 * it).
		 */
		audio->out_needed = 1;
		frame = audio->out + audio->out_tail;
		if (frame->used == 0xffffffff) {
			MM_DBG("frame %d free\n", audio->out_tail);
			frame->used = 0;
			audio->out_tail ^= 1;
			wake_up(&audio->write_wait);
		}
	}
	if (audio->out_needed) {
		/* If the DSP currently wants data and we have a
		 * buffer available, we will send it and reset
		 * the needed flag.  We'll mark the buffer as in-flight
		 * so that it won't be recycled until the next buffer
		 * is requested
		 */
		frame = audio->out + audio->out_tail;
		if (frame->used) {
			BUG_ON(frame->used == 0xffffffff);
			MM_DBG("frame %d busy\n", audio->out_tail);
			audplay_dsp_send_data_avail(audio, audio->out_tail,
					frame->used);
			frame->used = 0xffffffff;
			audio->out_needed = 0;
		}
	}
done:
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
}

/* ------------------- device --------------------- */

/* Reset both output (write-side) buffers and counters under dsp_lock. */
static void audac3_flush(struct audio *audio)
{
	unsigned long flags;

	spin_lock_irqsave(&audio->dsp_lock, flags);
	audio->out[0].used = 0;
	audio->out[1].used = 0;
	audio->out_head = 0;
	audio->out_tail = 0;
	audio->out_needed = 0;
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
	atomic_set(&audio->out_bytes, 0);
}

/* Reset all PCM capture (read-side) buffers and indices under dsp_lock. */
static void audac3_flush_pcm_buf(struct audio *audio)
{
	uint8_t index;
	unsigned long flags;

	spin_lock_irqsave(&audio->dsp_lock, flags);
	for (index = 0; index < PCM_BUF_MAX_COUNT; index++)
		audio->in[index].used = 0;
	audio->buf_refresh = 0;
	audio->read_next = 0;
	audio->fill_next = 0;
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
}

/*check if func to be added to validate user data*/

/* Wake both I/O waiters and flush each side under its own mutex. */
static void audac3_ioport_reset(struct audio *audio)
{
	/* Make sure read/write thread are free from
	 * sleep and knowing that system is not able
	 * to process io request at the moment
	 */
	wake_up(&audio->write_wait);
	mutex_lock(&audio->write_lock);
	audac3_flush(audio);
	mutex_unlock(&audio->write_lock);
	wake_up(&audio->read_wait);
	mutex_lock(&audio->read_lock);
	audac3_flush_pcm_buf(audio);
	mutex_unlock(&audio->read_lock);
}

/*
 * Nonzero when an event is queued or an abort was requested.
 * NOTE(review): "empty" actually holds "queue is non-empty", and
 * event_abort is read outside the lock — pre-existing behavior.
 */
static int audac3_events_pending(struct audio *audio)
{
	unsigned long flags;
	int empty;

	spin_lock_irqsave(&audio->event_queue_lock, flags);
	empty = !list_empty(&audio->event_queue);
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
	return empty || audio->event_abort;
}

/* Free every node on both the pending and free event lists. */
static void audac3_reset_event_queue(struct audio *audio)
{
	unsigned long flags;
	struct audac3_event *drv_evt;
	struct list_head *ptr, *next;

	spin_lock_irqsave(&audio->event_queue_lock, flags);
	list_for_each_safe(ptr, next, &audio->event_queue) {
		drv_evt = list_first_entry(&audio->event_queue,
				struct audac3_event, list);
		list_del(&drv_evt->list);
		kfree(drv_evt);
	}
	list_for_each_safe(ptr, next, &audio->free_event_queue) {
		drv_evt = list_first_entry(&audio->free_event_queue,
				struct audac3_event, list);
		list_del(&drv_evt->list);
		kfree(drv_evt);
	}
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
	return;
}

/*
 * Block (optionally with timeout from userspace) until an event is
 * pending, then copy it out; the drained node returns to the free pool.
 * Returns -ETIMEDOUT, -ENODEV on abort, -EFAULT on bad user pointer.
 */
static long audac3_process_event_req(struct audio *audio, void __user *arg)
{
	long rc;
	struct msm_audio_event usr_evt;
	struct audac3_event *drv_evt = NULL;
	int timeout;
	unsigned long flags;

	if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event)))
		return -EFAULT;
	timeout = (int) usr_evt.timeout_ms;
	if (timeout > 0) {
		rc = wait_event_interruptible_timeout(
			audio->event_wait, audac3_events_pending(audio),
			msecs_to_jiffies(timeout));
		if (rc == 0)
			return -ETIMEDOUT;
	} else {
		rc = wait_event_interruptible(
			audio->event_wait, audac3_events_pending(audio));
	}
	if (rc < 0)
		return rc;
	if (audio->event_abort) {
		audio->event_abort = 0;
		return -ENODEV;
	}
	rc = 0;
	spin_lock_irqsave(&audio->event_queue_lock, flags);
	if (!list_empty(&audio->event_queue)) {
		drv_evt = list_first_entry(&audio->event_queue,
				struct audac3_event, list);
		list_del(&drv_evt->list);
	}
	if (drv_evt) {
		usr_evt.event_type = drv_evt->event_type;
		usr_evt.event_payload = drv_evt->payload;
		list_add_tail(&drv_evt->list, &audio->free_event_queue);
	} else
		rc = -1;	/* woken with nothing queued */
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
	if (!rc && copy_to_user(arg, &usr_evt, sizeof(usr_evt)))
		rc = -EFAULT;
	return rc;
}

/* Latch the EQ enable state; push it to the DSP only while running. */
static int audio_enable_eq(struct audio *audio, int enable)
{
	if (audio->eq_enable == enable && !audio->eq_needs_commit)
		return 0;
	audio->eq_enable = enable;
	if (audio->running) {
		audpp_dsp_set_eq(audio->dec_id, enable, &audio->eq);
		audio->eq_needs_commit = 0;
	}
	return 0;
}

static long audac3_ioctl(struct file *file, unsigned int
		cmd, unsigned long arg)
{
	struct audio *audio = file->private_data;
	int rc = -EINVAL;
	unsigned long flags = 0;
	uint16_t enable_mask;
	int enable;
	int prev_state;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	int len = 0;

	MM_DBG("cmd = %d\n", cmd);
	/* First switch: commands handled without taking audio->lock. */
	switch (cmd) {
	case AUDIO_ENABLE_AUDPP:
		if (copy_from_user(&enable_mask, (void *) arg,
				sizeof(enable_mask))) {
			rc = -EFAULT;
			break;
		}
		spin_lock_irqsave(&audio->dsp_lock, flags);
		enable = (enable_mask & EQ_ENABLE) ? 1 : 0;
		audio_enable_eq(audio, enable);
		spin_unlock_irqrestore(&audio->dsp_lock, flags);
		rc = 0;
		break;
	case AUDIO_SET_VOLUME:
		spin_lock_irqsave(&audio->dsp_lock, flags);
		audio->vol_pan.volume = arg;
		if (audio->running)
			audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan);
		spin_unlock_irqrestore(&audio->dsp_lock, flags);
		rc = 0;
		break;
	case AUDIO_SET_PAN:
		spin_lock_irqsave(&audio->dsp_lock, flags);
		audio->vol_pan.pan = arg;
		if (audio->running)
			audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan);
		spin_unlock_irqrestore(&audio->dsp_lock, flags);
		rc = 0;
		break;
	case AUDIO_SET_EQ:
		/* EQ is disabled while the table is being overwritten. */
		prev_state = audio->eq_enable;
		audio->eq_enable = 0;
		if (copy_from_user(&audio->eq.num_bands, (void *) arg,
				sizeof(audio->eq) -
				(AUDPP_CMD_CFG_OBJECT_PARAMS_COMMON_LEN + 2))) {
			rc = -EFAULT;
			break;
		}
		audio->eq_enable = prev_state;
		audio->eq_needs_commit = 1;
		rc = 0;
		break;
	}

	/* rc != -EINVAL means one of the lock-free cases ran. */
	if (-EINVAL != rc)
		return rc;

	if (cmd == AUDIO_GET_EVENT) {
		MM_DBG("AUDIO_GET_EVENT\n");
		if (mutex_trylock(&audio->get_event_lock)) {
			rc = audac3_process_event_req(audio,
					(void __user *) arg);
			mutex_unlock(&audio->get_event_lock);
		} else
			rc = -EBUSY;
		return rc;
	}

	if (cmd == AUDIO_ABORT_GET_EVENT) {
		audio->event_abort = 1;
		wake_up(&audio->event_wait);
		return 0;
	}

	/* Remaining commands are serialized against open/release. */
	mutex_lock(&audio->lock);
	switch (cmd) {
	case AUDIO_START:
		MM_DBG("AUDIO_START\n");
		rc = audac3_enable(audio);
		if (!rc) {
			rc = wait_event_interruptible_timeout(audio->wait,
				audio->dec_state != MSM_AUD_DECODER_STATE_NONE,
				msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS));
			MM_INFO("dec_state %d rc = %d\n", audio->dec_state, rc);
			if (audio->dec_state != MSM_AUD_DECODER_STATE_SUCCESS) {
				MM_ERR("In audio->dec_state !=\n");
				rc = -ENODEV;
			} else
				rc = 0;
		}
		break;
	case AUDIO_STOP:
		MM_DBG("AUDIO_STOP\n");
		/* NOTE(review): stopped is only cleared here, never set
		 * before the reset; sibling MSM audio drivers set
		 * stopped = 1 before audac3_ioport_reset() — verify
		 * waiters cannot miss the stop condition. */
		rc = audac3_disable(audio);
		audac3_ioport_reset(audio);
		audio->stopped = 0;
		break;
	case AUDIO_FLUSH:
		MM_DBG("AUDIO_FLUSH\n");
		audio->rflush = 1;
		audio->wflush = 1;
		audac3_ioport_reset(audio);
		if (audio->running) {
			audpp_flush(audio->dec_id);
			rc = wait_event_interruptible(audio->write_wait,
				!audio->wflush);
			if (rc < 0) {
				MM_ERR("AUDIO_FLUSH interrupted\n");
				rc = -EINTR;
			}
		} else {
			audio->rflush = 0;
			audio->wflush = 0;
		}
		break;
	case AUDIO_SET_CONFIG:{
			struct msm_audio_config config;

			if (copy_from_user
			    (&config, (void *)arg, sizeof(config))) {
				rc = -EFAULT;
				break;
			}
			audio->mfield = config.meta_field;
			rc = 0;
			MM_DBG("AUDIO_SET_CONFIG applicable only"\
				" for meta field configuration\n");
			break;
		}
	case AUDIO_GET_CONFIG:{
			struct msm_audio_config config;

			config.buffer_size = BUFSZ;
			config.buffer_count = 2;
			config.sample_rate = (audio->ac3_config).fsCod;
			config.channel_count = 2;
			config.meta_field = 0;
			config.unused[0] = 0;
			config.unused[1] = 0;
			config.unused[2] = 0;
			if (copy_to_user((void *)arg, &config, sizeof(config)))
				rc = -EFAULT;
			else
				rc = 0;
			break;
		}
	case AUDIO_GET_AC3_CONFIG:{
			if (copy_to_user((void *)arg, &audio->ac3_config,
				sizeof(audio->ac3_config)))
				rc = -EFAULT;
			else
				rc = 0;
			break;
		}
	case AUDIO_SET_AC3_CONFIG:{
			struct msm_audio_ac3_config usr_config;

			if (copy_from_user
				(&usr_config, (void *)arg,
					sizeof(usr_config))) {
				rc = -EFAULT;
				break;
			}
			audio->ac3_config = usr_config;
			rc = 0;
			break;
		}
	case AUDIO_GET_PCM_CONFIG:{
			struct msm_audio_pcm_config config;

			config.pcm_feedback = audio->pcm_feedback;
			config.buffer_count = PCM_BUF_MAX_COUNT;
			config.buffer_size = PCM_BUFSZ;
			if (copy_to_user((void *)arg, &config, sizeof(config)))
				rc = -EFAULT;
			else
				rc = 0;
			break;
		}
	case AUDIO_SET_PCM_CONFIG:{
			struct msm_audio_pcm_config config;

			if (copy_from_user
			    (&config, (void *)arg, sizeof(config))) {
				rc = -EFAULT;
				break;
			}
			/* Playback mode (tunnel vs feedback) is fixed at open. */
			if (config.pcm_feedback != audio->pcm_feedback) {
				MM_ERR("Not sufficient permission to"\
					" change the playback mode\n");
				rc = -EACCES;
				break;
			}
			if ((config.buffer_count > PCM_BUF_MAX_COUNT) ||
				(config.buffer_count == 1))
				config.buffer_count = PCM_BUF_MAX_COUNT;
			if (config.buffer_size < PCM_BUFSZ)
				config.buffer_size = PCM_BUFSZ;
			/* Check if pcm feedback is required */
			if ((config.pcm_feedback) && (!audio->read_data)) {
				MM_DBG("allocate PCM buf %d\n",
					config.buffer_count *
					config.buffer_size);
				handle = ion_alloc(audio->client,
					(config.buffer_size *
					config.buffer_count),
					SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
				if (IS_ERR_OR_NULL(handle)) {
					MM_ERR("Unable to alloc I/P buffs\n");
					audio->input_buff_handle = NULL;
					rc = -ENOMEM;
					break;
				}
				audio->input_buff_handle = handle;
				rc = ion_phys(audio->client , handle,
					&addr, &len);
				if (rc) {
					MM_ERR("Invalid phy: %x sz: %x\n",
						(unsigned int) addr,
						(unsigned int) len);
					ion_free(audio->client, handle);
					audio->input_buff_handle = NULL;
					rc = -ENOMEM;
					break;
				} else {
					/* NOTE(review): logs read_phys
					 * before it is assigned below, so
					 * this prints the stale value, not
					 * the new addr. */
					MM_INFO("Got valid phy: %x sz: %x\n",
						(unsigned int)
							audio->read_phys,
						(unsigned int) len);
				}
				audio->read_phys = (int32_t)addr;
				rc = ion_handle_get_flags(audio->client,
					handle, &ionflag);
				if (rc) {
					MM_ERR("could not get flags\n");
					ion_free(audio->client, handle);
					audio->input_buff_handle = NULL;
					rc = -ENOMEM;
					break;
				}
				audio->map_v_read = ion_map_kernel(
					audio->client, handle, ionflag);
				if (IS_ERR(audio->map_v_read)) {
					MM_ERR("map of read buf failed\n");
					ion_free(audio->client, handle);
					audio->input_buff_handle = NULL;
					rc = -ENOMEM;
				} else {
					uint8_t index;
					uint32_t offset = 0;

					audio->read_data = audio->map_v_read;
					audio->buf_refresh = 0;
					audio->pcm_buf_count =
						config.buffer_count;
					audio->read_next = 0;
					audio->fill_next = 0;
					for (index = 0;
					     index < config.buffer_count;
					     index++) {
						audio->in[index].data =
							audio->read_data +
							offset;
						audio->in[index].addr =
							audio->read_phys +
							offset;
						audio->in[index].size =
							config.buffer_size;
						audio->in[index].used = 0;
						offset += config.buffer_size;
					}
					MM_DBG("read buf: phy addr"\
						" 0x%08x kernel addr 0x%08x\n",
						audio->read_phys,
						(int)audio->read_data);
					rc = 0;
				}
			} else {
				rc = 0;
			}
			break;
		}
	case AUDIO_PAUSE:
		MM_DBG("AUDIO_PAUSE %ld\n", arg);
		rc = audpp_pause(audio->dec_id, (int) arg);
		break;
	default:
		rc = -EINVAL;
	}
	mutex_unlock(&audio->lock);
	return rc;
}

/* Only useful in tunnel-mode */
static int audac3_fsync(struct file *file, loff_t a, loff_t b, int datasync)
{
	struct audio *audio = file->private_data;
	int rc = 0;

	MM_DBG("\n"); /* Macro prints the file name and function */
	if (!audio->running || audio->pcm_feedback) {
		rc = -EINVAL;
		goto done_nolock;
	}
	/* Wait for both output buffers to drain, then for teos. */
	mutex_lock(&audio->write_lock);
	rc = wait_event_interruptible(audio->write_wait,
		(!audio->out[0].used &&
		!audio->out[1].used &&
		audio->out_needed) || audio->wflush);
	if (rc < 0)
		goto done;
	else if (audio->wflush) {
		rc = -EBUSY;
		goto done;
	}
	/* pcm dmamiss message is sent continously
	 * when decoder is starved so no race
	 * condition concern
	 */
	audio->teos = 0;
	rc = wait_event_interruptible(audio->write_wait,
		audio->teos || audio->wflush);
	if (audio->wflush)
		rc = -EBUSY;
done:
	mutex_unlock(&audio->write_lock);
done_nolock:
	return rc;
}

/*
 * Copy decoded PCM back to userspace (feedback mode only).  Reads are
 * frame-granular: a read smaller than the filled buffer is refused.
 */
static ssize_t audac3_read(struct file *file, char __user *buf,
			size_t count, loff_t *pos)
{
	struct audio *audio = file->private_data;
	const char __user *start = buf;
	int rc = 0;

	if (!audio->pcm_feedback) {
		MM_ERR("returning from read as tunnel mode\n");
		return 0;
		/* PCM feedback is not enabled.
		   Nothing to read */
	}
	mutex_lock(&audio->read_lock);
	MM_DBG("\n"); /* Macro prints the file name and function */
	while (count > 0) {
		rc = wait_event_interruptible(audio->read_wait,
			(audio->in[audio->read_next].used > 0) ||
			(audio->stopped) || (audio->rflush));
		MM_DBG("wait terminated count%d\n", count);
		if (rc < 0)
			break;
		if (audio->stopped || audio->rflush) {
			rc = -EBUSY;
			break;
		}
		if (count < audio->in[audio->read_next].used) {
			/* Read must happen in frame boundary. Since driver
			 * does not know frame size, read count must be
			 * greater or equal to size of PCM samples
			 */
			MM_DBG("read stop - partial frame\n");
			break;
		} else {
			MM_DBG("read from in[%d]\n", audio->read_next);
			/* order reads from the output buffer */
			rmb();
			if (copy_to_user
			    (buf, audio->in[audio->read_next].data,
			     audio->in[audio->read_next].used)) {
				MM_ERR("invalid addr %x\n",
					(unsigned int)buf);
				rc = -EFAULT;
				break;
			}
			count -= audio->in[audio->read_next].used;
			buf += audio->in[audio->read_next].used;
			audio->in[audio->read_next].used = 0;
			if ((++audio->read_next) == audio->pcm_buf_count)
				audio->read_next = 0;
			break;	/* Force to exit while loop
				 * to prevent output thread
				 * sleep too long if data is
				 * not ready at this moment
				 */
		}
	}
	/* don't feed output buffer to HW decoder during flushing
	 * buffer refresh command will be sent once flush completes
	 * send buf refresh command here can confuse HW decoder
	 */
	if (audio->buf_refresh && !audio->rflush) {
		audio->buf_refresh = 0;
		MM_DBG("kick start pcm feedback again\n");
		audac3_buffer_refresh(audio);
	}
	mutex_unlock(&audio->read_lock);
	if (buf > start)
		rc = buf - start;
	MM_DBG("read %d bytes\n", rc);
	return rc;
}

/*
 * Queue a meta-field-only frame carrying the EOS flag once both output
 * buffers have drained.  Called from audac3_write() with write_lock held.
 */
static int audac3_process_eos(struct audio *audio,
		const char __user *buf_start, unsigned short mfield_size)
{
	int rc = 0;
	struct buffer *frame;

	frame = audio->out + audio->out_head;
	rc = wait_event_interruptible(audio->write_wait,
		(audio->out_needed &&
		audio->out[0].used == 0 &&
		audio->out[1].used == 0) ||
		(audio->stopped) || (audio->wflush));
	if (rc < 0)
		goto done;
	if (audio->stopped || audio->wflush) {
		rc = -EBUSY;
		goto done;
	}
	if (copy_from_user(frame->data, buf_start, mfield_size)) {
		rc = -EFAULT;
		goto done;
	}
	frame->mfield_sz = mfield_size;
	audio->out_head ^= 1;
	frame->used = mfield_size;
	audac3_send_data(audio, 0);
done:
	return rc;
}

/*
 * Accept AC3 bitstream from userspace into the double buffers.  In
 * meta-field mode the first write of a burst starts with a 16-bit
 * meta-field size; an EOS flag inside the meta field defers to
 * audac3_process_eos() after the payload is queued.
 */
static ssize_t audac3_write(struct file *file, const char __user *buf,
			size_t count, loff_t *pos)
{
	struct audio *audio = file->private_data;
	const char __user *start = buf;
	struct buffer *frame;
	size_t xfer;
	char *cpy_ptr;
	unsigned short mfield_size = 0;
	int rc = 0, eos_condition = AUDAC3_EOS_NONE;

	MM_DBG("cnt=%d\n", count);
	if (count & 1)
		return -EINVAL;	/* DSP consumes 16-bit words */
	mutex_lock(&audio->write_lock);
	while (count > 0) {
		frame = audio->out + audio->out_head;
		cpy_ptr = frame->data;
		rc = wait_event_interruptible(audio->write_wait,
					      (frame->used == 0)
					      || (audio->stopped)
					      || (audio->wflush));
		if (rc < 0)
			break;
		if (audio->stopped || audio->wflush) {
			rc = -EBUSY;
			break;
		}
		if (audio->mfield) {
			if (buf == start) {
				/* Processing beginning of user buffer */
				if (__get_user(mfield_size,
					(unsigned short __user *) buf)) {
					rc = -EFAULT;
					break;
				} else if (mfield_size > count) {
					rc = -EINVAL;
					break;
				}
				MM_DBG("mf offset_val %x\n", mfield_size);
				if (copy_from_user(cpy_ptr, buf,
							mfield_size)) {
					rc = -EFAULT;
					break;
				}
				/* Check if EOS flag is set and buffer has
				 * contains just meta field
				 */
				if (cpy_ptr[AUDAC3_EOS_FLG_OFFSET] &
						 AUDAC3_EOS_FLG_MASK) {
					MM_DBG("eos set\n");
					eos_condition = AUDAC3_EOS_SET;
					if (mfield_size == count) {
						buf += mfield_size;
						break;
					} else
						cpy_ptr[AUDAC3_EOS_FLG_OFFSET]
							&= ~AUDAC3_EOS_FLG_MASK;
				}
				/* Check EOS to see if */
				cpy_ptr += mfield_size;
				count -= mfield_size;
				buf += mfield_size;
			} else {
				mfield_size = 0;
				MM_DBG("continuous buffer\n");
			}
			frame->mfield_sz = mfield_size;
		}
		xfer = (count > (frame->size - mfield_size)) ?
			(frame->size - mfield_size) : count;
		if (copy_from_user(cpy_ptr, buf, xfer)) {
			rc = -EFAULT;
			break;
		}
		frame->used = xfer + mfield_size;
		audio->out_head ^= 1;
		count -= xfer;
		buf += xfer;
		audac3_send_data(audio, 0);
	}
	if (eos_condition == AUDAC3_EOS_SET)
		rc = audac3_process_eos(audio, start, mfield_size);
	mutex_unlock(&audio->write_lock);
	if (!rc) {
		if (buf > start)
			return buf - start;
	}
	return rc;
}

/* Tear down the instance: DSP, ION buffers, events, debugfs, memory. */
static int audac3_release(struct inode *inode, struct file *file)
{
	struct audio *audio = file->private_data;

	MM_INFO("audio instance 0x%08x freeing\n", (int)audio);
	mutex_lock(&audio->lock);
	audac3_disable(audio);
	if (audio->rmt_resource_released == 0)
		rmt_put_resource(audio);
	audac3_flush(audio);
	audac3_flush_pcm_buf(audio);
	msm_adsp_put(audio->audplay);
	audpp_adec_free(audio->dec_id);
#ifdef CONFIG_HAS_EARLYSUSPEND
	unregister_early_suspend(&audio->suspend_ctl.node);
#endif
	audio->event_abort = 1;
	wake_up(&audio->event_wait);
	audac3_reset_event_queue(audio);
	ion_unmap_kernel(audio->client, audio->output_buff_handle);
	ion_free(audio->client, audio->output_buff_handle);
	if (audio->input_buff_handle != NULL) {
		ion_unmap_kernel(audio->client, audio->input_buff_handle);
		ion_free(audio->client, audio->input_buff_handle);
	}
	ion_client_destroy(audio->client);
	mutex_unlock(&audio->lock);
#ifdef CONFIG_DEBUG_FS
	if (audio->dentry)
		debugfs_remove(audio->dentry);
#endif
	kfree(audio);
	return 0;
}

#ifdef CONFIG_HAS_EARLYSUSPEND
/* Queue an event for userspace, recycling a free node when possible. */
static void audac3_post_event(struct audio *audio, int type,
		union msm_audio_event_payload payload)
{
	struct audac3_event *e_node = NULL;
	unsigned long flags;

	spin_lock_irqsave(&audio->event_queue_lock, flags);
	if (!list_empty(&audio->free_event_queue)) {
		e_node = list_first_entry(&audio->free_event_queue,
				struct audac3_event, list);
		list_del(&e_node->list);
	} else {
		e_node = kmalloc(sizeof(struct audac3_event), GFP_ATOMIC);
		if (!e_node) {
			MM_ERR("No mem to post event %d\n", type);
			spin_unlock_irqrestore(&audio->event_queue_lock,
					flags);
			return;
		}
	}
	e_node->event_type = type;
	e_node->payload = payload;
	list_add_tail(&e_node->list, &audio->event_queue);
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
	wake_up(&audio->event_wait);
}

/* Early-suspend hook: notify userspace.
 * NOTE(review): payload is passed uninitialized — presumably suspend/
 * resume events carry no payload; confirm consumers ignore it. */
static void audac3_suspend(struct early_suspend *h)
{
	struct audac3_suspend_ctl *ctl =
		container_of(h, struct audac3_suspend_ctl, node);
	union msm_audio_event_payload payload;

	MM_DBG("\n"); /* Macro prints the file name and function */
	audac3_post_event(ctl->audio, AUDIO_EVENT_SUSPEND, payload);
}

/* Late-resume hook: notify userspace (payload unused, as above). */
static void audac3_resume(struct early_suspend *h)
{
	struct audac3_suspend_ctl *ctl =
		container_of(h, struct audac3_suspend_ctl, node);
	union msm_audio_event_payload payload;

	MM_DBG("\n"); /* Macro prints the file name and function */
	audac3_post_event(ctl->audio, AUDIO_EVENT_RESUME, payload);
}
#endif

#ifdef CONFIG_DEBUG_FS
/* debugfs open: stash the audio instance for read(). */
static ssize_t audac3_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

/* Dump instance state into a static 1 KiB buffer for debugfs. */
static ssize_t audac3_debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	const int debug_bufmax = 1024;
	static char buffer[1024];
	int n = 0, i;
	struct audio *audio = file->private_data;

	mutex_lock(&audio->lock);
	n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"enabled %d\n", audio->enabled);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"stopped %d\n", audio->stopped);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"pcm_feedback %d\n", audio->pcm_feedback);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_buf_sz %d\n", audio->out[0].size);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"pcm_buf_count %d\n", audio->pcm_buf_count);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"pcm_buf_sz %d\n", audio->in[0].size);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"volume %x\n", audio->vol_pan.volume);
	mutex_unlock(&audio->lock);
	/* Following variables are only useful for debugging when
	 * when playback halts unexpectedly.  Thus, no mutual exclusion
	 * enforced */
	n += scnprintf(buffer + n, debug_bufmax - n,
			"wflush %d\n", audio->wflush);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"rflush %d\n", audio->rflush);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"running %d\n", audio->running);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"dec state %d\n", audio->dec_state);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_needed %d\n", audio->out_needed);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_head %d\n", audio->out_head);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_tail %d\n", audio->out_tail);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out[0].used %d\n", audio->out[0].used);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out[1].used %d\n", audio->out[1].used);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"buffer_refresh %d\n", audio->buf_refresh);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"read_next %d\n", audio->read_next);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"fill_next %d\n", audio->fill_next);
	/* NOTE(review): label says "size" but the value printed is .used. */
	for (i = 0; i < audio->pcm_buf_count; i++)
		n += scnprintf(buffer + n, debug_bufmax - n,
				"in[%d].size %d\n", i, audio->in[i].used);
	buffer[n] = 0;
	return simple_read_from_buffer(buf, count, ppos, buffer, n);
}

static const struct file_operations audac3_debug_fops = {
	.read = audac3_debug_read,
	.open = audac3_debug_open,
};
#endif

/*
 * Open a decoder instance: pick tunnel vs non-tunnel from f_mode,
 * allocate a DSP decoder, ION output buffers, and the event pool.
 */
static int audac3_open(struct inode *inode, struct file *file)
{
	struct audio *audio = NULL;
	int rc, dec_attrb, decid, i;
	struct audac3_event *e_node = NULL;
	int len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;
#ifdef CONFIG_DEBUG_FS
	/* 4 bytes represents decoder number, 1 byte for terminate string */
	char name[sizeof "msm_ac3_" + 5];
#endif

	/* Allocate audio instance, set to zero */
	audio = kzalloc(sizeof(struct audio), GFP_KERNEL);
	if (!audio) {
		MM_ERR("no memory to allocate audio instance\n");
		rc = -ENOMEM;
		goto done;
	}
	MM_INFO("audio instance 0x%08x
		created\n", (int)audio);

	/* Allocate the decoder */
	dec_attrb = AUDDEC_DEC_AC3;
	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
		/* R+W: non-tunnel (host PCM feedback) mode. */
		dec_attrb |= MSM_AUD_MODE_NONTUNNEL;
		audio->pcm_feedback = NON_TUNNEL_MODE_PLAYBACK;
	} else if ((file->f_mode & FMODE_WRITE) &&
			!(file->f_mode & FMODE_READ)) {
		/* Write-only: tunnel mode, DSP renders directly. */
		dec_attrb |= MSM_AUD_MODE_TUNNEL;
		audio->pcm_feedback = TUNNEL_MODE_PLAYBACK;
	} else {
		kfree(audio);
		rc = -EACCES;
		goto done;
	}

	decid = audpp_adec_alloc(dec_attrb, &audio->module_name,
			&audio->queue_id);
	if (decid < 0) {
		MM_ERR("No free decoder available, freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENODEV;
		kfree(audio);
		goto done;
	}
	audio->dec_id = decid & MSM_AUD_DECODER_MASK;

	client = msm_ion_client_create(UINT_MAX, "Audio_AC3_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID));
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}
	audio->output_buff_handle = handle;

	rc = ion_phys(client, handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		goto output_buff_get_flags_error;
	}

	audio->map_v_write = ion_map_kernel(client, handle, ionflag);
	if (IS_ERR(audio->map_v_write)) {
		MM_ERR("could not map write buffers,freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	audio->data = audio->map_v_write;
	MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	rc = msm_adsp_get(audio->module_name, &audio->audplay,
		&audplay_adsp_ops_ac3, audio);
	if (rc) {
		MM_ERR("failed to get %s module, freeing instance 0x%08x\n",
			audio->module_name, (int)audio);
		if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK)
			audmgr_close(&audio->audmgr);
		goto err;
	}

	rc = rmt_get_resource(audio);
	if (rc) {
		MM_ERR("ADSP resources are not available for AC3 session"\
			" 0x%08x on decoder: %d\n", (int)audio, audio->dec_id);
		if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK)
			audmgr_close(&audio->audmgr);
		msm_adsp_put(audio->audplay);
		goto err;
	}

	/* Initialize all locks of audio instance */
	audio->input_buff_handle = NULL;
	mutex_init(&audio->lock);
	mutex_init(&audio->write_lock);
	mutex_init(&audio->read_lock);
	mutex_init(&audio->get_event_lock);
	spin_lock_init(&audio->dsp_lock);
	init_waitqueue_head(&audio->write_wait);
	init_waitqueue_head(&audio->read_wait);
	INIT_LIST_HEAD(&audio->free_event_queue);
	INIT_LIST_HEAD(&audio->event_queue);
	init_waitqueue_head(&audio->wait);
	init_waitqueue_head(&audio->event_wait);
	spin_lock_init(&audio->event_queue_lock);

	/* Carve the ION region into two BUFSZ output buffers. */
	audio->out[0].data = audio->data + 0;
	audio->out[0].addr = audio->phys + 0;
	audio->out[0].size = BUFSZ;
	audio->out[1].data = audio->data + BUFSZ;
	audio->out[1].addr = audio->phys + BUFSZ;
	audio->out[1].size = BUFSZ;
	audio->vol_pan.volume = 0x3FFF;

	/* Seed the AC3 decoder configuration with driver defaults. */
	(audio->ac3_config).wordSize = AUDAC3_DEF_WORDSIZE;
	(audio->ac3_config).user_downmix_flag = AUDAC3_DEF_USER_DOWNMIX_FLAG;
	(audio->ac3_config).user_karaoke_flag = AUDAC3_DEF_USER_KARAOKE_FLAG;
	(audio->ac3_config).error_concealment = AUDAC3_DEF_ERROR_CONCEALMENT;
	(audio->ac3_config).max_rep_count = AUDAC3_DEF_MAX_REPEAT_COUNT;

	audac3_flush(audio);
	file->private_data = audio;
	audio->opened = 1;
#ifdef CONFIG_DEBUG_FS
	snprintf(name, sizeof name, "msm_ac3_%04x", audio->dec_id);
	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
			NULL, (void *) audio, &audac3_debug_fops);
	if (IS_ERR(audio->dentry))
		MM_DBG("debugfs_create_file failed\n");
#endif
#ifdef CONFIG_HAS_EARLYSUSPEND
	audio->suspend_ctl.node.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
	audio->suspend_ctl.node.resume = audac3_resume;
	audio->suspend_ctl.node.suspend = audac3_suspend;
	audio->suspend_ctl.audio = audio;
	register_early_suspend(&audio->suspend_ctl.node);
#endif
	/* Pre-allocate event nodes; allocation failure here is non-fatal. */
	for (i = 0; i < AUDAC3_EVENT_NUM; i++) {
		e_node = kmalloc(sizeof(struct audac3_event), GFP_KERNEL);
		if (e_node)
			list_add_tail(&e_node->list, &audio->free_event_queue);
		else {
			MM_ERR("event pkt alloc failed\n");
			break;
		}
	}
done:
	return rc;
err:
	ion_unmap_kernel(client, audio->output_buff_handle);
output_buff_map_error:
output_buff_get_flags_error:
output_buff_get_phys_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	audpp_adec_free(audio->dec_id);
	kfree(audio);
	return rc;
}

static const struct file_operations audio_ac3_fops = {
	.owner = THIS_MODULE,
	.open = audac3_open,
	.release = audac3_release,
	.read = audac3_read,
	.write = audac3_write,
	.unlocked_ioctl = audac3_ioctl,
	.fsync = audac3_fsync,
};

struct miscdevice audio_ac3_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "msm_ac3",
	.fops = &audio_ac3_fops,
};

static int __init audac3_init(void)
{
	return misc_register(&audio_ac3_misc);
}

static void __exit audac3_exit(void)
{
	misc_deregister(&audio_ac3_misc);
}

module_init(audac3_init);
module_exit(audac3_exit);

MODULE_DESCRIPTION("MSM AC3 driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
netico-solutions/linux-urtu-bb
kernel/ksysfs.c
319
5770
/*
 * kernel/ksysfs.c - sysfs attributes in /sys/kernel, which
 * are not related to any other subsystem
 *
 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
 *
 * This file is release under the GPLv2
 *
 */

#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/profile.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/compiler.h>

#include <linux/rcupdate.h>	/* rcu_expedited */

/* Declare a read-only /sys/kernel attribute backed by _name##_show. */
#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

/* Declare a 0644 attribute backed by _name##_show / _name##_store. */
#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = \
	__ATTR(_name, 0644, _name##_show, _name##_store)

/* current uevent sequence number */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)uevent_seqnum);
}
KERNEL_ATTR_RO(uevent_seqnum);

#ifdef CONFIG_UEVENT_HELPER
/* uevent helper program, used during early boot */
static ssize_t uevent_helper_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", uevent_helper);
}
/* Store a new helper path; strips one trailing newline if present. */
static ssize_t uevent_helper_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	if (count+1 > UEVENT_HELPER_PATH_LEN)
		return -ENOENT;
	memcpy(uevent_helper, buf, count);
	uevent_helper[count] = '\0';
	if (count && uevent_helper[count-1] == '\n')
		uevent_helper[count-1] = '\0';
	return count;
}
KERNEL_ATTR_RW(uevent_helper);
#endif

#ifdef CONFIG_PROFILING
static ssize_t profiling_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", prof_on);
}
/* One-shot enable of kernel profiling; -EEXIST once already enabled. */
static ssize_t profiling_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	int ret;

	if (prof_on)
		return -EEXIST;
	/*
	 * This eventually calls into get_option() which
	 * has a ton of callers and is not const.
It is * easiest to cast it away here. */ profile_setup((char *)buf); ret = profile_init(); if (ret) return ret; ret = create_proc_profile(); if (ret) return ret; return count; } KERNEL_ATTR_RW(profiling); #endif #ifdef CONFIG_KEXEC_CORE static ssize_t kexec_loaded_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", !!kexec_image); } KERNEL_ATTR_RO(kexec_loaded); static ssize_t kexec_crash_loaded_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", !!kexec_crash_image); } KERNEL_ATTR_RO(kexec_crash_loaded); static ssize_t kexec_crash_size_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%zu\n", crash_get_memory_size()); } static ssize_t kexec_crash_size_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned long cnt; int ret; if (kstrtoul(buf, 0, &cnt)) return -EINVAL; ret = crash_shrink_memory(cnt); return ret < 0 ? ret : count; } KERNEL_ATTR_RW(kexec_crash_size); static ssize_t vmcoreinfo_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%lx %x\n", paddr_vmcoreinfo_note(), (unsigned int)sizeof(vmcoreinfo_note)); } KERNEL_ATTR_RO(vmcoreinfo); #endif /* CONFIG_KEXEC_CORE */ /* whether file capabilities are enabled */ static ssize_t fscaps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", file_caps_enabled); } KERNEL_ATTR_RO(fscaps); int rcu_expedited; static ssize_t rcu_expedited_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", rcu_expedited); } static ssize_t rcu_expedited_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { if (kstrtoint(buf, 0, &rcu_expedited)) return -EINVAL; return count; } KERNEL_ATTR_RW(rcu_expedited); /* * Make /sys/kernel/notes give the raw contents of our kernel .notes section. 
*/ extern const void __start_notes __weak; extern const void __stop_notes __weak; #define notes_size (&__stop_notes - &__start_notes) static ssize_t notes_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { memcpy(buf, &__start_notes + off, count); return count; } static struct bin_attribute notes_attr = { .attr = { .name = "notes", .mode = S_IRUGO, }, .read = &notes_read, }; struct kobject *kernel_kobj; EXPORT_SYMBOL_GPL(kernel_kobj); static struct attribute * kernel_attrs[] = { &fscaps_attr.attr, &uevent_seqnum_attr.attr, #ifdef CONFIG_UEVENT_HELPER &uevent_helper_attr.attr, #endif #ifdef CONFIG_PROFILING &profiling_attr.attr, #endif #ifdef CONFIG_KEXEC_CORE &kexec_loaded_attr.attr, &kexec_crash_loaded_attr.attr, &kexec_crash_size_attr.attr, &vmcoreinfo_attr.attr, #endif &rcu_expedited_attr.attr, NULL }; static struct attribute_group kernel_attr_group = { .attrs = kernel_attrs, }; static int __init ksysfs_init(void) { int error; kernel_kobj = kobject_create_and_add("kernel", NULL); if (!kernel_kobj) { error = -ENOMEM; goto exit; } error = sysfs_create_group(kernel_kobj, &kernel_attr_group); if (error) goto kset_exit; if (notes_size > 0) { notes_attr.size = notes_size; error = sysfs_create_bin_file(kernel_kobj, &notes_attr); if (error) goto group_exit; } return 0; group_exit: sysfs_remove_group(kernel_kobj, &kernel_attr_group); kset_exit: kobject_put(kernel_kobj); exit: return error; } core_initcall(ksysfs_init);
gpl-2.0
piyushnet/linux-rt-rpi
arch/arm/mach-omap1/ams-delta-fiq.c
1599
4380
/* * Amstrad E3 FIQ handling * * Copyright (C) 2009 Janusz Krzysztofik * Copyright (c) 2006 Matt Callow * Copyright (c) 2004 Amstrad Plc * Copyright (C) 2001 RidgeRun, Inc. * * Parts of this code are taken from linux/arch/arm/mach-omap/irq.c * in the MontaVista 2.4 kernel (and the Amstrad changes therein) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/io.h> #include <mach/board-ams-delta.h> #include <asm/fiq.h> #include <mach/ams-delta-fiq.h> static struct fiq_handler fh = { .name = "ams-delta-fiq" }; /* * This buffer is shared between FIQ and IRQ contexts. * The FIQ and IRQ isrs can both read and write it. * It is structured as a header section several 32bit slots, * followed by the circular buffer where the FIQ isr stores * keystrokes received from the qwerty keyboard. * See ams-delta-fiq.h for details of offsets. */ unsigned int fiq_buffer[1024]; EXPORT_SYMBOL(fiq_buffer); static unsigned int irq_counter[16]; static irqreturn_t deferred_fiq(int irq, void *dev_id) { int gpio, irq_num, fiq_count; struct irq_chip *irq_chip; irq_chip = irq_get_chip(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK)); /* * For each handled GPIO interrupt, keep calling its interrupt handler * until the IRQ counter catches the FIQ incremented interrupt counter. */ for (gpio = AMS_DELTA_GPIO_PIN_KEYBRD_CLK; gpio <= AMS_DELTA_GPIO_PIN_HOOK_SWITCH; gpio++) { irq_num = gpio_to_irq(gpio); fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio]; while (irq_counter[gpio] < fiq_count) { if (gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) { struct irq_data *d = irq_get_irq_data(irq_num); /* * It looks like handle_edge_irq() that * OMAP GPIO edge interrupts default to, * expects interrupt already unmasked. 
*/ if (irq_chip && irq_chip->irq_unmask) irq_chip->irq_unmask(d); } generic_handle_irq(irq_num); irq_counter[gpio]++; } } return IRQ_HANDLED; } void __init ams_delta_init_fiq(void) { void *fiqhandler_start; unsigned int fiqhandler_length; struct pt_regs FIQ_regs; unsigned long val, offset; int i, retval; fiqhandler_start = &qwerty_fiqin_start; fiqhandler_length = &qwerty_fiqin_end - &qwerty_fiqin_start; pr_info("Installing fiq handler from %p, length 0x%x\n", fiqhandler_start, fiqhandler_length); retval = claim_fiq(&fh); if (retval) { pr_err("ams_delta_init_fiq(): couldn't claim FIQ, ret=%d\n", retval); return; } retval = request_irq(INT_DEFERRED_FIQ, deferred_fiq, IRQ_TYPE_EDGE_RISING, "deferred_fiq", NULL); if (retval < 0) { pr_err("Failed to get deferred_fiq IRQ, ret=%d\n", retval); release_fiq(&fh); return; } /* * Since no set_type() method is provided by OMAP irq chip, * switch to edge triggered interrupt type manually. */ offset = IRQ_ILR0_REG_OFFSET + INT_DEFERRED_FIQ * 0x4; val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1); omap_writel(val, DEFERRED_FIQ_IH_BASE + offset); set_fiq_handler(fiqhandler_start, fiqhandler_length); /* * Initialise the buffer which is shared * between FIQ mode and IRQ mode */ fiq_buffer[FIQ_GPIO_INT_MASK] = 0; fiq_buffer[FIQ_MASK] = 0; fiq_buffer[FIQ_STATE] = 0; fiq_buffer[FIQ_KEY] = 0; fiq_buffer[FIQ_KEYS_CNT] = 0; fiq_buffer[FIQ_KEYS_HICNT] = 0; fiq_buffer[FIQ_TAIL_OFFSET] = 0; fiq_buffer[FIQ_HEAD_OFFSET] = 0; fiq_buffer[FIQ_BUF_LEN] = 256; fiq_buffer[FIQ_MISSED_KEYS] = 0; fiq_buffer[FIQ_BUFFER_START] = (unsigned int) &fiq_buffer[FIQ_CIRC_BUFF]; for (i = FIQ_CNT_INT_00; i <= FIQ_CNT_INT_15; i++) fiq_buffer[i] = 0; /* * FIQ mode r9 always points to the fiq_buffer, becauses the FIQ isr * will run in an unpredictable context. The fiq_buffer is the FIQ isr's * only means of communication with the IRQ level and other kernel * context code. 
*/ FIQ_regs.ARM_r9 = (unsigned int)fiq_buffer; set_fiq_regs(&FIQ_regs); pr_info("request_fiq(): fiq_buffer = %p\n", fiq_buffer); /* * Redirect GPIO interrupts to FIQ */ offset = IRQ_ILR0_REG_OFFSET + INT_GPIO_BANK1 * 0x4; val = omap_readl(OMAP_IH1_BASE + offset) | 1; omap_writel(val, OMAP_IH1_BASE + offset); }
gpl-2.0
xjljian/android_kernel_huawei_msm8916
drivers/staging/zcache/ramster/r2net.c
2367
10428
/*
 * r2net.c
 *
 * Copyright (c) 2011-2012, Dan Magenheimer, Oracle Corp.
 *
 * Ramster_r2net provides an interface between zcache and r2net.
 *
 * FIXME: support more than two nodes
 */

#include <linux/list.h>
#include "tcp.h"
#include "nodemanager.h"
#include "../tmem.h"
#include "../zcache.h"
#include "ramster.h"

#define RAMSTER_TESTING

#define RMSTR_KEY	0x77347734

/* r2net message types used by ramster */
enum {
	RMSTR_TMEM_PUT_EPH = 100,
	RMSTR_TMEM_PUT_PERS,
	RMSTR_TMEM_ASYNC_GET_REQUEST,
	RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
	RMSTR_TMEM_ASYNC_GET_REPLY,
	RMSTR_TMEM_FLUSH,
	RMSTR_TMEM_FLOBJ,
	RMSTR_TMEM_DESTROY_POOL,
};

/* maximum data payload after the tmem_xhandle header */
#define RMSTR_R2NET_MAX_LEN \
		(R2NET_MAX_PAYLOAD_BYTES - sizeof(struct tmem_xhandle))

#include "tcp_internal.h"

static struct r2nm_node *r2net_target_node;
static int r2net_target_nodenum;

/*
 * Record the remote node to which puts/gets are directed.
 * Returns 0 on success, -1 if the node number is unknown.
 */
int r2net_remote_target_node_set(int node_num)
{
	int ret = -1;

	r2net_target_node = r2nm_get_node_by_num(node_num);
	if (r2net_target_node != NULL) {
		r2net_target_nodenum = node_num;
		r2nm_node_put(r2net_target_node);
		ret = 0;
	}
	return ret;
}

/* FIXME following buffer should be per-cpu, protected by preempt_disable */
static char ramster_async_get_buf[R2NET_MAX_PAYLOAD_BYTES];

/*
 * Handle an incoming async get (or get-and-free) request: look the page
 * up in the local zcache and turn the message into a GET_REPLY carrying
 * the xhandle followed by the page data (size 0 on miss).
 */
static int ramster_remote_async_get_request_handler(struct r2net_msg *msg,
				u32 len, void *data, void **ret_data)
{
	char *pdata;
	struct tmem_xhandle xh;
	int found;
	size_t size = RMSTR_R2NET_MAX_LEN;
	u16 msgtype = be16_to_cpu(msg->msg_type);
	bool get_and_free = (msgtype == RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST);
	unsigned long flags;

	xh = *(struct tmem_xhandle *)msg->buf;
	if (xh.xh_data_size > RMSTR_R2NET_MAX_LEN)
		BUG();
	pdata = ramster_async_get_buf;
	*(struct tmem_xhandle *)pdata = xh;
	pdata += sizeof(struct tmem_xhandle);
	local_irq_save(flags);
	found = zcache_get_page(xh.client_id, xh.pool_id, &xh.oid, xh.index,
				pdata, &size, true, get_and_free ? 1 : -1);
	local_irq_restore(flags);
	if (found < 0) {
		/* a zero size indicates the get failed */
		size = 0;
	}
	if (size > RMSTR_R2NET_MAX_LEN)
		BUG();
	*ret_data = pdata - sizeof(struct tmem_xhandle);
	/* now make caller (r2net_process_message) handle specially */
	r2net_force_data_magic(msg, RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY);
	return size + sizeof(struct tmem_xhandle);
}

/*
 * Handle the reply to an earlier async get: hand the returned page data
 * to ramster_localify() for insertion on this node.
 */
static int ramster_remote_async_get_reply_handler(struct r2net_msg *msg,
				u32 len, void *data, void **ret_data)
{
	char *in = (char *)msg->buf;
	int datalen = len - sizeof(struct r2net_msg);
	int ret = -1;
	struct tmem_xhandle *xh = (struct tmem_xhandle *)in;

	in += sizeof(struct tmem_xhandle);
	datalen -= sizeof(struct tmem_xhandle);
	BUG_ON(datalen < 0 || datalen > PAGE_SIZE);
	ret = ramster_localify(xh->pool_id, &xh->oid, xh->index,
				in, datalen, xh->extra);
#ifdef RAMSTER_TESTING
	if (ret == -EEXIST)
		pr_err("TESTING ArrgREP, aborted overwrite on racy put\n");
#endif
	return ret;
}

/*
 * Handle an incoming put (ephemeral or persistent, chosen by message
 * type): store the payload page into the local zcache.
 */
int ramster_remote_put_handler(struct r2net_msg *msg,
				u32 len, void *data, void **ret_data)
{
	struct tmem_xhandle *xh;
	char *p = (char *)msg->buf;
	int datalen = len - sizeof(struct r2net_msg) -
				sizeof(struct tmem_xhandle);
	u16 msgtype = be16_to_cpu(msg->msg_type);
	bool ephemeral = (msgtype == RMSTR_TMEM_PUT_EPH);
	unsigned long flags;
	int ret;

	xh = (struct tmem_xhandle *)p;
	p += sizeof(struct tmem_xhandle);
	zcache_autocreate_pool(xh->client_id, xh->pool_id, ephemeral);
	local_irq_save(flags);
	ret = zcache_put_page(xh->client_id, xh->pool_id, &xh->oid, xh->index,
				p, datalen, true, ephemeral);
	local_irq_restore(flags);
	return ret;
}

/* Handle a remote request to flush a single page from the local zcache. */
int ramster_remote_flush_handler(struct r2net_msg *msg,
				u32 len, void *data, void **ret_data)
{
	struct tmem_xhandle *xh;
	char *p = (char *)msg->buf;

	xh = (struct tmem_xhandle *)p;
	p += sizeof(struct tmem_xhandle);
	(void)zcache_flush_page(xh->client_id, xh->pool_id,
					&xh->oid, xh->index);
	return 0;
}

/* Handle a remote request to flush an entire object from the local zcache. */
int ramster_remote_flobj_handler(struct r2net_msg *msg,
				u32 len, void *data, void **ret_data)
{
	struct tmem_xhandle *xh;
	char *p = (char *)msg->buf;

	xh = (struct tmem_xhandle *)p;
	p += sizeof(struct tmem_xhandle);
	(void)zcache_flush_object(xh->client_id, xh->pool_id, &xh->oid);
	return 0;
}

/*
 * Send an async get (optionally get-and-free) request for a page held
 * on a remote node. The reply arrives later via the GET_REPLY handler.
 */
int r2net_remote_async_get(struct tmem_xhandle *xh, bool free, int remotenode,
				size_t expect_size, uint8_t expect_cksum,
				void *extra)
{
	int nodenum, ret = -1, status;
	struct r2nm_node *node = NULL;
	struct kvec vec[1];
	size_t veclen = 1;
	u32 msg_type;
	struct r2net_node *nn;

	node = r2nm_get_node_by_num(remotenode);
	if (node == NULL)
		goto out;
	xh->client_id = r2nm_this_node(); /* which node is getting */
	xh->xh_data_cksum = expect_cksum;
	xh->xh_data_size = expect_size;
	xh->extra = extra;
	vec[0].iov_len = sizeof(*xh);
	vec[0].iov_base = xh;

	node = r2net_target_node;
	if (!node)
		goto out;
	nodenum = r2net_target_nodenum;

	r2nm_node_get(node);

	nn = r2net_nn_from_num(nodenum);
	if (nn->nn_persistent_error || !nn->nn_sc_valid) {
		ret = -ENOTCONN;
		r2nm_node_put(node);
		goto out;
	}

	if (free)
		msg_type = RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST;
	else
		msg_type = RMSTR_TMEM_ASYNC_GET_REQUEST;
	ret = r2net_send_message_vec(msg_type, RMSTR_KEY,
					vec, veclen, remotenode, &status);
	r2nm_node_put(node);
	if (ret < 0) {
		if (ret == -ENOTCONN || ret == -EHOSTDOWN)
			goto out;
		if (ret == -EAGAIN)
			goto out;
		/* FIXME handle bad message possibilities here? */
		pr_err("UNTESTED ret<0 in ramster_remote_async_get: ret=%d\n",
			ret);
	}
	ret = status;
out:
	return ret;
}

#ifdef RAMSTER_TESTING
/* leave me here to see if it catches a weird crash */
static void ramster_check_irq_counts(void)
{
	static int last_hardirq_cnt, last_softirq_cnt, last_preempt_cnt;
	int cur_hardirq_cnt, cur_softirq_cnt, cur_preempt_cnt;

	cur_hardirq_cnt = hardirq_count() >> HARDIRQ_SHIFT;
	if (cur_hardirq_cnt > last_hardirq_cnt) {
		last_hardirq_cnt = cur_hardirq_cnt;
		/* log only when the new maximum is a power of two */
		if (!(last_hardirq_cnt&(last_hardirq_cnt-1)))
			pr_err("RAMSTER TESTING RRP hardirq_count=%d\n",
				last_hardirq_cnt);
	}
	cur_softirq_cnt = softirq_count() >> SOFTIRQ_SHIFT;
	if (cur_softirq_cnt > last_softirq_cnt) {
		last_softirq_cnt = cur_softirq_cnt;
		if (!(last_softirq_cnt&(last_softirq_cnt-1)))
			pr_err("RAMSTER TESTING RRP softirq_count=%d\n",
				last_softirq_cnt);
	}
	cur_preempt_cnt = preempt_count() & PREEMPT_MASK;
	if (cur_preempt_cnt > last_preempt_cnt) {
		last_preempt_cnt = cur_preempt_cnt;
		if (!(last_preempt_cnt&(last_preempt_cnt-1)))
			pr_err("RAMSTER TESTING RRP preempt_count=%d\n",
				last_preempt_cnt);
	}
}
#endif

/*
 * Send a page to the configured target node. On success, *remotenode is
 * set to the node that received the page.
 */
int r2net_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
				bool ephemeral, int *remotenode)
{
	int nodenum, ret = -1, status;
	struct r2nm_node *node = NULL;
	struct kvec vec[2];
	size_t veclen = 2;
	u32 msg_type;
	struct r2net_node *nn;

	BUG_ON(size > RMSTR_R2NET_MAX_LEN);
	xh->client_id = r2nm_this_node(); /* which node is putting */
	vec[0].iov_len = sizeof(*xh);
	vec[0].iov_base = xh;
	vec[1].iov_len = size;
	vec[1].iov_base = data;

	node = r2net_target_node;
	if (!node)
		goto out;
	nodenum = r2net_target_nodenum;

	r2nm_node_get(node);

	nn = r2net_nn_from_num(nodenum);
	if (nn->nn_persistent_error || !nn->nn_sc_valid) {
		ret = -ENOTCONN;
		r2nm_node_put(node);
		goto out;
	}

	if (ephemeral)
		msg_type = RMSTR_TMEM_PUT_EPH;
	else
		msg_type = RMSTR_TMEM_PUT_PERS;

#ifdef RAMSTER_TESTING
	/* leave me here to see if it catches a weird crash */
	ramster_check_irq_counts();
#endif

	ret = r2net_send_message_vec(msg_type, RMSTR_KEY, vec, veclen,
						nodenum, &status);
	if (ret < 0)
		ret = -1;
	else {
		ret = status;
		*remotenode = nodenum;
	}

	r2nm_node_put(node);
out:
	return ret;
}

/* Ask a remote node to flush a single page. */
int r2net_remote_flush(struct tmem_xhandle *xh, int remotenode)
{
	int ret = -1, status;
	struct r2nm_node *node = NULL;
	struct kvec vec[1];
	size_t veclen = 1;

	node = r2nm_get_node_by_num(remotenode);
	BUG_ON(node == NULL);
	xh->client_id = r2nm_this_node(); /* which node is flushing */
	vec[0].iov_len = sizeof(*xh);
	vec[0].iov_base = xh;
	BUG_ON(irqs_disabled());
	BUG_ON(in_softirq());
	ret = r2net_send_message_vec(RMSTR_TMEM_FLUSH, RMSTR_KEY,
					vec, veclen, remotenode, &status);
	r2nm_node_put(node);
	return ret;
}

/* Ask a remote node to flush an entire object. */
int r2net_remote_flush_object(struct tmem_xhandle *xh, int remotenode)
{
	int ret = -1, status;
	struct r2nm_node *node = NULL;
	struct kvec vec[1];
	size_t veclen = 1;

	node = r2nm_get_node_by_num(remotenode);
	BUG_ON(node == NULL);
	xh->client_id = r2nm_this_node(); /* which node is flobjing */
	vec[0].iov_len = sizeof(*xh);
	vec[0].iov_base = xh;
	ret = r2net_send_message_vec(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
					vec, veclen, remotenode, &status);
	r2nm_node_put(node);
	return ret;
}

/*
 * Handler registration
 */

static LIST_HEAD(r2net_unreg_list);

static void r2net_unregister_handlers(void)
{
	r2net_unregister_handler_list(&r2net_unreg_list);
}

/*
 * Register all ramster message handlers with r2net. On any failure,
 * the handlers registered so far are unregistered again.
 */
int r2net_register_handlers(void)
{
	int status;

	status = r2net_register_handler(RMSTR_TMEM_PUT_EPH, RMSTR_KEY,
				RMSTR_R2NET_MAX_LEN,
				ramster_remote_put_handler,
				NULL, NULL, &r2net_unreg_list);
	if (status)
		goto bail;

	status = r2net_register_handler(RMSTR_TMEM_PUT_PERS, RMSTR_KEY,
				RMSTR_R2NET_MAX_LEN,
				ramster_remote_put_handler,
				NULL, NULL, &r2net_unreg_list);
	if (status)
		goto bail;

	status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REQUEST, RMSTR_KEY,
				RMSTR_R2NET_MAX_LEN,
				ramster_remote_async_get_request_handler,
				NULL, NULL, &r2net_unreg_list);
	if (status)
		goto bail;

	status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
				RMSTR_KEY, RMSTR_R2NET_MAX_LEN,
				ramster_remote_async_get_request_handler,
				NULL, NULL, &r2net_unreg_list);
	if (status)
		goto bail;

	status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY,
				RMSTR_R2NET_MAX_LEN,
				ramster_remote_async_get_reply_handler,
				NULL, NULL, &r2net_unreg_list);
	if (status)
		goto bail;

	status = r2net_register_handler(RMSTR_TMEM_FLUSH, RMSTR_KEY,
				RMSTR_R2NET_MAX_LEN,
				ramster_remote_flush_handler,
				NULL, NULL, &r2net_unreg_list);
	if (status)
		goto bail;

	status = r2net_register_handler(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
				RMSTR_R2NET_MAX_LEN,
				ramster_remote_flobj_handler,
				NULL, NULL, &r2net_unreg_list);
	if (status)
		goto bail;

	pr_info("ramster: r2net handlers registered\n");

bail:
	if (status) {
		r2net_unregister_handlers();
		pr_err("ramster: couldn't register r2net handlers\n");
	}
	return status;
}
gpl-2.0
matnyman/xhci
drivers/acpi/acpica/psscope.c
2623
8353
/******************************************************************************
 *
 * Module Name: psscope - Parser scope stack management routines
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2013, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"

#define _COMPONENT          ACPI_PARSER
ACPI_MODULE_NAME("psscope")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_get_parent_scope
 *
 * PARAMETERS:  parser_state        - Current parser state object
 *
 * RETURN:      Pointer to an Op object
 *
 * DESCRIPTION: Get parent of current op being parsed
 *
 ******************************************************************************/
union acpi_parse_object *acpi_ps_get_parent_scope(struct acpi_parse_state
						  *parser_state)
{
	return (parser_state->scope->parse_scope.op);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_has_completed_scope
 *
 * PARAMETERS:  parser_state        - Current parser state object
 *
 * RETURN:      Boolean, TRUE = scope completed.
 *
 * DESCRIPTION: Is parsing of current argument complete?  Determined by
 *              1) AML pointer is at or beyond the end of the scope
 *              2) The scope argument count has reached zero.
 *
 ******************************************************************************/
u8 acpi_ps_has_completed_scope(struct acpi_parse_state * parser_state)
{
	return ((u8)
		((parser_state->aml >= parser_state->scope->parse_scope.arg_end
		  || !parser_state->scope->parse_scope.arg_count)));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_init_scope
 *
 * PARAMETERS:  parser_state        - Current parser state object
 *              root                - the Root Node of this new scope
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Allocate and init a new scope object
 *
 ******************************************************************************/
acpi_status
acpi_ps_init_scope(struct acpi_parse_state * parser_state,
		   union acpi_parse_object * root_op)
{
	union acpi_generic_state *scope;

	ACPI_FUNCTION_TRACE_PTR(ps_init_scope, root_op);

	scope = acpi_ut_create_generic_state();
	if (!scope) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Root scope is open-ended: variable args, bounded only by aml_end */

	scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_RPSCOPE;
	scope->parse_scope.op = root_op;
	scope->parse_scope.arg_count = ACPI_VAR_ARGS;
	scope->parse_scope.arg_end = parser_state->aml_end;
	scope->parse_scope.pkg_end = parser_state->aml_end;

	parser_state->scope = scope;
	parser_state->start_op = root_op;

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_push_scope
 *
 * PARAMETERS:  parser_state        - Current parser state object
 *              op                  - Current op to be pushed
 *              remaining_args      - List of args remaining
 *              arg_count           - Fixed or variable number of args
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Push current op to begin parsing its argument
 *
 ******************************************************************************/
acpi_status
acpi_ps_push_scope(struct acpi_parse_state *parser_state,
		   union acpi_parse_object *op,
		   u32 remaining_args, u32 arg_count)
{
	union acpi_generic_state *scope;

	ACPI_FUNCTION_TRACE_PTR(ps_push_scope, op);

	scope = acpi_ut_create_generic_state();
	if (!scope) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_PSCOPE;
	scope->parse_scope.op = op;
	scope->parse_scope.arg_list = remaining_args;
	scope->parse_scope.arg_count = arg_count;
	scope->parse_scope.pkg_end = parser_state->pkg_end;

	/* Push onto scope stack */

	acpi_ut_push_generic_state(&parser_state->scope, scope);

	if (arg_count == ACPI_VAR_ARGS) {

		/* Multiple arguments */

		scope->parse_scope.arg_end = parser_state->pkg_end;
	} else {
		/* Single argument */

		scope->parse_scope.arg_end = ACPI_TO_POINTER(ACPI_MAX_PTR);
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_pop_scope
 *
 * PARAMETERS:  parser_state        - Current parser state object
 *              op                  - Where the popped op is returned
 *              arg_list            - Where the popped "next argument" is
 *                                    returned
 *              arg_count           - Count of objects in arg_list
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Return to parsing a previous op
 *
 ******************************************************************************/
void
acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
		  union acpi_parse_object **op, u32 * arg_list, u32 * arg_count)
{
	union acpi_generic_state *scope = parser_state->scope;

	ACPI_FUNCTION_TRACE(ps_pop_scope);

	/* Only pop the scope if there is in fact a next scope */

	if (scope->common.next) {
		scope = acpi_ut_pop_generic_state(&parser_state->scope);

		/* Return to parsing previous op */

		*op = scope->parse_scope.op;
		*arg_list = scope->parse_scope.arg_list;
		*arg_count = scope->parse_scope.arg_count;
		parser_state->pkg_end = scope->parse_scope.pkg_end;

		/* All done with this scope state structure */

		acpi_ut_delete_generic_state(scope);
	} else {
		/* Empty parse stack, prepare to fetch next opcode */

		*op = NULL;
		*arg_list = 0;
		*arg_count = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "Popped Op %p Args %X\n", *op, *arg_count));
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_cleanup_scope
 *
 * PARAMETERS:  parser_state        - Current parser state object
 *
 * RETURN:      None
 *
 * DESCRIPTION: Destroy available list, remaining stack levels, and return
 *              root scope
 *
 ******************************************************************************/
void acpi_ps_cleanup_scope(struct acpi_parse_state *parser_state)
{
	union acpi_generic_state *scope;

	ACPI_FUNCTION_TRACE_PTR(ps_cleanup_scope, parser_state);

	if (!parser_state) {
		return_VOID;
	}

	/* Delete anything on the scope stack */

	while (parser_state->scope) {
		scope = acpi_ut_pop_generic_state(&parser_state->scope);
		acpi_ut_delete_generic_state(scope);
	}

	return_VOID;
}
gpl-2.0
s9yobena/linux
drivers/infiniband/hw/qib/qib_ruc.c
2879
22238
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>

#include "qib.h"
#include "qib_mad.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_qib_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01:    .01 */
	20,	/* 02     .02 */
	30,	/* 03:    .03 */
	40,	/* 04:    .04 */
	60,	/* 05:    .06 */
	80,	/* 06:    .08 */
	120,	/* 07:    .12 */
	160,	/* 08:    .16 */
	240,	/* 09:    .24 */
	320,	/* 0A:    .32 */
	480,	/* 0B:    .48 */
	640,	/* 0C:    .64 */
	960,	/* 0D:    .96 */
	1280,	/* 0E:   1.28 */
	1920,	/* 0F:   1.92 */
	2560,	/* 10:   2.56 */
	3840,	/* 11:   3.84 */
	5120,	/* 12:   5.12 */
	7680,	/* 13:   7.68 */
	10240,	/* 14:  10.24 */
	15360,	/* 15:  15.36 */
	20480,	/* 16:  20.48 */
	30720,	/* 17:  30.72 */
	40960,	/* 18:  40.96 */
	61440,	/* 19:  61.44 */
	81920,	/* 1A:  81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;
	struct qib_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		/* zero-length SGEs are simply skipped */
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	/* Drop MR references taken for the SGEs validated so far */
	while (j) {
		struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		qib_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct qib_rq *rq;
	struct qib_rwq *wq;
	struct qib_srq *srq;
	struct qib_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct qib_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

/* Look up a source GUID; index 0 is the port GUID, others are the table. */
static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	} else
		return ibp->guids[index - 1];
}

/* Check a GID against an expected interface id and subnet prefix. */
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
*/ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, struct qib_qp *qp, u32 bth0) { __be64 guid; unsigned long flags; if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) { if (!has_grh) { if (qp->alt_ah_attr.ah_flags & IB_AH_GRH) goto err; } else { if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH)) goto err; guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index); if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid)) goto err; if (!gid_ok(&hdr->u.l.grh.sgid, qp->alt_ah_attr.grh.dgid.global.subnet_prefix, qp->alt_ah_attr.grh.dgid.global.interface_id)) goto err; } if (!qib_pkey_ok((u16)bth0, qib_get_pkey(ibp, qp->s_alt_pkey_index))) { qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, (u16)bth0, (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, 0, qp->ibqp.qp_num, hdr->lrh[3], hdr->lrh[1]); goto err; } /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */ if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid || ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num) goto err; spin_lock_irqsave(&qp->s_lock, flags); qib_migrate_qp(qp); spin_unlock_irqrestore(&qp->s_lock, flags); } else { if (!has_grh) { if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) goto err; } else { if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) goto err; guid = get_sguid(ibp, qp->remote_ah_attr.grh.sgid_index); if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid)) goto err; if (!gid_ok(&hdr->u.l.grh.sgid, qp->remote_ah_attr.grh.dgid.global.subnet_prefix, qp->remote_ah_attr.grh.dgid.global.interface_id)) goto err; } if (!qib_pkey_ok((u16)bth0, qib_get_pkey(ibp, qp->s_pkey_index))) { qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, (u16)bth0, (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, 0, qp->ibqp.qp_num, hdr->lrh[3], hdr->lrh[1]); goto err; } /* Validate the SLID. See Ch. 
9.6.1.5 */ if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid || ppd_from_ibp(ibp)->port != qp->port_num) goto err; if (qp->s_mig_state == IB_MIG_REARM && !(bth0 & IB_BTH_MIG_REQ)) qp->s_mig_state = IB_MIG_ARMED; } return 0; err: return 1; } /** * qib_ruc_loopback - handle UC and RC lookback requests * @sqp: the sending QP * * This is called from qib_do_send() to * forward a WQE addressed to the same HCA. * Note that although we are single threaded due to the tasklet, we still * have to protect against post_send(). We don't have to worry about * receive interrupts since this is a connected protocol and all packets * will pass through here. */ static void qib_ruc_loopback(struct qib_qp *sqp) { struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); struct qib_qp *qp; struct qib_swqe *wqe; struct qib_sge *sge; unsigned long flags; struct ib_wc wc; u64 sdata; atomic64_t *maddr; enum ib_wc_status send_status; int release; int ret; /* * Note that we check the responder QP state after * checking the requester's state. */ qp = qib_lookup_qpn(ibp, sqp->remote_qpn); spin_lock_irqsave(&sqp->s_lock, flags); /* Return if we are already busy processing a work request. */ if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) || !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND)) goto unlock; sqp->s_flags |= QIB_S_BUSY; again: if (sqp->s_last == sqp->s_head) goto clr_busy; wqe = get_swqe_ptr(sqp, sqp->s_last); /* Return if it is not OK to start a new work reqeust. */ if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) { if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND)) goto clr_busy; /* We are in the error state, flush the work request. */ send_status = IB_WC_WR_FLUSH_ERR; goto flush_send; } /* * We can rely on the entry not changing without the s_lock * being held until we update s_last. * We increment s_cur to indicate s_last is in progress. 
*/ if (sqp->s_last == sqp->s_cur) { if (++sqp->s_cur >= sqp->s_size) sqp->s_cur = 0; } spin_unlock_irqrestore(&sqp->s_lock, flags); if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) || qp->ibqp.qp_type != sqp->ibqp.qp_type) { ibp->n_pkt_drops++; /* * For RC, the requester would timeout and retry so * shortcut the timeouts and just signal too many retries. */ if (sqp->ibqp.qp_type == IB_QPT_RC) send_status = IB_WC_RETRY_EXC_ERR; else send_status = IB_WC_SUCCESS; goto serr; } memset(&wc, 0, sizeof wc); send_status = IB_WC_SUCCESS; release = 1; sqp->s_sge.sge = wqe->sg_list[0]; sqp->s_sge.sg_list = wqe->sg_list + 1; sqp->s_sge.num_sge = wqe->wr.num_sge; sqp->s_len = wqe->length; switch (wqe->wr.opcode) { case IB_WR_SEND_WITH_IMM: wc.wc_flags = IB_WC_WITH_IMM; wc.ex.imm_data = wqe->wr.ex.imm_data; /* FALLTHROUGH */ case IB_WR_SEND: ret = qib_get_rwqe(qp, 0); if (ret < 0) goto op_err; if (!ret) goto rnr_nak; break; case IB_WR_RDMA_WRITE_WITH_IMM: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) goto inv_err; wc.wc_flags = IB_WC_WITH_IMM; wc.ex.imm_data = wqe->wr.ex.imm_data; ret = qib_get_rwqe(qp, 1); if (ret < 0) goto op_err; if (!ret) goto rnr_nak; /* FALLTHROUGH */ case IB_WR_RDMA_WRITE: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) goto inv_err; if (wqe->length == 0) break; if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length, wqe->wr.wr.rdma.remote_addr, wqe->wr.wr.rdma.rkey, IB_ACCESS_REMOTE_WRITE))) goto acc_err; qp->r_sge.sg_list = NULL; qp->r_sge.num_sge = 1; qp->r_sge.total_len = wqe->length; break; case IB_WR_RDMA_READ: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) goto inv_err; if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, wqe->wr.wr.rdma.remote_addr, wqe->wr.wr.rdma.rkey, IB_ACCESS_REMOTE_READ))) goto acc_err; release = 0; sqp->s_sge.sg_list = NULL; sqp->s_sge.num_sge = 1; qp->r_sge.sge = wqe->sg_list[0]; qp->r_sge.sg_list = wqe->sg_list + 1; qp->r_sge.num_sge = wqe->wr.num_sge; 
qp->r_sge.total_len = wqe->length; break; case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) goto inv_err; if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), wqe->wr.wr.atomic.remote_addr, wqe->wr.wr.atomic.rkey, IB_ACCESS_REMOTE_ATOMIC))) goto acc_err; /* Perform atomic OP and save result. */ maddr = (atomic64_t *) qp->r_sge.sge.vaddr; sdata = wqe->wr.wr.atomic.compare_add; *(u64 *) sqp->s_sge.sge.vaddr = (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? (u64) atomic64_add_return(sdata, maddr) - sdata : (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, sdata, wqe->wr.wr.atomic.swap); qib_put_mr(qp->r_sge.sge.mr); qp->r_sge.num_sge = 0; goto send_comp; default: send_status = IB_WC_LOC_QP_OP_ERR; goto serr; } sge = &sqp->s_sge.sge; while (sqp->s_len) { u32 len = sqp->s_len; if (len > sge->length) len = sge->length; if (len > sge->sge_length) len = sge->sge_length; BUG_ON(len == 0); qib_copy_sge(&qp->r_sge, sge->vaddr, len, release); sge->vaddr += len; sge->length -= len; sge->sge_length -= len; if (sge->sge_length == 0) { if (!release) qib_put_mr(sge->mr); if (--sqp->s_sge.num_sge) *sge = *sqp->s_sge.sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { if (++sge->n >= QIB_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; } sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; sge->length = sge->mr->map[sge->m]->segs[sge->n].length; } sqp->s_len -= len; } if (release) qib_put_ss(&qp->r_sge); if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) goto send_comp; if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; else wc.opcode = IB_WC_RECV; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; wc.byte_len = wqe->length; wc.qp = &qp->ibqp; wc.src_qp = qp->remote_qpn; wc.slid = qp->remote_ah_attr.dlid; wc.sl = qp->remote_ah_attr.sl; wc.port_num = 1; /* Signal completion event if the solicited bit is set. 
*/ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, wqe->wr.send_flags & IB_SEND_SOLICITED); send_comp: spin_lock_irqsave(&sqp->s_lock, flags); ibp->n_loop_pkts++; flush_send: sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; qib_send_complete(sqp, wqe, send_status); goto again; rnr_nak: /* Handle RNR NAK */ if (qp->ibqp.qp_type == IB_QPT_UC) goto send_comp; ibp->n_rnr_naks++; /* * Note: we don't need the s_lock held since the BUSY flag * makes this single threaded. */ if (sqp->s_rnr_retry == 0) { send_status = IB_WC_RNR_RETRY_EXC_ERR; goto serr; } if (sqp->s_rnr_retry_cnt < 7) sqp->s_rnr_retry--; spin_lock_irqsave(&sqp->s_lock, flags); if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK)) goto clr_busy; sqp->s_flags |= QIB_S_WAIT_RNR; sqp->s_timer.function = qib_rc_rnr_retry; sqp->s_timer.expires = jiffies + usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]); add_timer(&sqp->s_timer); goto clr_busy; op_err: send_status = IB_WC_REM_OP_ERR; wc.status = IB_WC_LOC_QP_OP_ERR; goto err; inv_err: send_status = IB_WC_REM_INV_REQ_ERR; wc.status = IB_WC_LOC_QP_OP_ERR; goto err; acc_err: send_status = IB_WC_REM_ACCESS_ERR; wc.status = IB_WC_LOC_PROT_ERR; err: /* responder goes to error state */ qib_rc_error(qp, wc.status); serr: spin_lock_irqsave(&sqp->s_lock, flags); qib_send_complete(sqp, wqe, send_status); if (sqp->ibqp.qp_type == IB_QPT_RC) { int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR); sqp->s_flags &= ~QIB_S_BUSY; spin_unlock_irqrestore(&sqp->s_lock, flags); if (lastwqe) { struct ib_event ev; ev.device = sqp->ibqp.device; ev.element.qp = &sqp->ibqp; ev.event = IB_EVENT_QP_LAST_WQE_REACHED; sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context); } goto done; } clr_busy: sqp->s_flags &= ~QIB_S_BUSY; unlock: spin_unlock_irqrestore(&sqp->s_lock, flags); done: if (qp && atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); } /** * qib_make_grh - construct a GRH header * @ibp: a pointer to the IB port * @hdr: a pointer to the GRH header being constructed * @grh: the global 
route address to send to * @hwords: the number of 32 bit words of header being sent * @nwords: the number of 32 bit words of data being sent * * Return the size of the header in 32 bit words. */ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr, struct ib_global_route *grh, u32 hwords, u32 nwords) { hdr->version_tclass_flow = cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) | (grh->traffic_class << IB_GRH_TCLASS_SHIFT) | (grh->flow_label << IB_GRH_FLOW_SHIFT)); hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2); /* next_hdr is defined by C8-7 in ch. 8.4.1 */ hdr->next_hdr = IB_GRH_NEXT_HDR; hdr->hop_limit = grh->hop_limit; /* The SGID is 32-bit aligned. */ hdr->sgid.global.subnet_prefix = ibp->gid_prefix; hdr->sgid.global.interface_id = grh->sgid_index ? ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid; hdr->dgid = grh->dgid; /* GRH header size in 32-bit words. */ return sizeof(struct ib_grh) / sizeof(u32); } void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, u32 bth0, u32 bth2) { struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); u16 lrh0; u32 nwords; u32 extra_bytes; /* Construct the header. 
*/ extra_bytes = -qp->s_cur_size & 3; nwords = (qp->s_cur_size + extra_bytes) >> 2; lrh0 = QIB_LRH_BTH; if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh, &qp->remote_ah_attr.grh, qp->s_hdrwords, nwords); lrh0 = QIB_LRH_GRH; } lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 | qp->remote_ah_attr.sl << 4; qp->s_hdr->lrh[0] = cpu_to_be16(lrh0); qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | qp->remote_ah_attr.src_path_bits); bth0 |= qib_get_pkey(ibp, qp->s_pkey_index); bth0 |= extra_bytes << 20; if (qp->s_mig_state == IB_MIG_MIGRATED) bth0 |= IB_BTH_MIG_REQ; ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); ohdr->bth[2] = cpu_to_be32(bth2); } /** * qib_do_send - perform a send on a QP * @work: contains a pointer to the QP * * Process entries in the send work queue until credit or queue is * exhausted. Only allow one CPU to send a packet per QP (tasklet). * Otherwise, two threads could send packets out of order. */ void qib_do_send(struct work_struct *work) { struct qib_qp *qp = container_of(work, struct qib_qp, s_work); struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct qib_pportdata *ppd = ppd_from_ibp(ibp); int (*make_req)(struct qib_qp *qp); unsigned long flags; if ((qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) && (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) { qib_ruc_loopback(qp); return; } if (qp->ibqp.qp_type == IB_QPT_RC) make_req = qib_make_rc_req; else if (qp->ibqp.qp_type == IB_QPT_UC) make_req = qib_make_uc_req; else make_req = qib_make_ud_req; spin_lock_irqsave(&qp->s_lock, flags); /* Return if we are already busy processing a work request. 
*/ if (!qib_send_ok(qp)) { spin_unlock_irqrestore(&qp->s_lock, flags); return; } qp->s_flags |= QIB_S_BUSY; spin_unlock_irqrestore(&qp->s_lock, flags); do { /* Check for a constructed packet to be sent. */ if (qp->s_hdrwords != 0) { /* * If the packet cannot be sent now, return and * the send tasklet will be woken up later. */ if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords, qp->s_cur_sge, qp->s_cur_size)) break; /* Record that s_hdr is empty. */ qp->s_hdrwords = 0; } } while (make_req(qp)); } /* * This should be called with s_lock held. */ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, enum ib_wc_status status) { u32 old_last, last; unsigned i; if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND)) return; for (i = 0; i < wqe->wr.num_sge; i++) { struct qib_sge *sge = &wqe->sg_list[i]; qib_put_mr(sge->mr); } if (qp->ibqp.qp_type == IB_QPT_UD || qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); /* See ch. 11.2.4.1 and 10.7.3.1 */ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED) || status != IB_WC_SUCCESS) { struct ib_wc wc; memset(&wc, 0, sizeof wc); wc.wr_id = wqe->wr.wr_id; wc.status = status; wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; wc.qp = &qp->ibqp; if (status == IB_WC_SUCCESS) wc.byte_len = wqe->length; qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, status != IB_WC_SUCCESS); } last = qp->s_last; old_last = last; if (++last >= qp->s_size) last = 0; qp->s_last = last; if (qp->s_acked == old_last) qp->s_acked = last; if (qp->s_cur == old_last) qp->s_cur = last; if (qp->s_tail == old_last) qp->s_tail = last; if (qp->state == IB_QPS_SQD && last == qp->s_cur) qp->s_draining = 0; }
gpl-2.0
CM-Tab-S/android_kernel_samsung_exynos5420
arch/tile/kernel/traps.c
4415
8166
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/module.h> #include <linux/reboot.h> #include <linux/uaccess.h> #include <linux/ptrace.h> #include <asm/stack.h> #include <asm/traps.h> #include <asm/setup.h> #include <arch/interrupts.h> #include <arch/spr_def.h> #include <arch/opcode.h> void __init trap_init(void) { /* Nothing needed here since we link code at .intrpt1 */ } int unaligned_fixup = 1; static int __init setup_unaligned_fixup(char *str) { /* * Say "=-1" to completely disable it. If you just do "=0", we * will still parse the instruction, then fire a SIGBUS with * the correct address from inside the single_step code. */ long val; if (strict_strtol(str, 0, &val) != 0) return 0; unaligned_fixup = val; pr_info("Fixups for unaligned data accesses are %s\n", unaligned_fixup >= 0 ? (unaligned_fixup ? 
"enabled" : "disabled") : "completely disabled"); return 1; } __setup("unaligned_fixup=", setup_unaligned_fixup); #if CHIP_HAS_TILE_DMA() static int dma_disabled; static int __init nodma(char *str) { pr_info("User-space DMA is disabled\n"); dma_disabled = 1; return 1; } __setup("nodma", nodma); /* How to decode SPR_GPV_REASON */ #define IRET_ERROR (1U << 31) #define MT_ERROR (1U << 30) #define MF_ERROR (1U << 29) #define SPR_INDEX ((1U << 15) - 1) #define SPR_MPL_SHIFT 9 /* starting bit position for MPL encoded in SPR */ /* * See if this GPV is just to notify the kernel of SPR use and we can * retry the user instruction after adjusting some MPLs suitably. */ static int retry_gpv(unsigned int gpv_reason) { int mpl; if (gpv_reason & IRET_ERROR) return 0; BUG_ON((gpv_reason & (MT_ERROR|MF_ERROR)) == 0); mpl = (gpv_reason & SPR_INDEX) >> SPR_MPL_SHIFT; if (mpl == INT_DMA_NOTIFY && !dma_disabled) { /* User is turning on DMA. Allow it and retry. */ printk(KERN_DEBUG "Process %d/%s is now enabled for DMA\n", current->pid, current->comm); BUG_ON(current->thread.tile_dma_state.enabled); current->thread.tile_dma_state.enabled = 1; grant_dma_mpls(); return 1; } return 0; } #endif /* CHIP_HAS_TILE_DMA() */ #ifdef __tilegx__ #define bundle_bits tilegx_bundle_bits #else #define bundle_bits tile_bundle_bits #endif extern bundle_bits bpt_code; asm(".pushsection .rodata.bpt_code,\"a\";" ".align 8;" "bpt_code: bpt;" ".size bpt_code,.-bpt_code;" ".popsection"); static int special_ill(bundle_bits bundle, int *sigp, int *codep) { int sig, code, maxcode; if (bundle == bpt_code) { *sigp = SIGTRAP; *codep = TRAP_BRKPT; return 1; } /* If it's a "raise" bundle, then "ill" must be in pipe X1. 
*/ #ifdef __tilegx__ if ((bundle & TILEGX_BUNDLE_MODE_MASK) != 0) return 0; if (get_Opcode_X1(bundle) != RRR_0_OPCODE_X1) return 0; if (get_RRROpcodeExtension_X1(bundle) != UNARY_RRR_0_OPCODE_X1) return 0; if (get_UnaryOpcodeExtension_X1(bundle) != ILL_UNARY_OPCODE_X1) return 0; #else if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) return 0; if (get_Opcode_X1(bundle) != SHUN_0_OPCODE_X1) return 0; if (get_UnShOpcodeExtension_X1(bundle) != UN_0_SHUN_0_OPCODE_X1) return 0; if (get_UnOpcodeExtension_X1(bundle) != ILL_UN_0_SHUN_0_OPCODE_X1) return 0; #endif /* Check that the magic distinguishers are set to mean "raise". */ if (get_Dest_X1(bundle) != 29 || get_SrcA_X1(bundle) != 37) return 0; /* There must be an "addli zero, zero, VAL" in X0. */ if (get_Opcode_X0(bundle) != ADDLI_OPCODE_X0) return 0; if (get_Dest_X0(bundle) != TREG_ZERO) return 0; if (get_SrcA_X0(bundle) != TREG_ZERO) return 0; /* * Validate the proposed signal number and si_code value. * Note that we embed these in the static instruction itself * so that we perturb the register state as little as possible * at the time of the actual fault; it's unlikely you'd ever * need to dynamically choose which kind of fault to raise * from user space. */ sig = get_Imm16_X0(bundle) & 0x3f; switch (sig) { case SIGILL: maxcode = NSIGILL; break; case SIGFPE: maxcode = NSIGFPE; break; case SIGSEGV: maxcode = NSIGSEGV; break; case SIGBUS: maxcode = NSIGBUS; break; case SIGTRAP: maxcode = NSIGTRAP; break; default: return 0; } code = (get_Imm16_X0(bundle) >> 6) & 0xf; if (code <= 0 || code > maxcode) return 0; /* Make it the requested signal. */ *sigp = sig; *codep = code | __SI_FAULT; return 1; } void __kprobes do_trap(struct pt_regs *regs, int fault_num, unsigned long reason) { siginfo_t info = { 0 }; int signo, code; unsigned long address = 0; bundle_bits instr; /* Re-enable interrupts. */ local_irq_enable(); /* * If it hits in kernel mode and we can't fix it up, just exit the * current process and hope for the best. 
*/ if (!user_mode(regs)) { if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */ return; pr_alert("Kernel took bad trap %d at PC %#lx\n", fault_num, regs->pc); if (fault_num == INT_GPV) pr_alert("GPV_REASON is %#lx\n", reason); show_regs(regs); do_exit(SIGKILL); /* FIXME: implement i386 die() */ return; } switch (fault_num) { case INT_MEM_ERROR: signo = SIGBUS; code = BUS_OBJERR; break; case INT_ILL: if (copy_from_user(&instr, (void __user *)regs->pc, sizeof(instr))) { pr_err("Unreadable instruction for INT_ILL:" " %#lx\n", regs->pc); do_exit(SIGKILL); return; } if (!special_ill(instr, &signo, &code)) { signo = SIGILL; code = ILL_ILLOPC; } address = regs->pc; break; case INT_GPV: #if CHIP_HAS_TILE_DMA() if (retry_gpv(reason)) return; #endif /*FALLTHROUGH*/ case INT_UDN_ACCESS: case INT_IDN_ACCESS: #if CHIP_HAS_SN() case INT_SN_ACCESS: #endif signo = SIGILL; code = ILL_PRVREG; address = regs->pc; break; case INT_SWINT_3: case INT_SWINT_2: case INT_SWINT_0: signo = SIGILL; code = ILL_ILLTRP; address = regs->pc; break; case INT_UNALIGN_DATA: #ifndef __tilegx__ /* Emulated support for single step debugging */ if (unaligned_fixup >= 0) { struct single_step_state *state = current_thread_info()->step_state; if (!state || (void __user *)(regs->pc) != state->buffer) { single_step_once(regs); return; } } #endif signo = SIGBUS; code = BUS_ADRALN; address = 0; break; case INT_DOUBLE_FAULT: /* * For double fault, "reason" is actually passed as * SYSTEM_SAVE_K_2, the hypervisor's double-fault info, so * we can provide the original fault number rather than * the uninteresting "INT_DOUBLE_FAULT" so the user can * learn what actually struck while PL0 ICS was set. */ fault_num = reason; signo = SIGILL; code = ILL_DBLFLT; address = regs->pc; break; #ifdef __tilegx__ case INT_ILL_TRANS: { /* Avoid a hardware erratum with the return address stack. 
*/ fill_ra_stack(); signo = SIGSEGV; code = SEGV_MAPERR; if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK) address = regs->pc; else address = 0; /* FIXME: GX: single-step for address */ break; } #endif default: panic("Unexpected do_trap interrupt number %d", fault_num); return; } info.si_signo = signo; info.si_code = code; info.si_addr = (void __user *)address; if (signo == SIGILL) info.si_trapno = fault_num; if (signo != SIGTRAP) trace_unhandled_signal("trap", regs, address, signo); force_sig_info(signo, &info, current); } void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52) { _dump_stack(dummy, pc, lr, sp, r52); pr_emerg("Double fault: exiting\n"); machine_halt(); }
gpl-2.0
tinganho/linux-kernel
fs/fifo.c
4927
3277
/* * linux/fs/fifo.c * * written by Paul H. Hargrove * * Fixes: * 10-06-1999, AV: fixed OOM handling in fifo_open(), moved * initialization there, switched to external * allocation of pipe_inode_info. */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/pipe_fs_i.h> static void wait_for_partner(struct inode* inode, unsigned int *cnt) { int cur = *cnt; while (cur == *cnt) { pipe_wait(inode->i_pipe); if (signal_pending(current)) break; } } static void wake_up_partner(struct inode* inode) { wake_up_interruptible(&inode->i_pipe->wait); } static int fifo_open(struct inode *inode, struct file *filp) { struct pipe_inode_info *pipe; int ret; mutex_lock(&inode->i_mutex); pipe = inode->i_pipe; if (!pipe) { ret = -ENOMEM; pipe = alloc_pipe_info(inode); if (!pipe) goto err_nocleanup; inode->i_pipe = pipe; } filp->f_version = 0; /* We can only do regular read/write on fifos */ filp->f_mode &= (FMODE_READ | FMODE_WRITE); switch (filp->f_mode) { case FMODE_READ: /* * O_RDONLY * POSIX.1 says that O_NONBLOCK means return with the FIFO * opened, even when there is no process writing the FIFO. */ filp->f_op = &read_pipefifo_fops; pipe->r_counter++; if (pipe->readers++ == 0) wake_up_partner(inode); if (!pipe->writers) { if ((filp->f_flags & O_NONBLOCK)) { /* suppress POLLHUP until we have * seen a writer */ filp->f_version = pipe->w_counter; } else { wait_for_partner(inode, &pipe->w_counter); if(signal_pending(current)) goto err_rd; } } break; case FMODE_WRITE: /* * O_WRONLY * POSIX.1 says that O_NONBLOCK means return -1 with * errno=ENXIO when there is no process reading the FIFO. 
*/ ret = -ENXIO; if ((filp->f_flags & O_NONBLOCK) && !pipe->readers) goto err; filp->f_op = &write_pipefifo_fops; pipe->w_counter++; if (!pipe->writers++) wake_up_partner(inode); if (!pipe->readers) { wait_for_partner(inode, &pipe->r_counter); if (signal_pending(current)) goto err_wr; } break; case FMODE_READ | FMODE_WRITE: /* * O_RDWR * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set. * This implementation will NEVER block on a O_RDWR open, since * the process can at least talk to itself. */ filp->f_op = &rdwr_pipefifo_fops; pipe->readers++; pipe->writers++; pipe->r_counter++; pipe->w_counter++; if (pipe->readers == 1 || pipe->writers == 1) wake_up_partner(inode); break; default: ret = -EINVAL; goto err; } /* Ok! */ mutex_unlock(&inode->i_mutex); return 0; err_rd: if (!--pipe->readers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err_wr: if (!--pipe->writers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err: if (!pipe->readers && !pipe->writers) free_pipe_info(inode); err_nocleanup: mutex_unlock(&inode->i_mutex); return ret; } /* * Dummy default file-operations: the only thing this does * is contain the open that then fills in the correct operations * depending on the access mode of the file... */ const struct file_operations def_fifo_fops = { .open = fifo_open, /* will set read_ or write_pipefifo_fops */ .llseek = noop_llseek, };
gpl-2.0
zarboz/EvilZ-213
fs/fifo.c
4927
3277
/* * linux/fs/fifo.c * * written by Paul H. Hargrove * * Fixes: * 10-06-1999, AV: fixed OOM handling in fifo_open(), moved * initialization there, switched to external * allocation of pipe_inode_info. */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/pipe_fs_i.h> static void wait_for_partner(struct inode* inode, unsigned int *cnt) { int cur = *cnt; while (cur == *cnt) { pipe_wait(inode->i_pipe); if (signal_pending(current)) break; } } static void wake_up_partner(struct inode* inode) { wake_up_interruptible(&inode->i_pipe->wait); } static int fifo_open(struct inode *inode, struct file *filp) { struct pipe_inode_info *pipe; int ret; mutex_lock(&inode->i_mutex); pipe = inode->i_pipe; if (!pipe) { ret = -ENOMEM; pipe = alloc_pipe_info(inode); if (!pipe) goto err_nocleanup; inode->i_pipe = pipe; } filp->f_version = 0; /* We can only do regular read/write on fifos */ filp->f_mode &= (FMODE_READ | FMODE_WRITE); switch (filp->f_mode) { case FMODE_READ: /* * O_RDONLY * POSIX.1 says that O_NONBLOCK means return with the FIFO * opened, even when there is no process writing the FIFO. */ filp->f_op = &read_pipefifo_fops; pipe->r_counter++; if (pipe->readers++ == 0) wake_up_partner(inode); if (!pipe->writers) { if ((filp->f_flags & O_NONBLOCK)) { /* suppress POLLHUP until we have * seen a writer */ filp->f_version = pipe->w_counter; } else { wait_for_partner(inode, &pipe->w_counter); if(signal_pending(current)) goto err_rd; } } break; case FMODE_WRITE: /* * O_WRONLY * POSIX.1 says that O_NONBLOCK means return -1 with * errno=ENXIO when there is no process reading the FIFO. 
*/ ret = -ENXIO; if ((filp->f_flags & O_NONBLOCK) && !pipe->readers) goto err; filp->f_op = &write_pipefifo_fops; pipe->w_counter++; if (!pipe->writers++) wake_up_partner(inode); if (!pipe->readers) { wait_for_partner(inode, &pipe->r_counter); if (signal_pending(current)) goto err_wr; } break; case FMODE_READ | FMODE_WRITE: /* * O_RDWR * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set. * This implementation will NEVER block on a O_RDWR open, since * the process can at least talk to itself. */ filp->f_op = &rdwr_pipefifo_fops; pipe->readers++; pipe->writers++; pipe->r_counter++; pipe->w_counter++; if (pipe->readers == 1 || pipe->writers == 1) wake_up_partner(inode); break; default: ret = -EINVAL; goto err; } /* Ok! */ mutex_unlock(&inode->i_mutex); return 0; err_rd: if (!--pipe->readers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err_wr: if (!--pipe->writers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err: if (!pipe->readers && !pipe->writers) free_pipe_info(inode); err_nocleanup: mutex_unlock(&inode->i_mutex); return ret; } /* * Dummy default file-operations: the only thing this does * is contain the open that then fills in the correct operations * depending on the access mode of the file... */ const struct file_operations def_fifo_fops = { .open = fifo_open, /* will set read_ or write_pipefifo_fops */ .llseek = noop_llseek, };
gpl-2.0
coolshou/htc_dlxub1_kernel-3.4.10
drivers/memstick/host/r592.c
5183
22064
/* * Copyright (C) 2010 - Maxim Levitsky * driver for Ricoh memstick readers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/freezer.h> #include <linux/jiffies.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/highmem.h> #include <asm/byteorder.h> #include <linux/swab.h> #include "r592.h" static bool r592_enable_dma = 1; static int debug; static const char *tpc_names[] = { "MS_TPC_READ_MG_STATUS", "MS_TPC_READ_LONG_DATA", "MS_TPC_READ_SHORT_DATA", "MS_TPC_READ_REG", "MS_TPC_READ_QUAD_DATA", "INVALID", "MS_TPC_GET_INT", "MS_TPC_SET_RW_REG_ADRS", "MS_TPC_EX_SET_CMD", "MS_TPC_WRITE_QUAD_DATA", "MS_TPC_WRITE_REG", "MS_TPC_WRITE_SHORT_DATA", "MS_TPC_WRITE_LONG_DATA", "MS_TPC_SET_CMD", }; /** * memstick_debug_get_tpc_name - debug helper that returns string for * a TPC number */ const char *memstick_debug_get_tpc_name(int tpc) { return tpc_names[tpc-1]; } EXPORT_SYMBOL(memstick_debug_get_tpc_name); /* Read a register*/ static inline u32 r592_read_reg(struct r592_device *dev, int address) { u32 value = readl(dev->mmio + address); dbg_reg("reg #%02d == 0x%08x", address, value); return value; } /* Write a register */ static inline void r592_write_reg(struct r592_device *dev, int address, u32 value) { dbg_reg("reg #%02d <- 0x%08x", address, value); writel(value, dev->mmio + address); } /* Reads a big endian DWORD register */ static inline u32 r592_read_reg_raw_be(struct r592_device *dev, int address) { u32 value = __raw_readl(dev->mmio + address); dbg_reg("reg #%02d == 0x%08x", address, value); return be32_to_cpu(value); } /* Writes a big endian DWORD register */ static inline void r592_write_reg_raw_be(struct r592_device 
*dev, int address, u32 value) { dbg_reg("reg #%02d <- 0x%08x", address, value); __raw_writel(cpu_to_be32(value), dev->mmio + address); } /* Set specific bits in a register (little endian) */ static inline void r592_set_reg_mask(struct r592_device *dev, int address, u32 mask) { u32 reg = readl(dev->mmio + address); dbg_reg("reg #%02d |= 0x%08x (old =0x%08x)", address, mask, reg); writel(reg | mask , dev->mmio + address); } /* Clear specific bits in a register (little endian) */ static inline void r592_clear_reg_mask(struct r592_device *dev, int address, u32 mask) { u32 reg = readl(dev->mmio + address); dbg_reg("reg #%02d &= 0x%08x (old = 0x%08x, mask = 0x%08x)", address, ~mask, reg, mask); writel(reg & ~mask, dev->mmio + address); } /* Wait for status bits while checking for errors */ static int r592_wait_status(struct r592_device *dev, u32 mask, u32 wanted_mask) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); u32 reg = r592_read_reg(dev, R592_STATUS); if ((reg & mask) == wanted_mask) return 0; while (time_before(jiffies, timeout)) { reg = r592_read_reg(dev, R592_STATUS); if ((reg & mask) == wanted_mask) return 0; if (reg & (R592_STATUS_SEND_ERR | R592_STATUS_RECV_ERR)) return -EIO; cpu_relax(); } return -ETIME; } /* Enable/disable device */ static int r592_enable_device(struct r592_device *dev, bool enable) { dbg("%sabling the device", enable ? 
"en" : "dis"); if (enable) { /* Power up the card */ r592_write_reg(dev, R592_POWER, R592_POWER_0 | R592_POWER_1); /* Perform a reset */ r592_set_reg_mask(dev, R592_IO, R592_IO_RESET); msleep(100); } else /* Power down the card */ r592_write_reg(dev, R592_POWER, 0); return 0; } /* Set serial/parallel mode */ static int r592_set_mode(struct r592_device *dev, bool parallel_mode) { if (!parallel_mode) { dbg("switching to serial mode"); /* Set serial mode */ r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_SERIAL); r592_clear_reg_mask(dev, R592_POWER, R592_POWER_20); } else { dbg("switching to parallel mode"); /* This setting should be set _before_ switch TPC */ r592_set_reg_mask(dev, R592_POWER, R592_POWER_20); r592_clear_reg_mask(dev, R592_IO, R592_IO_SERIAL1 | R592_IO_SERIAL2); /* Set the parallel mode now */ r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_PARALLEL); } dev->parallel_mode = parallel_mode; return 0; } /* Perform a controller reset without powering down the card */ static void r592_host_reset(struct r592_device *dev) { r592_set_reg_mask(dev, R592_IO, R592_IO_RESET); msleep(100); r592_set_mode(dev, dev->parallel_mode); } /* Disable all hardware interrupts */ static void r592_clear_interrupts(struct r592_device *dev) { /* Disable & ACK all interrupts */ r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_ACK_MASK); r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_EN_MASK); } /* Tests if there is an CRC error */ static int r592_test_io_error(struct r592_device *dev) { if (!(r592_read_reg(dev, R592_STATUS) & (R592_STATUS_SEND_ERR | R592_STATUS_RECV_ERR))) return 0; return -EIO; } /* Ensure that FIFO is ready for use */ static int r592_test_fifo_empty(struct r592_device *dev) { if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY) return 0; dbg("FIFO not ready, trying to reset the device"); r592_host_reset(dev); if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY) return 0; message("FIFO still not ready, giving up"); return -EIO; } /* Activates 
the DMA transfer from to FIFO */ static void r592_start_dma(struct r592_device *dev, bool is_write) { unsigned long flags; u32 reg; spin_lock_irqsave(&dev->irq_lock, flags); /* Ack interrupts (just in case) + enable them */ r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK); r592_set_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK); /* Set DMA address */ r592_write_reg(dev, R592_FIFO_DMA, sg_dma_address(&dev->req->sg)); /* Enable the DMA */ reg = r592_read_reg(dev, R592_FIFO_DMA_SETTINGS); reg |= R592_FIFO_DMA_SETTINGS_EN; if (!is_write) reg |= R592_FIFO_DMA_SETTINGS_DIR; else reg &= ~R592_FIFO_DMA_SETTINGS_DIR; r592_write_reg(dev, R592_FIFO_DMA_SETTINGS, reg); spin_unlock_irqrestore(&dev->irq_lock, flags); } /* Cleanups DMA related settings */ static void r592_stop_dma(struct r592_device *dev, int error) { r592_clear_reg_mask(dev, R592_FIFO_DMA_SETTINGS, R592_FIFO_DMA_SETTINGS_EN); /* This is only a precation */ r592_write_reg(dev, R592_FIFO_DMA, dev->dummy_dma_page_physical_address); r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK); r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK); dev->dma_error = error; } /* Test if hardware supports DMA */ static void r592_check_dma(struct r592_device *dev) { dev->dma_capable = r592_enable_dma && (r592_read_reg(dev, R592_FIFO_DMA_SETTINGS) & R592_FIFO_DMA_SETTINGS_CAP); } /* Transfers fifo contents in/out using DMA */ static int r592_transfer_fifo_dma(struct r592_device *dev) { int len, sg_count; bool is_write; if (!dev->dma_capable || !dev->req->long_data) return -EINVAL; len = dev->req->sg.length; is_write = dev->req->data_dir == WRITE; if (len != R592_LFIFO_SIZE) return -EINVAL; dbg_verbose("doing dma transfer"); dev->dma_error = 0; INIT_COMPLETION(dev->dma_done); /* TODO: hidden assumption about nenth beeing always 1 */ sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ? 
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); if (sg_count != 1 || (sg_dma_len(&dev->req->sg) < dev->req->sg.length)) { message("problem in dma_map_sg"); return -EIO; } r592_start_dma(dev, is_write); /* Wait for DMA completion */ if (!wait_for_completion_timeout( &dev->dma_done, msecs_to_jiffies(1000))) { message("DMA timeout"); r592_stop_dma(dev, -ETIMEDOUT); } dma_unmap_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); return dev->dma_error; } /* * Writes the FIFO in 4 byte chunks. * If length isn't 4 byte aligned, rest of the data if put to a fifo * to be written later * Use r592_flush_fifo_write to flush that fifo when writing for the * last time */ static void r592_write_fifo_pio(struct r592_device *dev, unsigned char *buffer, int len) { /* flush spill from former write */ if (!kfifo_is_empty(&dev->pio_fifo)) { u8 tmp[4] = {0}; int copy_len = kfifo_in(&dev->pio_fifo, buffer, len); if (!kfifo_is_full(&dev->pio_fifo)) return; len -= copy_len; buffer += copy_len; copy_len = kfifo_out(&dev->pio_fifo, tmp, 4); WARN_ON(copy_len != 4); r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)tmp); } WARN_ON(!kfifo_is_empty(&dev->pio_fifo)); /* write full dwords */ while (len >= 4) { r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer); buffer += 4; len -= 4; } /* put remaining bytes to the spill */ if (len) kfifo_in(&dev->pio_fifo, buffer, len); } /* Flushes the temporary FIFO used to make aligned DWORD writes */ static void r592_flush_fifo_write(struct r592_device *dev) { u8 buffer[4] = { 0 }; int len; if (kfifo_is_empty(&dev->pio_fifo)) return; len = kfifo_out(&dev->pio_fifo, buffer, 4); r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer); } /* * Read a fifo in 4 bytes chunks. * If input doesn't fit the buffer, it places bytes of last dword in spill * buffer, so that they don't get lost on last read, just throw these away. 
*/ static void r592_read_fifo_pio(struct r592_device *dev, unsigned char *buffer, int len) { u8 tmp[4]; /* Read from last spill */ if (!kfifo_is_empty(&dev->pio_fifo)) { int bytes_copied = kfifo_out(&dev->pio_fifo, buffer, min(4, len)); buffer += bytes_copied; len -= bytes_copied; if (!kfifo_is_empty(&dev->pio_fifo)) return; } /* Reads dwords from FIFO */ while (len >= 4) { *(u32 *)buffer = r592_read_reg_raw_be(dev, R592_FIFO_PIO); buffer += 4; len -= 4; } if (len) { *(u32 *)tmp = r592_read_reg_raw_be(dev, R592_FIFO_PIO); kfifo_in(&dev->pio_fifo, tmp, 4); len -= kfifo_out(&dev->pio_fifo, buffer, len); } WARN_ON(len); return; } /* Transfers actual data using PIO. */ static int r592_transfer_fifo_pio(struct r592_device *dev) { unsigned long flags; bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS; struct sg_mapping_iter miter; kfifo_reset(&dev->pio_fifo); if (!dev->req->long_data) { if (is_write) { r592_write_fifo_pio(dev, dev->req->data, dev->req->data_len); r592_flush_fifo_write(dev); } else r592_read_fifo_pio(dev, dev->req->data, dev->req->data_len); return 0; } local_irq_save(flags); sg_miter_start(&miter, &dev->req->sg, 1, SG_MITER_ATOMIC | (is_write ? SG_MITER_FROM_SG : SG_MITER_TO_SG)); /* Do the transfer fifo<->memory*/ while (sg_miter_next(&miter)) if (is_write) r592_write_fifo_pio(dev, miter.addr, miter.length); else r592_read_fifo_pio(dev, miter.addr, miter.length); /* Write last few non aligned bytes*/ if (is_write) r592_flush_fifo_write(dev); sg_miter_stop(&miter); local_irq_restore(flags); return 0; } /* Executes one TPC (data is read/written from small or large fifo) */ static void r592_execute_tpc(struct r592_device *dev) { bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS; int len, error; u32 status, reg; if (!dev->req) { message("BUG: tpc execution without request!"); return; } len = dev->req->long_data ? 
dev->req->sg.length : dev->req->data_len; /* Ensure that FIFO can hold the input data */ if (len > R592_LFIFO_SIZE) { message("IO: hardware doesn't support TPCs longer that 512"); error = -ENOSYS; goto out; } if (!(r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_PRSNT)) { dbg("IO: refusing to send TPC because card is absent"); error = -ENODEV; goto out; } dbg("IO: executing %s LEN=%d", memstick_debug_get_tpc_name(dev->req->tpc), len); /* Set IO direction */ if (is_write) r592_set_reg_mask(dev, R592_IO, R592_IO_DIRECTION); else r592_clear_reg_mask(dev, R592_IO, R592_IO_DIRECTION); error = r592_test_fifo_empty(dev); if (error) goto out; /* Transfer write data */ if (is_write) { error = r592_transfer_fifo_dma(dev); if (error == -EINVAL) error = r592_transfer_fifo_pio(dev); } if (error) goto out; /* Trigger the TPC */ reg = (len << R592_TPC_EXEC_LEN_SHIFT) | (dev->req->tpc << R592_TPC_EXEC_TPC_SHIFT) | R592_TPC_EXEC_BIG_FIFO; r592_write_reg(dev, R592_TPC_EXEC, reg); /* Wait for TPC completion */ status = R592_STATUS_RDY; if (dev->req->need_card_int) status |= R592_STATUS_CED; error = r592_wait_status(dev, status, status); if (error) { message("card didn't respond"); goto out; } /* Test IO errors */ error = r592_test_io_error(dev); if (error) { dbg("IO error"); goto out; } /* Read data from FIFO */ if (!is_write) { error = r592_transfer_fifo_dma(dev); if (error == -EINVAL) error = r592_transfer_fifo_pio(dev); } /* read INT reg. 
This can be shortened with shifts, but that way its more readable */ if (dev->parallel_mode && dev->req->need_card_int) { dev->req->int_reg = 0; status = r592_read_reg(dev, R592_STATUS); if (status & R592_STATUS_P_CMDNACK) dev->req->int_reg |= MEMSTICK_INT_CMDNAK; if (status & R592_STATUS_P_BREQ) dev->req->int_reg |= MEMSTICK_INT_BREQ; if (status & R592_STATUS_P_INTERR) dev->req->int_reg |= MEMSTICK_INT_ERR; if (status & R592_STATUS_P_CED) dev->req->int_reg |= MEMSTICK_INT_CED; } if (error) dbg("FIFO read error"); out: dev->req->error = error; r592_clear_reg_mask(dev, R592_REG_MSC, R592_REG_MSC_LED); return; } /* Main request processing thread */ static int r592_process_thread(void *data) { int error; struct r592_device *dev = (struct r592_device *)data; unsigned long flags; while (!kthread_should_stop()) { spin_lock_irqsave(&dev->io_thread_lock, flags); set_current_state(TASK_INTERRUPTIBLE); error = memstick_next_req(dev->host, &dev->req); spin_unlock_irqrestore(&dev->io_thread_lock, flags); if (error) { if (error == -ENXIO || error == -EAGAIN) { dbg_verbose("IO: done IO, sleeping"); } else { dbg("IO: unknown error from " "memstick_next_req %d", error); } if (kthread_should_stop()) set_current_state(TASK_RUNNING); schedule(); } else { set_current_state(TASK_RUNNING); r592_execute_tpc(dev); } } return 0; } /* Reprogram chip to detect change in card state */ /* eg, if card is detected, arm it to detect removal, and vice versa */ static void r592_update_card_detect(struct r592_device *dev) { u32 reg = r592_read_reg(dev, R592_REG_MSC); bool card_detected = reg & R592_REG_MSC_PRSNT; dbg("update card detect. card state: %s", card_detected ? 
"present" : "absent"); reg &= ~((R592_REG_MSC_IRQ_REMOVE | R592_REG_MSC_IRQ_INSERT) << 16); if (card_detected) reg |= (R592_REG_MSC_IRQ_REMOVE << 16); else reg |= (R592_REG_MSC_IRQ_INSERT << 16); r592_write_reg(dev, R592_REG_MSC, reg); } /* Timer routine that fires 1 second after last card detection event, */ static void r592_detect_timer(long unsigned int data) { struct r592_device *dev = (struct r592_device *)data; r592_update_card_detect(dev); memstick_detect_change(dev->host); } /* Interrupt handler */ static irqreturn_t r592_irq(int irq, void *data) { struct r592_device *dev = (struct r592_device *)data; irqreturn_t ret = IRQ_NONE; u32 reg; u16 irq_enable, irq_status; unsigned long flags; int error; spin_lock_irqsave(&dev->irq_lock, flags); reg = r592_read_reg(dev, R592_REG_MSC); irq_enable = reg >> 16; irq_status = reg & 0xFFFF; /* Ack the interrupts */ reg &= ~irq_status; r592_write_reg(dev, R592_REG_MSC, reg); /* Get the IRQ status minus bits that aren't enabled */ irq_status &= (irq_enable); /* Due to limitation of memstick core, we don't look at bits that indicate that card was removed/inserted and/or present */ if (irq_status & (R592_REG_MSC_IRQ_INSERT | R592_REG_MSC_IRQ_REMOVE)) { bool card_was_added = irq_status & R592_REG_MSC_IRQ_INSERT; ret = IRQ_HANDLED; message("IRQ: card %s", card_was_added ? "added" : "removed"); mod_timer(&dev->detect_timer, jiffies + msecs_to_jiffies(card_was_added ? 
500 : 50)); } if (irq_status & (R592_REG_MSC_FIFO_DMA_DONE | R592_REG_MSC_FIFO_DMA_ERR)) { ret = IRQ_HANDLED; if (irq_status & R592_REG_MSC_FIFO_DMA_ERR) { message("IRQ: DMA error"); error = -EIO; } else { dbg_verbose("IRQ: dma done"); error = 0; } r592_stop_dma(dev, error); complete(&dev->dma_done); } spin_unlock_irqrestore(&dev->irq_lock, flags); return ret; } /* External inteface: set settings */ static int r592_set_param(struct memstick_host *host, enum memstick_param param, int value) { struct r592_device *dev = memstick_priv(host); switch (param) { case MEMSTICK_POWER: switch (value) { case MEMSTICK_POWER_ON: return r592_enable_device(dev, true); case MEMSTICK_POWER_OFF: return r592_enable_device(dev, false); default: return -EINVAL; } case MEMSTICK_INTERFACE: switch (value) { case MEMSTICK_SERIAL: return r592_set_mode(dev, 0); case MEMSTICK_PAR4: return r592_set_mode(dev, 1); default: return -EINVAL; } default: return -EINVAL; } } /* External interface: submit requests */ static void r592_submit_req(struct memstick_host *host) { struct r592_device *dev = memstick_priv(host); unsigned long flags; if (dev->req) return; spin_lock_irqsave(&dev->io_thread_lock, flags); if (wake_up_process(dev->io_thread)) dbg_verbose("IO thread woken to process requests"); spin_unlock_irqrestore(&dev->io_thread_lock, flags); } static const struct pci_device_id r592_pci_id_tbl[] = { { PCI_VDEVICE(RICOH, 0x0592), }, { }, }; /* Main entry */ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int error = -ENOMEM; struct memstick_host *host; struct r592_device *dev; /* Allocate memory */ host = memstick_alloc_host(sizeof(struct r592_device), &pdev->dev); if (!host) goto error1; dev = memstick_priv(host); dev->host = host; dev->pci_dev = pdev; pci_set_drvdata(pdev, dev); /* pci initialization */ error = pci_enable_device(pdev); if (error) goto error2; pci_set_master(pdev); error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (error) goto error3; error = 
pci_request_regions(pdev, DRV_NAME); if (error) goto error3; dev->mmio = pci_ioremap_bar(pdev, 0); if (!dev->mmio) goto error4; dev->irq = pdev->irq; spin_lock_init(&dev->irq_lock); spin_lock_init(&dev->io_thread_lock); init_completion(&dev->dma_done); INIT_KFIFO(dev->pio_fifo); setup_timer(&dev->detect_timer, r592_detect_timer, (long unsigned int)dev); /* Host initialization */ host->caps = MEMSTICK_CAP_PAR4; host->request = r592_submit_req; host->set_param = r592_set_param; r592_check_dma(dev); dev->io_thread = kthread_run(r592_process_thread, dev, "r592_io"); if (IS_ERR(dev->io_thread)) { error = PTR_ERR(dev->io_thread); goto error5; } /* This is just a precation, so don't fail */ dev->dummy_dma_page = pci_alloc_consistent(pdev, PAGE_SIZE, &dev->dummy_dma_page_physical_address); r592_stop_dma(dev , 0); if (request_irq(dev->irq, &r592_irq, IRQF_SHARED, DRV_NAME, dev)) goto error6; r592_update_card_detect(dev); if (memstick_add_host(host)) goto error7; message("driver successfully loaded"); return 0; error7: free_irq(dev->irq, dev); error6: if (dev->dummy_dma_page) pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page, dev->dummy_dma_page_physical_address); kthread_stop(dev->io_thread); error5: iounmap(dev->mmio); error4: pci_release_regions(pdev); error3: pci_disable_device(pdev); error2: memstick_free_host(host); error1: return error; } static void r592_remove(struct pci_dev *pdev) { int error = 0; struct r592_device *dev = pci_get_drvdata(pdev); /* Stop the processing thread. 
That ensures that we won't take any more requests */ kthread_stop(dev->io_thread); r592_enable_device(dev, false); while (!error && dev->req) { dev->req->error = -ETIME; error = memstick_next_req(dev->host, &dev->req); } memstick_remove_host(dev->host); free_irq(dev->irq, dev); iounmap(dev->mmio); pci_release_regions(pdev); pci_disable_device(pdev); memstick_free_host(dev->host); if (dev->dummy_dma_page) pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page, dev->dummy_dma_page_physical_address); } #ifdef CONFIG_PM static int r592_suspend(struct device *core_dev) { struct pci_dev *pdev = to_pci_dev(core_dev); struct r592_device *dev = pci_get_drvdata(pdev); r592_clear_interrupts(dev); memstick_suspend_host(dev->host); del_timer_sync(&dev->detect_timer); return 0; } static int r592_resume(struct device *core_dev) { struct pci_dev *pdev = to_pci_dev(core_dev); struct r592_device *dev = pci_get_drvdata(pdev); r592_clear_interrupts(dev); r592_enable_device(dev, false); memstick_resume_host(dev->host); r592_update_card_detect(dev); return 0; } SIMPLE_DEV_PM_OPS(r592_pm_ops, r592_suspend, r592_resume); #endif MODULE_DEVICE_TABLE(pci, r592_pci_id_tbl); static struct pci_driver r852_pci_driver = { .name = DRV_NAME, .id_table = r592_pci_id_tbl, .probe = r592_probe, .remove = r592_remove, #ifdef CONFIG_PM .driver.pm = &r592_pm_ops, #endif }; static __init int r592_module_init(void) { return pci_register_driver(&r852_pci_driver); } static void __exit r592_module_exit(void) { pci_unregister_driver(&r852_pci_driver); } module_init(r592_module_init); module_exit(r592_module_exit); module_param_named(enable_dma, r592_enable_dma, bool, S_IRUGO); MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)"); module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug level (0-3)"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); MODULE_DESCRIPTION("Ricoh R5C592 Memstick/Memstick PRO card reader driver");
gpl-2.0
stoku/linux-3.6.11-ab
drivers/media/video/tm6000/tm6000-core.c
5183
27111
/* * tm6000-core.c - driver for TM5600/TM6000/TM6010 USB video capture devices * * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> * * Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com> * - DVB-T support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/i2c.h> #include "tm6000.h" #include "tm6000-regs.h" #include <media/v4l2-common.h> #include <media/tuner.h> #define USB_TIMEOUT (5 * HZ) /* ms */ int tm6000_read_write_usb(struct tm6000_core *dev, u8 req_type, u8 req, u16 value, u16 index, u8 *buf, u16 len) { int ret, i; unsigned int pipe; u8 *data = NULL; int delay = 5000; mutex_lock(&dev->usb_lock); if (len) data = kzalloc(len, GFP_KERNEL); if (req_type & USB_DIR_IN) pipe = usb_rcvctrlpipe(dev->udev, 0); else { pipe = usb_sndctrlpipe(dev->udev, 0); memcpy(data, buf, len); } if (tm6000_debug & V4L2_DEBUG_I2C) { printk(KERN_DEBUG "(dev %p, pipe %08x): ", dev->udev, pipe); printk(KERN_CONT "%s: %02x %02x %02x %02x %02x %02x %02x %02x ", (req_type & USB_DIR_IN) ? 
" IN" : "OUT", req_type, req, value&0xff, value>>8, index&0xff, index>>8, len&0xff, len>>8); if (!(req_type & USB_DIR_IN)) { printk(KERN_CONT ">>> "); for (i = 0; i < len; i++) printk(KERN_CONT " %02x", buf[i]); printk(KERN_CONT "\n"); } } ret = usb_control_msg(dev->udev, pipe, req, req_type, value, index, data, len, USB_TIMEOUT); if (req_type & USB_DIR_IN) memcpy(buf, data, len); if (tm6000_debug & V4L2_DEBUG_I2C) { if (ret < 0) { if (req_type & USB_DIR_IN) printk(KERN_DEBUG "<<< (len=%d)\n", len); printk(KERN_CONT "%s: Error #%d\n", __func__, ret); } else if (req_type & USB_DIR_IN) { printk(KERN_CONT "<<< "); for (i = 0; i < len; i++) printk(KERN_CONT " %02x", buf[i]); printk(KERN_CONT "\n"); } } kfree(data); if (dev->quirks & TM6000_QUIRK_NO_USB_DELAY) delay = 0; if (req == REQ_16_SET_GET_I2C_WR1_RDN && !(req_type & USB_DIR_IN)) { unsigned int tsleep; /* Calculate delay time, 14000us for 64 bytes */ tsleep = (len * 200) + 200; if (tsleep < delay) tsleep = delay; usleep_range(tsleep, tsleep + 1000); } else if (delay) usleep_range(delay, delay + 1000); mutex_unlock(&dev->usb_lock); return ret; } int tm6000_set_reg(struct tm6000_core *dev, u8 req, u16 value, u16 index) { return tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR, req, value, index, NULL, 0); } EXPORT_SYMBOL_GPL(tm6000_set_reg); int tm6000_get_reg(struct tm6000_core *dev, u8 req, u16 value, u16 index) { int rc; u8 buf[1]; rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR, req, value, index, buf, 1); if (rc < 0) return rc; return *buf; } EXPORT_SYMBOL_GPL(tm6000_get_reg); int tm6000_set_reg_mask(struct tm6000_core *dev, u8 req, u16 value, u16 index, u16 mask) { int rc; u8 buf[1]; u8 new_index; rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR, req, value, 0, buf, 1); if (rc < 0) return rc; new_index = (buf[0] & ~mask) | (index & mask); if (new_index == buf[0]) return 0; return tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR, req, value, new_index, NULL, 0); } 
EXPORT_SYMBOL_GPL(tm6000_set_reg_mask); int tm6000_get_reg16(struct tm6000_core *dev, u8 req, u16 value, u16 index) { int rc; u8 buf[2]; rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR, req, value, index, buf, 2); if (rc < 0) return rc; return buf[1]|buf[0]<<8; } int tm6000_get_reg32(struct tm6000_core *dev, u8 req, u16 value, u16 index) { int rc; u8 buf[4]; rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR, req, value, index, buf, 4); if (rc < 0) return rc; return buf[3] | buf[2] << 8 | buf[1] << 16 | buf[0] << 24; } int tm6000_i2c_reset(struct tm6000_core *dev, u16 tsleep) { int rc; rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, TM6000_GPIO_CLK, 0); if (rc < 0) return rc; msleep(tsleep); rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, TM6000_GPIO_CLK, 1); msleep(tsleep); return rc; } void tm6000_set_fourcc_format(struct tm6000_core *dev) { if (dev->dev_type == TM6010) { int val; val = tm6000_get_reg(dev, TM6010_REQ07_RCC_ACTIVE_IF, 0) & 0xfc; if (dev->fourcc == V4L2_PIX_FMT_UYVY) tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_IF, val); else tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_IF, val | 1); } else { if (dev->fourcc == V4L2_PIX_FMT_UYVY) tm6000_set_reg(dev, TM6010_REQ07_RC1_TRESHOLD, 0xd0); else tm6000_set_reg(dev, TM6010_REQ07_RC1_TRESHOLD, 0x90); } } static void tm6000_set_vbi(struct tm6000_core *dev) { /* * FIXME: * VBI lines and start/end are different between 60Hz and 50Hz * So, it is very likely that we need to change the config to * something that takes it into account, doing something different * if (dev->norm & V4L2_STD_525_60) */ if (dev->dev_type == TM6010) { tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x01); tm6000_set_reg(dev, TM6010_REQ07_R41_TELETEXT_VBI_CODE1, 0x27); tm6000_set_reg(dev, TM6010_REQ07_R42_VBI_DATA_HIGH_LEVEL, 0x55); tm6000_set_reg(dev, TM6010_REQ07_R43_VBI_DATA_TYPE_LINE7, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R44_VBI_DATA_TYPE_LINE8, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R45_VBI_DATA_TYPE_LINE9, 
0x66); tm6000_set_reg(dev, TM6010_REQ07_R46_VBI_DATA_TYPE_LINE10, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R47_VBI_DATA_TYPE_LINE11, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R48_VBI_DATA_TYPE_LINE12, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R49_VBI_DATA_TYPE_LINE13, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R4A_VBI_DATA_TYPE_LINE14, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R4B_VBI_DATA_TYPE_LINE15, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R4C_VBI_DATA_TYPE_LINE16, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R4D_VBI_DATA_TYPE_LINE17, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R4E_VBI_DATA_TYPE_LINE18, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R4F_VBI_DATA_TYPE_LINE19, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R50_VBI_DATA_TYPE_LINE20, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R51_VBI_DATA_TYPE_LINE21, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R52_VBI_DATA_TYPE_LINE22, 0x66); tm6000_set_reg(dev, TM6010_REQ07_R53_VBI_DATA_TYPE_LINE23, 0x00); tm6000_set_reg(dev, TM6010_REQ07_R54_VBI_DATA_TYPE_RLINES, 0x00); tm6000_set_reg(dev, TM6010_REQ07_R55_VBI_LOOP_FILTER_GAIN, 0x01); tm6000_set_reg(dev, TM6010_REQ07_R56_VBI_LOOP_FILTER_I_GAIN, 0x00); tm6000_set_reg(dev, TM6010_REQ07_R57_VBI_LOOP_FILTER_P_GAIN, 0x02); tm6000_set_reg(dev, TM6010_REQ07_R58_VBI_CAPTION_DTO1, 0x35); tm6000_set_reg(dev, TM6010_REQ07_R59_VBI_CAPTION_DTO0, 0xa0); tm6000_set_reg(dev, TM6010_REQ07_R5A_VBI_TELETEXT_DTO1, 0x11); tm6000_set_reg(dev, TM6010_REQ07_R5B_VBI_TELETEXT_DTO0, 0x4c); tm6000_set_reg(dev, TM6010_REQ07_R40_TELETEXT_VBI_CODE0, 0x01); tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x00); } } int tm6000_init_analog_mode(struct tm6000_core *dev) { struct v4l2_frequency f; if (dev->dev_type == TM6010) { u8 active = TM6010_REQ07_RCC_ACTIVE_IF_AUDIO_ENABLE; if (!dev->radio) active |= TM6010_REQ07_RCC_ACTIVE_IF_VIDEO_ENABLE; /* Enable video and audio */ tm6000_set_reg_mask(dev, TM6010_REQ07_RCC_ACTIVE_IF, active, 0x60); /* Disable TS input */ tm6000_set_reg_mask(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x00, 0x40); } 
else { /* Enables soft reset */ tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x01); if (dev->scaler) /* Disable Hfilter and Enable TS Drop err */ tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x20); else /* Enable Hfilter and disable TS Drop err */ tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x80); tm6000_set_reg(dev, TM6010_REQ07_RC3_HSTART1, 0x88); tm6000_set_reg(dev, TM6000_REQ07_RDA_CLK_SEL, 0x23); tm6000_set_reg(dev, TM6010_REQ07_RD1_ADDR_FOR_REQ1, 0xc0); tm6000_set_reg(dev, TM6010_REQ07_RD2_ADDR_FOR_REQ2, 0xd8); tm6000_set_reg(dev, TM6010_REQ07_RD6_ENDP_REQ1_REQ2, 0x06); tm6000_set_reg(dev, TM6000_REQ07_RDF_PWDOWN_ACLK, 0x1f); /* AP Software reset */ tm6000_set_reg(dev, TM6010_REQ07_RFF_SOFT_RESET, 0x08); tm6000_set_reg(dev, TM6010_REQ07_RFF_SOFT_RESET, 0x00); tm6000_set_fourcc_format(dev); /* Disables soft reset */ tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x00); } msleep(20); /* Tuner firmware can now be loaded */ /* * FIXME: This is a hack! xc3028 "sleeps" when no channel is detected * for more than a few seconds. Not sure why, as this behavior does * not happen on other devices with xc3028. So, I suspect that it * is yet another bug at tm6000. After start sleeping, decoding * doesn't start automatically. Instead, it requires some * I2C commands to wake it up. As we want to have image at the * beginning, we needed to add this hack. The better would be to * discover some way to make tm6000 to wake up without this hack. 
*/ f.frequency = dev->freq; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f); msleep(100); tm6000_set_standard(dev); tm6000_set_vbi(dev); tm6000_set_audio_bitrate(dev, 48000); /* switch dvb led off */ if (dev->gpio.dvb_led) { tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.dvb_led, 0x01); } return 0; } int tm6000_init_digital_mode(struct tm6000_core *dev) { if (dev->dev_type == TM6010) { /* Disable video and audio */ tm6000_set_reg_mask(dev, TM6010_REQ07_RCC_ACTIVE_IF, 0x00, 0x60); /* Enable TS input */ tm6000_set_reg_mask(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x40, 0x40); /* all power down, but not the digital data port */ tm6000_set_reg(dev, TM6010_REQ07_RFE_POWER_DOWN, 0x28); tm6000_set_reg(dev, TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xfc); tm6000_set_reg(dev, TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0xff); } else { tm6000_set_reg(dev, TM6010_REQ07_RFF_SOFT_RESET, 0x08); tm6000_set_reg(dev, TM6010_REQ07_RFF_SOFT_RESET, 0x00); tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x01); tm6000_set_reg(dev, TM6000_REQ07_RDF_PWDOWN_ACLK, 0x08); tm6000_set_reg(dev, TM6000_REQ07_RE2_VADC_STATUS_CTL, 0x0c); tm6000_set_reg(dev, TM6000_REQ07_RE8_VADC_PWDOWN_CTL, 0xff); tm6000_set_reg(dev, TM6000_REQ07_REB_VADC_AADC_MODE, 0xd8); tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x40); tm6000_set_reg(dev, TM6010_REQ07_RC1_TRESHOLD, 0xd0); tm6000_set_reg(dev, TM6010_REQ07_RC3_HSTART1, 0x09); tm6000_set_reg(dev, TM6000_REQ07_RDA_CLK_SEL, 0x37); tm6000_set_reg(dev, TM6010_REQ07_RD1_ADDR_FOR_REQ1, 0xd8); tm6000_set_reg(dev, TM6010_REQ07_RD2_ADDR_FOR_REQ2, 0xc0); tm6000_set_reg(dev, TM6010_REQ07_RD6_ENDP_REQ1_REQ2, 0x60); tm6000_set_reg(dev, TM6000_REQ07_RE2_VADC_STATUS_CTL, 0x0c); tm6000_set_reg(dev, TM6000_REQ07_RE8_VADC_PWDOWN_CTL, 0xff); tm6000_set_reg(dev, TM6000_REQ07_REB_VADC_AADC_MODE, 0x08); msleep(50); tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 0x0020, 0x00); msleep(50); tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 0x0020, 0x01); msleep(50); 
tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 0x0020, 0x00); msleep(100); } /* switch dvb led on */ if (dev->gpio.dvb_led) { tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.dvb_led, 0x00); } return 0; } EXPORT_SYMBOL(tm6000_init_digital_mode); struct reg_init { u8 req; u8 reg; u8 val; }; /* The meaning of those initializations are unknown */ static struct reg_init tm6000_init_tab[] = { /* REG VALUE */ { TM6000_REQ07_RDF_PWDOWN_ACLK, 0x1f }, { TM6010_REQ07_RFF_SOFT_RESET, 0x08 }, { TM6010_REQ07_RFF_SOFT_RESET, 0x00 }, { TM6010_REQ07_RD5_POWERSAVE, 0x4f }, { TM6000_REQ07_RDA_CLK_SEL, 0x23 }, { TM6000_REQ07_RDB_OUT_SEL, 0x08 }, { TM6000_REQ07_RE2_VADC_STATUS_CTL, 0x00 }, { TM6000_REQ07_RE3_VADC_INP_LPF_SEL1, 0x10 }, { TM6000_REQ07_RE5_VADC_INP_LPF_SEL2, 0x00 }, { TM6000_REQ07_RE8_VADC_PWDOWN_CTL, 0x00 }, { TM6000_REQ07_REB_VADC_AADC_MODE, 0x64 }, /* 48000 bits/sample, external input */ { TM6000_REQ07_REE_VADC_CTRL_SEL_CONTROL, 0xc2 }, { TM6010_REQ07_R3F_RESET, 0x01 }, /* Start of soft reset */ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x00 }, { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x07 }, { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f }, { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00 }, { TM6010_REQ07_R05_NOISE_THRESHOLD, 0x64 }, { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01 }, { TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, 0x82 }, { TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, 0x36 }, { TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, 0x50 }, { TM6010_REQ07_R0C_CHROMA_AGC_CONTROL, 0x6a }, { TM6010_REQ07_R11_AGC_PEAK_CONTROL, 0xc9 }, { TM6010_REQ07_R12_AGC_GATE_STARTH, 0x07 }, { TM6010_REQ07_R13_AGC_GATE_STARTL, 0x3b }, { TM6010_REQ07_R14_AGC_GATE_WIDTH, 0x47 }, { TM6010_REQ07_R15_AGC_BP_DELAY, 0x6f }, { TM6010_REQ07_R17_HLOOP_MAXSTATE, 0xcd }, { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e }, { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b }, { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2 }, { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9 }, { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c }, { 
TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc }, { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc }, { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd }, { TM6010_REQ07_R20_HSYNC_RISING_EDGE_TIME, 0x3c }, { TM6010_REQ07_R21_HSYNC_PHASE_OFFSET, 0x3c }, { TM6010_REQ07_R2D_CHROMA_BURST_END, 0x48 }, { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 }, { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 }, { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 }, { TM6010_REQ07_R32_VSYNC_HLOCK_MIN, 0x74 }, { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c }, { TM6010_REQ07_R34_VSYNC_AGC_MIN, 0x74 }, { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c }, { TM6010_REQ07_R36_VSYNC_VBI_MIN, 0x7a }, { TM6010_REQ07_R37_VSYNC_VBI_MAX, 0x26 }, { TM6010_REQ07_R38_VSYNC_THRESHOLD, 0x40 }, { TM6010_REQ07_R39_VSYNC_TIME_CONSTANT, 0x0a }, { TM6010_REQ07_R42_VBI_DATA_HIGH_LEVEL, 0x55 }, { TM6010_REQ07_R51_VBI_DATA_TYPE_LINE21, 0x11 }, { TM6010_REQ07_R55_VBI_LOOP_FILTER_GAIN, 0x01 }, { TM6010_REQ07_R57_VBI_LOOP_FILTER_P_GAIN, 0x02 }, { TM6010_REQ07_R58_VBI_CAPTION_DTO1, 0x35 }, { TM6010_REQ07_R59_VBI_CAPTION_DTO0, 0xa0 }, { TM6010_REQ07_R80_COMB_FILTER_TRESHOLD, 0x15 }, { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 }, { TM6010_REQ07_RC1_TRESHOLD, 0xd0 }, { TM6010_REQ07_RC3_HSTART1, 0x88 }, { TM6010_REQ07_R3F_RESET, 0x00 }, /* End of the soft reset */ { TM6010_REQ05_R18_IMASK7, 0x00 }, }; static struct reg_init tm6010_init_tab[] = { { TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x00 }, { TM6010_REQ07_RC4_HSTART0, 0xa0 }, { TM6010_REQ07_RC6_HEND0, 0x40 }, { TM6010_REQ07_RCA_VEND0, 0x31 }, { TM6010_REQ07_RCC_ACTIVE_IF, 0xe1 }, { TM6010_REQ07_RE0_DVIDEO_SOURCE, 0x03 }, { TM6010_REQ07_RFE_POWER_DOWN, 0x7f }, { TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0 }, { TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4 }, { TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8 }, { TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00 }, { TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2 }, { TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0 }, { TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2 }, { TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60 }, { 
TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc }, { TM6010_REQ07_R3F_RESET, 0x01 }, { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x00 }, { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x07 }, { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f }, { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00 }, { TM6010_REQ07_R05_NOISE_THRESHOLD, 0x64 }, { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01 }, { TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, 0x82 }, { TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, 0x36 }, { TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, 0x50 }, { TM6010_REQ07_R0C_CHROMA_AGC_CONTROL, 0x6a }, { TM6010_REQ07_R11_AGC_PEAK_CONTROL, 0xc9 }, { TM6010_REQ07_R12_AGC_GATE_STARTH, 0x07 }, { TM6010_REQ07_R13_AGC_GATE_STARTL, 0x3b }, { TM6010_REQ07_R14_AGC_GATE_WIDTH, 0x47 }, { TM6010_REQ07_R15_AGC_BP_DELAY, 0x6f }, { TM6010_REQ07_R17_HLOOP_MAXSTATE, 0xcd }, { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e }, { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b }, { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2 }, { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9 }, { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c }, { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc }, { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc }, { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd }, { TM6010_REQ07_R20_HSYNC_RISING_EDGE_TIME, 0x3c }, { TM6010_REQ07_R21_HSYNC_PHASE_OFFSET, 0x3c }, { TM6010_REQ07_R2D_CHROMA_BURST_END, 0x48 }, { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 }, { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 }, { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 }, { TM6010_REQ07_R32_VSYNC_HLOCK_MIN, 0x74 }, { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c }, { TM6010_REQ07_R34_VSYNC_AGC_MIN, 0x74 }, { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c }, { TM6010_REQ07_R36_VSYNC_VBI_MIN, 0x7a }, { TM6010_REQ07_R37_VSYNC_VBI_MAX, 0x26 }, { TM6010_REQ07_R38_VSYNC_THRESHOLD, 0x40 }, { TM6010_REQ07_R39_VSYNC_TIME_CONSTANT, 0x0a }, { TM6010_REQ07_R42_VBI_DATA_HIGH_LEVEL, 0x55 }, { TM6010_REQ07_R51_VBI_DATA_TYPE_LINE21, 0x11 }, { TM6010_REQ07_R55_VBI_LOOP_FILTER_GAIN, 0x01 }, { 
TM6010_REQ07_R57_VBI_LOOP_FILTER_P_GAIN, 0x02 }, { TM6010_REQ07_R58_VBI_CAPTION_DTO1, 0x35 }, { TM6010_REQ07_R59_VBI_CAPTION_DTO0, 0xa0 }, { TM6010_REQ07_R80_COMB_FILTER_TRESHOLD, 0x15 }, { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 }, { TM6010_REQ07_RC1_TRESHOLD, 0xd0 }, { TM6010_REQ07_RC3_HSTART1, 0x88 }, { TM6010_REQ07_R3F_RESET, 0x00 }, { TM6010_REQ05_R18_IMASK7, 0x00 }, { TM6010_REQ07_RDC_IR_LEADER1, 0xaa }, { TM6010_REQ07_RDD_IR_LEADER0, 0x30 }, { TM6010_REQ07_RDE_IR_PULSE_CNT1, 0x20 }, { TM6010_REQ07_RDF_IR_PULSE_CNT0, 0xd0 }, { REQ_04_EN_DISABLE_MCU_INT, 0x02, 0x00 }, { TM6010_REQ07_RD8_IR, 0x0f }, /* set remote wakeup key:any key wakeup */ { TM6010_REQ07_RE5_REMOTE_WAKEUP, 0xfe }, { TM6010_REQ07_RDA_IR_WAKEUP_SEL, 0xff }, }; int tm6000_init(struct tm6000_core *dev) { int board, rc = 0, i, size; struct reg_init *tab; /* Check board revision */ board = tm6000_get_reg32(dev, REQ_40_GET_VERSION, 0, 0); if (board >= 0) { switch (board & 0xff) { case 0xf3: printk(KERN_INFO "Found tm6000\n"); if (dev->dev_type != TM6000) dev->dev_type = TM6000; break; case 0xf4: printk(KERN_INFO "Found tm6010\n"); if (dev->dev_type != TM6010) dev->dev_type = TM6010; break; default: printk(KERN_INFO "Unknown board version = 0x%08x\n", board); } } else printk(KERN_ERR "Error %i while retrieving board version\n", board); if (dev->dev_type == TM6010) { tab = tm6010_init_tab; size = ARRAY_SIZE(tm6010_init_tab); } else { tab = tm6000_init_tab; size = ARRAY_SIZE(tm6000_init_tab); } /* Load board's initialization table */ for (i = 0; i < size; i++) { rc = tm6000_set_reg(dev, tab[i].req, tab[i].reg, tab[i].val); if (rc < 0) { printk(KERN_ERR "Error %i while setting req %d, " "reg %d to value %d\n", rc, tab[i].req, tab[i].reg, tab[i].val); return rc; } } msleep(5); /* Just to be conservative */ rc = tm6000_cards_setup(dev); return rc; } int tm6000_set_audio_bitrate(struct tm6000_core *dev, int bitrate) { int val = 0; u8 areg_f0 = 0x60; /* ADC MCLK = 250 Fs */ u8 areg_0a = 0x91; /* SIF 48KHz 
*/ switch (bitrate) { case 48000: areg_f0 = 0x60; /* ADC MCLK = 250 Fs */ areg_0a = 0x91; /* SIF 48KHz */ dev->audio_bitrate = bitrate; break; case 32000: areg_f0 = 0x00; /* ADC MCLK = 375 Fs */ areg_0a = 0x90; /* SIF 32KHz */ dev->audio_bitrate = bitrate; break; default: return -EINVAL; } /* enable I2S, if we use sif or external I2S device */ if (dev->dev_type == TM6010) { val = tm6000_set_reg(dev, TM6010_REQ08_R0A_A_I2S_MOD, areg_0a); if (val < 0) return val; val = tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, areg_f0, 0xf0); if (val < 0) return val; } else { val = tm6000_set_reg_mask(dev, TM6000_REQ07_REB_VADC_AADC_MODE, areg_f0, 0xf0); if (val < 0) return val; } return 0; } EXPORT_SYMBOL_GPL(tm6000_set_audio_bitrate); int tm6000_set_audio_rinput(struct tm6000_core *dev) { if (dev->dev_type == TM6010) { /* Audio crossbar setting, default SIF1 */ u8 areg_f0; u8 areg_07 = 0x10; switch (dev->rinput.amux) { case TM6000_AMUX_SIF1: case TM6000_AMUX_SIF2: areg_f0 = 0x03; areg_07 = 0x30; break; case TM6000_AMUX_ADC1: areg_f0 = 0x00; break; case TM6000_AMUX_ADC2: areg_f0 = 0x08; break; case TM6000_AMUX_I2S: areg_f0 = 0x04; break; default: printk(KERN_INFO "%s: audio input dosn't support\n", dev->name); return 0; break; } /* Set audio input crossbar */ tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, areg_f0, 0x0f); /* Mux overflow workaround */ tm6000_set_reg_mask(dev, TM6010_REQ07_R07_OUTPUT_CONTROL, areg_07, 0xf0); } else { u8 areg_eb; /* Audio setting, default LINE1 */ switch (dev->rinput.amux) { case TM6000_AMUX_ADC1: areg_eb = 0x00; break; case TM6000_AMUX_ADC2: areg_eb = 0x04; break; default: printk(KERN_INFO "%s: audio input dosn't support\n", dev->name); return 0; break; } /* Set audio input */ tm6000_set_reg_mask(dev, TM6000_REQ07_REB_VADC_AADC_MODE, areg_eb, 0x0f); } return 0; } static void tm6010_set_mute_sif(struct tm6000_core *dev, u8 mute) { u8 mute_reg = 0; if (mute) mute_reg = 0x08; tm6000_set_reg_mask(dev, 
TM6010_REQ08_R0A_A_I2S_MOD, mute_reg, 0x08); } static void tm6010_set_mute_adc(struct tm6000_core *dev, u8 mute) { u8 mute_reg = 0; if (mute) mute_reg = 0x20; if (dev->dev_type == TM6010) { tm6000_set_reg_mask(dev, TM6010_REQ08_RF2_LEFT_CHANNEL_VOL, mute_reg, 0x20); tm6000_set_reg_mask(dev, TM6010_REQ08_RF3_RIGHT_CHANNEL_VOL, mute_reg, 0x20); } else { tm6000_set_reg_mask(dev, TM6000_REQ07_REC_VADC_AADC_LVOL, mute_reg, 0x20); tm6000_set_reg_mask(dev, TM6000_REQ07_RED_VADC_AADC_RVOL, mute_reg, 0x20); } } int tm6000_tvaudio_set_mute(struct tm6000_core *dev, u8 mute) { enum tm6000_mux mux; if (dev->radio) mux = dev->rinput.amux; else mux = dev->vinput[dev->input].amux; switch (mux) { case TM6000_AMUX_SIF1: case TM6000_AMUX_SIF2: if (dev->dev_type == TM6010) tm6010_set_mute_sif(dev, mute); else { printk(KERN_INFO "ERROR: TM5600 and TM6000 don't has" " SIF audio inputs. Please check the %s" " configuration.\n", dev->name); return -EINVAL; } break; case TM6000_AMUX_ADC1: case TM6000_AMUX_ADC2: tm6010_set_mute_adc(dev, mute); break; default: return -EINVAL; break; } return 0; } static void tm6010_set_volume_sif(struct tm6000_core *dev, int vol) { u8 vol_reg; vol_reg = vol & 0x0F; if (vol < 0) vol_reg |= 0x40; tm6000_set_reg(dev, TM6010_REQ08_R07_A_LEFT_VOL, vol_reg); tm6000_set_reg(dev, TM6010_REQ08_R08_A_RIGHT_VOL, vol_reg); } static void tm6010_set_volume_adc(struct tm6000_core *dev, int vol) { u8 vol_reg; vol_reg = (vol + 0x10) & 0x1f; if (dev->dev_type == TM6010) { tm6000_set_reg(dev, TM6010_REQ08_RF2_LEFT_CHANNEL_VOL, vol_reg); tm6000_set_reg(dev, TM6010_REQ08_RF3_RIGHT_CHANNEL_VOL, vol_reg); } else { tm6000_set_reg(dev, TM6000_REQ07_REC_VADC_AADC_LVOL, vol_reg); tm6000_set_reg(dev, TM6000_REQ07_RED_VADC_AADC_RVOL, vol_reg); } } void tm6000_set_volume(struct tm6000_core *dev, int vol) { enum tm6000_mux mux; if (dev->radio) { mux = dev->rinput.amux; vol += 8; /* Offset to 0 dB */ } else mux = dev->vinput[dev->input].amux; switch (mux) { case TM6000_AMUX_SIF1: case 
TM6000_AMUX_SIF2: if (dev->dev_type == TM6010) tm6010_set_volume_sif(dev, vol); else printk(KERN_INFO "ERROR: TM5600 and TM6000 don't has" " SIF audio inputs. Please check the %s" " configuration.\n", dev->name); break; case TM6000_AMUX_ADC1: case TM6000_AMUX_ADC2: tm6010_set_volume_adc(dev, vol); break; default: break; } } static LIST_HEAD(tm6000_devlist); static DEFINE_MUTEX(tm6000_devlist_mutex); /* * tm6000_realease_resource() */ void tm6000_remove_from_devlist(struct tm6000_core *dev) { mutex_lock(&tm6000_devlist_mutex); list_del(&dev->devlist); mutex_unlock(&tm6000_devlist_mutex); }; void tm6000_add_into_devlist(struct tm6000_core *dev) { mutex_lock(&tm6000_devlist_mutex); list_add_tail(&dev->devlist, &tm6000_devlist); mutex_unlock(&tm6000_devlist_mutex); }; /* * Extension interface */ static LIST_HEAD(tm6000_extension_devlist); int tm6000_call_fillbuf(struct tm6000_core *dev, enum tm6000_ops_type type, char *buf, int size) { struct tm6000_ops *ops = NULL; /* FIXME: tm6000_extension_devlist_lock should be a spinlock */ if (!list_empty(&tm6000_extension_devlist)) { list_for_each_entry(ops, &tm6000_extension_devlist, next) { if (ops->fillbuf && ops->type == type) ops->fillbuf(dev, buf, size); } } return 0; } int tm6000_register_extension(struct tm6000_ops *ops) { struct tm6000_core *dev = NULL; mutex_lock(&tm6000_devlist_mutex); list_add_tail(&ops->next, &tm6000_extension_devlist); list_for_each_entry(dev, &tm6000_devlist, devlist) { ops->init(dev); printk(KERN_INFO "%s: Initialized (%s) extension\n", dev->name, ops->name); } mutex_unlock(&tm6000_devlist_mutex); return 0; } EXPORT_SYMBOL(tm6000_register_extension); void tm6000_unregister_extension(struct tm6000_ops *ops) { struct tm6000_core *dev = NULL; mutex_lock(&tm6000_devlist_mutex); list_for_each_entry(dev, &tm6000_devlist, devlist) ops->fini(dev); printk(KERN_INFO "tm6000: Remove (%s) extension\n", ops->name); list_del(&ops->next); mutex_unlock(&tm6000_devlist_mutex); } 
EXPORT_SYMBOL(tm6000_unregister_extension); void tm6000_init_extension(struct tm6000_core *dev) { struct tm6000_ops *ops = NULL; mutex_lock(&tm6000_devlist_mutex); if (!list_empty(&tm6000_extension_devlist)) { list_for_each_entry(ops, &tm6000_extension_devlist, next) { if (ops->init) ops->init(dev); } } mutex_unlock(&tm6000_devlist_mutex); } void tm6000_close_extension(struct tm6000_core *dev) { struct tm6000_ops *ops = NULL; mutex_lock(&tm6000_devlist_mutex); if (!list_empty(&tm6000_extension_devlist)) { list_for_each_entry(ops, &tm6000_extension_devlist, next) { if (ops->fini) ops->fini(dev); } } mutex_unlock(&tm6000_devlist_mutex); }
gpl-2.0
byterom/android_kernel_lge_g3
drivers/media/common/tuners/mt20xx.c
8255
15742
/* * i2c tv tuner chip device driver * controls microtune tuners, mt2032 + mt2050 at the moment. * * This "mt20xx" module was split apart from the original "tuner" module. */ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/videodev2.h> #include "tuner-i2c.h" #include "mt20xx.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable verbose debug messages"); /* ---------------------------------------------------------------------- */ static unsigned int optimize_vco = 1; module_param(optimize_vco, int, 0644); static unsigned int tv_antenna = 1; module_param(tv_antenna, int, 0644); static unsigned int radio_antenna; module_param(radio_antenna, int, 0644); /* ---------------------------------------------------------------------- */ #define MT2032 0x04 #define MT2030 0x06 #define MT2040 0x07 #define MT2050 0x42 static char *microtune_part[] = { [ MT2030 ] = "MT2030", [ MT2032 ] = "MT2032", [ MT2040 ] = "MT2040", [ MT2050 ] = "MT2050", }; struct microtune_priv { struct tuner_i2c_props i2c_props; unsigned int xogc; //unsigned int radio_if2; u32 frequency; }; static int microtune_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static int microtune_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct microtune_priv *priv = fe->tuner_priv; *frequency = priv->frequency; return 0; } // IsSpurInBand()? 
static int mt2032_spurcheck(struct dvb_frontend *fe, int f1, int f2, int spectrum_from,int spectrum_to) { struct microtune_priv *priv = fe->tuner_priv; int n1=1,n2,f; f1=f1/1000; //scale to kHz to avoid 32bit overflows f2=f2/1000; spectrum_from/=1000; spectrum_to/=1000; tuner_dbg("spurcheck f1=%d f2=%d from=%d to=%d\n", f1,f2,spectrum_from,spectrum_to); do { n2=-n1; f=n1*(f1-f2); do { n2--; f=f-f2; tuner_dbg("spurtest n1=%d n2=%d ftest=%d\n",n1,n2,f); if( (f>spectrum_from) && (f<spectrum_to)) tuner_dbg("mt2032 spurcheck triggered: %d\n",n1); } while ( (f>(f2-spectrum_to)) || (n2>-5)); n1++; } while (n1<5); return 1; } static int mt2032_compute_freq(struct dvb_frontend *fe, unsigned int rfin, unsigned int if1, unsigned int if2, unsigned int spectrum_from, unsigned int spectrum_to, unsigned char *buf, int *ret_sel, unsigned int xogc) //all in Hz { struct microtune_priv *priv = fe->tuner_priv; unsigned int fref,lo1,lo1n,lo1a,s,sel,lo1freq, desired_lo1, desired_lo2,lo2,lo2n,lo2a,lo2num,lo2freq; fref= 5250 *1000; //5.25MHz desired_lo1=rfin+if1; lo1=(2*(desired_lo1/1000)+(fref/1000)) / (2*fref/1000); lo1n=lo1/8; lo1a=lo1-(lo1n*8); s=rfin/1000/1000+1090; if(optimize_vco) { if(s>1890) sel=0; else if(s>1720) sel=1; else if(s>1530) sel=2; else if(s>1370) sel=3; else sel=4; // >1090 } else { if(s>1790) sel=0; // <1958 else if(s>1617) sel=1; else if(s>1449) sel=2; else if(s>1291) sel=3; else sel=4; // >1090 } *ret_sel=sel; lo1freq=(lo1a+8*lo1n)*fref; tuner_dbg("mt2032: rfin=%d lo1=%d lo1n=%d lo1a=%d sel=%d, lo1freq=%d\n", rfin,lo1,lo1n,lo1a,sel,lo1freq); desired_lo2=lo1freq-rfin-if2; lo2=(desired_lo2)/fref; lo2n=lo2/8; lo2a=lo2-(lo2n*8); lo2num=((desired_lo2/1000)%(fref/1000))* 3780/(fref/1000); //scale to fit in 32bit arith lo2freq=(lo2a+8*lo2n)*fref + lo2num*(fref/1000)/3780*1000; tuner_dbg("mt2032: rfin=%d lo2=%d lo2n=%d lo2a=%d num=%d lo2freq=%d\n", rfin,lo2,lo2n,lo2a,lo2num,lo2freq); if (lo1a > 7 || lo1n < 17 || lo1n > 48 || lo2a > 7 || lo2n < 17 || lo2n > 30) { 
tuner_info("mt2032: frequency parameters out of range: %d %d %d %d\n", lo1a, lo1n, lo2a,lo2n); return(-1); } mt2032_spurcheck(fe, lo1freq, desired_lo2, spectrum_from, spectrum_to); // should recalculate lo1 (one step up/down) // set up MT2032 register map for transfer over i2c buf[0]=lo1n-1; buf[1]=lo1a | (sel<<4); buf[2]=0x86; // LOGC buf[3]=0x0f; //reserved buf[4]=0x1f; buf[5]=(lo2n-1) | (lo2a<<5); if(rfin >400*1000*1000) buf[6]=0xe4; else buf[6]=0xf4; // set PKEN per rev 1.2 buf[7]=8+xogc; buf[8]=0xc3; //reserved buf[9]=0x4e; //reserved buf[10]=0xec; //reserved buf[11]=(lo2num&0xff); buf[12]=(lo2num>>8) |0x80; // Lo2RST return 0; } static int mt2032_check_lo_lock(struct dvb_frontend *fe) { struct microtune_priv *priv = fe->tuner_priv; int try,lock=0; unsigned char buf[2]; for(try=0;try<10;try++) { buf[0]=0x0e; tuner_i2c_xfer_send(&priv->i2c_props,buf,1); tuner_i2c_xfer_recv(&priv->i2c_props,buf,1); tuner_dbg("mt2032 Reg.E=0x%02x\n",buf[0]); lock=buf[0] &0x06; if (lock==6) break; tuner_dbg("mt2032: pll wait 1ms for lock (0x%2x)\n",buf[0]); udelay(1000); } return lock; } static int mt2032_optimize_vco(struct dvb_frontend *fe,int sel,int lock) { struct microtune_priv *priv = fe->tuner_priv; unsigned char buf[2]; int tad1; buf[0]=0x0f; tuner_i2c_xfer_send(&priv->i2c_props,buf,1); tuner_i2c_xfer_recv(&priv->i2c_props,buf,1); tuner_dbg("mt2032 Reg.F=0x%02x\n",buf[0]); tad1=buf[0]&0x07; if(tad1 ==0) return lock; if(tad1 ==1) return lock; if(tad1==2) { if(sel==0) return lock; else sel--; } else { if(sel<4) sel++; else return lock; } tuner_dbg("mt2032 optimize_vco: sel=%d\n",sel); buf[0]=0x0f; buf[1]=sel; tuner_i2c_xfer_send(&priv->i2c_props,buf,2); lock=mt2032_check_lo_lock(fe); return lock; } static void mt2032_set_if_freq(struct dvb_frontend *fe, unsigned int rfin, unsigned int if1, unsigned int if2, unsigned int from, unsigned int to) { unsigned char buf[21]; int lint_try,ret,sel,lock=0; struct microtune_priv *priv = fe->tuner_priv; tuner_dbg("mt2032_set_if_freq 
rfin=%d if1=%d if2=%d from=%d to=%d\n", rfin,if1,if2,from,to); buf[0]=0; ret=tuner_i2c_xfer_send(&priv->i2c_props,buf,1); tuner_i2c_xfer_recv(&priv->i2c_props,buf,21); buf[0]=0; ret=mt2032_compute_freq(fe,rfin,if1,if2,from,to,&buf[1],&sel,priv->xogc); if (ret<0) return; // send only the relevant registers per Rev. 1.2 buf[0]=0; ret=tuner_i2c_xfer_send(&priv->i2c_props,buf,4); buf[5]=5; ret=tuner_i2c_xfer_send(&priv->i2c_props,buf+5,4); buf[11]=11; ret=tuner_i2c_xfer_send(&priv->i2c_props,buf+11,3); if(ret!=3) tuner_warn("i2c i/o error: rc == %d (should be 3)\n",ret); // wait for PLLs to lock (per manual), retry LINT if not. for(lint_try=0; lint_try<2; lint_try++) { lock=mt2032_check_lo_lock(fe); if(optimize_vco) lock=mt2032_optimize_vco(fe,sel,lock); if(lock==6) break; tuner_dbg("mt2032: re-init PLLs by LINT\n"); buf[0]=7; buf[1]=0x80 +8+priv->xogc; // set LINT to re-init PLLs tuner_i2c_xfer_send(&priv->i2c_props,buf,2); mdelay(10); buf[1]=8+priv->xogc; tuner_i2c_xfer_send(&priv->i2c_props,buf,2); } if (lock!=6) tuner_warn("MT2032 Fatal Error: PLLs didn't lock.\n"); buf[0]=2; buf[1]=0x20; // LOGC for optimal phase noise ret=tuner_i2c_xfer_send(&priv->i2c_props,buf,2); if (ret!=2) tuner_warn("i2c i/o error: rc == %d (should be 2)\n",ret); } static int mt2032_set_tv_freq(struct dvb_frontend *fe, struct analog_parameters *params) { int if2,from,to; // signal bandwidth and picture carrier if (params->std & V4L2_STD_525_60) { // NTSC from = 40750*1000; to = 46750*1000; if2 = 45750*1000; } else { // PAL from = 32900*1000; to = 39900*1000; if2 = 38900*1000; } mt2032_set_if_freq(fe, params->frequency*62500, 1090*1000*1000, if2, from, to); return 0; } static int mt2032_set_radio_freq(struct dvb_frontend *fe, struct analog_parameters *params) { struct microtune_priv *priv = fe->tuner_priv; int if2; if (params->std & V4L2_STD_525_60) { tuner_dbg("pinnacle ntsc\n"); if2 = 41300 * 1000; } else { tuner_dbg("pinnacle pal\n"); if2 = 33300 * 1000; } // per Manual for FM tuning: 
first if center freq. 1085 MHz mt2032_set_if_freq(fe, params->frequency * 125 / 2, 1085*1000*1000,if2,if2,if2); return 0; } static int mt2032_set_params(struct dvb_frontend *fe, struct analog_parameters *params) { struct microtune_priv *priv = fe->tuner_priv; int ret = -EINVAL; switch (params->mode) { case V4L2_TUNER_RADIO: ret = mt2032_set_radio_freq(fe, params); priv->frequency = params->frequency * 125 / 2; break; case V4L2_TUNER_ANALOG_TV: case V4L2_TUNER_DIGITAL_TV: ret = mt2032_set_tv_freq(fe, params); priv->frequency = params->frequency * 62500; break; } return ret; } static struct dvb_tuner_ops mt2032_tuner_ops = { .set_analog_params = mt2032_set_params, .release = microtune_release, .get_frequency = microtune_get_frequency, }; // Initialization as described in "MT203x Programming Procedures", Rev 1.2, Feb.2001 static int mt2032_init(struct dvb_frontend *fe) { struct microtune_priv *priv = fe->tuner_priv; unsigned char buf[21]; int ret,xogc,xok=0; // Initialize Registers per spec. buf[1]=2; // Index to register 2 buf[2]=0xff; buf[3]=0x0f; buf[4]=0x1f; ret=tuner_i2c_xfer_send(&priv->i2c_props,buf+1,4); buf[5]=6; // Index register 6 buf[6]=0xe4; buf[7]=0x8f; buf[8]=0xc3; buf[9]=0x4e; buf[10]=0xec; ret=tuner_i2c_xfer_send(&priv->i2c_props,buf+5,6); buf[12]=13; // Index register 13 buf[13]=0x32; ret=tuner_i2c_xfer_send(&priv->i2c_props,buf+12,2); // Adjust XOGC (register 7), wait for XOK xogc=7; do { tuner_dbg("mt2032: xogc = 0x%02x\n",xogc&0x07); mdelay(10); buf[0]=0x0e; tuner_i2c_xfer_send(&priv->i2c_props,buf,1); tuner_i2c_xfer_recv(&priv->i2c_props,buf,1); xok=buf[0]&0x01; tuner_dbg("mt2032: xok = 0x%02x\n",xok); if (xok == 1) break; xogc--; tuner_dbg("mt2032: xogc = 0x%02x\n",xogc&0x07); if (xogc == 3) { xogc=4; // min. 
4 per spec break; } buf[0]=0x07; buf[1]=0x88 + xogc; ret=tuner_i2c_xfer_send(&priv->i2c_props,buf,2); if (ret!=2) tuner_warn("i2c i/o error: rc == %d (should be 2)\n",ret); } while (xok != 1 ); priv->xogc=xogc; memcpy(&fe->ops.tuner_ops, &mt2032_tuner_ops, sizeof(struct dvb_tuner_ops)); return(1); } static void mt2050_set_antenna(struct dvb_frontend *fe, unsigned char antenna) { struct microtune_priv *priv = fe->tuner_priv; unsigned char buf[2]; buf[0] = 6; buf[1] = antenna ? 0x11 : 0x10; tuner_i2c_xfer_send(&priv->i2c_props, buf, 2); tuner_dbg("mt2050: enabled antenna connector %d\n", antenna); } static void mt2050_set_if_freq(struct dvb_frontend *fe,unsigned int freq, unsigned int if2) { struct microtune_priv *priv = fe->tuner_priv; unsigned int if1=1218*1000*1000; unsigned int f_lo1,f_lo2,lo1,lo2,f_lo1_modulo,f_lo2_modulo,num1,num2,div1a,div1b,div2a,div2b; int ret; unsigned char buf[6]; tuner_dbg("mt2050_set_if_freq freq=%d if1=%d if2=%d\n", freq,if1,if2); f_lo1=freq+if1; f_lo1=(f_lo1/1000000)*1000000; f_lo2=f_lo1-freq-if2; f_lo2=(f_lo2/50000)*50000; lo1=f_lo1/4000000; lo2=f_lo2/4000000; f_lo1_modulo= f_lo1-(lo1*4000000); f_lo2_modulo= f_lo2-(lo2*4000000); num1=4*f_lo1_modulo/4000000; num2=4096*(f_lo2_modulo/1000)/4000; // todo spurchecks div1a=(lo1/12)-1; div1b=lo1-(div1a+1)*12; div2a=(lo2/8)-1; div2b=lo2-(div2a+1)*8; if (debug > 1) { tuner_dbg("lo1 lo2 = %d %d\n", lo1, lo2); tuner_dbg("num1 num2 div1a div1b div2a div2b= %x %x %x %x %x %x\n", num1,num2,div1a,div1b,div2a,div2b); } buf[0]=1; buf[1]= 4*div1b + num1; if(freq<275*1000*1000) buf[1] = buf[1]|0x80; buf[2]=div1a; buf[3]=32*div2b + num2/256; buf[4]=num2-(num2/256)*256; buf[5]=div2a; if(num2!=0) buf[5]=buf[5]|0x40; if (debug > 1) { int i; tuner_dbg("bufs is: "); for(i=0;i<6;i++) printk("%x ",buf[i]); printk("\n"); } ret=tuner_i2c_xfer_send(&priv->i2c_props,buf,6); if (ret!=6) tuner_warn("i2c i/o error: rc == %d (should be 6)\n",ret); } static int mt2050_set_tv_freq(struct dvb_frontend *fe, struct 
analog_parameters *params) { unsigned int if2; if (params->std & V4L2_STD_525_60) { // NTSC if2 = 45750*1000; } else { // PAL if2 = 38900*1000; } if (V4L2_TUNER_DIGITAL_TV == params->mode) { // DVB (pinnacle 300i) if2 = 36150*1000; } mt2050_set_if_freq(fe, params->frequency*62500, if2); mt2050_set_antenna(fe, tv_antenna); return 0; } static int mt2050_set_radio_freq(struct dvb_frontend *fe, struct analog_parameters *params) { struct microtune_priv *priv = fe->tuner_priv; int if2; if (params->std & V4L2_STD_525_60) { tuner_dbg("pinnacle ntsc\n"); if2 = 41300 * 1000; } else { tuner_dbg("pinnacle pal\n"); if2 = 33300 * 1000; } mt2050_set_if_freq(fe, params->frequency * 125 / 2, if2); mt2050_set_antenna(fe, radio_antenna); return 0; } static int mt2050_set_params(struct dvb_frontend *fe, struct analog_parameters *params) { struct microtune_priv *priv = fe->tuner_priv; int ret = -EINVAL; switch (params->mode) { case V4L2_TUNER_RADIO: ret = mt2050_set_radio_freq(fe, params); priv->frequency = params->frequency * 125 / 2; break; case V4L2_TUNER_ANALOG_TV: case V4L2_TUNER_DIGITAL_TV: ret = mt2050_set_tv_freq(fe, params); priv->frequency = params->frequency * 62500; break; } return ret; } static struct dvb_tuner_ops mt2050_tuner_ops = { .set_analog_params = mt2050_set_params, .release = microtune_release, .get_frequency = microtune_get_frequency, }; static int mt2050_init(struct dvb_frontend *fe) { struct microtune_priv *priv = fe->tuner_priv; unsigned char buf[2]; buf[0] = 6; buf[1] = 0x10; tuner_i2c_xfer_send(&priv->i2c_props, buf, 2); /* power */ buf[0] = 0x0f; buf[1] = 0x0f; tuner_i2c_xfer_send(&priv->i2c_props, buf, 2); /* m1lo */ buf[0] = 0x0d; tuner_i2c_xfer_send(&priv->i2c_props, buf, 1); tuner_i2c_xfer_recv(&priv->i2c_props, buf, 1); tuner_dbg("mt2050: sro is %x\n", buf[0]); memcpy(&fe->ops.tuner_ops, &mt2050_tuner_ops, sizeof(struct dvb_tuner_ops)); return 0; } struct dvb_frontend *microtune_attach(struct dvb_frontend *fe, struct i2c_adapter* i2c_adap, u8 
i2c_addr) { struct microtune_priv *priv = NULL; char *name; unsigned char buf[21]; int company_code; priv = kzalloc(sizeof(struct microtune_priv), GFP_KERNEL); if (priv == NULL) return NULL; fe->tuner_priv = priv; priv->i2c_props.addr = i2c_addr; priv->i2c_props.adap = i2c_adap; priv->i2c_props.name = "mt20xx"; //priv->radio_if2 = 10700 * 1000; /* 10.7MHz - FM radio */ memset(buf,0,sizeof(buf)); name = "unknown"; tuner_i2c_xfer_send(&priv->i2c_props,buf,1); tuner_i2c_xfer_recv(&priv->i2c_props,buf,21); if (debug) { int i; tuner_dbg("MT20xx hexdump:"); for(i=0;i<21;i++) { printk(" %02x",buf[i]); if(((i+1)%8)==0) printk(" "); } printk("\n"); } company_code = buf[0x11] << 8 | buf[0x12]; tuner_info("microtune: companycode=%04x part=%02x rev=%02x\n", company_code,buf[0x13],buf[0x14]); if (buf[0x13] < ARRAY_SIZE(microtune_part) && NULL != microtune_part[buf[0x13]]) name = microtune_part[buf[0x13]]; switch (buf[0x13]) { case MT2032: mt2032_init(fe); break; case MT2050: mt2050_init(fe); break; default: tuner_info("microtune %s found, not (yet?) supported, sorry :-/\n", name); return NULL; } strlcpy(fe->ops.tuner_ops.info.name, name, sizeof(fe->ops.tuner_ops.info.name)); tuner_info("microtune %s found, OK\n",name); return fe; } EXPORT_SYMBOL_GPL(microtune_attach); MODULE_DESCRIPTION("Microtune tuner driver"); MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer"); MODULE_LICENSE("GPL"); /* * Overrides for Emacs so that we follow Linus's tabbing style. * --------------------------------------------------------------------------- * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
FrozenCow/msm
drivers/net/arcnet/arc-rimi.c
9023
10981
/* * Linux ARCnet driver - "RIM I" (entirely mem-mapped) cards * * Written 1994-1999 by Avery Pennarun. * Written 1999-2000 by Martin Mares <mj@ucw.cz>. * Derived from skeleton.c by Donald Becker. * * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com) * for sponsoring the further development of this driver. * * ********************** * * The original copyright of skeleton.c was as follows: * * skeleton.c Written 1993 by Donald Becker. * Copyright 1993 United States Government as represented by the * Director, National Security Agency. This software may only be used * and distributed according to the terms of the GNU General Public License as * modified by SRC, incorporated herein by reference. * * ********************** * * For more details, see drivers/net/arcnet.c * * ********************** */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/bootmem.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/io.h> #include <linux/arcdevice.h> #define VERSION "arcnet: RIM I (entirely mem-mapped) support\n" /* Internal function declarations */ static int arcrimi_probe(struct net_device *dev); static int arcrimi_found(struct net_device *dev); static void arcrimi_command(struct net_device *dev, int command); static int arcrimi_status(struct net_device *dev); static void arcrimi_setmask(struct net_device *dev, int mask); static int arcrimi_reset(struct net_device *dev, int really_reset); static void arcrimi_copy_to_card(struct net_device *dev, int bufnum, int offset, void *buf, int count); static void arcrimi_copy_from_card(struct net_device *dev, int bufnum, int offset, void *buf, int count); /* Handy defines for ARCnet specific stuff */ /* Amount of I/O memory used by the card */ #define BUFFER_SIZE (512) #define MIRROR_SIZE (BUFFER_SIZE*4) /* COM 9026 controller chip --> ARCnet register addresses */ #define 
_INTMASK (ioaddr+0) /* writable */ #define _STATUS (ioaddr+0) /* readable */ #define _COMMAND (ioaddr+1) /* writable, returns random vals on read (?) */ #define _RESET (ioaddr+8) /* software reset (on read) */ #define _MEMDATA (ioaddr+12) /* Data port for IO-mapped memory */ #define _ADDR_HI (ioaddr+15) /* Control registers for said */ #define _ADDR_LO (ioaddr+14) #define _CONFIG (ioaddr+2) /* Configuration register */ #undef ASTATUS #undef ACOMMAND #undef AINTMASK #define ASTATUS() readb(_STATUS) #define ACOMMAND(cmd) writeb((cmd),_COMMAND) #define AINTMASK(msk) writeb((msk),_INTMASK) #define SETCONF() writeb(lp->config,_CONFIG) /* * We cannot probe for a RIM I card; one reason is I don't know how to reset * them. In fact, we can't even get their node ID automatically. So, we * need to be passed a specific shmem address, IRQ, and node ID. */ static int __init arcrimi_probe(struct net_device *dev) { BUGLVL(D_NORMAL) printk(VERSION); BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n"); BUGLVL(D_NORMAL) printk("Given: node %02Xh, shmem %lXh, irq %d\n", dev->dev_addr[0], dev->mem_start, dev->irq); if (dev->mem_start <= 0 || dev->irq <= 0) { BUGLVL(D_NORMAL) printk("No autoprobe for RIM I; you " "must specify the shmem and irq!\n"); return -ENODEV; } if (dev->dev_addr[0] == 0) { BUGLVL(D_NORMAL) printk("You need to specify your card's station " "ID!\n"); return -ENODEV; } /* * Grab the memory region at mem_start for MIRROR_SIZE bytes. * Later in arcrimi_found() the real size will be determined * and this reserve will be released and the correct size * will be taken. 
*/ if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) { BUGLVL(D_NORMAL) printk("Card memory already allocated\n"); return -ENODEV; } return arcrimi_found(dev); } static int check_mirror(unsigned long addr, size_t size) { void __iomem *p; int res = -1; if (!request_mem_region(addr, size, "arcnet (90xx)")) return -1; p = ioremap(addr, size); if (p) { if (readb(p) == TESTvalue) res = 1; else res = 0; iounmap(p); } release_mem_region(addr, size); return res; } /* * Set up the struct net_device associated with this card. Called after * probing succeeds. */ static int __init arcrimi_found(struct net_device *dev) { struct arcnet_local *lp; unsigned long first_mirror, last_mirror, shmem; void __iomem *p; int mirror_size; int err; p = ioremap(dev->mem_start, MIRROR_SIZE); if (!p) { release_mem_region(dev->mem_start, MIRROR_SIZE); BUGMSG(D_NORMAL, "Can't ioremap\n"); return -ENODEV; } /* reserve the irq */ if (request_irq(dev->irq, arcnet_interrupt, 0, "arcnet (RIM I)", dev)) { iounmap(p); release_mem_region(dev->mem_start, MIRROR_SIZE); BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq); return -ENODEV; } shmem = dev->mem_start; writeb(TESTvalue, p); writeb(dev->dev_addr[0], p + 1); /* actually the node ID */ /* find the real shared memory start/end points, including mirrors */ /* guess the actual size of one "memory mirror" - the number of * bytes between copies of the shared memory. On most cards, it's * 2k (or there are no mirrors at all) but on some, it's 4k. 
*/ mirror_size = MIRROR_SIZE; if (readb(p) == TESTvalue && check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 && check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1) mirror_size = 2 * MIRROR_SIZE; first_mirror = shmem - mirror_size; while (check_mirror(first_mirror, mirror_size) == 1) first_mirror -= mirror_size; first_mirror += mirror_size; last_mirror = shmem + mirror_size; while (check_mirror(last_mirror, mirror_size) == 1) last_mirror += mirror_size; last_mirror -= mirror_size; dev->mem_start = first_mirror; dev->mem_end = last_mirror + MIRROR_SIZE - 1; /* initialize the rest of the device structure. */ lp = netdev_priv(dev); lp->card_name = "RIM I"; lp->hw.command = arcrimi_command; lp->hw.status = arcrimi_status; lp->hw.intmask = arcrimi_setmask; lp->hw.reset = arcrimi_reset; lp->hw.owner = THIS_MODULE; lp->hw.copy_to_card = arcrimi_copy_to_card; lp->hw.copy_from_card = arcrimi_copy_from_card; /* * re-reserve the memory region - arcrimi_probe() alloced this reqion * but didn't know the real size. Free that region and then re-get * with the correct size. There is a VERY slim chance this could * fail. 
*/ iounmap(p); release_mem_region(shmem, MIRROR_SIZE); if (!request_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1, "arcnet (90xx)")) { BUGMSG(D_NORMAL, "Card memory already allocated\n"); goto err_free_irq; } lp->mem_start = ioremap(dev->mem_start, dev->mem_end - dev->mem_start + 1); if (!lp->mem_start) { BUGMSG(D_NORMAL, "Can't remap device memory!\n"); goto err_release_mem; } /* get and check the station ID from offset 1 in shmem */ dev->dev_addr[0] = readb(lp->mem_start + 1); BUGMSG(D_NORMAL, "ARCnet RIM I: station %02Xh found at IRQ %d, " "ShMem %lXh (%ld*%d bytes).\n", dev->dev_addr[0], dev->irq, dev->mem_start, (dev->mem_end - dev->mem_start + 1) / mirror_size, mirror_size); err = register_netdev(dev); if (err) goto err_unmap; return 0; err_unmap: iounmap(lp->mem_start); err_release_mem: release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); err_free_irq: free_irq(dev->irq, dev); return -EIO; } /* * Do a hardware reset on the card, and set up necessary registers. * * This should be called as little as possible, because it disrupts the * token on the network (causes a RECON) and requires a significant delay. * * However, it does make sure the card is in a defined state. */ static int arcrimi_reset(struct net_device *dev, int really_reset) { struct arcnet_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->mem_start + 0x800; BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", dev->name, ASTATUS()); if (really_reset) { writeb(TESTvalue, ioaddr - 0x800); /* fake reset */ return 0; } ACOMMAND(CFLAGScmd | RESETclear); /* clear flags & end reset */ ACOMMAND(CFLAGScmd | CONFIGclear); /* enable extended (512-byte) packets */ ACOMMAND(CONFIGcmd | EXTconf); /* done! return success. 
*/
	return 0;
}

/* Set the ARCnet interrupt mask register. */
static void arcrimi_setmask(struct net_device *dev, int mask)
{
	struct arcnet_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->mem_start + 0x800;

	/* NOTE(review): lp/ioaddr look unused here, but the AINTMASK()
	 * macro presumably expands to reference ioaddr -- verify against
	 * the arcnet header before removing them. */
	AINTMASK(mask);
}

/* Read and return the card status register. */
static int arcrimi_status(struct net_device *dev)
{
	struct arcnet_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->mem_start + 0x800;

	return ASTATUS();
}

/* Write a command to the card's command register. */
static void arcrimi_command(struct net_device *dev, int cmd)
{
	struct arcnet_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->mem_start + 0x800;

	ACOMMAND(cmd);
}

/* Copy @count bytes from @buf into on-card packet buffer @bufnum
 * (512-byte buffers starting 0x800 into the shared memory window). */
static void arcrimi_copy_to_card(struct net_device *dev, int bufnum, int offset,
				 void *buf, int count)
{
	struct arcnet_local *lp = netdev_priv(dev);
	void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset;
	TIME("memcpy_toio", count, memcpy_toio(memaddr, buf, count));
}

/* Copy @count bytes out of on-card packet buffer @bufnum into @buf. */
static void arcrimi_copy_from_card(struct net_device *dev, int bufnum, int offset,
				   void *buf, int count)
{
	struct arcnet_local *lp = netdev_priv(dev);
	void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset;
	TIME("memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
}

static int node;
static int io;			/* use the insmod io= irq= node= options */
static int irq;
static char device[9];		/* use eg. device=arc1 to change name */

module_param(node, int, 0);
module_param(io, int, 0);
module_param(irq, int, 0);
module_param_string(device, device, sizeof(device), 0);
MODULE_LICENSE("GPL");

/* The single device instance this driver manages. */
static struct net_device *my_dev;

/* Module entry point: allocate the net_device, apply the module/boot
 * parameters and probe the hardware. */
static int __init arc_rimi_init(void)
{
	struct net_device *dev;

	dev = alloc_arcdev(device);
	if (!dev)
		return -ENOMEM;

	if (node && node != 0xff)
		dev->dev_addr[0] = node;

	dev->mem_start = io;
	dev->irq = irq;
	/* IRQ 2 is conventionally an alias for IRQ 9 on AT-style
	 * interrupt controllers. */
	if (dev->irq == 2)
		dev->irq = 9;

	if (arcrimi_probe(dev)) {
		free_netdev(dev);
		return -EIO;
	}

	my_dev = dev;
	return 0;
}

/* Module exit: release resources in reverse order of acquisition. */
static void __exit arc_rimi_exit(void)
{
	struct net_device *dev = my_dev;
	struct arcnet_local *lp = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(lp->mem_start);
	release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
	free_irq(dev->irq, dev);
	free_netdev(dev);
}

#ifndef MODULE
/* Parse the "arcrimi=io[,irq[,node]]" boot option.  The switch falls
 * through deliberately so trailing arguments are optional. */
static int __init arcrimi_setup(char *s)
{
	int ints[8];
	s = get_options(s, 8, ints);
	if (!ints[0])
		return 1;
	switch (ints[0]) {
	default:		/* ERROR */
		printk("arcrimi: Too many arguments.\n");
		/* fall through */
	case 3:			/* Node ID */
		node = ints[3];
		/* fall through */
	case 2:			/* IRQ */
		irq = ints[2];
		/* fall through */
	case 1:			/* IO address */
		io = ints[1];
	}
	if (*s)
		snprintf(device, sizeof(device), "%s", s);
	return 1;
}
__setup("arcrimi=", arcrimi_setup);
#endif				/* MODULE */

module_init(arc_rimi_init)
module_exit(arc_rimi_exit)
gpl-2.0
tchaari/android_kernel_samsung_crespo_kitkang
arch/mips/sgi-ip22/ip22-nvram.c
9535
3673
/* * ip22-nvram.c: NVRAM and serial EEPROM handling. * * Copyright (C) 2003 Ladislav Michl (ladis@linux-mips.org) */ #include <linux/module.h> #include <asm/sgi/hpc3.h> #include <asm/sgi/ip22.h> /* Control opcode for serial eeprom */ #define EEPROM_READ 0xc000 /* serial memory read */ #define EEPROM_WEN 0x9800 /* write enable before prog modes */ #define EEPROM_WRITE 0xa000 /* serial memory write */ #define EEPROM_WRALL 0x8800 /* write all registers */ #define EEPROM_WDS 0x8000 /* disable all programming */ #define EEPROM_PRREAD 0xc000 /* read protect register */ #define EEPROM_PREN 0x9800 /* enable protect register mode */ #define EEPROM_PRCLEAR 0xffff /* clear protect register */ #define EEPROM_PRWRITE 0xa000 /* write protect register */ #define EEPROM_PRDS 0x8000 /* disable protect register, forever */ #define EEPROM_EPROT 0x01 /* Protect register enable */ #define EEPROM_CSEL 0x02 /* Chip select */ #define EEPROM_ECLK 0x04 /* EEPROM clock */ #define EEPROM_DATO 0x08 /* Data out */ #define EEPROM_DATI 0x10 /* Data in */ /* We need to use these functions early... */ #define delay() ({ \ int x; \ for (x=0; x<100000; x++) __asm__ __volatile__(""); }) #define eeprom_cs_on(ptr) ({ \ __raw_writel(__raw_readl(ptr) & ~EEPROM_DATO, ptr); \ __raw_writel(__raw_readl(ptr) & ~EEPROM_ECLK, ptr); \ __raw_writel(__raw_readl(ptr) & ~EEPROM_EPROT, ptr); \ delay(); \ __raw_writel(__raw_readl(ptr) | EEPROM_CSEL, ptr); \ __raw_writel(__raw_readl(ptr) | EEPROM_ECLK, ptr); }) #define eeprom_cs_off(ptr) ({ \ __raw_writel(__raw_readl(ptr) & ~EEPROM_ECLK, ptr); \ __raw_writel(__raw_readl(ptr) & ~EEPROM_CSEL, ptr); \ __raw_writel(__raw_readl(ptr) | EEPROM_EPROT, ptr); \ __raw_writel(__raw_readl(ptr) | EEPROM_ECLK, ptr); }) #define BITS_IN_COMMAND 11 /* * clock in the nvram command and the register number. For the * national semiconductor nv ram chip the op code is 3 bits and * the address is 6/8 bits. 
*/
static inline void eeprom_cmd(unsigned int *ctrl, unsigned cmd, unsigned reg)
{
	unsigned short ser_cmd;
	int i;

	/* Left-justify opcode + address in a 16-bit word so the top
	 * BITS_IN_COMMAND bits are what gets shifted out, MSB first. */
	ser_cmd = cmd | (reg << (16 - BITS_IN_COMMAND));
	for (i = 0; i < BITS_IN_COMMAND; i++) {
		if (ser_cmd & (1<<15))	/* if high order bit set */
			__raw_writel(__raw_readl(ctrl) | EEPROM_DATO, ctrl);
		else
			__raw_writel(__raw_readl(ctrl) & ~EEPROM_DATO, ctrl);
		/* pulse the clock low then high around each data bit */
		__raw_writel(__raw_readl(ctrl) & ~EEPROM_ECLK, ctrl);
		delay();
		__raw_writel(__raw_readl(ctrl) | EEPROM_ECLK, ctrl);
		delay();
		ser_cmd <<= 1;
	}
	/* see data sheet timing diagram */
	__raw_writel(__raw_readl(ctrl) & ~EEPROM_DATO, ctrl);
}

/*
 * Read one 16-bit word at @reg from the serial EEPROM behind control
 * register @ctrl: clock out a READ command, then clock in 16 data
 * bits, MSB first.
 */
unsigned short ip22_eeprom_read(unsigned int *ctrl, int reg)
{
	unsigned short res = 0;
	int i;

	__raw_writel(__raw_readl(ctrl) & ~EEPROM_EPROT, ctrl);
	eeprom_cs_on(ctrl);
	eeprom_cmd(ctrl, EEPROM_READ, reg);

	/* clock the data out of serial mem */
	for (i = 0; i < 16; i++) {
		__raw_writel(__raw_readl(ctrl) & ~EEPROM_ECLK, ctrl);
		delay();
		__raw_writel(__raw_readl(ctrl) | EEPROM_ECLK, ctrl);
		delay();
		res <<= 1;
		if (__raw_readl(ctrl) & EEPROM_DATI)
			res |= 1;
	}

	eeprom_cs_off(ctrl);

	return res;
}

EXPORT_SYMBOL(ip22_eeprom_read);

/*
 * Read specified register from main NVRAM.  The backing store differs
 * by machine: Fullhouse uses a serial EEPROM, Guiness battery-backed
 * RAM addressed bytewise (hence the big-endian two-byte assembly).
 */
unsigned short ip22_nvram_read(int reg)
{
	if (ip22_is_fullhouse())
		/* IP22 (Indigo2 aka FullHouse) stores env variables into
		 * 93CS56 Microwire Bus EEPROM 2048 Bit (128x16) */
		return ip22_eeprom_read(&hpc3c0->eeprom, reg);
	else {
		unsigned short tmp;
		/* IP24 (Indy aka Guiness) uses DS1386 8K version */
		reg <<= 1;
		tmp = hpc3c0->bbram[reg++] & 0xff;
		return (tmp << 8) | (hpc3c0->bbram[reg] & 0xff);
	}
}
EXPORT_SYMBOL(ip22_nvram_read);
gpl-2.0
AOSPA-legacy/android_kernel_oneplus_msm8974
net/ipv4/tcp_highspeed.c
10559
5018
/* * Sally Floyd's High Speed TCP (RFC 3649) congestion control * * See http://www.icir.org/floyd/hstcp.html * * John Heffner <jheffner@psc.edu> */ #include <linux/module.h> #include <net/tcp.h> /* From AIMD tables from RFC 3649 appendix B, * with fixed-point MD scaled <<8. */ static const struct hstcp_aimd_val { unsigned int cwnd; unsigned int md; } hstcp_aimd_vals[] = { { 38, 128, /* 0.50 */ }, { 118, 112, /* 0.44 */ }, { 221, 104, /* 0.41 */ }, { 347, 98, /* 0.38 */ }, { 495, 93, /* 0.37 */ }, { 663, 89, /* 0.35 */ }, { 851, 86, /* 0.34 */ }, { 1058, 83, /* 0.33 */ }, { 1284, 81, /* 0.32 */ }, { 1529, 78, /* 0.31 */ }, { 1793, 76, /* 0.30 */ }, { 2076, 74, /* 0.29 */ }, { 2378, 72, /* 0.28 */ }, { 2699, 71, /* 0.28 */ }, { 3039, 69, /* 0.27 */ }, { 3399, 68, /* 0.27 */ }, { 3778, 66, /* 0.26 */ }, { 4177, 65, /* 0.26 */ }, { 4596, 64, /* 0.25 */ }, { 5036, 62, /* 0.25 */ }, { 5497, 61, /* 0.24 */ }, { 5979, 60, /* 0.24 */ }, { 6483, 59, /* 0.23 */ }, { 7009, 58, /* 0.23 */ }, { 7558, 57, /* 0.22 */ }, { 8130, 56, /* 0.22 */ }, { 8726, 55, /* 0.22 */ }, { 9346, 54, /* 0.21 */ }, { 9991, 53, /* 0.21 */ }, { 10661, 52, /* 0.21 */ }, { 11358, 52, /* 0.20 */ }, { 12082, 51, /* 0.20 */ }, { 12834, 50, /* 0.20 */ }, { 13614, 49, /* 0.19 */ }, { 14424, 48, /* 0.19 */ }, { 15265, 48, /* 0.19 */ }, { 16137, 47, /* 0.19 */ }, { 17042, 46, /* 0.18 */ }, { 17981, 45, /* 0.18 */ }, { 18955, 45, /* 0.18 */ }, { 19965, 44, /* 0.17 */ }, { 21013, 43, /* 0.17 */ }, { 22101, 43, /* 0.17 */ }, { 23230, 42, /* 0.17 */ }, { 24402, 41, /* 0.16 */ }, { 25618, 41, /* 0.16 */ }, { 26881, 40, /* 0.16 */ }, { 28193, 39, /* 0.16 */ }, { 29557, 39, /* 0.15 */ }, { 30975, 38, /* 0.15 */ }, { 32450, 38, /* 0.15 */ }, { 33986, 37, /* 0.15 */ }, { 35586, 36, /* 0.14 */ }, { 37253, 36, /* 0.14 */ }, { 38992, 35, /* 0.14 */ }, { 40808, 35, /* 0.14 */ }, { 42707, 34, /* 0.13 */ }, { 44694, 33, /* 0.13 */ }, { 46776, 33, /* 0.13 */ }, { 48961, 32, /* 0.13 */ }, { 51258, 32, /* 0.13 */ }, { 53677, 
31, /* 0.12 */ }, { 56230, 30, /* 0.12 */ }, { 58932, 30, /* 0.12 */ }, { 61799, 29, /* 0.12 */ }, { 64851, 28, /* 0.11 */ }, { 68113, 28, /* 0.11 */ }, { 71617, 27, /* 0.11 */ }, { 75401, 26, /* 0.10 */ }, { 79517, 26, /* 0.10 */ }, { 84035, 25, /* 0.10 */ }, { 89053, 24, /* 0.10 */ }, }; #define HSTCP_AIMD_MAX ARRAY_SIZE(hstcp_aimd_vals) struct hstcp { u32 ai; }; static void hstcp_init(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct hstcp *ca = inet_csk_ca(sk); ca->ai = 0; /* Ensure the MD arithmetic works. This is somewhat pedantic, * since I don't think we will see a cwnd this large. :) */ tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); } static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 in_flight) { struct tcp_sock *tp = tcp_sk(sk); struct hstcp *ca = inet_csk_ca(sk); if (!tcp_is_cwnd_limited(sk, in_flight)) return; if (tp->snd_cwnd <= tp->snd_ssthresh) tcp_slow_start(tp); else { /* Update AIMD parameters. * * We want to guarantee that: * hstcp_aimd_vals[ca->ai-1].cwnd < * snd_cwnd <= * hstcp_aimd_vals[ca->ai].cwnd */ if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) { while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && ca->ai < HSTCP_AIMD_MAX - 1) ca->ai++; } else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) { while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) ca->ai--; } /* Do additive increase */ if (tp->snd_cwnd < tp->snd_cwnd_clamp) { /* cwnd = cwnd + a(w) / cwnd */ tp->snd_cwnd_cnt += ca->ai + 1; if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { tp->snd_cwnd_cnt -= tp->snd_cwnd; tp->snd_cwnd++; } } } } static u32 hstcp_ssthresh(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); const struct hstcp *ca = inet_csk_ca(sk); /* Do multiplicative decrease */ return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U); } static struct tcp_congestion_ops tcp_highspeed __read_mostly = { .init = hstcp_init, .ssthresh = hstcp_ssthresh, .cong_avoid = hstcp_cong_avoid, 
.min_cwnd = tcp_reno_min_cwnd, .owner = THIS_MODULE, .name = "highspeed" }; static int __init hstcp_register(void) { BUILD_BUG_ON(sizeof(struct hstcp) > ICSK_CA_PRIV_SIZE); return tcp_register_congestion_control(&tcp_highspeed); } static void __exit hstcp_unregister(void) { tcp_unregister_congestion_control(&tcp_highspeed); } module_init(hstcp_register); module_exit(hstcp_unregister); MODULE_AUTHOR("John Heffner"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("High Speed TCP");
gpl-2.0
atniptw/PonyBuntu
arch/mips/dec/prom/console.c
13887
1025
/*
 * DECstation PROM-based early console support.
 *
 * Copyright (C) 2004, 2007  Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/console.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>

#include <asm/dec/prom.h>

/*
 * Emit the string in NUL-terminated pieces that fit the on-stack
 * buffer, since prom_printf() needs a terminated C string.
 */
static void __init prom_console_write(struct console *con, const char *s,
				      unsigned int c)
{
	char buf[81];

	while (c > 0) {
		/* At most sizeof(buf) - 1 payload bytes per call,
		 * leaving room for the terminating '\0'. */
		unsigned int len = c < sizeof(buf) - 1 ? c : sizeof(buf) - 1;

		memcpy(buf, s, len);
		buf[len] = '\0';
		prom_printf("%s", buf);
		s += len;
		c -= len;
	}
}

static struct console promcons __initdata = {
	.name	= "prom",
	.write	= prom_console_write,
	.flags	= CON_BOOT | CON_PRINTBUFFER,
	.index	= -1,
};

void __init register_prom_console(void)
{
	register_console(&promcons);
}
gpl-2.0
lab11/bluetooth-next
drivers/clk/mediatek/clk-pll.c
320
7954
/* * Copyright (c) 2014 MediaTek Inc. * Author: James Liao <jamesjj.liao@mediatek.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/of.h> #include <linux/of_address.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/clkdev.h> #include <linux/delay.h> #include "clk-mtk.h" #define REG_CON0 0 #define REG_CON1 4 #define CON0_BASE_EN BIT(0) #define CON0_PWR_ON BIT(0) #define CON0_ISO_EN BIT(1) #define CON0_PCW_CHG BIT(31) #define AUDPLL_TUNER_EN BIT(31) #define POSTDIV_MASK 0x7 #define INTEGER_BITS 7 /* * MediaTek PLLs are configured through their pcw value. The pcw value describes * a divider in the PLL feedback loop which consists of 7 bits for the integer * part and the remaining bits (if present) for the fractional part. Also they * have a 3 bit power-of-two post divider. */ struct mtk_clk_pll { struct clk_hw hw; void __iomem *base_addr; void __iomem *pd_addr; void __iomem *pwr_addr; void __iomem *tuner_addr; void __iomem *pcw_addr; const struct mtk_pll_data *data; }; static inline struct mtk_clk_pll *to_mtk_clk_pll(struct clk_hw *hw) { return container_of(hw, struct mtk_clk_pll, hw); } static int mtk_pll_is_prepared(struct clk_hw *hw) { struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); return (readl(pll->base_addr + REG_CON0) & CON0_BASE_EN) != 0; } static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin, u32 pcw, int postdiv) { int pcwbits = pll->data->pcwbits; int pcwfbits; u64 vco; u8 c = 0; /* The fractional part of the PLL divider. */ pcwfbits = pcwbits > INTEGER_BITS ? 
pcwbits - INTEGER_BITS : 0; vco = (u64)fin * pcw; if (pcwfbits && (vco & GENMASK(pcwfbits - 1, 0))) c = 1; vco >>= pcwfbits; if (c) vco++; return ((unsigned long)vco + postdiv - 1) / postdiv; } static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw, int postdiv) { u32 con1, val; int pll_en; pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN; /* set postdiv */ val = readl(pll->pd_addr); val &= ~(POSTDIV_MASK << pll->data->pd_shift); val |= (ffs(postdiv) - 1) << pll->data->pd_shift; /* postdiv and pcw need to set at the same time if on same register */ if (pll->pd_addr != pll->pcw_addr) { writel(val, pll->pd_addr); val = readl(pll->pcw_addr); } /* set pcw */ val &= ~GENMASK(pll->data->pcw_shift + pll->data->pcwbits - 1, pll->data->pcw_shift); val |= pcw << pll->data->pcw_shift; writel(val, pll->pcw_addr); con1 = readl(pll->base_addr + REG_CON1); if (pll_en) con1 |= CON0_PCW_CHG; writel(con1, pll->base_addr + REG_CON1); if (pll->tuner_addr) writel(con1 + 1, pll->tuner_addr); if (pll_en) udelay(20); } /* * mtk_pll_calc_values - calculate good values for a given input frequency. 
* @pll: The pll * @pcw: The pcw value (output) * @postdiv: The post divider (output) * @freq: The desired target frequency * @fin: The input frequency * */ static void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv, u32 freq, u32 fin) { unsigned long fmin = 1000 * MHZ; const struct mtk_pll_div_table *div_table = pll->data->div_table; u64 _pcw; u32 val; if (freq > pll->data->fmax) freq = pll->data->fmax; if (div_table) { if (freq > div_table[0].freq) freq = div_table[0].freq; for (val = 0; div_table[val + 1].freq != 0; val++) { if (freq > div_table[val + 1].freq) break; } *postdiv = 1 << val; } else { for (val = 0; val < 5; val++) { *postdiv = 1 << val; if ((u64)freq * *postdiv >= fmin) break; } } /* _pcw = freq * postdiv / fin * 2^pcwfbits */ _pcw = ((u64)freq << val) << (pll->data->pcwbits - INTEGER_BITS); do_div(_pcw, fin); *pcw = (u32)_pcw; } static int mtk_pll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); u32 pcw = 0; u32 postdiv; mtk_pll_calc_values(pll, &pcw, &postdiv, rate, parent_rate); mtk_pll_set_rate_regs(pll, pcw, postdiv); return 0; } static unsigned long mtk_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); u32 postdiv; u32 pcw; postdiv = (readl(pll->pd_addr) >> pll->data->pd_shift) & POSTDIV_MASK; postdiv = 1 << postdiv; pcw = readl(pll->pcw_addr) >> pll->data->pcw_shift; pcw &= GENMASK(pll->data->pcwbits - 1, 0); return __mtk_pll_recalc_rate(pll, parent_rate, pcw, postdiv); } static long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); u32 pcw = 0; int postdiv; mtk_pll_calc_values(pll, &pcw, &postdiv, rate, *prate); return __mtk_pll_recalc_rate(pll, *prate, pcw, postdiv); } static int mtk_pll_prepare(struct clk_hw *hw) { struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); u32 r; r = readl(pll->pwr_addr) | CON0_PWR_ON; 
writel(r, pll->pwr_addr); udelay(1); r = readl(pll->pwr_addr) & ~CON0_ISO_EN; writel(r, pll->pwr_addr); udelay(1); r = readl(pll->base_addr + REG_CON0); r |= pll->data->en_mask; writel(r, pll->base_addr + REG_CON0); if (pll->tuner_addr) { r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN; writel(r, pll->tuner_addr); } udelay(20); if (pll->data->flags & HAVE_RST_BAR) { r = readl(pll->base_addr + REG_CON0); r |= pll->data->rst_bar_mask; writel(r, pll->base_addr + REG_CON0); } return 0; } static void mtk_pll_unprepare(struct clk_hw *hw) { struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); u32 r; if (pll->data->flags & HAVE_RST_BAR) { r = readl(pll->base_addr + REG_CON0); r &= ~pll->data->rst_bar_mask; writel(r, pll->base_addr + REG_CON0); } if (pll->tuner_addr) { r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN; writel(r, pll->tuner_addr); } r = readl(pll->base_addr + REG_CON0); r &= ~CON0_BASE_EN; writel(r, pll->base_addr + REG_CON0); r = readl(pll->pwr_addr) | CON0_ISO_EN; writel(r, pll->pwr_addr); r = readl(pll->pwr_addr) & ~CON0_PWR_ON; writel(r, pll->pwr_addr); } static const struct clk_ops mtk_pll_ops = { .is_prepared = mtk_pll_is_prepared, .prepare = mtk_pll_prepare, .unprepare = mtk_pll_unprepare, .recalc_rate = mtk_pll_recalc_rate, .round_rate = mtk_pll_round_rate, .set_rate = mtk_pll_set_rate, }; static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data, void __iomem *base) { struct mtk_clk_pll *pll; struct clk_init_data init = {}; struct clk *clk; const char *parent_name = "clk26m"; pll = kzalloc(sizeof(*pll), GFP_KERNEL); if (!pll) return ERR_PTR(-ENOMEM); pll->base_addr = base + data->reg; pll->pwr_addr = base + data->pwr_reg; pll->pd_addr = base + data->pd_reg; pll->pcw_addr = base + data->pcw_reg; if (data->tuner_reg) pll->tuner_addr = base + data->tuner_reg; pll->hw.init = &init; pll->data = data; init.name = data->name; init.ops = &mtk_pll_ops; init.parent_names = &parent_name; init.num_parents = 1; clk = clk_register(NULL, &pll->hw); if 
(IS_ERR(clk))
		kfree(pll);

	return clk;
}

/*
 * mtk_clk_register_plls - register all PLLs described by a table
 * @node: device tree node providing the MMIO base (mapping index 0)
 * @plls: array of PLL descriptions
 * @num_plls: number of entries in @plls
 * @clk_data: onecell table filled with the registered clks, indexed
 *            by each PLL's id
 *
 * A failure to register an individual PLL is logged and skipped so
 * the remaining PLLs are still registered.  If the ioremap fails the
 * function returns silently apart from the error message.
 */
void __init mtk_clk_register_plls(struct device_node *node,
		const struct mtk_pll_data *plls, int num_plls,
		struct clk_onecell_data *clk_data)
{
	void __iomem *base;
	int i;
	struct clk *clk;

	base = of_iomap(node, 0);
	if (!base) {
		pr_err("%s(): ioremap failed\n", __func__);
		return;
	}

	for (i = 0; i < num_plls; i++) {
		const struct mtk_pll_data *pll = &plls[i];

		clk = mtk_clk_register_pll(pll, base);

		if (IS_ERR(clk)) {
			pr_err("Failed to register clk %s: %ld\n",
					pll->name, PTR_ERR(clk));
			continue;
		}

		clk_data->clks[pll->id] = clk;
	}
}
gpl-2.0
kongzizaixian/linux
sound/firewire/bebob/bebob_focusrite.c
320
8349
/*
 * bebob_focusrite.c - a part of driver for BeBoB based devices
 *
 * Copyright (c) 2013-2014 Takashi Sakamoto
 *
 * Licensed under the terms of the GNU General Public License, version 2.
 */

#include "./bebob.h"

/* Channel-label strings shared by the meter/clock tables below. */
#define ANA_IN	"Analog In"
#define DIG_IN	"Digital In"
#define ANA_OUT	"Analog Out"
#define DIG_OUT	"Digital Out"
#define STM_IN	"Stream In"

/* Base of the vendor register space; the offsets below are relative
 * to this address. */
#define SAFFIRE_ADDRESS_BASE			0x000100000000ULL

#define SAFFIRE_OFFSET_CLOCK_SOURCE		0x00f8
#define SAFFIREPRO_OFFSET_CLOCK_SOURCE		0x0174

/* whether sync to external device or not */
#define SAFFIRE_OFFSET_CLOCK_SYNC_EXT		0x013c
#define SAFFIRE_LE_OFFSET_CLOCK_SYNC_EXT	0x0432
#define SAFFIREPRO_OFFSET_CLOCK_SYNC_EXT	0x0164

#define SAFFIRE_CLOCK_SOURCE_INTERNAL		0
#define SAFFIRE_CLOCK_SOURCE_SPDIF		1

/* clock sources as returned from register of Saffire Pro 10 and 26 */
#define SAFFIREPRO_CLOCK_SOURCE_INTERNAL	0
#define SAFFIREPRO_CLOCK_SOURCE_SKIP		1 /* never used on hardware */
#define SAFFIREPRO_CLOCK_SOURCE_SPDIF		2
#define SAFFIREPRO_CLOCK_SOURCE_ADAT1		3 /* not used on s.pro. 10 */
#define SAFFIREPRO_CLOCK_SOURCE_ADAT2		4 /* not used on s.pro. 10 */
#define SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK	5
#define SAFFIREPRO_CLOCK_SOURCE_COUNT		6

/* S/PDIF, ADAT1, ADAT2 is enabled or not.
three quadlets */ #define SAFFIREPRO_ENABLE_DIG_IFACES 0x01a4 /* saffirepro has its own parameter for sampling frequency */ #define SAFFIREPRO_RATE_NOREBOOT 0x01cc /* index is the value for this register */ static const unsigned int rates[] = { [0] = 0, [1] = 44100, [2] = 48000, [3] = 88200, [4] = 96000, [5] = 176400, [6] = 192000 }; /* saffire(no label)/saffire LE has metering */ #define SAFFIRE_OFFSET_METER 0x0100 #define SAFFIRE_LE_OFFSET_METER 0x0168 static inline int saffire_read_block(struct snd_bebob *bebob, u64 offset, u32 *buf, unsigned int size) { unsigned int i; int err; __be32 *tmp = (__be32 *)buf; err = snd_fw_transaction(bebob->unit, TCODE_READ_BLOCK_REQUEST, SAFFIRE_ADDRESS_BASE + offset, tmp, size, 0); if (err < 0) goto end; for (i = 0; i < size / sizeof(u32); i++) buf[i] = be32_to_cpu(tmp[i]); end: return err; } static inline int saffire_read_quad(struct snd_bebob *bebob, u64 offset, u32 *value) { int err; __be32 tmp; err = snd_fw_transaction(bebob->unit, TCODE_READ_QUADLET_REQUEST, SAFFIRE_ADDRESS_BASE + offset, &tmp, sizeof(__be32), 0); if (err < 0) goto end; *value = be32_to_cpu(tmp); end: return err; } static inline int saffire_write_quad(struct snd_bebob *bebob, u64 offset, u32 value) { __be32 data = cpu_to_be32(value); return snd_fw_transaction(bebob->unit, TCODE_WRITE_QUADLET_REQUEST, SAFFIRE_ADDRESS_BASE + offset, &data, sizeof(__be32), 0); } static char *const saffirepro_10_clk_src_labels[] = { SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "Word Clock" }; static char *const saffirepro_26_clk_src_labels[] = { SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "ADAT1", "ADAT2", "Word Clock" }; /* Value maps between registers and labels for SaffirePro 10/26. 
*/ static const signed char saffirepro_clk_maps[][SAFFIREPRO_CLOCK_SOURCE_COUNT] = { /* SaffirePro 10 */ [0] = { [SAFFIREPRO_CLOCK_SOURCE_INTERNAL] = 0, [SAFFIREPRO_CLOCK_SOURCE_SKIP] = -1, /* not supported */ [SAFFIREPRO_CLOCK_SOURCE_SPDIF] = 1, [SAFFIREPRO_CLOCK_SOURCE_ADAT1] = -1, /* not supported */ [SAFFIREPRO_CLOCK_SOURCE_ADAT2] = -1, /* not supported */ [SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK] = 2, }, /* SaffirePro 26 */ [1] = { [SAFFIREPRO_CLOCK_SOURCE_INTERNAL] = 0, [SAFFIREPRO_CLOCK_SOURCE_SKIP] = -1, /* not supported */ [SAFFIREPRO_CLOCK_SOURCE_SPDIF] = 1, [SAFFIREPRO_CLOCK_SOURCE_ADAT1] = 2, [SAFFIREPRO_CLOCK_SOURCE_ADAT2] = 3, [SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK] = 4, } }; static int saffirepro_both_clk_freq_get(struct snd_bebob *bebob, unsigned int *rate) { u32 id; int err; err = saffire_read_quad(bebob, SAFFIREPRO_RATE_NOREBOOT, &id); if (err < 0) goto end; if (id >= ARRAY_SIZE(rates)) err = -EIO; else *rate = rates[id]; end: return err; } static int saffirepro_both_clk_freq_set(struct snd_bebob *bebob, unsigned int rate) { u32 id; for (id = 0; id < ARRAY_SIZE(rates); id++) { if (rates[id] == rate) break; } if (id == ARRAY_SIZE(rates)) return -EINVAL; return saffire_write_quad(bebob, SAFFIREPRO_RATE_NOREBOOT, id); } /* * query hardware for current clock source, return our internally * used clock index in *id, depending on hardware. */ static int saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id) { int err; u32 value; /* clock source read from hw register */ const signed char *map; err = saffire_read_quad(bebob, SAFFIREPRO_OFFSET_CLOCK_SOURCE, &value); if (err < 0) goto end; /* depending on hardware, use a different mapping */ if (bebob->spec->clock->labels == saffirepro_10_clk_src_labels) map = saffirepro_clk_maps[0]; else map = saffirepro_clk_maps[1]; /* In a case that this driver cannot handle the value of register. 
*/ if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) { err = -EIO; goto end; } *id = (unsigned int)map[value]; end: return err; } struct snd_bebob_spec saffire_le_spec; static char *const saffire_both_clk_src_labels[] = { SND_BEBOB_CLOCK_INTERNAL, "S/PDIF" }; static int saffire_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id) { int err; u32 value; err = saffire_read_quad(bebob, SAFFIRE_OFFSET_CLOCK_SOURCE, &value); if (err >= 0) *id = 0xff & value; return err; }; static char *const saffire_le_meter_labels[] = { ANA_IN, ANA_IN, DIG_IN, ANA_OUT, ANA_OUT, ANA_OUT, ANA_OUT, STM_IN, STM_IN }; static char *const saffire_meter_labels[] = { ANA_IN, ANA_IN, STM_IN, STM_IN, STM_IN, STM_IN, STM_IN, }; static int saffire_meter_get(struct snd_bebob *bebob, u32 *buf, unsigned int size) { struct snd_bebob_meter_spec *spec = bebob->spec->meter; unsigned int channels; u64 offset; int err; if (spec->labels == saffire_le_meter_labels) offset = SAFFIRE_LE_OFFSET_METER; else offset = SAFFIRE_OFFSET_METER; channels = spec->num * 2; if (size < channels * sizeof(u32)) return -EIO; err = saffire_read_block(bebob, offset, buf, size); if (err >= 0 && spec->labels == saffire_le_meter_labels) { swap(buf[1], buf[3]); swap(buf[2], buf[3]); swap(buf[3], buf[4]); swap(buf[7], buf[10]); swap(buf[8], buf[10]); swap(buf[9], buf[11]); swap(buf[11], buf[12]); swap(buf[15], buf[16]); } return err; } static struct snd_bebob_rate_spec saffirepro_both_rate_spec = { .get = &saffirepro_both_clk_freq_get, .set = &saffirepro_both_clk_freq_set, }; /* Saffire Pro 26 I/O */ static struct snd_bebob_clock_spec saffirepro_26_clk_spec = { .num = ARRAY_SIZE(saffirepro_26_clk_src_labels), .labels = saffirepro_26_clk_src_labels, .get = &saffirepro_both_clk_src_get, }; struct snd_bebob_spec saffirepro_26_spec = { .clock = &saffirepro_26_clk_spec, .rate = &saffirepro_both_rate_spec, .meter = NULL }; /* Saffire Pro 10 I/O */ static struct snd_bebob_clock_spec saffirepro_10_clk_spec = { .num = 
ARRAY_SIZE(saffirepro_10_clk_src_labels), .labels = saffirepro_10_clk_src_labels, .get = &saffirepro_both_clk_src_get, }; struct snd_bebob_spec saffirepro_10_spec = { .clock = &saffirepro_10_clk_spec, .rate = &saffirepro_both_rate_spec, .meter = NULL }; static struct snd_bebob_rate_spec saffire_both_rate_spec = { .get = &snd_bebob_stream_get_rate, .set = &snd_bebob_stream_set_rate, }; static struct snd_bebob_clock_spec saffire_both_clk_spec = { .num = ARRAY_SIZE(saffire_both_clk_src_labels), .labels = saffire_both_clk_src_labels, .get = &saffire_both_clk_src_get, }; /* Saffire LE */ static struct snd_bebob_meter_spec saffire_le_meter_spec = { .num = ARRAY_SIZE(saffire_le_meter_labels), .labels = saffire_le_meter_labels, .get = &saffire_meter_get, }; struct snd_bebob_spec saffire_le_spec = { .clock = &saffire_both_clk_spec, .rate = &saffire_both_rate_spec, .meter = &saffire_le_meter_spec }; /* Saffire */ static struct snd_bebob_meter_spec saffire_meter_spec = { .num = ARRAY_SIZE(saffire_meter_labels), .labels = saffire_meter_labels, .get = &saffire_meter_get, }; struct snd_bebob_spec saffire_spec = { .clock = &saffire_both_clk_spec, .rate = &saffire_both_rate_spec, .meter = &saffire_meter_spec };
gpl-2.0
droidcore/Hydra
net/ipv6/output_core.c
832
1091
/*
 * IPv6 library code, needed by static components when full IPv6 support is
 * not configured or static. These functions are needed by GSO/GRO implementation.
 */
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>

/*
 * Walk the IPv6 extension-header chain of @skb and return the offset
 * (from the network header) of the first header a fragment header may
 * be inserted before.  Hop-by-hop, routing and (pre-routing-header)
 * destination-option headers are per-fragment and skipped; anything
 * else, or a destination-option header after a routing header, stops
 * the walk.  *@nexthdr is left pointing at the nexthdr byte of the
 * last skipped header.
 *
 * NOTE(review): @offset is a u16 while @packet_len is unsigned int;
 * presumably the caller guarantees the option chain cannot push
 * offset past 64K -- worth confirming against how callers bound
 * the header area (later upstream hardening added explicit checks).
 */
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	/* bytes of header data present in the linear area */
	unsigned int packet_len = skb->tail - skb->network_header;
	int found_rhdr = 0;
	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#if IS_ENABLED(CONFIG_IPV6_MIP6)
			/* A home-address option makes this dest-opts
			 * header per-fragment, so keep skipping. */
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default :
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
	}

	return offset;
}
EXPORT_SYMBOL(ip6_find_1stfragopt);
gpl-2.0
alskjstl/linux
drivers/uwb/whci.c
1344
6437
/*
 * WHCI UWB Multi-interface Controller enumerator.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 *
 * This file is released under the GNU GPL v2.
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uwb/whci.h>
#include <linux/uwb/umc.h>

/* Per-card state: one umc_dev per capability; devs[] is allocated
 * with n_caps + 1 slots (see whci_probe()). */
struct whci_card {
	struct pci_dev *pci;
	void __iomem *uwbbase;
	u8 n_caps;
	struct umc_dev *devs[0];
};


/* Fix faulty HW :( -- patch up the capability descriptor for known
 * broken Intel parts before it is used to size/identify the umc_dev. */
static
u64 whci_capdata_quirks(struct whci_card *card, u64 capdata)
{
	u64 capdata_orig = capdata;
	struct pci_dev *pci_dev = card->pci;
	/* NOTE(review): 0004 is an octal literal (== 4); it looks
	 * deliberate but verify this device ID against the errata. */
	if (pci_dev->vendor == PCI_VENDOR_ID_INTEL
	    && (pci_dev->device == 0x0c3b || pci_dev->device == 0004)
	    && pci_dev->class == 0x0d1010) {
		switch (UWBCAPDATA_TO_CAP_ID(capdata)) {
			/* WLP capability has 0x100 bytes of aperture */
		case 0x80:
			capdata |= 0x40 << 8;
			break;
			/* WUSB capability has 0x80 bytes of aperture
			 * and ID is 1 */
		case 0x02:
			capdata &= ~0xffff;
			capdata |= 0x2001;
			break;
		}
	}
	if (capdata_orig != capdata)
		dev_warn(&pci_dev->dev,
			 "PCI v%04x d%04x c%06x#%02x: "
			 "corrected capdata from %016Lx to %016Lx\n",
			 pci_dev->vendor, pci_dev->device, pci_dev->class,
			 (unsigned)UWBCAPDATA_TO_CAP_ID(capdata),
			 (unsigned long long)capdata_orig,
			 (unsigned long long)capdata);
	return capdata;
}


/**
 * whci_wait_for - wait for a WHCI register to be set
 * @dev:    device, for the timeout error message
 * @reg:    register to poll
 * @mask:   bits of *@reg to compare
 * @result: value the masked bits must equal
 * @max_ms: give up after this many milliseconds
 * @tag:    label used in the timeout message
 *
 * Polls (for at most @max_ms ms) until '*@reg & @mask == @result'.
 * Returns 0 on success or -ETIMEDOUT; polls in 10 ms steps.
 */
int whci_wait_for(struct device *dev, u32 __iomem *reg, u32 mask, u32 result,
		  unsigned long max_ms, const char *tag)
{
	unsigned t = 0;
	u32 val;

	for (;;) {
		val = le_readl(reg);
		if ((val & mask) == result)
			break;
		if (t >= max_ms) {
			dev_err(dev, "%s timed out\n", tag);
			return -ETIMEDOUT;
		}
		msleep(10);
		t += 10;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(whci_wait_for);


/*
 * NOTE: the capinfo and capdata registers are slightly different
 * (size and cap-id fields). So for cap #0, we need to fill
 * in.
Size comes from the size of the register block * (statically calculated); cap_id comes from nowhere, we use * zero, that is reserved, for the radio controller, because * none was defined at the spec level. */ static int whci_add_cap(struct whci_card *card, int n) { struct umc_dev *umc; u64 capdata; int bar, err; umc = umc_device_create(&card->pci->dev, n); if (umc == NULL) return -ENOMEM; capdata = le_readq(card->uwbbase + UWBCAPDATA(n)); bar = UWBCAPDATA_TO_BAR(capdata) << 1; capdata = whci_capdata_quirks(card, capdata); /* Capability 0 is the radio controller. It's size is 32 * bytes (WHCI0.95[2.3, T2-9]). */ umc->version = UWBCAPDATA_TO_VERSION(capdata); umc->cap_id = n == 0 ? 0 : UWBCAPDATA_TO_CAP_ID(capdata); umc->bar = bar; umc->resource.start = pci_resource_start(card->pci, bar) + UWBCAPDATA_TO_OFFSET(capdata); umc->resource.end = umc->resource.start + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1; umc->resource.name = dev_name(&umc->dev); umc->resource.flags = card->pci->resource[bar].flags; umc->resource.parent = &card->pci->resource[bar]; umc->irq = card->pci->irq; err = umc_device_register(umc); if (err < 0) goto error; card->devs[n] = umc; return 0; error: kfree(umc); return err; } static void whci_del_cap(struct whci_card *card, int n) { struct umc_dev *umc = card->devs[n]; umc_device_unregister(umc); } static int whci_n_caps(struct pci_dev *pci) { void __iomem *uwbbase; u64 capinfo; uwbbase = pci_iomap(pci, 0, 8); if (!uwbbase) return -ENOMEM; capinfo = le_readq(uwbbase + UWBCAPINFO); pci_iounmap(pci, uwbbase); return UWBCAPINFO_TO_N_CAPS(capinfo); } static int whci_probe(struct pci_dev *pci, const struct pci_device_id *id) { struct whci_card *card; int err, n_caps, n; err = pci_enable_device(pci); if (err < 0) goto error; pci_enable_msi(pci); pci_set_master(pci); err = -ENXIO; if (!pci_set_dma_mask(pci, DMA_BIT_MASK(64))) pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64)); else if (!pci_set_dma_mask(pci, DMA_BIT_MASK(32))) 
pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)); else goto error_dma; err = n_caps = whci_n_caps(pci); if (n_caps < 0) goto error_ncaps; err = -ENOMEM; card = kzalloc(sizeof(struct whci_card) + sizeof(struct umc_dev *) * (n_caps + 1), GFP_KERNEL); if (card == NULL) goto error_kzalloc; card->pci = pci; card->n_caps = n_caps; err = -EBUSY; if (!request_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps), "whci (capability data)")) goto error_request_memregion; err = -ENOMEM; card->uwbbase = pci_iomap(pci, 0, UWBCAPDATA_SIZE(card->n_caps)); if (!card->uwbbase) goto error_iomap; /* Add each capability. */ for (n = 0; n <= card->n_caps; n++) { err = whci_add_cap(card, n); if (err < 0 && n == 0) { dev_err(&pci->dev, "cannot bind UWB radio controller:" " %d\n", err); goto error_bind; } if (err < 0) dev_warn(&pci->dev, "warning: cannot bind capability " "#%u: %d\n", n, err); } pci_set_drvdata(pci, card); return 0; error_bind: pci_iounmap(pci, card->uwbbase); error_iomap: release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps)); error_request_memregion: kfree(card); error_kzalloc: error_ncaps: error_dma: pci_disable_msi(pci); pci_disable_device(pci); error: return err; } static void whci_remove(struct pci_dev *pci) { struct whci_card *card = pci_get_drvdata(pci); int n; pci_set_drvdata(pci, NULL); /* Unregister each capability in reverse (so the master device * is unregistered last). 
*/ for (n = card->n_caps; n >= 0 ; n--) whci_del_cap(card, n); pci_iounmap(pci, card->uwbbase); release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps)); kfree(card); pci_disable_msi(pci); pci_disable_device(pci); } static struct pci_device_id whci_id_table[] = { { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, { 0 }, }; MODULE_DEVICE_TABLE(pci, whci_id_table); static struct pci_driver whci_driver = { .name = "whci", .id_table = whci_id_table, .probe = whci_probe, .remove = whci_remove, }; module_pci_driver(whci_driver); MODULE_DESCRIPTION("WHCI UWB Multi-interface Controller enumerator"); MODULE_AUTHOR("Cambridge Silicon Radio Ltd."); MODULE_LICENSE("GPL");
gpl-2.0
TeamExodus/kernel_huawei_angler
arch/arm/mach-omap2/prm44xx.c
2112
19925
/* * OMAP4 PRM module functions * * Copyright (C) 2011-2012 Texas Instruments, Inc. * Copyright (C) 2010 Nokia Corporation * Benoît Cousson * Paul Walmsley * Rajendra Nayak <rnayak@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/io.h> #include "soc.h" #include "iomap.h" #include "common.h" #include "vp.h" #include "prm44xx.h" #include "prm-regbits-44xx.h" #include "prcm44xx.h" #include "prminst44xx.h" #include "powerdomain.h" /* Static data */ static const struct omap_prcm_irq omap4_prcm_irqs[] = { OMAP_PRCM_IRQ("wkup", 0, 0), OMAP_PRCM_IRQ("io", 9, 1), }; static struct omap_prcm_irq_setup omap4_prcm_irq_setup = { .ack = OMAP4_PRM_IRQSTATUS_MPU_OFFSET, .mask = OMAP4_PRM_IRQENABLE_MPU_OFFSET, .nr_regs = 2, .irqs = omap4_prcm_irqs, .nr_irqs = ARRAY_SIZE(omap4_prcm_irqs), .irq = 11 + OMAP44XX_IRQ_GIC_START, .read_pending_irqs = &omap44xx_prm_read_pending_irqs, .ocp_barrier = &omap44xx_prm_ocp_barrier, .save_and_clear_irqen = &omap44xx_prm_save_and_clear_irqen, .restore_irqen = &omap44xx_prm_restore_irqen, }; /* * omap44xx_prm_reset_src_map - map from bits in the PRM_RSTST * hardware register (which are specific to OMAP44xx SoCs) to reset * source ID bit shifts (which is an OMAP SoC-independent * enumeration) */ static struct prm_reset_src_map omap44xx_prm_reset_src_map[] = { { OMAP4430_GLOBAL_WARM_SW_RST_SHIFT, OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT }, { OMAP4430_GLOBAL_COLD_RST_SHIFT, OMAP_GLOBAL_COLD_RST_SRC_ID_SHIFT }, { OMAP4430_MPU_SECURITY_VIOL_RST_SHIFT, OMAP_SECU_VIOL_RST_SRC_ID_SHIFT }, { OMAP4430_MPU_WDT_RST_SHIFT, OMAP_MPU_WD_RST_SRC_ID_SHIFT }, { OMAP4430_SECURE_WDT_RST_SHIFT, OMAP_SECU_WD_RST_SRC_ID_SHIFT }, { OMAP4430_EXTERNAL_WARM_RST_SHIFT, OMAP_EXTWARM_RST_SRC_ID_SHIFT }, { 
OMAP4430_VDD_MPU_VOLT_MGR_RST_SHIFT, OMAP_VDD_MPU_VM_RST_SRC_ID_SHIFT }, { OMAP4430_VDD_IVA_VOLT_MGR_RST_SHIFT, OMAP_VDD_IVA_VM_RST_SRC_ID_SHIFT }, { OMAP4430_VDD_CORE_VOLT_MGR_RST_SHIFT, OMAP_VDD_CORE_VM_RST_SRC_ID_SHIFT }, { OMAP4430_ICEPICK_RST_SHIFT, OMAP_ICEPICK_RST_SRC_ID_SHIFT }, { OMAP4430_C2C_RST_SHIFT, OMAP_C2C_RST_SRC_ID_SHIFT }, { -1, -1 }, }; /* PRM low-level functions */ /* Read a register in a CM/PRM instance in the PRM module */ u32 omap4_prm_read_inst_reg(s16 inst, u16 reg) { return __raw_readl(prm_base + inst + reg); } /* Write into a register in a CM/PRM instance in the PRM module */ void omap4_prm_write_inst_reg(u32 val, s16 inst, u16 reg) { __raw_writel(val, prm_base + inst + reg); } /* Read-modify-write a register in a PRM module. Caller must lock */ u32 omap4_prm_rmw_inst_reg_bits(u32 mask, u32 bits, s16 inst, s16 reg) { u32 v; v = omap4_prm_read_inst_reg(inst, reg); v &= ~mask; v |= bits; omap4_prm_write_inst_reg(v, inst, reg); return v; } /* PRM VP */ /* * struct omap4_vp - OMAP4 VP register access description. 
* @irqstatus_mpu: offset to IRQSTATUS_MPU register for VP * @tranxdone_status: VP_TRANXDONE_ST bitmask in PRM_IRQSTATUS_MPU reg */ struct omap4_vp { u32 irqstatus_mpu; u32 tranxdone_status; }; static struct omap4_vp omap4_vp[] = { [OMAP4_VP_VDD_MPU_ID] = { .irqstatus_mpu = OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET, .tranxdone_status = OMAP4430_VP_MPU_TRANXDONE_ST_MASK, }, [OMAP4_VP_VDD_IVA_ID] = { .irqstatus_mpu = OMAP4_PRM_IRQSTATUS_MPU_OFFSET, .tranxdone_status = OMAP4430_VP_IVA_TRANXDONE_ST_MASK, }, [OMAP4_VP_VDD_CORE_ID] = { .irqstatus_mpu = OMAP4_PRM_IRQSTATUS_MPU_OFFSET, .tranxdone_status = OMAP4430_VP_CORE_TRANXDONE_ST_MASK, }, }; u32 omap4_prm_vp_check_txdone(u8 vp_id) { struct omap4_vp *vp = &omap4_vp[vp_id]; u32 irqstatus; irqstatus = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, OMAP4430_PRM_OCP_SOCKET_INST, vp->irqstatus_mpu); return irqstatus & vp->tranxdone_status; } void omap4_prm_vp_clear_txdone(u8 vp_id) { struct omap4_vp *vp = &omap4_vp[vp_id]; omap4_prminst_write_inst_reg(vp->tranxdone_status, OMAP4430_PRM_PARTITION, OMAP4430_PRM_OCP_SOCKET_INST, vp->irqstatus_mpu); }; u32 omap4_prm_vcvp_read(u8 offset) { return omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST, offset); } void omap4_prm_vcvp_write(u32 val, u8 offset) { omap4_prminst_write_inst_reg(val, OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST, offset); } u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset) { return omap4_prminst_rmw_inst_reg_bits(mask, bits, OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST, offset); } static inline u32 _read_pending_irq_reg(u16 irqen_offs, u16 irqst_offs) { u32 mask, st; /* XXX read mask from RAM? 
*/ mask = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, irqen_offs); st = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, irqst_offs); return mask & st; } /** * omap44xx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events * @events: ptr to two consecutive u32s, preallocated by caller * * Read PRM_IRQSTATUS_MPU* bits, AND'ed with the currently-enabled PRM * MPU IRQs, and store the result into the two u32s pointed to by @events. * No return value. */ void omap44xx_prm_read_pending_irqs(unsigned long *events) { events[0] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_OFFSET, OMAP4_PRM_IRQSTATUS_MPU_OFFSET); events[1] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_2_OFFSET, OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); } /** * omap44xx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete * * Force any buffered writes to the PRM IP block to complete. Needed * by the PRM IRQ handler, which reads and writes directly to the IP * block, to avoid race conditions after acknowledging or clearing IRQ * bits. No return value. */ void omap44xx_prm_ocp_barrier(void) { omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_REVISION_PRM_OFFSET); } /** * omap44xx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU* regs * @saved_mask: ptr to a u32 array to save IRQENABLE bits * * Save the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers to * @saved_mask. @saved_mask must be allocated by the caller. * Intended to be used in the PRM interrupt handler suspend callback. * The OCP barrier is needed to ensure the write to disable PRM * interrupts reaches the PRM before returning; otherwise, spurious * interrupts might occur. No return value. 
*/ void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask) { saved_mask[0] = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQSTATUS_MPU_OFFSET); saved_mask[1] = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_OFFSET); omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); /* OCP barrier */ omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_REVISION_PRM_OFFSET); } /** * omap44xx_prm_restore_irqen - set PRM_IRQENABLE_MPU* registers from args * @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously * * Restore the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers from * @saved_mask. Intended to be used in the PRM interrupt handler resume * callback to restore values saved by omap44xx_prm_save_and_clear_irqen(). * No OCP barrier should be needed here; any pending PRM interrupts will fire * once the writes reach the PRM. No return value. */ void omap44xx_prm_restore_irqen(u32 *saved_mask) { omap4_prm_write_inst_reg(saved_mask[0], OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_OFFSET); omap4_prm_write_inst_reg(saved_mask[1], OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); } /** * omap44xx_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain * * Clear any previously-latched I/O wakeup events and ensure that the * I/O wakeup gates are aligned with the current mux settings. Works * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then * deasserting WUCLKIN and waiting for WUCLKOUT to be deasserted. * No return value. XXX Are the final two steps necessary? 
*/ void omap44xx_prm_reconfigure_io_chain(void) { int i = 0; /* Trigger WUCLKIN enable */ omap4_prm_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK, OMAP4430_WUCLK_CTRL_MASK, OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET); omap_test_timeout( (((omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET) & OMAP4430_WUCLK_STATUS_MASK) >> OMAP4430_WUCLK_STATUS_SHIFT) == 1), MAX_IOPAD_LATCH_TIME, i); if (i == MAX_IOPAD_LATCH_TIME) pr_warn("PRM: I/O chain clock line assertion timed out\n"); /* Trigger WUCLKIN disable */ omap4_prm_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK, 0x0, OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET); omap_test_timeout( (((omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET) & OMAP4430_WUCLK_STATUS_MASK) >> OMAP4430_WUCLK_STATUS_SHIFT) == 0), MAX_IOPAD_LATCH_TIME, i); if (i == MAX_IOPAD_LATCH_TIME) pr_warn("PRM: I/O chain clock line deassertion timed out\n"); return; } /** * omap44xx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches * * Activates the I/O wakeup event latches and allows events logged by * those latches to signal a wakeup event to the PRCM. For I/O wakeups * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and * omap44xx_prm_reconfigure_io_chain() must be called. No return value. */ static void __init omap44xx_prm_enable_io_wakeup(void) { omap4_prm_rmw_inst_reg_bits(OMAP4430_GLOBAL_WUEN_MASK, OMAP4430_GLOBAL_WUEN_MASK, OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET); } /** * omap44xx_prm_read_reset_sources - return the last SoC reset source * * Return a u32 representing the last reset sources of the SoC. The * returned reset source bits are standardized across OMAP SoCs. 
*/ static u32 omap44xx_prm_read_reset_sources(void) { struct prm_reset_src_map *p; u32 r = 0; u32 v; v = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, OMAP4_RM_RSTST); p = omap44xx_prm_reset_src_map; while (p->reg_shift >= 0 && p->std_shift >= 0) { if (v & (1 << p->reg_shift)) r |= 1 << p->std_shift; p++; } return r; } /** * omap44xx_prm_was_any_context_lost_old - was module hardware context lost? * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION) * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST) * @idx: CONTEXT register offset * * Return 1 if any bits were set in the *_CONTEXT_* register * identified by (@part, @inst, @idx), which means that some context * was lost for that module; otherwise, return 0. */ static bool omap44xx_prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx) { return (omap4_prminst_read_inst_reg(part, inst, idx)) ? 1 : 0; } /** * omap44xx_prm_clear_context_lost_flags_old - clear context loss flags * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION) * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST) * @idx: CONTEXT register offset * * Clear hardware context loss bits for the module identified by * (@part, @inst, @idx). No return value. XXX Writes to reserved bits; * is there a way to avoid this? 
*/ static void omap44xx_prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx) { omap4_prminst_write_inst_reg(0xffffffff, part, inst, idx); } /* Powerdomain low-level functions */ static int omap4_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst) { omap4_prminst_rmw_inst_reg_bits(OMAP_POWERSTATE_MASK, (pwrst << OMAP_POWERSTATE_SHIFT), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_read_next_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); v &= OMAP_POWERSTATE_MASK; v >>= OMAP_POWERSTATE_SHIFT; return v; } static int omap4_pwrdm_read_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= OMAP_POWERSTATEST_MASK; v >>= OMAP_POWERSTATEST_SHIFT; return v; } static int omap4_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= OMAP4430_LASTPOWERSTATEENTERED_MASK; v >>= OMAP4430_LASTPOWERSTATEENTERED_SHIFT; return v; } static int omap4_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm) { omap4_prminst_rmw_inst_reg_bits(OMAP4430_LOWPOWERSTATECHANGE_MASK, (1 << OMAP4430_LOWPOWERSTATECHANGE_SHIFT), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm) { omap4_prminst_rmw_inst_reg_bits(OMAP4430_LASTPOWERSTATEENTERED_MASK, OMAP4430_LASTPOWERSTATEENTERED_MASK, pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); return 0; } static int omap4_pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst) { u32 v; v = pwrst << __ffs(OMAP4430_LOGICRETSTATE_MASK); omap4_prminst_rmw_inst_reg_bits(OMAP4430_LOGICRETSTATE_MASK, v, pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 
bank, u8 pwrst) { u32 m; m = omap2_pwrdm_get_mem_bank_onstate_mask(bank); omap4_prminst_rmw_inst_reg_bits(m, (pwrst << __ffs(m)), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_set_mem_retst(struct powerdomain *pwrdm, u8 bank, u8 pwrst) { u32 m; m = omap2_pwrdm_get_mem_bank_retst_mask(bank); omap4_prminst_rmw_inst_reg_bits(m, (pwrst << __ffs(m)), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= OMAP4430_LOGICSTATEST_MASK; v >>= OMAP4430_LOGICSTATEST_SHIFT; return v; } static int omap4_pwrdm_read_logic_retst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); v &= OMAP4430_LOGICRETSTATE_MASK; v >>= OMAP4430_LOGICRETSTATE_SHIFT; return v; } /** * omap4_pwrdm_read_prev_logic_pwrst - read the previous logic powerstate * @pwrdm: struct powerdomain * to read the state for * * Reads the previous logic powerstate for a powerdomain. This * function must determine the previous logic powerstate by first * checking the previous powerstate for the domain. If that was OFF, * then logic has been lost. If previous state was RETENTION, the * function reads the setting for the next retention logic state to * see the actual value. In every other case, the logic is * retained. Returns either PWRDM_POWER_OFF or PWRDM_POWER_RET * depending whether the logic was retained or not. 
*/ static int omap4_pwrdm_read_prev_logic_pwrst(struct powerdomain *pwrdm) { int state; state = omap4_pwrdm_read_prev_pwrst(pwrdm); if (state == PWRDM_POWER_OFF) return PWRDM_POWER_OFF; if (state != PWRDM_POWER_RET) return PWRDM_POWER_RET; return omap4_pwrdm_read_logic_retst(pwrdm); } static int omap4_pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank) { u32 m, v; m = omap2_pwrdm_get_mem_bank_stst_mask(bank); v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= m; v >>= __ffs(m); return v; } static int omap4_pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank) { u32 m, v; m = omap2_pwrdm_get_mem_bank_retst_mask(bank); v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); v &= m; v >>= __ffs(m); return v; } /** * omap4_pwrdm_read_prev_mem_pwrst - reads the previous memory powerstate * @pwrdm: struct powerdomain * to read mem powerstate for * @bank: memory bank index * * Reads the previous memory powerstate for a powerdomain. This * function must determine the previous memory powerstate by first * checking the previous powerstate for the domain. If that was OFF, * then logic has been lost. If previous state was RETENTION, the * function reads the setting for the next memory retention state to * see the actual value. In every other case, the logic is * retained. Returns either PWRDM_POWER_OFF or PWRDM_POWER_RET * depending whether logic was retained or not. 
*/ static int omap4_pwrdm_read_prev_mem_pwrst(struct powerdomain *pwrdm, u8 bank) { int state; state = omap4_pwrdm_read_prev_pwrst(pwrdm); if (state == PWRDM_POWER_OFF) return PWRDM_POWER_OFF; if (state != PWRDM_POWER_RET) return PWRDM_POWER_RET; return omap4_pwrdm_read_mem_retst(pwrdm, bank); } static int omap4_pwrdm_wait_transition(struct powerdomain *pwrdm) { u32 c = 0; /* * REVISIT: pwrdm_wait_transition() may be better implemented * via a callback and a periodic timer check -- how long do we expect * powerdomain transitions to take? */ /* XXX Is this udelay() value meaningful? */ while ((omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST) & OMAP_INTRANSITION_MASK) && (c++ < PWRDM_TRANSITION_BAILOUT)) udelay(1); if (c > PWRDM_TRANSITION_BAILOUT) { pr_err("powerdomain: %s: waited too long to complete transition\n", pwrdm->name); return -EAGAIN; } pr_debug("powerdomain: completed transition in %d loops\n", c); return 0; } struct pwrdm_ops omap4_pwrdm_operations = { .pwrdm_set_next_pwrst = omap4_pwrdm_set_next_pwrst, .pwrdm_read_next_pwrst = omap4_pwrdm_read_next_pwrst, .pwrdm_read_pwrst = omap4_pwrdm_read_pwrst, .pwrdm_read_prev_pwrst = omap4_pwrdm_read_prev_pwrst, .pwrdm_set_lowpwrstchange = omap4_pwrdm_set_lowpwrstchange, .pwrdm_clear_all_prev_pwrst = omap4_pwrdm_clear_all_prev_pwrst, .pwrdm_set_logic_retst = omap4_pwrdm_set_logic_retst, .pwrdm_read_logic_pwrst = omap4_pwrdm_read_logic_pwrst, .pwrdm_read_prev_logic_pwrst = omap4_pwrdm_read_prev_logic_pwrst, .pwrdm_read_logic_retst = omap4_pwrdm_read_logic_retst, .pwrdm_read_mem_pwrst = omap4_pwrdm_read_mem_pwrst, .pwrdm_read_mem_retst = omap4_pwrdm_read_mem_retst, .pwrdm_read_prev_mem_pwrst = omap4_pwrdm_read_prev_mem_pwrst, .pwrdm_set_mem_onst = omap4_pwrdm_set_mem_onst, .pwrdm_set_mem_retst = omap4_pwrdm_set_mem_retst, .pwrdm_wait_transition = omap4_pwrdm_wait_transition, }; /* * XXX document */ static struct prm_ll_data omap44xx_prm_ll_data = { .read_reset_sources = 
&omap44xx_prm_read_reset_sources, .was_any_context_lost_old = &omap44xx_prm_was_any_context_lost_old, .clear_context_loss_flags_old = &omap44xx_prm_clear_context_loss_flags_old, }; int __init omap44xx_prm_init(void) { if (!cpu_is_omap44xx() && !soc_is_omap54xx()) return 0; return prm_register(&omap44xx_prm_ll_data); } static int __init omap44xx_prm_late_init(void) { if (!cpu_is_omap44xx()) return 0; omap44xx_prm_enable_io_wakeup(); return omap_prcm_register_chain_handler(&omap4_prcm_irq_setup); } omap_subsys_initcall(omap44xx_prm_late_init); static void __exit omap44xx_prm_exit(void) { if (!cpu_is_omap44xx()) return; /* Should never happen */ WARN(prm_unregister(&omap44xx_prm_ll_data), "%s: prm_ll_data function pointer mismatch\n", __func__); } __exitcall(omap44xx_prm_exit);
gpl-2.0
Ezekeel/android-3.0
sound/pci/intel8x0m.c
2368
38620
/* * ALSA modem driver for Intel ICH (i8x0) chipsets * * Copyright (c) 2000 Jaroslav Kysela <perex@perex.cz> * * This is modified (by Sasha Khapyorsky <sashak@alsa-project.org>) version * of ALSA ICH sound driver intel8x0.c . * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/ac97_codec.h> #include <sound/info.h> #include <sound/initval.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; " "SiS 7013; NVidia MCP/2/2S/3 modems"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Intel,82801AA-ICH}," "{Intel,82901AB-ICH0}," "{Intel,82801BA-ICH2}," "{Intel,82801CA-ICH3}," "{Intel,82801DB-ICH4}," "{Intel,ICH5}," "{Intel,ICH6}," "{Intel,ICH7}," "{Intel,MX440}," "{SiS,7013}," "{NVidia,NForce Modem}," "{NVidia,NForce2 Modem}," "{NVidia,NForce2s Modem}," "{NVidia,NForce3 Modem}," "{AMD,AMD768}}"); static int index = -2; /* Exclude the first card */ static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ static int ac97_clock; module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for Intel i8x0 
modemcard."); module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for Intel i8x0 modemcard."); module_param(ac97_clock, int, 0444); MODULE_PARM_DESC(ac97_clock, "AC'97 codec clock (0 = auto-detect)."); /* just for backward compatibility */ static int enable; module_param(enable, bool, 0444); /* * Direct registers */ enum { DEVICE_INTEL, DEVICE_SIS, DEVICE_ALI, DEVICE_NFORCE }; #define ICHREG(x) ICH_REG_##x #define DEFINE_REGSET(name,base) \ enum { \ ICH_REG_##name##_BDBAR = base + 0x0, /* dword - buffer descriptor list base address */ \ ICH_REG_##name##_CIV = base + 0x04, /* byte - current index value */ \ ICH_REG_##name##_LVI = base + 0x05, /* byte - last valid index */ \ ICH_REG_##name##_SR = base + 0x06, /* byte - status register */ \ ICH_REG_##name##_PICB = base + 0x08, /* word - position in current buffer */ \ ICH_REG_##name##_PIV = base + 0x0a, /* byte - prefetched index value */ \ ICH_REG_##name##_CR = base + 0x0b, /* byte - control register */ \ }; /* busmaster blocks */ DEFINE_REGSET(OFF, 0); /* offset */ /* values for each busmaster block */ /* LVI */ #define ICH_REG_LVI_MASK 0x1f /* SR */ #define ICH_FIFOE 0x10 /* FIFO error */ #define ICH_BCIS 0x08 /* buffer completion interrupt status */ #define ICH_LVBCI 0x04 /* last valid buffer completion interrupt */ #define ICH_CELV 0x02 /* current equals last valid */ #define ICH_DCH 0x01 /* DMA controller halted */ /* PIV */ #define ICH_REG_PIV_MASK 0x1f /* mask */ /* CR */ #define ICH_IOCE 0x10 /* interrupt on completion enable */ #define ICH_FEIE 0x08 /* fifo error interrupt enable */ #define ICH_LVBIE 0x04 /* last valid buffer interrupt enable */ #define ICH_RESETREGS 0x02 /* reset busmaster registers */ #define ICH_STARTBM 0x01 /* start busmaster operation */ /* global block */ #define ICH_REG_GLOB_CNT 0x3c /* dword - global control */ #define ICH_TRIE 0x00000040 /* tertiary resume interrupt enable */ #define ICH_SRIE 0x00000020 /* secondary resume interrupt enable */ #define ICH_PRIE 0x00000010 
/* primary resume interrupt enable */ #define ICH_ACLINK 0x00000008 /* AClink shut off */ #define ICH_AC97WARM 0x00000004 /* AC'97 warm reset */ #define ICH_AC97COLD 0x00000002 /* AC'97 cold reset */ #define ICH_GIE 0x00000001 /* GPI interrupt enable */ #define ICH_REG_GLOB_STA 0x40 /* dword - global status */ #define ICH_TRI 0x20000000 /* ICH4: tertiary (AC_SDIN2) resume interrupt */ #define ICH_TCR 0x10000000 /* ICH4: tertiary (AC_SDIN2) codec ready */ #define ICH_BCS 0x08000000 /* ICH4: bit clock stopped */ #define ICH_SPINT 0x04000000 /* ICH4: S/PDIF interrupt */ #define ICH_P2INT 0x02000000 /* ICH4: PCM2-In interrupt */ #define ICH_M2INT 0x01000000 /* ICH4: Mic2-In interrupt */ #define ICH_SAMPLE_CAP 0x00c00000 /* ICH4: sample capability bits (RO) */ #define ICH_MULTICHAN_CAP 0x00300000 /* ICH4: multi-channel capability bits (RO) */ #define ICH_MD3 0x00020000 /* modem power down semaphore */ #define ICH_AD3 0x00010000 /* audio power down semaphore */ #define ICH_RCS 0x00008000 /* read completion status */ #define ICH_BIT3 0x00004000 /* bit 3 slot 12 */ #define ICH_BIT2 0x00002000 /* bit 2 slot 12 */ #define ICH_BIT1 0x00001000 /* bit 1 slot 12 */ #define ICH_SRI 0x00000800 /* secondary (AC_SDIN1) resume interrupt */ #define ICH_PRI 0x00000400 /* primary (AC_SDIN0) resume interrupt */ #define ICH_SCR 0x00000200 /* secondary (AC_SDIN1) codec ready */ #define ICH_PCR 0x00000100 /* primary (AC_SDIN0) codec ready */ #define ICH_MCINT 0x00000080 /* MIC capture interrupt */ #define ICH_POINT 0x00000040 /* playback interrupt */ #define ICH_PIINT 0x00000020 /* capture interrupt */ #define ICH_NVSPINT 0x00000010 /* nforce spdif interrupt */ #define ICH_MOINT 0x00000004 /* modem playback interrupt */ #define ICH_MIINT 0x00000002 /* modem capture interrupt */ #define ICH_GSCI 0x00000001 /* GPI status change interrupt */ #define ICH_REG_ACC_SEMA 0x44 /* byte - codec write semaphore */ #define ICH_CAS 0x01 /* codec access semaphore */ #define ICH_MAX_FRAGS 32 /* max hw 
frags */ /* * */ enum { ICHD_MDMIN, ICHD_MDMOUT, ICHD_MDMLAST = ICHD_MDMOUT }; enum { ALID_MDMIN, ALID_MDMOUT, ALID_MDMLAST = ALID_MDMOUT }; #define get_ichdev(substream) (substream->runtime->private_data) struct ichdev { unsigned int ichd; /* ich device number */ unsigned long reg_offset; /* offset to bmaddr */ u32 *bdbar; /* CPU address (32bit) */ unsigned int bdbar_addr; /* PCI bus address (32bit) */ struct snd_pcm_substream *substream; unsigned int physbuf; /* physical address (32bit) */ unsigned int size; unsigned int fragsize; unsigned int fragsize1; unsigned int position; int frags; int lvi; int lvi_frag; int civ; int ack; int ack_reload; unsigned int ack_bit; unsigned int roff_sr; unsigned int roff_picb; unsigned int int_sta_mask; /* interrupt status mask */ unsigned int ali_slot; /* ALI DMA slot */ struct snd_ac97 *ac97; }; struct intel8x0m { unsigned int device_type; int irq; void __iomem *addr; void __iomem *bmaddr; struct pci_dev *pci; struct snd_card *card; int pcm_devs; struct snd_pcm *pcm[2]; struct ichdev ichd[2]; unsigned int in_ac97_init: 1; struct snd_ac97_bus *ac97_bus; struct snd_ac97 *ac97; spinlock_t reg_lock; struct snd_dma_buffer bdbars; u32 bdbars_count; u32 int_sta_reg; /* interrupt status register */ u32 int_sta_mask; /* interrupt status mask */ unsigned int pcm_pos_shift; }; static DEFINE_PCI_DEVICE_TABLE(snd_intel8x0m_ids) = { { PCI_VDEVICE(INTEL, 0x2416), DEVICE_INTEL }, /* 82801AA */ { PCI_VDEVICE(INTEL, 0x2426), DEVICE_INTEL }, /* 82901AB */ { PCI_VDEVICE(INTEL, 0x2446), DEVICE_INTEL }, /* 82801BA */ { PCI_VDEVICE(INTEL, 0x2486), DEVICE_INTEL }, /* ICH3 */ { PCI_VDEVICE(INTEL, 0x24c6), DEVICE_INTEL }, /* ICH4 */ { PCI_VDEVICE(INTEL, 0x24d6), DEVICE_INTEL }, /* ICH5 */ { PCI_VDEVICE(INTEL, 0x266d), DEVICE_INTEL }, /* ICH6 */ { PCI_VDEVICE(INTEL, 0x27dd), DEVICE_INTEL }, /* ICH7 */ { PCI_VDEVICE(INTEL, 0x7196), DEVICE_INTEL }, /* 440MX */ { PCI_VDEVICE(AMD, 0x7446), DEVICE_INTEL }, /* AMD768 */ { PCI_VDEVICE(SI, 0x7013), DEVICE_SIS }, 
/* SI7013 */ { PCI_VDEVICE(NVIDIA, 0x01c1), DEVICE_NFORCE }, /* NFORCE */ { PCI_VDEVICE(NVIDIA, 0x0069), DEVICE_NFORCE }, /* NFORCE2 */ { PCI_VDEVICE(NVIDIA, 0x0089), DEVICE_NFORCE }, /* NFORCE2s */ { PCI_VDEVICE(NVIDIA, 0x00d9), DEVICE_NFORCE }, /* NFORCE3 */ { PCI_VDEVICE(AMD, 0x746e), DEVICE_INTEL }, /* AMD8111 */ #if 0 { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */ #endif { 0, } }; MODULE_DEVICE_TABLE(pci, snd_intel8x0m_ids); /* * Lowlevel I/O - busmaster */ static inline u8 igetbyte(struct intel8x0m *chip, u32 offset) { return ioread8(chip->bmaddr + offset); } static inline u16 igetword(struct intel8x0m *chip, u32 offset) { return ioread16(chip->bmaddr + offset); } static inline u32 igetdword(struct intel8x0m *chip, u32 offset) { return ioread32(chip->bmaddr + offset); } static inline void iputbyte(struct intel8x0m *chip, u32 offset, u8 val) { iowrite8(val, chip->bmaddr + offset); } static inline void iputword(struct intel8x0m *chip, u32 offset, u16 val) { iowrite16(val, chip->bmaddr + offset); } static inline void iputdword(struct intel8x0m *chip, u32 offset, u32 val) { iowrite32(val, chip->bmaddr + offset); } /* * Lowlevel I/O - AC'97 registers */ static inline u16 iagetword(struct intel8x0m *chip, u32 offset) { return ioread16(chip->addr + offset); } static inline void iaputword(struct intel8x0m *chip, u32 offset, u16 val) { iowrite16(val, chip->addr + offset); } /* * Basic I/O */ /* * access to AC97 codec via normal i/o (for ICH and SIS7013) */ /* return the GLOB_STA bit for the corresponding codec */ static unsigned int get_ich_codec_bit(struct intel8x0m *chip, unsigned int codec) { static unsigned int codec_bit[3] = { ICH_PCR, ICH_SCR, ICH_TCR }; if (snd_BUG_ON(codec >= 3)) return ICH_PCR; return codec_bit[codec]; } static int snd_intel8x0m_codec_semaphore(struct intel8x0m *chip, unsigned int codec) { int time; if (codec > 1) return -EIO; codec = get_ich_codec_bit(chip, codec); /* codec ready ? 
*/ if ((igetdword(chip, ICHREG(GLOB_STA)) & codec) == 0) return -EIO; /* Anyone holding a semaphore for 1 msec should be shot... */ time = 100; do { if (!(igetbyte(chip, ICHREG(ACC_SEMA)) & ICH_CAS)) return 0; udelay(10); } while (time--); /* access to some forbidden (non existent) ac97 registers will not * reset the semaphore. So even if you don't get the semaphore, still * continue the access. We don't need the semaphore anyway. */ snd_printk(KERN_ERR "codec_semaphore: semaphore is not ready [0x%x][0x%x]\n", igetbyte(chip, ICHREG(ACC_SEMA)), igetdword(chip, ICHREG(GLOB_STA))); iagetword(chip, 0); /* clear semaphore flag */ /* I don't care about the semaphore */ return -EBUSY; } static void snd_intel8x0m_codec_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct intel8x0m *chip = ac97->private_data; if (snd_intel8x0m_codec_semaphore(chip, ac97->num) < 0) { if (! chip->in_ac97_init) snd_printk(KERN_ERR "codec_write %d: semaphore is not ready for register 0x%x\n", ac97->num, reg); } iaputword(chip, reg + ac97->num * 0x80, val); } static unsigned short snd_intel8x0m_codec_read(struct snd_ac97 *ac97, unsigned short reg) { struct intel8x0m *chip = ac97->private_data; unsigned short res; unsigned int tmp; if (snd_intel8x0m_codec_semaphore(chip, ac97->num) < 0) { if (! chip->in_ac97_init) snd_printk(KERN_ERR "codec_read %d: semaphore is not ready for register 0x%x\n", ac97->num, reg); res = 0xffff; } else { res = iagetword(chip, reg + ac97->num * 0x80); if ((tmp = igetdword(chip, ICHREG(GLOB_STA))) & ICH_RCS) { /* reset RCS and preserve other R/WC bits */ iputdword(chip, ICHREG(GLOB_STA), tmp & ~(ICH_SRI|ICH_PRI|ICH_TRI|ICH_GSCI)); if (! 
chip->in_ac97_init) snd_printk(KERN_ERR "codec_read %d: read timeout for register 0x%x\n", ac97->num, reg); res = 0xffff; } } if (reg == AC97_GPIO_STATUS) iagetword(chip, 0); /* clear semaphore */ return res; } /* * DMA I/O */ static void snd_intel8x0m_setup_periods(struct intel8x0m *chip, struct ichdev *ichdev) { int idx; u32 *bdbar = ichdev->bdbar; unsigned long port = ichdev->reg_offset; iputdword(chip, port + ICH_REG_OFF_BDBAR, ichdev->bdbar_addr); if (ichdev->size == ichdev->fragsize) { ichdev->ack_reload = ichdev->ack = 2; ichdev->fragsize1 = ichdev->fragsize >> 1; for (idx = 0; idx < (ICH_REG_LVI_MASK + 1) * 2; idx += 4) { bdbar[idx + 0] = cpu_to_le32(ichdev->physbuf); bdbar[idx + 1] = cpu_to_le32(0x80000000 | /* interrupt on completion */ ichdev->fragsize1 >> chip->pcm_pos_shift); bdbar[idx + 2] = cpu_to_le32(ichdev->physbuf + (ichdev->size >> 1)); bdbar[idx + 3] = cpu_to_le32(0x80000000 | /* interrupt on completion */ ichdev->fragsize1 >> chip->pcm_pos_shift); } ichdev->frags = 2; } else { ichdev->ack_reload = ichdev->ack = 1; ichdev->fragsize1 = ichdev->fragsize; for (idx = 0; idx < (ICH_REG_LVI_MASK + 1) * 2; idx += 2) { bdbar[idx + 0] = cpu_to_le32(ichdev->physbuf + (((idx >> 1) * ichdev->fragsize) % ichdev->size)); bdbar[idx + 1] = cpu_to_le32(0x80000000 | /* interrupt on completion */ ichdev->fragsize >> chip->pcm_pos_shift); /* printk(KERN_DEBUG "bdbar[%i] = 0x%x [0x%x]\n", idx + 0, bdbar[idx + 0], bdbar[idx + 1]); */ } ichdev->frags = ichdev->size / ichdev->fragsize; } iputbyte(chip, port + ICH_REG_OFF_LVI, ichdev->lvi = ICH_REG_LVI_MASK); ichdev->civ = 0; iputbyte(chip, port + ICH_REG_OFF_CIV, 0); ichdev->lvi_frag = ICH_REG_LVI_MASK % ichdev->frags; ichdev->position = 0; #if 0 printk(KERN_DEBUG "lvi_frag = %i, frags = %i, period_size = 0x%x, " "period_size1 = 0x%x\n", ichdev->lvi_frag, ichdev->frags, ichdev->fragsize, ichdev->fragsize1); #endif /* clear interrupts */ iputbyte(chip, port + ichdev->roff_sr, ICH_FIFOE | ICH_BCIS | ICH_LVBCI); } /* * 
Interrupt handler */ static inline void snd_intel8x0m_update(struct intel8x0m *chip, struct ichdev *ichdev) { unsigned long port = ichdev->reg_offset; int civ, i, step; int ack = 0; civ = igetbyte(chip, port + ICH_REG_OFF_CIV); if (civ == ichdev->civ) { // snd_printd("civ same %d\n", civ); step = 1; ichdev->civ++; ichdev->civ &= ICH_REG_LVI_MASK; } else { step = civ - ichdev->civ; if (step < 0) step += ICH_REG_LVI_MASK + 1; // if (step != 1) // snd_printd("step = %d, %d -> %d\n", step, ichdev->civ, civ); ichdev->civ = civ; } ichdev->position += step * ichdev->fragsize1; ichdev->position %= ichdev->size; ichdev->lvi += step; ichdev->lvi &= ICH_REG_LVI_MASK; iputbyte(chip, port + ICH_REG_OFF_LVI, ichdev->lvi); for (i = 0; i < step; i++) { ichdev->lvi_frag++; ichdev->lvi_frag %= ichdev->frags; ichdev->bdbar[ichdev->lvi * 2] = cpu_to_le32(ichdev->physbuf + ichdev->lvi_frag * ichdev->fragsize1); #if 0 printk(KERN_DEBUG "new: bdbar[%i] = 0x%x [0x%x], " "prefetch = %i, all = 0x%x, 0x%x\n", ichdev->lvi * 2, ichdev->bdbar[ichdev->lvi * 2], ichdev->bdbar[ichdev->lvi * 2 + 1], inb(ICH_REG_OFF_PIV + port), inl(port + 4), inb(port + ICH_REG_OFF_CR)); #endif if (--ichdev->ack == 0) { ichdev->ack = ichdev->ack_reload; ack = 1; } } if (ack && ichdev->substream) { spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(ichdev->substream); spin_lock(&chip->reg_lock); } iputbyte(chip, port + ichdev->roff_sr, ICH_FIFOE | ICH_BCIS | ICH_LVBCI); } static irqreturn_t snd_intel8x0m_interrupt(int irq, void *dev_id) { struct intel8x0m *chip = dev_id; struct ichdev *ichdev; unsigned int status; unsigned int i; spin_lock(&chip->reg_lock); status = igetdword(chip, chip->int_sta_reg); if (status == 0xffffffff) { /* we are not yet resumed */ spin_unlock(&chip->reg_lock); return IRQ_NONE; } if ((status & chip->int_sta_mask) == 0) { if (status) iputdword(chip, chip->int_sta_reg, status); spin_unlock(&chip->reg_lock); return IRQ_NONE; } for (i = 0; i < chip->bdbars_count; i++) { ichdev = 
&chip->ichd[i]; if (status & ichdev->int_sta_mask) snd_intel8x0m_update(chip, ichdev); } /* ack them */ iputdword(chip, chip->int_sta_reg, status & chip->int_sta_mask); spin_unlock(&chip->reg_lock); return IRQ_HANDLED; } /* * PCM part */ static int snd_intel8x0m_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct intel8x0m *chip = snd_pcm_substream_chip(substream); struct ichdev *ichdev = get_ichdev(substream); unsigned char val = 0; unsigned long port = ichdev->reg_offset; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: val = ICH_IOCE | ICH_STARTBM; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: val = 0; break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: val = ICH_IOCE; break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: val = ICH_IOCE | ICH_STARTBM; break; default: return -EINVAL; } iputbyte(chip, port + ICH_REG_OFF_CR, val); if (cmd == SNDRV_PCM_TRIGGER_STOP) { /* wait until DMA stopped */ while (!(igetbyte(chip, port + ichdev->roff_sr) & ICH_DCH)) ; /* reset whole DMA things */ iputbyte(chip, port + ICH_REG_OFF_CR, ICH_RESETREGS); } return 0; } static int snd_intel8x0m_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_intel8x0m_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static snd_pcm_uframes_t snd_intel8x0m_pcm_pointer(struct snd_pcm_substream *substream) { struct intel8x0m *chip = snd_pcm_substream_chip(substream); struct ichdev *ichdev = get_ichdev(substream); size_t ptr1, ptr; ptr1 = igetword(chip, ichdev->reg_offset + ichdev->roff_picb) << chip->pcm_pos_shift; if (ptr1 != 0) ptr = ichdev->fragsize1 - ptr1; else ptr = 0; ptr += ichdev->position; if (ptr >= ichdev->size) return 0; return bytes_to_frames(substream->runtime, ptr); } static int snd_intel8x0m_pcm_prepare(struct snd_pcm_substream *substream) { struct intel8x0m *chip = 
snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ichdev *ichdev = get_ichdev(substream); ichdev->physbuf = runtime->dma_addr; ichdev->size = snd_pcm_lib_buffer_bytes(substream); ichdev->fragsize = snd_pcm_lib_period_bytes(substream); snd_ac97_write(ichdev->ac97, AC97_LINE1_RATE, runtime->rate); snd_ac97_write(ichdev->ac97, AC97_LINE1_LEVEL, 0); snd_intel8x0m_setup_periods(chip, ichdev); return 0; } static struct snd_pcm_hardware snd_intel8x0m_stream = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_KNOT, .rate_min = 8000, .rate_max = 16000, .channels_min = 1, .channels_max = 1, .buffer_bytes_max = 64 * 1024, .period_bytes_min = 32, .period_bytes_max = 64 * 1024, .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static int snd_intel8x0m_pcm_open(struct snd_pcm_substream *substream, struct ichdev *ichdev) { static unsigned int rates[] = { 8000, 9600, 12000, 16000 }; static struct snd_pcm_hw_constraint_list hw_constraints_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; struct snd_pcm_runtime *runtime = substream->runtime; int err; ichdev->substream = substream; runtime->hw = snd_intel8x0m_stream; err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates); if ( err < 0 ) return err; runtime->private_data = ichdev; return 0; } static int snd_intel8x0m_playback_open(struct snd_pcm_substream *substream) { struct intel8x0m *chip = snd_pcm_substream_chip(substream); return snd_intel8x0m_pcm_open(substream, &chip->ichd[ICHD_MDMOUT]); } static int snd_intel8x0m_playback_close(struct snd_pcm_substream *substream) { struct intel8x0m *chip = snd_pcm_substream_chip(substream); chip->ichd[ICHD_MDMOUT].substream = NULL; return 0; } static int 
snd_intel8x0m_capture_open(struct snd_pcm_substream *substream) { struct intel8x0m *chip = snd_pcm_substream_chip(substream); return snd_intel8x0m_pcm_open(substream, &chip->ichd[ICHD_MDMIN]); } static int snd_intel8x0m_capture_close(struct snd_pcm_substream *substream) { struct intel8x0m *chip = snd_pcm_substream_chip(substream); chip->ichd[ICHD_MDMIN].substream = NULL; return 0; } static struct snd_pcm_ops snd_intel8x0m_playback_ops = { .open = snd_intel8x0m_playback_open, .close = snd_intel8x0m_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_intel8x0m_hw_params, .hw_free = snd_intel8x0m_hw_free, .prepare = snd_intel8x0m_pcm_prepare, .trigger = snd_intel8x0m_pcm_trigger, .pointer = snd_intel8x0m_pcm_pointer, }; static struct snd_pcm_ops snd_intel8x0m_capture_ops = { .open = snd_intel8x0m_capture_open, .close = snd_intel8x0m_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_intel8x0m_hw_params, .hw_free = snd_intel8x0m_hw_free, .prepare = snd_intel8x0m_pcm_prepare, .trigger = snd_intel8x0m_pcm_trigger, .pointer = snd_intel8x0m_pcm_pointer, }; struct ich_pcm_table { char *suffix; struct snd_pcm_ops *playback_ops; struct snd_pcm_ops *capture_ops; size_t prealloc_size; size_t prealloc_max_size; int ac97_idx; }; static int __devinit snd_intel8x0m_pcm1(struct intel8x0m *chip, int device, struct ich_pcm_table *rec) { struct snd_pcm *pcm; int err; char name[32]; if (rec->suffix) sprintf(name, "Intel ICH - %s", rec->suffix); else strcpy(name, "Intel ICH"); err = snd_pcm_new(chip->card, name, device, rec->playback_ops ? 1 : 0, rec->capture_ops ? 
1 : 0, &pcm); if (err < 0) return err; if (rec->playback_ops) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, rec->playback_ops); if (rec->capture_ops) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, rec->capture_ops); pcm->private_data = chip; pcm->info_flags = 0; pcm->dev_class = SNDRV_PCM_CLASS_MODEM; if (rec->suffix) sprintf(pcm->name, "%s - %s", chip->card->shortname, rec->suffix); else strcpy(pcm->name, chip->card->shortname); chip->pcm[device] = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), rec->prealloc_size, rec->prealloc_max_size); return 0; } static struct ich_pcm_table intel_pcms[] __devinitdata = { { .suffix = "Modem", .playback_ops = &snd_intel8x0m_playback_ops, .capture_ops = &snd_intel8x0m_capture_ops, .prealloc_size = 32 * 1024, .prealloc_max_size = 64 * 1024, }, }; static int __devinit snd_intel8x0m_pcm(struct intel8x0m *chip) { int i, tblsize, device, err; struct ich_pcm_table *tbl, *rec; #if 1 tbl = intel_pcms; tblsize = 1; #else switch (chip->device_type) { case DEVICE_NFORCE: tbl = nforce_pcms; tblsize = ARRAY_SIZE(nforce_pcms); break; case DEVICE_ALI: tbl = ali_pcms; tblsize = ARRAY_SIZE(ali_pcms); break; default: tbl = intel_pcms; tblsize = 2; break; } #endif device = 0; for (i = 0; i < tblsize; i++) { rec = tbl + i; if (i > 0 && rec->ac97_idx) { /* activate PCM only when associated AC'97 codec */ if (! 
chip->ichd[rec->ac97_idx].ac97) continue; } err = snd_intel8x0m_pcm1(chip, device, rec); if (err < 0) return err; device++; } chip->pcm_devs = device; return 0; } /* * Mixer part */ static void snd_intel8x0m_mixer_free_ac97_bus(struct snd_ac97_bus *bus) { struct intel8x0m *chip = bus->private_data; chip->ac97_bus = NULL; } static void snd_intel8x0m_mixer_free_ac97(struct snd_ac97 *ac97) { struct intel8x0m *chip = ac97->private_data; chip->ac97 = NULL; } static int __devinit snd_intel8x0m_mixer(struct intel8x0m *chip, int ac97_clock) { struct snd_ac97_bus *pbus; struct snd_ac97_template ac97; struct snd_ac97 *x97; int err; unsigned int glob_sta = 0; static struct snd_ac97_bus_ops ops = { .write = snd_intel8x0m_codec_write, .read = snd_intel8x0m_codec_read, }; chip->in_ac97_init = 1; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = chip; ac97.private_free = snd_intel8x0m_mixer_free_ac97; ac97.scaps = AC97_SCAP_SKIP_AUDIO | AC97_SCAP_POWER_SAVE; glob_sta = igetdword(chip, ICHREG(GLOB_STA)); if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus)) < 0) goto __err; pbus->private_free = snd_intel8x0m_mixer_free_ac97_bus; if (ac97_clock >= 8000 && ac97_clock <= 48000) pbus->clock = ac97_clock; chip->ac97_bus = pbus; ac97.pci = chip->pci; ac97.num = glob_sta & ICH_SCR ? 
1 : 0; if ((err = snd_ac97_mixer(pbus, &ac97, &x97)) < 0) { snd_printk(KERN_ERR "Unable to initialize codec #%d\n", ac97.num); if (ac97.num == 0) goto __err; return err; } chip->ac97 = x97; if(ac97_is_modem(x97) && !chip->ichd[ICHD_MDMIN].ac97) { chip->ichd[ICHD_MDMIN].ac97 = x97; chip->ichd[ICHD_MDMOUT].ac97 = x97; } chip->in_ac97_init = 0; return 0; __err: /* clear the cold-reset bit for the next chance */ if (chip->device_type != DEVICE_ALI) iputdword(chip, ICHREG(GLOB_CNT), igetdword(chip, ICHREG(GLOB_CNT)) & ~ICH_AC97COLD); return err; } /* * */ static int snd_intel8x0m_ich_chip_init(struct intel8x0m *chip, int probing) { unsigned long end_time; unsigned int cnt, status, nstatus; /* put logic to right state */ /* first clear status bits */ status = ICH_RCS | ICH_MIINT | ICH_MOINT; cnt = igetdword(chip, ICHREG(GLOB_STA)); iputdword(chip, ICHREG(GLOB_STA), cnt & status); /* ACLink on, 2 channels */ cnt = igetdword(chip, ICHREG(GLOB_CNT)); cnt &= ~(ICH_ACLINK); /* finish cold or do warm reset */ cnt |= (cnt & ICH_AC97COLD) == 0 ? ICH_AC97COLD : ICH_AC97WARM; iputdword(chip, ICHREG(GLOB_CNT), cnt); usleep_range(500, 1000); /* give warm reset some time */ end_time = jiffies + HZ / 4; do { if ((igetdword(chip, ICHREG(GLOB_CNT)) & ICH_AC97WARM) == 0) goto __ok; schedule_timeout_uninterruptible(1); } while (time_after_eq(end_time, jiffies)); snd_printk(KERN_ERR "AC'97 warm reset still in progress? [0x%x]\n", igetdword(chip, ICHREG(GLOB_CNT))); return -EIO; __ok: if (probing) { /* wait for any codec ready status. * Once it becomes ready it should remain ready * as long as we do not disable the ac97 link. */ end_time = jiffies + HZ; do { status = igetdword(chip, ICHREG(GLOB_STA)) & (ICH_PCR | ICH_SCR | ICH_TCR); if (status) break; schedule_timeout_uninterruptible(1); } while (time_after_eq(end_time, jiffies)); if (! 
status) { /* no codec is found */ snd_printk(KERN_ERR "codec_ready: codec is not ready [0x%x]\n", igetdword(chip, ICHREG(GLOB_STA))); return -EIO; } /* up to two codecs (modem cannot be tertiary with ICH4) */ nstatus = ICH_PCR | ICH_SCR; /* wait for other codecs ready status. */ end_time = jiffies + HZ / 4; while (status != nstatus && time_after_eq(end_time, jiffies)) { schedule_timeout_uninterruptible(1); status |= igetdword(chip, ICHREG(GLOB_STA)) & nstatus; } } else { /* resume phase */ status = 0; if (chip->ac97) status |= get_ich_codec_bit(chip, chip->ac97->num); /* wait until all the probed codecs are ready */ end_time = jiffies + HZ; do { nstatus = igetdword(chip, ICHREG(GLOB_STA)) & (ICH_PCR | ICH_SCR | ICH_TCR); if (status == nstatus) break; schedule_timeout_uninterruptible(1); } while (time_after_eq(end_time, jiffies)); } if (chip->device_type == DEVICE_SIS) { /* unmute the output on SIS7012 */ iputword(chip, 0x4c, igetword(chip, 0x4c) | 1); } return 0; } static int snd_intel8x0m_chip_init(struct intel8x0m *chip, int probing) { unsigned int i; int err; if ((err = snd_intel8x0m_ich_chip_init(chip, probing)) < 0) return err; iagetword(chip, 0); /* clear semaphore flag */ /* disable interrupts */ for (i = 0; i < chip->bdbars_count; i++) iputbyte(chip, ICH_REG_OFF_CR + chip->ichd[i].reg_offset, 0x00); /* reset channels */ for (i = 0; i < chip->bdbars_count; i++) iputbyte(chip, ICH_REG_OFF_CR + chip->ichd[i].reg_offset, ICH_RESETREGS); /* initialize Buffer Descriptor Lists */ for (i = 0; i < chip->bdbars_count; i++) iputdword(chip, ICH_REG_OFF_BDBAR + chip->ichd[i].reg_offset, chip->ichd[i].bdbar_addr); return 0; } static int snd_intel8x0m_free(struct intel8x0m *chip) { unsigned int i; if (chip->irq < 0) goto __hw_end; /* disable interrupts */ for (i = 0; i < chip->bdbars_count; i++) iputbyte(chip, ICH_REG_OFF_CR + chip->ichd[i].reg_offset, 0x00); /* reset channels */ for (i = 0; i < chip->bdbars_count; i++) iputbyte(chip, ICH_REG_OFF_CR + 
chip->ichd[i].reg_offset, ICH_RESETREGS); __hw_end: if (chip->irq >= 0) free_irq(chip->irq, chip); if (chip->bdbars.area) snd_dma_free_pages(&chip->bdbars); if (chip->addr) pci_iounmap(chip->pci, chip->addr); if (chip->bmaddr) pci_iounmap(chip->pci, chip->bmaddr); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); return 0; } #ifdef CONFIG_PM /* * power management */ static int intel8x0m_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct intel8x0m *chip = card->private_data; int i; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); for (i = 0; i < chip->pcm_devs; i++) snd_pcm_suspend_all(chip->pcm[i]); snd_ac97_suspend(chip->ac97); if (chip->irq >= 0) { free_irq(chip->irq, chip); chip->irq = -1; } pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int intel8x0m_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct intel8x0m *chip = card->private_data; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "intel8x0m: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); if (request_irq(pci->irq, snd_intel8x0m_interrupt, IRQF_SHARED, card->shortname, chip)) { printk(KERN_ERR "intel8x0m: unable to grab IRQ %d, " "disabling device\n", pci->irq); snd_card_disconnect(card); return -EIO; } chip->irq = pci->irq; snd_intel8x0m_chip_init(chip, 0); snd_ac97_resume(chip->ac97); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM */ #ifdef CONFIG_PROC_FS static void snd_intel8x0m_proc_read(struct snd_info_entry * entry, struct snd_info_buffer *buffer) { struct intel8x0m *chip = entry->private_data; unsigned int tmp; snd_iprintf(buffer, "Intel8x0m\n\n"); if (chip->device_type == DEVICE_ALI) return; tmp = igetdword(chip, ICHREG(GLOB_STA)); snd_iprintf(buffer, 
"Global control : 0x%08x\n", igetdword(chip, ICHREG(GLOB_CNT))); snd_iprintf(buffer, "Global status : 0x%08x\n", tmp); snd_iprintf(buffer, "AC'97 codecs ready :%s%s%s%s\n", tmp & ICH_PCR ? " primary" : "", tmp & ICH_SCR ? " secondary" : "", tmp & ICH_TCR ? " tertiary" : "", (tmp & (ICH_PCR | ICH_SCR | ICH_TCR)) == 0 ? " none" : ""); } static void __devinit snd_intel8x0m_proc_init(struct intel8x0m * chip) { struct snd_info_entry *entry; if (! snd_card_proc_new(chip->card, "intel8x0m", &entry)) snd_info_set_text_ops(entry, chip, snd_intel8x0m_proc_read); } #else /* !CONFIG_PROC_FS */ #define snd_intel8x0m_proc_init(chip) #endif /* CONFIG_PROC_FS */ static int snd_intel8x0m_dev_free(struct snd_device *device) { struct intel8x0m *chip = device->device_data; return snd_intel8x0m_free(chip); } struct ich_reg_info { unsigned int int_sta_mask; unsigned int offset; }; static int __devinit snd_intel8x0m_create(struct snd_card *card, struct pci_dev *pci, unsigned long device_type, struct intel8x0m **r_intel8x0m) { struct intel8x0m *chip; int err; unsigned int i; unsigned int int_sta_masks; struct ichdev *ichdev; static struct snd_device_ops ops = { .dev_free = snd_intel8x0m_dev_free, }; static struct ich_reg_info intel_regs[2] = { { ICH_MIINT, 0 }, { ICH_MOINT, 0x10 }, }; struct ich_reg_info *tbl; *r_intel8x0m = NULL; if ((err = pci_enable_device(pci)) < 0) return err; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&chip->reg_lock); chip->device_type = device_type; chip->card = card; chip->pci = pci; chip->irq = -1; if ((err = pci_request_regions(pci, card->shortname)) < 0) { kfree(chip); pci_disable_device(pci); return err; } if (device_type == DEVICE_ALI) { /* ALI5455 has no ac97 region */ chip->bmaddr = pci_iomap(pci, 0, 0); goto port_inited; } if (pci_resource_flags(pci, 2) & IORESOURCE_MEM) /* ICH4 and Nforce */ chip->addr = pci_iomap(pci, 2, 0); else chip->addr = pci_iomap(pci, 0, 0); if 
(!chip->addr) { snd_printk(KERN_ERR "AC'97 space ioremap problem\n"); snd_intel8x0m_free(chip); return -EIO; } if (pci_resource_flags(pci, 3) & IORESOURCE_MEM) /* ICH4 */ chip->bmaddr = pci_iomap(pci, 3, 0); else chip->bmaddr = pci_iomap(pci, 1, 0); if (!chip->bmaddr) { snd_printk(KERN_ERR "Controller space ioremap problem\n"); snd_intel8x0m_free(chip); return -EIO; } port_inited: if (request_irq(pci->irq, snd_intel8x0m_interrupt, IRQF_SHARED, card->shortname, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_intel8x0m_free(chip); return -EBUSY; } chip->irq = pci->irq; pci_set_master(pci); synchronize_irq(chip->irq); /* initialize offsets */ chip->bdbars_count = 2; tbl = intel_regs; for (i = 0; i < chip->bdbars_count; i++) { ichdev = &chip->ichd[i]; ichdev->ichd = i; ichdev->reg_offset = tbl[i].offset; ichdev->int_sta_mask = tbl[i].int_sta_mask; if (device_type == DEVICE_SIS) { /* SiS 7013 swaps the registers */ ichdev->roff_sr = ICH_REG_OFF_PICB; ichdev->roff_picb = ICH_REG_OFF_SR; } else { ichdev->roff_sr = ICH_REG_OFF_SR; ichdev->roff_picb = ICH_REG_OFF_PICB; } if (device_type == DEVICE_ALI) ichdev->ali_slot = (ichdev->reg_offset - 0x40) / 0x10; } /* SIS7013 handles the pcm data in bytes, others are in words */ chip->pcm_pos_shift = (device_type == DEVICE_SIS) ? 
0 : 1; /* allocate buffer descriptor lists */ /* the start of each lists must be aligned to 8 bytes */ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), chip->bdbars_count * sizeof(u32) * ICH_MAX_FRAGS * 2, &chip->bdbars) < 0) { snd_intel8x0m_free(chip); return -ENOMEM; } /* tables must be aligned to 8 bytes here, but the kernel pages are much bigger, so we don't care (on i386) */ int_sta_masks = 0; for (i = 0; i < chip->bdbars_count; i++) { ichdev = &chip->ichd[i]; ichdev->bdbar = ((u32 *)chip->bdbars.area) + (i * ICH_MAX_FRAGS * 2); ichdev->bdbar_addr = chip->bdbars.addr + (i * sizeof(u32) * ICH_MAX_FRAGS * 2); int_sta_masks |= ichdev->int_sta_mask; } chip->int_sta_reg = ICH_REG_GLOB_STA; chip->int_sta_mask = int_sta_masks; if ((err = snd_intel8x0m_chip_init(chip, 1)) < 0) { snd_intel8x0m_free(chip); return err; } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_intel8x0m_free(chip); return err; } snd_card_set_dev(card, &pci->dev); *r_intel8x0m = chip; return 0; } static struct shortname_table { unsigned int id; const char *s; } shortnames[] __devinitdata = { { PCI_DEVICE_ID_INTEL_82801AA_6, "Intel 82801AA-ICH" }, { PCI_DEVICE_ID_INTEL_82801AB_6, "Intel 82901AB-ICH0" }, { PCI_DEVICE_ID_INTEL_82801BA_6, "Intel 82801BA-ICH2" }, { PCI_DEVICE_ID_INTEL_440MX_6, "Intel 440MX" }, { PCI_DEVICE_ID_INTEL_82801CA_6, "Intel 82801CA-ICH3" }, { PCI_DEVICE_ID_INTEL_82801DB_6, "Intel 82801DB-ICH4" }, { PCI_DEVICE_ID_INTEL_82801EB_6, "Intel ICH5" }, { PCI_DEVICE_ID_INTEL_ICH6_17, "Intel ICH6" }, { PCI_DEVICE_ID_INTEL_ICH7_19, "Intel ICH7" }, { 0x7446, "AMD AMD768" }, { PCI_DEVICE_ID_SI_7013, "SiS SI7013" }, { PCI_DEVICE_ID_NVIDIA_MCP1_MODEM, "NVidia nForce" }, { PCI_DEVICE_ID_NVIDIA_MCP2_MODEM, "NVidia nForce2" }, { PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM, "NVidia nForce2s" }, { PCI_DEVICE_ID_NVIDIA_MCP3_MODEM, "NVidia nForce3" }, { 0x746e, "AMD AMD8111" }, #if 0 { 0x5455, "ALi M5455" }, #endif { 0 }, }; static int __devinit 
snd_intel8x0m_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { struct snd_card *card; struct intel8x0m *chip; int err; struct shortname_table *name; err = snd_card_create(index, id, THIS_MODULE, 0, &card); if (err < 0) return err; strcpy(card->driver, "ICH-MODEM"); strcpy(card->shortname, "Intel ICH"); for (name = shortnames; name->id; name++) { if (pci->device == name->id) { strcpy(card->shortname, name->s); break; } } strcat(card->shortname," Modem"); if ((err = snd_intel8x0m_create(card, pci, pci_id->driver_data, &chip)) < 0) { snd_card_free(card); return err; } card->private_data = chip; if ((err = snd_intel8x0m_mixer(chip, ac97_clock)) < 0) { snd_card_free(card); return err; } if ((err = snd_intel8x0m_pcm(chip)) < 0) { snd_card_free(card); return err; } snd_intel8x0m_proc_init(chip); sprintf(card->longname, "%s at irq %i", card->shortname, chip->irq); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); return 0; } static void __devexit snd_intel8x0m_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = "Intel ICH Modem", .id_table = snd_intel8x0m_ids, .probe = snd_intel8x0m_probe, .remove = __devexit_p(snd_intel8x0m_remove), #ifdef CONFIG_PM .suspend = intel8x0m_suspend, .resume = intel8x0m_resume, #endif }; static int __init alsa_card_intel8x0m_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_intel8x0m_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_intel8x0m_init) module_exit(alsa_card_intel8x0m_exit)
gpl-2.0
MaxiCM/android_kernel_lge_msm8226
drivers/char/pcmcia/synclink_cs.c
4928
111464
/* * linux/drivers/char/pcmcia/synclink_cs.c * * $Id: synclink_cs.c,v 4.34 2005/09/08 13:20:54 paulkf Exp $ * * Device driver for Microgate SyncLink PC Card * multiprotocol serial adapter. * * written by Paul Fulghum for Microgate Corporation * paulkf@microgate.com * * Microgate and SyncLink are trademarks of Microgate Corporation * * This code is released under the GNU General Public License (GPL) * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq)) #if defined(__i386__) # define BREAKPOINT() asm(" int $3"); #else # define BREAKPOINT() { } #endif #define MAX_DEVICE_COUNT 4 #include <linux/module.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/time.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial.h> #include <linux/major.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/ioctl.h> #include <linux/synclink.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/dma.h> #include <linux/bitops.h> #include <asm/types.h> #include <linux/termios.h> #include <linux/workqueue.h> #include <linux/hdlc.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE)) #define SYNCLINK_GENERIC_HDLC 1 #else #define SYNCLINK_GENERIC_HDLC 0 #endif #define GET_USER(error,value,addr) error = get_user(value,addr) #define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0 #define PUT_USER(error,value,addr) error = put_user(value,addr) #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? 
-EFAULT : 0 #include <asm/uaccess.h> static MGSL_PARAMS default_params = { MGSL_MODE_HDLC, /* unsigned long mode */ 0, /* unsigned char loopback; */ HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */ HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */ 0, /* unsigned long clock_speed; */ 0xff, /* unsigned char addr_filter; */ HDLC_CRC_16_CCITT, /* unsigned short crc_type; */ HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */ HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */ 9600, /* unsigned long data_rate; */ 8, /* unsigned char data_bits; */ 1, /* unsigned char stop_bits; */ ASYNC_PARITY_NONE /* unsigned char parity; */ }; typedef struct { int count; unsigned char status; char data[1]; } RXBUF; /* The queue of BH actions to be performed */ #define BH_RECEIVE 1 #define BH_TRANSMIT 2 #define BH_STATUS 4 #define IO_PIN_SHUTDOWN_LIMIT 100 #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) struct _input_signal_events { int ri_up; int ri_down; int dsr_up; int dsr_down; int dcd_up; int dcd_down; int cts_up; int cts_down; }; /* * Device instance data structure */ typedef struct _mgslpc_info { struct tty_port port; void *if_ptr; /* General purpose pointer (used by SPPP) */ int magic; int line; struct mgsl_icount icount; int timeout; int x_char; /* xon/xoff character */ unsigned char read_status_mask; unsigned char ignore_status_mask; unsigned char *tx_buf; int tx_put; int tx_get; int tx_count; /* circular list of fixed length rx buffers */ unsigned char *rx_buf; /* memory allocated for all rx buffers */ int rx_buf_total_size; /* size of memory allocated for rx buffers */ int rx_put; /* index of next empty rx buffer */ int rx_get; /* index of next full rx buffer */ int rx_buf_size; /* size in bytes of single rx buffer */ int rx_buf_count; /* total number of rx buffers */ int rx_frame_count; /* number of full rx buffers */ wait_queue_head_t status_event_wait_q; wait_queue_head_t event_wait_q; struct timer_list tx_timer; 
/* HDLC transmit timeout timer */ struct _mgslpc_info *next_device; /* device list link */ unsigned short imra_value; unsigned short imrb_value; unsigned char pim_value; spinlock_t lock; struct work_struct task; /* task structure for scheduling bh */ u32 max_frame_size; u32 pending_bh; bool bh_running; bool bh_requested; int dcd_chkcount; /* check counts to prevent */ int cts_chkcount; /* too many IRQs if a signal */ int dsr_chkcount; /* is floating */ int ri_chkcount; bool rx_enabled; bool rx_overflow; bool tx_enabled; bool tx_active; bool tx_aborting; u32 idle_mode; int if_mode; /* serial interface selection (RS-232, v.35 etc) */ char device_name[25]; /* device instance name */ unsigned int io_base; /* base I/O address of adapter */ unsigned int irq_level; MGSL_PARAMS params; /* communications parameters */ unsigned char serial_signals; /* current serial signal states */ bool irq_occurred; /* for diagnostics use */ char testing_irq; unsigned int init_error; /* startup error (DIAGS) */ char flag_buf[MAX_ASYNC_BUFFER_SIZE]; bool drop_rts_on_tx_done; struct _input_signal_events input_signal_events; /* PCMCIA support */ struct pcmcia_device *p_dev; int stop; /* SPPP/Cisco HDLC device parts */ int netcount; spinlock_t netlock; #if SYNCLINK_GENERIC_HDLC struct net_device *netdev; #endif } MGSLPC_INFO; #define MGSLPC_MAGIC 0x5402 /* * The size of the serial xmit buffer is 1 page, or 4096 bytes */ #define TXBUFSIZE 4096 #define CHA 0x00 /* channel A offset */ #define CHB 0x40 /* channel B offset */ /* * FIXME: PPC has PVR defined in asm/reg.h. For now we just undef it. 
*/ #undef PVR #define RXFIFO 0 #define TXFIFO 0 #define STAR 0x20 #define CMDR 0x20 #define RSTA 0x21 #define PRE 0x21 #define MODE 0x22 #define TIMR 0x23 #define XAD1 0x24 #define XAD2 0x25 #define RAH1 0x26 #define RAH2 0x27 #define DAFO 0x27 #define RAL1 0x28 #define RFC 0x28 #define RHCR 0x29 #define RAL2 0x29 #define RBCL 0x2a #define XBCL 0x2a #define RBCH 0x2b #define XBCH 0x2b #define CCR0 0x2c #define CCR1 0x2d #define CCR2 0x2e #define CCR3 0x2f #define VSTR 0x34 #define BGR 0x34 #define RLCR 0x35 #define AML 0x36 #define AMH 0x37 #define GIS 0x38 #define IVA 0x38 #define IPC 0x39 #define ISR 0x3a #define IMR 0x3a #define PVR 0x3c #define PIS 0x3d #define PIM 0x3d #define PCR 0x3e #define CCR4 0x3f // IMR/ISR #define IRQ_BREAK_ON BIT15 // rx break detected #define IRQ_DATAOVERRUN BIT14 // receive data overflow #define IRQ_ALLSENT BIT13 // all sent #define IRQ_UNDERRUN BIT12 // transmit data underrun #define IRQ_TIMER BIT11 // timer interrupt #define IRQ_CTS BIT10 // CTS status change #define IRQ_TXREPEAT BIT9 // tx message repeat #define IRQ_TXFIFO BIT8 // transmit pool ready #define IRQ_RXEOM BIT7 // receive message end #define IRQ_EXITHUNT BIT6 // receive frame start #define IRQ_RXTIME BIT6 // rx char timeout #define IRQ_DCD BIT2 // carrier detect status change #define IRQ_OVERRUN BIT1 // receive frame overflow #define IRQ_RXFIFO BIT0 // receive pool full // STAR #define XFW BIT6 // transmit FIFO write enable #define CEC BIT2 // command executing #define CTS BIT1 // CTS state #define PVR_DTR BIT0 #define PVR_DSR BIT1 #define PVR_RI BIT2 #define PVR_AUTOCTS BIT3 #define PVR_RS232 0x20 /* 0010b */ #define PVR_V35 0xe0 /* 1110b */ #define PVR_RS422 0x40 /* 0100b */ /* Register access functions */ #define write_reg(info, reg, val) outb((val),(info)->io_base + (reg)) #define read_reg(info, reg) inb((info)->io_base + (reg)) #define read_reg16(info, reg) inw((info)->io_base + (reg)) #define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg)) 
#define set_reg_bits(info, reg, mask) \ write_reg(info, (reg), \ (unsigned char) (read_reg(info, (reg)) | (mask))) #define clear_reg_bits(info, reg, mask) \ write_reg(info, (reg), \ (unsigned char) (read_reg(info, (reg)) & ~(mask))) /* * interrupt enable/disable routines */ static void irq_disable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask) { if (channel == CHA) { info->imra_value |= mask; write_reg16(info, CHA + IMR, info->imra_value); } else { info->imrb_value |= mask; write_reg16(info, CHB + IMR, info->imrb_value); } } static void irq_enable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask) { if (channel == CHA) { info->imra_value &= ~mask; write_reg16(info, CHA + IMR, info->imra_value); } else { info->imrb_value &= ~mask; write_reg16(info, CHB + IMR, info->imrb_value); } } #define port_irq_disable(info, mask) \ { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); } #define port_irq_enable(info, mask) \ { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); } static void rx_start(MGSLPC_INFO *info); static void rx_stop(MGSLPC_INFO *info); static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty); static void tx_stop(MGSLPC_INFO *info); static void tx_set_idle(MGSLPC_INFO *info); static void get_signals(MGSLPC_INFO *info); static void set_signals(MGSLPC_INFO *info); static void reset_device(MGSLPC_INFO *info); static void hdlc_mode(MGSLPC_INFO *info); static void async_mode(MGSLPC_INFO *info); static void tx_timeout(unsigned long context); static int carrier_raised(struct tty_port *port); static void dtr_rts(struct tty_port *port, int onoff); #if SYNCLINK_GENERIC_HDLC #define dev_to_port(D) (dev_to_hdlc(D)->priv) static void hdlcdev_tx_done(MGSLPC_INFO *info); static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size); static int hdlcdev_init(MGSLPC_INFO *info); static void hdlcdev_exit(MGSLPC_INFO *info); #endif static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit); 
/* adapter diagnostics and resource management */
static bool register_test(MGSLPC_INFO *info);
static bool irq_test(MGSLPC_INFO *info);
static int adapter_test(MGSLPC_INFO *info);

static int claim_resources(MGSLPC_INFO *info);
static void release_resources(MGSLPC_INFO *info);
static void mgslpc_add_device(MGSLPC_INFO *info);
static void mgslpc_remove_device(MGSLPC_INFO *info);

/* receive buffer management */
static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty);
static void rx_reset_buffers(MGSLPC_INFO *info);
static int rx_alloc_buffers(MGSLPC_INFO *info);
static void rx_free_buffers(MGSLPC_INFO *info);

static irqreturn_t mgslpc_isr(int irq, void *dev_id);

/*
 * Bottom half interrupt handlers
 */
static void bh_handler(struct work_struct *work);
static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty);
static void bh_status(MGSLPC_INFO *info);

/*
 * ioctl handlers
 */
static int tiocmget(struct tty_struct *tty);
static int tiocmset(struct tty_struct *tty,
		    unsigned int set, unsigned int clear);
static int get_stats(MGSLPC_INFO *info, struct mgsl_icount __user *user_icount);
static int get_params(MGSLPC_INFO *info, MGSL_PARAMS __user *user_params);
static int set_params(MGSLPC_INFO *info, MGSL_PARAMS __user *new_params, struct tty_struct *tty);
static int get_txidle(MGSLPC_INFO *info, int __user *idle_mode);
static int set_txidle(MGSLPC_INFO *info, int idle_mode);
static int set_txenable(MGSLPC_INFO *info, int enable, struct tty_struct *tty);
static int tx_abort(MGSLPC_INFO *info);
static int set_rxenable(MGSLPC_INFO *info, int enable);
static int wait_events(MGSLPC_INFO *info, int __user *mask);

/* singly-linked list of all allocated device instances */
static MGSLPC_INFO *mgslpc_device_list = NULL;
static int mgslpc_device_count = 0;

/*
 * Set this param to non-zero to load eax with the
 * .text section address and breakpoint on module load.
 * This is useful for use with gdb and add-symbol-file command.
 */
static bool break_on_load=0;

/*
 * Driver major number, defaults to zero to get auto
 * assigned major number. May be forced as module parameter.
 */
static int ttymajor=0;

static int debug_level = 0;
static int maxframe[MAX_DEVICE_COUNT] = {0,};

module_param(break_on_load, bool, 0);
module_param(ttymajor, int, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);

MODULE_LICENSE("GPL");

static char *driver_name = "SyncLink PC Card driver";
static char *driver_version = "$Revision: 4.34 $";

static struct tty_driver *serial_driver;

/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256

static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty);
static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout);

/* PCMCIA prototypes */

static int mgslpc_config(struct pcmcia_device *link);
static void mgslpc_release(u_long arg);
static void mgslpc_detach(struct pcmcia_device *p_dev);

/*
 * 1st function defined in .text section. Calling this function in
 * init_module() followed by a breakpoint allows a remote debugger
 * (gdb) to get the .text address for the add-symbol-file command.
 * This allows remote debugging of dynamically loadable modules.
 */
static void* mgslpc_get_text_ptr(void)
{
	/* returns its own address so gdb can locate the .text section */
	return mgslpc_get_text_ptr;
}

/**
 * line discipline callback wrappers
 *
 * The wrappers maintain line discipline references
 * while calling into the line discipline.
* * ldisc_receive_buf - pass receive data to line discipline */ static void ldisc_receive_buf(struct tty_struct *tty, const __u8 *data, char *flags, int count) { struct tty_ldisc *ld; if (!tty) return; ld = tty_ldisc_ref(tty); if (ld) { if (ld->ops->receive_buf) ld->ops->receive_buf(tty, data, flags, count); tty_ldisc_deref(ld); } } static const struct tty_port_operations mgslpc_port_ops = { .carrier_raised = carrier_raised, .dtr_rts = dtr_rts }; static int mgslpc_probe(struct pcmcia_device *link) { MGSLPC_INFO *info; int ret; if (debug_level >= DEBUG_LEVEL_INFO) printk("mgslpc_attach\n"); info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL); if (!info) { printk("Error can't allocate device instance data\n"); return -ENOMEM; } info->magic = MGSLPC_MAGIC; tty_port_init(&info->port); info->port.ops = &mgslpc_port_ops; INIT_WORK(&info->task, bh_handler); info->max_frame_size = 4096; info->port.close_delay = 5*HZ/10; info->port.closing_wait = 30*HZ; init_waitqueue_head(&info->status_event_wait_q); init_waitqueue_head(&info->event_wait_q); spin_lock_init(&info->lock); spin_lock_init(&info->netlock); memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); info->idle_mode = HDLC_TXIDLE_FLAGS; info->imra_value = 0xffff; info->imrb_value = 0xffff; info->pim_value = 0xff; info->p_dev = link; link->priv = info; /* Initialize the struct pcmcia_device structure */ ret = mgslpc_config(link); if (ret) return ret; mgslpc_add_device(info); return 0; } /* Card has been inserted. 
 */

/* pcmcia_loop_config() callback: try to claim the I/O window
 * described by the current configuration table entry.
 */
static int mgslpc_ioprobe(struct pcmcia_device *p_dev, void *priv_data)
{
	return pcmcia_request_io(p_dev);
}

/* Configure the PCMCIA socket: claim I/O window, IRQ, and enable the
 * device. On any failure the partial configuration is released.
 */
static int mgslpc_config(struct pcmcia_device *link)
{
	MGSLPC_INFO *info = link->priv;
	int ret;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("mgslpc_config(0x%p)\n", link);

	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;

	ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
	if (ret != 0)
		goto failed;

	link->config_index = 8;
	link->config_regs = PRESENT_OPTION;

	ret = pcmcia_request_irq(link, mgslpc_isr);
	if (ret)
		goto failed;
	ret = pcmcia_enable_device(link);
	if (ret)
		goto failed;

	/* record the resources the adapter register access routines use */
	info->io_base = link->resource[0]->start;
	info->irq_level = link->irq;
	return 0;

failed:
	mgslpc_release((u_long)link);
	return -ENODEV;
}

/* Card has been removed.
 * Unregister device and release PCMCIA configuration.
 * If device is open, postpone until it is closed.
 */
static void mgslpc_release(u_long arg)
{
	struct pcmcia_device *link = (struct pcmcia_device *)arg;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("mgslpc_release(0x%p)\n", link);

	pcmcia_disable_device(link);
}

/* Device is being detached: flag it stopped, release the socket
 * configuration, then remove the instance from the device list.
 */
static void mgslpc_detach(struct pcmcia_device *link)
{
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("mgslpc_detach(0x%p)\n", link);

	((MGSLPC_INFO *)link->priv)->stop = 1;
	mgslpc_release((u_long)link);

	mgslpc_remove_device((MGSLPC_INFO *)link->priv);
}

/* Power management: the stop flag causes the ISR to ignore interrupts
 * while the socket is suspended.
 */
static int mgslpc_suspend(struct pcmcia_device *link)
{
	MGSLPC_INFO *info = link->priv;

	info->stop = 1;

	return 0;
}

static int mgslpc_resume(struct pcmcia_device *link)
{
	MGSLPC_INFO *info = link->priv;

	info->stop = 0;

	return 0;
}

/* Sanity-check a device instance pointer before use; returns true
 * (caller should bail out) when the pointer is missing or corrupt.
 * Magic-number checking is compiled in only with MGSLPC_PARANOIA_CHECK.
 */
static inline bool mgslpc_paranoia_check(MGSLPC_INFO *info,
					 char *name, const char *routine)
{
#ifdef MGSLPC_PARANOIA_CHECK
	static const char *badmagic =
		"Warning: bad magic number for mgsl struct (%s) in %s\n";
	static const char *badinfo =
		"Warning: null mgslpc_info for (%s) in %s\n";

	if (!info) {
		printk(badinfo, name, routine);
		return true;
	}
	if (info->magic != MGSLPC_MAGIC) {
		printk(badmagic, name, routine);
		return true;
	}
#else
	if (!info)
		return true;
#endif
	return false;
}

/* CMDR (command register) bit definitions */
#define CMD_RXFIFO      BIT7	// release current rx FIFO
#define CMD_RXRESET     BIT6	// receiver reset
#define CMD_RXFIFO_READ BIT5
#define CMD_START_TIMER BIT4
#define CMD_TXFIFO      BIT3	// release current tx FIFO
#define CMD_TXEOM       BIT1	// transmit end message
#define CMD_TXRESET     BIT0	// transmit reset

/* Busy-wait (up to ~1ms) for the channel's command executing bit
 * (STAR BIT2) to clear; returns false on timeout.
 */
static bool wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
{
	int i = 0;
	/* wait for command completion */
	while (read_reg(info, (unsigned char)(channel+STAR)) & BIT2) {
		udelay(1);
		if (i++ == 1000)
			return false;
	}
	return true;
}

/* Wait for the previous command to finish, then write a new one to CMDR.
 * NOTE(review): a wait_command_complete() timeout is ignored here and the
 * command is issued anyway — matches the existing driver behavior.
 */
static void issue_command(MGSLPC_INFO *info, unsigned char channel, unsigned char cmd)
{
	wait_command_complete(info, channel);
	write_reg(info, (unsigned char) (channel + CMDR), cmd);
}

/* tty callback: pause the transmitter */
static void tx_pause(struct tty_struct *tty)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (mgslpc_paranoia_check(info, tty->name, "tx_pause"))
		return;
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("tx_pause(%s)\n",info->device_name);

	spin_lock_irqsave(&info->lock,flags);
	if (info->tx_enabled)
		tx_stop(info);
	spin_unlock_irqrestore(&info->lock,flags);
}

/* tty callback: resume a paused transmitter */
static void tx_release(struct tty_struct *tty)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (mgslpc_paranoia_check(info, tty->name, "tx_release"))
		return;
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("tx_release(%s)\n",info->device_name);

	spin_lock_irqsave(&info->lock,flags);
	if (!info->tx_enabled)
		tx_start(info, tty);
	spin_unlock_irqrestore(&info->lock,flags);
}

/* Return next bottom half action to perform.
 * or 0 if nothing to do.
 */
static int bh_action(MGSLPC_INFO *info)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&info->lock,flags);

	/* dequeue one pending work item, highest priority first */
	if (info->pending_bh & BH_RECEIVE) {
		info->pending_bh &= ~BH_RECEIVE;
		rc = BH_RECEIVE;
	} else if (info->pending_bh & BH_TRANSMIT) {
		info->pending_bh &= ~BH_TRANSMIT;
		rc = BH_TRANSMIT;
	} else if (info->pending_bh & BH_STATUS) {
		info->pending_bh &= ~BH_STATUS;
		rc = BH_STATUS;
	}

	if (!rc) {
		/* Mark BH routine as complete */
		info->bh_running = false;
		info->bh_requested = false;
	}

	spin_unlock_irqrestore(&info->lock,flags);

	return rc;
}

/* Bottom half work function: drain all pending work items queued
 * by the interrupt service routine.
 */
static void bh_handler(struct work_struct *work)
{
	MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
	struct tty_struct *tty;
	int action;

	if (!info)
		return;

	if (debug_level >= DEBUG_LEVEL_BH)
		printk( "%s(%d):bh_handler(%s) entry\n",
			__FILE__,__LINE__,info->device_name);

	info->bh_running = true;
	/* take a tty reference for the duration of the loop; may be NULL */
	tty = tty_port_tty_get(&info->port);

	while((action = bh_action(info)) != 0) {

		/* Process work item */
		if ( debug_level >= DEBUG_LEVEL_BH )
			printk( "%s(%d):bh_handler() work item action=%d\n",
				__FILE__,__LINE__,action);

		switch (action) {

		case BH_RECEIVE:
			/* hand all completed receive frames to the tty layer */
			while(rx_get_frame(info, tty));
			break;
		case BH_TRANSMIT:
			bh_transmit(info, tty);
			break;
		case BH_STATUS:
			bh_status(info);
			break;
		default:
			/* unknown work item ID */
			printk("Unknown work item ID=%08X!\n", action);
			break;
		}
	}

	tty_kref_put(tty);
	if (debug_level >= DEBUG_LEVEL_BH)
		printk( "%s(%d):bh_handler(%s) exit\n",
			__FILE__,__LINE__,info->device_name);
}

/* transmit bottom half: wake up writers blocked on the tty */
static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty)
{
	if (debug_level >= DEBUG_LEVEL_BH)
		printk("bh_transmit() entry on %s\n", info->device_name);

	if (tty)
		tty_wakeup(tty);
}

/* status bottom half: re-arm the modem input spurious-irq counters */
static void bh_status(MGSLPC_INFO *info)
{
	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
}

/* eom: non-zero = end of frame */
static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
{
	unsigned char data[2];
	unsigned char fifo_count, read_count, i;
	/* current receive buffer slot being filled */
	RXBUF *buf = (RXBUF*)(info->rx_buf + (info->rx_put * info->rx_buf_size));

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):rx_ready_hdlc(eom=%d)\n",__FILE__,__LINE__,eom);

	if (!info->rx_enabled)
		return;

	if (info->rx_frame_count >= info->rx_buf_count) {
		/* no more free buffers */
		issue_command(info, CHA, CMD_RXRESET);
		info->pending_bh |= BH_RECEIVE;
		info->rx_overflow = true;
		info->icount.buf_overrun++;
		return;
	}

	if (eom) {
		/* end of frame, get FIFO count from RBCL register */
		if (!(fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f)))
			fifo_count = 32;
	} else
		fifo_count = 32;

	/* drain the FIFO into the current buffer, 2 bytes at a time
	 * when possible; the final byte of an EOM burst is the chip's
	 * frame status byte, not data.
	 */
	do {
		if (fifo_count == 1) {
			read_count = 1;
			data[0] = read_reg(info, CHA + RXFIFO);
		} else {
			read_count = 2;
			*((unsigned short *) data) = read_reg16(info, CHA + RXFIFO);
		}
		fifo_count -= read_count;
		if (!fifo_count && eom)
			buf->status = data[--read_count];

		for (i = 0; i < read_count; i++) {
			if (buf->count >= info->max_frame_size) {
				/* frame too large, reset receiver and reset current buffer */
				issue_command(info, CHA, CMD_RXRESET);
				buf->count = 0;
				return;
			}
			*(buf->data + buf->count) = data[i];
			buf->count++;
		}
	} while (fifo_count);

	if (eom) {
		/* frame complete: advance the put index and queue the BH */
		info->pending_bh |= BH_RECEIVE;
		info->rx_frame_count++;
		info->rx_put++;
		if (info->rx_put >= info->rx_buf_count)
			info->rx_put = 0;
	}
	/* release the drained FIFO space back to the receiver */
	issue_command(info, CHA, CMD_RXFIFO);
}

/* tcd: non-zero = early termination (fewer than 32 bytes in FIFO) */
static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
{
	unsigned char data, status, flag;
	int fifo_count;
	int work = 0;
	struct mgsl_icount *icount = &info->icount;

	if (tcd) {
		/* early termination, get FIFO count from RBCL register */
		fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);

		/* Zero fifo count could mean 0 or 32 bytes available.
		 * If BIT5 of STAR is set then at least 1 byte is available.
		 */
		if (!fifo_count && (read_reg(info,CHA+STAR) & BIT5))
			fifo_count = 32;
	} else
		fifo_count = 32;

	tty_buffer_request_room(tty, fifo_count);
	/* Flush received async data to receive data buffer.
*/ while (fifo_count) { data = read_reg(info, CHA + RXFIFO); status = read_reg(info, CHA + RXFIFO); fifo_count -= 2; icount->rx++; flag = TTY_NORMAL; // if no frameing/crc error then save data // BIT7:parity error // BIT6:framing error if (status & (BIT7 + BIT6)) { if (status & BIT7) icount->parity++; else icount->frame++; /* discard char if tty control flags say so */ if (status & info->ignore_status_mask) continue; status &= info->read_status_mask; if (status & BIT7) flag = TTY_PARITY; else if (status & BIT6) flag = TTY_FRAME; } work += tty_insert_flip_char(tty, data, flag); } issue_command(info, CHA, CMD_RXFIFO); if (debug_level >= DEBUG_LEVEL_ISR) { printk("%s(%d):rx_ready_async", __FILE__,__LINE__); printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n", __FILE__,__LINE__,icount->rx,icount->brk, icount->parity,icount->frame,icount->overrun); } if (work) tty_flip_buffer_push(tty); } static void tx_done(MGSLPC_INFO *info, struct tty_struct *tty) { if (!info->tx_active) return; info->tx_active = false; info->tx_aborting = false; if (info->params.mode == MGSL_MODE_ASYNC) return; info->tx_count = info->tx_put = info->tx_get = 0; del_timer(&info->tx_timer); if (info->drop_rts_on_tx_done) { get_signals(info); if (info->serial_signals & SerialSignal_RTS) { info->serial_signals &= ~SerialSignal_RTS; set_signals(info); } info->drop_rts_on_tx_done = false; } #if SYNCLINK_GENERIC_HDLC if (info->netcount) hdlcdev_tx_done(info); else #endif { if (tty->stopped || tty->hw_stopped) { tx_stop(info); return; } info->pending_bh |= BH_TRANSMIT; } } static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty) { unsigned char fifo_count = 32; int c; if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):tx_ready(%s)\n", __FILE__,__LINE__,info->device_name); if (info->params.mode == MGSL_MODE_HDLC) { if (!info->tx_active) return; } else { if (tty->stopped || tty->hw_stopped) { tx_stop(info); return; } if (!info->tx_count) info->tx_active = false; } if (!info->tx_count) return; 
while (info->tx_count && fifo_count) { c = min(2, min_t(int, fifo_count, min(info->tx_count, TXBUFSIZE - info->tx_get))); if (c == 1) { write_reg(info, CHA + TXFIFO, *(info->tx_buf + info->tx_get)); } else { write_reg16(info, CHA + TXFIFO, *((unsigned short*)(info->tx_buf + info->tx_get))); } info->tx_count -= c; info->tx_get = (info->tx_get + c) & (TXBUFSIZE - 1); fifo_count -= c; } if (info->params.mode == MGSL_MODE_ASYNC) { if (info->tx_count < WAKEUP_CHARS) info->pending_bh |= BH_TRANSMIT; issue_command(info, CHA, CMD_TXFIFO); } else { if (info->tx_count) issue_command(info, CHA, CMD_TXFIFO); else issue_command(info, CHA, CMD_TXFIFO + CMD_TXEOM); } } static void cts_change(MGSLPC_INFO *info, struct tty_struct *tty) { get_signals(info); if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) irq_disable(info, CHB, IRQ_CTS); info->icount.cts++; if (info->serial_signals & SerialSignal_CTS) info->input_signal_events.cts_up++; else info->input_signal_events.cts_down++; wake_up_interruptible(&info->status_event_wait_q); wake_up_interruptible(&info->event_wait_q); if (info->port.flags & ASYNC_CTS_FLOW) { if (tty->hw_stopped) { if (info->serial_signals & SerialSignal_CTS) { if (debug_level >= DEBUG_LEVEL_ISR) printk("CTS tx start..."); if (tty) tty->hw_stopped = 0; tx_start(info, tty); info->pending_bh |= BH_TRANSMIT; return; } } else { if (!(info->serial_signals & SerialSignal_CTS)) { if (debug_level >= DEBUG_LEVEL_ISR) printk("CTS tx stop..."); if (tty) tty->hw_stopped = 1; tx_stop(info); } } } info->pending_bh |= BH_STATUS; } static void dcd_change(MGSLPC_INFO *info, struct tty_struct *tty) { get_signals(info); if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) irq_disable(info, CHB, IRQ_DCD); info->icount.dcd++; if (info->serial_signals & SerialSignal_DCD) { info->input_signal_events.dcd_up++; } else info->input_signal_events.dcd_down++; #if SYNCLINK_GENERIC_HDLC if (info->netcount) { if (info->serial_signals & SerialSignal_DCD) netif_carrier_on(info->netdev); else 
			netif_carrier_off(info->netdev);
	}
#endif
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);

	if (info->port.flags & ASYNC_CHECK_CD) {
		if (debug_level >= DEBUG_LEVEL_ISR)
			printk("%s CD now %s...", info->device_name,
			       (info->serial_signals & SerialSignal_DCD) ? "on" : "off");
		if (info->serial_signals & SerialSignal_DCD)
			wake_up_interruptible(&info->port.open_wait);
		else {
			/* carrier lost: hang up the tty */
			if (debug_level >= DEBUG_LEVEL_ISR)
				printk("doing serial hangup...");
			if (tty)
				tty_hangup(tty);
		}
	}
	info->pending_bh |= BH_STATUS;
}

/* DSR input transition: record event and wake status waiters */
static void dsr_change(MGSLPC_INFO *info)
{
	get_signals(info);
	/* disable the IRQ if the line is bouncing excessively */
	if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
		port_irq_disable(info, PVR_DSR);
	info->icount.dsr++;
	if (info->serial_signals & SerialSignal_DSR)
		info->input_signal_events.dsr_up++;
	else
		info->input_signal_events.dsr_down++;
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);
	info->pending_bh |= BH_STATUS;
}

/* RI (ring indicator) input transition: record event and wake waiters */
static void ri_change(MGSLPC_INFO *info)
{
	get_signals(info);
	if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
		port_irq_disable(info, PVR_RI);
	info->icount.rng++;
	if (info->serial_signals & SerialSignal_RI)
		info->input_signal_events.ri_up++;
	else
		info->input_signal_events.ri_down++;
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);
	info->pending_bh |= BH_STATUS;
}

/* Interrupt service routine entry point.
 *
 * Arguments:
 *
 * irq     interrupt number that caused interrupt
 * dev_id  device ID supplied during interrupt registration
 */
static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
{
	MGSLPC_INFO *info = dev_id;
	struct tty_struct *tty;
	unsigned short isr;
	unsigned char gis, pis;
	int count=0;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("mgslpc_isr(%d) entry.\n", info->irq_level);

	/* ignore interrupts while the pcmcia device is not configured */
	if (!(info->p_dev->_locked))
		return IRQ_HANDLED;

	tty = tty_port_tty_get(&info->port);

	spin_lock(&info->lock);

	/* service the global interrupt status register until it is clear */
	while ((gis = read_reg(info, CHA + GIS))) {
		if (debug_level >= DEBUG_LEVEL_ISR)
			printk("mgslpc_isr %s gis=%04X\n", info->device_name,gis);

		if ((gis & 0x70) || count > 1000) {
			printk("synclink_cs:hardware failed or ejected\n");
			break;
		}
		count++;

		/* channel B (modem signal) interrupts */
		if (gis & (BIT1 + BIT0)) {
			isr = read_reg16(info, CHB + ISR);
			if (isr & IRQ_DCD)
				dcd_change(info, tty);
			if (isr & IRQ_CTS)
				cts_change(info, tty);
		}
		/* channel A (data) interrupts */
		if (gis & (BIT3 + BIT2)) {
			isr = read_reg16(info, CHA + ISR);
			if (isr & IRQ_TIMER) {
				info->irq_occurred = true;
				irq_disable(info, CHA, IRQ_TIMER);
			}

			/* receive IRQs */
			if (isr & IRQ_EXITHUNT) {
				info->icount.exithunt++;
				wake_up_interruptible(&info->event_wait_q);
			}
			if (isr & IRQ_BREAK_ON) {
				info->icount.brk++;
				if (info->port.flags & ASYNC_SAK)
					do_SAK(tty);
			}
			if (isr & IRQ_RXTIME) {
				issue_command(info, CHA, CMD_RXFIFO_READ);
			}
			if (isr & (IRQ_RXEOM + IRQ_RXFIFO)) {
				if (info->params.mode == MGSL_MODE_HDLC)
					rx_ready_hdlc(info, isr & IRQ_RXEOM);
				else
					rx_ready_async(info, isr & IRQ_RXEOM, tty);
			}

			/* transmit IRQs */
			if (isr & IRQ_UNDERRUN) {
				if (info->tx_aborting)
					info->icount.txabort++;
				else
					info->icount.txunder++;
				tx_done(info, tty);
			}
			else if (isr & IRQ_ALLSENT) {
				info->icount.txok++;
				tx_done(info, tty);
			}
			else if (isr & IRQ_TXFIFO)
				tx_ready(info, tty);
		}
		/* port (DSR/RI) interrupts */
		if (gis & BIT7) {
			pis = read_reg(info, CHA + PIS);
			if (pis & BIT1)
				dsr_change(info);
			if (pis & BIT2)
				ri_change(info);
		}
	}

	/* Request bottom half processing if there's something
	 * for it to do and the bh is not already running
	 */
	if (info->pending_bh && !info->bh_running && !info->bh_requested) {
		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s queueing bh task.\n",
				__FILE__,__LINE__,info->device_name);
		schedule_work(&info->task);
		info->bh_requested = true;
	}

	spin_unlock(&info->lock);
	tty_kref_put(tty);

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):mgslpc_isr(%d)exit.\n",
		       __FILE__, __LINE__, info->irq_level);

	return IRQ_HANDLED;
}

/* Initialize and start device.
 */
static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
{
	int retval = 0;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):startup(%s)\n",__FILE__,__LINE__,info->device_name);

	if (info->port.flags & ASYNC_INITIALIZED)
		return 0;

	if (!info->tx_buf) {
		/* allocate a page of memory for a transmit buffer */
		info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
		if (!info->tx_buf) {
			printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
				__FILE__,__LINE__,info->device_name);
			return -ENOMEM;
		}
	}

	info->pending_bh = 0;

	memset(&info->icount, 0, sizeof(info->icount));

	setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);

	/* Allocate and claim adapter resources */
	retval = claim_resources(info);

	/* perform existence check and diagnostics */
	if ( !retval )
		retval = adapter_test(info);

	if ( retval ) {
		if (capable(CAP_SYS_ADMIN) && tty)
			set_bit(TTY_IO_ERROR, &tty->flags);
		release_resources(info);
		return retval;
	}

	/* program hardware for current parameters */
	mgslpc_change_params(info, tty);

	if (tty)
		clear_bit(TTY_IO_ERROR, &tty->flags);

	info->port.flags |= ASYNC_INITIALIZED;

	return 0;
}

/* Called by mgslpc_close() and mgslpc_hangup() to shutdown hardware
 */
static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
{
	unsigned long flags;

	if (!(info->port.flags & ASYNC_INITIALIZED))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_shutdown(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* clear status wait queue because status changes */
	/* can't happen after shutting down the hardware */
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);

	del_timer_sync(&info->tx_timer);

	if (info->tx_buf) {
		free_page((unsigned long) info->tx_buf);
		info->tx_buf = NULL;
	}

	spin_lock_irqsave(&info->lock,flags);

	rx_stop(info);
	tx_stop(info);

	/* TODO:disable interrupts instead of reset to preserve signal states */
	reset_device(info);

	/* drop DTR/RTS on close unless HUPCL says to keep them */
	if (!tty || tty->termios->c_cflag & HUPCL) {
		info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
		set_signals(info);
	}

	spin_unlock_irqrestore(&info->lock,flags);

	release_resources(info);

	if (tty)
		set_bit(TTY_IO_ERROR, &tty->flags);

	info->port.flags &= ~ASYNC_INITIALIZED;
}

/* Stop the channel, program it for the current mode/params, re-enable
 * the modem status interrupts, and restart the receiver if needed.
 */
static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
{
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);

	rx_stop(info);
	tx_stop(info);
	info->tx_count = info->tx_put = info->tx_get = 0;

	if (info->params.mode == MGSL_MODE_HDLC || info->netcount)
		hdlc_mode(info);
	else
		async_mode(info);

	set_signals(info);

	/* re-arm spurious-irq counters for the modem inputs */
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;

	irq_enable(info, CHB, IRQ_DCD | IRQ_CTS);
	port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI);
	get_signals(info);

	if (info->netcount || (tty && (tty->termios->c_cflag & CREAD)))
		rx_start(info);

	spin_unlock_irqrestore(&info->lock,flags);
}

/* Reconfigure adapter based on new parameters
 */
static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
{
	unsigned cflag;
	int bits_per_char;

	if (!tty || !tty->termios)
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_change_params(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	cflag = tty->termios->c_cflag;

	/* if B0 rate (hangup) specified then negate DTR and RTS */
	/* otherwise assert DTR and RTS */
	if (cflag & CBAUD)
		info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
	else
		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);

	/* byte size and parity */
	switch (cflag & CSIZE) {
	case CS5:
		info->params.data_bits = 5;
		break;
	case CS6:
		info->params.data_bits = 6;
		break;
	case CS7:
		info->params.data_bits = 7;
		break;
	case CS8:
		info->params.data_bits = 8;
		break;
	default:
		info->params.data_bits = 7;
		break;
	}

	if (cflag & CSTOPB)
		info->params.stop_bits = 2;
	else
		info->params.stop_bits = 1;

	info->params.parity = ASYNC_PARITY_NONE;
	if (cflag & PARENB) {
		if (cflag & PARODD)
			info->params.parity = ASYNC_PARITY_ODD;
		else
			info->params.parity = ASYNC_PARITY_EVEN;
#ifdef CMSPAR
		if (cflag & CMSPAR)
			info->params.parity = ASYNC_PARITY_SPACE;
#endif
	}

	/* calculate number of jiffies to transmit a full
	 * FIFO (32 bytes) at specified data rate
	 */
	bits_per_char = info->params.data_bits +
			info->params.stop_bits + 1;

	/* if port data rate is set to 460800 or less then
	 * allow tty settings to override, otherwise keep the
	 * current data rate.
	 */
	if (info->params.data_rate <= 460800) {
		info->params.data_rate = tty_get_baud_rate(tty);
	}

	if ( info->params.data_rate ) {
		info->timeout = (32*HZ*bits_per_char) /
				info->params.data_rate;
	}
	info->timeout += HZ/50;		/* Add .02 seconds of slop */

	if (cflag & CRTSCTS)
		info->port.flags |= ASYNC_CTS_FLOW;
	else
		info->port.flags &= ~ASYNC_CTS_FLOW;

	if (cflag & CLOCAL)
		info->port.flags &= ~ASYNC_CHECK_CD;
	else
		info->port.flags |= ASYNC_CHECK_CD;

	/* process tty input control flags */
	info->read_status_mask = 0;
	if (I_INPCK(tty))
		info->read_status_mask |= BIT7 | BIT6;
	/* NOTE(review): ignore_status_mask is only ever OR-ed, never
	 * cleared here — bits persist across reconfigurations. This
	 * matches the historical driver; confirm before changing.
	 */
	if (I_IGNPAR(tty))
		info->ignore_status_mask |= BIT7 | BIT6;

	mgslpc_program_hw(info, tty);
}

/* Add a character to the transmit buffer
 */
static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO) {
		printk( "%s(%d):mgslpc_put_char(%d) on %s\n",
			__FILE__,__LINE__,ch,info->device_name);
	}

	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char"))
		return 0;

	if (!info->tx_buf)
		return 0;

	spin_lock_irqsave(&info->lock,flags);

	/* in HDLC mode chars can only be queued while no frame is active */
	if (info->params.mode == MGSL_MODE_ASYNC ||
	    !info->tx_active) {
		if (info->tx_count < TXBUFSIZE - 1) {
			/* circular buffer: put index wraps with the size mask */
			info->tx_buf[info->tx_put++] = ch;
			info->tx_put &= TXBUFSIZE-1;
			info->tx_count++;
		}
	}

	spin_unlock_irqrestore(&info->lock,flags);
	return 1;
}

/* Enable transmitter so remaining characters in the
 * transmit buffer are sent.
 */
static void mgslpc_flush_chars(struct tty_struct *tty)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk( "%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
			__FILE__,__LINE__,info->device_name,info->tx_count);

	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars"))
		return;

	if (info->tx_count <= 0 || tty->stopped ||
	    tty->hw_stopped || !info->tx_buf)
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk( "%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
			__FILE__,__LINE__,info->device_name);

	spin_lock_irqsave(&info->lock,flags);
	if (!info->tx_active)
		tx_start(info, tty);
	spin_unlock_irqrestore(&info->lock,flags);
}

/* Send a block of data
 *
 * Arguments:
 *
 * tty        pointer to tty information structure
 * buf        pointer to buffer containing send data
 * count      size of send data in bytes
 *
 * Returns: number of characters written
 */
static int mgslpc_write(struct tty_struct * tty,
			const unsigned char *buf, int count)
{
	int c, ret = 0;
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk( "%s(%d):mgslpc_write(%s) count=%d\n",
			__FILE__,__LINE__,info->device_name,count);

	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") ||
	    !info->tx_buf)
		goto cleanup;

	if (info->params.mode == MGSL_MODE_HDLC) {
		/* a frame must fit the buffer whole, and only one frame
		 * may be queued while the transmitter is active */
		if (count > TXBUFSIZE) {
			ret = -EIO;
			goto cleanup;
		}
		if (info->tx_active)
			goto cleanup;
		else if (info->tx_count)
			goto start;
	}

	for (;;) {
		/* largest contiguous copy: free space or distance to wrap */
		c = min(count,
			min(TXBUFSIZE - info->tx_count - 1,
			    TXBUFSIZE - info->tx_put));
		if (c <= 0)
			break;

		memcpy(info->tx_buf + info->tx_put, buf, c);

		spin_lock_irqsave(&info->lock,flags);
		info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1);
		info->tx_count += c;
		spin_unlock_irqrestore(&info->lock,flags);

		buf += c;
		count -= c;
		ret += c;
	}
start:
	if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
		spin_lock_irqsave(&info->lock,flags);
		if (!info->tx_active)
			tx_start(info, tty);
		spin_unlock_irqrestore(&info->lock,flags);
	}
cleanup:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk( "%s(%d):mgslpc_write(%s) returning=%d\n",
			__FILE__,__LINE__,info->device_name,ret);
	return ret;
}

/* Return the count of free bytes in transmit buffer
 */
static int mgslpc_write_room(struct tty_struct *tty)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	int ret;

	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write_room"))
		return 0;

	if (info->params.mode == MGSL_MODE_HDLC) {
		/* HDLC (frame oriented) mode */
		if (info->tx_active)
			return 0;
		else
			return HDLC_MAX_FRAME_SIZE;
	} else {
		ret = TXBUFSIZE - info->tx_count - 1;
		if (ret < 0)
			ret = 0;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_write_room(%s)=%d\n",
			 __FILE__,__LINE__, info->device_name, ret);
	return ret;
}

/* Return the count of bytes in transmit buffer
 */
static int mgslpc_chars_in_buffer(struct tty_struct *tty)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	int rc;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_chars_in_buffer(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer"))
		return 0;

	if (info->params.mode == MGSL_MODE_HDLC)
		rc = info->tx_active ? info->max_frame_size : 0;
	else
		rc = info->tx_count;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n",
			 __FILE__,__LINE__, info->device_name, rc);
	return rc;
}

/* Discard all data in the send buffer
 */
static void mgslpc_flush_buffer(struct tty_struct *tty)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_flush_buffer(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer"))
		return;

	spin_lock_irqsave(&info->lock,flags);
	info->tx_count = info->tx_put = info->tx_get = 0;
	del_timer(&info->tx_timer);
	spin_unlock_irqrestore(&info->lock,flags);

	wake_up_interruptible(&tty->write_wait);
	tty_wakeup(tty);
}

/* Send a high-priority XON/XOFF character
 */
static void mgslpc_send_xchar(struct tty_struct *tty, char ch)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_send_xchar(%s,%d)\n",
			 __FILE__,__LINE__, info->device_name, ch );

	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar"))
		return;

	info->x_char = ch;
	if (ch) {
		spin_lock_irqsave(&info->lock,flags);
		if (!info->tx_enabled)
			tx_start(info, tty);
		spin_unlock_irqrestore(&info->lock,flags);
	}
}

/* Signal remote device to throttle send data (our receive data)
 */
static void mgslpc_throttle(struct tty_struct * tty)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_throttle(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle"))
		return;

	if (I_IXOFF(tty))
		mgslpc_send_xchar(tty, STOP_CHAR(tty));

	/* hardware flow control: drop RTS to pause the remote sender */
	if (tty->termios->c_cflag & CRTSCTS) {
		spin_lock_irqsave(&info->lock,flags);
		info->serial_signals &= ~SerialSignal_RTS;
		set_signals(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}
}

/* Signal remote device to stop
throttling send data (our receive data) */ static void mgslpc_unthrottle(struct tty_struct * tty) { MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_unthrottle(%s) entry\n", __FILE__,__LINE__, info->device_name ); if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle")) return; if (I_IXOFF(tty)) { if (info->x_char) info->x_char = 0; else mgslpc_send_xchar(tty, START_CHAR(tty)); } if (tty->termios->c_cflag & CRTSCTS) { spin_lock_irqsave(&info->lock,flags); info->serial_signals |= SerialSignal_RTS; set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } } /* get the current serial statistics */ static int get_stats(MGSLPC_INFO * info, struct mgsl_icount __user *user_icount) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("get_params(%s)\n", info->device_name); if (!user_icount) { memset(&info->icount, 0, sizeof(info->icount)); } else { COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount)); if (err) return -EFAULT; } return 0; } /* get the current serial parameters */ static int get_params(MGSLPC_INFO * info, MGSL_PARAMS __user *user_params) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("get_params(%s)\n", info->device_name); COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS)); if (err) return -EFAULT; return 0; } /* set the serial parameters * * Arguments: * * info pointer to device instance data * new_params user buffer containing new serial params * * Returns: 0 if success, otherwise error code */ static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty) { unsigned long flags; MGSL_PARAMS tmp_params; int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):set_params %s\n", __FILE__,__LINE__, info->device_name ); COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS)); if (err) { if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):set_params(%s) user buffer copy 
failed\n", __FILE__,__LINE__,info->device_name); return -EFAULT; } spin_lock_irqsave(&info->lock,flags); memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); spin_unlock_irqrestore(&info->lock,flags); mgslpc_change_params(info, tty); return 0; } static int get_txidle(MGSLPC_INFO * info, int __user *idle_mode) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("get_txidle(%s)=%d\n", info->device_name, info->idle_mode); COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int)); if (err) return -EFAULT; return 0; } static int set_txidle(MGSLPC_INFO * info, int idle_mode) { unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("set_txidle(%s,%d)\n", info->device_name, idle_mode); spin_lock_irqsave(&info->lock,flags); info->idle_mode = idle_mode; tx_set_idle(info); spin_unlock_irqrestore(&info->lock,flags); return 0; } static int get_interface(MGSLPC_INFO * info, int __user *if_mode) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("get_interface(%s)=%d\n", info->device_name, info->if_mode); COPY_TO_USER(err,if_mode, &info->if_mode, sizeof(int)); if (err) return -EFAULT; return 0; } static int set_interface(MGSLPC_INFO * info, int if_mode) { unsigned long flags; unsigned char val; if (debug_level >= DEBUG_LEVEL_INFO) printk("set_interface(%s,%d)\n", info->device_name, if_mode); spin_lock_irqsave(&info->lock,flags); info->if_mode = if_mode; val = read_reg(info, PVR) & 0x0f; switch (info->if_mode) { case MGSL_INTERFACE_RS232: val |= PVR_RS232; break; case MGSL_INTERFACE_V35: val |= PVR_V35; break; case MGSL_INTERFACE_RS422: val |= PVR_RS422; break; } write_reg(info, PVR, val); spin_unlock_irqrestore(&info->lock,flags); return 0; } static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty) { unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("set_txenable(%s,%d)\n", info->device_name, enable); spin_lock_irqsave(&info->lock,flags); if (enable) { if (!info->tx_enabled) tx_start(info, tty); } else { if 
(info->tx_enabled) tx_stop(info); } spin_unlock_irqrestore(&info->lock,flags); return 0; } static int tx_abort(MGSLPC_INFO * info) { unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("tx_abort(%s)\n", info->device_name); spin_lock_irqsave(&info->lock,flags); if (info->tx_active && info->tx_count && info->params.mode == MGSL_MODE_HDLC) { /* clear data count so FIFO is not filled on next IRQ. * This results in underrun and abort transmission. */ info->tx_count = info->tx_put = info->tx_get = 0; info->tx_aborting = true; } spin_unlock_irqrestore(&info->lock,flags); return 0; } static int set_rxenable(MGSLPC_INFO * info, int enable) { unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("set_rxenable(%s,%d)\n", info->device_name, enable); spin_lock_irqsave(&info->lock,flags); if (enable) { if (!info->rx_enabled) rx_start(info); } else { if (info->rx_enabled) rx_stop(info); } spin_unlock_irqrestore(&info->lock,flags); return 0; } /* wait for specified event to occur * * Arguments: info pointer to device instance data * mask pointer to bitmask of events to wait for * Return Value: 0 if successful and bit mask updated with * of events triggerred, * otherwise error code */ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr) { unsigned long flags; int s; int rc=0; struct mgsl_icount cprev, cnow; int events; int mask; struct _input_signal_events oldsigs, newsigs; DECLARE_WAITQUEUE(wait, current); COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int)); if (rc) return -EFAULT; if (debug_level >= DEBUG_LEVEL_INFO) printk("wait_events(%s,%d)\n", info->device_name, mask); spin_lock_irqsave(&info->lock,flags); /* return immediately if state matches requested events */ get_signals(info); s = info->serial_signals; events = mask & ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) + ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) + ((s & SerialSignal_CTS) ? 
MgslEvent_CtsActive:MgslEvent_CtsInactive) + ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) ); if (events) { spin_unlock_irqrestore(&info->lock,flags); goto exit; } /* save current irq counts */ cprev = info->icount; oldsigs = info->input_signal_events; if ((info->params.mode == MGSL_MODE_HDLC) && (mask & MgslEvent_ExitHuntMode)) irq_enable(info, CHA, IRQ_EXITHUNT); set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&info->event_wait_q, &wait); spin_unlock_irqrestore(&info->lock,flags); for(;;) { schedule(); if (signal_pending(current)) { rc = -ERESTARTSYS; break; } /* get current irq counts */ spin_lock_irqsave(&info->lock,flags); cnow = info->icount; newsigs = info->input_signal_events; set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&info->lock,flags); /* if no change, wait aborted for some reason */ if (newsigs.dsr_up == oldsigs.dsr_up && newsigs.dsr_down == oldsigs.dsr_down && newsigs.dcd_up == oldsigs.dcd_up && newsigs.dcd_down == oldsigs.dcd_down && newsigs.cts_up == oldsigs.cts_up && newsigs.cts_down == oldsigs.cts_down && newsigs.ri_up == oldsigs.ri_up && newsigs.ri_down == oldsigs.ri_down && cnow.exithunt == cprev.exithunt && cnow.rxidle == cprev.rxidle) { rc = -EIO; break; } events = mask & ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) + (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) + (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) + (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) + (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) + (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) + (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) + (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) + (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) + (cnow.rxidle != cprev.rxidle ? 
MgslEvent_IdleReceived:0) ); if (events) break; cprev = cnow; oldsigs = newsigs; } remove_wait_queue(&info->event_wait_q, &wait); set_current_state(TASK_RUNNING); if (mask & MgslEvent_ExitHuntMode) { spin_lock_irqsave(&info->lock,flags); if (!waitqueue_active(&info->event_wait_q)) irq_disable(info, CHA, IRQ_EXITHUNT); spin_unlock_irqrestore(&info->lock,flags); } exit: if (rc == 0) PUT_USER(rc, events, mask_ptr); return rc; } static int modem_input_wait(MGSLPC_INFO *info,int arg) { unsigned long flags; int rc; struct mgsl_icount cprev, cnow; DECLARE_WAITQUEUE(wait, current); /* save current irq counts */ spin_lock_irqsave(&info->lock,flags); cprev = info->icount; add_wait_queue(&info->status_event_wait_q, &wait); set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&info->lock,flags); for(;;) { schedule(); if (signal_pending(current)) { rc = -ERESTARTSYS; break; } /* get new irq counts */ spin_lock_irqsave(&info->lock,flags); cnow = info->icount; set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&info->lock,flags); /* if no change, wait aborted for some reason */ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { rc = -EIO; break; } /* check for change in caller specified modem input */ if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) || (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) || (arg & TIOCM_CD && cnow.dcd != cprev.dcd) || (arg & TIOCM_CTS && cnow.cts != cprev.cts)) { rc = 0; break; } cprev = cnow; } remove_wait_queue(&info->status_event_wait_q, &wait); set_current_state(TASK_RUNNING); return rc; } /* return the state of the serial control and status signals */ static int tiocmget(struct tty_struct *tty) { MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; unsigned int result; unsigned long flags; spin_lock_irqsave(&info->lock,flags); get_signals(info); spin_unlock_irqrestore(&info->lock,flags); result = ((info->serial_signals & SerialSignal_RTS) ? 
TIOCM_RTS:0) + ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) + ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) + ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) + ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) + ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0); if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s tiocmget() value=%08X\n", __FILE__,__LINE__, info->device_name, result ); return result; } /* set modem control signals (DTR/RTS) */ static int tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s tiocmset(%x,%x)\n", __FILE__,__LINE__,info->device_name, set, clear); if (set & TIOCM_RTS) info->serial_signals |= SerialSignal_RTS; if (set & TIOCM_DTR) info->serial_signals |= SerialSignal_DTR; if (clear & TIOCM_RTS) info->serial_signals &= ~SerialSignal_RTS; if (clear & TIOCM_DTR) info->serial_signals &= ~SerialSignal_DTR; spin_lock_irqsave(&info->lock,flags); set_signals(info); spin_unlock_irqrestore(&info->lock,flags); return 0; } /* Set or clear transmit break condition * * Arguments: tty pointer to tty instance data * break_state -1=set break condition, 0=clear */ static int mgslpc_break(struct tty_struct *tty, int break_state) { MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_break(%s,%d)\n", __FILE__,__LINE__, info->device_name, break_state); if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break")) return -EINVAL; spin_lock_irqsave(&info->lock,flags); if (break_state == -1) set_reg_bits(info, CHA+DAFO, BIT6); else clear_reg_bits(info, CHA+DAFO, BIT6); spin_unlock_irqrestore(&info->lock,flags); return 0; } static int mgslpc_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount) { MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; struct mgsl_icount 
cnow; /* kernel counter temps */ unsigned long flags; spin_lock_irqsave(&info->lock,flags); cnow = info->icount; spin_unlock_irqrestore(&info->lock,flags); icount->cts = cnow.cts; icount->dsr = cnow.dsr; icount->rng = cnow.rng; icount->dcd = cnow.dcd; icount->rx = cnow.rx; icount->tx = cnow.tx; icount->frame = cnow.frame; icount->overrun = cnow.overrun; icount->parity = cnow.parity; icount->brk = cnow.brk; icount->buf_overrun = cnow.buf_overrun; return 0; } /* Service an IOCTL request * * Arguments: * * tty pointer to tty instance data * cmd IOCTL command code * arg command argument/context * * Return Value: 0 if success, otherwise error code */ static int mgslpc_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; void __user *argp = (void __user *)arg; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__, info->device_name, cmd ); if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl")) return -ENODEV; if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && (cmd != TIOCMIWAIT)) { if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; } switch (cmd) { case MGSL_IOCGPARAMS: return get_params(info, argp); case MGSL_IOCSPARAMS: return set_params(info, argp, tty); case MGSL_IOCGTXIDLE: return get_txidle(info, argp); case MGSL_IOCSTXIDLE: return set_txidle(info, (int)arg); case MGSL_IOCGIF: return get_interface(info, argp); case MGSL_IOCSIF: return set_interface(info,(int)arg); case MGSL_IOCTXENABLE: return set_txenable(info,(int)arg, tty); case MGSL_IOCRXENABLE: return set_rxenable(info,(int)arg); case MGSL_IOCTXABORT: return tx_abort(info); case MGSL_IOCGSTATS: return get_stats(info, argp); case MGSL_IOCWAITEVENT: return wait_events(info, argp); case TIOCMIWAIT: return modem_input_wait(info,(int)arg); default: return -ENOIOCTLCMD; } return 0; } /* Set new termios settings * * Arguments: * * tty pointer to tty structure * termios pointer to buffer to hold 
returned old termios */ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_set_termios %s\n", __FILE__,__LINE__, tty->driver->name ); /* just return if nothing has changed */ if ((tty->termios->c_cflag == old_termios->c_cflag) && (RELEVANT_IFLAG(tty->termios->c_iflag) == RELEVANT_IFLAG(old_termios->c_iflag))) return; mgslpc_change_params(info, tty); /* Handle transition to B0 status */ if (old_termios->c_cflag & CBAUD && !(tty->termios->c_cflag & CBAUD)) { info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); spin_lock_irqsave(&info->lock,flags); set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } /* Handle transition away from B0 status */ if (!(old_termios->c_cflag & CBAUD) && tty->termios->c_cflag & CBAUD) { info->serial_signals |= SerialSignal_DTR; if (!(tty->termios->c_cflag & CRTSCTS) || !test_bit(TTY_THROTTLED, &tty->flags)) { info->serial_signals |= SerialSignal_RTS; } spin_lock_irqsave(&info->lock,flags); set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } /* Handle turning off CRTSCTS */ if (old_termios->c_cflag & CRTSCTS && !(tty->termios->c_cflag & CRTSCTS)) { tty->hw_stopped = 0; tx_release(tty); } } static void mgslpc_close(struct tty_struct *tty, struct file * filp) { MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; struct tty_port *port = &info->port; if (mgslpc_paranoia_check(info, tty->name, "mgslpc_close")) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_close(%s) entry, count=%d\n", __FILE__,__LINE__, info->device_name, port->count); WARN_ON(!port->count); if (tty_port_close_start(port, tty, filp) == 0) goto cleanup; if (port->flags & ASYNC_INITIALIZED) mgslpc_wait_until_sent(tty, info->timeout); mgslpc_flush_buffer(tty); tty_ldisc_flush(tty); shutdown(info, tty); tty_port_close_end(port, tty); tty_port_tty_set(port, 
NULL); cleanup: if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__, tty->driver->name, port->count); } /* Wait until the transmitter is empty. */ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout) { MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; unsigned long orig_jiffies, char_time; if (!info ) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n", __FILE__,__LINE__, info->device_name ); if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent")) return; if (!(info->port.flags & ASYNC_INITIALIZED)) goto exit; orig_jiffies = jiffies; /* Set check interval to 1/5 of estimated time to * send a character, and make it at least 1. The check * interval should also be less than the timeout. * Note: use tight timings here to satisfy the NIST-PCTS. */ if ( info->params.data_rate ) { char_time = info->timeout/(32 * 5); if (!char_time) char_time++; } else char_time = 1; if (timeout) char_time = min_t(unsigned long, char_time, timeout); if (info->params.mode == MGSL_MODE_HDLC) { while (info->tx_active) { msleep_interruptible(jiffies_to_msecs(char_time)); if (signal_pending(current)) break; if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } } else { while ((info->tx_count || info->tx_active) && info->tx_enabled) { msleep_interruptible(jiffies_to_msecs(char_time)); if (signal_pending(current)) break; if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } } exit: if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n", __FILE__,__LINE__, info->device_name ); } /* Called by tty_hangup() when a hangup is signaled. * This is the same as closing all open files for the port. 
*/ static void mgslpc_hangup(struct tty_struct *tty) { MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_hangup(%s)\n", __FILE__,__LINE__, info->device_name ); if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup")) return; mgslpc_flush_buffer(tty); shutdown(info, tty); tty_port_hangup(&info->port); } static int carrier_raised(struct tty_port *port) { MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port); unsigned long flags; spin_lock_irqsave(&info->lock,flags); get_signals(info); spin_unlock_irqrestore(&info->lock,flags); if (info->serial_signals & SerialSignal_DCD) return 1; return 0; } static void dtr_rts(struct tty_port *port, int onoff) { MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port); unsigned long flags; spin_lock_irqsave(&info->lock,flags); if (onoff) info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; else info->serial_signals &= ~SerialSignal_RTS + SerialSignal_DTR; set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } static int mgslpc_open(struct tty_struct *tty, struct file * filp) { MGSLPC_INFO *info; struct tty_port *port; int retval, line; unsigned long flags; /* verify range of specified line number */ line = tty->index; if (line >= mgslpc_device_count) { printk("%s(%d):mgslpc_open with invalid line #%d.\n", __FILE__,__LINE__,line); return -ENODEV; } /* find the info structure for the specified line */ info = mgslpc_device_list; while(info && info->line != line) info = info->next_device; if (mgslpc_paranoia_check(info, tty->name, "mgslpc_open")) return -ENODEV; port = &info->port; tty->driver_data = info; tty_port_tty_set(port, tty); if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_open(%s), old ref count = %d\n", __FILE__,__LINE__,tty->driver->name, port->count); /* If port is closing, signal caller to try again */ if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){ if (port->flags & ASYNC_CLOSING) 
interruptible_sleep_on(&port->close_wait); retval = ((port->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS); goto cleanup; } tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0; spin_lock_irqsave(&info->netlock, flags); if (info->netcount) { retval = -EBUSY; spin_unlock_irqrestore(&info->netlock, flags); goto cleanup; } spin_lock(&port->lock); port->count++; spin_unlock(&port->lock); spin_unlock_irqrestore(&info->netlock, flags); if (port->count == 1) { /* 1st open on this device, init hardware */ retval = startup(info, tty); if (retval < 0) goto cleanup; } retval = tty_port_block_til_ready(&info->port, tty, filp); if (retval) { if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):block_til_ready(%s) returned %d\n", __FILE__,__LINE__, info->device_name, retval); goto cleanup; } if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_open(%s) success\n", __FILE__,__LINE__, info->device_name); retval = 0; cleanup: return retval; } /* * /proc fs routines.... */ static inline void line_info(struct seq_file *m, MGSLPC_INFO *info) { char stat_buf[30]; unsigned long flags; seq_printf(m, "%s:io:%04X irq:%d", info->device_name, info->io_base, info->irq_level); /* output current serial signal states */ spin_lock_irqsave(&info->lock,flags); get_signals(info); spin_unlock_irqrestore(&info->lock,flags); stat_buf[0] = 0; stat_buf[1] = 0; if (info->serial_signals & SerialSignal_RTS) strcat(stat_buf, "|RTS"); if (info->serial_signals & SerialSignal_CTS) strcat(stat_buf, "|CTS"); if (info->serial_signals & SerialSignal_DTR) strcat(stat_buf, "|DTR"); if (info->serial_signals & SerialSignal_DSR) strcat(stat_buf, "|DSR"); if (info->serial_signals & SerialSignal_DCD) strcat(stat_buf, "|CD"); if (info->serial_signals & SerialSignal_RI) strcat(stat_buf, "|RI"); if (info->params.mode == MGSL_MODE_HDLC) { seq_printf(m, " HDLC txok:%d rxok:%d", info->icount.txok, info->icount.rxok); if (info->icount.txunder) seq_printf(m, " txunder:%d", info->icount.txunder); if 
(info->icount.txabort) seq_printf(m, " txabort:%d", info->icount.txabort); if (info->icount.rxshort) seq_printf(m, " rxshort:%d", info->icount.rxshort); if (info->icount.rxlong) seq_printf(m, " rxlong:%d", info->icount.rxlong); if (info->icount.rxover) seq_printf(m, " rxover:%d", info->icount.rxover); if (info->icount.rxcrc) seq_printf(m, " rxcrc:%d", info->icount.rxcrc); } else { seq_printf(m, " ASYNC tx:%d rx:%d", info->icount.tx, info->icount.rx); if (info->icount.frame) seq_printf(m, " fe:%d", info->icount.frame); if (info->icount.parity) seq_printf(m, " pe:%d", info->icount.parity); if (info->icount.brk) seq_printf(m, " brk:%d", info->icount.brk); if (info->icount.overrun) seq_printf(m, " oe:%d", info->icount.overrun); } /* Append serial signal status to end */ seq_printf(m, " %s\n", stat_buf+1); seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n", info->tx_active,info->bh_requested,info->bh_running, info->pending_bh); } /* Called to print information about devices */ static int mgslpc_proc_show(struct seq_file *m, void *v) { MGSLPC_INFO *info; seq_printf(m, "synclink driver:%s\n", driver_version); info = mgslpc_device_list; while( info ) { line_info(m, info); info = info->next_device; } return 0; } static int mgslpc_proc_open(struct inode *inode, struct file *file) { return single_open(file, mgslpc_proc_show, NULL); } static const struct file_operations mgslpc_proc_fops = { .owner = THIS_MODULE, .open = mgslpc_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int rx_alloc_buffers(MGSLPC_INFO *info) { /* each buffer has header and data */ info->rx_buf_size = sizeof(RXBUF) + info->max_frame_size; /* calculate total allocation size for 8 buffers */ info->rx_buf_total_size = info->rx_buf_size * 8; /* limit total allocated memory */ if (info->rx_buf_total_size > 0x10000) info->rx_buf_total_size = 0x10000; /* calculate number of buffers */ info->rx_buf_count = info->rx_buf_total_size / info->rx_buf_size; info->rx_buf 
= kmalloc(info->rx_buf_total_size, GFP_KERNEL); if (info->rx_buf == NULL) return -ENOMEM; rx_reset_buffers(info); return 0; } static void rx_free_buffers(MGSLPC_INFO *info) { kfree(info->rx_buf); info->rx_buf = NULL; } static int claim_resources(MGSLPC_INFO *info) { if (rx_alloc_buffers(info) < 0 ) { printk( "Can't allocate rx buffer %s\n", info->device_name); release_resources(info); return -ENODEV; } return 0; } static void release_resources(MGSLPC_INFO *info) { if (debug_level >= DEBUG_LEVEL_INFO) printk("release_resources(%s)\n", info->device_name); rx_free_buffers(info); } /* Add the specified device instance data structure to the * global linked list of devices and increment the device count. * * Arguments: info pointer to device instance data */ static void mgslpc_add_device(MGSLPC_INFO *info) { info->next_device = NULL; info->line = mgslpc_device_count; sprintf(info->device_name,"ttySLP%d",info->line); if (info->line < MAX_DEVICE_COUNT) { if (maxframe[info->line]) info->max_frame_size = maxframe[info->line]; } mgslpc_device_count++; if (!mgslpc_device_list) mgslpc_device_list = info; else { MGSLPC_INFO *current_dev = mgslpc_device_list; while( current_dev->next_device ) current_dev = current_dev->next_device; current_dev->next_device = info; } if (info->max_frame_size < 4096) info->max_frame_size = 4096; else if (info->max_frame_size > 65535) info->max_frame_size = 65535; printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n", info->device_name, info->io_base, info->irq_level); #if SYNCLINK_GENERIC_HDLC hdlcdev_init(info); #endif } static void mgslpc_remove_device(MGSLPC_INFO *remove_info) { MGSLPC_INFO *info = mgslpc_device_list; MGSLPC_INFO *last = NULL; while(info) { if (info == remove_info) { if (last) last->next_device = info->next_device; else mgslpc_device_list = info->next_device; #if SYNCLINK_GENERIC_HDLC hdlcdev_exit(info); #endif release_resources(info); kfree(info); mgslpc_device_count--; return; } last = info; info = info->next_device; } } static const 
struct pcmcia_device_id mgslpc_ids[] = {
	PCMCIA_DEVICE_MANF_CARD(0x02c5, 0x0050),
	PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, mgslpc_ids);

static struct pcmcia_driver mgslpc_driver = {
	.owner		= THIS_MODULE,
	.name		= "synclink_cs",
	.probe		= mgslpc_probe,
	.remove		= mgslpc_detach,
	.id_table	= mgslpc_ids,
	.suspend	= mgslpc_suspend,
	.resume		= mgslpc_resume,
};

static const struct tty_operations mgslpc_ops = {
	.open = mgslpc_open,
	.close = mgslpc_close,
	.write = mgslpc_write,
	.put_char = mgslpc_put_char,
	.flush_chars = mgslpc_flush_chars,
	.write_room = mgslpc_write_room,
	.chars_in_buffer = mgslpc_chars_in_buffer,
	.flush_buffer = mgslpc_flush_buffer,
	.ioctl = mgslpc_ioctl,
	.throttle = mgslpc_throttle,
	.unthrottle = mgslpc_unthrottle,
	.send_xchar = mgslpc_send_xchar,
	.break_ctl = mgslpc_break,
	.wait_until_sent = mgslpc_wait_until_sent,
	.set_termios = mgslpc_set_termios,
	.stop = tx_pause,
	.start = tx_release,
	.hangup = mgslpc_hangup,
	.tiocmget = tiocmget,
	.tiocmset = tiocmset,
	.get_icount = mgslpc_get_icount,
	.proc_fops = &mgslpc_proc_fops,
};

/* Driver-wide teardown: remove all device instances, then unregister
 * the tty driver and the PCMCIA driver. Safe to call from the init
 * error path (serial_driver may still be NULL).
 */
static void synclink_cs_cleanup(void)
{
	int rc;

	while(mgslpc_device_list)
		mgslpc_remove_device(mgslpc_device_list);

	if (serial_driver) {
		if ((rc = tty_unregister_driver(serial_driver)))
			printk("%s(%d) failed to unregister tty driver err=%d\n",
			       __FILE__,__LINE__,rc);
		put_tty_driver(serial_driver);
	}

	pcmcia_unregister_driver(&mgslpc_driver);
}

/* Module init: register the PCMCIA driver, then allocate and register
 * the tty driver. On any failure everything registered so far is torn
 * down via synclink_cs_cleanup().
 */
static int __init synclink_cs_init(void)
{
	int rc;

	if (break_on_load) {
		mgslpc_get_text_ptr();
		BREAKPOINT();
	}

	if ((rc = pcmcia_register_driver(&mgslpc_driver)) < 0)
		return rc;

	serial_driver = alloc_tty_driver(MAX_DEVICE_COUNT);
	if (!serial_driver) {
		rc = -ENOMEM;
		goto error;
	}

	/* Initialize the tty_driver structure */
	serial_driver->driver_name = "synclink_cs";
	serial_driver->name = "ttySLP";
	serial_driver->major = ttymajor;
	serial_driver->minor_start = 64;
	serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
	serial_driver->subtype = SERIAL_TYPE_NORMAL;
	serial_driver->init_termios = tty_std_termios;
	serial_driver->init_termios.c_cflag =
		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	serial_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(serial_driver, &mgslpc_ops);

	if ((rc = tty_register_driver(serial_driver)) < 0) {
		printk("%s(%d):Couldn't register serial driver\n",
		       __FILE__,__LINE__);
		put_tty_driver(serial_driver);
		serial_driver = NULL;
		goto error;
	}

	printk("%s %s, tty major#%d\n",
	       driver_name, driver_version,
	       serial_driver->major);

	return 0;

error:
	synclink_cs_cleanup();
	return rc;
}

static void __exit synclink_cs_exit(void)
{
	synclink_cs_cleanup();
}

module_init(synclink_cs_init);
module_exit(synclink_cs_exit);

/* Program the baud rate generator of the given channel (CHA/CHB)
 * for the requested rate. A rate of 0 leaves the BRG untouched.
 */
static void mgslpc_set_rate(MGSLPC_INFO *info, unsigned char channel, unsigned int rate)
{
	unsigned int M, N;
	unsigned char val;

	/* note:standard BRG mode is broken in V3.2 chip
	 * so enhanced mode is always used
	 */

	if (rate) {
		N = 3686400 / rate;
		if (!N)
			N = 1;
		N >>= 1;
		for (M = 1; N > 64 && M < 16; M++)
			N >>= 1;
		N--;

		/* BGR[5..0] = N
		 * BGR[9..6] = M
		 * BGR[7..0] contained in BGR register
		 * BGR[9..8] contained in CCR2[7..6]
		 * divisor = (N+1)*2^M
		 *
		 * Note: M *must* not be zero (causes asymmetric duty cycle)
		 */
		write_reg(info, (unsigned char) (channel + BGR),
			  (unsigned char) ((M << 6) + N));
		val = read_reg(info, (unsigned char) (channel + CCR2)) & 0x3f;
		val |= ((M << 4) & 0xc0);
		write_reg(info, (unsigned char) (channel + CCR2), val);
	}
}

/* Enable the AUX clock output at the specified frequency.
*/ static void enable_auxclk(MGSLPC_INFO *info) { unsigned char val; /* MODE * * 07..06 MDS[1..0] 10 = transparent HDLC mode * 05 ADM Address Mode, 0 = no addr recognition * 04 TMD Timer Mode, 0 = external * 03 RAC Receiver Active, 0 = inactive * 02 RTS 0=RTS active during xmit, 1=RTS always active * 01 TRS Timer Resolution, 1=512 * 00 TLP Test Loop, 0 = no loop * * 1000 0010 */ val = 0x82; /* channel B RTS is used to enable AUXCLK driver on SP505 */ if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed) val |= BIT2; write_reg(info, CHB + MODE, val); /* CCR0 * * 07 PU Power Up, 1=active, 0=power down * 06 MCE Master Clock Enable, 1=enabled * 05 Reserved, 0 * 04..02 SC[2..0] Encoding * 01..00 SM[1..0] Serial Mode, 00=HDLC * * 11000000 */ write_reg(info, CHB + CCR0, 0xc0); /* CCR1 * * 07 SFLG Shared Flag, 0 = disable shared flags * 06 GALP Go Active On Loop, 0 = not used * 05 GLP Go On Loop, 0 = not used * 04 ODS Output Driver Select, 1=TxD is push-pull output * 03 ITF Interframe Time Fill, 0=mark, 1=flag * 02..00 CM[2..0] Clock Mode * * 0001 0111 */ write_reg(info, CHB + CCR1, 0x17); /* CCR2 (Channel B) * * 07..06 BGR[9..8] Baud rate bits 9..8 * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value * 04 SSEL Clock source select, 1=submode b * 03 TOE 0=TxCLK is input, 1=TxCLK is output * 02 RWX Read/Write Exchange 0=disabled * 01 C32, CRC select, 0=CRC-16, 1=CRC-32 * 00 DIV, data inversion 0=disabled, 1=enabled * * 0011 1000 */ if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed) write_reg(info, CHB + CCR2, 0x38); else write_reg(info, CHB + CCR2, 0x30); /* CCR4 * * 07 MCK4 Master Clock Divide by 4, 1=enabled * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled * 05 TST1 Test Pin, 0=normal operation * 04 ICD Ivert Carrier Detect, 1=enabled (active low) * 03..02 Reserved, must be 0 * 01..00 RFT[1..0] RxFIFO Threshold 00=32 bytes * * 0101 0000 */ write_reg(info, CHB + CCR4, 0x50); /* if auxclk not enabled, set internal BRG so * CTS 
transitions can be detected (requires TxC) */ if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed) mgslpc_set_rate(info, CHB, info->params.clock_speed); else mgslpc_set_rate(info, CHB, 921600); } static void loopback_enable(MGSLPC_INFO *info) { unsigned char val; /* CCR1:02..00 CM[2..0] Clock Mode = 111 (clock mode 7) */ val = read_reg(info, CHA + CCR1) | (BIT2 + BIT1 + BIT0); write_reg(info, CHA + CCR1, val); /* CCR2:04 SSEL Clock source select, 1=submode b */ val = read_reg(info, CHA + CCR2) | (BIT4 + BIT5); write_reg(info, CHA + CCR2, val); /* set LinkSpeed if available, otherwise default to 2Mbps */ if (info->params.clock_speed) mgslpc_set_rate(info, CHA, info->params.clock_speed); else mgslpc_set_rate(info, CHA, 1843200); /* MODE:00 TLP Test Loop, 1=loopback enabled */ val = read_reg(info, CHA + MODE) | BIT0; write_reg(info, CHA + MODE, val); } static void hdlc_mode(MGSLPC_INFO *info) { unsigned char val; unsigned char clkmode, clksubmode; /* disable all interrupts */ irq_disable(info, CHA, 0xffff); irq_disable(info, CHB, 0xffff); port_irq_disable(info, 0xff); /* assume clock mode 0a, rcv=RxC xmt=TxC */ clkmode = clksubmode = 0; if (info->params.flags & HDLC_FLAG_RXC_DPLL && info->params.flags & HDLC_FLAG_TXC_DPLL) { /* clock mode 7a, rcv = DPLL, xmt = DPLL */ clkmode = 7; } else if (info->params.flags & HDLC_FLAG_RXC_BRG && info->params.flags & HDLC_FLAG_TXC_BRG) { /* clock mode 7b, rcv = BRG, xmt = BRG */ clkmode = 7; clksubmode = 1; } else if (info->params.flags & HDLC_FLAG_RXC_DPLL) { if (info->params.flags & HDLC_FLAG_TXC_BRG) { /* clock mode 6b, rcv = DPLL, xmt = BRG/16 */ clkmode = 6; clksubmode = 1; } else { /* clock mode 6a, rcv = DPLL, xmt = TxC */ clkmode = 6; } } else if (info->params.flags & HDLC_FLAG_TXC_BRG) { /* clock mode 0b, rcv = RxC, xmt = BRG */ clksubmode = 1; } /* MODE * * 07..06 MDS[1..0] 10 = transparent HDLC mode * 05 ADM Address Mode, 0 = no addr recognition * 04 TMD Timer Mode, 0 = external * 03 RAC Receiver Active, 
0 = inactive * 02 RTS 0=RTS active during xmit, 1=RTS always active * 01 TRS Timer Resolution, 1=512 * 00 TLP Test Loop, 0 = no loop * * 1000 0010 */ val = 0x82; if (info->params.loopback) val |= BIT0; /* preserve RTS state */ if (info->serial_signals & SerialSignal_RTS) val |= BIT2; write_reg(info, CHA + MODE, val); /* CCR0 * * 07 PU Power Up, 1=active, 0=power down * 06 MCE Master Clock Enable, 1=enabled * 05 Reserved, 0 * 04..02 SC[2..0] Encoding * 01..00 SM[1..0] Serial Mode, 00=HDLC * * 11000000 */ val = 0xc0; switch (info->params.encoding) { case HDLC_ENCODING_NRZI: val |= BIT3; break; case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT4; break; // FM0 case HDLC_ENCODING_BIPHASE_MARK: val |= BIT4 + BIT2; break; // FM1 case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT4 + BIT3; break; // Manchester } write_reg(info, CHA + CCR0, val); /* CCR1 * * 07 SFLG Shared Flag, 0 = disable shared flags * 06 GALP Go Active On Loop, 0 = not used * 05 GLP Go On Loop, 0 = not used * 04 ODS Output Driver Select, 1=TxD is push-pull output * 03 ITF Interframe Time Fill, 0=mark, 1=flag * 02..00 CM[2..0] Clock Mode * * 0001 0000 */ val = 0x10 + clkmode; write_reg(info, CHA + CCR1, val); /* CCR2 * * 07..06 BGR[9..8] Baud rate bits 9..8 * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value * 04 SSEL Clock source select, 1=submode b * 03 TOE 0=TxCLK is input, 0=TxCLK is input * 02 RWX Read/Write Exchange 0=disabled * 01 C32, CRC select, 0=CRC-16, 1=CRC-32 * 00 DIV, data inversion 0=disabled, 1=enabled * * 0000 0000 */ val = 0x00; if (clkmode == 2 || clkmode == 3 || clkmode == 6 || clkmode == 7 || (clkmode == 0 && clksubmode == 1)) val |= BIT5; if (clksubmode) val |= BIT4; if (info->params.crc_type == HDLC_CRC_32_CCITT) val |= BIT1; if (info->params.encoding == HDLC_ENCODING_NRZB) val |= BIT0; write_reg(info, CHA + CCR2, val); /* CCR3 * * 07..06 PRE[1..0] Preamble count 00=1, 01=2, 10=4, 11=8 * 05 EPT Enable preamble transmission, 1=enabled * 04 RADD Receive address pushed to FIFO, 0=disabled * 03 
CRL CRC Reset Level, 0=FFFF * 02 RCRC Rx CRC 0=On 1=Off * 01 TCRC Tx CRC 0=On 1=Off * 00 PSD DPLL Phase Shift Disable * * 0000 0000 */ val = 0x00; if (info->params.crc_type == HDLC_CRC_NONE) val |= BIT2 + BIT1; if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE) val |= BIT5; switch (info->params.preamble_length) { case HDLC_PREAMBLE_LENGTH_16BITS: val |= BIT6; break; case HDLC_PREAMBLE_LENGTH_32BITS: val |= BIT6; break; case HDLC_PREAMBLE_LENGTH_64BITS: val |= BIT7 + BIT6; break; } write_reg(info, CHA + CCR3, val); /* PRE - Preamble pattern */ val = 0; switch (info->params.preamble) { case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break; case HDLC_PREAMBLE_PATTERN_10: val = 0xaa; break; case HDLC_PREAMBLE_PATTERN_01: val = 0x55; break; case HDLC_PREAMBLE_PATTERN_ONES: val = 0xff; break; } write_reg(info, CHA + PRE, val); /* CCR4 * * 07 MCK4 Master Clock Divide by 4, 1=enabled * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled * 05 TST1 Test Pin, 0=normal operation * 04 ICD Ivert Carrier Detect, 1=enabled (active low) * 03..02 Reserved, must be 0 * 01..00 RFT[1..0] RxFIFO Threshold 00=32 bytes * * 0101 0000 */ val = 0x50; write_reg(info, CHA + CCR4, val); if (info->params.flags & HDLC_FLAG_RXC_DPLL) mgslpc_set_rate(info, CHA, info->params.clock_speed * 16); else mgslpc_set_rate(info, CHA, info->params.clock_speed); /* RLCR Receive length check register * * 7 1=enable receive length check * 6..0 Max frame length = (RL + 1) * 32 */ write_reg(info, CHA + RLCR, 0); /* XBCH Transmit Byte Count High * * 07 DMA mode, 0 = interrupt driven * 06 NRM, 0=ABM (ignored) * 05 CAS Carrier Auto Start * 04 XC Transmit Continuously (ignored) * 03..00 XBC[10..8] Transmit byte count bits 10..8 * * 0000 0000 */ val = 0x00; if (info->params.flags & HDLC_FLAG_AUTO_DCD) val |= BIT5; write_reg(info, CHA + XBCH, val); enable_auxclk(info); if (info->params.loopback || info->testing_irq) loopback_enable(info); if (info->params.flags & HDLC_FLAG_AUTO_CTS) { irq_enable(info, CHB, 
IRQ_CTS); /* PVR[3] 1=AUTO CTS active */ set_reg_bits(info, CHA + PVR, BIT3); } else clear_reg_bits(info, CHA + PVR, BIT3); irq_enable(info, CHA, IRQ_RXEOM + IRQ_RXFIFO + IRQ_ALLSENT + IRQ_UNDERRUN + IRQ_TXFIFO); issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET); wait_command_complete(info, CHA); read_reg16(info, CHA + ISR); /* clear pending IRQs */ /* Master clock mode enabled above to allow reset commands * to complete even if no data clocks are present. * * Disable master clock mode for normal communications because * V3.2 of the ESCC2 has a bug that prevents the transmit all sent * IRQ when in master clock mode. * * Leave master clock mode enabled for IRQ test because the * timer IRQ used by the test can only happen in master clock mode. */ if (!info->testing_irq) clear_reg_bits(info, CHA + CCR0, BIT6); tx_set_idle(info); tx_stop(info); rx_stop(info); } static void rx_stop(MGSLPC_INFO *info) { if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):rx_stop(%s)\n", __FILE__,__LINE__, info->device_name ); /* MODE:03 RAC Receiver Active, 0=inactive */ clear_reg_bits(info, CHA + MODE, BIT3); info->rx_enabled = false; info->rx_overflow = false; } static void rx_start(MGSLPC_INFO *info) { if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):rx_start(%s)\n", __FILE__,__LINE__, info->device_name ); rx_reset_buffers(info); info->rx_enabled = false; info->rx_overflow = false; /* MODE:03 RAC Receiver Active, 1=active */ set_reg_bits(info, CHA + MODE, BIT3); info->rx_enabled = true; } static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty) { if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):tx_start(%s)\n", __FILE__,__LINE__, info->device_name ); if (info->tx_count) { /* If auto RTS enabled and RTS is inactive, then assert */ /* RTS and set a flag indicating that the driver should */ /* negate RTS when the transmission completes. 
*/ info->drop_rts_on_tx_done = false; if (info->params.flags & HDLC_FLAG_AUTO_RTS) { get_signals(info); if (!(info->serial_signals & SerialSignal_RTS)) { info->serial_signals |= SerialSignal_RTS; set_signals(info); info->drop_rts_on_tx_done = true; } } if (info->params.mode == MGSL_MODE_ASYNC) { if (!info->tx_active) { info->tx_active = true; tx_ready(info, tty); } } else { info->tx_active = true; tx_ready(info, tty); mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000)); } } if (!info->tx_enabled) info->tx_enabled = true; } static void tx_stop(MGSLPC_INFO *info) { if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):tx_stop(%s)\n", __FILE__,__LINE__, info->device_name ); del_timer(&info->tx_timer); info->tx_enabled = false; info->tx_active = false; } /* Reset the adapter to a known state and prepare it for further use. */ static void reset_device(MGSLPC_INFO *info) { /* power up both channels (set BIT7) */ write_reg(info, CHA + CCR0, 0x80); write_reg(info, CHB + CCR0, 0x80); write_reg(info, CHA + MODE, 0); write_reg(info, CHB + MODE, 0); /* disable all interrupts */ irq_disable(info, CHA, 0xffff); irq_disable(info, CHB, 0xffff); port_irq_disable(info, 0xff); /* PCR Port Configuration Register * * 07..04 DEC[3..0] Serial I/F select outputs * 03 output, 1=AUTO CTS control enabled * 02 RI Ring Indicator input 0=active * 01 DSR input 0=active * 00 DTR output 0=active * * 0000 0110 */ write_reg(info, PCR, 0x06); /* PVR Port Value Register * * 07..04 DEC[3..0] Serial I/F select (0000=disabled) * 03 AUTO CTS output 1=enabled * 02 RI Ring Indicator input * 01 DSR input * 00 DTR output (1=inactive) * * 0000 0001 */ // write_reg(info, PVR, PVR_DTR); /* IPC Interrupt Port Configuration * * 07 VIS 1=Masked interrupts visible * 06..05 Reserved, 0 * 04..03 SLA Slave address, 00 ignored * 02 CASM Cascading Mode, 1=daisy chain * 01..00 IC[1..0] Interrupt Config, 01=push-pull output, active low * * 0000 0101 */ write_reg(info, IPC, 0x05); } static void async_mode(MGSLPC_INFO 
*info) { unsigned char val; /* disable all interrupts */ irq_disable(info, CHA, 0xffff); irq_disable(info, CHB, 0xffff); port_irq_disable(info, 0xff); /* MODE * * 07 Reserved, 0 * 06 FRTS RTS State, 0=active * 05 FCTS Flow Control on CTS * 04 FLON Flow Control Enable * 03 RAC Receiver Active, 0 = inactive * 02 RTS 0=Auto RTS, 1=manual RTS * 01 TRS Timer Resolution, 1=512 * 00 TLP Test Loop, 0 = no loop * * 0000 0110 */ val = 0x06; if (info->params.loopback) val |= BIT0; /* preserve RTS state */ if (!(info->serial_signals & SerialSignal_RTS)) val |= BIT6; write_reg(info, CHA + MODE, val); /* CCR0 * * 07 PU Power Up, 1=active, 0=power down * 06 MCE Master Clock Enable, 1=enabled * 05 Reserved, 0 * 04..02 SC[2..0] Encoding, 000=NRZ * 01..00 SM[1..0] Serial Mode, 11=Async * * 1000 0011 */ write_reg(info, CHA + CCR0, 0x83); /* CCR1 * * 07..05 Reserved, 0 * 04 ODS Output Driver Select, 1=TxD is push-pull output * 03 BCR Bit Clock Rate, 1=16x * 02..00 CM[2..0] Clock Mode, 111=BRG * * 0001 1111 */ write_reg(info, CHA + CCR1, 0x1f); /* CCR2 (channel A) * * 07..06 BGR[9..8] Baud rate bits 9..8 * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value * 04 SSEL Clock source select, 1=submode b * 03 TOE 0=TxCLK is input, 0=TxCLK is input * 02 RWX Read/Write Exchange 0=disabled * 01 Reserved, 0 * 00 DIV, data inversion 0=disabled, 1=enabled * * 0001 0000 */ write_reg(info, CHA + CCR2, 0x10); /* CCR3 * * 07..01 Reserved, 0 * 00 PSD DPLL Phase Shift Disable * * 0000 0000 */ write_reg(info, CHA + CCR3, 0); /* CCR4 * * 07 MCK4 Master Clock Divide by 4, 1=enabled * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled * 05 TST1 Test Pin, 0=normal operation * 04 ICD Ivert Carrier Detect, 1=enabled (active low) * 03..00 Reserved, must be 0 * * 0101 0000 */ write_reg(info, CHA + CCR4, 0x50); mgslpc_set_rate(info, CHA, info->params.data_rate * 16); /* DAFO Data Format * * 07 Reserved, 0 * 06 XBRK transmit break, 0=normal operation * 05 Stop bits (0=1, 1=2) * 04..03 PAR[1..0] Parity (01=odd, 
10=even) * 02 PAREN Parity Enable * 01..00 CHL[1..0] Character Length (00=8, 01=7) * */ val = 0x00; if (info->params.data_bits != 8) val |= BIT0; /* 7 bits */ if (info->params.stop_bits != 1) val |= BIT5; if (info->params.parity != ASYNC_PARITY_NONE) { val |= BIT2; /* Parity enable */ if (info->params.parity == ASYNC_PARITY_ODD) val |= BIT3; else val |= BIT4; } write_reg(info, CHA + DAFO, val); /* RFC Rx FIFO Control * * 07 Reserved, 0 * 06 DPS, 1=parity bit not stored in data byte * 05 DXS, 0=all data stored in FIFO (including XON/XOFF) * 04 RFDF Rx FIFO Data Format, 1=status byte stored in FIFO * 03..02 RFTH[1..0], rx threshold, 11=16 status + 16 data byte * 01 Reserved, 0 * 00 TCDE Terminate Char Detect Enable, 0=disabled * * 0101 1100 */ write_reg(info, CHA + RFC, 0x5c); /* RLCR Receive length check register * * Max frame length = (RL + 1) * 32 */ write_reg(info, CHA + RLCR, 0); /* XBCH Transmit Byte Count High * * 07 DMA mode, 0 = interrupt driven * 06 NRM, 0=ABM (ignored) * 05 CAS Carrier Auto Start * 04 XC Transmit Continuously (ignored) * 03..00 XBC[10..8] Transmit byte count bits 10..8 * * 0000 0000 */ val = 0x00; if (info->params.flags & HDLC_FLAG_AUTO_DCD) val |= BIT5; write_reg(info, CHA + XBCH, val); if (info->params.flags & HDLC_FLAG_AUTO_CTS) irq_enable(info, CHA, IRQ_CTS); /* MODE:03 RAC Receiver Active, 1=active */ set_reg_bits(info, CHA + MODE, BIT3); enable_auxclk(info); if (info->params.flags & HDLC_FLAG_AUTO_CTS) { irq_enable(info, CHB, IRQ_CTS); /* PVR[3] 1=AUTO CTS active */ set_reg_bits(info, CHA + PVR, BIT3); } else clear_reg_bits(info, CHA + PVR, BIT3); irq_enable(info, CHA, IRQ_RXEOM + IRQ_RXFIFO + IRQ_BREAK_ON + IRQ_RXTIME + IRQ_ALLSENT + IRQ_TXFIFO); issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET); wait_command_complete(info, CHA); read_reg16(info, CHA + ISR); /* clear pending IRQs */ } /* Set the HDLC idle mode for the transmitter. 
 */
static void tx_set_idle(MGSLPC_INFO *info)
{
	/* Note: ESCC2 only supports flags and one idle modes
	 * (CCR1 ITF bit: 1=flag fill, 0=mark/ones fill); any other
	 * requested idle pattern falls back to mark idle.
	 */
	if (info->idle_mode == HDLC_TXIDLE_FLAGS)
		set_reg_bits(info, CHA + CCR1, BIT3);
	else
		clear_reg_bits(info, CHA + CCR1, BIT3);
}

/* get state of the V24 status (input) signals. */
static void get_signals(MGSLPC_INFO *info)
{
	unsigned char status = 0;

	/* preserve DTR and RTS */
	info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;

	if (read_reg(info, CHB + VSTR) & BIT7)
		info->serial_signals |= SerialSignal_DCD;
	if (read_reg(info, CHB + STAR) & BIT1)
		info->serial_signals |= SerialSignal_CTS;

	/* RI and DSR are read from PVR; bits are active low */
	status = read_reg(info, CHA + PVR);
	if (!(status & PVR_RI))
		info->serial_signals |= SerialSignal_RI;
	if (!(status & PVR_DSR))
		info->serial_signals |= SerialSignal_DSR;
}

/* Set the state of DTR and RTS based on contents of
 * serial_signals member of device extension.
 */
static void set_signals(MGSLPC_INFO *info)
{
	unsigned char val;

	/* RTS lives in a different MODE bit (and with inverted sense)
	 * in async mode (FRTS, BIT6) vs sync mode (RTS, BIT2).
	 */
	val = read_reg(info, CHA + MODE);
	if (info->params.mode == MGSL_MODE_ASYNC) {
		if (info->serial_signals & SerialSignal_RTS)
			val &= ~BIT6;
		else
			val |= BIT6;
	} else {
		if (info->serial_signals & SerialSignal_RTS)
			val |= BIT2;
		else
			val &= ~BIT2;
	}
	write_reg(info, CHA + MODE, val);

	/* PVR DTR output is active low */
	if (info->serial_signals & SerialSignal_DTR)
		clear_reg_bits(info, CHA + PVR, PVR_DTR);
	else
		set_reg_bits(info, CHA + PVR, PVR_DTR);
}

/* Reset the circular receive buffer ring to empty. */
static void rx_reset_buffers(MGSLPC_INFO *info)
{
	RXBUF *buf;
	int i;

	info->rx_put = 0;
	info->rx_get = 0;
	info->rx_frame_count = 0;
	for (i=0 ; i < info->rx_buf_count ; i++) {
		buf = (RXBUF*)(info->rx_buf + (i * info->rx_buf_size));
		buf->status = buf->count = 0;
	}
}

/* Attempt to return a received HDLC frame
 * Only frames received without errors are returned.
 *
 * Returns true if frame returned, otherwise false
 */
static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
{
	unsigned short status;
	RXBUF *buf;
	unsigned int framesize = 0;
	unsigned long flags;
	bool return_frame = false;

	if (info->rx_frame_count == 0)
		return false;

	buf = (RXBUF*)(info->rx_buf + (info->rx_get * info->rx_buf_size));

	status = buf->status;

	/* 07  VFR  1=valid frame
	 * 06  RDO  1=data overrun
	 * 05  CRC  1=OK, 0=error
	 * 04  RAB  1=frame aborted
	 */
	if ((status & 0xf0) != 0xA0) {
		/* not a clean frame: classify the error for the counters */
		if (!(status & BIT7) || (status & BIT4))
			info->icount.rxabort++;
		else if (status & BIT6)
			info->icount.rxover++;
		else if (!(status & BIT5)) {
			info->icount.rxcrc++;
			/* CRC-error frames may still be delivered if the
			 * caller asked for them via HDLC_CRC_RETURN_EX
			 */
			if (info->params.crc_type & HDLC_CRC_RETURN_EX)
				return_frame = true;
		}
		framesize = 0;
#if SYNCLINK_GENERIC_HDLC
		{
			info->netdev->stats.rx_errors++;
			info->netdev->stats.rx_frame_errors++;
		}
#endif
	} else
		return_frame = true;

	if (return_frame)
		framesize = buf->count;

	if (debug_level >= DEBUG_LEVEL_BH)
		printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n",
			__FILE__,__LINE__,info->device_name,status,framesize);

	if (debug_level >= DEBUG_LEVEL_DATA)
		trace_block(info, buf->data, framesize, 0);

	if (framesize) {
		/* with RETURN_EX one extra status byte is appended below,
		 * so the size check must account for it
		 */
		if ((info->params.crc_type & HDLC_CRC_RETURN_EX &&
		      framesize+1 > info->max_frame_size) ||
		    framesize > info->max_frame_size)
			info->icount.rxlong++;
		else {
			if (status & BIT5)
				info->icount.rxok++;

			if (info->params.crc_type & HDLC_CRC_RETURN_EX) {
				*(buf->data + framesize) = status & BIT5 ? RX_OK:RX_CRC_ERROR;
				++framesize;
			}

#if SYNCLINK_GENERIC_HDLC
			if (info->netcount)
				hdlcdev_rx(info, buf->data, framesize);
			else
#endif
				ldisc_receive_buf(tty, buf->data, info->flag_buf, framesize);
		}
	}

	spin_lock_irqsave(&info->lock,flags);
	buf->status = buf->count = 0;
	info->rx_frame_count--;
	info->rx_get++;
	if (info->rx_get >= info->rx_buf_count)
		info->rx_get = 0;
	spin_unlock_irqrestore(&info->lock,flags);

	return true;
}

/* Diagnostic: verify the chip registers can be written and read back.
 * Returns true if the register test passed.
 */
static bool register_test(MGSLPC_INFO *info)
{
	static unsigned char patterns[] =
	    { 0x00, 0xff, 0xaa, 0x55, 0x69, 0x96, 0x0f };
	static unsigned int count = ARRAY_SIZE(patterns);
	unsigned int i;
	bool rc = true;
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);
	reset_device(info);

	for (i = 0; i < count; i++) {
		write_reg(info, XAD1, patterns[i]);
		write_reg(info, XAD2, patterns[(i + 1) % count]);
		if ((read_reg(info, XAD1) != patterns[i]) ||
		    (read_reg(info, XAD2) != patterns[(i + 1) % count])) {
			rc = false;
			break;
		}
	}

	spin_unlock_irqrestore(&info->lock,flags);
	return rc;
}

/* Diagnostic: verify the interrupt path by arming the chip timer
 * (requires master clock mode; see hdlc_mode) and polling up to
 * ~1 second for irq_occurred to be set by the ISR.
 */
static bool irq_test(MGSLPC_INFO *info)
{
	unsigned long end_time;
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);
	reset_device(info);

	info->testing_irq = true;
	hdlc_mode(info);

	info->irq_occurred = false;

	/* init hdlc mode */

	irq_enable(info, CHA, IRQ_TIMER);
	write_reg(info, CHA + TIMR, 0);	/* 512 cycles */
	issue_command(info, CHA, CMD_START_TIMER);

	spin_unlock_irqrestore(&info->lock,flags);

	end_time=100;
	while(end_time-- && !info->irq_occurred) {
		msleep_interruptible(10);
	}

	info->testing_irq = false;

	spin_lock_irqsave(&info->lock,flags);
	reset_device(info);
	spin_unlock_irqrestore(&info->lock,flags);

	return info->irq_occurred;
}

/* Run the register and interrupt diagnostics; on failure record the
 * failure kind in init_error and return -ENODEV.
 */
static int adapter_test(MGSLPC_INFO *info)
{
	if (!register_test(info)) {
		info->init_error = DiagStatus_AddressFailure;
		printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
		return -ENODEV;
	}

	if (!irq_test(info)) {
		info->init_error = DiagStatus_IrqFailure;
		printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
		return -ENODEV;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):device %s passed diagnostics\n",
			__FILE__,__LINE__,info->device_name);
	return 0;
}

/* Debug helper: hex/ASCII dump of a tx or rx data block to the log. */
static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
{
	int i;
	int linecount;
	if (xmit)
		printk("%s tx data:\n",info->device_name);
	else
		printk("%s rx data:\n",info->device_name);

	while(count) {
		if (count > 16)
			linecount = 16;
		else
			linecount = count;

		for(i=0;i<linecount;i++)
			printk("%02X ",(unsigned char)data[i]);
		for(;i<17;i++)
			printk("   ");
		for(i=0;i<linecount;i++) {
			/* printable ASCII range (octal 040..0176) */
			if (data[i]>=040 && data[i]<=0176)
				printk("%c",data[i]);
			else
				printk(".");
		}
		printk("\n");

		data  += linecount;
		count -= linecount;
	}
}

/* HDLC frame time out
 * update stats and do tx completion processing
 */
static void tx_timeout(unsigned long context)
{
	MGSLPC_INFO *info = (MGSLPC_INFO*)context;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):tx_timeout(%s)\n",
			__FILE__,__LINE__,info->device_name);
	if(info->tx_active &&
	   info->params.mode == MGSL_MODE_HDLC) {
		info->icount.txtimeout++;
	}
	spin_lock_irqsave(&info->lock,flags);
	info->tx_active = false;
	info->tx_count = info->tx_put = info->tx_get = 0;

	spin_unlock_irqrestore(&info->lock,flags);

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
	{
		struct tty_struct *tty = tty_port_tty_get(&info->port);
		bh_transmit(info, tty);
		tty_kref_put(tty);
	}
}

#if SYNCLINK_GENERIC_HDLC

/**
 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
 * set encoding and frame check sequence (FCS) options
 *
 * dev       pointer to network device structure
 * encoding  serial encoding setting
 * parity    FCS setting
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
			  unsigned short parity)
{
	MGSLPC_INFO *info = dev_to_port(dev);
	struct tty_struct *tty;
	unsigned char  new_encoding;
	unsigned short new_crctype;

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	switch (encoding)
	{
	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
	default: return -EINVAL;
	}

	switch (parity)
	{
	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
	default: return -EINVAL;
	}

	info->params.encoding = new_encoding;
	info->params.crc_type = new_crctype;

	/* if network interface up, reprogram hardware */
	if (info->netcount) {
		tty = tty_port_tty_get(&info->port);
		mgslpc_program_hw(info, tty);
		tty_kref_put(tty);
	}

	return 0;
}

/**
 * called by generic HDLC layer to send frame
 *
 * skb  socket buffer containing HDLC frame
 * dev  pointer to network device structure
 */
static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	MGSLPC_INFO *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);

	/* stop sending until this frame completes */
	netif_stop_queue(dev);

	/* copy data to device buffers */
	skb_copy_from_linear_data(skb, info->tx_buf, skb->len);
	info->tx_get = 0;
	info->tx_put = info->tx_count = skb->len;

	/* update network statistics */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* done with socket buffer, so free it */
	dev_kfree_skb(skb);

	/* save start time for transmit timeout detection */
	dev->trans_start = jiffies;

	/* start hardware transmitter if necessary */
	spin_lock_irqsave(&info->lock,flags);
	if (!info->tx_active) {
		struct tty_struct *tty = tty_port_tty_get(&info->port);
		tx_start(info, tty);
		tty_kref_put(tty);
	}
	spin_unlock_irqrestore(&info->lock,flags);

	return NETDEV_TX_OK;
}

/**
 * called by network layer when interface enabled
 * claim resources and initialize hardware
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_open(struct net_device *dev)
{
	MGSLPC_INFO *info = dev_to_port(dev);
	struct tty_struct *tty;
	int rc;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);

	/* generic HDLC layer open processing */
	if ((rc = hdlc_open(dev)))
		return rc;

	/* arbitrate between network and tty opens */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->port.count != 0 || info->netcount != 0) {
		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
		spin_unlock_irqrestore(&info->netlock, flags);
		return -EBUSY;
	}
	info->netcount=1;
	spin_unlock_irqrestore(&info->netlock, flags);

	tty = tty_port_tty_get(&info->port);
	/* claim resources and init adapter */
	if ((rc = startup(info, tty)) != 0) {
		tty_kref_put(tty);
		spin_lock_irqsave(&info->netlock, flags);
		info->netcount=0;
		spin_unlock_irqrestore(&info->netlock, flags);
		return rc;
	}
	/* assert DTR and RTS, apply hardware settings */
	info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
	mgslpc_program_hw(info, tty);
	tty_kref_put(tty);

	/* enable network layer transmit */
	dev->trans_start = jiffies;
	netif_start_queue(dev);

	/* inform generic HDLC layer of current DCD status */
	spin_lock_irqsave(&info->lock, flags);
	get_signals(info);
	spin_unlock_irqrestore(&info->lock, flags);
	if (info->serial_signals & SerialSignal_DCD)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

/**
 * called by network layer when interface is disabled
 * shutdown hardware and release resources
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_close(struct net_device *dev)
{
	MGSLPC_INFO *info = dev_to_port(dev);
	struct tty_struct *tty = tty_port_tty_get(&info->port);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);

	netif_stop_queue(dev);

	/* shutdown adapter and release resources */
	shutdown(info, tty);
	tty_kref_put(tty);
	hdlc_close(dev);

	spin_lock_irqsave(&info->netlock, flags);
	info->netcount=0;
	spin_unlock_irqrestore(&info->netlock, flags);

	return 0;
}

/**
 * called by network layer to process IOCTL call to network device
 *
 * dev  pointer to network device structure
 * ifr  pointer to network interface request structure
 * cmd  IOCTL command code
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	MGSLPC_INFO *info = dev_to_port(dev);
	unsigned int flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	memset(&new_line, 0, size);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE: /* return current sync_serial_settings */

		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}

		/* map the driver's clock-source flags back to the
		 * generic HDLC clock_type constants
		 */
		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);

		switch (flags){
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
		default: new_line.clock_type = CLOCK_DEFAULT;
		}

		new_line.clock_rate = info->params.clock_speed;
		new_line.loopback   = info->params.loopback ? 1:0;

		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */

		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		switch (new_line.clock_type)
		{
		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_DEFAULT:  flags = info->params.flags &
					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
		default: return -EINVAL;
		}

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
		info->params.flags |= flags;

		info->params.loopback = new_line.loopback;

		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
			info->params.clock_speed = new_line.clock_rate;
		else
			info->params.clock_speed = 0;

		/* if network interface up, reprogram hardware */
		if (info->netcount) {
			struct tty_struct *tty = tty_port_tty_get(&info->port);
			mgslpc_program_hw(info, tty);
			tty_kref_put(tty);
		}
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

/**
 * called by network layer when transmit timeout is detected
 *
 * dev  pointer to network device structure
 */
static void hdlcdev_tx_timeout(struct net_device *dev)
{
	MGSLPC_INFO *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_tx_timeout(%s)\n",dev->name);

	dev->stats.tx_errors++;
	dev->stats.tx_aborted_errors++;

	spin_lock_irqsave(&info->lock,flags);
	tx_stop(info);
	spin_unlock_irqrestore(&info->lock,flags);

	netif_wake_queue(dev);
}

/**
 * called by device driver when transmit completes
 * reenable network layer transmit if stopped
 *
 * info  pointer to device instance information
 */
static void hdlcdev_tx_done(MGSLPC_INFO *info)
{
	if (netif_queue_stopped(info->netdev))
		netif_wake_queue(info->netdev);
}

/**
 * called by device driver when frame received
 * pass frame to network layer
 *
 * info  pointer to device instance information
 * buf   pointer to buffer containing frame data
 * size  count of data bytes in buf
 */
static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size)
{
	struct sk_buff *skb = dev_alloc_skb(size);
	struct net_device *dev = info->netdev;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_rx(%s)\n",dev->name);

	if (skb == NULL) {
		printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
		dev->stats.rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, size), buf, size);

	skb->protocol = hdlc_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += size;

	netif_rx(skb);
}

static const struct net_device_ops hdlcdev_ops = {
	.ndo_open       = hdlcdev_open,
	.ndo_stop       = hdlcdev_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hdlcdev_ioctl,
	.ndo_tx_timeout = hdlcdev_tx_timeout,
};

/**
 * called by device driver when adding device instance
 * do generic HDLC initialization
 *
 * info  pointer to device instance information
 *
 * returns 0 if success,
otherwise error code */ static int hdlcdev_init(MGSLPC_INFO *info) { int rc; struct net_device *dev; hdlc_device *hdlc; /* allocate and initialize network and HDLC layer objects */ if (!(dev = alloc_hdlcdev(info))) { printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__); return -ENOMEM; } /* for network layer reporting purposes only */ dev->base_addr = info->io_base; dev->irq = info->irq_level; /* network layer callbacks and settings */ dev->netdev_ops = &hdlcdev_ops; dev->watchdog_timeo = 10 * HZ; dev->tx_queue_len = 50; /* generic HDLC layer callbacks and settings */ hdlc = dev_to_hdlc(dev); hdlc->attach = hdlcdev_attach; hdlc->xmit = hdlcdev_xmit; /* register objects with HDLC layer */ if ((rc = register_hdlc_device(dev))) { printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__); free_netdev(dev); return rc; } info->netdev = dev; return 0; } /** * called by device driver when removing device instance * do generic HDLC cleanup * * info pointer to device instance information */ static void hdlcdev_exit(MGSLPC_INFO *info) { unregister_hdlc_device(info->netdev); free_netdev(info->netdev); info->netdev = NULL; } #endif /* CONFIG_HDLC */
gpl-2.0
Mystic-Mirage/android_kernel_gigabyte_roma_r2_plus
drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c
9536
3611
/*
 * DigitalNow TinyTwin remote controller keytable
 *
 * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* NEC scancode -> Linux input keycode table for the TinyTwin remote.
 * Bracketed text in the comments describes the printing on the key cap. */
static struct rc_map_table digitalnow_tinytwin[] = {
	{ 0x0000, KEY_MUTE },            /* [symbol speaker] */
	{ 0x0001, KEY_VOLUMEUP },
	{ 0x0002, KEY_POWER2 },          /* TV [power button] */
	{ 0x0003, KEY_2 },
	{ 0x0004, KEY_3 },
	{ 0x0005, KEY_4 },
	{ 0x0006, KEY_6 },
	{ 0x0007, KEY_7 },
	{ 0x0008, KEY_8 },
	{ 0x0009, KEY_NUMERIC_STAR },    /* [*] */
	{ 0x000a, KEY_0 },
	{ 0x000b, KEY_NUMERIC_POUND },   /* [#] */
	{ 0x000c, KEY_RIGHT },           /* [right arrow] */
	{ 0x000d, KEY_HOMEPAGE },        /* [symbol home] Start */
	{ 0x000e, KEY_RED },             /* [red] Videos */
	{ 0x0010, KEY_POWER },           /* PC [power button] */
	{ 0x0011, KEY_YELLOW },          /* [yellow] Pictures */
	{ 0x0012, KEY_DOWN },            /* [down arrow] */
	{ 0x0013, KEY_GREEN },           /* [green] Music */
	{ 0x0014, KEY_CYCLEWINDOWS },    /* BACK */
	{ 0x0015, KEY_FAVORITES },       /* MORE */
	{ 0x0016, KEY_UP },              /* [up arrow] */
	{ 0x0017, KEY_LEFT },            /* [left arrow] */
	{ 0x0018, KEY_OK },              /* OK */
	{ 0x0019, KEY_BLUE },            /* [blue] MyTV */
	{ 0x001a, KEY_REWIND },          /* REW [<<] */
	{ 0x001b, KEY_PLAY },            /* PLAY */
	{ 0x001c, KEY_5 },
	{ 0x001d, KEY_9 },
	{ 0x001e, KEY_VOLUMEDOWN },
	{ 0x001f, KEY_1 },
	{ 0x0040, KEY_STOP },            /* STOP */
	{ 0x0042, KEY_PAUSE },           /* PAUSE */
	{ 0x0043, KEY_SCREEN },          /* Aspect */
	{ 0x0044, KEY_FORWARD },         /* FWD [>>] */
	{ 0x0045, KEY_NEXT },            /* SKIP */
	{ 0x0048, KEY_RECORD },          /* RECORD */
	{ 0x0049, KEY_VIDEO },           /* RTV */
	{ 0x004a, KEY_EPG },             /* Guide */
	{ 0x004b, KEY_CHANNELUP },
	{ 0x004c, KEY_HELP },            /* Help */
	{ 0x004d, KEY_RADIO },           /* Radio */
	{ 0x004f, KEY_CHANNELDOWN },
	{ 0x0050, KEY_DVD },             /* DVD */
	{ 0x0051, KEY_AUDIO },           /* Audio */
	{ 0x0052, KEY_TITLE },           /* Title */
	{ 0x0053, KEY_NEW },             /* [symbol PIP?] */
	{ 0x0057, KEY_MENU },            /* Mouse */
	{ 0x005a, KEY_PREVIOUS },        /* REPLAY */
};

/* rc-core registration descriptor wrapping the table above */
static struct rc_map_list digitalnow_tinytwin_map = {
	.map = {
		.scan     = digitalnow_tinytwin,
		.size     = ARRAY_SIZE(digitalnow_tinytwin),
		.rc_type  = RC_TYPE_NEC,
		.name     = RC_MAP_DIGITALNOW_TINYTWIN,
	}
};

/* register the keymap with rc-core at module load */
static int __init init_rc_map_digitalnow_tinytwin(void)
{
	return rc_map_register(&digitalnow_tinytwin_map);
}

/* unregister the keymap at module unload */
static void __exit exit_rc_map_digitalnow_tinytwin(void)
{
	rc_map_unregister(&digitalnow_tinytwin_map);
}

module_init(init_rc_map_digitalnow_tinytwin)
module_exit(exit_rc_map_digitalnow_tinytwin)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
gpl-2.0
PRJosh/kernel_samsung_manta
arch/mips/pci/fixup-ip32.c
9536
1518
#include <linux/init.h> #include <linux/kernel.h> #include <linux/pci.h> #include <asm/ip32/ip32_ints.h> /* * O2 has up to 5 PCI devices connected into the MACE bridge. The device * map looks like this: * * 0 aic7xxx 0 * 1 aic7xxx 1 * 2 expansion slot * 3 N/C * 4 N/C */ #define SCSI0 MACEPCI_SCSI0_IRQ #define SCSI1 MACEPCI_SCSI1_IRQ #define INTA0 MACEPCI_SLOT0_IRQ #define INTA1 MACEPCI_SLOT1_IRQ #define INTA2 MACEPCI_SLOT2_IRQ #define INTB MACEPCI_SHARED0_IRQ #define INTC MACEPCI_SHARED1_IRQ #define INTD MACEPCI_SHARED2_IRQ static char irq_tab_mace[][5] __initdata = { /* Dummy INT#A INT#B INT#C INT#D */ {0, 0, 0, 0, 0}, /* This is placeholder row - never used */ {0, SCSI0, SCSI0, SCSI0, SCSI0}, {0, SCSI1, SCSI1, SCSI1, SCSI1}, {0, INTA0, INTB, INTC, INTD}, {0, INTA1, INTC, INTD, INTB}, {0, INTA2, INTD, INTB, INTC}, }; /* * Given a PCI slot number (a la PCI_SLOT(...)) and the interrupt pin of * the device (1-4 => A-D), tell what irq to use. Note that we don't * in theory have slots 4 and 5, and we never normally use the shared * irqs. I suppose a device without a pin A will thank us for doing it * right if there exists such a broken piece of crap. */ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return irq_tab_mace[slot][pin]; } /* Do platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; }
gpl-2.0
DerArtem/folio100-kernel-2.6.32-toshiba
sound/pci/pcxhr/pcxhr_mixer.c
12608
36943
#define __NO_VERSION__ /* * Driver for Digigram pcxhr compatible soundcards * * mixer callbacks * * Copyright (c) 2004 by Digigram <alsa@digigram.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/time.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/mutex.h> #include <sound/core.h> #include "pcxhr.h" #include "pcxhr_hwdep.h" #include "pcxhr_core.h" #include <sound/control.h> #include <sound/tlv.h> #include <sound/asoundef.h> #include "pcxhr_mixer.h" #include "pcxhr_mix22.h" #define PCXHR_LINE_CAPTURE_LEVEL_MIN 0 /* -112.0 dB */ #define PCXHR_LINE_CAPTURE_LEVEL_MAX 255 /* +15.5 dB */ #define PCXHR_LINE_CAPTURE_ZERO_LEVEL 224 /* 0.0 dB ( 0 dBu -> 0 dBFS ) */ #define PCXHR_LINE_PLAYBACK_LEVEL_MIN 0 /* -104.0 dB */ #define PCXHR_LINE_PLAYBACK_LEVEL_MAX 128 /* +24.0 dB */ #define PCXHR_LINE_PLAYBACK_ZERO_LEVEL 104 /* 0.0 dB ( 0 dBFS -> 0 dBu ) */ static const DECLARE_TLV_DB_SCALE(db_scale_analog_capture, -11200, 50, 1550); static const DECLARE_TLV_DB_SCALE(db_scale_analog_playback, -10400, 100, 2400); static const DECLARE_TLV_DB_SCALE(db_scale_a_hr222_capture, -11150, 50, 1600); static const DECLARE_TLV_DB_SCALE(db_scale_a_hr222_playback, -2550, 50, 2400); static int pcxhr_update_analog_audio_level(struct snd_pcxhr *chip, int is_capture, int channel) { int err, vol; struct pcxhr_rmh rmh; 
pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_WRITE); if (is_capture) { rmh.cmd[0] |= IO_NUM_REG_IN_ANA_LEVEL; rmh.cmd[2] = chip->analog_capture_volume[channel]; } else { rmh.cmd[0] |= IO_NUM_REG_OUT_ANA_LEVEL; if (chip->analog_playback_active[channel]) vol = chip->analog_playback_volume[channel]; else vol = PCXHR_LINE_PLAYBACK_LEVEL_MIN; /* playback analog levels are inversed */ rmh.cmd[2] = PCXHR_LINE_PLAYBACK_LEVEL_MAX - vol; } rmh.cmd[1] = 1 << ((2 * chip->chip_idx) + channel); /* audio mask */ rmh.cmd_len = 3; err = pcxhr_send_msg(chip->mgr, &rmh); if (err < 0) { snd_printk(KERN_DEBUG "error update_analog_audio_level card(%d)" " is_capture(%d) err(%x)\n", chip->chip_idx, is_capture, err); return -EINVAL; } return 0; } /* * analog level control */ static int pcxhr_analog_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; if (kcontrol->private_value == 0) { /* playback */ if (chip->mgr->is_hr_stereo) { uinfo->value.integer.min = HR222_LINE_PLAYBACK_LEVEL_MIN; /* -25 dB */ uinfo->value.integer.max = HR222_LINE_PLAYBACK_LEVEL_MAX; /* +24 dB */ } else { uinfo->value.integer.min = PCXHR_LINE_PLAYBACK_LEVEL_MIN; /*-104 dB */ uinfo->value.integer.max = PCXHR_LINE_PLAYBACK_LEVEL_MAX; /* +24 dB */ } } else { /* capture */ if (chip->mgr->is_hr_stereo) { uinfo->value.integer.min = HR222_LINE_CAPTURE_LEVEL_MIN; /*-112 dB */ uinfo->value.integer.max = HR222_LINE_CAPTURE_LEVEL_MAX; /* +15.5 dB */ } else { uinfo->value.integer.min = PCXHR_LINE_CAPTURE_LEVEL_MIN; /*-112 dB */ uinfo->value.integer.max = PCXHR_LINE_CAPTURE_LEVEL_MAX; /* +15.5 dB */ } } return 0; } static int pcxhr_analog_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); mutex_lock(&chip->mgr->mixer_mutex); if (kcontrol->private_value == 0) { /* playback */ ucontrol->value.integer.value[0] = 
chip->analog_playback_volume[0]; ucontrol->value.integer.value[1] = chip->analog_playback_volume[1]; } else { /* capture */ ucontrol->value.integer.value[0] = chip->analog_capture_volume[0]; ucontrol->value.integer.value[1] = chip->analog_capture_volume[1]; } mutex_unlock(&chip->mgr->mixer_mutex); return 0; } static int pcxhr_analog_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); int changed = 0; int is_capture, i; mutex_lock(&chip->mgr->mixer_mutex); is_capture = (kcontrol->private_value != 0); for (i = 0; i < 2; i++) { int new_volume = ucontrol->value.integer.value[i]; int *stored_volume = is_capture ? &chip->analog_capture_volume[i] : &chip->analog_playback_volume[i]; if (is_capture) { if (chip->mgr->is_hr_stereo) { if (new_volume < HR222_LINE_CAPTURE_LEVEL_MIN || new_volume > HR222_LINE_CAPTURE_LEVEL_MAX) continue; } else { if (new_volume < PCXHR_LINE_CAPTURE_LEVEL_MIN || new_volume > PCXHR_LINE_CAPTURE_LEVEL_MAX) continue; } } else { if (chip->mgr->is_hr_stereo) { if (new_volume < HR222_LINE_PLAYBACK_LEVEL_MIN || new_volume > HR222_LINE_PLAYBACK_LEVEL_MAX) continue; } else { if (new_volume < PCXHR_LINE_PLAYBACK_LEVEL_MIN || new_volume > PCXHR_LINE_PLAYBACK_LEVEL_MAX) continue; } } if (*stored_volume != new_volume) { *stored_volume = new_volume; changed = 1; if (chip->mgr->is_hr_stereo) hr222_update_analog_audio_level(chip, is_capture, i); else pcxhr_update_analog_audio_level(chip, is_capture, i); } } mutex_unlock(&chip->mgr->mixer_mutex); return changed; } static struct snd_kcontrol_new pcxhr_control_analog_level = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), /* name will be filled later */ .info = pcxhr_analog_vol_info, .get = pcxhr_analog_vol_get, .put = pcxhr_analog_vol_put, /* tlv will be filled later */ }; /* shared */ #define pcxhr_sw_info snd_ctl_boolean_stereo_info static int 
pcxhr_audio_sw_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); mutex_lock(&chip->mgr->mixer_mutex); ucontrol->value.integer.value[0] = chip->analog_playback_active[0]; ucontrol->value.integer.value[1] = chip->analog_playback_active[1]; mutex_unlock(&chip->mgr->mixer_mutex); return 0; } static int pcxhr_audio_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); int i, changed = 0; mutex_lock(&chip->mgr->mixer_mutex); for(i = 0; i < 2; i++) { if (chip->analog_playback_active[i] != ucontrol->value.integer.value[i]) { chip->analog_playback_active[i] = !!ucontrol->value.integer.value[i]; changed = 1; /* update playback levels */ if (chip->mgr->is_hr_stereo) hr222_update_analog_audio_level(chip, 0, i); else pcxhr_update_analog_audio_level(chip, 0, i); } } mutex_unlock(&chip->mgr->mixer_mutex); return changed; } static struct snd_kcontrol_new pcxhr_control_output_switch = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Master Playback Switch", .info = pcxhr_sw_info, /* shared */ .get = pcxhr_audio_sw_get, .put = pcxhr_audio_sw_put }; #define PCXHR_DIGITAL_LEVEL_MIN 0x000 /* -110 dB */ #define PCXHR_DIGITAL_LEVEL_MAX 0x1ff /* +18 dB */ #define PCXHR_DIGITAL_ZERO_LEVEL 0x1b7 /* 0 dB */ static const DECLARE_TLV_DB_SCALE(db_scale_digital, -10975, 25, 1800); #define MORE_THAN_ONE_STREAM_LEVEL 0x000001 #define VALID_STREAM_PAN_LEVEL_MASK 0x800000 #define VALID_STREAM_LEVEL_MASK 0x400000 #define VALID_STREAM_LEVEL_1_MASK 0x200000 #define VALID_STREAM_LEVEL_2_MASK 0x100000 static int pcxhr_update_playback_stream_level(struct snd_pcxhr* chip, int idx) { int err; struct pcxhr_rmh rmh; struct pcxhr_pipe *pipe = &chip->playback_pipe; int left, right; if (chip->digital_playback_active[idx][0]) left = chip->digital_playback_volume[idx][0]; else left = PCXHR_DIGITAL_LEVEL_MIN; if (chip->digital_playback_active[idx][1]) right = 
chip->digital_playback_volume[idx][1]; else right = PCXHR_DIGITAL_LEVEL_MIN; pcxhr_init_rmh(&rmh, CMD_STREAM_OUT_LEVEL_ADJUST); /* add pipe and stream mask */ pcxhr_set_pipe_cmd_params(&rmh, 0, pipe->first_audio, 0, 1<<idx); /* volume left->left / right->right panoramic level */ rmh.cmd[0] |= MORE_THAN_ONE_STREAM_LEVEL; rmh.cmd[2] = VALID_STREAM_PAN_LEVEL_MASK | VALID_STREAM_LEVEL_1_MASK; rmh.cmd[2] |= (left << 10); rmh.cmd[3] = VALID_STREAM_PAN_LEVEL_MASK | VALID_STREAM_LEVEL_2_MASK; rmh.cmd[3] |= right; rmh.cmd_len = 4; err = pcxhr_send_msg(chip->mgr, &rmh); if (err < 0) { snd_printk(KERN_DEBUG "error update_playback_stream_level " "card(%d) err(%x)\n", chip->chip_idx, err); return -EINVAL; } return 0; } #define AUDIO_IO_HAS_MUTE_LEVEL 0x400000 #define AUDIO_IO_HAS_MUTE_MONITOR_1 0x200000 #define VALID_AUDIO_IO_DIGITAL_LEVEL 0x000001 #define VALID_AUDIO_IO_MONITOR_LEVEL 0x000002 #define VALID_AUDIO_IO_MUTE_LEVEL 0x000004 #define VALID_AUDIO_IO_MUTE_MONITOR_1 0x000008 static int pcxhr_update_audio_pipe_level(struct snd_pcxhr *chip, int capture, int channel) { int err; struct pcxhr_rmh rmh; struct pcxhr_pipe *pipe; if (capture) pipe = &chip->capture_pipe[0]; else pipe = &chip->playback_pipe; pcxhr_init_rmh(&rmh, CMD_AUDIO_LEVEL_ADJUST); /* add channel mask */ pcxhr_set_pipe_cmd_params(&rmh, capture, 0, 0, 1 << (channel + pipe->first_audio)); /* TODO : if mask (3 << pipe->first_audio) is used, left and right * channel will be programmed to the same params */ if (capture) { rmh.cmd[0] |= VALID_AUDIO_IO_DIGITAL_LEVEL; /* VALID_AUDIO_IO_MUTE_LEVEL not yet handled * (capture pipe level) */ rmh.cmd[2] = chip->digital_capture_volume[channel]; } else { rmh.cmd[0] |= VALID_AUDIO_IO_MONITOR_LEVEL | VALID_AUDIO_IO_MUTE_MONITOR_1; /* VALID_AUDIO_IO_DIGITAL_LEVEL and VALID_AUDIO_IO_MUTE_LEVEL * not yet handled (playback pipe level) */ rmh.cmd[2] = chip->monitoring_volume[channel] << 10; if (chip->monitoring_active[channel] == 0) rmh.cmd[2] |= AUDIO_IO_HAS_MUTE_MONITOR_1; } 
rmh.cmd_len = 3; err = pcxhr_send_msg(chip->mgr, &rmh); if (err < 0) { snd_printk(KERN_DEBUG "error update_audio_level(%d) err=%x\n", chip->chip_idx, err); return -EINVAL; } return 0; } /* shared */ static int pcxhr_digital_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = PCXHR_DIGITAL_LEVEL_MIN; /* -109.5 dB */ uinfo->value.integer.max = PCXHR_DIGITAL_LEVEL_MAX; /* 18.0 dB */ return 0; } static int pcxhr_pcm_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); /* index */ int *stored_volume; int is_capture = kcontrol->private_value; mutex_lock(&chip->mgr->mixer_mutex); if (is_capture) /* digital capture */ stored_volume = chip->digital_capture_volume; else /* digital playback */ stored_volume = chip->digital_playback_volume[idx]; ucontrol->value.integer.value[0] = stored_volume[0]; ucontrol->value.integer.value[1] = stored_volume[1]; mutex_unlock(&chip->mgr->mixer_mutex); return 0; } static int pcxhr_pcm_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); /* index */ int changed = 0; int is_capture = kcontrol->private_value; int *stored_volume; int i; mutex_lock(&chip->mgr->mixer_mutex); if (is_capture) /* digital capture */ stored_volume = chip->digital_capture_volume; else /* digital playback */ stored_volume = chip->digital_playback_volume[idx]; for (i = 0; i < 2; i++) { int vol = ucontrol->value.integer.value[i]; if (vol < PCXHR_DIGITAL_LEVEL_MIN || vol > PCXHR_DIGITAL_LEVEL_MAX) continue; if (stored_volume[i] != vol) { stored_volume[i] = vol; changed = 1; if (is_capture) /* update capture volume */ pcxhr_update_audio_pipe_level(chip, 1, i); } } if (!is_capture && changed) /* update 
playback volume */ pcxhr_update_playback_stream_level(chip, idx); mutex_unlock(&chip->mgr->mixer_mutex); return changed; } static struct snd_kcontrol_new snd_pcxhr_pcm_vol = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), /* name will be filled later */ /* count will be filled later */ .info = pcxhr_digital_vol_info, /* shared */ .get = pcxhr_pcm_vol_get, .put = pcxhr_pcm_vol_put, .tlv = { .p = db_scale_digital }, }; static int pcxhr_pcm_sw_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); /* index */ mutex_lock(&chip->mgr->mixer_mutex); ucontrol->value.integer.value[0] = chip->digital_playback_active[idx][0]; ucontrol->value.integer.value[1] = chip->digital_playback_active[idx][1]; mutex_unlock(&chip->mgr->mixer_mutex); return 0; } static int pcxhr_pcm_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); int changed = 0; int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); /* index */ int i, j; mutex_lock(&chip->mgr->mixer_mutex); j = idx; for (i = 0; i < 2; i++) { if (chip->digital_playback_active[j][i] != ucontrol->value.integer.value[i]) { chip->digital_playback_active[j][i] = !!ucontrol->value.integer.value[i]; changed = 1; } } if (changed) pcxhr_update_playback_stream_level(chip, idx); mutex_unlock(&chip->mgr->mixer_mutex); return changed; } static struct snd_kcontrol_new pcxhr_control_pcm_switch = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "PCM Playback Switch", .count = PCXHR_PLAYBACK_STREAMS, .info = pcxhr_sw_info, /* shared */ .get = pcxhr_pcm_sw_get, .put = pcxhr_pcm_sw_put }; /* * monitoring level control */ static int pcxhr_monitor_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); 
mutex_lock(&chip->mgr->mixer_mutex); ucontrol->value.integer.value[0] = chip->monitoring_volume[0]; ucontrol->value.integer.value[1] = chip->monitoring_volume[1]; mutex_unlock(&chip->mgr->mixer_mutex); return 0; } static int pcxhr_monitor_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); int changed = 0; int i; mutex_lock(&chip->mgr->mixer_mutex); for (i = 0; i < 2; i++) { if (chip->monitoring_volume[i] != ucontrol->value.integer.value[i]) { chip->monitoring_volume[i] = ucontrol->value.integer.value[i]; if (chip->monitoring_active[i]) /* update monitoring volume and mute */ /* do only when monitoring is unmuted */ pcxhr_update_audio_pipe_level(chip, 0, i); changed = 1; } } mutex_unlock(&chip->mgr->mixer_mutex); return changed; } static struct snd_kcontrol_new pcxhr_control_monitor_vol = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Monitoring Playback Volume", .info = pcxhr_digital_vol_info, /* shared */ .get = pcxhr_monitor_vol_get, .put = pcxhr_monitor_vol_put, .tlv = { .p = db_scale_digital }, }; /* * monitoring switch control */ static int pcxhr_monitor_sw_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); mutex_lock(&chip->mgr->mixer_mutex); ucontrol->value.integer.value[0] = chip->monitoring_active[0]; ucontrol->value.integer.value[1] = chip->monitoring_active[1]; mutex_unlock(&chip->mgr->mixer_mutex); return 0; } static int pcxhr_monitor_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); int changed = 0; int i; mutex_lock(&chip->mgr->mixer_mutex); for (i = 0; i < 2; i++) { if (chip->monitoring_active[i] != ucontrol->value.integer.value[i]) { chip->monitoring_active[i] = !!ucontrol->value.integer.value[i]; changed |= (1<<i); /* mask 0x01 and 0x02 
*/ } } if (changed & 0x01) /* update left monitoring volume and mute */ pcxhr_update_audio_pipe_level(chip, 0, 0); if (changed & 0x02) /* update right monitoring volume and mute */ pcxhr_update_audio_pipe_level(chip, 0, 1); mutex_unlock(&chip->mgr->mixer_mutex); return (changed != 0); } static struct snd_kcontrol_new pcxhr_control_monitor_sw = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Monitoring Playback Switch", .info = pcxhr_sw_info, /* shared */ .get = pcxhr_monitor_sw_get, .put = pcxhr_monitor_sw_put }; /* * audio source select */ #define PCXHR_SOURCE_AUDIO01_UER 0x000100 #define PCXHR_SOURCE_AUDIO01_SYNC 0x000200 #define PCXHR_SOURCE_AUDIO23_UER 0x000400 #define PCXHR_SOURCE_AUDIO45_UER 0x001000 #define PCXHR_SOURCE_AUDIO67_UER 0x040000 static int pcxhr_set_audio_source(struct snd_pcxhr* chip) { struct pcxhr_rmh rmh; unsigned int mask, reg; unsigned int codec; int err, changed; switch (chip->chip_idx) { case 0 : mask = PCXHR_SOURCE_AUDIO01_UER; codec = CS8420_01_CS; break; case 1 : mask = PCXHR_SOURCE_AUDIO23_UER; codec = CS8420_23_CS; break; case 2 : mask = PCXHR_SOURCE_AUDIO45_UER; codec = CS8420_45_CS; break; case 3 : mask = PCXHR_SOURCE_AUDIO67_UER; codec = CS8420_67_CS; break; default: return -EINVAL; } if (chip->audio_capture_source != 0) { reg = mask; /* audio source from digital plug */ } else { reg = 0; /* audio source from analog plug */ } /* set the input source */ pcxhr_write_io_num_reg_cont(chip->mgr, mask, reg, &changed); /* resync them (otherwise channel inversion possible) */ if (changed) { pcxhr_init_rmh(&rmh, CMD_RESYNC_AUDIO_INPUTS); rmh.cmd[0] |= (1 << chip->chip_idx); err = pcxhr_send_msg(chip->mgr, &rmh); if (err) return err; } if (chip->mgr->board_aes_in_192k) { int i; unsigned int src_config = 0xC0; /* update all src configs with one call */ for (i = 0; (i < 4) && (i < chip->mgr->capture_chips); i++) { if (chip->mgr->chip[i]->audio_capture_source == 2) src_config |= (1 << (3 - i)); } /* set codec SRC on off */ pcxhr_init_rmh(&rmh, 
CMD_ACCESS_IO_WRITE); rmh.cmd_len = 2; rmh.cmd[0] |= IO_NUM_REG_CONFIG_SRC; rmh.cmd[1] = src_config; err = pcxhr_send_msg(chip->mgr, &rmh); } else { int use_src = 0; if (chip->audio_capture_source == 2) use_src = 1; /* set codec SRC on off */ pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_WRITE); rmh.cmd_len = 3; rmh.cmd[0] |= IO_NUM_UER_CHIP_REG; rmh.cmd[1] = codec; rmh.cmd[2] = ((CS8420_DATA_FLOW_CTL & CHIP_SIG_AND_MAP_SPI) | (use_src ? 0x41 : 0x54)); err = pcxhr_send_msg(chip->mgr, &rmh); if (err) return err; rmh.cmd[2] = ((CS8420_CLOCK_SRC_CTL & CHIP_SIG_AND_MAP_SPI) | (use_src ? 0x41 : 0x49)); err = pcxhr_send_msg(chip->mgr, &rmh); } return err; } static int pcxhr_audio_src_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static const char *texts[5] = { "Line", "Digital", "Digi+SRC", "Mic", "Line+Mic" }; int i; struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); i = 2; /* no SRC, no Mic available */ if (chip->mgr->board_has_aes1) { i = 3; /* SRC available */ if (chip->mgr->board_has_mic) i = 5; /* Mic and MicroMix available */ } uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = i; if (uinfo->value.enumerated.item > (i-1)) uinfo->value.enumerated.item = i-1; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int pcxhr_audio_src_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.enumerated.item[0] = chip->audio_capture_source; return 0; } static int pcxhr_audio_src_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); int ret = 0; int i = 2; /* no SRC, no Mic available */ if (chip->mgr->board_has_aes1) { i = 3; /* SRC available */ if (chip->mgr->board_has_mic) i = 5; /* Mic and MicroMix available */ } if (ucontrol->value.enumerated.item[0] >= i) return -EINVAL; mutex_lock(&chip->mgr->mixer_mutex); 
if (chip->audio_capture_source != ucontrol->value.enumerated.item[0]) { chip->audio_capture_source = ucontrol->value.enumerated.item[0]; if (chip->mgr->is_hr_stereo) hr222_set_audio_source(chip); else pcxhr_set_audio_source(chip); ret = 1; } mutex_unlock(&chip->mgr->mixer_mutex); return ret; } static struct snd_kcontrol_new pcxhr_control_audio_src = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Source", .info = pcxhr_audio_src_info, .get = pcxhr_audio_src_get, .put = pcxhr_audio_src_put, }; /* * clock type selection * enum pcxhr_clock_type { * PCXHR_CLOCK_TYPE_INTERNAL = 0, * PCXHR_CLOCK_TYPE_WORD_CLOCK, * PCXHR_CLOCK_TYPE_AES_SYNC, * PCXHR_CLOCK_TYPE_AES_1, * PCXHR_CLOCK_TYPE_AES_2, * PCXHR_CLOCK_TYPE_AES_3, * PCXHR_CLOCK_TYPE_AES_4, * PCXHR_CLOCK_TYPE_MAX = PCXHR_CLOCK_TYPE_AES_4, * HR22_CLOCK_TYPE_INTERNAL = PCXHR_CLOCK_TYPE_INTERNAL, * HR22_CLOCK_TYPE_AES_SYNC, * HR22_CLOCK_TYPE_AES_1, * HR22_CLOCK_TYPE_MAX = HR22_CLOCK_TYPE_AES_1, * }; */ static int pcxhr_clock_type_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static const char *textsPCXHR[7] = { "Internal", "WordClock", "AES Sync", "AES 1", "AES 2", "AES 3", "AES 4" }; static const char *textsHR22[3] = { "Internal", "AES Sync", "AES 1" }; const char **texts; struct pcxhr_mgr *mgr = snd_kcontrol_chip(kcontrol); int clock_items = 2; /* at least Internal and AES Sync clock */ if (mgr->board_has_aes1) { clock_items += mgr->capture_chips; /* add AES x */ if (!mgr->is_hr_stereo) clock_items += 1; /* add word clock */ } if (mgr->is_hr_stereo) { texts = textsHR22; snd_BUG_ON(clock_items > (HR22_CLOCK_TYPE_MAX+1)); } else { texts = textsPCXHR; snd_BUG_ON(clock_items > (PCXHR_CLOCK_TYPE_MAX+1)); } uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = clock_items; if (uinfo->value.enumerated.item >= clock_items) uinfo->value.enumerated.item = clock_items-1; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } 
static int pcxhr_clock_type_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct pcxhr_mgr *mgr = snd_kcontrol_chip(kcontrol); ucontrol->value.enumerated.item[0] = mgr->use_clock_type; return 0; } static int pcxhr_clock_type_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct pcxhr_mgr *mgr = snd_kcontrol_chip(kcontrol); int rate, ret = 0; unsigned int clock_items = 2; /* at least Internal and AES Sync clock */ if (mgr->board_has_aes1) { clock_items += mgr->capture_chips; /* add AES x */ if (!mgr->is_hr_stereo) clock_items += 1; /* add word clock */ } if (ucontrol->value.enumerated.item[0] >= clock_items) return -EINVAL; mutex_lock(&mgr->mixer_mutex); if (mgr->use_clock_type != ucontrol->value.enumerated.item[0]) { mutex_lock(&mgr->setup_mutex); mgr->use_clock_type = ucontrol->value.enumerated.item[0]; rate = 0; if (mgr->use_clock_type != PCXHR_CLOCK_TYPE_INTERNAL) { pcxhr_get_external_clock(mgr, mgr->use_clock_type, &rate); } else { rate = mgr->sample_rate; if (!rate) rate = 48000; } if (rate) { pcxhr_set_clock(mgr, rate); if (mgr->sample_rate) mgr->sample_rate = rate; } mutex_unlock(&mgr->setup_mutex); ret = 1; /* return 1 even if the set was not done. ok ? 
*/ } mutex_unlock(&mgr->mixer_mutex); return ret; } static struct snd_kcontrol_new pcxhr_control_clock_type = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Clock Mode", .info = pcxhr_clock_type_info, .get = pcxhr_clock_type_get, .put = pcxhr_clock_type_put, }; /* * clock rate control * specific control that scans the sample rates on the external plugs */ static int pcxhr_clock_rate_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct pcxhr_mgr *mgr = snd_kcontrol_chip(kcontrol); uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 3 + mgr->capture_chips; uinfo->value.integer.min = 0; /* clock not present */ uinfo->value.integer.max = 192000; /* max sample rate 192 kHz */ return 0; } static int pcxhr_clock_rate_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct pcxhr_mgr *mgr = snd_kcontrol_chip(kcontrol); int i, err, rate; mutex_lock(&mgr->mixer_mutex); for(i = 0; i < 3 + mgr->capture_chips; i++) { if (i == PCXHR_CLOCK_TYPE_INTERNAL) rate = mgr->sample_rate_real; else { err = pcxhr_get_external_clock(mgr, i, &rate); if (err) break; } ucontrol->value.integer.value[i] = rate; } mutex_unlock(&mgr->mixer_mutex); return 0; } static struct snd_kcontrol_new pcxhr_control_clock_rate = { .access = SNDRV_CTL_ELEM_ACCESS_READ, .iface = SNDRV_CTL_ELEM_IFACE_CARD, .name = "Clock Rates", .info = pcxhr_clock_rate_info, .get = pcxhr_clock_rate_get, }; /* * IEC958 status bits */ static int pcxhr_iec958_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int pcxhr_iec958_capture_byte(struct snd_pcxhr *chip, int aes_idx, unsigned char *aes_bits) { int i, err; unsigned char temp; struct pcxhr_rmh rmh; pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_READ); rmh.cmd[0] |= IO_NUM_UER_CHIP_REG; switch (chip->chip_idx) { /* instead of CS8420_01_CS use CS8416_01_CS for AES SYNC plug */ case 0: rmh.cmd[1] = CS8420_01_CS; break; case 1: rmh.cmd[1] = 
CS8420_23_CS; break; case 2: rmh.cmd[1] = CS8420_45_CS; break; case 3: rmh.cmd[1] = CS8420_67_CS; break; default: return -EINVAL; } if (chip->mgr->board_aes_in_192k) { switch (aes_idx) { case 0: rmh.cmd[2] = CS8416_CSB0; break; case 1: rmh.cmd[2] = CS8416_CSB1; break; case 2: rmh.cmd[2] = CS8416_CSB2; break; case 3: rmh.cmd[2] = CS8416_CSB3; break; case 4: rmh.cmd[2] = CS8416_CSB4; break; default: return -EINVAL; } } else { switch (aes_idx) { /* instead of CS8420_CSB0 use CS8416_CSBx for AES SYNC plug */ case 0: rmh.cmd[2] = CS8420_CSB0; break; case 1: rmh.cmd[2] = CS8420_CSB1; break; case 2: rmh.cmd[2] = CS8420_CSB2; break; case 3: rmh.cmd[2] = CS8420_CSB3; break; case 4: rmh.cmd[2] = CS8420_CSB4; break; default: return -EINVAL; } } /* size and code the chip id for the fpga */ rmh.cmd[1] &= 0x0fffff; /* chip signature + map for spi read */ rmh.cmd[2] &= CHIP_SIG_AND_MAP_SPI; rmh.cmd_len = 3; err = pcxhr_send_msg(chip->mgr, &rmh); if (err) return err; if (chip->mgr->board_aes_in_192k) { temp = (unsigned char)rmh.stat[1]; } else { temp = 0; /* reversed bit order (not with CS8416_01_CS) */ for (i = 0; i < 8; i++) { temp <<= 1; if (rmh.stat[1] & (1 << i)) temp |= 1; } } snd_printdd("read iec958 AES %d byte %d = 0x%x\n", chip->chip_idx, aes_idx, temp); *aes_bits = temp; return 0; } static int pcxhr_iec958_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); unsigned char aes_bits; int i, err; mutex_lock(&chip->mgr->mixer_mutex); for(i = 0; i < 5; i++) { if (kcontrol->private_value == 0) /* playback */ aes_bits = chip->aes_bits[i]; else { /* capture */ if (chip->mgr->is_hr_stereo) err = hr222_iec958_capture_byte(chip, i, &aes_bits); else err = pcxhr_iec958_capture_byte(chip, i, &aes_bits); if (err) break; } ucontrol->value.iec958.status[i] = aes_bits; } mutex_unlock(&chip->mgr->mixer_mutex); return 0; } static int pcxhr_iec958_mask_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value 
*ucontrol) { int i; for (i = 0; i < 5; i++) ucontrol->value.iec958.status[i] = 0xff; return 0; } static int pcxhr_iec958_update_byte(struct snd_pcxhr *chip, int aes_idx, unsigned char aes_bits) { int i, err, cmd; unsigned char new_bits = aes_bits; unsigned char old_bits = chip->aes_bits[aes_idx]; struct pcxhr_rmh rmh; for (i = 0; i < 8; i++) { if ((old_bits & 0x01) != (new_bits & 0x01)) { cmd = chip->chip_idx & 0x03; /* chip index 0..3 */ if (chip->chip_idx > 3) /* new bit used if chip_idx>3 (PCX1222HR) */ cmd |= 1 << 22; cmd |= ((aes_idx << 3) + i) << 2; /* add bit offset */ cmd |= (new_bits & 0x01) << 23; /* add bit value */ pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_WRITE); rmh.cmd[0] |= IO_NUM_REG_CUER; rmh.cmd[1] = cmd; rmh.cmd_len = 2; snd_printdd("write iec958 AES %d byte %d bit %d (cmd %x)\n", chip->chip_idx, aes_idx, i, cmd); err = pcxhr_send_msg(chip->mgr, &rmh); if (err) return err; } old_bits >>= 1; new_bits >>= 1; } chip->aes_bits[aes_idx] = aes_bits; return 0; } static int pcxhr_iec958_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol); int i, changed = 0; /* playback */ mutex_lock(&chip->mgr->mixer_mutex); for (i = 0; i < 5; i++) { if (ucontrol->value.iec958.status[i] != chip->aes_bits[i]) { if (chip->mgr->is_hr_stereo) hr222_iec958_update_byte(chip, i, ucontrol->value.iec958.status[i]); else pcxhr_iec958_update_byte(chip, i, ucontrol->value.iec958.status[i]); changed = 1; } } mutex_unlock(&chip->mgr->mixer_mutex); return changed; } static struct snd_kcontrol_new pcxhr_control_playback_iec958_mask = { .access = SNDRV_CTL_ELEM_ACCESS_READ, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK), .info = pcxhr_iec958_info, .get = pcxhr_iec958_mask_get }; static struct snd_kcontrol_new pcxhr_control_playback_iec958 = { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT), .info = pcxhr_iec958_info, .get = pcxhr_iec958_get, .put 
= pcxhr_iec958_put, .private_value = 0 /* playback */ }; static struct snd_kcontrol_new pcxhr_control_capture_iec958_mask = { .access = SNDRV_CTL_ELEM_ACCESS_READ, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",CAPTURE,MASK), .info = pcxhr_iec958_info, .get = pcxhr_iec958_mask_get }; static struct snd_kcontrol_new pcxhr_control_capture_iec958 = { .access = SNDRV_CTL_ELEM_ACCESS_READ, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",CAPTURE,DEFAULT), .info = pcxhr_iec958_info, .get = pcxhr_iec958_get, .private_value = 1 /* capture */ }; static void pcxhr_init_audio_levels(struct snd_pcxhr *chip) { int i; for (i = 0; i < 2; i++) { if (chip->nb_streams_play) { int j; /* at boot time the digital volumes are unmuted 0dB */ for (j = 0; j < PCXHR_PLAYBACK_STREAMS; j++) { chip->digital_playback_active[j][i] = 1; chip->digital_playback_volume[j][i] = PCXHR_DIGITAL_ZERO_LEVEL; } /* after boot, only two bits are set on the uer * interface */ chip->aes_bits[0] = (IEC958_AES0_PROFESSIONAL | IEC958_AES0_PRO_FS_48000); #ifdef CONFIG_SND_DEBUG /* analog volumes for playback * (is LEVEL_MIN after boot) */ chip->analog_playback_active[i] = 1; if (chip->mgr->is_hr_stereo) chip->analog_playback_volume[i] = HR222_LINE_PLAYBACK_ZERO_LEVEL; else { chip->analog_playback_volume[i] = PCXHR_LINE_PLAYBACK_ZERO_LEVEL; pcxhr_update_analog_audio_level(chip, 0, i); } #endif /* stereo cards need to be initialised after boot */ if (chip->mgr->is_hr_stereo) hr222_update_analog_audio_level(chip, 0, i); } if (chip->nb_streams_capt) { /* at boot time the digital volumes are unmuted 0dB */ chip->digital_capture_volume[i] = PCXHR_DIGITAL_ZERO_LEVEL; chip->analog_capture_active = 1; #ifdef CONFIG_SND_DEBUG /* analog volumes for playback * (is LEVEL_MIN after boot) */ if (chip->mgr->is_hr_stereo) chip->analog_capture_volume[i] = HR222_LINE_CAPTURE_ZERO_LEVEL; else { chip->analog_capture_volume[i] = PCXHR_LINE_CAPTURE_ZERO_LEVEL; pcxhr_update_analog_audio_level(chip, 
1, i); } #endif /* stereo cards need to be initialised after boot */ if (chip->mgr->is_hr_stereo) hr222_update_analog_audio_level(chip, 1, i); } } return; } int pcxhr_create_mixer(struct pcxhr_mgr *mgr) { struct snd_pcxhr *chip; int err, i; mutex_init(&mgr->mixer_mutex); /* can be in another place */ for (i = 0; i < mgr->num_cards; i++) { struct snd_kcontrol_new temp; chip = mgr->chip[i]; if (chip->nb_streams_play) { /* analog output level control */ temp = pcxhr_control_analog_level; temp.name = "Master Playback Volume"; temp.private_value = 0; /* playback */ if (mgr->is_hr_stereo) temp.tlv.p = db_scale_a_hr222_playback; else temp.tlv.p = db_scale_analog_playback; err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip)); if (err < 0) return err; /* output mute controls */ err = snd_ctl_add(chip->card, snd_ctl_new1(&pcxhr_control_output_switch, chip)); if (err < 0) return err; temp = snd_pcxhr_pcm_vol; temp.name = "PCM Playback Volume"; temp.count = PCXHR_PLAYBACK_STREAMS; temp.private_value = 0; /* playback */ err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip)); if (err < 0) return err; err = snd_ctl_add(chip->card, snd_ctl_new1(&pcxhr_control_pcm_switch, chip)); if (err < 0) return err; /* IEC958 controls */ err = snd_ctl_add(chip->card, snd_ctl_new1(&pcxhr_control_playback_iec958_mask, chip)); if (err < 0) return err; err = snd_ctl_add(chip->card, snd_ctl_new1(&pcxhr_control_playback_iec958, chip)); if (err < 0) return err; } if (chip->nb_streams_capt) { /* analog input level control */ temp = pcxhr_control_analog_level; temp.name = "Line Capture Volume"; temp.private_value = 1; /* capture */ if (mgr->is_hr_stereo) temp.tlv.p = db_scale_a_hr222_capture; else temp.tlv.p = db_scale_analog_capture; err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip)); if (err < 0) return err; temp = snd_pcxhr_pcm_vol; temp.name = "PCM Capture Volume"; temp.count = 1; temp.private_value = 1; /* capture */ err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip)); if (err < 
0) return err; /* Audio source */ err = snd_ctl_add(chip->card, snd_ctl_new1(&pcxhr_control_audio_src, chip)); if (err < 0) return err; /* IEC958 controls */ err = snd_ctl_add(chip->card, snd_ctl_new1(&pcxhr_control_capture_iec958_mask, chip)); if (err < 0) return err; err = snd_ctl_add(chip->card, snd_ctl_new1(&pcxhr_control_capture_iec958, chip)); if (err < 0) return err; if (mgr->is_hr_stereo) { err = hr222_add_mic_controls(chip); if (err < 0) return err; } } /* monitoring only if playback and capture device available */ if (chip->nb_streams_capt > 0 && chip->nb_streams_play > 0) { /* monitoring */ err = snd_ctl_add(chip->card, snd_ctl_new1(&pcxhr_control_monitor_vol, chip)); if (err < 0) return err; err = snd_ctl_add(chip->card, snd_ctl_new1(&pcxhr_control_monitor_sw, chip)); if (err < 0) return err; } if (i == 0) { /* clock mode only one control per pcxhr */ err = snd_ctl_add(chip->card, snd_ctl_new1(&pcxhr_control_clock_type, mgr)); if (err < 0) return err; /* non standard control used to scan * the external clock presence/frequencies */ err = snd_ctl_add(chip->card, snd_ctl_new1(&pcxhr_control_clock_rate, mgr)); if (err < 0) return err; } /* init values for the mixer data */ pcxhr_init_audio_levels(chip); } return 0; }
gpl-2.0
ptmr3/L900_Kernel
drivers/video/matrox/g450_pll.c
14656
13720
/* * * Hardware accelerated Matrox PCI cards - G450/G550 PLL control. * * (c) 2001-2002 Petr Vandrovec <vandrove@vc.cvut.cz> * * Portions Copyright (c) 2001 Matrox Graphics Inc. * * Version: 1.64 2002/06/10 * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * */ #include "g450_pll.h" #include "matroxfb_DAC1064.h" static inline unsigned int g450_vco2f(unsigned char p, unsigned int fvco) { return (p & 0x40) ? fvco : fvco >> ((p & 3) + 1); } static inline unsigned int g450_f2vco(unsigned char p, unsigned int fin) { return (p & 0x40) ? fin : fin << ((p & 3) + 1); } static unsigned int g450_mnp2vco(const struct matrox_fb_info *minfo, unsigned int mnp) { unsigned int m, n; m = ((mnp >> 16) & 0x0FF) + 1; n = ((mnp >> 7) & 0x1FE) + 4; return (minfo->features.pll.ref_freq * n + (m >> 1)) / m; } unsigned int g450_mnp2f(const struct matrox_fb_info *minfo, unsigned int mnp) { return g450_vco2f(mnp, g450_mnp2vco(minfo, mnp)); } static inline unsigned int pll_freq_delta(unsigned int f1, unsigned int f2) { if (f2 < f1) { f2 = f1 - f2; } else { f2 = f2 - f1; } return f2; } #define NO_MORE_MNP 0x01FFFFFF #define G450_MNP_FREQBITS (0xFFFFFF43) /* do not mask high byte so we'll catch NO_MORE_MNP */ static unsigned int g450_nextpll(const struct matrox_fb_info *minfo, const struct matrox_pll_limits *pi, unsigned int *fvco, unsigned int mnp) { unsigned int m, n, p; unsigned int tvco = *fvco; m = (mnp >> 16) & 0xFF; p = mnp & 0xFF; do { if (m == 0 || m == 0xFF) { if (m == 0) { if (p & 0x40) { return NO_MORE_MNP; } if (p & 3) { p--; } else { p = 0x40; } tvco >>= 1; if (tvco < pi->vcomin) { return NO_MORE_MNP; } *fvco = tvco; } p &= 0x43; if (tvco < 550000) { /* p |= 0x00; */ } else if (tvco < 700000) { p |= 0x08; } else if (tvco < 1000000) { p |= 0x10; } else if (tvco < 1150000) { p |= 0x18; } else { p |= 0x20; } m = 9; } else { m--; } n = ((tvco * (m+1) + 
minfo->features.pll.ref_freq) / (minfo->features.pll.ref_freq * 2)) - 2; } while (n < 0x03 || n > 0x7A); return (m << 16) | (n << 8) | p; } static unsigned int g450_firstpll(const struct matrox_fb_info *minfo, const struct matrox_pll_limits *pi, unsigned int *vco, unsigned int fout) { unsigned int p; unsigned int vcomax; vcomax = pi->vcomax; if (fout > (vcomax / 2)) { if (fout > vcomax) { *vco = vcomax; } else { *vco = fout; } p = 0x40; } else { unsigned int tvco; p = 3; tvco = g450_f2vco(p, fout); while (p && (tvco > vcomax)) { p--; tvco >>= 1; } if (tvco < pi->vcomin) { tvco = pi->vcomin; } *vco = tvco; } return g450_nextpll(minfo, pi, vco, 0xFF0000 | p); } static inline unsigned int g450_setpll(const struct matrox_fb_info *minfo, unsigned int mnp, unsigned int pll) { switch (pll) { case M_PIXEL_PLL_A: matroxfb_DAC_out(minfo, M1064_XPIXPLLAM, mnp >> 16); matroxfb_DAC_out(minfo, M1064_XPIXPLLAN, mnp >> 8); matroxfb_DAC_out(minfo, M1064_XPIXPLLAP, mnp); return M1064_XPIXPLLSTAT; case M_PIXEL_PLL_B: matroxfb_DAC_out(minfo, M1064_XPIXPLLBM, mnp >> 16); matroxfb_DAC_out(minfo, M1064_XPIXPLLBN, mnp >> 8); matroxfb_DAC_out(minfo, M1064_XPIXPLLBP, mnp); return M1064_XPIXPLLSTAT; case M_PIXEL_PLL_C: matroxfb_DAC_out(minfo, M1064_XPIXPLLCM, mnp >> 16); matroxfb_DAC_out(minfo, M1064_XPIXPLLCN, mnp >> 8); matroxfb_DAC_out(minfo, M1064_XPIXPLLCP, mnp); return M1064_XPIXPLLSTAT; case M_SYSTEM_PLL: matroxfb_DAC_out(minfo, DAC1064_XSYSPLLM, mnp >> 16); matroxfb_DAC_out(minfo, DAC1064_XSYSPLLN, mnp >> 8); matroxfb_DAC_out(minfo, DAC1064_XSYSPLLP, mnp); return DAC1064_XSYSPLLSTAT; case M_VIDEO_PLL: matroxfb_DAC_out(minfo, M1064_XVIDPLLM, mnp >> 16); matroxfb_DAC_out(minfo, M1064_XVIDPLLN, mnp >> 8); matroxfb_DAC_out(minfo, M1064_XVIDPLLP, mnp); return M1064_XVIDPLLSTAT; } return 0; } static inline unsigned int g450_cmppll(const struct matrox_fb_info *minfo, unsigned int mnp, unsigned int pll) { unsigned char m = mnp >> 16; unsigned char n = mnp >> 8; unsigned char p = mnp; switch 
(pll) { case M_PIXEL_PLL_A: return (matroxfb_DAC_in(minfo, M1064_XPIXPLLAM) != m || matroxfb_DAC_in(minfo, M1064_XPIXPLLAN) != n || matroxfb_DAC_in(minfo, M1064_XPIXPLLAP) != p); case M_PIXEL_PLL_B: return (matroxfb_DAC_in(minfo, M1064_XPIXPLLBM) != m || matroxfb_DAC_in(minfo, M1064_XPIXPLLBN) != n || matroxfb_DAC_in(minfo, M1064_XPIXPLLBP) != p); case M_PIXEL_PLL_C: return (matroxfb_DAC_in(minfo, M1064_XPIXPLLCM) != m || matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) != n || matroxfb_DAC_in(minfo, M1064_XPIXPLLCP) != p); case M_SYSTEM_PLL: return (matroxfb_DAC_in(minfo, DAC1064_XSYSPLLM) != m || matroxfb_DAC_in(minfo, DAC1064_XSYSPLLN) != n || matroxfb_DAC_in(minfo, DAC1064_XSYSPLLP) != p); case M_VIDEO_PLL: return (matroxfb_DAC_in(minfo, M1064_XVIDPLLM) != m || matroxfb_DAC_in(minfo, M1064_XVIDPLLN) != n || matroxfb_DAC_in(minfo, M1064_XVIDPLLP) != p); } return 1; } static inline int g450_isplllocked(const struct matrox_fb_info *minfo, unsigned int regidx) { unsigned int j; for (j = 0; j < 1000; j++) { if (matroxfb_DAC_in(minfo, regidx) & 0x40) { unsigned int r = 0; int i; for (i = 0; i < 100; i++) { r += matroxfb_DAC_in(minfo, regidx) & 0x40; } return r >= (90 * 0x40); } /* udelay(1)... but DAC_in is much slower... 
*/ } return 0; } static int g450_testpll(const struct matrox_fb_info *minfo, unsigned int mnp, unsigned int pll) { return g450_isplllocked(minfo, g450_setpll(minfo, mnp, pll)); } static void updatehwstate_clk(struct matrox_hw_state* hw, unsigned int mnp, unsigned int pll) { switch (pll) { case M_SYSTEM_PLL: hw->DACclk[3] = mnp >> 16; hw->DACclk[4] = mnp >> 8; hw->DACclk[5] = mnp; break; } } void matroxfb_g450_setpll_cond(struct matrox_fb_info *minfo, unsigned int mnp, unsigned int pll) { if (g450_cmppll(minfo, mnp, pll)) { g450_setpll(minfo, mnp, pll); } } static inline unsigned int g450_findworkingpll(struct matrox_fb_info *minfo, unsigned int pll, unsigned int *mnparray, unsigned int mnpcount) { unsigned int found = 0; unsigned int idx; unsigned int mnpfound = mnparray[0]; for (idx = 0; idx < mnpcount; idx++) { unsigned int sarray[3]; unsigned int *sptr; { unsigned int mnp; sptr = sarray; mnp = mnparray[idx]; if (mnp & 0x38) { *sptr++ = mnp - 8; } if ((mnp & 0x38) != 0x38) { *sptr++ = mnp + 8; } *sptr = mnp; } while (sptr >= sarray) { unsigned int mnp = *sptr--; if (g450_testpll(minfo, mnp - 0x0300, pll) && g450_testpll(minfo, mnp + 0x0300, pll) && g450_testpll(minfo, mnp - 0x0200, pll) && g450_testpll(minfo, mnp + 0x0200, pll) && g450_testpll(minfo, mnp - 0x0100, pll) && g450_testpll(minfo, mnp + 0x0100, pll)) { if (g450_testpll(minfo, mnp, pll)) { return mnp; } } else if (!found && g450_testpll(minfo, mnp, pll)) { mnpfound = mnp; found = 1; } } } g450_setpll(minfo, mnpfound, pll); return mnpfound; } static void g450_addcache(struct matrox_pll_cache* ci, unsigned int mnp_key, unsigned int mnp_value) { if (++ci->valid > ARRAY_SIZE(ci->data)) { ci->valid = ARRAY_SIZE(ci->data); } memmove(ci->data + 1, ci->data, (ci->valid - 1) * sizeof(*ci->data)); ci->data[0].mnp_key = mnp_key & G450_MNP_FREQBITS; ci->data[0].mnp_value = mnp_value; } static int g450_checkcache(struct matrox_fb_info *minfo, struct matrox_pll_cache *ci, unsigned int mnp_key) { unsigned int i; 
mnp_key &= G450_MNP_FREQBITS; for (i = 0; i < ci->valid; i++) { if (ci->data[i].mnp_key == mnp_key) { unsigned int mnp; mnp = ci->data[i].mnp_value; if (i) { memmove(ci->data + 1, ci->data, i * sizeof(*ci->data)); ci->data[0].mnp_key = mnp_key; ci->data[0].mnp_value = mnp; } return mnp; } } return NO_MORE_MNP; } static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout, unsigned int pll, unsigned int *mnparray, unsigned int *deltaarray) { unsigned int mnpcount; unsigned int pixel_vco; const struct matrox_pll_limits* pi; struct matrox_pll_cache* ci; pixel_vco = 0; switch (pll) { case M_PIXEL_PLL_A: case M_PIXEL_PLL_B: case M_PIXEL_PLL_C: { u_int8_t tmp, xpwrctrl; unsigned long flags; matroxfb_DAC_lock_irqsave(flags); xpwrctrl = matroxfb_DAC_in(minfo, M1064_XPWRCTRL); matroxfb_DAC_out(minfo, M1064_XPWRCTRL, xpwrctrl & ~M1064_XPWRCTRL_PANELPDN); mga_outb(M_SEQ_INDEX, M_SEQ1); mga_outb(M_SEQ_DATA, mga_inb(M_SEQ_DATA) | M_SEQ1_SCROFF); tmp = matroxfb_DAC_in(minfo, M1064_XPIXCLKCTRL); tmp |= M1064_XPIXCLKCTRL_DIS; if (!(tmp & M1064_XPIXCLKCTRL_PLL_UP)) { tmp |= M1064_XPIXCLKCTRL_PLL_UP; } matroxfb_DAC_out(minfo, M1064_XPIXCLKCTRL, tmp); /* DVI PLL preferred for frequencies up to panel link max, standard PLL otherwise */ if (fout >= minfo->max_pixel_clock_panellink) tmp = 0; else tmp = M1064_XDVICLKCTRL_DVIDATAPATHSEL | M1064_XDVICLKCTRL_C1DVICLKSEL | M1064_XDVICLKCTRL_C1DVICLKEN | M1064_XDVICLKCTRL_DVILOOPCTL | M1064_XDVICLKCTRL_P1LOOPBWDTCTL; /* Setting this breaks PC systems so don't do it */ /* matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp); */ matroxfb_DAC_out(minfo, M1064_XPWRCTRL, xpwrctrl); matroxfb_DAC_unlock_irqrestore(flags); } { u_int8_t misc; misc = mga_inb(M_MISC_REG_READ) & ~0x0C; switch (pll) { case M_PIXEL_PLL_A: break; case M_PIXEL_PLL_B: misc |= 0x04; break; default: misc |= 0x0C; break; } mga_outb(M_MISC_REG, misc); } pi = &minfo->limits.pixel; ci = &minfo->cache.pixel; break; case M_SYSTEM_PLL: { u_int32_t opt; 
pci_read_config_dword(minfo->pcidev, PCI_OPTION_REG, &opt); if (!(opt & 0x20)) { pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, opt | 0x20); } } pi = &minfo->limits.system; ci = &minfo->cache.system; break; case M_VIDEO_PLL: { u_int8_t tmp; unsigned int mnp; unsigned long flags; matroxfb_DAC_lock_irqsave(flags); tmp = matroxfb_DAC_in(minfo, M1064_XPWRCTRL); if (!(tmp & 2)) { matroxfb_DAC_out(minfo, M1064_XPWRCTRL, tmp | 2); } mnp = matroxfb_DAC_in(minfo, M1064_XPIXPLLCM) << 16; mnp |= matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) << 8; pixel_vco = g450_mnp2vco(minfo, mnp); matroxfb_DAC_unlock_irqrestore(flags); } pi = &minfo->limits.video; ci = &minfo->cache.video; break; default: return -EINVAL; } mnpcount = 0; { unsigned int mnp; unsigned int xvco; for (mnp = g450_firstpll(minfo, pi, &xvco, fout); mnp != NO_MORE_MNP; mnp = g450_nextpll(minfo, pi, &xvco, mnp)) { unsigned int idx; unsigned int vco; unsigned int delta; vco = g450_mnp2vco(minfo, mnp); #if 0 if (pll == M_VIDEO_PLL) { unsigned int big, small; if (vco < pixel_vco) { small = vco; big = pixel_vco; } else { small = pixel_vco; big = vco; } while (big > small) { big >>= 1; } if (big == small) { continue; } } #endif delta = pll_freq_delta(fout, g450_vco2f(mnp, vco)); for (idx = mnpcount; idx > 0; idx--) { /* == is important; due to nextpll algorithm we get sorted equally good frequencies from lower VCO frequency to higher - with <= lowest wins, while with < highest one wins */ if (delta <= deltaarray[idx-1]) { /* all else being equal except VCO, * choose VCO not near (within 1/16th or so) VCOmin * (freqs near VCOmin aren't as stable) */ if (delta == deltaarray[idx-1] && vco != g450_mnp2vco(minfo, mnparray[idx-1]) && vco < (pi->vcomin * 17 / 16)) { break; } mnparray[idx] = mnparray[idx-1]; deltaarray[idx] = deltaarray[idx-1]; } else { break; } } mnparray[idx] = mnp; deltaarray[idx] = delta; mnpcount++; } } /* VideoPLL and PixelPLL matched: do nothing... 
In all other cases we should get at least one frequency */ if (!mnpcount) { return -EBUSY; } { unsigned long flags; unsigned int mnp; matroxfb_DAC_lock_irqsave(flags); mnp = g450_checkcache(minfo, ci, mnparray[0]); if (mnp != NO_MORE_MNP) { matroxfb_g450_setpll_cond(minfo, mnp, pll); } else { mnp = g450_findworkingpll(minfo, pll, mnparray, mnpcount); g450_addcache(ci, mnparray[0], mnp); } updatehwstate_clk(&minfo->hw, mnp, pll); matroxfb_DAC_unlock_irqrestore(flags); return mnp; } } /* It must be greater than number of possible PLL values. * Currently there is 5(p) * 10(m) = 50 possible values. */ #define MNP_TABLE_SIZE 64 int matroxfb_g450_setclk(struct matrox_fb_info *minfo, unsigned int fout, unsigned int pll) { unsigned int* arr; arr = kmalloc(sizeof(*arr) * MNP_TABLE_SIZE * 2, GFP_KERNEL); if (arr) { int r; r = __g450_setclk(minfo, fout, pll, arr, arr + MNP_TABLE_SIZE); kfree(arr); return r; } return -ENOMEM; } EXPORT_SYMBOL(matroxfb_g450_setclk); EXPORT_SYMBOL(g450_mnp2f); EXPORT_SYMBOL(matroxfb_g450_setpll_cond); MODULE_AUTHOR("(c) 2001-2002 Petr Vandrovec <vandrove@vc.cvut.cz>"); MODULE_DESCRIPTION("Matrox G450/G550 PLL driver"); MODULE_LICENSE("GPL");
gpl-2.0
Starship-Android/starship_kernel_moto_shamu
drivers/pci/hotplug/rpadlpar_sysfs.c
14656
2832
/* * Interface for Dynamic Logical Partitioning of I/O Slots on * RPA-compliant PPC64 platform. * * John Rose <johnrose@austin.ibm.com> * October 2003 * * Copyright (C) 2003 IBM. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kobject.h> #include <linux/string.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include "rpadlpar.h" #include "../pci.h" #define DLPAR_KOBJ_NAME "control" /* Those two have no quotes because they are passed to __ATTR() which * stringifies the argument (yuck !) */ #define ADD_SLOT_ATTR_NAME add_slot #define REMOVE_SLOT_ATTR_NAME remove_slot #define MAX_DRC_NAME_LEN 64 static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t nbytes) { char drc_name[MAX_DRC_NAME_LEN]; char *end; int rc; if (nbytes >= MAX_DRC_NAME_LEN) return 0; memcpy(drc_name, buf, nbytes); end = strchr(drc_name, '\n'); if (!end) end = &drc_name[nbytes]; *end = '\0'; rc = dlpar_add_slot(drc_name); if (rc) return rc; return nbytes; } static ssize_t add_slot_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "0\n"); } static ssize_t remove_slot_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t nbytes) { char drc_name[MAX_DRC_NAME_LEN]; int rc; char *end; if (nbytes >= MAX_DRC_NAME_LEN) return 0; memcpy(drc_name, buf, nbytes); end = strchr(drc_name, '\n'); if (!end) end = &drc_name[nbytes]; *end = '\0'; rc = dlpar_remove_slot(drc_name); if (rc) return rc; return nbytes; } static ssize_t remove_slot_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "0\n"); } static struct kobj_attribute add_slot_attr = __ATTR(ADD_SLOT_ATTR_NAME, 0644, add_slot_show, add_slot_store); static struct kobj_attribute 
remove_slot_attr = __ATTR(REMOVE_SLOT_ATTR_NAME, 0644, remove_slot_show, remove_slot_store); static struct attribute *default_attrs[] = { &add_slot_attr.attr, &remove_slot_attr.attr, NULL, }; static struct attribute_group dlpar_attr_group = { .attrs = default_attrs, }; static struct kobject *dlpar_kobj; int dlpar_sysfs_init(void) { int error; dlpar_kobj = kobject_create_and_add(DLPAR_KOBJ_NAME, &pci_slots_kset->kobj); if (!dlpar_kobj) return -EINVAL; error = sysfs_create_group(dlpar_kobj, &dlpar_attr_group); if (error) kobject_put(dlpar_kobj); return error; } void dlpar_sysfs_exit(void) { sysfs_remove_group(dlpar_kobj, &dlpar_attr_group); kobject_put(dlpar_kobj); }
gpl-2.0
adegroote/linux
drivers/video/fbdev/matrox/g450_pll.c
14656
13720
/* * * Hardware accelerated Matrox PCI cards - G450/G550 PLL control. * * (c) 2001-2002 Petr Vandrovec <vandrove@vc.cvut.cz> * * Portions Copyright (c) 2001 Matrox Graphics Inc. * * Version: 1.64 2002/06/10 * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * */ #include "g450_pll.h" #include "matroxfb_DAC1064.h" static inline unsigned int g450_vco2f(unsigned char p, unsigned int fvco) { return (p & 0x40) ? fvco : fvco >> ((p & 3) + 1); } static inline unsigned int g450_f2vco(unsigned char p, unsigned int fin) { return (p & 0x40) ? fin : fin << ((p & 3) + 1); } static unsigned int g450_mnp2vco(const struct matrox_fb_info *minfo, unsigned int mnp) { unsigned int m, n; m = ((mnp >> 16) & 0x0FF) + 1; n = ((mnp >> 7) & 0x1FE) + 4; return (minfo->features.pll.ref_freq * n + (m >> 1)) / m; } unsigned int g450_mnp2f(const struct matrox_fb_info *minfo, unsigned int mnp) { return g450_vco2f(mnp, g450_mnp2vco(minfo, mnp)); } static inline unsigned int pll_freq_delta(unsigned int f1, unsigned int f2) { if (f2 < f1) { f2 = f1 - f2; } else { f2 = f2 - f1; } return f2; } #define NO_MORE_MNP 0x01FFFFFF #define G450_MNP_FREQBITS (0xFFFFFF43) /* do not mask high byte so we'll catch NO_MORE_MNP */ static unsigned int g450_nextpll(const struct matrox_fb_info *minfo, const struct matrox_pll_limits *pi, unsigned int *fvco, unsigned int mnp) { unsigned int m, n, p; unsigned int tvco = *fvco; m = (mnp >> 16) & 0xFF; p = mnp & 0xFF; do { if (m == 0 || m == 0xFF) { if (m == 0) { if (p & 0x40) { return NO_MORE_MNP; } if (p & 3) { p--; } else { p = 0x40; } tvco >>= 1; if (tvco < pi->vcomin) { return NO_MORE_MNP; } *fvco = tvco; } p &= 0x43; if (tvco < 550000) { /* p |= 0x00; */ } else if (tvco < 700000) { p |= 0x08; } else if (tvco < 1000000) { p |= 0x10; } else if (tvco < 1150000) { p |= 0x18; } else { p |= 0x20; } m = 9; } else { m--; } n = ((tvco * (m+1) + 
minfo->features.pll.ref_freq) / (minfo->features.pll.ref_freq * 2)) - 2; } while (n < 0x03 || n > 0x7A); return (m << 16) | (n << 8) | p; } static unsigned int g450_firstpll(const struct matrox_fb_info *minfo, const struct matrox_pll_limits *pi, unsigned int *vco, unsigned int fout) { unsigned int p; unsigned int vcomax; vcomax = pi->vcomax; if (fout > (vcomax / 2)) { if (fout > vcomax) { *vco = vcomax; } else { *vco = fout; } p = 0x40; } else { unsigned int tvco; p = 3; tvco = g450_f2vco(p, fout); while (p && (tvco > vcomax)) { p--; tvco >>= 1; } if (tvco < pi->vcomin) { tvco = pi->vcomin; } *vco = tvco; } return g450_nextpll(minfo, pi, vco, 0xFF0000 | p); } static inline unsigned int g450_setpll(const struct matrox_fb_info *minfo, unsigned int mnp, unsigned int pll) { switch (pll) { case M_PIXEL_PLL_A: matroxfb_DAC_out(minfo, M1064_XPIXPLLAM, mnp >> 16); matroxfb_DAC_out(minfo, M1064_XPIXPLLAN, mnp >> 8); matroxfb_DAC_out(minfo, M1064_XPIXPLLAP, mnp); return M1064_XPIXPLLSTAT; case M_PIXEL_PLL_B: matroxfb_DAC_out(minfo, M1064_XPIXPLLBM, mnp >> 16); matroxfb_DAC_out(minfo, M1064_XPIXPLLBN, mnp >> 8); matroxfb_DAC_out(minfo, M1064_XPIXPLLBP, mnp); return M1064_XPIXPLLSTAT; case M_PIXEL_PLL_C: matroxfb_DAC_out(minfo, M1064_XPIXPLLCM, mnp >> 16); matroxfb_DAC_out(minfo, M1064_XPIXPLLCN, mnp >> 8); matroxfb_DAC_out(minfo, M1064_XPIXPLLCP, mnp); return M1064_XPIXPLLSTAT; case M_SYSTEM_PLL: matroxfb_DAC_out(minfo, DAC1064_XSYSPLLM, mnp >> 16); matroxfb_DAC_out(minfo, DAC1064_XSYSPLLN, mnp >> 8); matroxfb_DAC_out(minfo, DAC1064_XSYSPLLP, mnp); return DAC1064_XSYSPLLSTAT; case M_VIDEO_PLL: matroxfb_DAC_out(minfo, M1064_XVIDPLLM, mnp >> 16); matroxfb_DAC_out(minfo, M1064_XVIDPLLN, mnp >> 8); matroxfb_DAC_out(minfo, M1064_XVIDPLLP, mnp); return M1064_XVIDPLLSTAT; } return 0; } static inline unsigned int g450_cmppll(const struct matrox_fb_info *minfo, unsigned int mnp, unsigned int pll) { unsigned char m = mnp >> 16; unsigned char n = mnp >> 8; unsigned char p = mnp; switch 
(pll) { case M_PIXEL_PLL_A: return (matroxfb_DAC_in(minfo, M1064_XPIXPLLAM) != m || matroxfb_DAC_in(minfo, M1064_XPIXPLLAN) != n || matroxfb_DAC_in(minfo, M1064_XPIXPLLAP) != p); case M_PIXEL_PLL_B: return (matroxfb_DAC_in(minfo, M1064_XPIXPLLBM) != m || matroxfb_DAC_in(minfo, M1064_XPIXPLLBN) != n || matroxfb_DAC_in(minfo, M1064_XPIXPLLBP) != p); case M_PIXEL_PLL_C: return (matroxfb_DAC_in(minfo, M1064_XPIXPLLCM) != m || matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) != n || matroxfb_DAC_in(minfo, M1064_XPIXPLLCP) != p); case M_SYSTEM_PLL: return (matroxfb_DAC_in(minfo, DAC1064_XSYSPLLM) != m || matroxfb_DAC_in(minfo, DAC1064_XSYSPLLN) != n || matroxfb_DAC_in(minfo, DAC1064_XSYSPLLP) != p); case M_VIDEO_PLL: return (matroxfb_DAC_in(minfo, M1064_XVIDPLLM) != m || matroxfb_DAC_in(minfo, M1064_XVIDPLLN) != n || matroxfb_DAC_in(minfo, M1064_XVIDPLLP) != p); } return 1; } static inline int g450_isplllocked(const struct matrox_fb_info *minfo, unsigned int regidx) { unsigned int j; for (j = 0; j < 1000; j++) { if (matroxfb_DAC_in(minfo, regidx) & 0x40) { unsigned int r = 0; int i; for (i = 0; i < 100; i++) { r += matroxfb_DAC_in(minfo, regidx) & 0x40; } return r >= (90 * 0x40); } /* udelay(1)... but DAC_in is much slower... 
*/ } return 0; } static int g450_testpll(const struct matrox_fb_info *minfo, unsigned int mnp, unsigned int pll) { return g450_isplllocked(minfo, g450_setpll(minfo, mnp, pll)); } static void updatehwstate_clk(struct matrox_hw_state* hw, unsigned int mnp, unsigned int pll) { switch (pll) { case M_SYSTEM_PLL: hw->DACclk[3] = mnp >> 16; hw->DACclk[4] = mnp >> 8; hw->DACclk[5] = mnp; break; } } void matroxfb_g450_setpll_cond(struct matrox_fb_info *minfo, unsigned int mnp, unsigned int pll) { if (g450_cmppll(minfo, mnp, pll)) { g450_setpll(minfo, mnp, pll); } } static inline unsigned int g450_findworkingpll(struct matrox_fb_info *minfo, unsigned int pll, unsigned int *mnparray, unsigned int mnpcount) { unsigned int found = 0; unsigned int idx; unsigned int mnpfound = mnparray[0]; for (idx = 0; idx < mnpcount; idx++) { unsigned int sarray[3]; unsigned int *sptr; { unsigned int mnp; sptr = sarray; mnp = mnparray[idx]; if (mnp & 0x38) { *sptr++ = mnp - 8; } if ((mnp & 0x38) != 0x38) { *sptr++ = mnp + 8; } *sptr = mnp; } while (sptr >= sarray) { unsigned int mnp = *sptr--; if (g450_testpll(minfo, mnp - 0x0300, pll) && g450_testpll(minfo, mnp + 0x0300, pll) && g450_testpll(minfo, mnp - 0x0200, pll) && g450_testpll(minfo, mnp + 0x0200, pll) && g450_testpll(minfo, mnp - 0x0100, pll) && g450_testpll(minfo, mnp + 0x0100, pll)) { if (g450_testpll(minfo, mnp, pll)) { return mnp; } } else if (!found && g450_testpll(minfo, mnp, pll)) { mnpfound = mnp; found = 1; } } } g450_setpll(minfo, mnpfound, pll); return mnpfound; } static void g450_addcache(struct matrox_pll_cache* ci, unsigned int mnp_key, unsigned int mnp_value) { if (++ci->valid > ARRAY_SIZE(ci->data)) { ci->valid = ARRAY_SIZE(ci->data); } memmove(ci->data + 1, ci->data, (ci->valid - 1) * sizeof(*ci->data)); ci->data[0].mnp_key = mnp_key & G450_MNP_FREQBITS; ci->data[0].mnp_value = mnp_value; } static int g450_checkcache(struct matrox_fb_info *minfo, struct matrox_pll_cache *ci, unsigned int mnp_key) { unsigned int i; 
mnp_key &= G450_MNP_FREQBITS; for (i = 0; i < ci->valid; i++) { if (ci->data[i].mnp_key == mnp_key) { unsigned int mnp; mnp = ci->data[i].mnp_value; if (i) { memmove(ci->data + 1, ci->data, i * sizeof(*ci->data)); ci->data[0].mnp_key = mnp_key; ci->data[0].mnp_value = mnp; } return mnp; } } return NO_MORE_MNP; } static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout, unsigned int pll, unsigned int *mnparray, unsigned int *deltaarray) { unsigned int mnpcount; unsigned int pixel_vco; const struct matrox_pll_limits* pi; struct matrox_pll_cache* ci; pixel_vco = 0; switch (pll) { case M_PIXEL_PLL_A: case M_PIXEL_PLL_B: case M_PIXEL_PLL_C: { u_int8_t tmp, xpwrctrl; unsigned long flags; matroxfb_DAC_lock_irqsave(flags); xpwrctrl = matroxfb_DAC_in(minfo, M1064_XPWRCTRL); matroxfb_DAC_out(minfo, M1064_XPWRCTRL, xpwrctrl & ~M1064_XPWRCTRL_PANELPDN); mga_outb(M_SEQ_INDEX, M_SEQ1); mga_outb(M_SEQ_DATA, mga_inb(M_SEQ_DATA) | M_SEQ1_SCROFF); tmp = matroxfb_DAC_in(minfo, M1064_XPIXCLKCTRL); tmp |= M1064_XPIXCLKCTRL_DIS; if (!(tmp & M1064_XPIXCLKCTRL_PLL_UP)) { tmp |= M1064_XPIXCLKCTRL_PLL_UP; } matroxfb_DAC_out(minfo, M1064_XPIXCLKCTRL, tmp); /* DVI PLL preferred for frequencies up to panel link max, standard PLL otherwise */ if (fout >= minfo->max_pixel_clock_panellink) tmp = 0; else tmp = M1064_XDVICLKCTRL_DVIDATAPATHSEL | M1064_XDVICLKCTRL_C1DVICLKSEL | M1064_XDVICLKCTRL_C1DVICLKEN | M1064_XDVICLKCTRL_DVILOOPCTL | M1064_XDVICLKCTRL_P1LOOPBWDTCTL; /* Setting this breaks PC systems so don't do it */ /* matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp); */ matroxfb_DAC_out(minfo, M1064_XPWRCTRL, xpwrctrl); matroxfb_DAC_unlock_irqrestore(flags); } { u_int8_t misc; misc = mga_inb(M_MISC_REG_READ) & ~0x0C; switch (pll) { case M_PIXEL_PLL_A: break; case M_PIXEL_PLL_B: misc |= 0x04; break; default: misc |= 0x0C; break; } mga_outb(M_MISC_REG, misc); } pi = &minfo->limits.pixel; ci = &minfo->cache.pixel; break; case M_SYSTEM_PLL: { u_int32_t opt; 
pci_read_config_dword(minfo->pcidev, PCI_OPTION_REG, &opt); if (!(opt & 0x20)) { pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, opt | 0x20); } } pi = &minfo->limits.system; ci = &minfo->cache.system; break; case M_VIDEO_PLL: { u_int8_t tmp; unsigned int mnp; unsigned long flags; matroxfb_DAC_lock_irqsave(flags); tmp = matroxfb_DAC_in(minfo, M1064_XPWRCTRL); if (!(tmp & 2)) { matroxfb_DAC_out(minfo, M1064_XPWRCTRL, tmp | 2); } mnp = matroxfb_DAC_in(minfo, M1064_XPIXPLLCM) << 16; mnp |= matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) << 8; pixel_vco = g450_mnp2vco(minfo, mnp); matroxfb_DAC_unlock_irqrestore(flags); } pi = &minfo->limits.video; ci = &minfo->cache.video; break; default: return -EINVAL; } mnpcount = 0; { unsigned int mnp; unsigned int xvco; for (mnp = g450_firstpll(minfo, pi, &xvco, fout); mnp != NO_MORE_MNP; mnp = g450_nextpll(minfo, pi, &xvco, mnp)) { unsigned int idx; unsigned int vco; unsigned int delta; vco = g450_mnp2vco(minfo, mnp); #if 0 if (pll == M_VIDEO_PLL) { unsigned int big, small; if (vco < pixel_vco) { small = vco; big = pixel_vco; } else { small = pixel_vco; big = vco; } while (big > small) { big >>= 1; } if (big == small) { continue; } } #endif delta = pll_freq_delta(fout, g450_vco2f(mnp, vco)); for (idx = mnpcount; idx > 0; idx--) { /* == is important; due to nextpll algorithm we get sorted equally good frequencies from lower VCO frequency to higher - with <= lowest wins, while with < highest one wins */ if (delta <= deltaarray[idx-1]) { /* all else being equal except VCO, * choose VCO not near (within 1/16th or so) VCOmin * (freqs near VCOmin aren't as stable) */ if (delta == deltaarray[idx-1] && vco != g450_mnp2vco(minfo, mnparray[idx-1]) && vco < (pi->vcomin * 17 / 16)) { break; } mnparray[idx] = mnparray[idx-1]; deltaarray[idx] = deltaarray[idx-1]; } else { break; } } mnparray[idx] = mnp; deltaarray[idx] = delta; mnpcount++; } } /* VideoPLL and PixelPLL matched: do nothing... 
In all other cases we should get at least one frequency */ if (!mnpcount) { return -EBUSY; } { unsigned long flags; unsigned int mnp; matroxfb_DAC_lock_irqsave(flags); mnp = g450_checkcache(minfo, ci, mnparray[0]); if (mnp != NO_MORE_MNP) { matroxfb_g450_setpll_cond(minfo, mnp, pll); } else { mnp = g450_findworkingpll(minfo, pll, mnparray, mnpcount); g450_addcache(ci, mnparray[0], mnp); } updatehwstate_clk(&minfo->hw, mnp, pll); matroxfb_DAC_unlock_irqrestore(flags); return mnp; } } /* It must be greater than number of possible PLL values. * Currently there is 5(p) * 10(m) = 50 possible values. */ #define MNP_TABLE_SIZE 64 int matroxfb_g450_setclk(struct matrox_fb_info *minfo, unsigned int fout, unsigned int pll) { unsigned int* arr; arr = kmalloc(sizeof(*arr) * MNP_TABLE_SIZE * 2, GFP_KERNEL); if (arr) { int r; r = __g450_setclk(minfo, fout, pll, arr, arr + MNP_TABLE_SIZE); kfree(arr); return r; } return -ENOMEM; } EXPORT_SYMBOL(matroxfb_g450_setclk); EXPORT_SYMBOL(g450_mnp2f); EXPORT_SYMBOL(matroxfb_g450_setpll_cond); MODULE_AUTHOR("(c) 2001-2002 Petr Vandrovec <vandrove@vc.cvut.cz>"); MODULE_DESCRIPTION("Matrox G450/G550 PLL driver"); MODULE_LICENSE("GPL");
gpl-2.0
KutuSystems/linux
net/nfc/netlink.c
65
39815
/* * Copyright (C) 2011 Instituto Nokia de Tecnologia * * Authors: * Lauro Ramos Venancio <lauro.venancio@openbossa.org> * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> * * Vendor commands implementation based on net/wireless/nl80211.c * which is: * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <net/genetlink.h> #include <linux/nfc.h> #include <linux/slab.h> #include "nfc.h" #include "llcp.h" static const struct genl_multicast_group nfc_genl_mcgrps[] = { { .name = NFC_GENL_MCAST_EVENT_NAME, }, }; static struct genl_family nfc_genl_family; static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = { [NFC_ATTR_DEVICE_INDEX] = { .type = NLA_U32 }, [NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING, .len = NFC_DEVICE_NAME_MAXSIZE }, [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 }, [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 }, [NFC_ATTR_RF_MODE] = { .type = NLA_U8 }, [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 }, [NFC_ATTR_IM_PROTOCOLS] = { .type = NLA_U32 }, [NFC_ATTR_TM_PROTOCOLS] = { .type = NLA_U32 }, [NFC_ATTR_LLC_PARAM_LTO] = { .type = NLA_U8 }, [NFC_ATTR_LLC_PARAM_RW] = { .type = NLA_U8 }, [NFC_ATTR_LLC_PARAM_MIUX] = { .type = NLA_U16 }, [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED }, [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING, .len = NFC_FIRMWARE_NAME_MAXSIZE }, [NFC_ATTR_SE_APDU] = { .type = NLA_BINARY }, [NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY }, }; static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { [NFC_SDP_ATTR_URI] = { .type = NLA_STRING }, [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 }, }; static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, struct netlink_callback *cb, int flags) { void *hdr; hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &nfc_genl_family, flags, NFC_CMD_GET_TARGET); if (!hdr) return -EMSGSIZE; genl_dump_check_consistent(cb, hdr, &nfc_genl_family); if (nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target->idx) || nla_put_u32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols) || nla_put_u16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res) || nla_put_u8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res)) goto nla_put_failure; if (target->nfcid1_len > 0 && nla_put(msg, NFC_ATTR_TARGET_NFCID1, 
target->nfcid1_len, target->nfcid1)) goto nla_put_failure; if (target->sensb_res_len > 0 && nla_put(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len, target->sensb_res)) goto nla_put_failure; if (target->sensf_res_len > 0 && nla_put(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len, target->sensf_res)) goto nla_put_failure; if (target->is_iso15693) { if (nla_put_u8(msg, NFC_ATTR_TARGET_ISO15693_DSFID, target->iso15693_dsfid) || nla_put(msg, NFC_ATTR_TARGET_ISO15693_UID, sizeof(target->iso15693_uid), target->iso15693_uid)) goto nla_put_failure; } genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb) { struct nlattr **attrbuf = genl_family_attrbuf(&nfc_genl_family); struct nfc_dev *dev; int rc; u32 idx; rc = nlmsg_parse(cb->nlh, GENL_HDRLEN + nfc_genl_family.hdrsize, attrbuf, nfc_genl_family.maxattr, nfc_genl_policy); if (rc < 0) return ERR_PTR(rc); if (!attrbuf[NFC_ATTR_DEVICE_INDEX]) return ERR_PTR(-EINVAL); idx = nla_get_u32(attrbuf[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return ERR_PTR(-ENODEV); return dev; } static int nfc_genl_dump_targets(struct sk_buff *skb, struct netlink_callback *cb) { int i = cb->args[0]; struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; int rc; if (!dev) { dev = __get_device_from_cb(cb); if (IS_ERR(dev)) return PTR_ERR(dev); cb->args[1] = (long) dev; } device_lock(&dev->dev); cb->seq = dev->targets_generation; while (i < dev->n_targets) { rc = nfc_genl_send_target(skb, &dev->targets[i], cb, NLM_F_MULTI); if (rc < 0) break; i++; } device_unlock(&dev->dev); cb->args[0] = i; return skb->len; } static int nfc_genl_dump_targets_done(struct netlink_callback *cb) { struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; if (dev) nfc_put_device(dev); return 0; } int nfc_genl_targets_found(struct nfc_dev *dev) { struct sk_buff *msg; void *hdr; dev->genl_data.poll_req_portid = 0; msg = 
nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_TARGETS_FOUND); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_TARGET_LOST); if (!hdr) goto free_msg; if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_TM_ACTIVATED); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; if (nla_put_u32(msg, NFC_ATTR_TM_PROTOCOLS, protocol)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_tm_deactivated(struct nfc_dev *dev) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_TM_DEACTIVATED); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; genlmsg_end(msg, hdr); 
genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_device_added(struct nfc_dev *dev) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_DEVICE_ADDED); if (!hdr) goto free_msg; if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_device_removed(struct nfc_dev *dev) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_DEVICE_REMOVED); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list) { struct sk_buff *msg; struct nlattr *sdp_attr, *uri_attr; struct nfc_llcp_sdp_tlv *sdres; struct hlist_node *n; void *hdr; int rc = -EMSGSIZE; int i; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_LLC_SDRES); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; sdp_attr = nla_nest_start(msg, NFC_ATTR_LLC_SDP); if (sdp_attr == NULL) { rc = -ENOMEM; goto nla_put_failure; } i = 1; 
hlist_for_each_entry_safe(sdres, n, sdres_list, node) { pr_debug("uri: %s, sap: %d\n", sdres->uri, sdres->sap); uri_attr = nla_nest_start(msg, i++); if (uri_attr == NULL) { rc = -ENOMEM; goto nla_put_failure; } if (nla_put_u8(msg, NFC_SDP_ATTR_SAP, sdres->sap)) goto nla_put_failure; if (nla_put_string(msg, NFC_SDP_ATTR_URI, sdres->uri)) goto nla_put_failure; nla_nest_end(msg, uri_attr); hlist_del(&sdres->node); nfc_llcp_free_sdp_tlv(sdres); } nla_nest_end(msg, sdp_attr); genlmsg_end(msg, hdr); return genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); nfc_llcp_free_sdp_tlv_list(sdres_list); return rc; } int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_SE_ADDED); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) || nla_put_u8(msg, NFC_ATTR_SE_TYPE, type)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_SE_REMOVED); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_se_transaction(struct nfc_dev *dev, u8 se_idx, struct nfc_evt_transaction *evt_transaction) { struct 
nfc_se *se; struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_SE_TRANSACTION); if (!hdr) goto free_msg; se = nfc_find_se(dev, se_idx); if (!se) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) || nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type) || nla_put(msg, NFC_ATTR_SE_AID, evt_transaction->aid_len, evt_transaction->aid) || nla_put(msg, NFC_ATTR_SE_PARAMS, evt_transaction->params_len, evt_transaction->params)) goto nla_put_failure; /* evt_transaction is no more used */ devm_kfree(&dev->dev, evt_transaction); genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: /* evt_transaction is no more used */ devm_kfree(&dev->dev, evt_transaction); nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_se_connectivity(struct nfc_dev *dev, u8 se_idx) { struct nfc_se *se; struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_SE_CONNECTIVITY); if (!hdr) goto free_msg; se = nfc_find_se(dev, se_idx); if (!se) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) || nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, u32 portid, u32 seq, struct netlink_callback *cb, int flags) { void *hdr; hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, flags, NFC_CMD_GET_DEVICE); if (!hdr) return -EMSGSIZE; if (cb) genl_dump_check_consistent(cb, hdr, &nfc_genl_family); if 
(nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) || nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nfc_genl_dump_devices(struct sk_buff *skb, struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; bool first_call = false; if (!iter) { first_call = true; iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL); if (!iter) return -ENOMEM; cb->args[0] = (long) iter; } mutex_lock(&nfc_devlist_mutex); cb->seq = nfc_devlist_generation; if (first_call) { nfc_device_iter_init(iter); dev = nfc_device_iter_next(iter); } while (dev) { int rc; rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb, NLM_F_MULTI); if (rc < 0) break; dev = nfc_device_iter_next(iter); } mutex_unlock(&nfc_devlist_mutex); cb->args[1] = (long) dev; return skb->len; } static int nfc_genl_dump_devices_done(struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; nfc_device_iter_exit(iter); kfree(iter); return 0; } int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, u8 comm_mode, u8 rf_mode) { struct sk_buff *msg; void *hdr; pr_debug("DEP link is up\n"); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_CMD_DEP_LINK_UP); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; if (rf_mode == NFC_RF_INITIATOR && nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx)) goto nla_put_failure; if (nla_put_u8(msg, NFC_ATTR_COMM_MODE, comm_mode) || nla_put_u8(msg, NFC_ATTR_RF_MODE, 
rf_mode)) goto nla_put_failure; genlmsg_end(msg, hdr); dev->dep_link_up = true; genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_dep_link_down_event(struct nfc_dev *dev) { struct sk_buff *msg; void *hdr; pr_debug("DEP link is down\n"); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_CMD_DEP_LINK_DOWN); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct nfc_dev *dev; u32 idx; int rc = -ENOBUFS; if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { rc = -ENOMEM; goto out_putdev; } rc = nfc_genl_send_device(msg, dev, info->snd_portid, info->snd_seq, NULL, 0); if (rc < 0) goto out_free; nfc_put_device(dev); return genlmsg_reply(msg, info); out_free: nlmsg_free(msg); out_putdev: nfc_put_device(dev); return rc; } static int nfc_genl_dev_up(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; rc = nfc_dev_up(dev); nfc_put_device(dev); return rc; } static int nfc_genl_dev_down(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = 
nfc_get_device(idx); if (!dev) return -ENODEV; rc = nfc_dev_down(dev); nfc_put_device(dev); return rc; } static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx; u32 im_protocols = 0, tm_protocols = 0; pr_debug("Poll start\n"); if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || ((!info->attrs[NFC_ATTR_IM_PROTOCOLS] && !info->attrs[NFC_ATTR_PROTOCOLS]) && !info->attrs[NFC_ATTR_TM_PROTOCOLS])) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); if (info->attrs[NFC_ATTR_TM_PROTOCOLS]) tm_protocols = nla_get_u32(info->attrs[NFC_ATTR_TM_PROTOCOLS]); if (info->attrs[NFC_ATTR_IM_PROTOCOLS]) im_protocols = nla_get_u32(info->attrs[NFC_ATTR_IM_PROTOCOLS]); else if (info->attrs[NFC_ATTR_PROTOCOLS]) im_protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; mutex_lock(&dev->genl_data.genl_data_mutex); rc = nfc_start_poll(dev, im_protocols, tm_protocols); if (!rc) dev->genl_data.poll_req_portid = info->snd_portid; mutex_unlock(&dev->genl_data.genl_data_mutex); nfc_put_device(dev); return rc; } static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; device_lock(&dev->dev); if (!dev->polling) { device_unlock(&dev->dev); return -EINVAL; } device_unlock(&dev->dev); mutex_lock(&dev->genl_data.genl_data_mutex); if (dev->genl_data.poll_req_portid != info->snd_portid) { rc = -EBUSY; goto out; } rc = nfc_stop_poll(dev); dev->genl_data.poll_req_portid = 0; out: mutex_unlock(&dev->genl_data.genl_data_mutex); nfc_put_device(dev); return rc; } static int nfc_genl_activate_target(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; u32 device_idx, target_idx, protocol; int rc; if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; 
device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(device_idx); if (!dev) return -ENODEV; target_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]); protocol = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]); nfc_deactivate_target(dev, target_idx, NFC_TARGET_MODE_SLEEP); rc = nfc_activate_target(dev, target_idx, protocol); nfc_put_device(dev); return 0; } static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc, tgt_idx; u32 idx; u8 comm; pr_debug("DEP link up\n"); if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_COMM_MODE]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); if (!info->attrs[NFC_ATTR_TARGET_INDEX]) tgt_idx = NFC_TARGET_IDX_ANY; else tgt_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]); comm = nla_get_u8(info->attrs[NFC_ATTR_COMM_MODE]); if (comm != NFC_COMM_ACTIVE && comm != NFC_COMM_PASSIVE) return -EINVAL; dev = nfc_get_device(idx); if (!dev) return -ENODEV; rc = nfc_dep_link_up(dev, tgt_idx, comm); nfc_put_device(dev); return rc; } static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; rc = nfc_dep_link_down(dev); nfc_put_device(dev); return rc; } static int nfc_genl_send_params(struct sk_buff *msg, struct nfc_llcp_local *local, u32 portid, u32 seq) { void *hdr; hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, 0, NFC_CMD_LLC_GET_PARAMS); if (!hdr) return -EMSGSIZE; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, local->dev->idx) || nla_put_u8(msg, NFC_ATTR_LLC_PARAM_LTO, local->lto) || nla_put_u8(msg, NFC_ATTR_LLC_PARAM_RW, local->rw) || nla_put_u16(msg, NFC_ATTR_LLC_PARAM_MIUX, be16_to_cpu(local->miux))) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, 
hdr); return -EMSGSIZE; } static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; struct nfc_llcp_local *local; int rc = 0; struct sk_buff *msg = NULL; u32 idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; device_lock(&dev->dev); local = nfc_llcp_find_local(dev); if (!local) { rc = -ENODEV; goto exit; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { rc = -ENOMEM; goto exit; } rc = nfc_genl_send_params(msg, local, info->snd_portid, info->snd_seq); exit: device_unlock(&dev->dev); nfc_put_device(dev); if (rc < 0) { if (msg) nlmsg_free(msg); return rc; } return genlmsg_reply(msg, info); } static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; struct nfc_llcp_local *local; u8 rw = 0; u16 miux = 0; u32 idx; int rc = 0; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || (!info->attrs[NFC_ATTR_LLC_PARAM_LTO] && !info->attrs[NFC_ATTR_LLC_PARAM_RW] && !info->attrs[NFC_ATTR_LLC_PARAM_MIUX])) return -EINVAL; if (info->attrs[NFC_ATTR_LLC_PARAM_RW]) { rw = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_RW]); if (rw > LLCP_MAX_RW) return -EINVAL; } if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX]) { miux = nla_get_u16(info->attrs[NFC_ATTR_LLC_PARAM_MIUX]); if (miux > LLCP_MAX_MIUX) return -EINVAL; } idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; device_lock(&dev->dev); local = nfc_llcp_find_local(dev); if (!local) { nfc_put_device(dev); rc = -ENODEV; goto exit; } if (info->attrs[NFC_ATTR_LLC_PARAM_LTO]) { if (dev->dep_link_up) { rc = -EINPROGRESS; goto exit; } local->lto = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_LTO]); } if (info->attrs[NFC_ATTR_LLC_PARAM_RW]) local->rw = rw; if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX]) local->miux = cpu_to_be16(miux); exit: device_unlock(&dev->dev); nfc_put_device(dev); return rc; } 
static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; struct nfc_llcp_local *local; struct nlattr *attr, *sdp_attrs[NFC_SDP_ATTR_MAX+1]; u32 idx; u8 tid; char *uri; int rc = 0, rem; size_t uri_len, tlvs_len; struct hlist_head sdreq_list; struct nfc_llcp_sdp_tlv *sdreq; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_LLC_SDP]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; device_lock(&dev->dev); if (dev->dep_link_up == false) { rc = -ENOLINK; goto exit; } local = nfc_llcp_find_local(dev); if (!local) { nfc_put_device(dev); rc = -ENODEV; goto exit; } INIT_HLIST_HEAD(&sdreq_list); tlvs_len = 0; nla_for_each_nested(attr, info->attrs[NFC_ATTR_LLC_SDP], rem) { rc = nla_parse_nested(sdp_attrs, NFC_SDP_ATTR_MAX, attr, nfc_sdp_genl_policy); if (rc != 0) { rc = -EINVAL; goto exit; } if (!sdp_attrs[NFC_SDP_ATTR_URI]) continue; uri_len = nla_len(sdp_attrs[NFC_SDP_ATTR_URI]); if (uri_len == 0) continue; uri = nla_data(sdp_attrs[NFC_SDP_ATTR_URI]); if (uri == NULL || *uri == 0) continue; tid = local->sdreq_next_tid++; sdreq = nfc_llcp_build_sdreq_tlv(tid, uri, uri_len); if (sdreq == NULL) { rc = -ENOMEM; goto exit; } tlvs_len += sdreq->tlv_len; hlist_add_head(&sdreq->node, &sdreq_list); } if (hlist_empty(&sdreq_list)) { rc = -EINVAL; goto exit; } rc = nfc_llcp_send_snl_sdreq(local, &sdreq_list, tlvs_len); exit: device_unlock(&dev->dev); nfc_put_device(dev); return rc; } static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx; char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1]; if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; nla_strlcpy(firmware_name, info->attrs[NFC_ATTR_FIRMWARE_NAME], sizeof(firmware_name)); rc = nfc_fw_download(dev, firmware_name); 
nfc_put_device(dev); return rc; } int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, u32 result) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_CMD_FW_DOWNLOAD); if (!hdr) goto free_msg; if (nla_put_string(msg, NFC_ATTR_FIRMWARE_NAME, firmware_name) || nla_put_u32(msg, NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS, result) || nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } static int nfc_genl_enable_se(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx, se_idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_SE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; rc = nfc_enable_se(dev, se_idx); nfc_put_device(dev); return rc; } static int nfc_genl_disable_se(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx, se_idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_SE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; rc = nfc_disable_se(dev, se_idx); nfc_put_device(dev); return rc; } static int nfc_genl_send_se(struct sk_buff *msg, struct nfc_dev *dev, u32 portid, u32 seq, struct netlink_callback *cb, int flags) { void *hdr; struct nfc_se *se, *n; list_for_each_entry_safe(se, n, &dev->secure_elements, list) { hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, flags, NFC_CMD_GET_SE); if (!hdr) goto nla_put_failure; if (cb) genl_dump_check_consistent(cb, hdr, 
&nfc_genl_family); if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_SE_INDEX, se->idx) || nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type)) goto nla_put_failure; genlmsg_end(msg, hdr); } return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nfc_genl_dump_ses(struct sk_buff *skb, struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; bool first_call = false; if (!iter) { first_call = true; iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL); if (!iter) return -ENOMEM; cb->args[0] = (long) iter; } mutex_lock(&nfc_devlist_mutex); cb->seq = nfc_devlist_generation; if (first_call) { nfc_device_iter_init(iter); dev = nfc_device_iter_next(iter); } while (dev) { int rc; rc = nfc_genl_send_se(skb, dev, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb, NLM_F_MULTI); if (rc < 0) break; dev = nfc_device_iter_next(iter); } mutex_unlock(&nfc_devlist_mutex); cb->args[1] = (long) dev; return skb->len; } static int nfc_genl_dump_ses_done(struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; nfc_device_iter_exit(iter); kfree(iter); return 0; } static int nfc_se_io(struct nfc_dev *dev, u32 se_idx, u8 *apdu, size_t apdu_length, se_io_cb_t cb, void *cb_context) { struct nfc_se *se; int rc; pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); device_lock(&dev->dev); if (!device_is_registered(&dev->dev)) { rc = -ENODEV; goto error; } if (!dev->dev_up) { rc = -ENODEV; goto error; } if (!dev->ops->se_io) { rc = -EOPNOTSUPP; goto error; } se = nfc_find_se(dev, se_idx); if (!se) { rc = -EINVAL; goto error; } if (se->state != NFC_SE_ENABLED) { rc = -ENODEV; goto error; } rc = dev->ops->se_io(dev, se_idx, apdu, apdu_length, cb, cb_context); error: device_unlock(&dev->dev); return rc; } struct se_io_ctx { u32 dev_idx; u32 se_idx; }; static void se_io_cb(void *context, u8 *apdu, 
size_t apdu_len, int err) { struct se_io_ctx *ctx = context; struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { kfree(ctx); return; } hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_CMD_SE_IO); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, ctx->dev_idx) || nla_put_u32(msg, NFC_ATTR_SE_INDEX, ctx->se_idx) || nla_put(msg, NFC_ATTR_SE_APDU, apdu_len, apdu)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); kfree(ctx); return; nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); kfree(ctx); return; } static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; struct se_io_ctx *ctx; u32 dev_idx, se_idx; u8 *apdu; size_t apdu_len; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_SE_INDEX] || !info->attrs[NFC_ATTR_SE_APDU]) return -EINVAL; dev_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]); dev = nfc_get_device(dev_idx); if (!dev) return -ENODEV; if (!dev->ops || !dev->ops->se_io) return -ENOTSUPP; apdu_len = nla_len(info->attrs[NFC_ATTR_SE_APDU]); if (apdu_len == 0) return -EINVAL; apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]); if (!apdu) return -EINVAL; ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->dev_idx = dev_idx; ctx->se_idx = se_idx; return nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx); } static int nfc_genl_vendor_cmd(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; struct nfc_vendor_cmd *cmd; u32 dev_idx, vid, subcmd; u8 *data; size_t data_len; int i, err; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_VENDOR_ID] || !info->attrs[NFC_ATTR_VENDOR_SUBCMD]) return -EINVAL; dev_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); vid = nla_get_u32(info->attrs[NFC_ATTR_VENDOR_ID]); subcmd = 
nla_get_u32(info->attrs[NFC_ATTR_VENDOR_SUBCMD]); dev = nfc_get_device(dev_idx); if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds) return -ENODEV; if (info->attrs[NFC_ATTR_VENDOR_DATA]) { data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]); data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]); if (data_len == 0) return -EINVAL; } else { data = NULL; data_len = 0; } for (i = 0; i < dev->n_vendor_cmds; i++) { cmd = &dev->vendor_cmds[i]; if (cmd->vendor_id != vid || cmd->subcmd != subcmd) continue; dev->cur_cmd_info = info; err = cmd->doit(dev, data, data_len); dev->cur_cmd_info = NULL; return err; } return -EOPNOTSUPP; } /* message building helper */ static inline void *nfc_hdr_put(struct sk_buff *skb, u32 portid, u32 seq, int flags, u8 cmd) { /* since there is no private header just add the generic one */ return genlmsg_put(skb, portid, seq, &nfc_genl_family, flags, cmd); } static struct sk_buff * __nfc_alloc_vendor_cmd_skb(struct nfc_dev *dev, int approxlen, u32 portid, u32 seq, enum nfc_attrs attr, u32 oui, u32 subcmd, gfp_t gfp) { struct sk_buff *skb; void *hdr; skb = nlmsg_new(approxlen + 100, gfp); if (!skb) return NULL; hdr = nfc_hdr_put(skb, portid, seq, 0, NFC_CMD_VENDOR); if (!hdr) { kfree_skb(skb); return NULL; } if (nla_put_u32(skb, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; if (nla_put_u32(skb, NFC_ATTR_VENDOR_ID, oui)) goto nla_put_failure; if (nla_put_u32(skb, NFC_ATTR_VENDOR_SUBCMD, subcmd)) goto nla_put_failure; ((void **)skb->cb)[0] = dev; ((void **)skb->cb)[1] = hdr; return skb; nla_put_failure: kfree_skb(skb); return NULL; } struct sk_buff *__nfc_alloc_vendor_cmd_reply_skb(struct nfc_dev *dev, enum nfc_attrs attr, u32 oui, u32 subcmd, int approxlen) { if (WARN_ON(!dev->cur_cmd_info)) return NULL; return __nfc_alloc_vendor_cmd_skb(dev, approxlen, dev->cur_cmd_info->snd_portid, dev->cur_cmd_info->snd_seq, attr, oui, subcmd, GFP_KERNEL); } EXPORT_SYMBOL(__nfc_alloc_vendor_cmd_reply_skb); int nfc_vendor_cmd_reply(struct sk_buff *skb) 
{ struct nfc_dev *dev = ((void **)skb->cb)[0]; void *hdr = ((void **)skb->cb)[1]; /* clear CB data for netlink core to own from now on */ memset(skb->cb, 0, sizeof(skb->cb)); if (WARN_ON(!dev->cur_cmd_info)) { kfree_skb(skb); return -EINVAL; } genlmsg_end(skb, hdr); return genlmsg_reply(skb, dev->cur_cmd_info); } EXPORT_SYMBOL(nfc_vendor_cmd_reply); static const struct genl_ops nfc_genl_ops[] = { { .cmd = NFC_CMD_GET_DEVICE, .doit = nfc_genl_get_device, .dumpit = nfc_genl_dump_devices, .done = nfc_genl_dump_devices_done, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_DEV_UP, .doit = nfc_genl_dev_up, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_DEV_DOWN, .doit = nfc_genl_dev_down, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_START_POLL, .doit = nfc_genl_start_poll, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_STOP_POLL, .doit = nfc_genl_stop_poll, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_DEP_LINK_UP, .doit = nfc_genl_dep_link_up, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_DEP_LINK_DOWN, .doit = nfc_genl_dep_link_down, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_GET_TARGET, .dumpit = nfc_genl_dump_targets, .done = nfc_genl_dump_targets_done, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_LLC_GET_PARAMS, .doit = nfc_genl_llc_get_params, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_LLC_SET_PARAMS, .doit = nfc_genl_llc_set_params, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_LLC_SDREQ, .doit = nfc_genl_llc_sdreq, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_FW_DOWNLOAD, .doit = nfc_genl_fw_download, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_ENABLE_SE, .doit = nfc_genl_enable_se, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_DISABLE_SE, .doit = nfc_genl_disable_se, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_GET_SE, .dumpit = nfc_genl_dump_ses, .done = nfc_genl_dump_ses_done, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_SE_IO, .doit = nfc_genl_se_io, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_ACTIVATE_TARGET, .doit = 
nfc_genl_activate_target, .policy = nfc_genl_policy, }, { .cmd = NFC_CMD_VENDOR, .doit = nfc_genl_vendor_cmd, .policy = nfc_genl_policy, }, }; static struct genl_family nfc_genl_family __ro_after_init = { .hdrsize = 0, .name = NFC_GENL_NAME, .version = NFC_GENL_VERSION, .maxattr = NFC_ATTR_MAX, .module = THIS_MODULE, .ops = nfc_genl_ops, .n_ops = ARRAY_SIZE(nfc_genl_ops), .mcgrps = nfc_genl_mcgrps, .n_mcgrps = ARRAY_SIZE(nfc_genl_mcgrps), }; struct urelease_work { struct work_struct w; u32 portid; }; static void nfc_urelease_event_work(struct work_struct *work) { struct urelease_work *w = container_of(work, struct urelease_work, w); struct class_dev_iter iter; struct nfc_dev *dev; pr_debug("portid %d\n", w->portid); mutex_lock(&nfc_devlist_mutex); nfc_device_iter_init(&iter); dev = nfc_device_iter_next(&iter); while (dev) { mutex_lock(&dev->genl_data.genl_data_mutex); if (dev->genl_data.poll_req_portid == w->portid) { nfc_stop_poll(dev); dev->genl_data.poll_req_portid = 0; } mutex_unlock(&dev->genl_data.genl_data_mutex); dev = nfc_device_iter_next(&iter); } nfc_device_iter_exit(&iter); mutex_unlock(&nfc_devlist_mutex); kfree(w); } static int nfc_genl_rcv_nl_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netlink_notify *n = ptr; struct urelease_work *w; if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC) goto out; pr_debug("NETLINK_URELEASE event from id %d\n", n->portid); w = kmalloc(sizeof(*w), GFP_ATOMIC); if (w) { INIT_WORK((struct work_struct *) w, nfc_urelease_event_work); w->portid = n->portid; schedule_work((struct work_struct *) w); } out: return NOTIFY_DONE; } void nfc_genl_data_init(struct nfc_genl_data *genl_data) { genl_data->poll_req_portid = 0; mutex_init(&genl_data->genl_data_mutex); } void nfc_genl_data_exit(struct nfc_genl_data *genl_data) { mutex_destroy(&genl_data->genl_data_mutex); } static struct notifier_block nl_notifier = { .notifier_call = nfc_genl_rcv_nl_event, }; /** * nfc_genl_init() - Initialize 
netlink interface * * This initialization function registers the nfc netlink family. */ int __init nfc_genl_init(void) { int rc; rc = genl_register_family(&nfc_genl_family); if (rc) return rc; netlink_register_notifier(&nl_notifier); return 0; } /** * nfc_genl_exit() - Deinitialize netlink interface * * This exit function unregisters the nfc netlink family. */ void nfc_genl_exit(void) { netlink_unregister_notifier(&nl_notifier); genl_unregister_family(&nfc_genl_family); }
gpl-2.0
neldar/linux_gt-i8190
net/bluetooth_mgmt/l2cap_core.c
65
104458
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> Copyright (C) 2010 Google Inc. Copyright (C) 2011 ProFUSION Embedded Systems Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth L2CAP core. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/socket.h> #include <linux/skbuff.h> #include <linux/list.h> #include <linux/device.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/crc16.h> #include <net/sock.h> #include <asm/system.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/smp.h> bool disable_ertm; static u32 l2cap_feat_mask = 0x00000000; static u8 l2cap_fixed_chan[8] = { 0x02, }; static LIST_HEAD(chan_list); static DEFINE_RWLOCK(chan_list_lock); static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, u8 ident, u16 dlen, void *data); static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data); static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err); static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb); /* workaround for a2dp chopping in multi connection. 
*/ static struct l2cap_conn *av_conn; static struct l2cap_conn *hid_conn; static struct l2cap_conn *rfc_conn; /* END SS_BLUEZ_BT */ /* ---- L2CAP channels ---- */ static inline void chan_hold(struct l2cap_chan *c) { atomic_inc(&c->refcnt); } static inline void chan_put(struct l2cap_chan *c) { if (atomic_dec_and_test(&c->refcnt)) kfree(c); } static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; list_for_each_entry(c, &conn->chan_l, list) { if (c->dcid == cid) return c; } return NULL; } static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; list_for_each_entry(c, &conn->chan_l, list) { if (c->scid == cid) return c; } return NULL; } /* Find channel with given SCID. * Returns locked socket */ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; read_lock(&conn->chan_lock); c = __l2cap_get_chan_by_scid(conn, cid); if (c) bh_lock_sock(c->sk); read_unlock(&conn->chan_lock); return c; } static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) { struct l2cap_chan *c; list_for_each_entry(c, &conn->chan_l, list) { if (c->ident == ident) return c; } return NULL; } static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) { struct l2cap_chan *c; read_lock(&conn->chan_lock); c = __l2cap_get_chan_by_ident(conn, ident); if (c) bh_lock_sock(c->sk); read_unlock(&conn->chan_lock); return c; } static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src) { struct l2cap_chan *c; list_for_each_entry(c, &chan_list, global_l) { if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src)) goto found; } c = NULL; found: return c; } int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm) { int err; write_lock_bh(&chan_list_lock); if (psm && __l2cap_global_chan_by_addr(psm, src)) { err = -EADDRINUSE; goto done; } if (psm) { chan->psm = psm; 
chan->sport = psm; err = 0; } else { u16 p; err = -EINVAL; for (p = 0x1001; p < 0x1100; p += 2) if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) { chan->psm = cpu_to_le16(p); chan->sport = cpu_to_le16(p); err = 0; break; } } done: write_unlock_bh(&chan_list_lock); return err; } int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid) { write_lock_bh(&chan_list_lock); chan->scid = scid; write_unlock_bh(&chan_list_lock); return 0; } static u16 l2cap_alloc_cid(struct l2cap_conn *conn) { u16 cid = L2CAP_CID_DYN_START; for (; cid < L2CAP_CID_DYN_END; cid++) { if (!__l2cap_get_chan_by_scid(conn, cid)) return cid; } return 0; } static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout) { BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout); if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout))) chan_hold(chan); } static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer) { BT_DBG("chan %p state %d", chan, chan->state); if (timer_pending(timer) && del_timer(timer)) chan_put(chan); } static void l2cap_state_change(struct l2cap_chan *chan, int state) { chan->state = state; chan->ops->state_change(chan->data, state); } static void l2cap_chan_timeout(unsigned long arg) { struct l2cap_chan *chan = (struct l2cap_chan *) arg; struct sock *sk = chan->sk; int reason; BT_DBG("chan %p state %d", chan, chan->state); bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* sk is owned by user. 
Try again later */ /*change time format */ /*__set_chan_timer(chan, HZ / 5);*/ __set_chan_timer(chan, 200); bh_unlock_sock(sk); chan_put(chan); return; } if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) reason = ECONNREFUSED; else if (chan->state == BT_CONNECT && chan->sec_level != BT_SECURITY_SDP) reason = ECONNREFUSED; else reason = ETIMEDOUT; l2cap_chan_close(chan, reason); bh_unlock_sock(sk); chan->ops->close(chan->data); chan_put(chan); } struct l2cap_chan *l2cap_chan_create(struct sock *sk) { struct l2cap_chan *chan; chan = kzalloc(sizeof(*chan), GFP_ATOMIC); if (!chan) return NULL; chan->sk = sk; write_lock_bh(&chan_list_lock); list_add(&chan->global_l, &chan_list); write_unlock_bh(&chan_list_lock); setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan); chan->state = BT_OPEN; atomic_set(&chan->refcnt, 1); return chan; } void l2cap_chan_destroy(struct l2cap_chan *chan) { write_lock_bh(&chan_list_lock); list_del(&chan->global_l); write_unlock_bh(&chan_list_lock); chan_put(chan); } static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, chan->psm, chan->dcid); /* workaround for a2dp chopping in multi connection.*/ /* todo : now, we can't check obex properly. 
*/ switch (chan->psm) { case 0x03: rfc_conn = conn; if (av_conn != NULL && rfc_conn == av_conn) rfc_conn = NULL; break; case 0x11: hid_conn = conn; break; case 0x17: av_conn = conn; if (rfc_conn != NULL && rfc_conn == av_conn) rfc_conn = NULL; break; default: break; } if (av_conn != NULL && (hid_conn != NULL || rfc_conn != NULL)) { hci_conn_set_encrypt(av_conn->hcon, 0x00); hci_conn_switch_role(av_conn->hcon, 0x00); hci_conn_set_encrypt(av_conn->hcon, 0x01); hci_conn_change_policy(av_conn->hcon, 0x04); av_conn = NULL; } /* END SS_BLUEZ_BT */ conn->disc_reason = 0x13; chan->conn = conn; if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { if (conn->hcon->type == LE_LINK) { /* LE connection */ chan->omtu = L2CAP_LE_DEFAULT_MTU; chan->scid = L2CAP_CID_LE_DATA; chan->dcid = L2CAP_CID_LE_DATA; } else { /* Alloc CID for connection-oriented socket */ chan->scid = l2cap_alloc_cid(conn); chan->omtu = L2CAP_DEFAULT_MTU; } } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { /* Connectionless socket */ chan->scid = L2CAP_CID_CONN_LESS; chan->dcid = L2CAP_CID_CONN_LESS; chan->omtu = L2CAP_DEFAULT_MTU; } else { /* Raw socket can send/recv signalling messages only */ chan->scid = L2CAP_CID_SIGNALING; chan->dcid = L2CAP_CID_SIGNALING; chan->omtu = L2CAP_DEFAULT_MTU; } chan_hold(chan); list_add(&chan->list, &conn->chan_l); } /* Delete channel. * Must be called on the locked socket. 
*/ static void l2cap_chan_del(struct l2cap_chan *chan, int err) { struct sock *sk = chan->sk; struct l2cap_conn *conn = chan->conn; struct sock *parent = bt_sk(sk)->parent; __clear_chan_timer(chan); BT_DBG("chan %p, conn %p, err %d", chan, conn, err); if (chan->mode == L2CAP_MODE_ERTM) { BT_DBG("L2CAP_MODE_ERTM __clear_ack_timer"); __clear_ack_timer(chan); __clear_retrans_timer(chan); __clear_monitor_timer(chan); } if (conn) { /* Delete from channel list */ write_lock_bh(&conn->chan_lock); list_del(&chan->list); write_unlock_bh(&conn->chan_lock); chan_put(chan); chan->conn = NULL; /* workaround for a2dp chopping in multi connection.*/ switch (chan->psm) { case 0x03: rfc_conn = NULL; break; case 0x11: hid_conn = NULL; break; case 0x17: av_conn = NULL; break; default: break; } /* to reduce disc_timeout for le (2s->100msec) * 2s is too long for FMP */ if (conn->hcon) { if (conn->hcon->type == LE_LINK) conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT/20; /* Too long to disconnect incoming ACL from local device.(40sec) */ conn->hcon->out = 1; hci_conn_put(conn->hcon); } } l2cap_state_change(chan, BT_CLOSED); sock_set_flag(sk, SOCK_ZAPPED); if (err) sk->sk_err = err; if (parent) { bt_accept_unlink(sk); parent->sk_data_ready(parent, 0); } else sk->sk_state_change(sk); if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) && test_bit(CONF_INPUT_DONE, &chan->conf_state))) return; skb_queue_purge(&chan->tx_q); if (chan->mode == L2CAP_MODE_ERTM) { struct srej_list *l, *tmp; /* __clear_retrans_timer(chan); __clear_monitor_timer(chan); __clear_ack_timer(chan); */ skb_queue_purge(&chan->srej_q); list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { list_del(&l->list); kfree(l); } } } static void l2cap_chan_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p", parent); /* Close not yet accepted channels */ while ((sk = bt_accept_dequeue(parent, NULL))) { struct l2cap_chan *chan = l2cap_pi(sk)->chan; __clear_chan_timer(chan); lock_sock(sk); 
l2cap_chan_close(chan, ECONNRESET); release_sock(sk); chan->ops->close(chan->data); } } void l2cap_chan_close(struct l2cap_chan *chan, int reason) { struct l2cap_conn *conn = chan->conn; struct sock *sk = chan->sk; BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket); switch (chan->state) { case BT_LISTEN: l2cap_chan_cleanup_listen(sk); l2cap_state_change(chan, BT_CLOSED); sock_set_flag(sk, SOCK_ZAPPED); break; case BT_CONNECTED: case BT_CONFIG: if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && conn->hcon->type == ACL_LINK) { __clear_chan_timer(chan); __set_chan_timer(chan, sk->sk_sndtimeo); l2cap_send_disconn_req(conn, chan, reason); } else l2cap_chan_del(chan, reason); break; case BT_CONNECT2: if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && conn->hcon->type == ACL_LINK) { struct l2cap_conn_rsp rsp; __u16 result; if (bt_sk(sk)->defer_setup) result = L2CAP_CR_SEC_BLOCK; else result = L2CAP_CR_BAD_PSM; l2cap_state_change(chan, BT_DISCONN); rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); rsp.result = cpu_to_le16(result); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); } l2cap_chan_del(chan, reason); break; case BT_CONNECT: case BT_DISCONN: l2cap_chan_del(chan, reason); break; default: sock_set_flag(sk, SOCK_ZAPPED); break; } } static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) { if (chan->chan_type == L2CAP_CHAN_RAW) { switch (chan->sec_level) { case BT_SECURITY_HIGH: return HCI_AT_DEDICATED_BONDING_MITM; case BT_SECURITY_MEDIUM: return HCI_AT_DEDICATED_BONDING; default: return HCI_AT_NO_BONDING; } } else if (chan->psm == cpu_to_le16(0x0001)) { if (chan->sec_level == BT_SECURITY_LOW) chan->sec_level = BT_SECURITY_SDP; if (chan->sec_level == BT_SECURITY_HIGH) return HCI_AT_NO_BONDING_MITM; else return HCI_AT_NO_BONDING; } else { switch (chan->sec_level) { case BT_SECURITY_HIGH: return HCI_AT_GENERAL_BONDING_MITM; case BT_SECURITY_MEDIUM: return 
HCI_AT_GENERAL_BONDING; default: return HCI_AT_NO_BONDING; } } } /* Service level security */ static inline int l2cap_check_security(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; __u8 auth_type; auth_type = l2cap_get_auth_type(chan); return hci_conn_security(conn->hcon, chan->sec_level, auth_type); } static u8 l2cap_get_ident(struct l2cap_conn *conn) { u8 id; /* Get next available identificator. * 1 - 128 are used by kernel. * 129 - 199 are reserved. * 200 - 254 are used by utilities like l2ping, etc. */ spin_lock_bh(&conn->lock); if (++conn->tx_ident > 128) conn->tx_ident = 1; id = conn->tx_ident; spin_unlock_bh(&conn->lock); return id; } static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) { struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); u8 flags; BT_DBG("code 0x%2.2x", code); if (!skb) return; if (lmp_no_flush_capable(conn->hcon->hdev)) flags = ACL_START_NO_FLUSH; else flags = ACL_START; bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; hci_send_acl(conn->hcon, skb, flags); } static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control) { struct sk_buff *skb; struct l2cap_hdr *lh; struct l2cap_conn *conn = chan->conn; int count, hlen = L2CAP_HDR_SIZE + 2; u8 flags; if (chan->state != BT_CONNECTED) return; if (chan->fcs == L2CAP_FCS_CRC16) hlen += 2; BT_DBG("chan %p, control 0x%2.2x", chan, control); count = min_t(unsigned int, conn->mtu, hlen); control |= L2CAP_CTRL_FRAME_TYPE; if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) control |= L2CAP_CTRL_FINAL; if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state)) control |= L2CAP_CTRL_POLL; skb = bt_skb_alloc(count, GFP_ATOMIC); if (!skb) return; lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); put_unaligned_le16(control, skb_put(skb, 2)); if (chan->fcs == L2CAP_FCS_CRC16) { u16 fcs = crc16(0, (u8 *)lh, count - 2); 
put_unaligned_le16(fcs, skb_put(skb, 2)); } if (lmp_no_flush_capable(conn->hcon->hdev)) flags = ACL_START_NO_FLUSH; else flags = ACL_START; bt_cb(skb)->force_active = chan->force_active; hci_send_acl(chan->conn->hcon, skb, flags); } static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control) { if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { control |= L2CAP_SUPER_RCV_NOT_READY; set_bit(CONN_RNR_SENT, &chan->conn_state); } else control |= L2CAP_SUPER_RCV_READY; control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; l2cap_send_sframe(chan, control); } static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) { return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); } static void l2cap_send_conn_req(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_conn_req req; req.scid = cpu_to_le16(chan->scid); req.psm = chan->psm; chan->ident = l2cap_get_ident(conn); set_bit(CONF_CONNECT_PEND, &chan->conf_state); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req); } static void l2cap_do_start(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) return; if (l2cap_check_security(chan) && __l2cap_no_conn_pending(chan)) l2cap_send_conn_req(chan); } else { struct l2cap_info_req req; req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = l2cap_get_ident(conn); mod_timer(&conn->info_timer, jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, sizeof(req), &req); } } static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask) { u32 local_feat_mask = l2cap_feat_mask; if (!disable_ertm) local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING; switch (mode) { case L2CAP_MODE_ERTM: return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask; case L2CAP_MODE_STREAMING: return 
L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}

/* Send an L2CAP Disconnection Request for @chan, stop its ERTM timers and
 * move it to BT_DISCONN; @err is latched into sk->sk_err for the socket. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	sk = chan->sk;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	l2cap_state_change(chan, BT_DISCONN);
	sk->sk_err = err;
}

/* ---- L2CAP connections ---- */

/* Drive every connection-oriented channel on @conn forward: send a
 * Connection Request for BT_CONNECT channels whose security is settled,
 * and answer pending incoming requests for BT_CONNECT2 channels. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				bh_unlock_sock(sk);
				continue;
			}

			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				/* l2cap_chan_close() calls list_del(chan)
				 * so release the lock */
				/* NOTE(review): chan_lock is dropped and
				 * retaken here while list_for_each_entry_safe
				 * is in flight; 'tmp' may go stale if another
				 * context mutates the list in the window —
				 * presumably prevented by higher-level
				 * locking, TODO confirm. */
				read_unlock(&conn->chan_lock);
				l2cap_chan_close(chan, ECONNRESET);
				read_lock(&conn->chan_lock);
				bh_unlock_sock(sk);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_check_security(chan)) {
				if (bt_sk(sk)->defer_setup) {
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				bh_unlock_sock(sk);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}

/* Find socket with cid and source bdaddr.
 * Returns closest match, locked. */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src)) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}

/* Accept an incoming LE data channel: clone a child channel off the
 * listening parent, attach it to @conn and mark it connected. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!pchan)
		return;

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

clean:
	bh_unlock_sock(parent);
}

static void l2cap_chan_ready(struct sock *sk)
{
	struct
l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* Configuration finished: reset config state, stop the channel
	 * timer and report BT_CONNECTED to the socket (and its parent,
	 * if this channel was accepted off a listener). */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);
}

/* The underlying HCI link came up: kick off LE accept handling and walk
 * all channels, marking connectionless ones connected and starting the
 * connect sequence for outgoing connection-oriented ones. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (conn->hcon->type == LE_LINK) {
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(sk);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}

/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		if (chan->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&conn->chan_lock);
}

/* Info-request timer expired: give up waiting for the peer's feature
 * mask and proceed with channel setup anyway. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}

/* Tear down @hcon's L2CAP state: close every channel with @err, stop
 * pending timers, destroy SMP state and free the l2cap_conn itself. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		chan->ops->close(chan->data);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		/* del_timer(&conn->security_timer); */
		del_timer_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}

/* LE security procedure timed out: drop the whole connection. */
static void security_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}

/* Allocate and initialise the per-link l2cap_conn for @hcon (idempotent:
 * returns the existing one if already attached, NULL on OOM). */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p hcon->type %x", hcon, conn, hcon->type);

	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE links use the SMP security timer; BR/EDR links use the
	 * info-request timer instead (the two share the union slot). */
	if (hcon->type == LE_LINK)
		setup_timer(&conn->security_timer, security_timeout,
						(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13 = remote user terminated connection (default disc reason) */
	conn->disc_reason = 0x13;

	return conn;
}

/* Add @chan to @conn's channel list under the channel-list write lock. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}

/* ---- Socket interface ---- */

/* Find socket with psm and source bdaddr.
 * Returns closest match. */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->psm == psm) {
			/* Exact match.
*/
			if (!bacmp(&bt_sk(sk)->src, src)) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}

/* Initiate an outgoing L2CAP connection on @chan: resolve a route to the
 * destination, bring up (or reuse) the ACL/LE link, attach the channel
 * and arm the connect timer.  Returns 0 or a negative errno. */
int l2cap_chan_connect(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							chan->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(chan);

	/* dcid == L2CAP_CID_LE_DATA selects an LE link, else classic ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, 0, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, 0, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	l2cap_state_change(chan, BT_CONNECT);

	/* if connection is for LE, set timeout to 5 seconds. */
	if (chan->dcid == L2CAP_CID_LE_DATA) {
		BT_DBG("L2CAP_CID_LE_DATA. set timeout to 5 seconds");
		__set_chan_timer(chan, L2CAP_CONN_LE_TIMEOUT);
	} else
		__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		/* temp to check connected le link */
		} else if (chan->dcid == L2CAP_CID_LE_DATA) {
			__clear_chan_timer(chan);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}

/* Block (interruptibly) until all ERTM frames on @sk's channel have been
 * acknowledged or the connection goes away.  Caller holds the socket
 * lock; it is released around each sleep.  Returns 0 or -errno. */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}

/* ERTM monitor timer: peer did not answer our poll; retry up to
 * remote_max_tx times, then force a disconnect. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	chan->retry_count++;
	__set_monitor_timer(chan);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}

/* ERTM retransmission timer: an I-frame went unacknowledged; enter the
 * WAIT_F state and poll the peer, switching to the monitor timer. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	chan->retry_count = 1;
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}

static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
{
	struct sk_buff
*skb;

	/* Free transmitted frames from the head of tx_q up to (but not
	 * including) the first frame the peer has not yet acknowledged. */
	while ((skb = skb_peek(&chan->tx_q)) &&
			chan->unacked_frames) {
		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
			break;

		skb = skb_dequeue(&chan->tx_q);
		kfree_skb(skb);

		chan->unacked_frames--;
	}

	if (!chan->unacked_frames)
		__clear_retrans_timer(chan);
}

/* Hand one fully-built L2CAP frame to the HCI layer for transmission. */
void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);

	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = chan->force_active;

	hci_send_acl(hcon, skb, flags);
}

/* Streaming mode: drain tx_q, stamping each frame with the next TX
 * sequence number (mod 64) and an FCS if configured.  No retransmission
 * state is kept. */
void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control, fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything but its own trailing 2 bytes */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(chan, skb);

		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
	}
}

/* Retransmit the single queued I-frame whose tx_seq matches @tx_seq
 * (SREJ recovery).  Disconnects if the frame exhausted remote_max_tx. */
static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;

	skb = skb_peek(&chan->tx_q);
	if (!skb)
		return;

	do {
		if (bt_cb(skb)->tx_seq == tx_seq)
			break;

		if (skb_queue_is_last(&chan->tx_q, skb))
			return;

	} while ((skb = skb_queue_next(&chan->tx_q, skb)));

	if (chan->remote_max_tx &&
			bt_cb(skb)->retries == chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		return;
	}

	/* Clone so the queued original survives for further retransmits;
	 * NOTE(review): skb_clone() is not checked for NULL here — TODO
	 * confirm this is acceptable under GFP_ATOMIC pressure. */
	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	control &= L2CAP_CTRL_SAR;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= L2CAP_CTRL_FINAL;

	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);

	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	if (chan->fcs == L2CAP_FCS_CRC16) {
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	}

	l2cap_do_send(chan, tx_skb);
}

/* ERTM transmit path: send clones of queued I-frames from tx_send_head
 * while the transmit window is open, arming the retransmission timer.
 * Returns the number of frames sent, or -ENOTCONN. */
int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;
	int nsent = 0;

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {

		if (chan->remote_max_tx &&
				bt_cb(skb)->retries == chan->remote_max_tx) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
			break;
		}

		tx_skb = skb_clone(skb, GFP_ATOMIC);

		bt_cb(skb)->retries++;

		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control &= L2CAP_CTRL_SAR;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control |= L2CAP_CTRL_FINAL;

		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* Writing through skb->data also updates tx_skb:
			 * skb_clone() shares the data buffer. */
			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
		}

		l2cap_do_send(chan, tx_skb);

		__set_retrans_timer(chan);

		bt_cb(skb)->tx_seq = chan->next_tx_seq;
		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;

		if (bt_cb(skb)->retries == 1)
			chan->unacked_frames++;

		chan->frames_sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		nsent++;
	}

	return nsent;
}

/* Rewind the send pointer to the oldest unacked frame and resend. */
static int l2cap_retransmit_frames(struct l2cap_chan *chan)
{
	int ret;

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = chan->tx_q.next;

	chan->next_tx_seq = chan->expected_ack_seq;
	ret = l2cap_ertm_send(chan);
	return ret;
}

/* static void l2cap_send_ack(struct l2cap_chan *chan) { u16 control = 0; control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { control |= L2CAP_SUPER_RCV_NOT_READY; set_bit(CONN_RNR_SENT, &chan->conn_state); l2cap_send_sframe(chan, control);
return; } if (l2cap_ertm_send(chan) > 0) return; control |= L2CAP_SUPER_RCV_READY; l2cap_send_sframe(chan, control); } */

/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * piggy-back the ack on pending data, falling back to an RR S-frame. */
static void __l2cap_send_ack(struct l2cap_chan *chan)
{
	u16 control = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		set_bit(CONN_RNR_SENT, &chan->conn_state);
		l2cap_send_sframe(chan, control);
		return;
	}

	if (l2cap_ertm_send(chan) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(chan, control);
}

/* Cancel the pending ack timer and acknowledge immediately. */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	__clear_ack_timer(chan);
	__l2cap_send_ack(chan);
}

/* Re-issue an SREJ for the newest entry on the SREJ list, with the
 * Final bit set (poll response). */
static void l2cap_send_srejtail(struct l2cap_chan *chan)
{
	struct srej_list *tail;
	u16 control;

	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;

	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(chan, control);
}

/* Copy @len bytes of user iovec data into @skb, spilling anything beyond
 * @count into MTU-sized fragments chained on skb's frag_list.  Returns
 * bytes copied or a negative errno. */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count,
				msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count),
						msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}

/* Build a connectionless (G-frame) PDU: basic header plus the 2-byte
 * PSM, followed by user data. */
struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus user data. */
struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

/* Build an ERTM/streaming I-frame PDU: header, 16-bit control field,
 * optional SDU-length field (@sdulen != 0 for SDU-start frames) and
 * optional 2-byte FCS placeholder (filled in at transmit time). */
struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Reserve the FCS slot; the real CRC is computed at send time. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}

/* Segment an SDU longer than remote_mps into START/CONTINUE/END I-frames
 * and append them to tx_q.  Returns total bytes queued or -errno. */
int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* The START frame carries the total SDU length (@len) as sdulen */
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps,
								control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}

/* Top-level datapath entry: build and transmit @len bytes from @msg on
 * @chan according to its mode.  Returns bytes accepted or -errno. */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= chan->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(chan, msg, len,
								control, 0);
			if (IS_ERR(skb))
				return PTR_ERR(skb);

			__skb_queue_tail(&chan->tx_q, skb);

			if (chan->tx_send_head == NULL)
				chan->tx_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(chan, msg, len);
			if (err < 0)
				return err;
		}

		if (chan->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(chan);
			err = len;
			break;
		}

		/* Remote busy + waiting for F-bit: data stays queued */
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				test_bit(CONN_WAIT_F, &chan->conn_state)) {
			err = len;
			break;
		}

		err = l2cap_ertm_send(chan);
		if (err >= 0)
			err = len;

		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}

/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&conn->chan_lock);
}

/* ---- L2CAP signalling commands ---- */

/* Build a signalling-channel command skb (code/ident/payload), chaining
 * MTU-sized continuation fragments as needed.  NULL on allocation
 * failure. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb,
L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}

/* Decode one configuration option at *@ptr, advancing it past the
 * option.  1/2/4-byte values are returned by value in *@val; any other
 * length returns a pointer to the raw bytes instead.  Returns the number
 * of bytes consumed. */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}

/* Append one configuration option (type/len/value) at *@ptr and advance
 * it.  For lengths other than 1/2/4, @val is treated as a pointer to the
 * option payload. */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}

/* Ack timer expired: acknowledge outstanding I-frames now.  Takes the
 * socket spinlock directly since this runs in timer (softirq) context. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;

	spin_lock_bh(&((chan->sk)->sk_lock.slock));
	/* l2cap_send_ack(chan);*/
	__l2cap_send_ack(chan);
	spin_unlock_bh(&((chan->sk)->sk_lock.slock));
}

/* Reset ERTM sequence state and set up the retrans/monitor/ack timers
 * and SREJ structures for a freshly configured channel. */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout,
							(unsigned long) chan);

	skb_queue_head_init(&chan->srej_q);

	INIT_LIST_HEAD(&chan->srej_l);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}

/* Pick the strongest requested mode the remote's feature mask supports,
 * falling back to basic mode. */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	switch (mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;

		/* fall through */
	default:
		return L2CAP_MODE_BASIC;
	}
}

/* Build our outgoing Configuration Request for @chan into @data (MTU and
 * RFC/FCS options as required by the selected mode).  Returns the number
 * of bytes written. */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode,
						chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = chan->tx_win;
		rfc.max_transmit = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so a full I-frame (hdr+ctrl+sdulen+fcs
		 * = 10 bytes of overhead) still fits in the link MTU. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if
(!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) break; if (chan->fcs == L2CAP_FCS_NONE || test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) { chan->fcs = L2CAP_FCS_NONE; l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); } break; case L2CAP_MODE_STREAMING: rfc.mode = L2CAP_MODE_STREAMING; rfc.txwin_size = 0; rfc.max_transmit = 0; rfc.retrans_timeout = 0; rfc.monitor_timeout = 0; rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10) rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc); if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) break; if (chan->fcs == L2CAP_FCS_NONE || test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) { chan->fcs = L2CAP_FCS_NONE; l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); } break; } req->dcid = cpu_to_le16(chan->dcid); req->flags = cpu_to_le16(0); return ptr - data; } static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) { struct l2cap_conf_rsp *rsp = data; void *ptr = rsp->data; void *req = chan->conf_req; int len = chan->conf_len; int type, hint, olen; unsigned long val; struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; u16 mtu = L2CAP_DEFAULT_MTU; u16 result = L2CAP_CONF_SUCCESS; BT_DBG("chan %p", chan); while (len >= L2CAP_CONF_OPT_SIZE) { len -= l2cap_get_conf_opt(&req, &type, &olen, &val); hint = type & L2CAP_CONF_HINT; type &= L2CAP_CONF_MASK; switch (type) { case L2CAP_CONF_MTU: mtu = val; break; case L2CAP_CONF_FLUSH_TO: chan->flush_to = val; break; case L2CAP_CONF_QOS: break; case L2CAP_CONF_RFC: if (olen == sizeof(rfc)) memcpy(&rfc, (void *) val, olen); break; case L2CAP_CONF_FCS: if (val == L2CAP_FCS_NONE) set_bit(CONF_NO_FCS_RECV, &chan->conf_state); break; default: if (hint) break; result = L2CAP_CONF_UNKNOWN; *((u8 *) ptr++) = type; break; } } if (chan->num_conf_rsp || chan->num_conf_req > 1) goto done; switch (chan->mode) { case L2CAP_MODE_STREAMING: case 
L2CAP_MODE_ERTM: if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); break; } if (chan->mode != rfc.mode) return -ECONNREFUSED; break; } done: if (chan->mode != rfc.mode) { result = L2CAP_CONF_UNACCEPT; rfc.mode = chan->mode; if (chan->num_conf_rsp == 1) return -ECONNREFUSED; l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc); } if (result == L2CAP_CONF_SUCCESS) { /* Configure output options and let the other side know * which ones we don't like. */ if (mtu < L2CAP_DEFAULT_MIN_MTU) result = L2CAP_CONF_UNACCEPT; else { chan->omtu = mtu; set_bit(CONF_MTU_DONE, &chan->conf_state); } l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu); switch (rfc.mode) { case L2CAP_MODE_BASIC: chan->fcs = L2CAP_FCS_NONE; set_bit(CONF_MODE_DONE, &chan->conf_state); break; case L2CAP_MODE_ERTM: chan->remote_tx_win = rfc.txwin_size; chan->remote_max_tx = rfc.max_transmit; if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10) rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); chan->remote_mps = le16_to_cpu(rfc.max_pdu_size); rfc.retrans_timeout = le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO); rfc.monitor_timeout = le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO); set_bit(CONF_MODE_DONE, &chan->conf_state); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc); break; case L2CAP_MODE_STREAMING: if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10) rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); chan->remote_mps = le16_to_cpu(rfc.max_pdu_size); set_bit(CONF_MODE_DONE, &chan->conf_state); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc); break; default: result = L2CAP_CONF_UNACCEPT; memset(&rfc, 0, sizeof(rfc)); rfc.mode = chan->mode; } if (result == L2CAP_CONF_SUCCESS) set_bit(CONF_OUTPUT_DONE, &chan->conf_state); } rsp->scid = cpu_to_le16(chan->dcid); rsp->result = cpu_to_le16(result); rsp->flags = cpu_to_le16(0x0000); return ptr - data; } static 
/* Parse the peer's Configure Response options and build a follow-up
 * Configure Request (into @data) reflecting the negotiated values.
 * Returns the number of bytes written, or -ECONNREFUSED when the
 * peer's RFC mode is incompatible with ours.
 */
int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	/* NOTE(review): rfc is read after the loop even if the response
	 * carried no RFC option — in that case it is uninitialized stack
	 * data; verify all peers of interest always include an RFC option.
	 */
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Clamp an undersized MTU and flag the response
			 * as unacceptable; echo the value we will use. */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A "state 2" device may not switch modes mid-way */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;
		}
	}

	/* Basic mode cannot be renegotiated away by the peer */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			break;
		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}

/* Fill @data with a Configure Response carrying @result/@flags.
 * Returns the response length (header only — no options are added).
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	return ptr - data;
}

/* Send the deferred Connect Response for a channel that was accepted
 * after a defer_setup wait, then kick off configuration if needed.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Only the first caller proceeds to send the Configure Request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}

/* Extract RFC parameters from a successful Configure Response so the
 * channel uses the timeouts/MPS the remote actually acknowledged.
 * NOTE(review): if no RFC option is present, rfc is read uninitialized
 * at "done:" — same hazard as in l2cap_parse_conf_rsp; confirm peers
 * always echo the RFC option in ERTM/streaming mode.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Only ERTM and streaming mode carry RFC parameters */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}

/* Handle an incoming Command Reject.  If it rejects our pending
 * Information Request, give up on feature discovery and start the
 * queued connections anyway.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	if (rej->reason != 0x0000)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
					cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}

/* Handle an incoming Connection Request: find a listener for the PSM,
 * run security checks, create and register the child channel, and send
 * the Connection Response (possibly "pending" while auth completes).
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP, PSM 0x0001) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;

		/* if lm encryption enabled, send authorization pending.
		 * this workaround is for ford carkit.
		 * (incoming avdtp connection failed) */
		if ((conn->hcon->link_mode & HCI_LM_AUTH) &&
			(conn->hcon->link_mode & HCI_LM_ENCRYPT) &&
			!(conn->hcon->ssp_mode > 0) &&
			psm == cpu_to_le16(0x0019) &&
			bt_sk(sk)->defer_setup) {
			BT_DBG("psm is 0x0019, info req was not sent before");
			/* Address Filter
			 * 00-1e-a4 : Ford carkit, oui (Nokia Danmark A/S) */
			if (conn->dst->b[5] == 0x00 &&
				conn->dst->b[4] == 0x1e &&
				conn->dst->b[3] == 0xa4) {
				BT_DBG("send L2CAP_CS_AUTHOR_PEND");
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			}
		}
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Feature mask still unknown: query it before configuring */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* this is workaround for windows mobile phone. */
	/* maybe, conf negotiation has some problem in wm phone. */
	/* wm phone send first pdu over max size. (we expect 1013, but recved 1014) */
	/* this code is mandatory for SIG CERTI 3.0 */
	/* this code is only for Honeycomb and ICS. */
	/* Gingerbread doesn't have this part.
	 */
	/*
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}
	*/

	return 0;
}

/* Handle an incoming Connection Response for an outgoing connection:
 * on success move to BT_CONFIG and start configuration, on "pending"
 * just mark the state, otherwise tear the channel down.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
						dcid, scid, result, status);

	/* A zero scid means the peer rejected before allocating one;
	 * fall back to matching by command identifier. */
	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			l2cap_state_change(chan, BT_DISCONN);
			__clear_chan_timer(chan);
			/* change time format */
			/* __set_chan_timer(chan, HZ / 5); */
			__set_chan_timer(chan, 200);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	/* NOTE(review): sk is unlocked here but no matching bh_lock_sock
	 * is visible in this function — presumably l2cap_get_chan_by_scid
	 * / _by_ident return with the socket locked; confirm. */
	bh_unlock_sock(sk);
	return 0;
}

static inline void set_default_fcs(struct l2cap_chan *chan)
{
	/* FCS is enabled only in ERTM or streaming mode, if one or both
	 * sides request it.
	 */
	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
		chan->fcs = L2CAP_FCS_NONE;
	else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
		chan->fcs = L2CAP_FCS_CRC16;
}

/* Handle an incoming Configure Request: accumulate option fragments in
 * chan->conf_req until the "continuation" flag clears, then parse the
 * whole request and answer with our Configure Response.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	/* Configuration is only legal while connecting/configuring */
	if (sk->sk_state != BT_CONFIG && sk->sk_state != BT_CONNECT2) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer.
*/ chan->conf_len = 0; if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) goto unlock; if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { set_default_fcs(chan); l2cap_state_change(chan, BT_CONNECTED); chan->next_tx_seq = 0; chan->expected_tx_seq = 0; skb_queue_head_init(&chan->tx_q); if (chan->mode == L2CAP_MODE_ERTM) l2cap_ertm_init(chan); l2cap_chan_ready(sk); goto unlock; } if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) { u8 buf[64]; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, buf), buf); chan->num_conf_req++; } unlock: bh_unlock_sock(sk); return 0; } static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) { struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; u16 scid, flags, result; struct l2cap_chan *chan; struct sock *sk; int len = cmd->len - sizeof(*rsp); scid = __le16_to_cpu(rsp->scid); flags = __le16_to_cpu(rsp->flags); result = __le16_to_cpu(rsp->result); BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result); chan = l2cap_get_chan_by_scid(conn, scid); if (!chan) return 0; sk = chan->sk; switch (result) { case L2CAP_CONF_SUCCESS: l2cap_conf_rfc_get(chan, rsp->data, len); break; case L2CAP_CONF_UNACCEPT: if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { char req[64]; if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { l2cap_send_disconn_req(conn, chan, ECONNRESET); goto done; } /* throw out any old stored conf requests */ result = L2CAP_CONF_SUCCESS; len = l2cap_parse_conf_rsp(chan, rsp->data, len, req, &result); if (len < 0) { l2cap_send_disconn_req(conn, chan, ECONNRESET); goto done; } l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, len, req); chan->num_conf_req++; if (result != L2CAP_CONF_SUCCESS) goto done; break; } default: sk->sk_err = ECONNRESET; /*change time format */ /* __set_chan_timer(chan, HZ * 5); */ __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT); l2cap_send_disconn_req(conn, chan, ECONNRESET); goto 
done;
	}

	/* Continuation flag set: more response fragments will follow */
	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}

/* Handle an incoming Disconnect Request: acknowledge it, shut the
 * socket down, and delete the channel (deferred when the socket is
 * currently owned by a user-space caller).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		/* change time format */
		/* __set_chan_timer(chan, HZ / 5); */
		__set_chan_timer(chan, 200);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}

/* Handle an incoming Disconnect Response for a disconnect we sent:
 * delete the channel (deferred if user space owns the socket).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		/* change time format */
		/* __set_chan_timer(chan, HZ / 5); */
		__set_chan_timer(chan, 200);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}

/* Handle an incoming Information Request: answer with our feature
 * mask, our fixed-channel map, or "not supported".
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming only when not disabled */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}

/* Handle an incoming Information Response: record the peer's feature
 * mask (and then query fixed channels if supported), then start the
 * connections that were waiting on feature discovery.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type ==
L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Chain a fixed-channel query before starting channels */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}

/* Validate LE connection parameters against the ranges allowed by the
 * spec (interval 6..3200, timeout 10..3200, latency bounded by the
 * supervision timeout).  Returns 0 if acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency, u16 to_multiplier)
{
	u16 max_latency;

	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}

/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the parameters, reply accepted/rejected, and apply them to
 * the controller when accepted.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may be asked to update parameters */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}

/* Dispatch one BR/EDR signaling command to its handler.
 * Returns the handler's error code (0 on success/ignored commands).
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}

/* Dispatch one LE signaling command to its handler. */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		return 0;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		return l2cap_conn_param_update_req(conn, cmd, data);

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		return 0;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		return -EINVAL;
	}
}

/* Walk all signaling commands packed in one skb on the signaling
 * channel, dispatching each to the BR/EDR or LE handler and rejecting
 * the command on handler failure.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command cannot claim more bytes than remain, and a
		 * zero ident is invalid per the spec */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted 
command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}

/* Verify (and strip) the CRC16 FCS trailer of an ERTM/streaming frame.
 * Returns 0 when the FCS matches or FCS is not in use, -EBADMSG on a
 * mismatch.  The CRC covers the L2CAP header preceding skb->data.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the 2 FCS bytes, then read them from past the end */
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}

/* After an F-bit response, resume transmission: send RNR while locally
 * busy, retransmit if the remote was busy, push pending I-frames, and
 * fall back to an RR when nothing was sent.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	}

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}

/* Insert an out-of-order I-frame into the SREJ queue, keeping the
 * queue sorted by tx_seq (modulo-64 distance from buffer_seq).
 * Returns -EINVAL for a duplicate sequence number.
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		if
(next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}

/* Reassemble an ERTM SDU from its SAR segments and deliver it upward
 * via chan->ops->recv().  Malformed sequences either drop the partial
 * SDU or disconnect the channel; both paths consume @skb.
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* Unsegmented frame while mid-SDU: protocol violation */
		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto drop;

		return chan->ops->recv(chan->data, skb);

	case L2CAP_SDU_START:
		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto drop;

		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > chan->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		set_bit(CONN_SAR_SDU, &chan->conn_state);
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;

		if (chan->partial_sdu_len > chan->imtu)
			goto drop;

		/* Total length must match what the START segment promised */
		if (chan->partial_sdu_len != chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		/* Deliver a clone so chan->sdu can be freed either way */
		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			return -ENOMEM;
		}

		err = chan->ops->recv(chan->data, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			return err;
		}

		clear_bit(CONN_SAR_SDU, &chan->conn_state);

		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	/* Note: drop deliberately falls through into disconnect */
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}

/* Enter the ERTM local-busy state: tell the peer to stop sending
 * (RNR) and stop the pending acknowledgement timer.
 */
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	BT_DBG("chan %p, Enter local busy", chan);

	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, control);

	set_bit(CONN_RNR_SENT, &chan->conn_state);

	__clear_ack_timer(chan);
}

/* Leave the ERTM local-busy state: poll the peer with RR+P and start
 * waiting for the F-bit response.
 */
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
		goto done;

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

done:
	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	clear_bit(CONN_RNR_SENT, &chan->conn_state);

	BT_DBG("chan %p, Exit local busy", chan);
}

/* Public entry point to toggle the local-busy condition (ERTM only). */
void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	if (chan->mode == L2CAP_MODE_ERTM) {
		if (busy)
			l2cap_ertm_enter_local_busy(chan);
		else
			l2cap_ertm_exit_local_busy(chan);
	}
}

/* Reassemble a streaming-mode SDU from its SAR segments.  Unlike the
 * ERTM variant, errors silently discard the partial SDU instead of
 * disconnecting, since streaming mode tolerates loss.
 */
static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sk_buff *_skb;
	int err = -EINVAL;

	/*
	 * TODO: We have to notify the userland if some data is lost with the
	 * Streaming Mode.
*/ switch (control & L2CAP_CTRL_SAR) { case L2CAP_SDU_UNSEGMENTED: if (test_bit(CONN_SAR_SDU, &chan->conn_state)) { kfree_skb(chan->sdu); break; } err = chan->ops->recv(chan->data, skb); if (!err) return 0; break; case L2CAP_SDU_START: if (test_bit(CONN_SAR_SDU, &chan->conn_state)) { kfree_skb(chan->sdu); break; } chan->sdu_len = get_unaligned_le16(skb->data); skb_pull(skb, 2); if (chan->sdu_len > chan->imtu) { err = -EMSGSIZE; break; } chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC); if (!chan->sdu) { err = -ENOMEM; break; } memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); set_bit(CONN_SAR_SDU, &chan->conn_state); chan->partial_sdu_len = skb->len; err = 0; break; case L2CAP_SDU_CONTINUE: if (!test_bit(CONN_SAR_SDU, &chan->conn_state)) break; memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); chan->partial_sdu_len += skb->len; if (chan->partial_sdu_len > chan->sdu_len) kfree_skb(chan->sdu); else err = 0; break; case L2CAP_SDU_END: if (!test_bit(CONN_SAR_SDU, &chan->conn_state)) break; memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); clear_bit(CONN_SAR_SDU, &chan->conn_state); chan->partial_sdu_len += skb->len; if (chan->partial_sdu_len > chan->imtu) goto drop; if (chan->partial_sdu_len == chan->sdu_len) { _skb = skb_clone(chan->sdu, GFP_ATOMIC); err = chan->ops->recv(chan->data, _skb); if (err < 0) kfree_skb(_skb); } err = 0; drop: kfree_skb(chan->sdu); break; } kfree_skb(skb); return err; } static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq) { struct sk_buff *skb; u16 control; while ((skb = skb_peek(&chan->srej_q)) && !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { int err; if (bt_cb(skb)->tx_seq != tx_seq) break; skb = skb_dequeue(&chan->srej_q); control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; err = l2cap_ertm_reassembly_sdu(chan, skb, control); if (err < 0) { l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); break; } chan->buffer_seq_srej = (chan->buffer_seq_srej + 1) % 64; tx_seq = (tx_seq + 1) % 64; } } 
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq) { struct srej_list *l, *tmp; u16 control; list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { if (l->tx_seq == tx_seq) { list_del(&l->list); kfree(l); return; } control = L2CAP_SUPER_SELECT_REJECT; control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; l2cap_send_sframe(chan, control); list_del(&l->list); list_add_tail(&l->list, &chan->srej_l); } } static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq) { struct srej_list *new; u16 control; while (tx_seq != chan->expected_tx_seq) { control = L2CAP_SUPER_SELECT_REJECT; control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; l2cap_send_sframe(chan, control); new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); new->tx_seq = chan->expected_tx_seq; chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; list_add_tail(&new->list, &chan->srej_l); } chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; } static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb) { u8 tx_seq = __get_txseq(rx_control); u8 req_seq = __get_reqseq(rx_control); u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT; int tx_seq_offset, expected_tx_seq_offset; int num_to_ack = (chan->tx_win/6) + 1; int err = 0; BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len, tx_seq, rx_control); if (L2CAP_CTRL_FINAL & rx_control && test_bit(CONN_WAIT_F, &chan->conn_state)) { __clear_monitor_timer(chan); if (chan->unacked_frames > 0) __set_retrans_timer(chan); clear_bit(CONN_WAIT_F, &chan->conn_state); } chan->expected_ack_seq = req_seq; l2cap_drop_acked_frames(chan); tx_seq_offset = (tx_seq - chan->buffer_seq) % 64; if (tx_seq_offset < 0) tx_seq_offset += 64; /* invalid tx_seq */ if (tx_seq_offset >= chan->tx_win) { l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; } if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) goto drop; if (tx_seq == chan->expected_tx_seq) goto expected; if 
(test_bit(CONN_SREJ_SENT, &chan->conn_state)) { struct srej_list *first; first = list_first_entry(&chan->srej_l, struct srej_list, list); if (tx_seq == first->tx_seq) { l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); l2cap_check_srej_gap(chan, tx_seq); list_del(&first->list); kfree(first); if (list_empty(&chan->srej_l)) { chan->buffer_seq = chan->buffer_seq_srej; clear_bit(CONN_SREJ_SENT, &chan->conn_state); l2cap_send_ack(chan); BT_DBG("chan %p, Exit SREJ_SENT", chan); } } else { struct srej_list *l; /* duplicated tx_seq */ if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0) goto drop; list_for_each_entry(l, &chan->srej_l, list) { if (l->tx_seq == tx_seq) { l2cap_resend_srejframe(chan, tx_seq); return 0; } } l2cap_send_srejframe(chan, tx_seq); } } else { expected_tx_seq_offset = (chan->expected_tx_seq - chan->buffer_seq) % 64; if (expected_tx_seq_offset < 0) expected_tx_seq_offset += 64; /* duplicated tx_seq */ if (tx_seq_offset < expected_tx_seq_offset) goto drop; set_bit(CONN_SREJ_SENT, &chan->conn_state); BT_DBG("chan %p, Enter SREJ", chan); INIT_LIST_HEAD(&chan->srej_l); chan->buffer_seq_srej = chan->buffer_seq; __skb_queue_head_init(&chan->srej_q); l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); set_bit(CONN_SEND_PBIT, &chan->conn_state); l2cap_send_srejframe(chan, tx_seq); __clear_ack_timer(chan); } return 0; expected: chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { bt_cb(skb)->tx_seq = tx_seq; bt_cb(skb)->sar = sar; __skb_queue_tail(&chan->srej_q, skb); return 0; } err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control); chan->buffer_seq = (chan->buffer_seq + 1) % 64; if (err < 0) { l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); return err; } if (rx_control & L2CAP_CTRL_FINAL) { if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) l2cap_retransmit_frames(chan); } chan->num_acked = (chan->num_acked + 1) % num_to_ack; if (chan->num_acked == num_to_ack - 1) { l2cap_send_ack(chan); } 
else { BT_DBG("__set_ack_timer"); __set_ack_timer(chan); } return 0; drop: kfree_skb(skb); return 0; } static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control) { BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control), rx_control); chan->expected_ack_seq = __get_reqseq(rx_control); l2cap_drop_acked_frames(chan); if (rx_control & L2CAP_CTRL_POLL) { set_bit(CONN_SEND_FBIT, &chan->conn_state); if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && (chan->unacked_frames > 0)) __set_retrans_timer(chan); clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); l2cap_send_srejtail(chan); } else { l2cap_send_i_or_rr_or_rnr(chan); } } else if (rx_control & L2CAP_CTRL_FINAL) { clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) l2cap_retransmit_frames(chan); } else { if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && (chan->unacked_frames > 0)) __set_retrans_timer(chan); clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) l2cap_send_ack(chan); else l2cap_ertm_send(chan); } } static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control) { u8 tx_seq = __get_reqseq(rx_control); BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); chan->expected_ack_seq = tx_seq; l2cap_drop_acked_frames(chan); if (rx_control & L2CAP_CTRL_FINAL) { if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) l2cap_retransmit_frames(chan); } else { l2cap_retransmit_frames(chan); if (test_bit(CONN_WAIT_F, &chan->conn_state)) set_bit(CONN_REJ_ACT, &chan->conn_state); } } static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control) { u8 tx_seq = __get_reqseq(rx_control); BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); if (rx_control 
& L2CAP_CTRL_POLL) { chan->expected_ack_seq = tx_seq; l2cap_drop_acked_frames(chan); set_bit(CONN_SEND_FBIT, &chan->conn_state); l2cap_retransmit_one_frame(chan, tx_seq); l2cap_ertm_send(chan); if (test_bit(CONN_WAIT_F, &chan->conn_state)) { chan->srej_save_reqseq = tx_seq; set_bit(CONN_SREJ_ACT, &chan->conn_state); } } else if (rx_control & L2CAP_CTRL_FINAL) { if (test_bit(CONN_SREJ_ACT, &chan->conn_state) && chan->srej_save_reqseq == tx_seq) clear_bit(CONN_SREJ_ACT, &chan->conn_state); else l2cap_retransmit_one_frame(chan, tx_seq); } else { l2cap_retransmit_one_frame(chan, tx_seq); if (test_bit(CONN_WAIT_F, &chan->conn_state)) { chan->srej_save_reqseq = tx_seq; set_bit(CONN_SREJ_ACT, &chan->conn_state); } } } static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control) { u8 tx_seq = __get_reqseq(rx_control); BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); set_bit(CONN_REMOTE_BUSY, &chan->conn_state); chan->expected_ack_seq = tx_seq; l2cap_drop_acked_frames(chan); if (rx_control & L2CAP_CTRL_POLL) set_bit(CONN_SEND_FBIT, &chan->conn_state); if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { __clear_retrans_timer(chan); if (rx_control & L2CAP_CTRL_POLL) l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); return; } if (rx_control & L2CAP_CTRL_POLL) l2cap_send_srejtail(chan); else l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY); } static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb) { BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len); if (L2CAP_CTRL_FINAL & rx_control && test_bit(CONN_WAIT_F, &chan->conn_state)) { __clear_monitor_timer(chan); if (chan->unacked_frames > 0) __set_retrans_timer(chan); clear_bit(CONN_WAIT_F, &chan->conn_state); } switch (rx_control & L2CAP_CTRL_SUPERVISE) { case L2CAP_SUPER_RCV_READY: l2cap_data_channel_rrframe(chan, rx_control); break; case L2CAP_SUPER_REJECT: l2cap_data_channel_rejframe(chan, rx_control); break; case 
L2CAP_SUPER_SELECT_REJECT: l2cap_data_channel_srejframe(chan, rx_control); break; case L2CAP_SUPER_RCV_NOT_READY: l2cap_data_channel_rnrframe(chan, rx_control); break; } kfree_skb(skb); return 0; } static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) { struct l2cap_chan *chan = l2cap_pi(sk)->chan; u16 control; u8 req_seq; int len, next_tx_seq_offset, req_seq_offset; control = get_unaligned_le16(skb->data); skb_pull(skb, 2); len = skb->len; /* * We can just drop the corrupted I-frame here. * Receiver will miss it and start proper recovery * procedures and ask retransmission. */ if (l2cap_check_fcs(chan, skb)) goto drop; if (__is_sar_start(control) && __is_iframe(control)) len -= 2; if (chan->fcs == L2CAP_FCS_CRC16) len -= 2; if (len > chan->mps) { l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; } req_seq = __get_reqseq(control); req_seq_offset = (req_seq - chan->expected_ack_seq) % 64; if (req_seq_offset < 0) req_seq_offset += 64; next_tx_seq_offset = (chan->next_tx_seq - chan->expected_ack_seq) % 64; if (next_tx_seq_offset < 0) next_tx_seq_offset += 64; /* check for invalid req-seq */ if (req_seq_offset > next_tx_seq_offset) { l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; } if (__is_iframe(control)) { if (len < 0) { l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; } l2cap_data_channel_iframe(chan, control, skb); } else { if (len != 0) { BT_ERR("%d", len); l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; } l2cap_data_channel_sframe(chan, control, skb); } return 0; drop: kfree_skb(skb); return 0; } static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) { struct l2cap_chan *chan; struct sock *sk = NULL; u16 control; u8 tx_seq; int len; chan = l2cap_get_chan_by_scid(conn, cid); if (!chan) { BT_DBG("unknown cid 0x%4.4x", cid); goto drop; } sk = chan->sk; BT_DBG("chan %p, len %d", chan, skb->len); if (chan->state != BT_CONNECTED) goto drop; switch 
(chan->mode) { case L2CAP_MODE_BASIC: /* If socket recv buffers overflows we drop data here * which is *bad* because L2CAP has to be reliable. * But we don't have any other choice. L2CAP doesn't * provide flow control mechanism. */ if (chan->imtu < skb->len) goto drop; if (!chan->ops->recv(chan->data, skb)) goto done; break; case L2CAP_MODE_ERTM: if (!sock_owned_by_user(sk)) { l2cap_ertm_data_rcv(sk, skb); } else { if (sk_add_backlog(sk, skb)) goto drop; } goto done; case L2CAP_MODE_STREAMING: control = get_unaligned_le16(skb->data); skb_pull(skb, 2); len = skb->len; if (l2cap_check_fcs(chan, skb)) goto drop; if (__is_sar_start(control)) len -= 2; if (chan->fcs == L2CAP_FCS_CRC16) len -= 2; if (len > chan->mps || len < 0 || __is_sframe(control)) goto drop; tx_seq = __get_txseq(control); if (chan->expected_tx_seq == tx_seq) chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; else chan->expected_tx_seq = (tx_seq + 1) % 64; l2cap_streaming_reassembly_sdu(chan, skb, control); goto done; default: BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode); break; } drop: kfree_skb(skb); done: if (sk) bh_unlock_sock(sk); return 0; } static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) { /* struct sock *sk = NULL; */ struct l2cap_chan *chan; chan = l2cap_global_chan_by_psm(0, psm, conn->src); if (!chan) goto drop; /* sk = chan->sk; */ /* bh_lock_sock(sk); */ /* BT_DBG("sk %p, len %d", sk, skb->len); */ BT_DBG("chan %p, len %d", chan, skb->len); if (chan->state != BT_BOUND && chan->state != BT_CONNECTED) goto drop; if (chan->imtu < skb->len) goto drop; if (!chan->ops->recv(chan->data, skb)) return 0; /* goto done; */ drop: kfree_skb(skb); /* done: */ /* if (sk) */ /* bh_unlock_sock(sk); */ return 0; } static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb) { /* struct sock *sk = NULL; */ struct l2cap_chan *chan; chan = l2cap_global_chan_by_scid(0, cid, conn->src); if (!chan) goto drop; /* sk 
= chan->sk; */ /* bh_lock_sock(sk); */ /* BT_DBG("sk %p, len %d", sk, skb->len); */ BT_DBG("chan %p, len %d", chan, skb->len); if (chan->state != BT_BOUND && chan->state != BT_CONNECTED) goto drop; if (chan->imtu < skb->len) goto drop; if (!chan->ops->recv(chan->data, skb)) return 0; /* goto done; */ drop: kfree_skb(skb); /* done: */ /* if (sk) */ /* bh_unlock_sock(sk); */ return 0; } static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) { struct l2cap_hdr *lh = (void *) skb->data; u16 cid, len; __le16 psm; skb_pull(skb, L2CAP_HDR_SIZE); cid = __le16_to_cpu(lh->cid); len = __le16_to_cpu(lh->len); if (len != skb->len) { kfree_skb(skb); return; } BT_DBG("len %d, cid 0x%4.4x", len, cid); switch (cid) { case L2CAP_CID_LE_SIGNALING: case L2CAP_CID_SIGNALING: l2cap_sig_channel(conn, skb); break; case L2CAP_CID_CONN_LESS: psm = get_unaligned_le16(skb->data); skb_pull(skb, 2); l2cap_conless_channel(conn, psm, skb); break; case L2CAP_CID_LE_DATA: l2cap_att_channel(conn, cid, skb); break; case L2CAP_CID_SMP: if (smp_sig_channel(conn, skb)) l2cap_conn_del(conn->hcon, EACCES); break; default: l2cap_data_channel(conn, cid, skb); break; } } /* ---- L2CAP interface with lower layer (HCI) ---- */ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) { int exact = 0, lm1 = 0, lm2 = 0; struct l2cap_chan *c; if (type != ACL_LINK) return -EINVAL; BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); /* Find listening sockets and check their link_mode */ read_lock(&chan_list_lock); list_for_each_entry(c, &chan_list, global_l) { struct sock *sk = c->sk; if (c->state != BT_LISTEN) continue; if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { lm1 |= HCI_LM_ACCEPT; if (c->role_switch) lm1 |= HCI_LM_MASTER; exact++; } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { lm2 |= HCI_LM_ACCEPT; if (c->role_switch) lm2 |= HCI_LM_MASTER; } } read_unlock(&chan_list_lock); return exact ? 
lm1 : lm2; } static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) { struct l2cap_conn *conn; BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK)) return -EINVAL; if (!status) { conn = l2cap_conn_add(hcon, status); if (conn) l2cap_conn_ready(conn); } else l2cap_conn_del(hcon, bt_to_errno(status)); return 0; } static int l2cap_disconn_ind(struct hci_conn *hcon) { struct l2cap_conn *conn = hcon->l2cap_data; BT_DBG("hcon %p", hcon); if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn) return 0x13; return conn->disc_reason; } static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) { BT_DBG("hcon %p reason %d", hcon, reason); if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK)) return -EINVAL; l2cap_conn_del(hcon, bt_to_errno(reason)); return 0; } static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) { if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) return; if (encrypt == 0x00) { if (chan->sec_level == BT_SECURITY_MEDIUM) { __clear_chan_timer(chan); /* change time format */ /* __set_chan_timer(chan, HZ * 5); */ __set_chan_timer(chan, L2CAP_ENC_TIMEOUT); } else if (chan->sec_level == BT_SECURITY_HIGH) l2cap_chan_close(chan, ECONNREFUSED); } else { if (chan->sec_level == BT_SECURITY_MEDIUM) __clear_chan_timer(chan); } } static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) { struct l2cap_conn *conn = hcon->l2cap_data; struct l2cap_chan *chan; if (!conn) return 0; BT_DBG("conn %p, status %u", conn, status); /* to do */ if (hcon->type == LE_LINK) { smp_distribute_keys(conn, 0); del_timer(&conn->security_timer); /* cancel_delayed_work(&conn->security_timer); */ } read_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { struct sock *sk = chan->sk; bh_lock_sock(sk); BT_DBG("chan->scid %d", chan->scid); if (chan->scid == L2CAP_CID_LE_DATA) { if (!status && encrypt) { chan->sec_level = hcon->sec_level; /* 
del_timer(&conn->security_timer); */ l2cap_chan_ready(sk); /* smp_distribute_keys(conn, 0); */ } bh_unlock_sock(sk); continue; } if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) { bh_unlock_sock(sk); continue; } if (!status && (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)) { l2cap_check_encryption(chan, encrypt); bh_unlock_sock(sk); continue; } if (chan->state == BT_CONNECT) { if (!status) { l2cap_send_conn_req(chan); } else { __clear_chan_timer(chan); /* change time format */ /* __set_chan_timer(chan, HZ / 10); */ __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); } } else if (chan->state == BT_CONNECT2) { struct l2cap_conn_rsp rsp; __u16 res, stat; if (!status) { if (bt_sk(sk)->defer_setup) { struct sock *parent = bt_sk(sk)->parent; res = L2CAP_CR_PEND; stat = L2CAP_CS_AUTHOR_PEND; if (parent) parent->sk_data_ready(parent, 0); } else { l2cap_state_change(chan, BT_CONFIG); res = L2CAP_CR_SUCCESS; stat = L2CAP_CS_NO_INFO; } } else { l2cap_state_change(chan, BT_DISCONN); /* change time format */ /* __set_chan_timer(chan, HZ / 10); */ __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); res = L2CAP_CR_SEC_BLOCK; stat = L2CAP_CS_NO_INFO; } rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); rsp.result = cpu_to_le16(res); rsp.status = cpu_to_le16(stat); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); } bh_unlock_sock(sk); } read_unlock(&conn->chan_lock); return 0; } static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) { struct l2cap_conn *conn = hcon->l2cap_data; if (!conn) conn = l2cap_conn_add(hcon, 0); if (!conn) goto drop; BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags); if (!(flags & ACL_CONT)) { struct l2cap_hdr *hdr; struct l2cap_chan *chan; u16 cid; int len; if (conn->rx_len) { BT_ERR("Unexpected start frame (len %d)", skb->len); kfree_skb(conn->rx_skb); conn->rx_skb = NULL; conn->rx_len = 0; l2cap_conn_unreliable(conn, ECOMM); } /* Start fragment always begin with Basic L2CAP 
header */ if (skb->len < L2CAP_HDR_SIZE) { BT_ERR("Frame is too short (len %d)", skb->len); l2cap_conn_unreliable(conn, ECOMM); goto drop; } hdr = (struct l2cap_hdr *) skb->data; len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE; cid = __le16_to_cpu(hdr->cid); if (len == skb->len) { /* Complete frame received */ l2cap_recv_frame(conn, skb); return 0; } BT_DBG("Start: total len %d, frag len %d", len, skb->len); if (skb->len > len) { BT_ERR("Frame is too long (len %d, expected len %d)", skb->len, len); l2cap_conn_unreliable(conn, ECOMM); goto drop; } chan = l2cap_get_chan_by_scid(conn, cid); if (chan && chan->sk) { struct sock *sk = chan->sk; if (chan->imtu < len - L2CAP_HDR_SIZE) { BT_ERR("Frame exceeding recv MTU (len %d, " "MTU %d)", len, chan->imtu); bh_unlock_sock(sk); l2cap_conn_unreliable(conn, ECOMM); goto drop; } bh_unlock_sock(sk); } /* Allocate skb for the complete frame (with header) */ conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC); if (!conn->rx_skb) goto drop; skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), skb->len); conn->rx_len = len - skb->len; } else { BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); if (!conn->rx_len) { BT_ERR("Unexpected continuation frame (len %d)", skb->len); l2cap_conn_unreliable(conn, ECOMM); goto drop; } if (skb->len > conn->rx_len) { BT_ERR("Fragment is too long (len %d, expected %d)", skb->len, conn->rx_len); kfree_skb(conn->rx_skb); conn->rx_skb = NULL; conn->rx_len = 0; l2cap_conn_unreliable(conn, ECOMM); goto drop; } skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), skb->len); conn->rx_len -= skb->len; if (!conn->rx_len) { /* Complete frame received */ l2cap_recv_frame(conn, conn->rx_skb); conn->rx_skb = NULL; } } drop: kfree_skb(skb); return 0; } static int l2cap_debugfs_show(struct seq_file *f, void *p) { struct l2cap_chan *c; read_lock_bh(&chan_list_lock); list_for_each_entry(c, &chan_list, global_l) { struct sock *sk = c->sk; seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d 
%d %d %d\n", batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), c->state, __le16_to_cpu(c->psm), c->scid, c->dcid, c->imtu, c->omtu, c->sec_level, c->mode); } read_unlock_bh(&chan_list_lock); return 0; } static int l2cap_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, l2cap_debugfs_show, inode->i_private); } static const struct file_operations l2cap_debugfs_fops = { .open = l2cap_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct dentry *l2cap_debugfs; static struct hci_proto l2cap_hci_proto = { .name = "L2CAP", .id = HCI_PROTO_L2CAP, .connect_ind = l2cap_connect_ind, .connect_cfm = l2cap_connect_cfm, .disconn_ind = l2cap_disconn_ind, .disconn_cfm = l2cap_disconn_cfm, .security_cfm = l2cap_security_cfm, .recv_acldata = l2cap_recv_acldata }; int __init l2cap_init(void) { int err; err = l2cap_init_sockets(); if (err < 0) return err; err = hci_register_proto(&l2cap_hci_proto); if (err < 0) { BT_ERR("L2CAP protocol registration failed"); bt_sock_unregister(BTPROTO_L2CAP); goto error; } if (bt_debugfs) { l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, NULL, &l2cap_debugfs_fops); if (!l2cap_debugfs) BT_ERR("Failed to create L2CAP debug file"); } return 0; error: l2cap_cleanup_sockets(); return err; } void l2cap_exit(void) { debugfs_remove(l2cap_debugfs); if (hci_unregister_proto(&l2cap_hci_proto) < 0) BT_ERR("L2CAP protocol unregistration failed"); l2cap_cleanup_sockets(); } module_param(disable_ertm, bool, 0644); MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
gpl-2.0
CyberGrandChallenge/linux-source-3.13.11-ckt21-cgc
drivers/watchdog/geodewdt.c
321
6644
/* Watchdog timer for machines with the CS5535/CS5536 companion chip * * Copyright (C) 2006-2007, Advanced Micro Devices, Inc. * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/fs.h> #include <linux/platform_device.h> #include <linux/reboot.h> #include <linux/uaccess.h> #include <linux/cs5535.h> #define GEODEWDT_HZ 500 #define GEODEWDT_SCALE 6 #define GEODEWDT_MAX_SECONDS 131 #define WDT_FLAGS_OPEN 1 #define WDT_FLAGS_ORPHAN 2 #define DRV_NAME "geodewdt" #define WATCHDOG_NAME "Geode GX/LX WDT" #define WATCHDOG_TIMEOUT 60 static int timeout = WATCHDOG_TIMEOUT; module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 
1<= timeout <=131, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) "."); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); static struct platform_device *geodewdt_platform_device; static unsigned long wdt_flags; static struct cs5535_mfgpt_timer *wdt_timer; static int safe_close; static void geodewdt_ping(void) { /* Stop the counter */ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0); /* Reset the counter */ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0); /* Enable the counter */ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN); } static void geodewdt_disable(void) { cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0); cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0); } static int geodewdt_set_heartbeat(int val) { if (val < 1 || val > GEODEWDT_MAX_SECONDS) return -EINVAL; cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0); cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, val * GEODEWDT_HZ); cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0); cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN); timeout = val; return 0; } static int geodewdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(WDT_FLAGS_OPEN, &wdt_flags)) return -EBUSY; if (!test_and_clear_bit(WDT_FLAGS_ORPHAN, &wdt_flags)) __module_get(THIS_MODULE); geodewdt_ping(); return nonseekable_open(inode, file); } static int geodewdt_release(struct inode *inode, struct file *file) { if (safe_close) { geodewdt_disable(); module_put(THIS_MODULE); } else { pr_crit("Unexpected close - watchdog is not stopping\n"); geodewdt_ping(); set_bit(WDT_FLAGS_ORPHAN, &wdt_flags); } clear_bit(WDT_FLAGS_OPEN, &wdt_flags); safe_close = 0; return 0; } static ssize_t geodewdt_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { if (len) { if (!nowayout) { size_t i; safe_close = 0; for (i = 0; i != len; i++) { char c; 
if (get_user(c, data + i)) return -EFAULT; if (c == 'V') safe_close = 1; } } geodewdt_ping(); } return len; } static long geodewdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; int interval; static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = WATCHDOG_NAME, }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_SETOPTIONS: { int options, ret = -EINVAL; if (get_user(options, p)) return -EFAULT; if (options & WDIOS_DISABLECARD) { geodewdt_disable(); ret = 0; } if (options & WDIOS_ENABLECARD) { geodewdt_ping(); ret = 0; } return ret; } case WDIOC_KEEPALIVE: geodewdt_ping(); return 0; case WDIOC_SETTIMEOUT: if (get_user(interval, p)) return -EFAULT; if (geodewdt_set_heartbeat(interval)) return -EINVAL; /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: return -ENOTTY; } return 0; } static const struct file_operations geodewdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = geodewdt_write, .unlocked_ioctl = geodewdt_ioctl, .open = geodewdt_open, .release = geodewdt_release, }; static struct miscdevice geodewdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &geodewdt_fops, }; static int geodewdt_probe(struct platform_device *dev) { int ret; wdt_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING); if (!wdt_timer) { pr_err("No timers were available\n"); return -ENODEV; } /* Set up the timer */ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, GEODEWDT_SCALE | (3 << 8)); /* Set up comparator 2 to reset when the event fires */ cs5535_mfgpt_toggle_event(wdt_timer, MFGPT_CMP2, MFGPT_EVENT_RESET, 1); /* Set up the initial timeout */ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, timeout * 
GEODEWDT_HZ); ret = misc_register(&geodewdt_miscdev); return ret; } static int geodewdt_remove(struct platform_device *dev) { misc_deregister(&geodewdt_miscdev); return 0; } static void geodewdt_shutdown(struct platform_device *dev) { geodewdt_disable(); } static struct platform_driver geodewdt_driver = { .probe = geodewdt_probe, .remove = geodewdt_remove, .shutdown = geodewdt_shutdown, .driver = { .owner = THIS_MODULE, .name = DRV_NAME, }, }; static int __init geodewdt_init(void) { int ret; ret = platform_driver_register(&geodewdt_driver); if (ret) return ret; geodewdt_platform_device = platform_device_register_simple(DRV_NAME, -1, NULL, 0); if (IS_ERR(geodewdt_platform_device)) { ret = PTR_ERR(geodewdt_platform_device); goto err; } return 0; err: platform_driver_unregister(&geodewdt_driver); return ret; } static void __exit geodewdt_exit(void) { platform_device_unregister(geodewdt_platform_device); platform_driver_unregister(&geodewdt_driver); } module_init(geodewdt_init); module_exit(geodewdt_exit); MODULE_AUTHOR("Advanced Micro Devices, Inc"); MODULE_DESCRIPTION("Geode GX/LX Watchdog Driver"); MODULE_LICENSE("GPL");
gpl-2.0
EPDCenterSpain/bq-DC-v1
drivers/staging/usbip/usbip_common.c
833
21018
/* * Copyright (C) 2003-2008 Takahiro Hirofuchi * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #include <asm/byteorder.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/slab.h> #include <net/sock.h> #include "usbip_common.h" #define DRIVER_AUTHOR "Takahiro Hirofuchi <hirofuchi@users.sourceforge.net>" #define DRIVER_DESC "USB/IP Core" #ifdef CONFIG_USBIP_DEBUG unsigned long usbip_debug_flag = 0xffffffff; #else unsigned long usbip_debug_flag; #endif EXPORT_SYMBOL_GPL(usbip_debug_flag); /* FIXME */ struct device_attribute dev_attr_usbip_debug; EXPORT_SYMBOL_GPL(dev_attr_usbip_debug); static ssize_t show_flag(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lx\n", usbip_debug_flag); } static ssize_t store_flag(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { sscanf(buf, "%lx", &usbip_debug_flag); return count; } DEVICE_ATTR(usbip_debug, (S_IRUGO | S_IWUSR), show_flag, store_flag); static void usbip_dump_buffer(char *buff, int bufflen) { print_hex_dump(KERN_DEBUG, "usbip-core", DUMP_PREFIX_OFFSET, 16, 4, buff, bufflen, false); } static void usbip_dump_pipe(unsigned int p) { unsigned char type = usb_pipetype(p); unsigned char ep = usb_pipeendpoint(p); unsigned char dev = usb_pipedevice(p); unsigned char dir = usb_pipein(p); 
pr_debug("dev(%d) ep(%d) [%s] ", dev, ep, dir ? "IN" : "OUT"); switch (type) { case PIPE_ISOCHRONOUS: pr_debug("ISO\n"); break; case PIPE_INTERRUPT: pr_debug("INT\n"); break; case PIPE_CONTROL: pr_debug("CTRL\n"); break; case PIPE_BULK: pr_debug("BULK\n"); break; default: pr_debug("ERR\n"); break; } } static void usbip_dump_usb_device(struct usb_device *udev) { struct device *dev = &udev->dev; int i; dev_dbg(dev, " devnum(%d) devpath(%s) ", udev->devnum, udev->devpath); switch (udev->speed) { case USB_SPEED_HIGH: pr_debug("SPD_HIGH "); break; case USB_SPEED_FULL: pr_debug("SPD_FULL "); break; case USB_SPEED_LOW: pr_debug("SPD_LOW "); break; case USB_SPEED_UNKNOWN: pr_debug("SPD_UNKNOWN "); break; default: pr_debug("SPD_ERROR "); break; } pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport); dev_dbg(dev, " "); for (i = 0; i < 16; i++) pr_debug(" %2u", i); pr_debug("\n"); dev_dbg(dev, " toggle0(IN) :"); for (i = 0; i < 16; i++) pr_debug(" %2u", (udev->toggle[0] & (1 << i)) ? 1 : 0); pr_debug("\n"); dev_dbg(dev, " toggle1(OUT):"); for (i = 0; i < 16; i++) pr_debug(" %2u", (udev->toggle[1] & (1 << i)) ? 
1 : 0); pr_debug("\n"); dev_dbg(dev, " epmaxp_in :"); for (i = 0; i < 16; i++) { if (udev->ep_in[i]) pr_debug(" %2u", le16_to_cpu(udev->ep_in[i]->desc.wMaxPacketSize)); } pr_debug("\n"); dev_dbg(dev, " epmaxp_out :"); for (i = 0; i < 16; i++) { if (udev->ep_out[i]) pr_debug(" %2u", le16_to_cpu(udev->ep_out[i]->desc.wMaxPacketSize)); } pr_debug("\n"); dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus); dev_dbg(dev, "descriptor %p, config %p, actconfig %p, " "rawdescriptors %p\n", &udev->descriptor, udev->config, udev->actconfig, udev->rawdescriptors); dev_dbg(dev, "have_langid %d, string_langid %d\n", udev->have_langid, udev->string_langid); dev_dbg(dev, "maxchild %d, children %p\n", udev->maxchild, udev->children); } static void usbip_dump_request_type(__u8 rt) { switch (rt & USB_RECIP_MASK) { case USB_RECIP_DEVICE: pr_debug("DEVICE"); break; case USB_RECIP_INTERFACE: pr_debug("INTERF"); break; case USB_RECIP_ENDPOINT: pr_debug("ENDPOI"); break; case USB_RECIP_OTHER: pr_debug("OTHER "); break; default: pr_debug("------"); break; } } static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd) { if (!cmd) { pr_debug(" : null pointer\n"); return; } pr_debug(" "); pr_debug("bRequestType(%02X) bRequest(%02X) wValue(%04X) wIndex(%04X) " "wLength(%04X) ", cmd->bRequestType, cmd->bRequest, cmd->wValue, cmd->wIndex, cmd->wLength); pr_debug("\n "); if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { pr_debug("STANDARD "); switch (cmd->bRequest) { case USB_REQ_GET_STATUS: pr_debug("GET_STATUS\n"); break; case USB_REQ_CLEAR_FEATURE: pr_debug("CLEAR_FEAT\n"); break; case USB_REQ_SET_FEATURE: pr_debug("SET_FEAT \n"); break; case USB_REQ_SET_ADDRESS: pr_debug("SET_ADDRRS\n"); break; case USB_REQ_GET_DESCRIPTOR: pr_debug("GET_DESCRI\n"); break; case USB_REQ_SET_DESCRIPTOR: pr_debug("SET_DESCRI\n"); break; case USB_REQ_GET_CONFIGURATION: pr_debug("GET_CONFIG\n"); break; case USB_REQ_SET_CONFIGURATION: pr_debug("SET_CONFIG\n"); break; case 
USB_REQ_GET_INTERFACE: pr_debug("GET_INTERF\n"); break; case USB_REQ_SET_INTERFACE: pr_debug("SET_INTERF\n"); break; case USB_REQ_SYNCH_FRAME: pr_debug("SYNC_FRAME\n"); break; default: pr_debug("REQ(%02X) \n", cmd->bRequest); break; } usbip_dump_request_type(cmd->bRequestType); } else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) { pr_debug("CLASS \n"); } else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) { pr_debug("VENDOR \n"); } else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_RESERVED) { pr_debug("RESERVED\n"); } } void usbip_dump_urb(struct urb *urb) { struct device *dev; if (!urb) { pr_debug("urb: null pointer!!\n"); return; } if (!urb->dev) { pr_debug("urb->dev: null pointer!!\n"); return; } dev = &urb->dev->dev; dev_dbg(dev, " urb :%p\n", urb); dev_dbg(dev, " dev :%p\n", urb->dev); usbip_dump_usb_device(urb->dev); dev_dbg(dev, " pipe :%08x ", urb->pipe); usbip_dump_pipe(urb->pipe); dev_dbg(dev, " status :%d\n", urb->status); dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags); dev_dbg(dev, " transfer_buffer :%p\n", urb->transfer_buffer); dev_dbg(dev, " transfer_buffer_length:%d\n", urb->transfer_buffer_length); dev_dbg(dev, " actual_length :%d\n", urb->actual_length); dev_dbg(dev, " setup_packet :%p\n", urb->setup_packet); if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL) usbip_dump_usb_ctrlrequest( (struct usb_ctrlrequest *)urb->setup_packet); dev_dbg(dev, " start_frame :%d\n", urb->start_frame); dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets); dev_dbg(dev, " interval :%d\n", urb->interval); dev_dbg(dev, " error_count :%d\n", urb->error_count); dev_dbg(dev, " context :%p\n", urb->context); dev_dbg(dev, " complete :%p\n", urb->complete); } EXPORT_SYMBOL_GPL(usbip_dump_urb); void usbip_dump_header(struct usbip_header *pdu) { pr_debug("BASE: cmd %u seq %u devid %u dir %u ep %u\n", pdu->base.command, pdu->base.seqnum, pdu->base.devid, pdu->base.direction, pdu->base.ep); switch 
(pdu->base.command) { case USBIP_CMD_SUBMIT: pr_debug("USBIP_CMD_SUBMIT: " "x_flags %u x_len %u sf %u #p %d iv %d\n", pdu->u.cmd_submit.transfer_flags, pdu->u.cmd_submit.transfer_buffer_length, pdu->u.cmd_submit.start_frame, pdu->u.cmd_submit.number_of_packets, pdu->u.cmd_submit.interval); break; case USBIP_CMD_UNLINK: pr_debug("USBIP_CMD_UNLINK: seq %u\n", pdu->u.cmd_unlink.seqnum); break; case USBIP_RET_SUBMIT: pr_debug("USBIP_RET_SUBMIT: st %d al %u sf %d #p %d ec %d\n", pdu->u.ret_submit.status, pdu->u.ret_submit.actual_length, pdu->u.ret_submit.start_frame, pdu->u.ret_submit.number_of_packets, pdu->u.ret_submit.error_count); break; case USBIP_RET_UNLINK: pr_debug("USBIP_RET_UNLINK: status %d\n", pdu->u.ret_unlink.status); break; default: /* NOT REACHED */ pr_err("unknown command\n"); break; } } EXPORT_SYMBOL_GPL(usbip_dump_header); /* Send/receive messages over TCP/IP. I refer drivers/block/nbd.c */ int usbip_xmit(int send, struct socket *sock, char *buf, int size, int msg_flags) { int result; struct msghdr msg; struct kvec iov; int total = 0; /* for blocks of if (usbip_dbg_flag_xmit) */ char *bp = buf; int osize = size; usbip_dbg_xmit("enter\n"); if (!sock || !buf || !size) { pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf, size); return -EINVAL; } if (usbip_dbg_flag_xmit) { if (send) { if (!in_interrupt()) pr_debug("%-10s:", current->comm); else pr_debug("interrupt :"); pr_debug("sending... , sock %p, buf %p, size %d, " "msg_flags %d\n", sock, buf, size, msg_flags); usbip_dump_buffer(buf, size); } } do { sock->sk->sk_allocation = GFP_NOIO; iov.iov_base = buf; iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_namelen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; if (send) result = kernel_sendmsg(sock, &msg, &iov, 1, size); else result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL); if (result <= 0) { pr_debug("%s sock %p buf %p size %u ret %d total %d\n", send ? 
"send" : "receive", sock, buf, size, result, total); goto err; } size -= result; buf += result; total += result; } while (size > 0); if (usbip_dbg_flag_xmit) { if (!send) { if (!in_interrupt()) pr_debug("%-10s:", current->comm); else pr_debug("interrupt :"); pr_debug("receiving....\n"); usbip_dump_buffer(bp, osize); pr_debug("received, osize %d ret %d size %d total %d\n", osize, result, size, total); } if (send) pr_debug("send, total %d\n", total); } return total; err: return result; } EXPORT_SYMBOL_GPL(usbip_xmit); struct socket *sockfd_to_socket(unsigned int sockfd) { struct socket *socket; struct file *file; struct inode *inode; file = fget(sockfd); if (!file) { pr_err("invalid sockfd\n"); return NULL; } inode = file->f_dentry->d_inode; if (!inode || !S_ISSOCK(inode->i_mode)) return NULL; socket = SOCKET_I(inode); return socket; } EXPORT_SYMBOL_GPL(sockfd_to_socket); /* there may be more cases to tweak the flags. */ static unsigned int tweak_transfer_flags(unsigned int flags) { flags &= ~URB_NO_TRANSFER_DMA_MAP; return flags; } static void usbip_pack_cmd_submit(struct usbip_header *pdu, struct urb *urb, int pack) { struct usbip_header_cmd_submit *spdu = &pdu->u.cmd_submit; /* * Some members are not still implemented in usbip. I hope this issue * will be discussed when usbip is ported to other operating systems. 
*/ if (pack) { /* vhci_tx.c */ spdu->transfer_flags = tweak_transfer_flags(urb->transfer_flags); spdu->transfer_buffer_length = urb->transfer_buffer_length; spdu->start_frame = urb->start_frame; spdu->number_of_packets = urb->number_of_packets; spdu->interval = urb->interval; } else { /* stub_rx.c */ urb->transfer_flags = spdu->transfer_flags; urb->transfer_buffer_length = spdu->transfer_buffer_length; urb->start_frame = spdu->start_frame; urb->number_of_packets = spdu->number_of_packets; urb->interval = spdu->interval; } } static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb, int pack) { struct usbip_header_ret_submit *rpdu = &pdu->u.ret_submit; if (pack) { /* stub_tx.c */ rpdu->status = urb->status; rpdu->actual_length = urb->actual_length; rpdu->start_frame = urb->start_frame; rpdu->number_of_packets = urb->number_of_packets; rpdu->error_count = urb->error_count; } else { /* vhci_rx.c */ urb->status = rpdu->status; urb->actual_length = rpdu->actual_length; urb->start_frame = rpdu->start_frame; urb->number_of_packets = rpdu->number_of_packets; urb->error_count = rpdu->error_count; } } void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd, int pack) { switch (cmd) { case USBIP_CMD_SUBMIT: usbip_pack_cmd_submit(pdu, urb, pack); break; case USBIP_RET_SUBMIT: usbip_pack_ret_submit(pdu, urb, pack); break; default: /* NOT REACHED */ pr_err("unknown command\n"); break; } } EXPORT_SYMBOL_GPL(usbip_pack_pdu); static void correct_endian_basic(struct usbip_header_basic *base, int send) { if (send) { base->command = cpu_to_be32(base->command); base->seqnum = cpu_to_be32(base->seqnum); base->devid = cpu_to_be32(base->devid); base->direction = cpu_to_be32(base->direction); base->ep = cpu_to_be32(base->ep); } else { base->command = be32_to_cpu(base->command); base->seqnum = be32_to_cpu(base->seqnum); base->devid = be32_to_cpu(base->devid); base->direction = be32_to_cpu(base->direction); base->ep = be32_to_cpu(base->ep); } } static void 
correct_endian_cmd_submit(struct usbip_header_cmd_submit *pdu, int send) { if (send) { pdu->transfer_flags = cpu_to_be32(pdu->transfer_flags); cpu_to_be32s(&pdu->transfer_buffer_length); cpu_to_be32s(&pdu->start_frame); cpu_to_be32s(&pdu->number_of_packets); cpu_to_be32s(&pdu->interval); } else { pdu->transfer_flags = be32_to_cpu(pdu->transfer_flags); be32_to_cpus(&pdu->transfer_buffer_length); be32_to_cpus(&pdu->start_frame); be32_to_cpus(&pdu->number_of_packets); be32_to_cpus(&pdu->interval); } } static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu, int send) { if (send) { cpu_to_be32s(&pdu->status); cpu_to_be32s(&pdu->actual_length); cpu_to_be32s(&pdu->start_frame); cpu_to_be32s(&pdu->number_of_packets); cpu_to_be32s(&pdu->error_count); } else { be32_to_cpus(&pdu->status); be32_to_cpus(&pdu->actual_length); be32_to_cpus(&pdu->start_frame); be32_to_cpus(&pdu->number_of_packets); be32_to_cpus(&pdu->error_count); } } static void correct_endian_cmd_unlink(struct usbip_header_cmd_unlink *pdu, int send) { if (send) pdu->seqnum = cpu_to_be32(pdu->seqnum); else pdu->seqnum = be32_to_cpu(pdu->seqnum); } static void correct_endian_ret_unlink(struct usbip_header_ret_unlink *pdu, int send) { if (send) cpu_to_be32s(&pdu->status); else be32_to_cpus(&pdu->status); } void usbip_header_correct_endian(struct usbip_header *pdu, int send) { __u32 cmd = 0; if (send) cmd = pdu->base.command; correct_endian_basic(&pdu->base, send); if (!send) cmd = pdu->base.command; switch (cmd) { case USBIP_CMD_SUBMIT: correct_endian_cmd_submit(&pdu->u.cmd_submit, send); break; case USBIP_RET_SUBMIT: correct_endian_ret_submit(&pdu->u.ret_submit, send); break; case USBIP_CMD_UNLINK: correct_endian_cmd_unlink(&pdu->u.cmd_unlink, send); break; case USBIP_RET_UNLINK: correct_endian_ret_unlink(&pdu->u.ret_unlink, send); break; default: /* NOT REACHED */ pr_err("unknown command\n"); break; } } EXPORT_SYMBOL_GPL(usbip_header_correct_endian); static void usbip_iso_pakcet_correct_endian( 
struct usbip_iso_packet_descriptor *iso, int send) { /* does not need all members. but copy all simply. */ if (send) { iso->offset = cpu_to_be32(iso->offset); iso->length = cpu_to_be32(iso->length); iso->status = cpu_to_be32(iso->status); iso->actual_length = cpu_to_be32(iso->actual_length); } else { iso->offset = be32_to_cpu(iso->offset); iso->length = be32_to_cpu(iso->length); iso->status = be32_to_cpu(iso->status); iso->actual_length = be32_to_cpu(iso->actual_length); } } static void usbip_pack_iso(struct usbip_iso_packet_descriptor *iso, struct usb_iso_packet_descriptor *uiso, int pack) { if (pack) { iso->offset = uiso->offset; iso->length = uiso->length; iso->status = uiso->status; iso->actual_length = uiso->actual_length; } else { uiso->offset = iso->offset; uiso->length = iso->length; uiso->status = iso->status; uiso->actual_length = iso->actual_length; } } /* must free buffer */ void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen) { void *buff; struct usbip_iso_packet_descriptor *iso; int np = urb->number_of_packets; ssize_t size = np * sizeof(*iso); int i; buff = kzalloc(size, GFP_KERNEL); if (!buff) return NULL; for (i = 0; i < np; i++) { iso = buff + (i * sizeof(*iso)); usbip_pack_iso(iso, &urb->iso_frame_desc[i], 1); usbip_iso_pakcet_correct_endian(iso, 1); } *bufflen = size; return buff; } EXPORT_SYMBOL_GPL(usbip_alloc_iso_desc_pdu); /* some members of urb must be substituted before. 
*/ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb) { void *buff; struct usbip_iso_packet_descriptor *iso; int np = urb->number_of_packets; int size = np * sizeof(*iso); int i; int ret; int total_length = 0; if (!usb_pipeisoc(urb->pipe)) return 0; /* my Bluetooth dongle gets ISO URBs which are np = 0 */ if (np == 0) { /* pr_info("iso np == 0\n"); */ /* usbip_dump_urb(urb); */ return 0; } buff = kzalloc(size, GFP_KERNEL); if (!buff) return -ENOMEM; ret = usbip_xmit(0, ud->tcp_socket, buff, size, 0); if (ret != size) { dev_err(&urb->dev->dev, "recv iso_frame_descriptor, %d\n", ret); kfree(buff); if (ud->side == USBIP_STUB) usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); else usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return -EPIPE; } for (i = 0; i < np; i++) { iso = buff + (i * sizeof(*iso)); usbip_iso_pakcet_correct_endian(iso, 0); usbip_pack_iso(iso, &urb->iso_frame_desc[i], 0); total_length += urb->iso_frame_desc[i].actual_length; } kfree(buff); if (total_length != urb->actual_length) { dev_err(&urb->dev->dev, "total length of iso packets %d not equal to actual " "length of buffer %d\n", total_length, urb->actual_length); if (ud->side == USBIP_STUB) usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); else usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return -EPIPE; } return ret; } EXPORT_SYMBOL_GPL(usbip_recv_iso); /* * This functions restores the padding which was removed for optimizing * the bandwidth during transfer over tcp/ip * * buffer and iso packets need to be stored and be in propeper endian in urb * before calling this function */ void usbip_pad_iso(struct usbip_device *ud, struct urb *urb) { int np = urb->number_of_packets; int i; int actualoffset = urb->actual_length; if (!usb_pipeisoc(urb->pipe)) return; /* if no packets or length of data is 0, then nothing to unpack */ if (np == 0 || urb->actual_length == 0) return; /* * if actual_length is transfer_buffer_length then no padding is * present. 
*/ if (urb->actual_length == urb->transfer_buffer_length) return; /* * loop over all packets from last to first (to prevent overwritting * memory when padding) and move them into the proper place */ for (i = np-1; i > 0; i--) { actualoffset -= urb->iso_frame_desc[i].actual_length; memmove(urb->transfer_buffer + urb->iso_frame_desc[i].offset, urb->transfer_buffer + actualoffset, urb->iso_frame_desc[i].actual_length); } } EXPORT_SYMBOL_GPL(usbip_pad_iso); /* some members of urb must be substituted before. */ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) { int ret; int size; if (ud->side == USBIP_STUB) { /* stub_rx.c */ /* the direction of urb must be OUT. */ if (usb_pipein(urb->pipe)) return 0; size = urb->transfer_buffer_length; } else { /* vhci_rx.c */ /* the direction of urb must be IN. */ if (usb_pipeout(urb->pipe)) return 0; size = urb->actual_length; } /* no need to recv xbuff */ if (!(size > 0)) return 0; ret = usbip_xmit(0, ud->tcp_socket, (char *)urb->transfer_buffer, size, 0); if (ret != size) { dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret); if (ud->side == USBIP_STUB) { usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); } else { usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return -EPIPE; } } return ret; } EXPORT_SYMBOL_GPL(usbip_recv_xbuff); static int __init usbip_common_init(void) { pr_info(DRIVER_DESC " v" USBIP_VERSION "\n"); return 0; } static void __exit usbip_common_exit(void) { return; } module_init(usbip_common_init); module_exit(usbip_common_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(USBIP_VERSION);
gpl-2.0
lacvapps/linux
drivers/gpu/drm/radeon/si_smc.c
1345
7364
/* * Copyright 2011 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Alex Deucher */ #include <linux/firmware.h> #include "drmP.h" #include "radeon.h" #include "sid.h" #include "ppsmc.h" #include "radeon_ucode.h" #include "sislands_smc.h" static int si_set_smc_sram_address(struct radeon_device *rdev, u32 smc_address, u32 limit) { if (smc_address & 3) return -EINVAL; if ((smc_address + 3) > limit) return -EINVAL; WREG32(SMC_IND_INDEX_0, smc_address); WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); return 0; } int si_copy_bytes_to_smc(struct radeon_device *rdev, u32 smc_start_address, const u8 *src, u32 byte_count, u32 limit) { unsigned long flags; int ret = 0; u32 data, original_data, addr, extra_shift; if (smc_start_address & 3) return -EINVAL; if ((smc_start_address + byte_count) > limit) return -EINVAL; addr = smc_start_address; spin_lock_irqsave(&rdev->smc_idx_lock, flags); while (byte_count >= 4) { /* SMC address space is BE */ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; ret = si_set_smc_sram_address(rdev, addr, limit); if (ret) goto done; WREG32(SMC_IND_DATA_0, data); src += 4; byte_count -= 4; addr += 4; } /* RMW for the final bytes */ if (byte_count > 0) { data = 0; ret = si_set_smc_sram_address(rdev, addr, limit); if (ret) goto done; original_data = RREG32(SMC_IND_DATA_0); extra_shift = 8 * (4 - byte_count); while (byte_count > 0) { /* SMC address space is BE */ data = (data << 8) + *src++; byte_count--; } data <<= extra_shift; data |= (original_data & ~((~0UL) << extra_shift)); ret = si_set_smc_sram_address(rdev, addr, limit); if (ret) goto done; WREG32(SMC_IND_DATA_0, data); } done: spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); return ret; } void si_start_smc(struct radeon_device *rdev) { u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL); tmp &= ~RST_REG; WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp); } void si_reset_smc(struct radeon_device *rdev) { u32 tmp; RREG32(CB_CGTT_SCLK_CTRL); RREG32(CB_CGTT_SCLK_CTRL); RREG32(CB_CGTT_SCLK_CTRL); RREG32(CB_CGTT_SCLK_CTRL); tmp = 
RREG32_SMC(SMC_SYSCON_RESET_CNTL); tmp |= RST_REG; WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp); } int si_program_jump_on_start(struct radeon_device *rdev) { static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 }; return si_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1); } void si_stop_smc_clock(struct radeon_device *rdev) { u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); tmp |= CK_DISABLE; WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp); } void si_start_smc_clock(struct radeon_device *rdev) { u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); tmp &= ~CK_DISABLE; WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp); } bool si_is_smc_running(struct radeon_device *rdev) { u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL); u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); if (!(rst & RST_REG) && !(clk & CK_DISABLE)) return true; return false; } PPSMC_Result si_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg) { u32 tmp; int i; if (!si_is_smc_running(rdev)) return PPSMC_Result_Failed; WREG32(SMC_MESSAGE_0, msg); for (i = 0; i < rdev->usec_timeout; i++) { tmp = RREG32(SMC_RESP_0); if (tmp != 0) break; udelay(1); } tmp = RREG32(SMC_RESP_0); return (PPSMC_Result)tmp; } PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev) { u32 tmp; int i; if (!si_is_smc_running(rdev)) return PPSMC_Result_OK; for (i = 0; i < rdev->usec_timeout; i++) { tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); if ((tmp & CKEN) == 0) break; udelay(1); } return PPSMC_Result_OK; } int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) { unsigned long flags; u32 ucode_start_address; u32 ucode_size; const u8 *src; u32 data; if (!rdev->smc_fw) return -EINVAL; if (rdev->new_fw) { const struct smc_firmware_header_v1_0 *hdr = (const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data; radeon_ucode_print_smc_hdr(&hdr->header); ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); src = (const u8 *) (rdev->smc_fw->data + 
le32_to_cpu(hdr->header.ucode_array_offset_bytes)); } else { switch (rdev->family) { case CHIP_TAHITI: ucode_start_address = TAHITI_SMC_UCODE_START; ucode_size = TAHITI_SMC_UCODE_SIZE; break; case CHIP_PITCAIRN: ucode_start_address = PITCAIRN_SMC_UCODE_START; ucode_size = PITCAIRN_SMC_UCODE_SIZE; break; case CHIP_VERDE: ucode_start_address = VERDE_SMC_UCODE_START; ucode_size = VERDE_SMC_UCODE_SIZE; break; case CHIP_OLAND: ucode_start_address = OLAND_SMC_UCODE_START; ucode_size = OLAND_SMC_UCODE_SIZE; break; case CHIP_HAINAN: ucode_start_address = HAINAN_SMC_UCODE_START; ucode_size = HAINAN_SMC_UCODE_SIZE; break; default: DRM_ERROR("unknown asic in smc ucode loader\n"); BUG(); } src = (const u8 *)rdev->smc_fw->data; } if (ucode_size & 3) return -EINVAL; spin_lock_irqsave(&rdev->smc_idx_lock, flags); WREG32(SMC_IND_INDEX_0, ucode_start_address); WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); while (ucode_size >= 4) { /* SMC address space is BE */ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; WREG32(SMC_IND_DATA_0, data); src += 4; ucode_size -= 4; } WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); return 0; } int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, u32 *value, u32 limit) { unsigned long flags; int ret; spin_lock_irqsave(&rdev->smc_idx_lock, flags); ret = si_set_smc_sram_address(rdev, smc_address, limit); if (ret == 0) *value = RREG32(SMC_IND_DATA_0); spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); return ret; } int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, u32 value, u32 limit) { unsigned long flags; int ret; spin_lock_irqsave(&rdev->smc_idx_lock, flags); ret = si_set_smc_sram_address(rdev, smc_address, limit); if (ret == 0) WREG32(SMC_IND_DATA_0, value); spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); return ret; }
gpl-2.0
Radium-Devices/Radium_shamu
drivers/mtd/nand/nuc900_nand.c
1601
7426
/* * Copyright © 2009 Nuvoton technology corporation. * * Wan ZongShun <mcuos.com@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation;version 2 of the License. * */ #include <linux/slab.h> #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #define REG_FMICSR 0x00 #define REG_SMCSR 0xa0 #define REG_SMISR 0xac #define REG_SMCMD 0xb0 #define REG_SMADDR 0xb4 #define REG_SMDATA 0xb8 #define RESET_FMI 0x01 #define NAND_EN 0x08 #define READYBUSY (0x01 << 18) #define SWRST 0x01 #define PSIZE (0x01 << 3) #define DMARWEN (0x03 << 1) #define BUSWID (0x01 << 4) #define ECC4EN (0x01 << 5) #define WP (0x01 << 24) #define NANDCS (0x01 << 25) #define ENDADDR (0x01 << 31) #define read_data_reg(dev) \ __raw_readl((dev)->reg + REG_SMDATA) #define write_data_reg(dev, val) \ __raw_writel((val), (dev)->reg + REG_SMDATA) #define write_cmd_reg(dev, val) \ __raw_writel((val), (dev)->reg + REG_SMCMD) #define write_addr_reg(dev, val) \ __raw_writel((val), (dev)->reg + REG_SMADDR) struct nuc900_nand { struct mtd_info mtd; struct nand_chip chip; void __iomem *reg; struct clk *clk; spinlock_t lock; }; static const struct mtd_partition partitions[] = { { .name = "NAND FS 0", .offset = 0, .size = 8 * 1024 * 1024 }, { .name = "NAND FS 1", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL } }; static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd) { unsigned char ret; struct nuc900_nand *nand; nand = container_of(mtd, struct nuc900_nand, mtd); ret = (unsigned char)read_data_reg(nand); return ret; } static void nuc900_nand_read_buf(struct mtd_info *mtd, unsigned char *buf, int len) { int i; struct nuc900_nand 
*nand; nand = container_of(mtd, struct nuc900_nand, mtd); for (i = 0; i < len; i++) buf[i] = (unsigned char)read_data_reg(nand); } static void nuc900_nand_write_buf(struct mtd_info *mtd, const unsigned char *buf, int len) { int i; struct nuc900_nand *nand; nand = container_of(mtd, struct nuc900_nand, mtd); for (i = 0; i < len; i++) write_data_reg(nand, buf[i]); } static int nuc900_check_rb(struct nuc900_nand *nand) { unsigned int val; spin_lock(&nand->lock); val = __raw_readl(REG_SMISR); val &= READYBUSY; spin_unlock(&nand->lock); return val; } static int nuc900_nand_devready(struct mtd_info *mtd) { struct nuc900_nand *nand; int ready; nand = container_of(mtd, struct nuc900_nand, mtd); ready = (nuc900_check_rb(nand)) ? 1 : 0; return ready; } static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command, int column, int page_addr) { register struct nand_chip *chip = mtd->priv; struct nuc900_nand *nand; nand = container_of(mtd, struct nuc900_nand, mtd); if (command == NAND_CMD_READOOB) { column += mtd->writesize; command = NAND_CMD_READ0; } write_cmd_reg(nand, command & 0xff); if (column != -1 || page_addr != -1) { if (column != -1) { if (chip->options & NAND_BUSWIDTH_16) column >>= 1; write_addr_reg(nand, column); write_addr_reg(nand, column >> 8 | ENDADDR); } if (page_addr != -1) { write_addr_reg(nand, page_addr); if (chip->chipsize > (128 << 20)) { write_addr_reg(nand, page_addr >> 8); write_addr_reg(nand, page_addr >> 16 | ENDADDR); } else { write_addr_reg(nand, page_addr >> 8 | ENDADDR); } } } switch (command) { case NAND_CMD_CACHEDPROG: case NAND_CMD_PAGEPROG: case NAND_CMD_ERASE1: case NAND_CMD_ERASE2: case NAND_CMD_SEQIN: case NAND_CMD_RNDIN: case NAND_CMD_STATUS: return; case NAND_CMD_RESET: if (chip->dev_ready) break; udelay(chip->chip_delay); write_cmd_reg(nand, NAND_CMD_STATUS); write_cmd_reg(nand, command); while (!nuc900_check_rb(nand)) ; return; case NAND_CMD_RNDOUT: write_cmd_reg(nand, NAND_CMD_RNDOUTSTART); return; case 
NAND_CMD_READ0: write_cmd_reg(nand, NAND_CMD_READSTART); default: if (!chip->dev_ready) { udelay(chip->chip_delay); return; } } /* Apply this short delay always to ensure that we do wait tWB in * any case on any machine. */ ndelay(100); while (!chip->dev_ready(mtd)) ; } static void nuc900_nand_enable(struct nuc900_nand *nand) { unsigned int val; spin_lock(&nand->lock); __raw_writel(RESET_FMI, (nand->reg + REG_FMICSR)); val = __raw_readl(nand->reg + REG_FMICSR); if (!(val & NAND_EN)) __raw_writel(val | NAND_EN, nand->reg + REG_FMICSR); val = __raw_readl(nand->reg + REG_SMCSR); val &= ~(SWRST|PSIZE|DMARWEN|BUSWID|ECC4EN|NANDCS); val |= WP; __raw_writel(val, nand->reg + REG_SMCSR); spin_unlock(&nand->lock); } static int nuc900_nand_probe(struct platform_device *pdev) { struct nuc900_nand *nuc900_nand; struct nand_chip *chip; int retval; struct resource *res; retval = 0; nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL); if (!nuc900_nand) return -ENOMEM; chip = &(nuc900_nand->chip); nuc900_nand->mtd.priv = chip; nuc900_nand->mtd.owner = THIS_MODULE; spin_lock_init(&nuc900_nand->lock); nuc900_nand->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(nuc900_nand->clk)) { retval = -ENOENT; goto fail1; } clk_enable(nuc900_nand->clk); chip->cmdfunc = nuc900_nand_command_lp; chip->dev_ready = nuc900_nand_devready; chip->read_byte = nuc900_nand_read_byte; chip->write_buf = nuc900_nand_write_buf; chip->read_buf = nuc900_nand_read_buf; chip->chip_delay = 50; chip->options = 0; chip->ecc.mode = NAND_ECC_SOFT; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { retval = -ENXIO; goto fail1; } if (!request_mem_region(res->start, resource_size(res), pdev->name)) { retval = -EBUSY; goto fail1; } nuc900_nand->reg = ioremap(res->start, resource_size(res)); if (!nuc900_nand->reg) { retval = -ENOMEM; goto fail2; } nuc900_nand_enable(nuc900_nand); if (nand_scan(&(nuc900_nand->mtd), 1)) { retval = -ENXIO; goto fail3; } mtd_device_register(&(nuc900_nand->mtd), partitions, 
ARRAY_SIZE(partitions)); platform_set_drvdata(pdev, nuc900_nand); return retval; fail3: iounmap(nuc900_nand->reg); fail2: release_mem_region(res->start, resource_size(res)); fail1: kfree(nuc900_nand); return retval; } static int nuc900_nand_remove(struct platform_device *pdev) { struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); struct resource *res; nand_release(&nuc900_nand->mtd); iounmap(nuc900_nand->reg); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); clk_disable(nuc900_nand->clk); clk_put(nuc900_nand->clk); kfree(nuc900_nand); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver nuc900_nand_driver = { .probe = nuc900_nand_probe, .remove = nuc900_nand_remove, .driver = { .name = "nuc900-fmi", .owner = THIS_MODULE, }, }; module_platform_driver(nuc900_nand_driver); MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:nuc900-fmi");
gpl-2.0
jituijiaqiezi/linux
sound/pcmcia/pdaudiocf/pdaudiocf_core.c
1857
9286
/* * Driver for Sound Core PDAudioCF soundcard * * Copyright (c) 2003 by Jaroslav Kysela <perex@perex.cz> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/delay.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/info.h> #include "pdaudiocf.h" #include <sound/initval.h> /* * */ static unsigned char pdacf_ak4117_read(void *private_data, unsigned char reg) { struct snd_pdacf *chip = private_data; unsigned long timeout; unsigned long flags; unsigned char res; spin_lock_irqsave(&chip->ak4117_lock, flags); timeout = 1000; while (pdacf_reg_read(chip, PDAUDIOCF_REG_SCR) & PDAUDIOCF_AK_SBP) { udelay(5); if (--timeout == 0) { spin_unlock_irqrestore(&chip->ak4117_lock, flags); snd_printk(KERN_ERR "AK4117 ready timeout (read)\n"); return 0; } } pdacf_reg_write(chip, PDAUDIOCF_REG_AK_IFR, (u16)reg << 8); timeout = 1000; while (pdacf_reg_read(chip, PDAUDIOCF_REG_SCR) & PDAUDIOCF_AK_SBP) { udelay(5); if (--timeout == 0) { spin_unlock_irqrestore(&chip->ak4117_lock, flags); snd_printk(KERN_ERR "AK4117 read timeout (read2)\n"); return 0; } } res = (unsigned char)pdacf_reg_read(chip, PDAUDIOCF_REG_AK_IFR); spin_unlock_irqrestore(&chip->ak4117_lock, flags); return res; } static void pdacf_ak4117_write(void *private_data, unsigned char reg, unsigned char val) { struct snd_pdacf *chip = private_data; unsigned long 
timeout; unsigned long flags; spin_lock_irqsave(&chip->ak4117_lock, flags); timeout = 1000; while (inw(chip->port + PDAUDIOCF_REG_SCR) & PDAUDIOCF_AK_SBP) { udelay(5); if (--timeout == 0) { spin_unlock_irqrestore(&chip->ak4117_lock, flags); snd_printk(KERN_ERR "AK4117 ready timeout (write)\n"); return; } } outw((u16)reg << 8 | val | (1<<13), chip->port + PDAUDIOCF_REG_AK_IFR); spin_unlock_irqrestore(&chip->ak4117_lock, flags); } #if 0 void pdacf_dump(struct snd_pdacf *chip) { printk(KERN_DEBUG "PDAUDIOCF DUMP (0x%lx):\n", chip->port); printk(KERN_DEBUG "WPD : 0x%x\n", inw(chip->port + PDAUDIOCF_REG_WDP)); printk(KERN_DEBUG "RDP : 0x%x\n", inw(chip->port + PDAUDIOCF_REG_RDP)); printk(KERN_DEBUG "TCR : 0x%x\n", inw(chip->port + PDAUDIOCF_REG_TCR)); printk(KERN_DEBUG "SCR : 0x%x\n", inw(chip->port + PDAUDIOCF_REG_SCR)); printk(KERN_DEBUG "ISR : 0x%x\n", inw(chip->port + PDAUDIOCF_REG_ISR)); printk(KERN_DEBUG "IER : 0x%x\n", inw(chip->port + PDAUDIOCF_REG_IER)); printk(KERN_DEBUG "AK_IFR : 0x%x\n", inw(chip->port + PDAUDIOCF_REG_AK_IFR)); } #endif static int pdacf_reset(struct snd_pdacf *chip, int powerdown) { u16 val; val = pdacf_reg_read(chip, PDAUDIOCF_REG_SCR); val |= PDAUDIOCF_PDN; val &= ~PDAUDIOCF_RECORD; /* for sure */ pdacf_reg_write(chip, PDAUDIOCF_REG_SCR, val); udelay(5); val |= PDAUDIOCF_RST; pdacf_reg_write(chip, PDAUDIOCF_REG_SCR, val); udelay(200); val &= ~PDAUDIOCF_RST; pdacf_reg_write(chip, PDAUDIOCF_REG_SCR, val); udelay(5); if (!powerdown) { val &= ~PDAUDIOCF_PDN; pdacf_reg_write(chip, PDAUDIOCF_REG_SCR, val); udelay(200); } return 0; } void pdacf_reinit(struct snd_pdacf *chip, int resume) { pdacf_reset(chip, 0); if (resume) pdacf_reg_write(chip, PDAUDIOCF_REG_SCR, chip->suspend_reg_scr); snd_ak4117_reinit(chip->ak4117); pdacf_reg_write(chip, PDAUDIOCF_REG_TCR, chip->regmap[PDAUDIOCF_REG_TCR>>1]); pdacf_reg_write(chip, PDAUDIOCF_REG_IER, chip->regmap[PDAUDIOCF_REG_IER>>1]); } static void pdacf_proc_read(struct snd_info_entry * entry, struct 
snd_info_buffer *buffer) { struct snd_pdacf *chip = entry->private_data; u16 tmp; snd_iprintf(buffer, "PDAudioCF\n\n"); tmp = pdacf_reg_read(chip, PDAUDIOCF_REG_SCR); snd_iprintf(buffer, "FPGA revision : 0x%x\n", PDAUDIOCF_FPGAREV(tmp)); } static void pdacf_proc_init(struct snd_pdacf *chip) { struct snd_info_entry *entry; if (! snd_card_proc_new(chip->card, "pdaudiocf", &entry)) snd_info_set_text_ops(entry, chip, pdacf_proc_read); } struct snd_pdacf *snd_pdacf_create(struct snd_card *card) { struct snd_pdacf *chip; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) return NULL; chip->card = card; mutex_init(&chip->reg_lock); spin_lock_init(&chip->ak4117_lock); card->private_data = chip; pdacf_proc_init(chip); return chip; } static void snd_pdacf_ak4117_change(struct ak4117 *ak4117, unsigned char c0, unsigned char c1) { struct snd_pdacf *chip = ak4117->change_callback_private; u16 val; if (!(c0 & AK4117_UNLCK)) return; mutex_lock(&chip->reg_lock); val = chip->regmap[PDAUDIOCF_REG_SCR>>1]; if (ak4117->rcs0 & AK4117_UNLCK) val |= PDAUDIOCF_BLUE_LED_OFF; else val &= ~PDAUDIOCF_BLUE_LED_OFF; pdacf_reg_write(chip, PDAUDIOCF_REG_SCR, val); mutex_unlock(&chip->reg_lock); } int snd_pdacf_ak4117_create(struct snd_pdacf *chip) { int err; u16 val; /* design note: if we unmask PLL unlock, parity, valid, audio or auto bit interrupts */ /* from AK4117 then INT1 pin from AK4117 will be high all time, because PCMCIA interrupts are */ /* egde based and FPGA does logical OR for all interrupt sources, we cannot use these */ /* high-rate sources */ static unsigned char pgm[5] = { AK4117_XTL_24_576M | AK4117_EXCT, /* AK4117_REG_PWRDN */ AK4117_CM_PLL_XTAL | AK4117_PKCS_128fs | AK4117_XCKS_128fs, /* AK4117_REQ_CLOCK */ AK4117_EFH_1024LRCLK | AK4117_DIF_24R | AK4117_IPS, /* AK4117_REG_IO */ 0xff, /* AK4117_REG_INT0_MASK */ AK4117_MAUTO | AK4117_MAUD | AK4117_MULK | AK4117_MPAR | AK4117_MV, /* AK4117_REG_INT1_MASK */ }; err = pdacf_reset(chip, 0); if (err < 0) return err; err = 
snd_ak4117_create(chip->card, pdacf_ak4117_read, pdacf_ak4117_write, pgm, chip, &chip->ak4117); if (err < 0) return err; val = pdacf_reg_read(chip, PDAUDIOCF_REG_TCR); #if 1 /* normal operation */ val &= ~(PDAUDIOCF_ELIMAKMBIT|PDAUDIOCF_TESTDATASEL); #else /* debug */ val |= PDAUDIOCF_ELIMAKMBIT; val &= ~PDAUDIOCF_TESTDATASEL; #endif pdacf_reg_write(chip, PDAUDIOCF_REG_TCR, val); /* setup the FPGA to match AK4117 setup */ val = pdacf_reg_read(chip, PDAUDIOCF_REG_SCR); val &= ~(PDAUDIOCF_CLKDIV0 | PDAUDIOCF_CLKDIV1); /* use 24.576Mhz clock */ val &= ~(PDAUDIOCF_RED_LED_OFF|PDAUDIOCF_BLUE_LED_OFF); val |= PDAUDIOCF_DATAFMT0 | PDAUDIOCF_DATAFMT1; /* 24-bit data */ pdacf_reg_write(chip, PDAUDIOCF_REG_SCR, val); /* setup LEDs and IRQ */ val = pdacf_reg_read(chip, PDAUDIOCF_REG_IER); val &= ~(PDAUDIOCF_IRQLVLEN0 | PDAUDIOCF_IRQLVLEN1); val &= ~(PDAUDIOCF_BLUEDUTY0 | PDAUDIOCF_REDDUTY0 | PDAUDIOCF_REDDUTY1); val |= PDAUDIOCF_BLUEDUTY1 | PDAUDIOCF_HALFRATE; val |= PDAUDIOCF_IRQOVREN | PDAUDIOCF_IRQAKMEN; pdacf_reg_write(chip, PDAUDIOCF_REG_IER, val); chip->ak4117->change_callback_private = chip; chip->ak4117->change_callback = snd_pdacf_ak4117_change; /* update LED status */ snd_pdacf_ak4117_change(chip->ak4117, AK4117_UNLCK, 0); return 0; } void snd_pdacf_powerdown(struct snd_pdacf *chip) { u16 val; val = pdacf_reg_read(chip, PDAUDIOCF_REG_SCR); chip->suspend_reg_scr = val; val |= PDAUDIOCF_RED_LED_OFF | PDAUDIOCF_BLUE_LED_OFF; pdacf_reg_write(chip, PDAUDIOCF_REG_SCR, val); /* disable interrupts, but use direct write to preserve old register value in chip->regmap */ val = inw(chip->port + PDAUDIOCF_REG_IER); val &= ~(PDAUDIOCF_IRQOVREN|PDAUDIOCF_IRQAKMEN|PDAUDIOCF_IRQLVLEN0|PDAUDIOCF_IRQLVLEN1); outw(val, chip->port + PDAUDIOCF_REG_IER); pdacf_reset(chip, 1); } #ifdef CONFIG_PM int snd_pdacf_suspend(struct snd_pdacf *chip) { u16 val; snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); /* disable interrupts, but use direct write to 
preserve old register value in chip->regmap */ val = inw(chip->port + PDAUDIOCF_REG_IER); val &= ~(PDAUDIOCF_IRQOVREN|PDAUDIOCF_IRQAKMEN|PDAUDIOCF_IRQLVLEN0|PDAUDIOCF_IRQLVLEN1); outw(val, chip->port + PDAUDIOCF_REG_IER); chip->chip_status |= PDAUDIOCF_STAT_IS_SUSPENDED; /* ignore interrupts from now */ snd_pdacf_powerdown(chip); return 0; } static inline int check_signal(struct snd_pdacf *chip) { return (chip->ak4117->rcs0 & AK4117_UNLCK) == 0; } int snd_pdacf_resume(struct snd_pdacf *chip) { int timeout = 40; pdacf_reinit(chip, 1); /* wait for AK4117's PLL */ while (timeout-- > 0 && (snd_ak4117_external_rate(chip->ak4117) <= 0 || !check_signal(chip))) mdelay(1); chip->chip_status &= ~PDAUDIOCF_STAT_IS_SUSPENDED; snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); return 0; } #endif
gpl-2.0
mi4i-dev/android_kernel_xiaomi_ferrari
arch/arm/mach-davinci/cpuidle.c
2113
2459
/* * CPU idle for DaVinci SoCs * * Copyright (C) 2009 Texas Instruments Incorporated. http://www.ti.com/ * * Derived from Marvell Kirkwood CPU idle code * (arch/arm/mach-kirkwood/cpuidle.c) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/cpuidle.h> #include <linux/io.h> #include <linux/export.h> #include <asm/proc-fns.h> #include <asm/cpuidle.h> #include <mach/cpuidle.h> #include <mach/ddr2.h> #define DAVINCI_CPUIDLE_MAX_STATES 2 static void __iomem *ddr2_reg_base; static bool ddr2_pdown; static void davinci_save_ddr_power(int enter, bool pdown) { u32 val; val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET); if (enter) { if (pdown) val |= DDR2_SRPD_BIT; else val &= ~DDR2_SRPD_BIT; val |= DDR2_LPMODEN_BIT; } else { val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT); } __raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET); } /* Actual code that puts the SoC in different idle states */ static int davinci_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { davinci_save_ddr_power(1, ddr2_pdown); cpu_do_idle(); davinci_save_ddr_power(0, ddr2_pdown); return index; } static struct cpuidle_driver davinci_idle_driver = { .name = "cpuidle-davinci", .owner = THIS_MODULE, .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = davinci_enter_idle, .exit_latency = 10, .target_residency = 100000, .flags = CPUIDLE_FLAG_TIME_VALID, .name = "DDR SR", .desc = "WFI and DDR Self Refresh", }, .state_count = DAVINCI_CPUIDLE_MAX_STATES, }; static int __init davinci_cpuidle_probe(struct platform_device *pdev) { struct davinci_cpuidle_config *pdata = pdev->dev.platform_data; if (!pdata) { dev_err(&pdev->dev, "cannot get platform data\n"); return -ENOENT; } ddr2_reg_base = pdata->ddr2_ctlr_base; ddr2_pdown = pdata->ddr2_pdown; 
return cpuidle_register(&davinci_idle_driver, NULL); } static struct platform_driver davinci_cpuidle_driver = { .driver = { .name = "cpuidle-davinci", .owner = THIS_MODULE, }, }; static int __init davinci_cpuidle_init(void) { return platform_driver_probe(&davinci_cpuidle_driver, davinci_cpuidle_probe); } device_initcall(davinci_cpuidle_init);
gpl-2.0
wanam/Adam-Kernel-N8000
drivers/hwmon/applesmc.c
2113
32267
/* * drivers/hwmon/applesmc.c - driver for Apple's SMC (accelerometer, temperature * sensors, fan control, keyboard backlight control) used in Intel-based Apple * computers. * * Copyright (C) 2007 Nicolas Boichat <nicolas@boichat.ch> * Copyright (C) 2010 Henrik Rydberg <rydberg@euromail.se> * * Based on hdaps.c driver: * Copyright (C) 2005 Robert Love <rml@novell.com> * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com> * * Fan control based on smcFanControl: * Copyright (C) 2006 Hendrik Holtmann <holtmann@mac.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License v2 as published by the * Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/input-polldev.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/dmi.h> #include <linux/mutex.h> #include <linux/hwmon-sysfs.h> #include <linux/io.h> #include <linux/leds.h> #include <linux/hwmon.h> #include <linux/workqueue.h> /* data port used by Apple SMC */ #define APPLESMC_DATA_PORT 0x300 /* command/status port used by Apple SMC */ #define APPLESMC_CMD_PORT 0x304 #define APPLESMC_NR_PORTS 32 /* 0x300-0x31f */ #define APPLESMC_MAX_DATA_LENGTH 32 /* wait up to 32 ms for a status change. 
*/ #define APPLESMC_MIN_WAIT 0x0040 #define APPLESMC_MAX_WAIT 0x8000 #define APPLESMC_STATUS_MASK 0x0f #define APPLESMC_READ_CMD 0x10 #define APPLESMC_WRITE_CMD 0x11 #define APPLESMC_GET_KEY_BY_INDEX_CMD 0x12 #define APPLESMC_GET_KEY_TYPE_CMD 0x13 #define KEY_COUNT_KEY "#KEY" /* r-o ui32 */ #define LIGHT_SENSOR_LEFT_KEY "ALV0" /* r-o {alv (6-10 bytes) */ #define LIGHT_SENSOR_RIGHT_KEY "ALV1" /* r-o {alv (6-10 bytes) */ #define BACKLIGHT_KEY "LKSB" /* w-o {lkb (2 bytes) */ #define CLAMSHELL_KEY "MSLD" /* r-o ui8 (unused) */ #define MOTION_SENSOR_X_KEY "MO_X" /* r-o sp78 (2 bytes) */ #define MOTION_SENSOR_Y_KEY "MO_Y" /* r-o sp78 (2 bytes) */ #define MOTION_SENSOR_Z_KEY "MO_Z" /* r-o sp78 (2 bytes) */ #define MOTION_SENSOR_KEY "MOCN" /* r/w ui16 */ #define FANS_COUNT "FNum" /* r-o ui8 */ #define FANS_MANUAL "FS! " /* r-w ui16 */ #define FAN_ID_FMT "F%dID" /* r-o char[16] */ /* List of keys used to read/write fan speeds */ static const char *const fan_speed_fmt[] = { "F%dAc", /* actual speed */ "F%dMn", /* minimum speed (rw) */ "F%dMx", /* maximum speed */ "F%dSf", /* safe speed - not all models */ "F%dTg", /* target speed (manual: rw) */ }; #define INIT_TIMEOUT_MSECS 5000 /* wait up to 5s for device init ... */ #define INIT_WAIT_MSECS 50 /* ... 
in 50ms increments */ #define APPLESMC_POLL_INTERVAL 50 /* msecs */ #define APPLESMC_INPUT_FUZZ 4 /* input event threshold */ #define APPLESMC_INPUT_FLAT 4 #define SENSOR_X 0 #define SENSOR_Y 1 #define SENSOR_Z 2 #define to_index(attr) (to_sensor_dev_attr(attr)->index & 0xffff) #define to_option(attr) (to_sensor_dev_attr(attr)->index >> 16) /* Dynamic device node attributes */ struct applesmc_dev_attr { struct sensor_device_attribute sda; /* hwmon attributes */ char name[32]; /* room for node file name */ }; /* Dynamic device node group */ struct applesmc_node_group { char *format; /* format string */ void *show; /* show function */ void *store; /* store function */ int option; /* function argument */ struct applesmc_dev_attr *nodes; /* dynamic node array */ }; /* AppleSMC entry - cached register information */ struct applesmc_entry { char key[5]; /* four-letter key code */ u8 valid; /* set when entry is successfully read once */ u8 len; /* bounded by APPLESMC_MAX_DATA_LENGTH */ char type[5]; /* four-letter type code */ u8 flags; /* 0x10: func; 0x40: write; 0x80: read */ }; /* Register lookup and registers common to all SMCs */ static struct applesmc_registers { struct mutex mutex; /* register read/write mutex */ unsigned int key_count; /* number of SMC registers */ unsigned int fan_count; /* number of fans */ unsigned int temp_count; /* number of temperature registers */ unsigned int temp_begin; /* temperature lower index bound */ unsigned int temp_end; /* temperature upper index bound */ int num_light_sensors; /* number of light sensors */ bool has_accelerometer; /* has motion sensor */ bool has_key_backlight; /* has keyboard backlight */ bool init_complete; /* true when fully initialized */ struct applesmc_entry *cache; /* cached key entries */ } smcreg = { .mutex = __MUTEX_INITIALIZER(smcreg.mutex), }; static const int debug; static struct platform_device *pdev; static s16 rest_x; static s16 rest_y; static u8 backlight_state[2]; static struct device *hwmon_dev; 
static struct input_polled_dev *applesmc_idev; /* * Last index written to key_at_index sysfs file, and value to use for all other * key_at_index_* sysfs files. */ static unsigned int key_at_index; static struct workqueue_struct *applesmc_led_wq; /* * __wait_status - Wait up to 32ms for the status port to get a certain value * (masked with 0x0f), returning zero if the value is obtained. Callers must * hold applesmc_lock. */ static int __wait_status(u8 val) { int us; val = val & APPLESMC_STATUS_MASK; for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) { udelay(us); if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == val) return 0; } return -EIO; } /* * special treatment of command port - on newer macbooks, it seems necessary * to resend the command byte before polling the status again. Callers must * hold applesmc_lock. */ static int send_command(u8 cmd) { int us; for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) { outb(cmd, APPLESMC_CMD_PORT); udelay(us); if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == 0x0c) return 0; } return -EIO; } static int send_argument(const char *key) { int i; for (i = 0; i < 4; i++) { outb(key[i], APPLESMC_DATA_PORT); if (__wait_status(0x04)) return -EIO; } return 0; } static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) { int i; if (send_command(cmd) || send_argument(key)) { pr_warn("%s: read arg fail\n", key); return -EIO; } outb(len, APPLESMC_DATA_PORT); for (i = 0; i < len; i++) { if (__wait_status(0x05)) { pr_warn("%s: read data fail\n", key); return -EIO; } buffer[i] = inb(APPLESMC_DATA_PORT); } return 0; } static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len) { int i; if (send_command(cmd) || send_argument(key)) { pr_warn("%s: write arg fail\n", key); return -EIO; } outb(len, APPLESMC_DATA_PORT); for (i = 0; i < len; i++) { if (__wait_status(0x04)) { pr_warn("%s: write data fail\n", key); return -EIO; } outb(buffer[i], APPLESMC_DATA_PORT); } return 0; } static int 
read_register_count(unsigned int *count) { __be32 be; int ret; ret = read_smc(APPLESMC_READ_CMD, KEY_COUNT_KEY, (u8 *)&be, 4); if (ret) return ret; *count = be32_to_cpu(be); return 0; } /* * Serialized I/O * * Returns zero on success or a negative error on failure. * All functions below are concurrency safe - callers should NOT hold lock. */ static int applesmc_read_entry(const struct applesmc_entry *entry, u8 *buf, u8 len) { int ret; if (entry->len != len) return -EINVAL; mutex_lock(&smcreg.mutex); ret = read_smc(APPLESMC_READ_CMD, entry->key, buf, len); mutex_unlock(&smcreg.mutex); return ret; } static int applesmc_write_entry(const struct applesmc_entry *entry, const u8 *buf, u8 len) { int ret; if (entry->len != len) return -EINVAL; mutex_lock(&smcreg.mutex); ret = write_smc(APPLESMC_WRITE_CMD, entry->key, buf, len); mutex_unlock(&smcreg.mutex); return ret; } static const struct applesmc_entry *applesmc_get_entry_by_index(int index) { struct applesmc_entry *cache = &smcreg.cache[index]; u8 key[4], info[6]; __be32 be; int ret = 0; if (cache->valid) return cache; mutex_lock(&smcreg.mutex); if (cache->valid) goto out; be = cpu_to_be32(index); ret = read_smc(APPLESMC_GET_KEY_BY_INDEX_CMD, (u8 *)&be, key, 4); if (ret) goto out; ret = read_smc(APPLESMC_GET_KEY_TYPE_CMD, key, info, 6); if (ret) goto out; memcpy(cache->key, key, 4); cache->len = info[0]; memcpy(cache->type, &info[1], 4); cache->flags = info[5]; cache->valid = 1; out: mutex_unlock(&smcreg.mutex); if (ret) return ERR_PTR(ret); return cache; } static int applesmc_get_lower_bound(unsigned int *lo, const char *key) { int begin = 0, end = smcreg.key_count; const struct applesmc_entry *entry; while (begin != end) { int middle = begin + (end - begin) / 2; entry = applesmc_get_entry_by_index(middle); if (IS_ERR(entry)) return PTR_ERR(entry); if (strcmp(entry->key, key) < 0) begin = middle + 1; else end = middle; } *lo = begin; return 0; } static int applesmc_get_upper_bound(unsigned int *hi, const char *key) { 
int begin = 0, end = smcreg.key_count; const struct applesmc_entry *entry; while (begin != end) { int middle = begin + (end - begin) / 2; entry = applesmc_get_entry_by_index(middle); if (IS_ERR(entry)) return PTR_ERR(entry); if (strcmp(key, entry->key) < 0) end = middle; else begin = middle + 1; } *hi = begin; return 0; } static const struct applesmc_entry *applesmc_get_entry_by_key(const char *key) { int begin, end; int ret; ret = applesmc_get_lower_bound(&begin, key); if (ret) return ERR_PTR(ret); ret = applesmc_get_upper_bound(&end, key); if (ret) return ERR_PTR(ret); if (end - begin != 1) return ERR_PTR(-EINVAL); return applesmc_get_entry_by_index(begin); } static int applesmc_read_key(const char *key, u8 *buffer, u8 len) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_key(key); if (IS_ERR(entry)) return PTR_ERR(entry); return applesmc_read_entry(entry, buffer, len); } static int applesmc_write_key(const char *key, const u8 *buffer, u8 len) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_key(key); if (IS_ERR(entry)) return PTR_ERR(entry); return applesmc_write_entry(entry, buffer, len); } static int applesmc_has_key(const char *key, bool *value) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_key(key); if (IS_ERR(entry) && PTR_ERR(entry) != -EINVAL) return PTR_ERR(entry); *value = !IS_ERR(entry); return 0; } /* * applesmc_read_motion_sensor - Read motion sensor (X, Y or Z). */ static int applesmc_read_motion_sensor(int index, s16 *value) { u8 buffer[2]; int ret; switch (index) { case SENSOR_X: ret = applesmc_read_key(MOTION_SENSOR_X_KEY, buffer, 2); break; case SENSOR_Y: ret = applesmc_read_key(MOTION_SENSOR_Y_KEY, buffer, 2); break; case SENSOR_Z: ret = applesmc_read_key(MOTION_SENSOR_Z_KEY, buffer, 2); break; default: ret = -EINVAL; } *value = ((s16)buffer[0] << 8) | buffer[1]; return ret; } /* * applesmc_device_init - initialize the accelerometer. Can sleep. 
*/ static void applesmc_device_init(void) { int total; u8 buffer[2]; if (!smcreg.has_accelerometer) return; for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) { if (!applesmc_read_key(MOTION_SENSOR_KEY, buffer, 2) && (buffer[0] != 0x00 || buffer[1] != 0x00)) return; buffer[0] = 0xe0; buffer[1] = 0x00; applesmc_write_key(MOTION_SENSOR_KEY, buffer, 2); msleep(INIT_WAIT_MSECS); } pr_warn("failed to init the device\n"); } /* * applesmc_init_smcreg_try - Try to initialize register cache. Idempotent. */ static int applesmc_init_smcreg_try(void) { struct applesmc_registers *s = &smcreg; bool left_light_sensor, right_light_sensor; u8 tmp[1]; int ret; if (s->init_complete) return 0; ret = read_register_count(&s->key_count); if (ret) return ret; if (!s->cache) s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL); if (!s->cache) return -ENOMEM; ret = applesmc_read_key(FANS_COUNT, tmp, 1); if (ret) return ret; s->fan_count = tmp[0]; ret = applesmc_get_lower_bound(&s->temp_begin, "T"); if (ret) return ret; ret = applesmc_get_lower_bound(&s->temp_end, "U"); if (ret) return ret; s->temp_count = s->temp_end - s->temp_begin; ret = applesmc_has_key(LIGHT_SENSOR_LEFT_KEY, &left_light_sensor); if (ret) return ret; ret = applesmc_has_key(LIGHT_SENSOR_RIGHT_KEY, &right_light_sensor); if (ret) return ret; ret = applesmc_has_key(MOTION_SENSOR_KEY, &s->has_accelerometer); if (ret) return ret; ret = applesmc_has_key(BACKLIGHT_KEY, &s->has_key_backlight); if (ret) return ret; s->num_light_sensors = left_light_sensor + right_light_sensor; s->init_complete = true; pr_info("key=%d fan=%d temp=%d acc=%d lux=%d kbd=%d\n", s->key_count, s->fan_count, s->temp_count, s->has_accelerometer, s->num_light_sensors, s->has_key_backlight); return 0; } /* * applesmc_init_smcreg - Initialize register cache. * * Retries until initialization is successful, or the operation times out. 
* */ static int applesmc_init_smcreg(void) { int ms, ret; for (ms = 0; ms < INIT_TIMEOUT_MSECS; ms += INIT_WAIT_MSECS) { ret = applesmc_init_smcreg_try(); if (!ret) { if (ms) pr_info("init_smcreg() took %d ms\n", ms); return 0; } msleep(INIT_WAIT_MSECS); } kfree(smcreg.cache); smcreg.cache = NULL; return ret; } static void applesmc_destroy_smcreg(void) { kfree(smcreg.cache); smcreg.cache = NULL; smcreg.init_complete = false; } /* Device model stuff */ static int applesmc_probe(struct platform_device *dev) { int ret; ret = applesmc_init_smcreg(); if (ret) return ret; applesmc_device_init(); return 0; } /* Synchronize device with memorized backlight state */ static int applesmc_pm_resume(struct device *dev) { if (smcreg.has_key_backlight) applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2); return 0; } /* Reinitialize device on resume from hibernation */ static int applesmc_pm_restore(struct device *dev) { applesmc_device_init(); return applesmc_pm_resume(dev); } static const struct dev_pm_ops applesmc_pm_ops = { .resume = applesmc_pm_resume, .restore = applesmc_pm_restore, }; static struct platform_driver applesmc_driver = { .probe = applesmc_probe, .driver = { .name = "applesmc", .owner = THIS_MODULE, .pm = &applesmc_pm_ops, }, }; /* * applesmc_calibrate - Set our "resting" values. Callers must * hold applesmc_lock. 
*/ static void applesmc_calibrate(void) { applesmc_read_motion_sensor(SENSOR_X, &rest_x); applesmc_read_motion_sensor(SENSOR_Y, &rest_y); rest_x = -rest_x; } static void applesmc_idev_poll(struct input_polled_dev *dev) { struct input_dev *idev = dev->input; s16 x, y; if (applesmc_read_motion_sensor(SENSOR_X, &x)) return; if (applesmc_read_motion_sensor(SENSOR_Y, &y)) return; x = -x; input_report_abs(idev, ABS_X, x - rest_x); input_report_abs(idev, ABS_Y, y - rest_y); input_sync(idev); } /* Sysfs Files */ static ssize_t applesmc_name_show(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "applesmc\n"); } static ssize_t applesmc_position_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; s16 x, y, z; ret = applesmc_read_motion_sensor(SENSOR_X, &x); if (ret) goto out; ret = applesmc_read_motion_sensor(SENSOR_Y, &y); if (ret) goto out; ret = applesmc_read_motion_sensor(SENSOR_Z, &z); if (ret) goto out; out: if (ret) return ret; else return snprintf(buf, PAGE_SIZE, "(%d,%d,%d)\n", x, y, z); } static ssize_t applesmc_light_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; static int data_length; int ret; u8 left = 0, right = 0; u8 buffer[10]; if (!data_length) { entry = applesmc_get_entry_by_key(LIGHT_SENSOR_LEFT_KEY); if (IS_ERR(entry)) return PTR_ERR(entry); if (entry->len > 10) return -ENXIO; data_length = entry->len; pr_info("light sensor data length set to %d\n", data_length); } ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length); /* newer macbooks report a single 10-bit bigendian value */ if (data_length == 10) { left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2; goto out; } left = buffer[2]; if (ret) goto out; ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); right = buffer[2]; out: if (ret) return ret; else return snprintf(sysfsbuf, PAGE_SIZE, "(%d,%d)\n", left, right); } /* Displays sensor key as 
label */ static ssize_t applesmc_show_sensor_label(struct device *dev, struct device_attribute *devattr, char *sysfsbuf) { int index = smcreg.temp_begin + to_index(devattr); const struct applesmc_entry *entry; entry = applesmc_get_entry_by_index(index); if (IS_ERR(entry)) return PTR_ERR(entry); return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->key); } /* Displays degree Celsius * 1000 */ static ssize_t applesmc_show_temperature(struct device *dev, struct device_attribute *devattr, char *sysfsbuf) { int index = smcreg.temp_begin + to_index(devattr); const struct applesmc_entry *entry; int ret; u8 buffer[2]; unsigned int temp; entry = applesmc_get_entry_by_index(index); if (IS_ERR(entry)) return PTR_ERR(entry); if (entry->len > 2) return -EINVAL; ret = applesmc_read_entry(entry, buffer, entry->len); if (ret) return ret; if (entry->len == 2) { temp = buffer[0] * 1000; temp += (buffer[1] >> 6) * 250; } else { temp = buffer[0] * 4000; } return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", temp); } static ssize_t applesmc_show_fan_speed(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { int ret; unsigned int speed = 0; char newkey[5]; u8 buffer[2]; sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr)); ret = applesmc_read_key(newkey, buffer, 2); speed = ((buffer[0] << 8 | buffer[1]) >> 2); if (ret) return ret; else return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); } static ssize_t applesmc_store_fan_speed(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) { int ret; unsigned long speed; char newkey[5]; u8 buffer[2]; if (strict_strtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000) return -EINVAL; /* Bigger than a 14-bit value */ sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr)); buffer[0] = (speed >> 6) & 0xff; buffer[1] = (speed << 2) & 0xff; ret = applesmc_write_key(newkey, buffer, 2); if (ret) return ret; else return count; } static ssize_t applesmc_show_fan_manual(struct device *dev, struct 
device_attribute *attr, char *sysfsbuf) { int ret; u16 manual = 0; u8 buffer[2]; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; if (ret) return ret; else return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); } static ssize_t applesmc_store_fan_manual(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) { int ret; u8 buffer[2]; unsigned long input; u16 val; if (strict_strtoul(sysfsbuf, 10, &input) < 0) return -EINVAL; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); val = (buffer[0] << 8 | buffer[1]); if (ret) goto out; if (input) val = val | (0x01 << to_index(attr)); else val = val & ~(0x01 << to_index(attr)); buffer[0] = (val >> 8) & 0xFF; buffer[1] = val & 0xFF; ret = applesmc_write_key(FANS_MANUAL, buffer, 2); out: if (ret) return ret; else return count; } static ssize_t applesmc_show_fan_position(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { int ret; char newkey[5]; u8 buffer[17]; sprintf(newkey, FAN_ID_FMT, to_index(attr)); ret = applesmc_read_key(newkey, buffer, 16); buffer[16] = 0; if (ret) return ret; else return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", buffer+4); } static ssize_t applesmc_calibrate_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { return snprintf(sysfsbuf, PAGE_SIZE, "(%d,%d)\n", rest_x, rest_y); } static ssize_t applesmc_calibrate_store(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) { applesmc_calibrate(); return count; } static void applesmc_backlight_set(struct work_struct *work) { applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2); } static DECLARE_WORK(backlight_work, &applesmc_backlight_set); static void applesmc_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) { int ret; backlight_state[0] = value; ret = queue_work(applesmc_led_wq, &backlight_work); if (debug && (!ret)) printk(KERN_DEBUG "applesmc: work was already on the 
queue.\n"); } static ssize_t applesmc_key_count_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { int ret; u8 buffer[4]; u32 count; ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4); count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + ((u32)buffer[2]<<8) + buffer[3]; if (ret) return ret; else return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); } static ssize_t applesmc_key_at_index_read_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; int ret; entry = applesmc_get_entry_by_index(key_at_index); if (IS_ERR(entry)) return PTR_ERR(entry); ret = applesmc_read_entry(entry, sysfsbuf, entry->len); if (ret) return ret; return entry->len; } static ssize_t applesmc_key_at_index_data_length_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_index(key_at_index); if (IS_ERR(entry)) return PTR_ERR(entry); return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", entry->len); } static ssize_t applesmc_key_at_index_type_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_index(key_at_index); if (IS_ERR(entry)) return PTR_ERR(entry); return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->type); } static ssize_t applesmc_key_at_index_name_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_index(key_at_index); if (IS_ERR(entry)) return PTR_ERR(entry); return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->key); } static ssize_t applesmc_key_at_index_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", key_at_index); } static ssize_t applesmc_key_at_index_store(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) { unsigned long newkey; if 
(strict_strtoul(sysfsbuf, 10, &newkey) < 0 || newkey >= smcreg.key_count) return -EINVAL; key_at_index = newkey; return count; } static struct led_classdev applesmc_backlight = { .name = "smc::kbd_backlight", .default_trigger = "nand-disk", .brightness_set = applesmc_brightness_set, }; static struct applesmc_node_group info_group[] = { { "name", applesmc_name_show }, { "key_count", applesmc_key_count_show }, { "key_at_index", applesmc_key_at_index_show, applesmc_key_at_index_store }, { "key_at_index_name", applesmc_key_at_index_name_show }, { "key_at_index_type", applesmc_key_at_index_type_show }, { "key_at_index_data_length", applesmc_key_at_index_data_length_show }, { "key_at_index_data", applesmc_key_at_index_read_show }, { } }; static struct applesmc_node_group accelerometer_group[] = { { "position", applesmc_position_show }, { "calibrate", applesmc_calibrate_show, applesmc_calibrate_store }, { } }; static struct applesmc_node_group light_sensor_group[] = { { "light", applesmc_light_show }, { } }; static struct applesmc_node_group fan_group[] = { { "fan%d_label", applesmc_show_fan_position }, { "fan%d_input", applesmc_show_fan_speed, NULL, 0 }, { "fan%d_min", applesmc_show_fan_speed, applesmc_store_fan_speed, 1 }, { "fan%d_max", applesmc_show_fan_speed, NULL, 2 }, { "fan%d_safe", applesmc_show_fan_speed, NULL, 3 }, { "fan%d_output", applesmc_show_fan_speed, applesmc_store_fan_speed, 4 }, { "fan%d_manual", applesmc_show_fan_manual, applesmc_store_fan_manual }, { } }; static struct applesmc_node_group temp_group[] = { { "temp%d_label", applesmc_show_sensor_label }, { "temp%d_input", applesmc_show_temperature }, { } }; /* Module stuff */ /* * applesmc_destroy_nodes - remove files and free associated memory */ static void applesmc_destroy_nodes(struct applesmc_node_group *groups) { struct applesmc_node_group *grp; struct applesmc_dev_attr *node; for (grp = groups; grp->nodes; grp++) { for (node = grp->nodes; node->sda.dev_attr.attr.name; node++) 
sysfs_remove_file(&pdev->dev.kobj, &node->sda.dev_attr.attr); kfree(grp->nodes); grp->nodes = NULL; } } /* * applesmc_create_nodes - create a two-dimensional group of sysfs files */ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num) { struct applesmc_node_group *grp; struct applesmc_dev_attr *node; struct attribute *attr; int ret, i; for (grp = groups; grp->format; grp++) { grp->nodes = kcalloc(num + 1, sizeof(*node), GFP_KERNEL); if (!grp->nodes) { ret = -ENOMEM; goto out; } for (i = 0; i < num; i++) { node = &grp->nodes[i]; sprintf(node->name, grp->format, i + 1); node->sda.index = (grp->option << 16) | (i & 0xffff); node->sda.dev_attr.show = grp->show; node->sda.dev_attr.store = grp->store; attr = &node->sda.dev_attr.attr; sysfs_attr_init(attr); attr->name = node->name; attr->mode = S_IRUGO | (grp->store ? S_IWUSR : 0); ret = sysfs_create_file(&pdev->dev.kobj, attr); if (ret) { attr->name = NULL; goto out; } } } return 0; out: applesmc_destroy_nodes(groups); return ret; } /* Create accelerometer ressources */ static int applesmc_create_accelerometer(void) { struct input_dev *idev; int ret; if (!smcreg.has_accelerometer) return 0; ret = applesmc_create_nodes(accelerometer_group, 1); if (ret) goto out; applesmc_idev = input_allocate_polled_device(); if (!applesmc_idev) { ret = -ENOMEM; goto out_sysfs; } applesmc_idev->poll = applesmc_idev_poll; applesmc_idev->poll_interval = APPLESMC_POLL_INTERVAL; /* initial calibrate for the input device */ applesmc_calibrate(); /* initialize the input device */ idev = applesmc_idev->input; idev->name = "applesmc"; idev->id.bustype = BUS_HOST; idev->dev.parent = &pdev->dev; idev->evbit[0] = BIT_MASK(EV_ABS); input_set_abs_params(idev, ABS_X, -256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT); input_set_abs_params(idev, ABS_Y, -256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT); ret = input_register_polled_device(applesmc_idev); if (ret) goto out_idev; return 0; out_idev: 
input_free_polled_device(applesmc_idev); out_sysfs: applesmc_destroy_nodes(accelerometer_group); out: pr_warn("driver init failed (ret=%d)!\n", ret); return ret; } /* Release all ressources used by the accelerometer */ static void applesmc_release_accelerometer(void) { if (!smcreg.has_accelerometer) return; input_unregister_polled_device(applesmc_idev); input_free_polled_device(applesmc_idev); applesmc_destroy_nodes(accelerometer_group); } static int applesmc_create_light_sensor(void) { if (!smcreg.num_light_sensors) return 0; return applesmc_create_nodes(light_sensor_group, 1); } static void applesmc_release_light_sensor(void) { if (!smcreg.num_light_sensors) return; applesmc_destroy_nodes(light_sensor_group); } static int applesmc_create_key_backlight(void) { if (!smcreg.has_key_backlight) return 0; applesmc_led_wq = create_singlethread_workqueue("applesmc-led"); if (!applesmc_led_wq) return -ENOMEM; return led_classdev_register(&pdev->dev, &applesmc_backlight); } static void applesmc_release_key_backlight(void) { if (!smcreg.has_key_backlight) return; led_classdev_unregister(&applesmc_backlight); destroy_workqueue(applesmc_led_wq); } static int applesmc_dmi_match(const struct dmi_system_id *id) { return 1; } /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". * So we need to put "Apple MacBook Pro" before "Apple MacBook". 
*/ static __initdata struct dmi_system_id applesmc_whitelist[] = { { applesmc_dmi_match, "Apple MacBook Air", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") }, }, { applesmc_dmi_match, "Apple MacBook Pro", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro") }, }, { applesmc_dmi_match, "Apple MacBook", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") }, }, { applesmc_dmi_match, "Apple Macmini", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "Macmini") }, }, { applesmc_dmi_match, "Apple MacPro", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, }, { applesmc_dmi_match, "Apple iMac", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "iMac") }, }, { .ident = NULL } }; static int __init applesmc_init(void) { int ret; if (!dmi_check_system(applesmc_whitelist)) { pr_warn("supported laptop not found!\n"); ret = -ENODEV; goto out; } if (!request_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS, "applesmc")) { ret = -ENXIO; goto out; } ret = platform_driver_register(&applesmc_driver); if (ret) goto out_region; pdev = platform_device_register_simple("applesmc", APPLESMC_DATA_PORT, NULL, 0); if (IS_ERR(pdev)) { ret = PTR_ERR(pdev); goto out_driver; } /* create register cache */ ret = applesmc_init_smcreg(); if (ret) goto out_device; ret = applesmc_create_nodes(info_group, 1); if (ret) goto out_smcreg; ret = applesmc_create_nodes(fan_group, smcreg.fan_count); if (ret) goto out_info; ret = applesmc_create_nodes(temp_group, smcreg.temp_count); if (ret) goto out_fans; ret = applesmc_create_accelerometer(); if (ret) goto out_temperature; ret = applesmc_create_light_sensor(); if (ret) goto out_accelerometer; ret = applesmc_create_key_backlight(); if (ret) goto out_light_sysfs; hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(hwmon_dev)) { ret = PTR_ERR(hwmon_dev); goto out_light_ledclass; } return 0; 
out_light_ledclass: applesmc_release_key_backlight(); out_light_sysfs: applesmc_release_light_sensor(); out_accelerometer: applesmc_release_accelerometer(); out_temperature: applesmc_destroy_nodes(temp_group); out_fans: applesmc_destroy_nodes(fan_group); out_info: applesmc_destroy_nodes(info_group); out_smcreg: applesmc_destroy_smcreg(); out_device: platform_device_unregister(pdev); out_driver: platform_driver_unregister(&applesmc_driver); out_region: release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS); out: pr_warn("driver init failed (ret=%d)!\n", ret); return ret; } static void __exit applesmc_exit(void) { hwmon_device_unregister(hwmon_dev); applesmc_release_key_backlight(); applesmc_release_light_sensor(); applesmc_release_accelerometer(); applesmc_destroy_nodes(temp_group); applesmc_destroy_nodes(fan_group); applesmc_destroy_nodes(info_group); applesmc_destroy_smcreg(); platform_device_unregister(pdev); platform_driver_unregister(&applesmc_driver); release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS); } module_init(applesmc_init); module_exit(applesmc_exit); MODULE_AUTHOR("Nicolas Boichat"); MODULE_DESCRIPTION("Apple SMC"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(dmi, applesmc_whitelist);
gpl-2.0
balika011/android_kernel_lenovo_spark
arch/arm/mach-davinci/da8xx-dt.c
2113
2167
/*
 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
 *
 * Modified from mach-omap/omap2/board-generic.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/irqdomain.h>

#include <asm/mach/arch.h>
#include <mach/common.h>
#include <mach/cp_intc.h>
#include <mach/da8xx.h>

/* Number of UART instances present on DA8xx SoCs. */
#define DA8XX_NUM_UARTS 3

/*
 * Set up the functional clock for every UART.
 * NOTE(review): presumably needed because DT boot does not run the legacy
 * serial init that normally performs this — confirm against mach-davinci
 * serial code.
 */
static void __init da8xx_uart_clk_enable(void)
{
	int i;
	for (i = 0; i < DA8XX_NUM_UARTS; i++)
		davinci_serial_setup_clk(i, NULL);
}

/* Match table routing the top-level interrupt controller to cp_intc_of_init. */
static struct of_device_id da8xx_irq_match[] __initdata = {
	{ .compatible = "ti,cp-intc", .data = cp_intc_of_init, },
	{ }
};

/* Initialize the interrupt controller(s) described in the device tree. */
static void __init da8xx_init_irq(void)
{
	of_irq_init(da8xx_irq_match);
}

/*
 * Auxiliary data: binds fixed platform-device names to DT nodes by
 * compatible string + unit address.
 * NOTE(review): these names appear chosen to match legacy (non-DT) device
 * names, e.g. for clock lookup — verify against the clock tables.
 */
static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
	OF_DEV_AUXDATA("ti,davinci-i2c", 0x01c22000, "i2c_davinci.1", NULL),
	OF_DEV_AUXDATA("ti,davinci-wdt", 0x01c21000, "watchdog", NULL),
	OF_DEV_AUXDATA("ti,da830-mmc", 0x01c40000, "da830-mmc.0", NULL),
	OF_DEV_AUXDATA("ti,da850-ehrpwm", 0x01f00000, "ehrpwm", NULL),
	OF_DEV_AUXDATA("ti,da850-ehrpwm", 0x01f02000, "ehrpwm", NULL),
	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f06000, "ecap", NULL),
	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f07000, "ecap", NULL),
	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f08000, "ecap", NULL),
	OF_DEV_AUXDATA("ti,da830-spi", 0x01f0e000, "spi_davinci.1", NULL),
	{}
};

#ifdef CONFIG_ARCH_DAVINCI_DA850

/* Populate platform devices from the DT, then enable the UART clocks. */
static void __init da850_init_machine(void)
{
	of_platform_populate(NULL, of_default_bus_match_table,
			     da850_auxdata_lookup, NULL);
	da8xx_uart_clk_enable();
}

/* Root-node compatible strings this machine descriptor handles. */
static const char *da850_boards_compat[] __initdata = {
	"enbw,cmc",
	"ti,da850-evm",
	"ti,da850",
	NULL,
};

DT_MACHINE_START(DA850_DT, "Generic DA850/OMAP-L138/AM18x")
	.map_io		= da850_init,
	.init_irq	= da8xx_init_irq,
	.init_time	= davinci_timer_init,
	.init_machine	= da850_init_machine,
	.dt_compat	= da850_boards_compat,
	.init_late	= davinci_init_late,
	.restart	= da8xx_restart,
MACHINE_END

#endif
gpl-2.0
crdroid-devices/android_kernel_lge_msm8992
security/tomoyo/audit.c
2881
12710
/*
 * security/tomoyo/audit.c
 *
 * Copyright (C) 2005-2011 NTT DATA CORPORATION
 */

#include "common.h"
#include <linux/slab.h>

/**
 * tomoyo_print_bprm - Print "struct linux_binprm" for auditing.
 *
 * @bprm: Pointer to "struct linux_binprm".
 * @dump: Pointer to "struct tomoyo_page_dump".
 *
 * Returns the contents of @bprm on success, NULL otherwise.
 *
 * Builds an "argv[]={ ... } envp[]={ ... }" string by walking the argument
 * pages of @bprm one page at a time.  Printable characters are copied as-is,
 * backslash is doubled, everything else is emitted as a three-digit octal
 * escape.  If either list would overflow the fixed buffer it is replaced by
 * "... " (truncation marker) rather than being cut mid-escape.
 *
 * This function uses kzalloc(), so caller must kfree() if this function
 * didn't return NULL.
 */
static char *tomoyo_print_bprm(struct linux_binprm *bprm,
			       struct tomoyo_page_dump *dump)
{
	static const int tomoyo_buffer_len = 4096 * 2;
	char *buffer = kzalloc(tomoyo_buffer_len, GFP_NOFS);
	char *cp;
	char *last_start;
	int len;
	unsigned long pos = bprm->p;
	int offset = pos % PAGE_SIZE;
	int argv_count = bprm->argc;
	int envp_count = bprm->envc;
	bool truncated = false;

	if (!buffer)
		return NULL;
	len = snprintf(buffer, tomoyo_buffer_len - 1, "argv[]={ ");
	cp = buffer + len;
	if (!argv_count) {
		memmove(cp, "} envp[]={ ", 11);
		cp += 11;
	}
	/* last_start marks where the current quoted string began, so that a
	   truncated string can be rolled back and replaced by "... ". */
	last_start = cp;
	while (argv_count || envp_count) {
		if (!tomoyo_dump_page(bprm, pos, dump))
			goto out;
		pos += PAGE_SIZE - offset;
		/* Read. */
		while (offset < PAGE_SIZE) {
			const char *kaddr = dump->data;
			const unsigned char c = kaddr[offset++];
			if (cp == last_start)
				*cp++ = '"';
			if (cp >= buffer + tomoyo_buffer_len - 32) {
				/* Reserve some room for "..." string. */
				truncated = true;
			} else if (c == '\\') {
				*cp++ = '\\';
				*cp++ = '\\';
			} else if (c > ' ' && c < 127) {
				*cp++ = c;
			} else if (!c) {
				/* NUL terminates one argv/envp element. */
				*cp++ = '"';
				*cp++ = ' ';
				last_start = cp;
			} else {
				/* Non-printable: emit \ooo octal escape. */
				*cp++ = '\\';
				*cp++ = (c >> 6) + '0';
				*cp++ = ((c >> 3) & 7) + '0';
				*cp++ = (c & 7) + '0';
			}
			if (c)
				continue;
			if (argv_count) {
				if (--argv_count == 0) {
					if (truncated) {
						cp = last_start;
						memmove(cp, "... ", 4);
						cp += 4;
					}
					memmove(cp, "} envp[]={ ", 11);
					cp += 11;
					last_start = cp;
					truncated = false;
				}
			} else if (envp_count) {
				if (--envp_count == 0) {
					if (truncated) {
						cp = last_start;
						memmove(cp, "... ", 4);
						cp += 4;
					}
				}
			}
			if (!argv_count && !envp_count)
				break;
		}
		offset = 0;
	}
	*cp++ = '}';
	*cp = '\0';
	return buffer;
out:
	/* Page dump failed: fall back to a fully elided representation. */
	snprintf(buffer, tomoyo_buffer_len - 1,
		 "argv[]={ ... } envp[]= { ... }");
	return buffer;
}

/**
 * tomoyo_filetype - Get string representation of file type.
 *
 * @mode: Mode value for stat().
 *
 * Returns file type string.
 */
static inline const char *tomoyo_filetype(const umode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
	case 0:
		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_FILE];
	case S_IFDIR:
		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_DIRECTORY];
	case S_IFLNK:
		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_SYMLINK];
	case S_IFIFO:
		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_FIFO];
	case S_IFSOCK:
		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_SOCKET];
	case S_IFBLK:
		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_BLOCK_DEV];
	case S_IFCHR:
		return tomoyo_condition_keyword[TOMOYO_TYPE_IS_CHAR_DEV];
	}
	return "unknown"; /* This should not happen. */
}

/**
 * tomoyo_print_header - Get header line of audit log.
 *
 * @r: Pointer to "struct tomoyo_request_info".
 *
 * Returns string representation.
 *
 * The header contains the timestamp, profile/mode, grant status, the
 * caller's credentials and, if object information is attached to @r,
 * the uid/gid/inode/mode of up to TOMOYO_MAX_PATH_STAT paths (even
 * indices are the paths themselves, odd indices their parents).
 *
 * This function uses kmalloc(), so caller must kfree() if this function
 * didn't return NULL.
 */
static char *tomoyo_print_header(struct tomoyo_request_info *r)
{
	struct tomoyo_time stamp;
	const pid_t gpid = task_pid_nr(current);
	struct tomoyo_obj_info *obj = r->obj;
	static const int tomoyo_buffer_len = 4096;
	char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS);
	int pos;
	u8 i;

	if (!buffer)
		return NULL;
	{
		struct timeval tv;
		do_gettimeofday(&tv);
		tomoyo_convert_time(tv.tv_sec, &stamp);
	}
	pos = snprintf(buffer, tomoyo_buffer_len - 1,
		       "#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s "
		       "granted=%s (global-pid=%u) task={ pid=%u ppid=%u "
		       "uid=%u gid=%u euid=%u egid=%u suid=%u sgid=%u "
		       "fsuid=%u fsgid=%u }",
		       stamp.year, stamp.month, stamp.day, stamp.hour,
		       stamp.min, stamp.sec, r->profile, tomoyo_mode[r->mode],
		       tomoyo_yesno(r->granted), gpid, tomoyo_sys_getpid(),
		       tomoyo_sys_getppid(),
		       from_kuid(&init_user_ns, current_uid()),
		       from_kgid(&init_user_ns, current_gid()),
		       from_kuid(&init_user_ns, current_euid()),
		       from_kgid(&init_user_ns, current_egid()),
		       from_kuid(&init_user_ns, current_suid()),
		       from_kgid(&init_user_ns, current_sgid()),
		       from_kuid(&init_user_ns, current_fsuid()),
		       from_kgid(&init_user_ns, current_fsgid()));
	if (!obj)
		goto no_obj_info;
	if (!obj->validate_done) {
		/* Fetch path attributes lazily, at most once per request. */
		tomoyo_get_attributes(obj);
		obj->validate_done = true;
	}
	for (i = 0; i < TOMOYO_MAX_PATH_STAT; i++) {
		struct tomoyo_mini_stat *stat;
		unsigned int dev;
		umode_t mode;
		if (!obj->stat_valid[i])
			continue;
		stat = &obj->stat[i];
		dev = stat->dev;
		mode = stat->mode;
		if (i & 1) {
			/* Odd index: parent directory of path (i>>1)+1. */
			pos += snprintf(buffer + pos,
					tomoyo_buffer_len - 1 - pos,
					" path%u.parent={ uid=%u gid=%u "
					"ino=%lu perm=0%o }", (i >> 1) + 1,
					from_kuid(&init_user_ns, stat->uid),
					from_kgid(&init_user_ns, stat->gid),
					(unsigned long)stat->ino,
					stat->mode & S_IALLUGO);
			continue;
		}
		pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos,
				" path%u={ uid=%u gid=%u ino=%lu major=%u"
				" minor=%u perm=0%o type=%s", (i >> 1) + 1,
				from_kuid(&init_user_ns, stat->uid),
				from_kgid(&init_user_ns, stat->gid),
				(unsigned long)stat->ino,
				MAJOR(dev), MINOR(dev),
				mode & S_IALLUGO, tomoyo_filetype(mode));
		if (S_ISCHR(mode) || S_ISBLK(mode)) {
			/* Device nodes additionally log the rdev numbers. */
			dev = stat->rdev;
			pos += snprintf(buffer + pos,
					tomoyo_buffer_len - 1 - pos,
					" dev_major=%u dev_minor=%u",
					MAJOR(dev), MINOR(dev));
		}
		pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos,
				" }");
	}
no_obj_info:
	if (pos < tomoyo_buffer_len - 1)
		return buffer;
	/* Header did not fit; better to drop it than to log a truncation. */
	kfree(buffer);
	return NULL;
}

/**
 * tomoyo_init_log - Allocate buffer for audit logs.
 *
 * @r:    Pointer to "struct tomoyo_request_info".
 * @len:  Buffer size needed for @fmt and @args.
 * @fmt:  The printf()'s format string.
 * @args: va_list structure for @fmt.
 *
 * Returns pointer to allocated memory.
 *
 * The log consists of the header line, optionally exec or symlink details,
 * the domainname, and the formatted request body.
 *
 * This function uses kzalloc(), so caller must kfree() if this function
 * didn't return NULL.
 */
char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
		      va_list args)
{
	char *buf = NULL;
	char *bprm_info = NULL;
	const char *header = NULL;
	char *realpath = NULL;
	const char *symlink = NULL;
	int pos;
	const char *domainname = r->domain->domainname->name;

	header = tomoyo_print_header(r);
	if (!header)
		return NULL;
	/* +10 is for '\n' etc. and '\0'. */
	len += strlen(domainname) + strlen(header) + 10;
	if (r->ee) {
		struct file *file = r->ee->bprm->file;
		realpath = tomoyo_realpath_from_path(&file->f_path);
		bprm_info = tomoyo_print_bprm(r->ee->bprm, &r->ee->dump);
		if (!realpath || !bprm_info)
			goto out;
		/* +80 is for " exec={ realpath=\"%s\" argc=%d envc=%d %s }" */
		len += strlen(realpath) + 80 + strlen(bprm_info);
	} else if (r->obj && r->obj->symlink_target) {
		symlink = r->obj->symlink_target->name;
		/* +18 is for " symlink.target=\"%s\"" */
		len += 18 + strlen(symlink);
	}
	/* Round up so tomoyo_write_log2()'s quota accounting stays in sync. */
	len = tomoyo_round2(len);
	buf = kzalloc(len, GFP_NOFS);
	if (!buf)
		goto out;
	len--;
	pos = snprintf(buf, len, "%s", header);
	if (realpath) {
		struct linux_binprm *bprm = r->ee->bprm;
		pos += snprintf(buf + pos, len - pos,
				" exec={ realpath=\"%s\" argc=%d envc=%d %s }",
				realpath, bprm->argc, bprm->envc, bprm_info);
	} else if (symlink)
		pos += snprintf(buf + pos, len - pos, " symlink.target=\"%s\"",
				symlink);
	pos += snprintf(buf + pos, len - pos, "\n%s\n", domainname);
	vsnprintf(buf + pos, len - pos, fmt, args);
out:
	kfree(realpath);
	kfree(bprm_info);
	kfree(header);
	return buf;
}

/* Wait queue for /sys/kernel/security/tomoyo/audit. */
static DECLARE_WAIT_QUEUE_HEAD(tomoyo_log_wait);

/* Structure for audit log. */
struct tomoyo_log {
	struct list_head list;
	char *log;
	int size;
};

/* The list for "struct tomoyo_log". */
static LIST_HEAD(tomoyo_log);

/* Lock for "struct list_head tomoyo_log". */
static DEFINE_SPINLOCK(tomoyo_log_lock);

/* Length of "struct list_head tomoyo_log". */
static unsigned int tomoyo_log_count;

/**
 * tomoyo_get_audit - Get audit mode.
 *
 * @ns:          Pointer to "struct tomoyo_policy_namespace".
 * @profile:     Profile number.
 * @index:       Index number of functionality.
 * @matched_acl: Pointer to the matched "struct tomoyo_acl_info". Maybe NULL.
 * @is_granted:  True if granted log, false otherwise.
 *
 * Returns true if this request should be audited, false otherwise.
 *
 * A per-ACL grant_log setting (when not AUTO) overrides the profile's
 * per-functionality and per-category configuration.
 */
static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns,
			     const u8 profile, const u8 index,
			     const struct tomoyo_acl_info *matched_acl,
			     const bool is_granted)
{
	u8 mode;
	const u8 category = tomoyo_index2category[index] +
		TOMOYO_MAX_MAC_INDEX;
	struct tomoyo_profile *p;

	if (!tomoyo_policy_loaded)
		return false;
	p = tomoyo_profile(ns, profile);
	if (tomoyo_log_count >= p->pref[TOMOYO_PREF_MAX_AUDIT_LOG])
		return false;
	if (is_granted && matched_acl && matched_acl->cond &&
	    matched_acl->cond->grant_log != TOMOYO_GRANTLOG_AUTO)
		return matched_acl->cond->grant_log == TOMOYO_GRANTLOG_YES;
	/* Resolve USE_DEFAULT: functionality -> category -> profile default. */
	mode = p->config[index];
	if (mode == TOMOYO_CONFIG_USE_DEFAULT)
		mode = p->config[category];
	if (mode == TOMOYO_CONFIG_USE_DEFAULT)
		mode = p->default_config;
	if (is_granted)
		return mode & TOMOYO_CONFIG_WANT_GRANT_LOG;
	return mode & TOMOYO_CONFIG_WANT_REJECT_LOG;
}

/**
 * tomoyo_write_log2 - Write an audit log.
 *
 * @r:    Pointer to "struct tomoyo_request_info".
 * @len:  Buffer size needed for @fmt and @args.
 * @fmt:  The printf()'s format string.
 * @args: va_list structure for @fmt.
 *
 * Returns nothing.
 *
 * Formats the log, then appends it to the global list under
 * tomoyo_log_lock unless the audit memory quota would be exceeded.
 */
void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt,
		       va_list args)
{
	char *buf;
	struct tomoyo_log *entry;
	bool quota_exceeded = false;

	if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type,
			      r->matched_acl, r->granted))
		goto out;
	buf = tomoyo_init_log(r, len, fmt, args);
	if (!buf)
		goto out;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
	if (!entry) {
		kfree(buf);
		goto out;
	}
	entry->log = buf;
	len = tomoyo_round2(strlen(buf) + 1);
	/*
	 * The entry->size is used for memory quota checks.
	 * Don't go beyond strlen(entry->log).
	 */
	entry->size = len + tomoyo_round2(sizeof(*entry));
	spin_lock(&tomoyo_log_lock);
	if (tomoyo_memory_quota[TOMOYO_MEMORY_AUDIT] &&
	    tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] + entry->size >=
	    tomoyo_memory_quota[TOMOYO_MEMORY_AUDIT]) {
		quota_exceeded = true;
	} else {
		tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] += entry->size;
		list_add_tail(&entry->list, &tomoyo_log);
		tomoyo_log_count++;
	}
	spin_unlock(&tomoyo_log_lock);
	if (quota_exceeded) {
		kfree(buf);
		kfree(entry);
		goto out;
	}
	/* Wake readers blocked in tomoyo_poll_log(). */
	wake_up(&tomoyo_log_wait);
out:
	return;
}

/**
 * tomoyo_write_log - Write an audit log.
 *
 * @r:   Pointer to "struct tomoyo_request_info".
 * @fmt: The printf()'s format string, followed by parameters.
 *
 * Returns nothing.
 */
void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...)
{
	va_list args;
	int len;

	/* First pass only measures the formatted length (+1 for '\0'). */
	va_start(args, fmt);
	len = vsnprintf((char *) &len, 1, fmt, args) + 1;
	va_end(args);
	va_start(args, fmt);
	tomoyo_write_log2(r, len, fmt, args);
	va_end(args);
}

/**
 * tomoyo_read_log - Read an audit log.
 *
 * @head: Pointer to "struct tomoyo_io_buffer".
 *
 * Returns nothing.
 *
 * Pops the oldest queued log entry (if any) and hands its string to
 * @head; ownership of the string moves to head->read_buf.
 */
void tomoyo_read_log(struct tomoyo_io_buffer *head)
{
	struct tomoyo_log *ptr = NULL;

	if (head->r.w_pos)
		return;
	kfree(head->read_buf);
	head->read_buf = NULL;
	spin_lock(&tomoyo_log_lock);
	if (!list_empty(&tomoyo_log)) {
		ptr = list_entry(tomoyo_log.next, typeof(*ptr), list);
		list_del(&ptr->list);
		tomoyo_log_count--;
		tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] -= ptr->size;
	}
	spin_unlock(&tomoyo_log_lock);
	if (ptr) {
		head->read_buf = ptr->log;
		head->r.w[head->r.w_pos++] = head->read_buf;
		kfree(ptr);
	}
}

/**
 * tomoyo_poll_log - Wait for an audit log.
 *
 * @file: Pointer to "struct file".
 * @wait: Pointer to "poll_table". Maybe NULL.
 *
 * Returns POLLIN | POLLRDNORM when ready to read an audit log.
 */
unsigned int tomoyo_poll_log(struct file *file, poll_table *wait)
{
	if (tomoyo_log_count)
		return POLLIN | POLLRDNORM;
	poll_wait(file, &tomoyo_log_wait, wait);
	/* Re-check: a log may have arrived while registering the waiter. */
	if (tomoyo_log_count)
		return POLLIN | POLLRDNORM;
	return 0;
}
gpl-2.0
sdonati84/GalaxyS3_Kernel
drivers/media/video/bt8xx/bttv-risc.c
3137
25999
/* bttv-risc.c -- interfaces to other kernel modules bttv risc code handling - memory management - generation (c) 2000-2003 Gerd Knorr <kraxel@bytesex.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include <asm/page.h> #include <asm/pgtable.h> #include <media/v4l2-ioctl.h> #include "bttvp.h" #define VCR_HACK_LINES 4 /* ---------------------------------------------------------- */ /* risc code generators */ int bttv_risc_packed(struct bttv *btv, struct btcx_riscmem *risc, struct scatterlist *sglist, unsigned int offset, unsigned int bpl, unsigned int padding, unsigned int skip_lines, unsigned int store_lines) { u32 instructions,line,todo; struct scatterlist *sg; __le32 *rp; int rc; /* estimate risc mem: worst case is one write per page border + one write per scan line + sync + jump (all 2 dwords). padding can cause next bpl to start close to a page border. 
First DMA region may be smaller than PAGE_SIZE */ instructions = skip_lines * 4; instructions += (1 + ((bpl + padding) * store_lines) / PAGE_SIZE + store_lines) * 8; instructions += 2 * 8; if ((rc = btcx_riscmem_alloc(btv->c.pci,risc,instructions)) < 0) return rc; /* sync instruction */ rp = risc->cpu; *(rp++) = cpu_to_le32(BT848_RISC_SYNC|BT848_FIFO_STATUS_FM1); *(rp++) = cpu_to_le32(0); while (skip_lines-- > 0) { *(rp++) = cpu_to_le32(BT848_RISC_SKIP | BT848_RISC_SOL | BT848_RISC_EOL | bpl); } /* scan lines */ sg = sglist; for (line = 0; line < store_lines; line++) { if ((btv->opt_vcr_hack) && (line >= (store_lines - VCR_HACK_LINES))) continue; while (offset && offset >= sg_dma_len(sg)) { offset -= sg_dma_len(sg); sg++; } if (bpl <= sg_dma_len(sg)-offset) { /* fits into current chunk */ *(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_SOL| BT848_RISC_EOL|bpl); *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset); offset+=bpl; } else { /* scanline needs to be splitted */ todo = bpl; *(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_SOL| (sg_dma_len(sg)-offset)); *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset); todo -= (sg_dma_len(sg)-offset); offset = 0; sg++; while (todo > sg_dma_len(sg)) { *(rp++)=cpu_to_le32(BT848_RISC_WRITE| sg_dma_len(sg)); *(rp++)=cpu_to_le32(sg_dma_address(sg)); todo -= sg_dma_len(sg); sg++; } *(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_EOL| todo); *(rp++)=cpu_to_le32(sg_dma_address(sg)); offset += todo; } offset += padding; } /* save pointer to jmp instruction address */ risc->jmp = rp; BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); return 0; } static int bttv_risc_planar(struct bttv *btv, struct btcx_riscmem *risc, struct scatterlist *sglist, unsigned int yoffset, unsigned int ybpl, unsigned int ypadding, unsigned int ylines, unsigned int uoffset, unsigned int voffset, unsigned int hshift, unsigned int vshift, unsigned int cpadding) { unsigned int instructions,line,todo,ylen,chroma; __le32 *rp; u32 ri; struct scatterlist 
*ysg; struct scatterlist *usg; struct scatterlist *vsg; int topfield = (0 == yoffset); int rc; /* estimate risc mem: worst case is one write per page border + one write per scan line (5 dwords) plus sync + jump (2 dwords) */ instructions = ((3 + (ybpl + ypadding) * ylines * 2) / PAGE_SIZE) + ylines; instructions += 2; if ((rc = btcx_riscmem_alloc(btv->c.pci,risc,instructions*4*5)) < 0) return rc; /* sync instruction */ rp = risc->cpu; *(rp++) = cpu_to_le32(BT848_RISC_SYNC|BT848_FIFO_STATUS_FM3); *(rp++) = cpu_to_le32(0); /* scan lines */ ysg = sglist; usg = sglist; vsg = sglist; for (line = 0; line < ylines; line++) { if ((btv->opt_vcr_hack) && (line >= (ylines - VCR_HACK_LINES))) continue; switch (vshift) { case 0: chroma = 1; break; case 1: if (topfield) chroma = ((line & 1) == 0); else chroma = ((line & 1) == 1); break; case 2: if (topfield) chroma = ((line & 3) == 0); else chroma = ((line & 3) == 2); break; default: chroma = 0; break; } for (todo = ybpl; todo > 0; todo -= ylen) { /* go to next sg entry if needed */ while (yoffset && yoffset >= sg_dma_len(ysg)) { yoffset -= sg_dma_len(ysg); ysg++; } while (uoffset && uoffset >= sg_dma_len(usg)) { uoffset -= sg_dma_len(usg); usg++; } while (voffset && voffset >= sg_dma_len(vsg)) { voffset -= sg_dma_len(vsg); vsg++; } /* calculate max number of bytes we can write */ ylen = todo; if (yoffset + ylen > sg_dma_len(ysg)) ylen = sg_dma_len(ysg) - yoffset; if (chroma) { if (uoffset + (ylen>>hshift) > sg_dma_len(usg)) ylen = (sg_dma_len(usg) - uoffset) << hshift; if (voffset + (ylen>>hshift) > sg_dma_len(vsg)) ylen = (sg_dma_len(vsg) - voffset) << hshift; ri = BT848_RISC_WRITE123; } else { ri = BT848_RISC_WRITE1S23; } if (ybpl == todo) ri |= BT848_RISC_SOL; if (ylen == todo) ri |= BT848_RISC_EOL; /* write risc instruction */ *(rp++)=cpu_to_le32(ri | ylen); *(rp++)=cpu_to_le32(((ylen >> hshift) << 16) | (ylen >> hshift)); *(rp++)=cpu_to_le32(sg_dma_address(ysg)+yoffset); yoffset += ylen; if (chroma) { 
*(rp++)=cpu_to_le32(sg_dma_address(usg)+uoffset); uoffset += ylen >> hshift; *(rp++)=cpu_to_le32(sg_dma_address(vsg)+voffset); voffset += ylen >> hshift; } } yoffset += ypadding; if (chroma) { uoffset += cpadding; voffset += cpadding; } } /* save pointer to jmp instruction address */ risc->jmp = rp; BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); return 0; } static int bttv_risc_overlay(struct bttv *btv, struct btcx_riscmem *risc, const struct bttv_format *fmt, struct bttv_overlay *ov, int skip_even, int skip_odd) { int dwords, rc, line, maxy, start, end; unsigned skip, nskips; struct btcx_skiplist *skips; __le32 *rp; u32 ri,ra; u32 addr; /* skip list for window clipping */ if (NULL == (skips = kmalloc(sizeof(*skips) * ov->nclips,GFP_KERNEL))) return -ENOMEM; /* estimate risc mem: worst case is (1.5*clip+1) * lines instructions + sync + jump (all 2 dwords) */ dwords = (3 * ov->nclips + 2) * ((skip_even || skip_odd) ? (ov->w.height+1)>>1 : ov->w.height); dwords += 4; if ((rc = btcx_riscmem_alloc(btv->c.pci,risc,dwords*4)) < 0) { kfree(skips); return rc; } /* sync instruction */ rp = risc->cpu; *(rp++) = cpu_to_le32(BT848_RISC_SYNC|BT848_FIFO_STATUS_FM1); *(rp++) = cpu_to_le32(0); addr = (unsigned long)btv->fbuf.base; addr += btv->fbuf.fmt.bytesperline * ov->w.top; addr += (fmt->depth >> 3) * ov->w.left; /* scan lines */ for (maxy = -1, line = 0; line < ov->w.height; line++, addr += btv->fbuf.fmt.bytesperline) { if ((btv->opt_vcr_hack) && (line >= (ov->w.height - VCR_HACK_LINES))) continue; if ((line%2) == 0 && skip_even) continue; if ((line%2) == 1 && skip_odd) continue; /* calculate clipping */ if (line > maxy) btcx_calc_skips(line, ov->w.width, &maxy, skips, &nskips, ov->clips, ov->nclips); /* write out risc code */ for (start = 0, skip = 0; start < ov->w.width; start = end) { if (skip >= nskips) { ri = BT848_RISC_WRITE; end = ov->w.width; } else if (start < skips[skip].start) { ri = BT848_RISC_WRITE; end = skips[skip].start; } else { ri = 
BT848_RISC_SKIP; end = skips[skip].end; skip++; } if (BT848_RISC_WRITE == ri) ra = addr + (fmt->depth>>3)*start; else ra = 0; if (0 == start) ri |= BT848_RISC_SOL; if (ov->w.width == end) ri |= BT848_RISC_EOL; ri |= (fmt->depth>>3) * (end-start); *(rp++)=cpu_to_le32(ri); if (0 != ra) *(rp++)=cpu_to_le32(ra); } } /* save pointer to jmp instruction address */ risc->jmp = rp; BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); kfree(skips); return 0; } /* ---------------------------------------------------------- */ static void bttv_calc_geo_old(struct bttv *btv, struct bttv_geometry *geo, int width, int height, int interleaved, const struct bttv_tvnorm *tvnorm) { u32 xsf, sr; int vdelay; int swidth = tvnorm->swidth; int totalwidth = tvnorm->totalwidth; int scaledtwidth = tvnorm->scaledtwidth; if (btv->input == btv->dig) { swidth = 720; totalwidth = 858; scaledtwidth = 858; } vdelay = tvnorm->vdelay; xsf = (width*scaledtwidth)/swidth; geo->hscale = ((totalwidth*4096UL)/xsf-4096); geo->hdelay = tvnorm->hdelayx1; geo->hdelay = (geo->hdelay*width)/swidth; geo->hdelay &= 0x3fe; sr = ((tvnorm->sheight >> (interleaved?0:1))*512)/height - 512; geo->vscale = (0x10000UL-sr) & 0x1fff; geo->crop = ((width>>8)&0x03) | ((geo->hdelay>>6)&0x0c) | ((tvnorm->sheight>>4)&0x30) | ((vdelay>>2)&0xc0); geo->vscale |= interleaved ? (BT848_VSCALE_INT<<8) : 0; geo->vdelay = vdelay; geo->width = width; geo->sheight = tvnorm->sheight; geo->vtotal = tvnorm->vtotal; if (btv->opt_combfilter) { geo->vtc = (width < 193) ? 2 : ((width < 385) ? 1 : 0); geo->comb = (width < 769) ? 
1 : 0; } else { geo->vtc = 0; geo->comb = 0; } } static void bttv_calc_geo (struct bttv * btv, struct bttv_geometry * geo, unsigned int width, unsigned int height, int both_fields, const struct bttv_tvnorm * tvnorm, const struct v4l2_rect * crop) { unsigned int c_width; unsigned int c_height; u32 sr; if ((crop->left == tvnorm->cropcap.defrect.left && crop->top == tvnorm->cropcap.defrect.top && crop->width == tvnorm->cropcap.defrect.width && crop->height == tvnorm->cropcap.defrect.height && width <= tvnorm->swidth /* see PAL-Nc et al */) || btv->input == btv->dig) { bttv_calc_geo_old(btv, geo, width, height, both_fields, tvnorm); return; } /* For bug compatibility the image size checks permit scale factors > 16. See bttv_crop_calc_limits(). */ c_width = min((unsigned int) crop->width, width * 16); c_height = min((unsigned int) crop->height, height * 16); geo->width = width; geo->hscale = (c_width * 4096U + (width >> 1)) / width - 4096; /* Even to store Cb first, odd for Cr. */ geo->hdelay = ((crop->left * width + c_width) / c_width) & ~1; geo->sheight = c_height; geo->vdelay = crop->top - tvnorm->cropcap.bounds.top + MIN_VDELAY; sr = c_height >> !both_fields; sr = (sr * 512U + (height >> 1)) / height - 512; geo->vscale = (0x10000UL - sr) & 0x1fff; geo->vscale |= both_fields ? (BT848_VSCALE_INT << 8) : 0; geo->vtotal = tvnorm->vtotal; geo->crop = (((geo->width >> 8) & 0x03) | ((geo->hdelay >> 6) & 0x0c) | ((geo->sheight >> 4) & 0x30) | ((geo->vdelay >> 2) & 0xc0)); if (btv->opt_combfilter) { geo->vtc = (width < 193) ? 2 : ((width < 385) ? 1 : 0); geo->comb = (width < 769) ? 1 : 0; } else { geo->vtc = 0; geo->comb = 0; } } static void bttv_apply_geo(struct bttv *btv, struct bttv_geometry *geo, int odd) { int off = odd ? 
0x80 : 0x00; if (geo->comb) btor(BT848_VSCALE_COMB, BT848_E_VSCALE_HI+off); else btand(~BT848_VSCALE_COMB, BT848_E_VSCALE_HI+off); btwrite(geo->vtc, BT848_E_VTC+off); btwrite(geo->hscale >> 8, BT848_E_HSCALE_HI+off); btwrite(geo->hscale & 0xff, BT848_E_HSCALE_LO+off); btaor((geo->vscale>>8), 0xe0, BT848_E_VSCALE_HI+off); btwrite(geo->vscale & 0xff, BT848_E_VSCALE_LO+off); btwrite(geo->width & 0xff, BT848_E_HACTIVE_LO+off); btwrite(geo->hdelay & 0xff, BT848_E_HDELAY_LO+off); btwrite(geo->sheight & 0xff, BT848_E_VACTIVE_LO+off); btwrite(geo->vdelay & 0xff, BT848_E_VDELAY_LO+off); btwrite(geo->crop, BT848_E_CROP+off); btwrite(geo->vtotal>>8, BT848_VTOTAL_HI); btwrite(geo->vtotal & 0xff, BT848_VTOTAL_LO); } /* ---------------------------------------------------------- */ /* risc group / risc main loop / dma management */ void bttv_set_dma(struct bttv *btv, int override) { unsigned long cmd; int capctl; btv->cap_ctl = 0; if (NULL != btv->curr.top) btv->cap_ctl |= 0x02; if (NULL != btv->curr.bottom) btv->cap_ctl |= 0x01; if (NULL != btv->cvbi) btv->cap_ctl |= 0x0c; capctl = 0; capctl |= (btv->cap_ctl & 0x03) ? 0x03 : 0x00; /* capture */ capctl |= (btv->cap_ctl & 0x0c) ? 0x0c : 0x00; /* vbi data */ capctl |= override; d2printk(KERN_DEBUG "bttv%d: capctl=%x lirq=%d top=%08Lx/%08Lx even=%08Lx/%08Lx\n", btv->c.nr,capctl,btv->loop_irq, btv->cvbi ? (unsigned long long)btv->cvbi->top.dma : 0, btv->curr.top ? (unsigned long long)btv->curr.top->top.dma : 0, btv->cvbi ? (unsigned long long)btv->cvbi->bottom.dma : 0, btv->curr.bottom ? 
(unsigned long long)btv->curr.bottom->bottom.dma : 0); cmd = BT848_RISC_JUMP; if (btv->loop_irq) { cmd |= BT848_RISC_IRQ; cmd |= (btv->loop_irq & 0x0f) << 16; cmd |= (~btv->loop_irq & 0x0f) << 20; } if (btv->curr.frame_irq || btv->loop_irq || btv->cvbi) { mod_timer(&btv->timeout, jiffies+BTTV_TIMEOUT); } else { del_timer(&btv->timeout); } btv->main.cpu[RISC_SLOT_LOOP] = cpu_to_le32(cmd); btaor(capctl, ~0x0f, BT848_CAP_CTL); if (capctl) { if (btv->dma_on) return; btwrite(btv->main.dma, BT848_RISC_STRT_ADD); btor(3, BT848_GPIO_DMA_CTL); btv->dma_on = 1; } else { if (!btv->dma_on) return; btand(~3, BT848_GPIO_DMA_CTL); btv->dma_on = 0; } return; } int bttv_risc_init_main(struct bttv *btv) { int rc; if ((rc = btcx_riscmem_alloc(btv->c.pci,&btv->main,PAGE_SIZE)) < 0) return rc; dprintk(KERN_DEBUG "bttv%d: risc main @ %08Lx\n", btv->c.nr,(unsigned long long)btv->main.dma); btv->main.cpu[0] = cpu_to_le32(BT848_RISC_SYNC | BT848_RISC_RESYNC | BT848_FIFO_STATUS_VRE); btv->main.cpu[1] = cpu_to_le32(0); btv->main.cpu[2] = cpu_to_le32(BT848_RISC_JUMP); btv->main.cpu[3] = cpu_to_le32(btv->main.dma + (4<<2)); /* top field */ btv->main.cpu[4] = cpu_to_le32(BT848_RISC_JUMP); btv->main.cpu[5] = cpu_to_le32(btv->main.dma + (6<<2)); btv->main.cpu[6] = cpu_to_le32(BT848_RISC_JUMP); btv->main.cpu[7] = cpu_to_le32(btv->main.dma + (8<<2)); btv->main.cpu[8] = cpu_to_le32(BT848_RISC_SYNC | BT848_RISC_RESYNC | BT848_FIFO_STATUS_VRO); btv->main.cpu[9] = cpu_to_le32(0); /* bottom field */ btv->main.cpu[10] = cpu_to_le32(BT848_RISC_JUMP); btv->main.cpu[11] = cpu_to_le32(btv->main.dma + (12<<2)); btv->main.cpu[12] = cpu_to_le32(BT848_RISC_JUMP); btv->main.cpu[13] = cpu_to_le32(btv->main.dma + (14<<2)); /* jump back to top field */ btv->main.cpu[14] = cpu_to_le32(BT848_RISC_JUMP); btv->main.cpu[15] = cpu_to_le32(btv->main.dma + (0<<2)); return 0; } int bttv_risc_hook(struct bttv *btv, int slot, struct btcx_riscmem *risc, int irqflags) { unsigned long cmd; unsigned long next = btv->main.dma + 
((slot+2) << 2); if (NULL == risc) { d2printk(KERN_DEBUG "bttv%d: risc=%p slot[%d]=NULL\n", btv->c.nr,risc,slot); btv->main.cpu[slot+1] = cpu_to_le32(next); } else { d2printk(KERN_DEBUG "bttv%d: risc=%p slot[%d]=%08Lx irq=%d\n", btv->c.nr,risc,slot,(unsigned long long)risc->dma,irqflags); cmd = BT848_RISC_JUMP; if (irqflags) { cmd |= BT848_RISC_IRQ; cmd |= (irqflags & 0x0f) << 16; cmd |= (~irqflags & 0x0f) << 20; } risc->jmp[0] = cpu_to_le32(cmd); risc->jmp[1] = cpu_to_le32(next); btv->main.cpu[slot+1] = cpu_to_le32(risc->dma); } return 0; } void bttv_dma_free(struct videobuf_queue *q,struct bttv *btv, struct bttv_buffer *buf) { struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); BUG_ON(in_interrupt()); videobuf_waiton(q, &buf->vb, 0, 0); videobuf_dma_unmap(q->dev, dma); videobuf_dma_free(dma); btcx_riscmem_free(btv->c.pci,&buf->bottom); btcx_riscmem_free(btv->c.pci,&buf->top); buf->vb.state = VIDEOBUF_NEEDS_INIT; } int bttv_buffer_activate_vbi(struct bttv *btv, struct bttv_buffer *vbi) { struct btcx_riscmem *top; struct btcx_riscmem *bottom; int top_irq_flags; int bottom_irq_flags; top = NULL; bottom = NULL; top_irq_flags = 0; bottom_irq_flags = 0; if (vbi) { unsigned int crop, vdelay; vbi->vb.state = VIDEOBUF_ACTIVE; list_del(&vbi->vb.queue); /* VDELAY is start of video, end of VBI capturing. 
*/ crop = btread(BT848_E_CROP); vdelay = btread(BT848_E_VDELAY_LO) + ((crop & 0xc0) << 2); if (vbi->geo.vdelay > vdelay) { vdelay = vbi->geo.vdelay & 0xfe; crop = (crop & 0x3f) | ((vbi->geo.vdelay >> 2) & 0xc0); btwrite(vdelay, BT848_E_VDELAY_LO); btwrite(crop, BT848_E_CROP); btwrite(vdelay, BT848_O_VDELAY_LO); btwrite(crop, BT848_O_CROP); } if (vbi->vbi_count[0] > 0) { top = &vbi->top; top_irq_flags = 4; } if (vbi->vbi_count[1] > 0) { top_irq_flags = 0; bottom = &vbi->bottom; bottom_irq_flags = 4; } } bttv_risc_hook(btv, RISC_SLOT_O_VBI, top, top_irq_flags); bttv_risc_hook(btv, RISC_SLOT_E_VBI, bottom, bottom_irq_flags); return 0; } int bttv_buffer_activate_video(struct bttv *btv, struct bttv_buffer_set *set) { /* video capture */ if (NULL != set->top && NULL != set->bottom) { if (set->top == set->bottom) { set->top->vb.state = VIDEOBUF_ACTIVE; if (set->top->vb.queue.next) list_del(&set->top->vb.queue); } else { set->top->vb.state = VIDEOBUF_ACTIVE; set->bottom->vb.state = VIDEOBUF_ACTIVE; if (set->top->vb.queue.next) list_del(&set->top->vb.queue); if (set->bottom->vb.queue.next) list_del(&set->bottom->vb.queue); } bttv_apply_geo(btv, &set->top->geo, 1); bttv_apply_geo(btv, &set->bottom->geo,0); bttv_risc_hook(btv, RISC_SLOT_O_FIELD, &set->top->top, set->top_irq); bttv_risc_hook(btv, RISC_SLOT_E_FIELD, &set->bottom->bottom, set->frame_irq); btaor((set->top->btformat & 0xf0) | (set->bottom->btformat & 0x0f), ~0xff, BT848_COLOR_FMT); btaor((set->top->btswap & 0x0a) | (set->bottom->btswap & 0x05), ~0x0f, BT848_COLOR_CTL); } else if (NULL != set->top) { set->top->vb.state = VIDEOBUF_ACTIVE; if (set->top->vb.queue.next) list_del(&set->top->vb.queue); bttv_apply_geo(btv, &set->top->geo,1); bttv_apply_geo(btv, &set->top->geo,0); bttv_risc_hook(btv, RISC_SLOT_O_FIELD, &set->top->top, set->frame_irq); bttv_risc_hook(btv, RISC_SLOT_E_FIELD, NULL, 0); btaor(set->top->btformat & 0xff, ~0xff, BT848_COLOR_FMT); btaor(set->top->btswap & 0x0f, ~0x0f, BT848_COLOR_CTL); } else if 
(NULL != set->bottom) { set->bottom->vb.state = VIDEOBUF_ACTIVE; if (set->bottom->vb.queue.next) list_del(&set->bottom->vb.queue); bttv_apply_geo(btv, &set->bottom->geo,1); bttv_apply_geo(btv, &set->bottom->geo,0); bttv_risc_hook(btv, RISC_SLOT_O_FIELD, NULL, 0); bttv_risc_hook(btv, RISC_SLOT_E_FIELD, &set->bottom->bottom, set->frame_irq); btaor(set->bottom->btformat & 0xff, ~0xff, BT848_COLOR_FMT); btaor(set->bottom->btswap & 0x0f, ~0x0f, BT848_COLOR_CTL); } else { bttv_risc_hook(btv, RISC_SLOT_O_FIELD, NULL, 0); bttv_risc_hook(btv, RISC_SLOT_E_FIELD, NULL, 0); } return 0; } /* ---------------------------------------------------------- */ /* calculate geometry, build risc code */ int bttv_buffer_risc(struct bttv *btv, struct bttv_buffer *buf) { const struct bttv_tvnorm *tvnorm = bttv_tvnorms + buf->tvnorm; struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); dprintk(KERN_DEBUG "bttv%d: buffer field: %s format: %s size: %dx%d\n", btv->c.nr, v4l2_field_names[buf->vb.field], buf->fmt->name, buf->vb.width, buf->vb.height); /* packed pixel modes */ if (buf->fmt->flags & FORMAT_FLAGS_PACKED) { int bpl = (buf->fmt->depth >> 3) * buf->vb.width; int bpf = bpl * (buf->vb.height >> 1); bttv_calc_geo(btv,&buf->geo,buf->vb.width,buf->vb.height, V4L2_FIELD_HAS_BOTH(buf->vb.field), tvnorm,&buf->crop); switch (buf->vb.field) { case V4L2_FIELD_TOP: bttv_risc_packed(btv,&buf->top,dma->sglist, /* offset */ 0,bpl, /* padding */ 0,/* skip_lines */ 0, buf->vb.height); break; case V4L2_FIELD_BOTTOM: bttv_risc_packed(btv,&buf->bottom,dma->sglist, 0,bpl,0,0,buf->vb.height); break; case V4L2_FIELD_INTERLACED: bttv_risc_packed(btv,&buf->top,dma->sglist, 0,bpl,bpl,0,buf->vb.height >> 1); bttv_risc_packed(btv,&buf->bottom,dma->sglist, bpl,bpl,bpl,0,buf->vb.height >> 1); break; case V4L2_FIELD_SEQ_TB: bttv_risc_packed(btv,&buf->top,dma->sglist, 0,bpl,0,0,buf->vb.height >> 1); bttv_risc_packed(btv,&buf->bottom,dma->sglist, bpf,bpl,0,0,buf->vb.height >> 1); break; default: BUG(); } } /* planar 
modes */ if (buf->fmt->flags & FORMAT_FLAGS_PLANAR) { int uoffset, voffset; int ypadding, cpadding, lines; /* calculate chroma offsets */ uoffset = buf->vb.width * buf->vb.height; voffset = buf->vb.width * buf->vb.height; if (buf->fmt->flags & FORMAT_FLAGS_CrCb) { /* Y-Cr-Cb plane order */ uoffset >>= buf->fmt->hshift; uoffset >>= buf->fmt->vshift; uoffset += voffset; } else { /* Y-Cb-Cr plane order */ voffset >>= buf->fmt->hshift; voffset >>= buf->fmt->vshift; voffset += uoffset; } switch (buf->vb.field) { case V4L2_FIELD_TOP: bttv_calc_geo(btv,&buf->geo,buf->vb.width, buf->vb.height,/* both_fields */ 0, tvnorm,&buf->crop); bttv_risc_planar(btv, &buf->top, dma->sglist, 0,buf->vb.width,0,buf->vb.height, uoffset,voffset,buf->fmt->hshift, buf->fmt->vshift,0); break; case V4L2_FIELD_BOTTOM: bttv_calc_geo(btv,&buf->geo,buf->vb.width, buf->vb.height,0, tvnorm,&buf->crop); bttv_risc_planar(btv, &buf->bottom, dma->sglist, 0,buf->vb.width,0,buf->vb.height, uoffset,voffset,buf->fmt->hshift, buf->fmt->vshift,0); break; case V4L2_FIELD_INTERLACED: bttv_calc_geo(btv,&buf->geo,buf->vb.width, buf->vb.height,1, tvnorm,&buf->crop); lines = buf->vb.height >> 1; ypadding = buf->vb.width; cpadding = buf->vb.width >> buf->fmt->hshift; bttv_risc_planar(btv,&buf->top, dma->sglist, 0,buf->vb.width,ypadding,lines, uoffset,voffset, buf->fmt->hshift, buf->fmt->vshift, cpadding); bttv_risc_planar(btv,&buf->bottom, dma->sglist, ypadding,buf->vb.width,ypadding,lines, uoffset+cpadding, voffset+cpadding, buf->fmt->hshift, buf->fmt->vshift, cpadding); break; case V4L2_FIELD_SEQ_TB: bttv_calc_geo(btv,&buf->geo,buf->vb.width, buf->vb.height,1, tvnorm,&buf->crop); lines = buf->vb.height >> 1; ypadding = buf->vb.width; cpadding = buf->vb.width >> buf->fmt->hshift; bttv_risc_planar(btv,&buf->top, dma->sglist, 0,buf->vb.width,0,lines, uoffset >> 1, voffset >> 1, buf->fmt->hshift, buf->fmt->vshift, 0); bttv_risc_planar(btv,&buf->bottom, dma->sglist, lines * ypadding,buf->vb.width,0,lines, lines * 
ypadding + (uoffset >> 1), lines * ypadding + (voffset >> 1), buf->fmt->hshift, buf->fmt->vshift, 0); break; default: BUG(); } } /* raw data */ if (buf->fmt->flags & FORMAT_FLAGS_RAW) { /* build risc code */ buf->vb.field = V4L2_FIELD_SEQ_TB; bttv_calc_geo(btv,&buf->geo,tvnorm->swidth,tvnorm->sheight, 1,tvnorm,&buf->crop); bttv_risc_packed(btv, &buf->top, dma->sglist, /* offset */ 0, RAW_BPL, /* padding */ 0, /* skip_lines */ 0, RAW_LINES); bttv_risc_packed(btv, &buf->bottom, dma->sglist, buf->vb.size/2 , RAW_BPL, 0, 0, RAW_LINES); } /* copy format info */ buf->btformat = buf->fmt->btformat; buf->btswap = buf->fmt->btswap; return 0; } /* ---------------------------------------------------------- */ /* calculate geometry, build risc code */ int bttv_overlay_risc(struct bttv *btv, struct bttv_overlay *ov, const struct bttv_format *fmt, struct bttv_buffer *buf) { /* check interleave, bottom+top fields */ dprintk(KERN_DEBUG "bttv%d: overlay fields: %s format: %s size: %dx%d\n", btv->c.nr, v4l2_field_names[buf->vb.field], fmt->name,ov->w.width,ov->w.height); /* calculate geometry */ bttv_calc_geo(btv,&buf->geo,ov->w.width,ov->w.height, V4L2_FIELD_HAS_BOTH(ov->field), &bttv_tvnorms[ov->tvnorm],&buf->crop); /* build risc code */ switch (ov->field) { case V4L2_FIELD_TOP: bttv_risc_overlay(btv, &buf->top, fmt, ov, 0, 0); break; case V4L2_FIELD_BOTTOM: bttv_risc_overlay(btv, &buf->bottom, fmt, ov, 0, 0); break; case V4L2_FIELD_INTERLACED: bttv_risc_overlay(btv, &buf->top, fmt, ov, 0, 1); bttv_risc_overlay(btv, &buf->bottom, fmt, ov, 1, 0); break; default: BUG(); } /* copy format info */ buf->btformat = fmt->btformat; buf->btswap = fmt->btswap; buf->vb.field = ov->field; return 0; } /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
SerenityS/Solid_Kernel-Stock-KK
arch/x86/xen/mmu.c
3137
59104
/* * Xen mmu operations * * This file contains the various mmu fetch and update operations. * The most important job they must perform is the mapping between the * domain's pfn and the overall machine mfns. * * Xen allows guests to directly update the pagetable, in a controlled * fashion. In other words, the guest modifies the same pagetable * that the CPU actually uses, which eliminates the overhead of having * a separate shadow pagetable. * * In order to allow this, it falls on the guest domain to map its * notion of a "physical" pfn - which is just a domain-local linear * address - into a real "machine address" which the CPU's MMU can * use. * * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be * inserted directly into the pagetable. When creating a new * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely, * when reading the content back with __(pgd|pmd|pte)_val, it converts * the mfn back into a pfn. * * The other constraint is that all pages which make up a pagetable * must be mapped read-only in the guest. This prevents uncontrolled * guest updates to the pagetable. Xen strictly enforces this, and * will disallow any pagetable update which will end up mapping a * pagetable page RW, and will disallow using any writable page as a * pagetable. * * Naively, when loading %cr3 with the base of a new pagetable, Xen * would need to validate the whole pagetable before going on. * Naturally, this is quite slow. The solution is to "pin" a * pagetable, which enforces all the constraints on the pagetable even * when it is not actively in use. This menas that Xen can be assured * that it is still valid when you do load it into %cr3, and doesn't * need to revalidate it. 
* * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 */ #include <linux/sched.h> #include <linux/highmem.h> #include <linux/debugfs.h> #include <linux/bug.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/gfp.h> #include <linux/memblock.h> #include <linux/seq_file.h> #include <trace/events/xen.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include <asm/fixmap.h> #include <asm/mmu_context.h> #include <asm/setup.h> #include <asm/paravirt.h> #include <asm/e820.h> #include <asm/linkage.h> #include <asm/page.h> #include <asm/init.h> #include <asm/pat.h> #include <asm/smp.h> #include <asm/xen/hypercall.h> #include <asm/xen/hypervisor.h> #include <xen/xen.h> #include <xen/page.h> #include <xen/interface/xen.h> #include <xen/interface/hvm/hvm_op.h> #include <xen/interface/version.h> #include <xen/interface/memory.h> #include <xen/hvc-console.h> #include "multicalls.h" #include "mmu.h" #include "debugfs.h" /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and balloon lists. */ DEFINE_SPINLOCK(xen_reservation_lock); /* * Identity map, in addition to plain kernel map. This needs to be * large enough to allocate page table pages to allocate the rest. * Each page can map 2MB. */ #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4) static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES); #ifdef CONFIG_X86_64 /* l3 pud for userspace vsyscall mapping */ static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss; #endif /* CONFIG_X86_64 */ /* * Note about cr3 (pagetable base) values: * * xen_cr3 contains the current logical cr3 value; it contains the * last set cr3. This may not be the current effective cr3, because * its update may be being lazily deferred. However, a vcpu looking * at its own cr3 can use this value knowing that it everything will * be self-consistent. 
* * xen_current_cr3 contains the actual vcpu cr3; it is set once the * hypercall to set the vcpu cr3 is complete (so it may be a little * out of date, but it will never be set early). If one vcpu is * looking at another vcpu's cr3 value, it should use this variable. */ DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */ DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */ /* * Just beyond the highest usermode address. STACK_TOP_MAX has a * redzone above it, so round it up to a PGD boundary. */ #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK) unsigned long arbitrary_virt_to_mfn(void *vaddr) { xmaddr_t maddr = arbitrary_virt_to_machine(vaddr); return PFN_DOWN(maddr.maddr); } xmaddr_t arbitrary_virt_to_machine(void *vaddr) { unsigned long address = (unsigned long)vaddr; unsigned int level; pte_t *pte; unsigned offset; /* * if the PFN is in the linear mapped vaddr range, we can just use * the (quick) virt_to_machine() p2m lookup */ if (virt_addr_valid(vaddr)) return virt_to_machine(vaddr); /* otherwise we have to do a (slower) full page-table walk */ pte = lookup_address(address, &level); BUG_ON(pte == NULL); offset = address & ~PAGE_MASK; return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset); } EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); void make_lowmem_page_readonly(void *vaddr) { pte_t *pte, ptev; unsigned long address = (unsigned long)vaddr; unsigned int level; pte = lookup_address(address, &level); if (pte == NULL) return; /* vaddr missing */ ptev = pte_wrprotect(*pte); if (HYPERVISOR_update_va_mapping(address, ptev, 0)) BUG(); } void make_lowmem_page_readwrite(void *vaddr) { pte_t *pte, ptev; unsigned long address = (unsigned long)vaddr; unsigned int level; pte = lookup_address(address, &level); if (pte == NULL) return; /* vaddr missing */ ptev = pte_mkwrite(*pte); if (HYPERVISOR_update_va_mapping(address, ptev, 0)) BUG(); } static bool xen_page_pinned(void *ptr) { struct page *page = 
virt_to_page(ptr); return PagePinned(page); } void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) { struct multicall_space mcs; struct mmu_update *u; trace_xen_mmu_set_domain_pte(ptep, pteval, domid); mcs = xen_mc_entry(sizeof(*u)); u = mcs.args; /* ptep might be kmapped when using 32-bit HIGHPTE */ u->ptr = virt_to_machine(ptep).maddr; u->val = pte_val_ma(pteval); MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid); xen_mc_issue(PARAVIRT_LAZY_MMU); } EXPORT_SYMBOL_GPL(xen_set_domain_pte); static void xen_extend_mmu_update(const struct mmu_update *update) { struct multicall_space mcs; struct mmu_update *u; mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u)); if (mcs.mc != NULL) { mcs.mc->args[1]++; } else { mcs = __xen_mc_entry(sizeof(*u)); MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); } u = mcs.args; *u = *update; } static void xen_extend_mmuext_op(const struct mmuext_op *op) { struct multicall_space mcs; struct mmuext_op *u; mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u)); if (mcs.mc != NULL) { mcs.mc->args[1]++; } else { mcs = __xen_mc_entry(sizeof(*u)); MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); } u = mcs.args; *u = *op; } static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) { struct mmu_update u; preempt_disable(); xen_mc_batch(); /* ptr may be ioremapped for 64-bit pagetable setup */ u.ptr = arbitrary_virt_to_machine(ptr).maddr; u.val = pmd_val_ma(val); xen_extend_mmu_update(&u); xen_mc_issue(PARAVIRT_LAZY_MMU); preempt_enable(); } static void xen_set_pmd(pmd_t *ptr, pmd_t val) { trace_xen_mmu_set_pmd(ptr, val); /* If page is not pinned, we can just update the entry directly */ if (!xen_page_pinned(ptr)) { *ptr = val; return; } xen_set_pmd_hyper(ptr, val); } /* * Associate a virtual page frame with a given physical page frame * and protection flags for that frame. 
*/ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags) { set_pte_vaddr(vaddr, mfn_pte(mfn, flags)); } static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval) { struct mmu_update u; if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) return false; xen_mc_batch(); u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; u.val = pte_val_ma(pteval); xen_extend_mmu_update(&u); xen_mc_issue(PARAVIRT_LAZY_MMU); return true; } static inline void __xen_set_pte(pte_t *ptep, pte_t pteval) { if (!xen_batched_set_pte(ptep, pteval)) native_set_pte(ptep, pteval); } static void xen_set_pte(pte_t *ptep, pte_t pteval) { trace_xen_mmu_set_pte(ptep, pteval); __xen_set_pte(ptep, pteval); } static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval); __xen_set_pte(ptep, pteval); } pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { /* Just return the pte as-is. We preserve the bits on commit */ trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep); return *ptep; } void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { struct mmu_update u; trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte); xen_mc_batch(); u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; u.val = pte_val_ma(pte); xen_extend_mmu_update(&u); xen_mc_issue(PARAVIRT_LAZY_MMU); } /* Assume pteval_t is equivalent to all the other *val_t types. 
*/ static pteval_t pte_mfn_to_pfn(pteval_t val) { if (val & _PAGE_PRESENT) { unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; unsigned long pfn = mfn_to_pfn(mfn); pteval_t flags = val & PTE_FLAGS_MASK; if (unlikely(pfn == ~0)) val = flags & ~_PAGE_PRESENT; else val = ((pteval_t)pfn << PAGE_SHIFT) | flags; } return val; } static pteval_t pte_pfn_to_mfn(pteval_t val) { if (val & _PAGE_PRESENT) { unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; pteval_t flags = val & PTE_FLAGS_MASK; unsigned long mfn; if (!xen_feature(XENFEAT_auto_translated_physmap)) mfn = get_phys_to_machine(pfn); else mfn = pfn; /* * If there's no mfn for the pfn, then just create an * empty non-present pte. Unfortunately this loses * information about the original pfn, so * pte_mfn_to_pfn is asymmetric. */ if (unlikely(mfn == INVALID_P2M_ENTRY)) { mfn = 0; flags = 0; } else { /* * Paramount to do this test _after_ the * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY & * IDENTITY_FRAME_BIT resolves to true. */ mfn &= ~FOREIGN_FRAME_BIT; if (mfn & IDENTITY_FRAME_BIT) { mfn &= ~IDENTITY_FRAME_BIT; flags |= _PAGE_IOMAP; } } val = ((pteval_t)mfn << PAGE_SHIFT) | flags; } return val; } static pteval_t iomap_pte(pteval_t val) { if (val & _PAGE_PRESENT) { unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; pteval_t flags = val & PTE_FLAGS_MASK; /* We assume the pte frame number is a MFN, so just use it as-is. 
*/ val = ((pteval_t)pfn << PAGE_SHIFT) | flags; } return val; } static pteval_t xen_pte_val(pte_t pte) { pteval_t pteval = pte.pte; #if 0 /* If this is a WC pte, convert back from Xen WC to Linux WC */ if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) { WARN_ON(!pat_enabled); pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT; } #endif if (xen_initial_domain() && (pteval & _PAGE_IOMAP)) return pteval; return pte_mfn_to_pfn(pteval); } PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); static pgdval_t xen_pgd_val(pgd_t pgd) { return pte_mfn_to_pfn(pgd.pgd); } PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val); /* * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7 * are reserved for now, to correspond to the Intel-reserved PAT * types. * * We expect Linux's PAT set as follows: * * Idx PTE flags Linux Xen Default * 0 WB WB WB * 1 PWT WC WT WT * 2 PCD UC- UC- UC- * 3 PCD PWT UC UC UC * 4 PAT WB WC WB * 5 PAT PWT WC WP WT * 6 PAT PCD UC- UC UC- * 7 PAT PCD PWT UC UC UC */ void xen_set_pat(u64 pat) { /* We expect Linux to use a PAT setting of * UC UC- WC WB (ignoring the PAT flag) */ WARN_ON(pat != 0x0007010600070106ull); } static pte_t xen_make_pte(pteval_t pte) { phys_addr_t addr = (pte & PTE_PFN_MASK); #if 0 /* If Linux is trying to set a WC pte, then map to the Xen WC. * If _PAGE_PAT is set, then it probably means it is really * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope * things work out OK... * * (We should never see kernel mappings with _PAGE_PSE set, * but we could see hugetlbfs mappings, I think.). */ if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) { if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT) pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT; } #endif /* * Unprivileged domains are allowed to do IOMAPpings for * PCI passthrough, but not map ISA space. The ISA * mappings are just dummy local mappings to keep other * parts of the kernel happy. 
*/ if (unlikely(pte & _PAGE_IOMAP) && (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { pte = iomap_pte(pte); } else { pte &= ~_PAGE_IOMAP; pte = pte_pfn_to_mfn(pte); } return native_make_pte(pte); } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); static pgd_t xen_make_pgd(pgdval_t pgd) { pgd = pte_pfn_to_mfn(pgd); return native_make_pgd(pgd); } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); static pmdval_t xen_pmd_val(pmd_t pmd) { return pte_mfn_to_pfn(pmd.pmd); } PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val); static void xen_set_pud_hyper(pud_t *ptr, pud_t val) { struct mmu_update u; preempt_disable(); xen_mc_batch(); /* ptr may be ioremapped for 64-bit pagetable setup */ u.ptr = arbitrary_virt_to_machine(ptr).maddr; u.val = pud_val_ma(val); xen_extend_mmu_update(&u); xen_mc_issue(PARAVIRT_LAZY_MMU); preempt_enable(); } static void xen_set_pud(pud_t *ptr, pud_t val) { trace_xen_mmu_set_pud(ptr, val); /* If page is not pinned, we can just update the entry directly */ if (!xen_page_pinned(ptr)) { *ptr = val; return; } xen_set_pud_hyper(ptr, val); } #ifdef CONFIG_X86_PAE static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) { trace_xen_mmu_set_pte_atomic(ptep, pte); set_64bit((u64 *)ptep, native_pte_val(pte)); } static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { trace_xen_mmu_pte_clear(mm, addr, ptep); if (!xen_batched_set_pte(ptep, native_make_pte(0))) native_pte_clear(mm, addr, ptep); } static void xen_pmd_clear(pmd_t *pmdp) { trace_xen_mmu_pmd_clear(pmdp); set_pmd(pmdp, __pmd(0)); } #endif /* CONFIG_X86_PAE */ static pmd_t xen_make_pmd(pmdval_t pmd) { pmd = pte_pfn_to_mfn(pmd); return native_make_pmd(pmd); } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); #if PAGETABLE_LEVELS == 4 static pudval_t xen_pud_val(pud_t pud) { return pte_mfn_to_pfn(pud.pud); } PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); static pud_t xen_make_pud(pudval_t pud) { pud = pte_pfn_to_mfn(pud); return native_make_pud(pud); } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud); static pgd_t 
*xen_get_user_pgd(pgd_t *pgd) { pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); unsigned offset = pgd - pgd_page; pgd_t *user_ptr = NULL; if (offset < pgd_index(USER_LIMIT)) { struct page *page = virt_to_page(pgd_page); user_ptr = (pgd_t *)page->private; if (user_ptr) user_ptr += offset; } return user_ptr; } static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) { struct mmu_update u; u.ptr = virt_to_machine(ptr).maddr; u.val = pgd_val_ma(val); xen_extend_mmu_update(&u); } /* * Raw hypercall-based set_pgd, intended for in early boot before * there's a page structure. This implies: * 1. The only existing pagetable is the kernel's * 2. It is always pinned * 3. It has no user pagetable attached to it */ static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) { preempt_disable(); xen_mc_batch(); __xen_set_pgd_hyper(ptr, val); xen_mc_issue(PARAVIRT_LAZY_MMU); preempt_enable(); } static void xen_set_pgd(pgd_t *ptr, pgd_t val) { pgd_t *user_ptr = xen_get_user_pgd(ptr); trace_xen_mmu_set_pgd(ptr, user_ptr, val); /* If page is not pinned, we can just update the entry directly */ if (!xen_page_pinned(ptr)) { *ptr = val; if (user_ptr) { WARN_ON(xen_page_pinned(user_ptr)); *user_ptr = val; } return; } /* If it's pinned, then we can at least batch the kernel and user updates together. */ xen_mc_batch(); __xen_set_pgd_hyper(ptr, val); if (user_ptr) __xen_set_pgd_hyper(user_ptr, val); xen_mc_issue(PARAVIRT_LAZY_MMU); } #endif /* PAGETABLE_LEVELS == 4 */ /* * (Yet another) pagetable walker. This one is intended for pinning a * pagetable. This means that it walks a pagetable and calls the * callback function on each page it finds making up the page table, * at every level. It walks the entire pagetable, but it only bothers * pinning pte pages which are below limit. In the normal case this * will be STACK_TOP_MAX, but at boot we need to pin up to * FIXADDR_TOP. 
* * For 32-bit the important bit is that we don't pin beyond there, * because then we start getting into Xen's ptes. * * For 64-bit, we must skip the Xen hole in the middle of the address * space, just after the big x86-64 virtual hole. */ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, int (*func)(struct mm_struct *mm, struct page *, enum pt_level), unsigned long limit) { int flush = 0; unsigned hole_low, hole_high; unsigned pgdidx_limit, pudidx_limit, pmdidx_limit; unsigned pgdidx, pudidx, pmdidx; /* The limit is the last byte to be touched */ limit--; BUG_ON(limit >= FIXADDR_TOP); if (xen_feature(XENFEAT_auto_translated_physmap)) return 0; /* * 64-bit has a great big hole in the middle of the address * space, which contains the Xen mappings. On 32-bit these * will end up making a zero-sized hole and so is a no-op. */ hole_low = pgd_index(USER_LIMIT); hole_high = pgd_index(PAGE_OFFSET); pgdidx_limit = pgd_index(limit); #if PTRS_PER_PUD > 1 pudidx_limit = pud_index(limit); #else pudidx_limit = 0; #endif #if PTRS_PER_PMD > 1 pmdidx_limit = pmd_index(limit); #else pmdidx_limit = 0; #endif for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) { pud_t *pud; if (pgdidx >= hole_low && pgdidx < hole_high) continue; if (!pgd_val(pgd[pgdidx])) continue; pud = pud_offset(&pgd[pgdidx], 0); if (PTRS_PER_PUD > 1) /* not folded */ flush |= (*func)(mm, virt_to_page(pud), PT_PUD); for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) { pmd_t *pmd; if (pgdidx == pgdidx_limit && pudidx > pudidx_limit) goto out; if (pud_none(pud[pudidx])) continue; pmd = pmd_offset(&pud[pudidx], 0); if (PTRS_PER_PMD > 1) /* not folded */ flush |= (*func)(mm, virt_to_page(pmd), PT_PMD); for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) { struct page *pte; if (pgdidx == pgdidx_limit && pudidx == pudidx_limit && pmdidx > pmdidx_limit) goto out; if (pmd_none(pmd[pmdidx])) continue; pte = pmd_page(pmd[pmdidx]); flush |= (*func)(mm, pte, PT_PTE); } } } out: /* Do the top level last, so that the 
callbacks can use it as a cue to do final things like tlb flushes. */ flush |= (*func)(mm, virt_to_page(pgd), PT_PGD); return flush; } static int xen_pgd_walk(struct mm_struct *mm, int (*func)(struct mm_struct *mm, struct page *, enum pt_level), unsigned long limit) { return __xen_pgd_walk(mm, mm->pgd, func, limit); } /* If we're using split pte locks, then take the page's lock and return a pointer to it. Otherwise return NULL. */ static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm) { spinlock_t *ptl = NULL; #if USE_SPLIT_PTLOCKS ptl = __pte_lockptr(page); spin_lock_nest_lock(ptl, &mm->page_table_lock); #endif return ptl; } static void xen_pte_unlock(void *v) { spinlock_t *ptl = v; spin_unlock(ptl); } static void xen_do_pin(unsigned level, unsigned long pfn) { struct mmuext_op op; op.cmd = level; op.arg1.mfn = pfn_to_mfn(pfn); xen_extend_mmuext_op(&op); } static int xen_pin_page(struct mm_struct *mm, struct page *page, enum pt_level level) { unsigned pgfl = TestSetPagePinned(page); int flush; if (pgfl) flush = 0; /* already pinned */ else if (PageHighMem(page)) /* kmaps need flushing if we found an unpinned highpage */ flush = 1; else { void *pt = lowmem_page_address(page); unsigned long pfn = page_to_pfn(page); struct multicall_space mcs = __xen_mc_entry(0); spinlock_t *ptl; flush = 0; /* * We need to hold the pagetable lock between the time * we make the pagetable RO and when we actually pin * it. If we don't, then other users may come in and * attempt to update the pagetable by writing it, * which will fail because the memory is RO but not * pinned, so Xen won't do the trap'n'emulate. * * If we're using split pte locks, we can't hold the * entire pagetable's worth of locks during the * traverse, because we may wrap the preempt count (8 * bits). The solution is to mark RO and pin each PTE * page while holding the lock. This means the number * of locks we end up holding is never more than a * batch size (~32 entries, at present). 
* * If we're not using split pte locks, we needn't pin * the PTE pages independently, because we're * protected by the overall pagetable lock. */ ptl = NULL; if (level == PT_PTE) ptl = xen_pte_lock(page, mm); MULTI_update_va_mapping(mcs.mc, (unsigned long)pt, pfn_pte(pfn, PAGE_KERNEL_RO), level == PT_PGD ? UVMF_TLB_FLUSH : 0); if (ptl) { xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn); /* Queue a deferred unlock for when this batch is completed. */ xen_mc_callback(xen_pte_unlock, ptl); } } return flush; } /* This is called just after a mm has been created, but it has not been used yet. We need to make sure that its pagetable is all read-only, and can be pinned. */ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) { trace_xen_mmu_pgd_pin(mm, pgd); xen_mc_batch(); if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) { /* re-enable interrupts for flushing */ xen_mc_issue(0); kmap_flush_unused(); xen_mc_batch(); } #ifdef CONFIG_X86_64 { pgd_t *user_pgd = xen_get_user_pgd(pgd); xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd))); if (user_pgd) { xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD); xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd))); } } #else /* CONFIG_X86_32 */ #ifdef CONFIG_X86_PAE /* Need to make sure unshared kernel PMD is pinnable */ xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), PT_PMD); #endif xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd))); #endif /* CONFIG_X86_64 */ xen_mc_issue(0); } static void xen_pgd_pin(struct mm_struct *mm) { __xen_pgd_pin(mm, mm->pgd); } /* * On save, we need to pin all pagetables to make sure they get their * mfns turned into pfns. Search the list for any unpinned pgds and pin * them (unpinned pgds are not currently in use, probably because the * process is under construction or destruction). * * Expected to be called in stop_machine() ("equivalent to taking * every spinlock in the system"), so the locking doesn't really * matter all that much. 
*/ void xen_mm_pin_all(void) { struct page *page; spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { if (!PagePinned(page)) { __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page)); SetPageSavePinned(page); } } spin_unlock(&pgd_lock); } /* * The init_mm pagetable is really pinned as soon as its created, but * that's before we have page structures to store the bits. So do all * the book-keeping now. */ static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page, enum pt_level level) { SetPagePinned(page); return 0; } static void __init xen_mark_init_mm_pinned(void) { xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); } static int xen_unpin_page(struct mm_struct *mm, struct page *page, enum pt_level level) { unsigned pgfl = TestClearPagePinned(page); if (pgfl && !PageHighMem(page)) { void *pt = lowmem_page_address(page); unsigned long pfn = page_to_pfn(page); spinlock_t *ptl = NULL; struct multicall_space mcs; /* * Do the converse to pin_page. If we're using split * pte locks, we must be holding the lock for while * the pte page is unpinned but still RO to prevent * concurrent updates from seeing it in this * partially-pinned state. */ if (level == PT_PTE) { ptl = xen_pte_lock(page, mm); if (ptl) xen_do_pin(MMUEXT_UNPIN_TABLE, pfn); } mcs = __xen_mc_entry(0); MULTI_update_va_mapping(mcs.mc, (unsigned long)pt, pfn_pte(pfn, PAGE_KERNEL), level == PT_PGD ? 
UVMF_TLB_FLUSH : 0); if (ptl) { /* unlock when batch completed */ xen_mc_callback(xen_pte_unlock, ptl); } } return 0; /* never need to flush on unpin */ } /* Release a pagetables pages back as normal RW */ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) { trace_xen_mmu_pgd_unpin(mm, pgd); xen_mc_batch(); xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); #ifdef CONFIG_X86_64 { pgd_t *user_pgd = xen_get_user_pgd(pgd); if (user_pgd) { xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd))); xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD); } } #endif #ifdef CONFIG_X86_PAE /* Need to make sure unshared kernel PMD is unpinned */ xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), PT_PMD); #endif __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT); xen_mc_issue(0); } static void xen_pgd_unpin(struct mm_struct *mm) { __xen_pgd_unpin(mm, mm->pgd); } /* * On resume, undo any pinning done at save, so that the rest of the * kernel doesn't see any unexpected pinned pagetables. */ void xen_mm_unpin_all(void) { struct page *page; spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { if (PageSavePinned(page)) { BUG_ON(!PagePinned(page)); __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page)); ClearPageSavePinned(page); } } spin_unlock(&pgd_lock); } static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) { spin_lock(&next->page_table_lock); xen_pgd_pin(next); spin_unlock(&next->page_table_lock); } static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { spin_lock(&mm->page_table_lock); xen_pgd_pin(mm); spin_unlock(&mm->page_table_lock); } #ifdef CONFIG_SMP /* Another cpu may still have their %cr3 pointing at the pagetable, so we need to repoint it somewhere else before we can unpin it. 
*/
/*
 * IPI callback: make the running CPU stop referencing @info (an
 * mm_struct) so that mm's pagetable can be unpinned.
 */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	/* A lazy-TLB CPU can simply switch away from the dying mm. */
	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

/*
 * Make sure no CPU (including this one) still has @mm's pagetable
 * loaded in cr3, so the pagetable can safely be unpinned.
 */
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	/* Deal with this CPU first. */
	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* No memory for a scratch mask: fall back to IPIing each
		   candidate CPU individually. */
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because its in lazy mode, and it hasn't yet flushed
	   its set of pending hypercalls yet.  In this case, we can
	   look at its actual current cr3 value, and force it to flush
	   if needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
/* UP build: only the current CPU can hold a reference to @mm's pagetable. */
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
* * Since we're pulling the pagetable down, we switch to use init_mm, * unpin old process pagetable and mark it all read-write, which * allows further operations on it to be simple memory accesses. * * The only subtle point is that another CPU may be still using the * pagetable because of lazy tlb flushing. This means we need need to * switch all CPUs off this pagetable before we can unpin it. */ static void xen_exit_mmap(struct mm_struct *mm) { get_cpu(); /* make sure we don't move around */ xen_drop_mm_ref(mm); put_cpu(); spin_lock(&mm->page_table_lock); /* pgd may not be pinned in the error exit path of execve */ if (xen_page_pinned(mm->pgd)) xen_pgd_unpin(mm); spin_unlock(&mm->page_table_lock); } static void __init xen_pagetable_setup_start(pgd_t *base) { } static __init void xen_mapping_pagetable_reserve(u64 start, u64 end) { /* reserve the range used */ native_pagetable_reserve(start, end); /* set as RW the rest */ printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end, PFN_PHYS(pgt_buf_top)); while (end < PFN_PHYS(pgt_buf_top)) { make_lowmem_page_readwrite(__va(end)); end += PAGE_SIZE; } } static void xen_post_allocator_init(void); static void __init xen_pagetable_setup_done(pgd_t *base) { xen_setup_shared_info(); xen_post_allocator_init(); } static void xen_write_cr2(unsigned long cr2) { this_cpu_read(xen_vcpu)->arch.cr2 = cr2; } static unsigned long xen_read_cr2(void) { return this_cpu_read(xen_vcpu)->arch.cr2; } unsigned long xen_read_cr2_direct(void) { return this_cpu_read(xen_vcpu_info.arch.cr2); } static void xen_flush_tlb(void) { struct mmuext_op *op; struct multicall_space mcs; trace_xen_mmu_flush_tlb(0); preempt_disable(); mcs = xen_mc_entry(sizeof(*op)); op = mcs.args; op->cmd = MMUEXT_TLB_FLUSH_LOCAL; MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); xen_mc_issue(PARAVIRT_LAZY_MMU); preempt_enable(); } static void xen_flush_tlb_single(unsigned long addr) { struct mmuext_op *op; struct multicall_space mcs; 
trace_xen_mmu_flush_tlb_single(addr); preempt_disable(); mcs = xen_mc_entry(sizeof(*op)); op = mcs.args; op->cmd = MMUEXT_INVLPG_LOCAL; op->arg1.linear_addr = addr & PAGE_MASK; MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); xen_mc_issue(PARAVIRT_LAZY_MMU); preempt_enable(); } static void xen_flush_tlb_others(const struct cpumask *cpus, struct mm_struct *mm, unsigned long va) { struct { struct mmuext_op op; #ifdef CONFIG_SMP DECLARE_BITMAP(mask, num_processors); #else DECLARE_BITMAP(mask, NR_CPUS); #endif } *args; struct multicall_space mcs; trace_xen_mmu_flush_tlb_others(cpus, mm, va); if (cpumask_empty(cpus)) return; /* nothing to do */ mcs = xen_mc_entry(sizeof(*args)); args = mcs.args; args->op.arg2.vcpumask = to_cpumask(args->mask); /* Remove us, and any offline CPUS. */ cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); if (va == TLB_FLUSH_ALL) { args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; } else { args->op.cmd = MMUEXT_INVLPG_MULTI; args->op.arg1.linear_addr = va; } MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF); xen_mc_issue(PARAVIRT_LAZY_MMU); } static unsigned long xen_read_cr3(void) { return this_cpu_read(xen_cr3); } static void set_current_cr3(void *v) { this_cpu_write(xen_current_cr3, (unsigned long)v); } static void __xen_write_cr3(bool kernel, unsigned long cr3) { struct mmuext_op op; unsigned long mfn; trace_xen_mmu_write_cr3(kernel, cr3); if (cr3) mfn = pfn_to_mfn(PFN_DOWN(cr3)); else mfn = 0; WARN_ON(mfn == 0 && kernel); op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR; op.arg1.mfn = mfn; xen_extend_mmuext_op(&op); if (kernel) { this_cpu_write(xen_cr3, cr3); /* Update xen_current_cr3 once the batch has actually been submitted. 
*/ xen_mc_callback(set_current_cr3, (void *)cr3); } } static void xen_write_cr3(unsigned long cr3) { BUG_ON(preemptible()); xen_mc_batch(); /* disables interrupts */ /* Update while interrupts are disabled, so its atomic with respect to ipis */ this_cpu_write(xen_cr3, cr3); __xen_write_cr3(true, cr3); #ifdef CONFIG_X86_64 { pgd_t *user_pgd = xen_get_user_pgd(__va(cr3)); if (user_pgd) __xen_write_cr3(false, __pa(user_pgd)); else __xen_write_cr3(false, 0); } #endif xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ } static int xen_pgd_alloc(struct mm_struct *mm) { pgd_t *pgd = mm->pgd; int ret = 0; BUG_ON(PagePinned(virt_to_page(pgd))); #ifdef CONFIG_X86_64 { struct page *page = virt_to_page(pgd); pgd_t *user_pgd; BUG_ON(page->private != 0); ret = -ENOMEM; user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); page->private = (unsigned long)user_pgd; if (user_pgd != NULL) { user_pgd[pgd_index(VSYSCALL_START)] = __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE); ret = 0; } BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); } #endif return ret; } static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) { #ifdef CONFIG_X86_64 pgd_t *user_pgd = xen_get_user_pgd(pgd); if (user_pgd) free_page((unsigned long)user_pgd); #endif } #ifdef CONFIG_X86_32 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) { /* If there's an existing pte, then don't allow _PAGE_RW to be set */ if (pte_val_ma(*ptep) & _PAGE_PRESENT) pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & pte_val_ma(pte)); return pte; } #else /* CONFIG_X86_64 */ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) { unsigned long pfn = pte_pfn(pte); /* * If the new pfn is within the range of the newly allocated * kernel pagetable, and it isn't being mapped into an * early_ioremap fixmap slot as a freshly allocated page, make sure * it is RO. 
*/ if (((!is_early_ioremap_ptep(ptep) && pfn >= pgt_buf_start && pfn < pgt_buf_top)) || (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1))) pte = pte_wrprotect(pte); return pte; } #endif /* CONFIG_X86_64 */ /* Init-time set_pte while constructing initial pagetables, which doesn't allow RO pagetable pages to be remapped RW */ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) { pte = mask_rw_pte(ptep, pte); xen_set_pte(ptep, pte); } static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) { struct mmuext_op op; op.cmd = cmd; op.arg1.mfn = pfn_to_mfn(pfn); if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) BUG(); } /* Early in boot, while setting up the initial pagetable, assume everything is pinned. */ static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) { #ifdef CONFIG_FLATMEM BUG_ON(mem_map); /* should only be used early */ #endif make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); } /* Used for pmd and pud */ static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) { #ifdef CONFIG_FLATMEM BUG_ON(mem_map); /* should only be used early */ #endif make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); } /* Early release_pte assumes that all pts are pinned, since there's only init_mm and anything attached to that is pinned. 
*/
static void __init xen_release_pte_init(unsigned long pfn)
{
	/* Unpin first; the page must not become writable while pinned. */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static void __init xen_release_pmd_init(unsigned long pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

/* Batched (multicall) counterpart of pin_pagetable_pfn(). */
static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct multicall_space mcs;
	struct mmuext_op *op;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = cmd;
	op->arg1.mfn = pfn_to_mfn(pfn);

	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}

/* Queue a multicall changing the protection of @pfn's lowmem mapping. */
static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	struct multicall_space mcs;
	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);

	mcs = __xen_mc_entry(0);

	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
				pfn_pte(pfn, prot), 0);
}

/* This needs to make sure the new pte page is pinned iff its being
   attached to a pinned pagetable. */
static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
				    unsigned level)
{
	bool pinned = PagePinned(virt_to_page(mm->pgd));

	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);

	if (pinned) {
		struct page *page = pfn_to_page(pfn);

		SetPagePinned(page);

		if (!PageHighMem(page)) {
			/* Make the new page RO (and, for split-ptlock PTE
			   pages, pin it) in a single hypercall batch. */
			xen_mc_batch();

			__set_pfn_prot(pfn, PAGE_KERNEL_RO);

			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

			xen_mc_issue(PARAVIRT_LAZY_MMU);
		} else {
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
		}
	}
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);
	bool pinned = PagePinned(page);

	trace_xen_mmu_release_ptpage(pfn, level, pinned);

	if (pinned) {
		if (!PageHighMem(page)) {
xen_mc_batch();

			/* Reverse of xen_alloc_ptpage(): unpin (if a
			   split-ptlock PTE page) and make RW again, in
			   one hypercall batch. */
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);

			__set_pfn_prot(pfn, PAGE_KERNEL);

			xen_mc_issue(PARAVIRT_LAZY_MMU);
		}
		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
#endif

/*
 * 32-bit only: reserve the top of the virtual address space, from the
 * hypervisor boundary upwards (HYPERVISOR_VIRT_START, or whatever Xen
 * reports via XENVER_platform_parameters), so the kernel won't map
 * over it.
 */
void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top);
#endif	/* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up.
*/
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
	return (void *)(paddr + __START_KERNEL_map);
#else
	return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= PTE_PFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}

/* Set the page permissions on an identity-mapped pages */
static void set_page_prot(void *addr, pgprot_t prot)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
		BUG();
}

/*
 * Build 1:1 pfn mappings for pfns below @max_pfn underneath @pmd,
 * reusing any pte pages already present and allocating the rest from
 * the brk-extended level1_ident_pgt pool.
 */
static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
	unsigned pmdidx, pteidx;
	unsigned ident_pte;
	unsigned long pfn;

	level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
				      PAGE_SIZE);

	ident_pte = 0;
	pfn = 0;
	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
		pte_t *pte_page;

		/* Reuse or allocate a page of ptes */
		if (pmd_present(pmd[pmdidx]))
			pte_page = m2v(pmd[pmdidx].pmd);
		else {
			/* Check for free pte pages */
			if (ident_pte == LEVEL1_IDENT_ENTRIES)
				break;

			pte_page = &level1_ident_pgt[ident_pte];
			ident_pte += PTRS_PER_PTE;

			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
		}

		/* Install mappings */
		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
			pte_t pte;

#ifdef CONFIG_X86_32
			if (pfn > max_pfn_mapped)
				max_pfn_mapped = pfn;
#endif

			/* Don't clobber entries the domain builder made. */
			if (!pte_none(pte_page[pteidx]))
				continue;

			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
			pte_page[pteidx] = pte;
		}
	}

	/* The identity pagetable pages must themselves be read-only. */
	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

	set_page_prot(pmd, PAGE_KERNEL_RO);
}

/* Ask Xen where the machine-to-physical table lives and how big it is. */
void __init xen_setup_machphys_mapping(void)
{
	struct xen_machphys_mapping mapping;

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned
long *)mapping.v_start; machine_to_phys_nr = mapping.max_mfn + 1; } else { machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; } #ifdef CONFIG_X86_32 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1)) < machine_to_phys_mapping); #endif } #ifdef CONFIG_X86_64 static void convert_pfn_mfn(void *v) { pte_t *pte = v; int i; /* All levels are converted the same way, so just treat them as ptes. */ for (i = 0; i < PTRS_PER_PTE; i++) pte[i] = xen_make_pte(pte[i].pte); } /* * Set up the initial kernel pagetable. * * We can construct this by grafting the Xen provided pagetable into * head_64.S's preconstructed pagetables. We copy the Xen L2's into * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This * means that only the kernel has a physical mapping to start with - * but that's enough to get __va working. We need to fill in the rest * of the physical mapping once some sort of allocator has been set * up. */ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) { pud_t *l3; pmd_t *l2; /* max_pfn_mapped is the last pfn mapped in the initial memory * mappings. Considering that on Xen after the kernel mappings we * have the mappings of some pages that don't exist in pfn space, we * set max_pfn_mapped to the last real pfn mapped. 
*/ max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); /* Zap identity mapping */ init_level4_pgt[0] = __pgd(0); /* Pre-constructed entries are in pfn, so convert to mfn */ convert_pfn_mfn(init_level4_pgt); convert_pfn_mfn(level3_ident_pgt); convert_pfn_mfn(level3_kernel_pgt); l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd); l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud); memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); /* Set up identity map */ xen_map_identity_early(level2_ident_pgt, max_pfn); /* Make pagetable pieces RO */ set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); /* Pin down new L4 */ pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa_symbol(init_level4_pgt))); /* Unpin Xen-provided one */ pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); /* Switch over */ pgd = init_level4_pgt; /* * At this stage there can be no user pgd, and no page * structure to attach it to, so make sure we just set kernel * pgd. 
*/ xen_mc_batch(); __xen_write_cr3(true, __pa(pgd)); xen_mc_issue(PARAVIRT_LAZY_CPU); memblock_reserve(__pa(xen_start_info->pt_base), xen_start_info->nr_pt_frames * PAGE_SIZE); return pgd; } #else /* !CONFIG_X86_64 */ static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); static void __init xen_write_cr3_init(unsigned long cr3) { unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); BUG_ON(read_cr3() != __pa(initial_page_table)); BUG_ON(cr3 != __pa(swapper_pg_dir)); /* * We are switching to swapper_pg_dir for the first time (from * initial_page_table) and therefore need to mark that page * read-only and then pin it. * * Xen disallows sharing of kernel PMDs for PAE * guests. Therefore we must copy the kernel PMD from * initial_page_table into a new kernel PMD to be used in * swapper_pg_dir. */ swapper_kernel_pmd = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); memcpy(swapper_kernel_pmd, initial_kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); swapper_pg_dir[KERNEL_PGD_BOUNDARY] = __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT); set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO); set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); xen_write_cr3(cr3); pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn); pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(initial_page_table))); set_page_prot(initial_page_table, PAGE_KERNEL); set_page_prot(initial_kernel_pmd, PAGE_KERNEL); pv_mmu_ops.write_cr3 = &xen_write_cr3; } pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) { pmd_t *kernel_pmd; initial_kernel_pmd = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + xen_start_info->nr_pt_frames * PAGE_SIZE + 512*1024); kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); xen_map_identity_early(initial_kernel_pmd, max_pfn); memcpy(initial_page_table, pgd, sizeof(pgd_t) * 
PTRS_PER_PGD); initial_page_table[KERNEL_PGD_BOUNDARY] = __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO); set_page_prot(initial_page_table, PAGE_KERNEL_RO); set_page_prot(empty_zero_page, PAGE_KERNEL_RO); pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(initial_page_table))); xen_write_cr3(__pa(initial_page_table)); memblock_reserve(__pa(xen_start_info->pt_base), xen_start_info->nr_pt_frames * PAGE_SIZE); return initial_page_table; } #endif /* CONFIG_X86_64 */ static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; static unsigned char fake_ioapic_mapping[PAGE_SIZE] __page_aligned_bss; static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) { pte_t pte; phys >>= PAGE_SHIFT; switch (idx) { case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: #ifdef CONFIG_X86_F00F_BUG case FIX_F00F_IDT: #endif #ifdef CONFIG_X86_32 case FIX_WP_TEST: case FIX_VDSO: # ifdef CONFIG_HIGHMEM case FIX_KMAP_BEGIN ... FIX_KMAP_END: # endif #else case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE: case VVAR_PAGE: #endif case FIX_TEXT_POKE0: case FIX_TEXT_POKE1: /* All local page mappings */ pte = pfn_pte(phys, prot); break; #ifdef CONFIG_X86_LOCAL_APIC case FIX_APIC_BASE: /* maps dummy local APIC */ pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); break; #endif #ifdef CONFIG_X86_IO_APIC case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END: /* * We just don't map the IO APIC - all access is via * hypercalls. Keep the address in the pte for reference. 
*/ pte = pfn_pte(PFN_DOWN(__pa(fake_ioapic_mapping)), PAGE_KERNEL); break; #endif case FIX_PARAVIRT_BOOTMAP: /* This is an MFN, but it isn't an IO mapping from the IO domain */ pte = mfn_pte(phys, prot); break; default: /* By default, set_fixmap is used for hardware mappings */ pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP)); break; } __native_set_fixmap(idx, pte); #ifdef CONFIG_X86_64 /* Replicate changes to map the vsyscall page into the user pagetable vsyscall mapping. */ if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) || idx == VVAR_PAGE) { unsigned long vaddr = __fix_to_virt(idx); set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); } #endif } void __init xen_ident_map_ISA(void) { unsigned long pa; /* * If we're dom0, then linear map the ISA machine addresses into * the kernel's address space. */ if (!xen_initial_domain()) return; xen_raw_printk("Xen: setup ISA identity maps\n"); for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) { pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO); if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0)) BUG(); } xen_flush_tlb(); } static void __init xen_post_allocator_init(void) { pv_mmu_ops.set_pte = xen_set_pte; pv_mmu_ops.set_pmd = xen_set_pmd; pv_mmu_ops.set_pud = xen_set_pud; #if PAGETABLE_LEVELS == 4 pv_mmu_ops.set_pgd = xen_set_pgd; #endif /* This will work as long as patching hasn't happened yet (which it hasn't) */ pv_mmu_ops.alloc_pte = xen_alloc_pte; pv_mmu_ops.alloc_pmd = xen_alloc_pmd; pv_mmu_ops.release_pte = xen_release_pte; pv_mmu_ops.release_pmd = xen_release_pmd; #if PAGETABLE_LEVELS == 4 pv_mmu_ops.alloc_pud = xen_alloc_pud; pv_mmu_ops.release_pud = xen_release_pud; #endif #ifdef CONFIG_X86_64 SetPagePinned(virt_to_page(level3_user_vsyscall)); #endif xen_mark_init_mm_pinned(); } static void xen_leave_lazy_mmu(void) { preempt_disable(); xen_mc_flush(); paravirt_leave_lazy_mmu(); preempt_enable(); } static const struct pv_mmu_ops xen_mmu_ops __initconst = { 
.read_cr2 = xen_read_cr2, .write_cr2 = xen_write_cr2, .read_cr3 = xen_read_cr3, #ifdef CONFIG_X86_32 .write_cr3 = xen_write_cr3_init, #else .write_cr3 = xen_write_cr3, #endif .flush_tlb_user = xen_flush_tlb, .flush_tlb_kernel = xen_flush_tlb, .flush_tlb_single = xen_flush_tlb_single, .flush_tlb_others = xen_flush_tlb_others, .pte_update = paravirt_nop, .pte_update_defer = paravirt_nop, .pgd_alloc = xen_pgd_alloc, .pgd_free = xen_pgd_free, .alloc_pte = xen_alloc_pte_init, .release_pte = xen_release_pte_init, .alloc_pmd = xen_alloc_pmd_init, .release_pmd = xen_release_pmd_init, .set_pte = xen_set_pte_init, .set_pte_at = xen_set_pte_at, .set_pmd = xen_set_pmd_hyper, .ptep_modify_prot_start = __ptep_modify_prot_start, .ptep_modify_prot_commit = __ptep_modify_prot_commit, .pte_val = PV_CALLEE_SAVE(xen_pte_val), .pgd_val = PV_CALLEE_SAVE(xen_pgd_val), .make_pte = PV_CALLEE_SAVE(xen_make_pte), .make_pgd = PV_CALLEE_SAVE(xen_make_pgd), #ifdef CONFIG_X86_PAE .set_pte_atomic = xen_set_pte_atomic, .pte_clear = xen_pte_clear, .pmd_clear = xen_pmd_clear, #endif /* CONFIG_X86_PAE */ .set_pud = xen_set_pud_hyper, .make_pmd = PV_CALLEE_SAVE(xen_make_pmd), .pmd_val = PV_CALLEE_SAVE(xen_pmd_val), #if PAGETABLE_LEVELS == 4 .pud_val = PV_CALLEE_SAVE(xen_pud_val), .make_pud = PV_CALLEE_SAVE(xen_make_pud), .set_pgd = xen_set_pgd_hyper, .alloc_pud = xen_alloc_pmd_init, .release_pud = xen_release_pmd_init, #endif /* PAGETABLE_LEVELS == 4 */ .activate_mm = xen_activate_mm, .dup_mmap = xen_dup_mmap, .exit_mmap = xen_exit_mmap, .lazy_mode = { .enter = paravirt_enter_lazy_mmu, .leave = xen_leave_lazy_mmu, }, .set_fixmap = xen_set_fixmap, }; void __init xen_init_mmu_ops(void) { x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve; x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; pv_mmu_ops = xen_mmu_ops; memset(dummy_mapping, 0xff, PAGE_SIZE); memset(fake_ioapic_mapping, 0xfd, PAGE_SIZE); } /* 
Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

#define VOID_PTE (mfn_pte(0, __pgprot(0)))

/*
 * Tear down the mappings for 1<<order pages starting at @vaddr,
 * optionally recording the old mfns (@in_frames) and/or the pfns
 * (@out_frames) of the range.
 */
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
				unsigned long *in_frames,
				unsigned long *out_frames)
{
	int i;
	struct multicall_space mcs;

	xen_mc_batch();
	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
		mcs = __xen_mc_entry(0);

		if (in_frames)
			in_frames[i] = virt_to_mfn(vaddr);

		/* Replace the live pte with a void pte and invalidate
		   the pfn->mfn translation. */
		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);

		if (out_frames)
			out_frames[i] = virt_to_pfn(vaddr);
	}
	xen_mc_issue(0);
}

/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
				     unsigned long *mfns,
				     unsigned long first_mfn)
{
	unsigned i, limit;
	unsigned long mfn;

	xen_mc_batch();

	limit = 1u << order;
	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
		struct multicall_space mcs;
		unsigned flags;

		mcs = __xen_mc_entry(0);
		if (mfns)
			mfn = mfns[i];
		else
			mfn = first_mfn + i;

		/* Only the final update in the batch carries a flush. */
		if (i < (limit - 1))
			flags = 0;
		else {
			if (order == 0)
				flags = UVMF_INVLPG | UVMF_ALL;
			else
				flags = UVMF_TLB_FLUSH | UVMF_ALL;
		}

		MULTI_update_va_mapping(mcs.mc, vaddr,
				mfn_pte(mfn, PAGE_KERNEL), flags);

		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
	}

	xen_mc_issue(0);
}

/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment.  Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
*/
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
			       unsigned long *pfns_in,
			       unsigned long extents_out,
			       unsigned int order_out,
			       unsigned long *mfns_out,
			       unsigned int address_bits)
{
	long rc;
	int success;

	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents = extents_in,
			.extent_order = order_in,
			.extent_start = pfns_in,
			.domid = DOMID_SELF
		},
		.out = {
			.nr_extents = extents_out,
			.extent_order = order_out,
			.extent_start = mfns_out,
			.address_bits = address_bits,
			.domid = DOMID_SELF
		}
	};

	/* Both sides must describe the same total number of pages. */
	BUG_ON(extents_in << order_in != extents_out << order_out);

	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == extents_in);

	/* The exchange is all-or-nothing: partial success is a bug. */
	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
	BUG_ON(success && (rc != 0));

	return success;
}

/*
 * Rebuild the 1<<order pages at @vstart as one machine-contiguous,
 * suitably aligned extent.  The contents of the region are discarded
 * (it is zeroed first).  Returns 0 on success, -ENOMEM otherwise.
 */
int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
				 unsigned int address_bits)
{
	unsigned long *in_frames = discontig_frames, out_frame;
	unsigned long flags;
	int success;

	/*
	 * Currently an auto-translated guest will not perform I/O, nor will
	 * it require PAE page directories below 4GB. Therefore any calls to
	 * this function are redundant and can be ignored.
	 */

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return -ENOMEM;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Zap current PTEs, remembering MFNs. */
	xen_zap_pfn_range(vstart, order, in_frames, NULL);

	/* 2. Get a new contiguous memory extent. */
	out_frame = virt_to_pfn(vstart);
	success = xen_exchange_memory(1UL << order, 0, in_frames,
				      1, order, &out_frame,
				      address_bits);

	/* 3. Map the new extent in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
	else
		/* Exchange failed: put the original frames back. */
		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	return success ?
0 : -ENOMEM; } EXPORT_SYMBOL_GPL(xen_create_contiguous_region); void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order) { unsigned long *out_frames = discontig_frames, in_frame; unsigned long flags; int success; if (xen_feature(XENFEAT_auto_translated_physmap)) return; if (unlikely(order > MAX_CONTIG_ORDER)) return; memset((void *) vstart, 0, PAGE_SIZE << order); spin_lock_irqsave(&xen_reservation_lock, flags); /* 1. Find start MFN of contiguous extent. */ in_frame = virt_to_mfn(vstart); /* 2. Zap current PTEs. */ xen_zap_pfn_range(vstart, order, NULL, out_frames); /* 3. Do the exchange for non-contiguous MFNs. */ success = xen_exchange_memory(1, order, &in_frame, 1UL << order, 0, out_frames, 0); /* 4. Map new pages in place of old pages. */ if (success) xen_remap_exchanged_ptes(vstart, order, out_frames, 0); else xen_remap_exchanged_ptes(vstart, order, NULL, in_frame); spin_unlock_irqrestore(&xen_reservation_lock, flags); } EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); #ifdef CONFIG_XEN_PVHVM static void xen_hvm_exit_mmap(struct mm_struct *mm) { struct xen_hvm_pagetable_dying a; int rc; a.domid = DOMID_SELF; a.gpa = __pa(mm->pgd); rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a); WARN_ON_ONCE(rc < 0); } static int is_pagetable_dying_supported(void) { struct xen_hvm_pagetable_dying a; int rc = 0; a.domid = DOMID_SELF; a.gpa = 0x00; rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a); if (rc < 0) { printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n"); return 0; } return 1; } void __init xen_hvm_init_mmu_ops(void) { if (is_pagetable_dying_supported()) pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap; } #endif #define REMAP_BATCH_SIZE 16 struct remap_data { unsigned long mfn; pgprot_t prot; struct mmu_update *mmu_update; }; static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, void *data) { struct remap_data *rmd = data; pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot)); rmd->mmu_update->ptr = 
virt_to_machine(ptep).maddr; rmd->mmu_update->val = pte_val_ma(pte); rmd->mmu_update++; return 0; } int xen_remap_domain_mfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long mfn, int nr, pgprot_t prot, unsigned domid) { struct remap_data rmd; struct mmu_update mmu_update[REMAP_BATCH_SIZE]; int batch; unsigned long range; int err = 0; prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) == (VM_PFNMAP | VM_RESERVED | VM_IO))); rmd.mfn = mfn; rmd.prot = prot; while (nr) { batch = min(REMAP_BATCH_SIZE, nr); range = (unsigned long)batch << PAGE_SHIFT; rmd.mmu_update = mmu_update; err = apply_to_page_range(vma->vm_mm, addr, range, remap_area_mfn_pte_fn, &rmd); if (err) goto out; err = -EFAULT; if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0) goto out; nr -= batch; addr += range; } err = 0; out: flush_tlb_all(); return err; } EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
gpl-2.0
naota/hfsplus
arch/frv/mb93090-mb00/pci-vdk.c
3905
12741
/* pci-vdk.c: MB93090-MB00 (VDK) PCI support * * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/delay.h> #include <asm/segment.h> #include <asm/io.h> #include <asm/mb-regs.h> #include <asm/mb86943a.h> #include "pci-frv.h" unsigned int __nongpreldata pci_probe = 1; int __nongpreldata pcibios_last_bus = -1; struct pci_bus *__nongpreldata pci_root_bus; struct pci_ops *__nongpreldata pci_root_ops; /* * The accessible PCI window does not cover the entire CPU address space, but * there are devices we want to access outside of that window, so we need to * insert specific PCI bus resources instead of using the platform-level bus * resources directly for the PCI root bus. * * These are configured and inserted by pcibios_init() and are attached to the * root bus by pcibios_fixup_bus(). 
*/ static struct resource pci_ioport_resource = { .name = "PCI IO", .start = 0, .end = IO_SPACE_LIMIT, .flags = IORESOURCE_IO, }; static struct resource pci_iomem_resource = { .name = "PCI mem", .start = 0, .end = -1, .flags = IORESOURCE_MEM, }; /* * Functions for accessing PCI configuration space */ #define CONFIG_CMD(bus, dev, where) \ (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3)) #define __set_PciCfgAddr(A) writel((A), (volatile void __iomem *) __region_CS1 + 0x80) #define __get_PciCfgDataB(A) readb((volatile void __iomem *) __region_CS1 + 0x88 + ((A) & 3)) #define __get_PciCfgDataW(A) readw((volatile void __iomem *) __region_CS1 + 0x88 + ((A) & 2)) #define __get_PciCfgDataL(A) readl((volatile void __iomem *) __region_CS1 + 0x88) #define __set_PciCfgDataB(A,V) \ writeb((V), (volatile void __iomem *) __region_CS1 + 0x88 + (3 - ((A) & 3))) #define __set_PciCfgDataW(A,V) \ writew((V), (volatile void __iomem *) __region_CS1 + 0x88 + (2 - ((A) & 2))) #define __set_PciCfgDataL(A,V) \ writel((V), (volatile void __iomem *) __region_CS1 + 0x88) #define __get_PciBridgeDataB(A) readb((volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __get_PciBridgeDataW(A) readw((volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __get_PciBridgeDataL(A) readl((volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __set_PciBridgeDataB(A,V) writeb((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __set_PciBridgeDataW(A,V) writew((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __set_PciBridgeDataL(A,V) writel((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A)) static inline int __query(const struct pci_dev *dev) { // return dev->bus->number==0 && (dev->devfn==PCI_DEVFN(0,0)); // return dev->bus->number==1; // return dev->bus->number==0 && // (dev->devfn==PCI_DEVFN(2,0) || dev->devfn==PCI_DEVFN(3,0)); return 0; } /*****************************************************************************/ /* * */ 
static int pci_frv_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { u32 _value; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { _value = __get_PciBridgeDataL(where & ~3); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); _value = __get_PciCfgDataL(where & ~3); } switch (size) { case 1: _value = _value >> ((where & 3) * 8); break; case 2: _value = _value >> ((where & 2) * 8); break; case 4: break; default: BUG(); } *val = _value; return PCIBIOS_SUCCESSFUL; } static int pci_frv_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { switch (size) { case 1: if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __set_PciBridgeDataB(where, value); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); __set_PciCfgDataB(where, value); } break; case 2: if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __set_PciBridgeDataW(where, value); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); __set_PciCfgDataW(where, value); } break; case 4: if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __set_PciBridgeDataL(where, value); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); __set_PciCfgDataL(where, value); } break; default: BUG(); } return PCIBIOS_SUCCESSFUL; } static struct pci_ops pci_direct_frv = { pci_frv_read_config, pci_frv_write_config, }; /* * Before we decide to use direct hardware access mechanisms, we try to do some * trivial checks to ensure it at least _seems_ to be working -- we just test * whether bus 00 contains a host bridge (this is similar to checking * techniques used in XFree86, but ours should be more reliable since we * attempt to make use of direct access hints provided by the PCI BIOS). * * This should be close to trivial, but it isn't, because there are buggy * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID. 
*/ static int __init pci_sanity_check(struct pci_ops *o) { struct pci_bus bus; /* Fake bus and device */ u32 id; bus.number = 0; if (o->read(&bus, 0, PCI_VENDOR_ID, 4, &id) == PCIBIOS_SUCCESSFUL) { printk("PCI: VDK Bridge device:vendor: %08x\n", id); if (id == 0x200e10cf) return 1; } printk("PCI: VDK Bridge: Sanity check failed\n"); return 0; } static struct pci_ops * __init pci_check_direct(void) { unsigned long flags; local_irq_save(flags); /* check if access works */ if (pci_sanity_check(&pci_direct_frv)) { local_irq_restore(flags); printk("PCI: Using configuration frv\n"); // request_mem_region(0xBE040000, 256, "FRV bridge"); // request_mem_region(0xBFFFFFF4, 12, "PCI frv"); return &pci_direct_frv; } local_irq_restore(flags); return NULL; } /* * Discover remaining PCI buses in case there are peer host bridges. * We use the number of last PCI bus provided by the PCI BIOS. */ static void __init pcibios_fixup_peer_bridges(void) { struct pci_bus bus; struct pci_dev dev; int n; u16 l; if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff) return; printk("PCI: Peer bridge fixup\n"); for (n=0; n <= pcibios_last_bus; n++) { if (pci_find_bus(0, n)) continue; bus.number = n; bus.ops = pci_root_ops; dev.bus = &bus; for(dev.devfn=0; dev.devfn<256; dev.devfn += 8) if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) && l != 0x0000 && l != 0xffff) { printk("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l); printk("PCI: Discovered peer bus %02x\n", n); pci_scan_bus(n, pci_root_ops, NULL); break; } } } /* * Exceptions for specific devices. Usually work-arounds for fatal design flaws. */ static void __init pci_fixup_umc_ide(struct pci_dev *d) { /* * UM8886BF IDE controller sets region type bits incorrectly, * therefore they look like memory despite of them being I/O. 
*/ int i; printk("PCI: Fixing base address flags for device %s\n", pci_name(d)); for(i=0; i<4; i++) d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO; } static void __init pci_fixup_ide_bases(struct pci_dev *d) { int i; /* * PCI IDE controllers use non-standard I/O port decoding, respect it. */ if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE) return; printk("PCI: IDE base address fixup for %s\n", pci_name(d)); for(i=0; i<4; i++) { struct resource *r = &d->resource[i]; if ((r->start & ~0x80) == 0x374) { r->start |= 2; r->end = r->start; } } } static void __init pci_fixup_ide_trash(struct pci_dev *d) { int i; /* * There exist PCI IDE controllers which have utter garbage * in first four base registers. Ignore that. */ printk("PCI: IDE base address trash cleared for %s\n", pci_name(d)); for(i=0; i<4; i++) d->resource[i].start = d->resource[i].end = d->resource[i].flags = 0; } static void __devinit pci_fixup_latency(struct pci_dev *d) { /* * SiS 5597 and 5598 chipsets require latency timer set to * at most 32 to avoid lockups. */ DBG("PCI: Setting max latency to 32\n"); pcibios_max_latency = 32; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, pci_fixup_ide_trash); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency); DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases); /* * Called after each bus is probed, but before its children * are examined. 
*/ void __init pcibios_fixup_bus(struct pci_bus *bus) { #if 0 printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); #endif if (bus->number == 0) { bus->resource[0] = &pci_ioport_resource; bus->resource[1] = &pci_iomem_resource; } pci_read_bridge_bases(bus); if (bus->number == 0) { struct list_head *ln; struct pci_dev *dev; for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) { dev = pci_dev_b(ln); if (dev->devfn == 0) { dev->resource[0].start = 0; dev->resource[0].end = 0; } } } } /* * Initialization. Try all known PCI access methods. Note that we support * using both PCI BIOS and direct access: in such cases, we use I/O ports * to access config space, but we still keep BIOS order of cards to be * compatible with 2.0.X. This should go away some day. */ int __init pcibios_init(void) { struct pci_ops *dir = NULL; if (!mb93090_mb00_detected) return -ENXIO; __reg_MB86943_sl_ctl |= MB86943_SL_CTL_DRCT_MASTER_SWAP | MB86943_SL_CTL_DRCT_SLAVE_SWAP; __reg_MB86943_ecs_base(1) = ((__region_CS2 + 0x01000000) >> 9) | 0x08000000; __reg_MB86943_ecs_base(2) = ((__region_CS2 + 0x00000000) >> 9) | 0x08000000; *(volatile uint32_t *) (__region_CS1 + 0x848) = 0xe0000000; *(volatile uint32_t *) (__region_CS1 + 0x8b8) = 0x00000000; __reg_MB86943_sl_pci_io_base = (__region_CS2 + 0x04000000) >> 9; __reg_MB86943_sl_pci_mem_base = (__region_CS2 + 0x08000000) >> 9; __reg_MB86943_pci_sl_io_base = __region_CS2 + 0x04000000; __reg_MB86943_pci_sl_mem_base = __region_CS2 + 0x08000000; mb(); /* enable PCI arbitration */ __reg_MB86943_pci_arbiter = MB86943_PCIARB_EN; pci_ioport_resource.start = (__reg_MB86943_sl_pci_io_base << 9) & 0xfffffc00; pci_ioport_resource.end = (__reg_MB86943_sl_pci_io_range << 9) | 0x3ff; pci_ioport_resource.end += pci_ioport_resource.start; printk("PCI IO window: %08llx-%08llx\n", (unsigned long long) pci_ioport_resource.start, (unsigned long long) pci_ioport_resource.end); pci_iomem_resource.start = (__reg_MB86943_sl_pci_mem_base << 9) & 0xfffffc00; 
pci_iomem_resource.end = (__reg_MB86943_sl_pci_mem_range << 9) | 0x3ff; pci_iomem_resource.end += pci_iomem_resource.start; /* Reserve somewhere to write to flush posted writes. This is used by * __flush_PCI_writes() from asm/io.h to force the write FIFO in the * CPU-PCI bridge to flush as this doesn't happen automatically when a * read is performed on the MB93090 development kit motherboard. */ pci_iomem_resource.start += 0x400; printk("PCI MEM window: %08llx-%08llx\n", (unsigned long long) pci_iomem_resource.start, (unsigned long long) pci_iomem_resource.end); printk("PCI DMA memory: %08lx-%08lx\n", dma_coherent_mem_start, dma_coherent_mem_end); if (insert_resource(&iomem_resource, &pci_iomem_resource) < 0) panic("Unable to insert PCI IOMEM resource\n"); if (insert_resource(&ioport_resource, &pci_ioport_resource) < 0) panic("Unable to insert PCI IOPORT resource\n"); if (!pci_probe) return -ENXIO; dir = pci_check_direct(); if (dir) pci_root_ops = dir; else { printk("PCI: No PCI bus detected\n"); return -ENXIO; } printk("PCI: Probing PCI hardware\n"); pci_root_bus = pci_scan_bus(0, pci_root_ops, NULL); pcibios_irq_init(); pcibios_fixup_peer_bridges(); pcibios_fixup_irqs(); pcibios_resource_survey(); return 0; } arch_initcall(pcibios_init); char * __init pcibios_setup(char *str) { if (!strcmp(str, "off")) { pci_probe = 0; return NULL; } else if (!strncmp(str, "lastbus=", 8)) { pcibios_last_bus = simple_strtol(str+8, NULL, 0); return NULL; } return str; } int pcibios_enable_device(struct pci_dev *dev, int mask) { int err; if ((err = pci_enable_resources(dev, mask)) < 0) return err; if (!dev->msi_enabled) pcibios_enable_irq(dev); return 0; }
gpl-2.0
JAV-Team-qcom/android_kernel_wingtech_msm8916
drivers/media/dvb-frontends/itd1000.c
3905
11590
/* * Driver for the Integrant ITD1000 "Zero-IF Tuner IC for Direct Broadcast Satellite" * * Copyright (c) 2007-8 Patrick Boettcher <pb@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.= */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/delay.h> #include <linux/dvb/frontend.h> #include <linux/i2c.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "itd1000.h" #include "itd1000_priv.h" /* Max transfer size done by I2C transfer functions */ #define MAX_XFER_SIZE 64 static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); #define itd_dbg(args...) do { \ if (debug) { \ printk(KERN_DEBUG "ITD1000: " args);\ } \ } while (0) #define itd_warn(args...) do { \ printk(KERN_WARNING "ITD1000: " args); \ } while (0) #define itd_info(args...) 
do { \ printk(KERN_INFO "ITD1000: " args); \ } while (0) /* don't write more than one byte with flexcop behind */ static int itd1000_write_regs(struct itd1000_state *state, u8 reg, u8 v[], u8 len) { u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = state->cfg->i2c_address, .flags = 0, .buf = buf, .len = len+1 }; if (1 + len > sizeof(buf)) { printk(KERN_WARNING "itd1000: i2c wr reg=%04x: len=%d is too big!\n", reg, len); return -EINVAL; } buf[0] = reg; memcpy(&buf[1], v, len); /* itd_dbg("wr %02x: %02x\n", reg, v[0]); */ if (i2c_transfer(state->i2c, &msg, 1) != 1) { printk(KERN_WARNING "itd1000 I2C write failed\n"); return -EREMOTEIO; } return 0; } static int itd1000_read_reg(struct itd1000_state *state, u8 reg) { u8 val; struct i2c_msg msg[2] = { { .addr = state->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 }, { .addr = state->cfg->i2c_address, .flags = I2C_M_RD, .buf = &val, .len = 1 }, }; /* ugly flexcop workaround */ itd1000_write_regs(state, (reg - 1) & 0xff, &state->shadow[(reg - 1) & 0xff], 1); if (i2c_transfer(state->i2c, msg, 2) != 2) { itd_warn("itd1000 I2C read failed\n"); return -EREMOTEIO; } return val; } static inline int itd1000_write_reg(struct itd1000_state *state, u8 r, u8 v) { int ret = itd1000_write_regs(state, r, &v, 1); state->shadow[r] = v; return ret; } static struct { u32 symbol_rate; u8 pgaext : 4; /* PLLFH */ u8 bbgvmin : 4; /* BBGVMIN */ } itd1000_lpf_pga[] = { { 0, 0x8, 0x3 }, { 5200000, 0x8, 0x3 }, { 12200000, 0x4, 0x3 }, { 15400000, 0x2, 0x3 }, { 19800000, 0x2, 0x3 }, { 21500000, 0x2, 0x3 }, { 24500000, 0x2, 0x3 }, { 28400000, 0x2, 0x3 }, { 33400000, 0x2, 0x3 }, { 34400000, 0x1, 0x4 }, { 34400000, 0x1, 0x4 }, { 38400000, 0x1, 0x4 }, { 38400000, 0x1, 0x4 }, { 40400000, 0x1, 0x4 }, { 45400000, 0x1, 0x4 }, }; static void itd1000_set_lpf_bw(struct itd1000_state *state, u32 symbol_rate) { u8 i; u8 con1 = itd1000_read_reg(state, CON1) & 0xfd; u8 pllfh = itd1000_read_reg(state, PLLFH) & 0x0f; u8 bbgvmin = itd1000_read_reg(state, 
BBGVMIN) & 0xf0; u8 bw = itd1000_read_reg(state, BW) & 0xf0; itd_dbg("symbol_rate = %d\n", symbol_rate); /* not sure what is that ? - starting to download the table */ itd1000_write_reg(state, CON1, con1 | (1 << 1)); for (i = 0; i < ARRAY_SIZE(itd1000_lpf_pga); i++) if (symbol_rate < itd1000_lpf_pga[i].symbol_rate) { itd_dbg("symrate: index: %d pgaext: %x, bbgvmin: %x\n", i, itd1000_lpf_pga[i].pgaext, itd1000_lpf_pga[i].bbgvmin); itd1000_write_reg(state, PLLFH, pllfh | (itd1000_lpf_pga[i].pgaext << 4)); itd1000_write_reg(state, BBGVMIN, bbgvmin | (itd1000_lpf_pga[i].bbgvmin)); itd1000_write_reg(state, BW, bw | (i & 0x0f)); break; } itd1000_write_reg(state, CON1, con1 | (0 << 1)); } static struct { u8 vcorg; u32 fmax_rg; } itd1000_vcorg[] = { { 1, 920000 }, { 2, 971000 }, { 3, 1031000 }, { 4, 1091000 }, { 5, 1171000 }, { 6, 1281000 }, { 7, 1381000 }, { 8, 500000 }, /* this is intentional. */ { 9, 1451000 }, { 10, 1531000 }, { 11, 1631000 }, { 12, 1741000 }, { 13, 1891000 }, { 14, 2071000 }, { 15, 2250000 }, }; static void itd1000_set_vco(struct itd1000_state *state, u32 freq_khz) { u8 i; u8 gvbb_i2c = itd1000_read_reg(state, GVBB_I2C) & 0xbf; u8 vco_chp1_i2c = itd1000_read_reg(state, VCO_CHP1_I2C) & 0x0f; u8 adcout; /* reserved bit again (reset ?) 
*/ itd1000_write_reg(state, GVBB_I2C, gvbb_i2c | (1 << 6)); for (i = 0; i < ARRAY_SIZE(itd1000_vcorg); i++) { if (freq_khz < itd1000_vcorg[i].fmax_rg) { itd1000_write_reg(state, VCO_CHP1_I2C, vco_chp1_i2c | (itd1000_vcorg[i].vcorg << 4)); msleep(1); adcout = itd1000_read_reg(state, PLLLOCK) & 0x0f; itd_dbg("VCO: %dkHz: %d -> ADCOUT: %d %02x\n", freq_khz, itd1000_vcorg[i].vcorg, adcout, vco_chp1_i2c); if (adcout > 13) { if (!(itd1000_vcorg[i].vcorg == 7 || itd1000_vcorg[i].vcorg == 15)) itd1000_write_reg(state, VCO_CHP1_I2C, vco_chp1_i2c | ((itd1000_vcorg[i].vcorg + 1) << 4)); } else if (adcout < 2) { if (!(itd1000_vcorg[i].vcorg == 1 || itd1000_vcorg[i].vcorg == 9)) itd1000_write_reg(state, VCO_CHP1_I2C, vco_chp1_i2c | ((itd1000_vcorg[i].vcorg - 1) << 4)); } break; } } } static const struct { u32 freq; u8 values[10]; /* RFTR, RFST1 - RFST9 */ } itd1000_fre_values[] = { { 1075000, { 0x59, 0x1d, 0x1c, 0x17, 0x16, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1250000, { 0x89, 0x1e, 0x1d, 0x17, 0x15, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1450000, { 0x89, 0x1e, 0x1d, 0x17, 0x15, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1650000, { 0x69, 0x1e, 0x1d, 0x17, 0x15, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1750000, { 0x69, 0x1e, 0x17, 0x15, 0x14, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1850000, { 0x69, 0x1d, 0x17, 0x16, 0x14, 0x0f, 0x0e, 0x0d, 0x0b, 0x0a } }, { 1900000, { 0x69, 0x1d, 0x17, 0x15, 0x14, 0x0f, 0x0e, 0x0d, 0x0b, 0x0a } }, { 1950000, { 0x69, 0x1d, 0x17, 0x16, 0x14, 0x13, 0x0e, 0x0d, 0x0b, 0x0a } }, { 2050000, { 0x69, 0x1e, 0x1d, 0x17, 0x16, 0x14, 0x13, 0x0e, 0x0b, 0x0a } }, { 2150000, { 0x69, 0x1d, 0x1c, 0x17, 0x15, 0x14, 0x13, 0x0f, 0x0e, 0x0b } } }; #define FREF 16 static void itd1000_set_lo(struct itd1000_state *state, u32 freq_khz) { int i, j; u32 plln, pllf; u64 tmp; plln = (freq_khz * 1000) / 2 / FREF; /* Compute the factional part times 1000 */ tmp = plln % 1000000; plln /= 1000000; tmp *= 1048576; do_div(tmp, 1000000); pllf = (u32) tmp; state->frequency = ((plln * 1000) + (pllf * 
1000)/1048576) * 2*FREF; itd_dbg("frequency: %dkHz (wanted) %dkHz (set), PLLF = %d, PLLN = %d\n", freq_khz, state->frequency, pllf, plln); itd1000_write_reg(state, PLLNH, 0x80); /* PLLNH */ itd1000_write_reg(state, PLLNL, plln & 0xff); itd1000_write_reg(state, PLLFH, (itd1000_read_reg(state, PLLFH) & 0xf0) | ((pllf >> 16) & 0x0f)); itd1000_write_reg(state, PLLFM, (pllf >> 8) & 0xff); itd1000_write_reg(state, PLLFL, (pllf >> 0) & 0xff); for (i = 0; i < ARRAY_SIZE(itd1000_fre_values); i++) { if (freq_khz <= itd1000_fre_values[i].freq) { itd_dbg("fre_values: %d\n", i); itd1000_write_reg(state, RFTR, itd1000_fre_values[i].values[0]); for (j = 0; j < 9; j++) itd1000_write_reg(state, RFST1+j, itd1000_fre_values[i].values[j+1]); break; } } itd1000_set_vco(state, freq_khz); } static int itd1000_set_parameters(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct itd1000_state *state = fe->tuner_priv; u8 pllcon1; itd1000_set_lo(state, c->frequency); itd1000_set_lpf_bw(state, c->symbol_rate); pllcon1 = itd1000_read_reg(state, PLLCON1) & 0x7f; itd1000_write_reg(state, PLLCON1, pllcon1 | (1 << 7)); itd1000_write_reg(state, PLLCON1, pllcon1); return 0; } static int itd1000_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct itd1000_state *state = fe->tuner_priv; *frequency = state->frequency; return 0; } static int itd1000_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { return 0; } static u8 itd1000_init_tab[][2] = { { PLLCON1, 0x65 }, /* Register does not change */ { PLLNH, 0x80 }, /* Bits [7:6] do not change */ { RESERVED_0X6D, 0x3b }, { VCO_CHP2_I2C, 0x12 }, { 0x72, 0xf9 }, /* No such regsister defined */ { RESERVED_0X73, 0xff }, { RESERVED_0X74, 0xb2 }, { RESERVED_0X75, 0xc7 }, { EXTGVBBRF, 0xf0 }, { DIVAGCCK, 0x80 }, { BBTR, 0xa0 }, { RESERVED_0X7E, 0x4f }, { 0x82, 0x88 }, /* No such regsister defined */ { 0x83, 0x80 }, /* No such regsister defined */ { 0x84, 0x80 }, /* No such regsister defined */ { 
RESERVED_0X85, 0x74 }, { RESERVED_0X86, 0xff }, { RESERVED_0X88, 0x02 }, { RESERVED_0X89, 0x16 }, { RFST0, 0x1f }, { RESERVED_0X94, 0x66 }, { RESERVED_0X95, 0x66 }, { RESERVED_0X96, 0x77 }, { RESERVED_0X97, 0x99 }, { RESERVED_0X98, 0xff }, { RESERVED_0X99, 0xfc }, { RESERVED_0X9A, 0xba }, { RESERVED_0X9B, 0xaa }, }; static u8 itd1000_reinit_tab[][2] = { { VCO_CHP1_I2C, 0x8a }, { BW, 0x87 }, { GVBB_I2C, 0x03 }, { BBGVMIN, 0x03 }, { CON1, 0x2e }, }; static int itd1000_init(struct dvb_frontend *fe) { struct itd1000_state *state = fe->tuner_priv; int i; for (i = 0; i < ARRAY_SIZE(itd1000_init_tab); i++) itd1000_write_reg(state, itd1000_init_tab[i][0], itd1000_init_tab[i][1]); for (i = 0; i < ARRAY_SIZE(itd1000_reinit_tab); i++) itd1000_write_reg(state, itd1000_reinit_tab[i][0], itd1000_reinit_tab[i][1]); return 0; } static int itd1000_sleep(struct dvb_frontend *fe) { return 0; } static int itd1000_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static const struct dvb_tuner_ops itd1000_tuner_ops = { .info = { .name = "Integrant ITD1000", .frequency_min = 950000, .frequency_max = 2150000, .frequency_step = 125, /* kHz for QPSK frontends */ }, .release = itd1000_release, .init = itd1000_init, .sleep = itd1000_sleep, .set_params = itd1000_set_parameters, .get_frequency = itd1000_get_frequency, .get_bandwidth = itd1000_get_bandwidth }; struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct itd1000_config *cfg) { struct itd1000_state *state = NULL; u8 i = 0; state = kzalloc(sizeof(struct itd1000_state), GFP_KERNEL); if (state == NULL) return NULL; state->cfg = cfg; state->i2c = i2c; i = itd1000_read_reg(state, 0); if (i != 0) { kfree(state); return NULL; } itd_info("successfully identified (ID: %d)\n", i); memset(state->shadow, 0xff, sizeof(state->shadow)); for (i = 0x65; i < 0x9c; i++) state->shadow[i] = itd1000_read_reg(state, i); memcpy(&fe->ops.tuner_ops, &itd1000_tuner_ops, sizeof(struct 
dvb_tuner_ops)); fe->tuner_priv = state; return fe; } EXPORT_SYMBOL(itd1000_attach); MODULE_AUTHOR("Patrick Boettcher <pb@linuxtv.org>"); MODULE_DESCRIPTION("Integrant ITD1000 driver"); MODULE_LICENSE("GPL");
gpl-2.0
Odin-Kernel/kernel_huawei_angler
drivers/media/dvb-frontends/itd1000.c
3905
11590
/* * Driver for the Integrant ITD1000 "Zero-IF Tuner IC for Direct Broadcast Satellite" * * Copyright (c) 2007-8 Patrick Boettcher <pb@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.= */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/delay.h> #include <linux/dvb/frontend.h> #include <linux/i2c.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "itd1000.h" #include "itd1000_priv.h" /* Max transfer size done by I2C transfer functions */ #define MAX_XFER_SIZE 64 static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); #define itd_dbg(args...) do { \ if (debug) { \ printk(KERN_DEBUG "ITD1000: " args);\ } \ } while (0) #define itd_warn(args...) do { \ printk(KERN_WARNING "ITD1000: " args); \ } while (0) #define itd_info(args...) 
do { \ printk(KERN_INFO "ITD1000: " args); \ } while (0) /* don't write more than one byte with flexcop behind */ static int itd1000_write_regs(struct itd1000_state *state, u8 reg, u8 v[], u8 len) { u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = state->cfg->i2c_address, .flags = 0, .buf = buf, .len = len+1 }; if (1 + len > sizeof(buf)) { printk(KERN_WARNING "itd1000: i2c wr reg=%04x: len=%d is too big!\n", reg, len); return -EINVAL; } buf[0] = reg; memcpy(&buf[1], v, len); /* itd_dbg("wr %02x: %02x\n", reg, v[0]); */ if (i2c_transfer(state->i2c, &msg, 1) != 1) { printk(KERN_WARNING "itd1000 I2C write failed\n"); return -EREMOTEIO; } return 0; } static int itd1000_read_reg(struct itd1000_state *state, u8 reg) { u8 val; struct i2c_msg msg[2] = { { .addr = state->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 }, { .addr = state->cfg->i2c_address, .flags = I2C_M_RD, .buf = &val, .len = 1 }, }; /* ugly flexcop workaround */ itd1000_write_regs(state, (reg - 1) & 0xff, &state->shadow[(reg - 1) & 0xff], 1); if (i2c_transfer(state->i2c, msg, 2) != 2) { itd_warn("itd1000 I2C read failed\n"); return -EREMOTEIO; } return val; } static inline int itd1000_write_reg(struct itd1000_state *state, u8 r, u8 v) { int ret = itd1000_write_regs(state, r, &v, 1); state->shadow[r] = v; return ret; } static struct { u32 symbol_rate; u8 pgaext : 4; /* PLLFH */ u8 bbgvmin : 4; /* BBGVMIN */ } itd1000_lpf_pga[] = { { 0, 0x8, 0x3 }, { 5200000, 0x8, 0x3 }, { 12200000, 0x4, 0x3 }, { 15400000, 0x2, 0x3 }, { 19800000, 0x2, 0x3 }, { 21500000, 0x2, 0x3 }, { 24500000, 0x2, 0x3 }, { 28400000, 0x2, 0x3 }, { 33400000, 0x2, 0x3 }, { 34400000, 0x1, 0x4 }, { 34400000, 0x1, 0x4 }, { 38400000, 0x1, 0x4 }, { 38400000, 0x1, 0x4 }, { 40400000, 0x1, 0x4 }, { 45400000, 0x1, 0x4 }, }; static void itd1000_set_lpf_bw(struct itd1000_state *state, u32 symbol_rate) { u8 i; u8 con1 = itd1000_read_reg(state, CON1) & 0xfd; u8 pllfh = itd1000_read_reg(state, PLLFH) & 0x0f; u8 bbgvmin = itd1000_read_reg(state, 
BBGVMIN) & 0xf0; u8 bw = itd1000_read_reg(state, BW) & 0xf0; itd_dbg("symbol_rate = %d\n", symbol_rate); /* not sure what is that ? - starting to download the table */ itd1000_write_reg(state, CON1, con1 | (1 << 1)); for (i = 0; i < ARRAY_SIZE(itd1000_lpf_pga); i++) if (symbol_rate < itd1000_lpf_pga[i].symbol_rate) { itd_dbg("symrate: index: %d pgaext: %x, bbgvmin: %x\n", i, itd1000_lpf_pga[i].pgaext, itd1000_lpf_pga[i].bbgvmin); itd1000_write_reg(state, PLLFH, pllfh | (itd1000_lpf_pga[i].pgaext << 4)); itd1000_write_reg(state, BBGVMIN, bbgvmin | (itd1000_lpf_pga[i].bbgvmin)); itd1000_write_reg(state, BW, bw | (i & 0x0f)); break; } itd1000_write_reg(state, CON1, con1 | (0 << 1)); } static struct { u8 vcorg; u32 fmax_rg; } itd1000_vcorg[] = { { 1, 920000 }, { 2, 971000 }, { 3, 1031000 }, { 4, 1091000 }, { 5, 1171000 }, { 6, 1281000 }, { 7, 1381000 }, { 8, 500000 }, /* this is intentional. */ { 9, 1451000 }, { 10, 1531000 }, { 11, 1631000 }, { 12, 1741000 }, { 13, 1891000 }, { 14, 2071000 }, { 15, 2250000 }, }; static void itd1000_set_vco(struct itd1000_state *state, u32 freq_khz) { u8 i; u8 gvbb_i2c = itd1000_read_reg(state, GVBB_I2C) & 0xbf; u8 vco_chp1_i2c = itd1000_read_reg(state, VCO_CHP1_I2C) & 0x0f; u8 adcout; /* reserved bit again (reset ?) 
*/ itd1000_write_reg(state, GVBB_I2C, gvbb_i2c | (1 << 6)); for (i = 0; i < ARRAY_SIZE(itd1000_vcorg); i++) { if (freq_khz < itd1000_vcorg[i].fmax_rg) { itd1000_write_reg(state, VCO_CHP1_I2C, vco_chp1_i2c | (itd1000_vcorg[i].vcorg << 4)); msleep(1); adcout = itd1000_read_reg(state, PLLLOCK) & 0x0f; itd_dbg("VCO: %dkHz: %d -> ADCOUT: %d %02x\n", freq_khz, itd1000_vcorg[i].vcorg, adcout, vco_chp1_i2c); if (adcout > 13) { if (!(itd1000_vcorg[i].vcorg == 7 || itd1000_vcorg[i].vcorg == 15)) itd1000_write_reg(state, VCO_CHP1_I2C, vco_chp1_i2c | ((itd1000_vcorg[i].vcorg + 1) << 4)); } else if (adcout < 2) { if (!(itd1000_vcorg[i].vcorg == 1 || itd1000_vcorg[i].vcorg == 9)) itd1000_write_reg(state, VCO_CHP1_I2C, vco_chp1_i2c | ((itd1000_vcorg[i].vcorg - 1) << 4)); } break; } } } static const struct { u32 freq; u8 values[10]; /* RFTR, RFST1 - RFST9 */ } itd1000_fre_values[] = { { 1075000, { 0x59, 0x1d, 0x1c, 0x17, 0x16, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1250000, { 0x89, 0x1e, 0x1d, 0x17, 0x15, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1450000, { 0x89, 0x1e, 0x1d, 0x17, 0x15, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1650000, { 0x69, 0x1e, 0x1d, 0x17, 0x15, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1750000, { 0x69, 0x1e, 0x17, 0x15, 0x14, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1850000, { 0x69, 0x1d, 0x17, 0x16, 0x14, 0x0f, 0x0e, 0x0d, 0x0b, 0x0a } }, { 1900000, { 0x69, 0x1d, 0x17, 0x15, 0x14, 0x0f, 0x0e, 0x0d, 0x0b, 0x0a } }, { 1950000, { 0x69, 0x1d, 0x17, 0x16, 0x14, 0x13, 0x0e, 0x0d, 0x0b, 0x0a } }, { 2050000, { 0x69, 0x1e, 0x1d, 0x17, 0x16, 0x14, 0x13, 0x0e, 0x0b, 0x0a } }, { 2150000, { 0x69, 0x1d, 0x1c, 0x17, 0x15, 0x14, 0x13, 0x0f, 0x0e, 0x0b } } }; #define FREF 16 static void itd1000_set_lo(struct itd1000_state *state, u32 freq_khz) { int i, j; u32 plln, pllf; u64 tmp; plln = (freq_khz * 1000) / 2 / FREF; /* Compute the factional part times 1000 */ tmp = plln % 1000000; plln /= 1000000; tmp *= 1048576; do_div(tmp, 1000000); pllf = (u32) tmp; state->frequency = ((plln * 1000) + (pllf * 
1000)/1048576) * 2*FREF; itd_dbg("frequency: %dkHz (wanted) %dkHz (set), PLLF = %d, PLLN = %d\n", freq_khz, state->frequency, pllf, plln); itd1000_write_reg(state, PLLNH, 0x80); /* PLLNH */ itd1000_write_reg(state, PLLNL, plln & 0xff); itd1000_write_reg(state, PLLFH, (itd1000_read_reg(state, PLLFH) & 0xf0) | ((pllf >> 16) & 0x0f)); itd1000_write_reg(state, PLLFM, (pllf >> 8) & 0xff); itd1000_write_reg(state, PLLFL, (pllf >> 0) & 0xff); for (i = 0; i < ARRAY_SIZE(itd1000_fre_values); i++) { if (freq_khz <= itd1000_fre_values[i].freq) { itd_dbg("fre_values: %d\n", i); itd1000_write_reg(state, RFTR, itd1000_fre_values[i].values[0]); for (j = 0; j < 9; j++) itd1000_write_reg(state, RFST1+j, itd1000_fre_values[i].values[j+1]); break; } } itd1000_set_vco(state, freq_khz); } static int itd1000_set_parameters(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct itd1000_state *state = fe->tuner_priv; u8 pllcon1; itd1000_set_lo(state, c->frequency); itd1000_set_lpf_bw(state, c->symbol_rate); pllcon1 = itd1000_read_reg(state, PLLCON1) & 0x7f; itd1000_write_reg(state, PLLCON1, pllcon1 | (1 << 7)); itd1000_write_reg(state, PLLCON1, pllcon1); return 0; } static int itd1000_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct itd1000_state *state = fe->tuner_priv; *frequency = state->frequency; return 0; } static int itd1000_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { return 0; } static u8 itd1000_init_tab[][2] = { { PLLCON1, 0x65 }, /* Register does not change */ { PLLNH, 0x80 }, /* Bits [7:6] do not change */ { RESERVED_0X6D, 0x3b }, { VCO_CHP2_I2C, 0x12 }, { 0x72, 0xf9 }, /* No such regsister defined */ { RESERVED_0X73, 0xff }, { RESERVED_0X74, 0xb2 }, { RESERVED_0X75, 0xc7 }, { EXTGVBBRF, 0xf0 }, { DIVAGCCK, 0x80 }, { BBTR, 0xa0 }, { RESERVED_0X7E, 0x4f }, { 0x82, 0x88 }, /* No such regsister defined */ { 0x83, 0x80 }, /* No such regsister defined */ { 0x84, 0x80 }, /* No such regsister defined */ { 
RESERVED_0X85, 0x74 }, { RESERVED_0X86, 0xff }, { RESERVED_0X88, 0x02 }, { RESERVED_0X89, 0x16 }, { RFST0, 0x1f }, { RESERVED_0X94, 0x66 }, { RESERVED_0X95, 0x66 }, { RESERVED_0X96, 0x77 }, { RESERVED_0X97, 0x99 }, { RESERVED_0X98, 0xff }, { RESERVED_0X99, 0xfc }, { RESERVED_0X9A, 0xba }, { RESERVED_0X9B, 0xaa }, }; static u8 itd1000_reinit_tab[][2] = { { VCO_CHP1_I2C, 0x8a }, { BW, 0x87 }, { GVBB_I2C, 0x03 }, { BBGVMIN, 0x03 }, { CON1, 0x2e }, }; static int itd1000_init(struct dvb_frontend *fe) { struct itd1000_state *state = fe->tuner_priv; int i; for (i = 0; i < ARRAY_SIZE(itd1000_init_tab); i++) itd1000_write_reg(state, itd1000_init_tab[i][0], itd1000_init_tab[i][1]); for (i = 0; i < ARRAY_SIZE(itd1000_reinit_tab); i++) itd1000_write_reg(state, itd1000_reinit_tab[i][0], itd1000_reinit_tab[i][1]); return 0; } static int itd1000_sleep(struct dvb_frontend *fe) { return 0; } static int itd1000_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static const struct dvb_tuner_ops itd1000_tuner_ops = { .info = { .name = "Integrant ITD1000", .frequency_min = 950000, .frequency_max = 2150000, .frequency_step = 125, /* kHz for QPSK frontends */ }, .release = itd1000_release, .init = itd1000_init, .sleep = itd1000_sleep, .set_params = itd1000_set_parameters, .get_frequency = itd1000_get_frequency, .get_bandwidth = itd1000_get_bandwidth }; struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct itd1000_config *cfg) { struct itd1000_state *state = NULL; u8 i = 0; state = kzalloc(sizeof(struct itd1000_state), GFP_KERNEL); if (state == NULL) return NULL; state->cfg = cfg; state->i2c = i2c; i = itd1000_read_reg(state, 0); if (i != 0) { kfree(state); return NULL; } itd_info("successfully identified (ID: %d)\n", i); memset(state->shadow, 0xff, sizeof(state->shadow)); for (i = 0x65; i < 0x9c; i++) state->shadow[i] = itd1000_read_reg(state, i); memcpy(&fe->ops.tuner_ops, &itd1000_tuner_ops, sizeof(struct 
dvb_tuner_ops)); fe->tuner_priv = state; return fe; } EXPORT_SYMBOL(itd1000_attach); MODULE_AUTHOR("Patrick Boettcher <pb@linuxtv.org>"); MODULE_DESCRIPTION("Integrant ITD1000 driver"); MODULE_LICENSE("GPL");
gpl-2.0
corcor67/SMPL_M8_GPE
drivers/hwmon/emc1403.c
4161
10966
/* * emc1403.c - SMSC Thermal Driver * * Copyright (C) 2008 Intel Corp * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * TODO * - cache alarm and critical limit registers * - add emc1404 support */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/sysfs.h> #include <linux/mutex.h> #define THERMAL_PID_REG 0xfd #define THERMAL_SMSC_ID_REG 0xfe #define THERMAL_REVISION_REG 0xff struct thermal_data { struct device *hwmon_dev; struct mutex mutex; /* * Cache the hyst value so we don't keep re-reading it. In theory * we could cache it forever as nobody else should be writing it. 
*/ u8 cached_hyst; unsigned long hyst_valid; }; static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); int retval = i2c_smbus_read_byte_data(client, sda->index); if (retval < 0) return retval; return sprintf(buf, "%d000\n", retval); } static ssize_t show_bit(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr); int retval = i2c_smbus_read_byte_data(client, sda->nr); if (retval < 0) return retval; retval &= sda->index; return sprintf(buf, "%d\n", retval ? 1 : 0); } static ssize_t store_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); unsigned long val; int retval; if (kstrtoul(buf, 10, &val)) return -EINVAL; retval = i2c_smbus_write_byte_data(client, sda->index, DIV_ROUND_CLOSEST(val, 1000)); if (retval < 0) return retval; return count; } static ssize_t store_bit(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct thermal_data *data = i2c_get_clientdata(client); struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr); unsigned long val; int retval; if (kstrtoul(buf, 10, &val)) return -EINVAL; mutex_lock(&data->mutex); retval = i2c_smbus_read_byte_data(client, sda->nr); if (retval < 0) goto fail; retval &= ~sda->index; if (val) retval |= sda->index; retval = i2c_smbus_write_byte_data(client, sda->index, retval); if (retval == 0) retval = count; fail: mutex_unlock(&data->mutex); return retval; } static ssize_t show_hyst(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct thermal_data *data 
= i2c_get_clientdata(client); struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); int retval; int hyst; retval = i2c_smbus_read_byte_data(client, sda->index); if (retval < 0) return retval; if (time_after(jiffies, data->hyst_valid)) { hyst = i2c_smbus_read_byte_data(client, 0x21); if (hyst < 0) return retval; data->cached_hyst = hyst; data->hyst_valid = jiffies + HZ; } return sprintf(buf, "%d000\n", retval - data->cached_hyst); } static ssize_t store_hyst(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct thermal_data *data = i2c_get_clientdata(client); struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); int retval; int hyst; unsigned long val; if (kstrtoul(buf, 10, &val)) return -EINVAL; mutex_lock(&data->mutex); retval = i2c_smbus_read_byte_data(client, sda->index); if (retval < 0) goto fail; hyst = val - retval * 1000; hyst = DIV_ROUND_CLOSEST(hyst, 1000); if (hyst < 0 || hyst > 255) { retval = -ERANGE; goto fail; } retval = i2c_smbus_write_byte_data(client, 0x21, hyst); if (retval == 0) { retval = count; data->cached_hyst = hyst; data->hyst_valid = jiffies + HZ; } fail: mutex_unlock(&data->mutex); return retval; } /* * Sensors. We pass the actual i2c register to the methods. 
*/ static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x06); static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x05); static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x20); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0x00); static SENSOR_DEVICE_ATTR_2(temp1_min_alarm, S_IRUGO, show_bit, NULL, 0x36, 0x01); static SENSOR_DEVICE_ATTR_2(temp1_max_alarm, S_IRUGO, show_bit, NULL, 0x35, 0x01); static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO, show_bit, NULL, 0x37, 0x01); static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR, show_hyst, store_hyst, 0x20); static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x08); static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x07); static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x19); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0x01); static SENSOR_DEVICE_ATTR_2(temp2_min_alarm, S_IRUGO, show_bit, NULL, 0x36, 0x02); static SENSOR_DEVICE_ATTR_2(temp2_max_alarm, S_IRUGO, show_bit, NULL, 0x35, 0x02); static SENSOR_DEVICE_ATTR_2(temp2_crit_alarm, S_IRUGO, show_bit, NULL, 0x37, 0x02); static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO | S_IWUSR, show_hyst, store_hyst, 0x19); static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x16); static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x15); static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x1A); static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 0x23); static SENSOR_DEVICE_ATTR_2(temp3_min_alarm, S_IRUGO, show_bit, NULL, 0x36, 0x04); static SENSOR_DEVICE_ATTR_2(temp3_max_alarm, S_IRUGO, show_bit, NULL, 0x35, 0x04); static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO, show_bit, NULL, 0x37, 0x04); static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR, 
show_hyst, store_hyst, 0x1A); static SENSOR_DEVICE_ATTR_2(power_state, S_IRUGO | S_IWUSR, show_bit, store_bit, 0x03, 0x40); static struct attribute *mid_att_thermal[] = { &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_crit.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_crit.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp3_min_alarm.dev_attr.attr, &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, &sensor_dev_attr_power_state.dev_attr.attr, NULL }; static const struct attribute_group m_thermal_gr = { .attrs = mid_att_thermal }; static int emc1403_detect(struct i2c_client *client, struct i2c_board_info *info) { int id; /* Check if thermal chip is SMSC and EMC1403 or EMC1423 */ id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG); if (id != 0x5d) return -ENODEV; id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG); switch (id) { case 0x21: strlcpy(info->type, "emc1403", I2C_NAME_SIZE); break; case 0x23: strlcpy(info->type, "emc1423", I2C_NAME_SIZE); break; /* * Note: 0x25 is the 1404 which is very similar and this * driver could be extended */ default: return -ENODEV; } id = i2c_smbus_read_byte_data(client, 
THERMAL_REVISION_REG); if (id != 0x01) return -ENODEV; return 0; } static int emc1403_probe(struct i2c_client *client, const struct i2c_device_id *id) { int res; struct thermal_data *data; data = kzalloc(sizeof(struct thermal_data), GFP_KERNEL); if (data == NULL) { dev_warn(&client->dev, "out of memory"); return -ENOMEM; } i2c_set_clientdata(client, data); mutex_init(&data->mutex); data->hyst_valid = jiffies - 1; /* Expired */ res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr); if (res) { dev_warn(&client->dev, "create group failed\n"); goto thermal_error1; } data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { res = PTR_ERR(data->hwmon_dev); dev_warn(&client->dev, "register hwmon dev failed\n"); goto thermal_error2; } dev_info(&client->dev, "EMC1403 Thermal chip found\n"); return res; thermal_error2: sysfs_remove_group(&client->dev.kobj, &m_thermal_gr); thermal_error1: kfree(data); return res; } static int emc1403_remove(struct i2c_client *client) { struct thermal_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &m_thermal_gr); kfree(data); return 0; } static const unsigned short emc1403_address_list[] = { 0x18, 0x29, 0x4c, 0x4d, I2C_CLIENT_END }; static const struct i2c_device_id emc1403_idtable[] = { { "emc1403", 0 }, { "emc1423", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, emc1403_idtable); static struct i2c_driver sensor_emc1403 = { .class = I2C_CLASS_HWMON, .driver = { .name = "emc1403", }, .detect = emc1403_detect, .probe = emc1403_probe, .remove = emc1403_remove, .id_table = emc1403_idtable, .address_list = emc1403_address_list, }; module_i2c_driver(sensor_emc1403); MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com"); MODULE_DESCRIPTION("emc1403 Thermal Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
acklinr/omap-android-3.4
mm/mempool.c
4417
10659
/* * linux/mm/mempool.c * * memory buffer pool support. Such pools are mostly used * for guaranteed, deadlock-free memory allocations during * extreme VM load. * * started by Ingo Molnar, Copyright (C) 2001 */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/mempool.h> #include <linux/blkdev.h> #include <linux/writeback.h> static void add_element(mempool_t *pool, void *element) { BUG_ON(pool->curr_nr >= pool->min_nr); pool->elements[pool->curr_nr++] = element; } static void *remove_element(mempool_t *pool) { BUG_ON(pool->curr_nr <= 0); return pool->elements[--pool->curr_nr]; } /** * mempool_destroy - deallocate a memory pool * @pool: pointer to the memory pool which was allocated via * mempool_create(). * * Free all reserved elements in @pool and @pool itself. This function * only sleeps if the free_fn() function sleeps. */ void mempool_destroy(mempool_t *pool) { while (pool->curr_nr) { void *element = remove_element(pool); pool->free(element, pool->pool_data); } kfree(pool->elements); kfree(pool); } EXPORT_SYMBOL(mempool_destroy); /** * mempool_create - create a memory pool * @min_nr: the minimum number of elements guaranteed to be * allocated for this pool. * @alloc_fn: user-defined element-allocation function. * @free_fn: user-defined element-freeing function. * @pool_data: optional private data available to the user-defined functions. * * this function creates and allocates a guaranteed size, preallocated * memory pool. The pool can be used from the mempool_alloc() and mempool_free() * functions. This function might sleep. Both the alloc_fn() and the free_fn() * functions might sleep - as long as the mempool_alloc() function is not called * from IRQ contexts. 
*/ mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data) { return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,-1); } EXPORT_SYMBOL(mempool_create); mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, int node_id) { mempool_t *pool; pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id); if (!pool) return NULL; pool->elements = kmalloc_node(min_nr * sizeof(void *), GFP_KERNEL, node_id); if (!pool->elements) { kfree(pool); return NULL; } spin_lock_init(&pool->lock); pool->min_nr = min_nr; pool->pool_data = pool_data; init_waitqueue_head(&pool->wait); pool->alloc = alloc_fn; pool->free = free_fn; /* * First pre-allocate the guaranteed number of buffers. */ while (pool->curr_nr < pool->min_nr) { void *element; element = pool->alloc(GFP_KERNEL, pool->pool_data); if (unlikely(!element)) { mempool_destroy(pool); return NULL; } add_element(pool, element); } return pool; } EXPORT_SYMBOL(mempool_create_node); /** * mempool_resize - resize an existing memory pool * @pool: pointer to the memory pool which was allocated via * mempool_create(). * @new_min_nr: the new minimum number of elements guaranteed to be * allocated for this pool. * @gfp_mask: the usual allocation bitmask. * * This function shrinks/grows the pool. In the case of growing, * it cannot be guaranteed that the pool will be grown to the new * size immediately, but new mempool_free() calls will refill it. * * Note, the caller must guarantee that no mempool_destroy is called * while this function is running. mempool_alloc() & mempool_free() * might be called (eg. from IRQ contexts) while this function executes. 
*/ int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask) { void *element; void **new_elements; unsigned long flags; BUG_ON(new_min_nr <= 0); spin_lock_irqsave(&pool->lock, flags); if (new_min_nr <= pool->min_nr) { while (new_min_nr < pool->curr_nr) { element = remove_element(pool); spin_unlock_irqrestore(&pool->lock, flags); pool->free(element, pool->pool_data); spin_lock_irqsave(&pool->lock, flags); } pool->min_nr = new_min_nr; goto out_unlock; } spin_unlock_irqrestore(&pool->lock, flags); /* Grow the pool */ new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask); if (!new_elements) return -ENOMEM; spin_lock_irqsave(&pool->lock, flags); if (unlikely(new_min_nr <= pool->min_nr)) { /* Raced, other resize will do our work */ spin_unlock_irqrestore(&pool->lock, flags); kfree(new_elements); goto out; } memcpy(new_elements, pool->elements, pool->curr_nr * sizeof(*new_elements)); kfree(pool->elements); pool->elements = new_elements; pool->min_nr = new_min_nr; while (pool->curr_nr < pool->min_nr) { spin_unlock_irqrestore(&pool->lock, flags); element = pool->alloc(gfp_mask, pool->pool_data); if (!element) goto out; spin_lock_irqsave(&pool->lock, flags); if (pool->curr_nr < pool->min_nr) { add_element(pool, element); } else { spin_unlock_irqrestore(&pool->lock, flags); pool->free(element, pool->pool_data); /* Raced */ goto out; } } out_unlock: spin_unlock_irqrestore(&pool->lock, flags); out: return 0; } EXPORT_SYMBOL(mempool_resize); /** * mempool_alloc - allocate an element from a specific memory pool * @pool: pointer to the memory pool which was allocated via * mempool_create(). * @gfp_mask: the usual allocation bitmask. * * this function only sleeps if the alloc_fn() function sleeps or * returns NULL. Note that due to preallocation, this function * *never* fails when called from process contexts. (it might * fail if called from an IRQ context.) 
*/ void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask) { void *element; unsigned long flags; wait_queue_t wait; gfp_t gfp_temp; might_sleep_if(gfp_mask & __GFP_WAIT); gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */ gfp_mask |= __GFP_NOWARN; /* failures are OK */ gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO); repeat_alloc: element = pool->alloc(gfp_temp, pool->pool_data); if (likely(element != NULL)) return element; spin_lock_irqsave(&pool->lock, flags); if (likely(pool->curr_nr)) { element = remove_element(pool); spin_unlock_irqrestore(&pool->lock, flags); /* paired with rmb in mempool_free(), read comment there */ smp_wmb(); return element; } /* * We use gfp mask w/o __GFP_WAIT or IO for the first round. If * alloc failed with that and @pool was empty, retry immediately. */ if (gfp_temp != gfp_mask) { spin_unlock_irqrestore(&pool->lock, flags); gfp_temp = gfp_mask; goto repeat_alloc; } /* We must not sleep if !__GFP_WAIT */ if (!(gfp_mask & __GFP_WAIT)) { spin_unlock_irqrestore(&pool->lock, flags); return NULL; } /* Let's wait for someone else to return an element to @pool */ init_wait(&wait); prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); spin_unlock_irqrestore(&pool->lock, flags); /* * FIXME: this should be io_schedule(). The timeout is there as a * workaround for some DM problems in 2.6.18. */ io_schedule_timeout(5*HZ); finish_wait(&pool->wait, &wait); goto repeat_alloc; } EXPORT_SYMBOL(mempool_alloc); /** * mempool_free - return an element to the pool. * @element: pool element pointer. * @pool: pointer to the memory pool which was allocated via * mempool_create(). * * this function only sleeps if the free_fn() function sleeps. */ void mempool_free(void *element, mempool_t *pool) { unsigned long flags; if (unlikely(element == NULL)) return; /* * Paired with the wmb in mempool_alloc(). The preceding read is * for @element and the following @pool->curr_nr. 
This ensures * that the visible value of @pool->curr_nr is from after the * allocation of @element. This is necessary for fringe cases * where @element was passed to this task without going through * barriers. * * For example, assume @p is %NULL at the beginning and one task * performs "p = mempool_alloc(...);" while another task is doing * "while (!p) cpu_relax(); mempool_free(p, ...);". This function * may end up using curr_nr value which is from before allocation * of @p without the following rmb. */ smp_rmb(); /* * For correctness, we need a test which is guaranteed to trigger * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr * without locking achieves that and refilling as soon as possible * is desirable. * * Because curr_nr visible here is always a value after the * allocation of @element, any task which decremented curr_nr below * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets * incremented to min_nr afterwards. If curr_nr gets incremented * to min_nr after the allocation of @element, the elements * allocated after that are subject to the same guarantee. * * Waiters happen iff curr_nr is 0 and the above guarantee also * ensures that there will be frees which return elements to the * pool waking up the waiters. */ if (pool->curr_nr < pool->min_nr) { spin_lock_irqsave(&pool->lock, flags); if (pool->curr_nr < pool->min_nr) { add_element(pool, element); spin_unlock_irqrestore(&pool->lock, flags); wake_up(&pool->wait); return; } spin_unlock_irqrestore(&pool->lock, flags); } pool->free(element, pool->pool_data); } EXPORT_SYMBOL(mempool_free); /* * A commonly used alloc and free fn. 
*/ void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) { struct kmem_cache *mem = pool_data; return kmem_cache_alloc(mem, gfp_mask); } EXPORT_SYMBOL(mempool_alloc_slab); void mempool_free_slab(void *element, void *pool_data) { struct kmem_cache *mem = pool_data; kmem_cache_free(mem, element); } EXPORT_SYMBOL(mempool_free_slab); /* * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory * specified by pool_data */ void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) { size_t size = (size_t)pool_data; return kmalloc(size, gfp_mask); } EXPORT_SYMBOL(mempool_kmalloc); void mempool_kfree(void *element, void *pool_data) { kfree(element); } EXPORT_SYMBOL(mempool_kfree); /* * A simple mempool-backed page allocator that allocates pages * of the order specified by pool_data. */ void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data) { int order = (int)(long)pool_data; return alloc_pages(gfp_mask, order); } EXPORT_SYMBOL(mempool_alloc_pages); void mempool_free_pages(void *element, void *pool_data) { int order = (int)(long)pool_data; __free_pages(element, order); } EXPORT_SYMBOL(mempool_free_pages);
gpl-2.0
javifo/SM-G920F-kernel
drivers/scsi/isci/phy.c
4673
47134
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "isci.h" #include "host.h" #include "phy.h" #include "scu_event_codes.h" #include "probe_roms.h" #undef C #define C(a) (#a) static const char *phy_state_name(enum sci_phy_states state) { static const char * const strings[] = PHY_STATES; return strings[state]; } #undef C /* Maximum arbitration wait time in micro-seconds */ #define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700) enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy) { return iphy->max_negotiated_speed; } static struct isci_host *phy_to_host(struct isci_phy *iphy) { struct isci_phy *table = iphy - iphy->phy_index; struct isci_host *ihost = container_of(table, typeof(*ihost), phys[0]); return ihost; } static struct device *sciphy_to_dev(struct isci_phy *iphy) { return &phy_to_host(iphy)->pdev->dev; } static enum sci_status sci_phy_transport_layer_initialization(struct isci_phy *iphy, struct scu_transport_layer_registers __iomem *reg) { u32 tl_control; iphy->transport_layer_registers = reg; writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX, &iphy->transport_layer_registers->stp_rni); /* * Hardware team recommends that we enable the STP prefetch for all * transports */ tl_control = 
readl(&iphy->transport_layer_registers->control); tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH); writel(tl_control, &iphy->transport_layer_registers->control); return SCI_SUCCESS; } static enum sci_status sci_phy_link_layer_initialization(struct isci_phy *iphy, struct scu_link_layer_registers __iomem *llr) { struct isci_host *ihost = iphy->owning_port->owning_controller; struct sci_phy_user_params *phy_user; struct sci_phy_oem_params *phy_oem; int phy_idx = iphy->phy_index; struct sci_phy_cap phy_cap; u32 phy_configuration; u32 parity_check = 0; u32 parity_count = 0; u32 llctl, link_rate; u32 clksm_value = 0; u32 sp_timeouts = 0; phy_user = &ihost->user_parameters.phys[phy_idx]; phy_oem = &ihost->oem_parameters.phys[phy_idx]; iphy->link_layer_registers = llr; /* Set our IDENTIFY frame data */ #define SCI_END_DEVICE 0x01 writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) | SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) | SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) | SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) | SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE), &llr->transmit_identification); /* Write the device SAS Address */ writel(0xFEDCBA98, &llr->sas_device_name_high); writel(phy_idx, &llr->sas_device_name_low); /* Write the source SAS Address */ writel(phy_oem->sas_address.high, &llr->source_sas_address_high); writel(phy_oem->sas_address.low, &llr->source_sas_address_low); /* Clear and Set the PHY Identifier */ writel(0, &llr->identify_frame_phy_id); writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id); /* Change the initial state of the phy configuration register */ phy_configuration = readl(&llr->phy_configuration); /* Hold OOB state machine in reset */ phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); writel(phy_configuration, &llr->phy_configuration); /* Configure the SNW capabilities */ phy_cap.all = 0; phy_cap.start = 1; phy_cap.gen3_no_ssc = 1; phy_cap.gen2_no_ssc = 1; phy_cap.gen1_no_ssc = 1; if (ihost->oem_parameters.controller.do_enable_ssc) { struct 
scu_afe_registers __iomem *afe = &ihost->scu_registers->afe; struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_idx]; struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); bool en_sas = false; bool en_sata = false; u32 sas_type = 0; u32 sata_spread = 0x2; u32 sas_spread = 0x2; phy_cap.gen3_ssc = 1; phy_cap.gen2_ssc = 1; phy_cap.gen1_ssc = 1; if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1) en_sas = en_sata = true; else { sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level; sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level; if (sata_spread) en_sata = true; if (sas_spread) { en_sas = true; sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type; } } if (en_sas) { u32 reg; reg = readl(&xcvr->afe_xcvr_control0); reg |= (0x00100000 | (sas_type << 19)); writel(reg, &xcvr->afe_xcvr_control0); reg = readl(&xcvr->afe_tx_ssc_control); reg |= sas_spread << 8; writel(reg, &xcvr->afe_tx_ssc_control); } if (en_sata) { u32 reg; reg = readl(&xcvr->afe_tx_ssc_control); reg |= sata_spread; writel(reg, &xcvr->afe_tx_ssc_control); reg = readl(&llr->stp_control); reg |= 1 << 12; writel(reg, &llr->stp_control); } } /* The SAS specification indicates that the phy_capabilities that * are transmitted shall have an even parity. Calculate the parity. */ parity_check = phy_cap.all; while (parity_check != 0) { if (parity_check & 0x1) parity_count++; parity_check >>= 1; } /* If parity indicates there are an odd number of bits set, then * set the parity bit to 1 in the phy capabilities. 
*/ if ((parity_count % 2) != 0) phy_cap.parity = 1; writel(phy_cap.all, &llr->phy_capabilities); /* Set the enable spinup period but disable the ability to send * notify enable spinup */ writel(SCU_ENSPINUP_GEN_VAL(COUNT, phy_user->notify_enable_spin_up_insertion_frequency), &llr->notify_enable_spinup_control); /* Write the ALIGN Insertion Ferequency for connected phy and * inpendent of connected state */ clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED, phy_user->in_connection_align_insertion_frequency); clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL, phy_user->align_insertion_frequency); writel(clksm_value, &llr->clock_skew_management); if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) { writel(0x04210400, &llr->afe_lookup_table_control); writel(0x020A7C05, &llr->sas_primitive_timeout); } else writel(0x02108421, &llr->afe_lookup_table_control); llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, (u8)ihost->user_parameters.no_outbound_task_timeout); switch (phy_user->max_speed_generation) { case SCIC_SDS_PARM_GEN3_SPEED: link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3; break; case SCIC_SDS_PARM_GEN2_SPEED: link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2; break; default: link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1; break; } llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); writel(llctl, &llr->link_layer_control); sp_timeouts = readl(&llr->sas_phy_timeouts); /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); /* Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can * lock with 3Gb drive when SCU max rate is set to 1.5Gb. */ sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); writel(sp_timeouts, &llr->sas_phy_timeouts); if (is_a2(ihost->pdev)) { /* Program the max ARB time for the PHY to 700us so we * inter-operate with the PMC expander which shuts down * PHYs if the expander PHY generates too many breaks. 
* This time value will guarantee that the initiator PHY * will generate the break. */ writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME, &llr->maximum_arbitration_wait_timer_timeout); } /* Disable link layer hang detection, rely on the OS timeout for * I/O timeouts. */ writel(0, &llr->link_layer_hang_detection_timeout); /* We can exit the initial state to the stopped state */ sci_change_state(&iphy->sm, SCI_PHY_STOPPED); return SCI_SUCCESS; } static void phy_sata_timeout(unsigned long data) { struct sci_timer *tmr = (struct sci_timer *)data; struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer); struct isci_host *ihost = iphy->owning_port->owning_controller; unsigned long flags; spin_lock_irqsave(&ihost->scic_lock, flags); if (tmr->cancel) goto done; dev_dbg(sciphy_to_dev(iphy), "%s: SCIC SDS Phy 0x%p did not receive signature fis before " "timeout.\n", __func__, iphy); sci_change_state(&iphy->sm, SCI_PHY_STARTING); done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } /** * This method returns the port currently containing this phy. If the phy is * currently contained by the dummy port, then the phy is considered to not * be part of a port. * @sci_phy: This parameter specifies the phy for which to retrieve the * containing port. * * This method returns a handle to a port that contains the supplied phy. * NULL This value is returned if the phy is not part of a real * port (i.e. it's contained in the dummy port). !NULL All other * values indicate a handle/pointer to the port containing the phy. */ struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy) { struct isci_port *iport = iphy->owning_port; if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT) return NULL; return iphy->owning_port; } /** * This method will assign a port to the phy object. * @out]: iphy This parameter specifies the phy for which to assign a port * object. 
* * */ void sci_phy_set_port( struct isci_phy *iphy, struct isci_port *iport) { iphy->owning_port = iport; if (iphy->bcn_received_while_port_unassigned) { iphy->bcn_received_while_port_unassigned = false; sci_port_broadcast_change_received(iphy->owning_port, iphy); } } enum sci_status sci_phy_initialize(struct isci_phy *iphy, struct scu_transport_layer_registers __iomem *tl, struct scu_link_layer_registers __iomem *ll) { /* Perfrom the initialization of the TL hardware */ sci_phy_transport_layer_initialization(iphy, tl); /* Perofrm the initialization of the PE hardware */ sci_phy_link_layer_initialization(iphy, ll); /* There is nothing that needs to be done in this state just * transition to the stopped state */ sci_change_state(&iphy->sm, SCI_PHY_STOPPED); return SCI_SUCCESS; } /** * This method assigns the direct attached device ID for this phy. * * @iphy The phy for which the direct attached device id is to * be assigned. * @device_id The direct attached device ID to assign to the phy. * This will either be the RNi for the device or an invalid RNi if there * is no current device assigned to the phy. 
 */
void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
{
	u32 tl_control;

	writel(device_id, &iphy->transport_layer_registers->stp_rni);

	/*
	 * The read should guarantee that the first write gets posted
	 * before the next write
	 */
	tl_control = readl(&iphy->transport_layer_registers->control);
	tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE);
	writel(tl_control, &iphy->transport_layer_registers->control);
}

/* Suspend the protocol engine for this phy and invalidate its direct
 * attached device id so no further traffic is mapped through it.
 */
static void sci_phy_suspend(struct isci_phy *iphy)
{
	u32 scu_sas_pcfg_value;

	scu_sas_pcfg_value =
		readl(&iphy->link_layer_registers->phy_configuration);
	scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
	writel(scu_sas_pcfg_value,
	       &iphy->link_layer_registers->phy_configuration);

	sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}

/* Clear the protocol engine suspend condition so the phy can pass traffic. */
void sci_phy_resume(struct isci_phy *iphy)
{
	u32 scu_sas_pcfg_value;

	scu_sas_pcfg_value =
		readl(&iphy->link_layer_registers->phy_configuration);
	scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
	writel(scu_sas_pcfg_value,
	       &iphy->link_layer_registers->phy_configuration);
}

/* Read this phy's own (source) SAS address out of the link layer registers. */
void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
{
	sas->high = readl(&iphy->link_layer_registers->source_sas_address_high);
	sas->low = readl(&iphy->link_layer_registers->source_sas_address_low);
}

/* Copy the attached device's SAS address out of the last received
 * IDENTIFY address frame.
 */
void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
{
	struct sas_identify_frame *iaf;

	iaf = &iphy->frame_rcvd.iaf;
	memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
}

/* Snapshot the protocols this phy advertises in its transmit IDENTIFY frame. */
void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
{
	proto->all = readl(&iphy->link_layer_registers->transmit_identification);
}

/* Start the phy; only legal from the STOPPED state. */
enum sci_status sci_phy_start(struct isci_phy *iphy)
{
	enum sci_phy_states state = iphy->sm.current_state_id;

	if (state != SCI_PHY_STOPPED) {
		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
			__func__, phy_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(&iphy->sm, SCI_PHY_STARTING);
	return SCI_SUCCESS;
}

/* Stop the phy; legal from any starting substate or READY.
 * NOTE(review): SCI_PHY_SUB_AWAIT_IAF_UF is absent from the legal-state
 * list — presumably intentional, but worth confirming.
 */
enum sci_status sci_phy_stop(struct isci_phy *iphy)
{
	enum sci_phy_states state = iphy->sm.current_state_id;

	switch (state) {
	case SCI_PHY_SUB_INITIAL:
	case SCI_PHY_SUB_AWAIT_OSSP_EN:
	case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_SAS_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
	case SCI_PHY_SUB_FINAL:
	case SCI_PHY_READY:
		break;
	default:
		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
			__func__, phy_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
	return SCI_SUCCESS;
}

/* Request a phy reset; only legal from the READY state. */
enum sci_status sci_phy_reset(struct isci_phy *iphy)
{
	enum sci_phy_states state = iphy->sm.current_state_id;

	if (state != SCI_PHY_READY) {
		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
			__func__, phy_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(&iphy->sm, SCI_PHY_RESETTING);
	return SCI_SUCCESS;
}

/* Called when the power control logic grants this phy its power-up slot.
 * SAS: enable the notify-enable-spinup primitive and finish the substate
 * machine.  SATA: release spinup hold and restart OOB, then wait for the
 * SATA phy-enable event.
 */
enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
{
	enum sci_phy_states state = iphy->sm.current_state_id;

	switch (state) {
	case SCI_PHY_SUB_AWAIT_SAS_POWER: {
		u32 enable_spinup;

		enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
		enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE);
		writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control);

		/* Change state to the final state this substate machine has run to completion */
		sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);

		return SCI_SUCCESS;
	}
	case SCI_PHY_SUB_AWAIT_SATA_POWER: {
		u32 scu_sas_pcfg_value;

		/* Release the spinup hold state and reset the OOB state machine */
		scu_sas_pcfg_value =
			readl(&iphy->link_layer_registers->phy_configuration);
		scu_sas_pcfg_value &=
			~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
		scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
		writel(scu_sas_pcfg_value,
		       &iphy->link_layer_registers->phy_configuration);

		/* Now restart the OOB operation */
		scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
		scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
		writel(scu_sas_pcfg_value,
		       &iphy->link_layer_registers->phy_configuration);

		/* Change state to the final state this substate machine has run to completion */
		sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN);

		return SCI_SUCCESS;
	}
	default:
		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
			__func__, phy_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
{
	/* continue the link training for the phy as if it were a SAS PHY
	 * instead of a SATA PHY. This is done because the completion queue had a SAS
	 * PHY DETECTED event when the state machine was expecting a SATA PHY event.
	 */
	u32 phy_control;

	phy_control = readl(&iphy->link_layer_registers->phy_configuration);
	phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
	writel(phy_control,
	       &iphy->link_layer_registers->phy_configuration);

	sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);

	iphy->protocol = SAS_PROTOCOL_SSP;
}

static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
{
	/* This method continues the link training for the phy as if it were a SATA PHY
	 * instead of a SAS PHY. This is done because the completion queue had a SATA
	 * SPINUP HOLD event when the state machine was expecting a SAS PHY event.
	 */
	sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);

	iphy->protocol = SAS_PROTOCOL_SATA;
}

/**
 * sci_phy_complete_link_training - perform processing common to
 * all protocols upon completion of link training.
 * @iphy: This parameter specifies the phy object for which link training
 * has completed.
 * @max_link_rate: This parameter specifies the maximum link rate to be
 * associated with this phy.
 * @next_state: This parameter specifies the next state for the phy's starting
 * sub-state machine.
* */ static void sci_phy_complete_link_training(struct isci_phy *iphy, enum sas_linkrate max_link_rate, u32 next_state) { iphy->max_negotiated_speed = max_link_rate; sci_change_state(&iphy->sm, next_state); } static const char *phy_event_name(u32 event_code) { switch (scu_get_event_code(event_code)) { case SCU_EVENT_PORT_SELECTOR_DETECTED: return "port selector"; case SCU_EVENT_SENT_PORT_SELECTION: return "port selection"; case SCU_EVENT_HARD_RESET_TRANSMITTED: return "tx hard reset"; case SCU_EVENT_HARD_RESET_RECEIVED: return "rx hard reset"; case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: return "identify timeout"; case SCU_EVENT_LINK_FAILURE: return "link fail"; case SCU_EVENT_SATA_SPINUP_HOLD: return "sata spinup hold"; case SCU_EVENT_SAS_15_SSC: case SCU_EVENT_SAS_15: return "sas 1.5"; case SCU_EVENT_SAS_30_SSC: case SCU_EVENT_SAS_30: return "sas 3.0"; case SCU_EVENT_SAS_60_SSC: case SCU_EVENT_SAS_60: return "sas 6.0"; case SCU_EVENT_SATA_15_SSC: case SCU_EVENT_SATA_15: return "sata 1.5"; case SCU_EVENT_SATA_30_SSC: case SCU_EVENT_SATA_30: return "sata 3.0"; case SCU_EVENT_SATA_60_SSC: case SCU_EVENT_SATA_60: return "sata 6.0"; case SCU_EVENT_SAS_PHY_DETECTED: return "sas detect"; case SCU_EVENT_SATA_PHY_DETECTED: return "sata detect"; default: return "unknown"; } } #define phy_event_dbg(iphy, state, code) \ dev_dbg(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \ phy_to_host(iphy)->id, iphy->phy_index, \ phy_state_name(state), phy_event_name(code), code) #define phy_event_warn(iphy, state, code) \ dev_warn(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \ phy_to_host(iphy)->id, iphy->phy_index, \ phy_state_name(state), phy_event_name(code), code) void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout) { u32 val; /* Extend timeout */ val = readl(&iphy->link_layer_registers->transmit_comsas_signal); val &= ~SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK); val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, 
timeout); writel(val, &iphy->link_layer_registers->transmit_comsas_signal); } enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) { enum sci_phy_states state = iphy->sm.current_state_id; switch (state) { case SCI_PHY_SUB_AWAIT_OSSP_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: sci_phy_start_sas_link_training(iphy); iphy->is_in_link_training = true; break; case SCU_EVENT_SATA_SPINUP_HOLD: sci_phy_start_sata_link_training(iphy); iphy->is_in_link_training = true; break; case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: /* Extend timeout value */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); /* Start the oob/sn state machine over again */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; default: phy_event_dbg(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: /* * Why is this being reported again by the controller? 
* We would re-enter this state so just stay here */ break; case SCU_EVENT_SAS_15: case SCU_EVENT_SAS_15_SSC: sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SAS_30: case SCU_EVENT_SAS_30_SSC: sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SAS_60: case SCU_EVENT_SAS_60_SSC: sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* * We were doing SAS PHY link training and received a SATA PHY event * continue OOB/SN as if this were a SATA PHY */ sci_phy_start_sata_link_training(iphy); break; case SCU_EVENT_LINK_FAILURE: /* Change the timeout value to default */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: /* Extend the timeout value */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); /* Start the oob/sn state machine over again */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; break; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_IAF_UF: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: /* Backup the state machine */ sci_phy_start_sas_link_training(iphy); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* We were doing SAS PHY link training and received a * SATA PHY event continue OOB/SN as if this were a * SATA PHY */ sci_phy_start_sata_link_training(iphy); break; case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: /* Extend the timeout value */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); /* Start the oob/sn state machine over again */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; case 
SCU_EVENT_LINK_FAILURE: scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); case SCU_EVENT_HARD_RESET_RECEIVED: /* Start the oob/sn state machine over again */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_SAS_POWER: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: /* Change the timeout value to default */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_SATA_POWER: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: /* Change the timeout value to default */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* These events are received every 10ms and are * expected while in this state */ break; case SCU_EVENT_SAS_PHY_DETECTED: /* There has been a change in the phy type before OOB/SN for the * SATA finished start down the SAS link traning path. 
*/ sci_phy_start_sas_link_training(iphy); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: /* Change the timeout value to default */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* These events might be received since we dont know how many may be in * the completion queue while waiting for power */ break; case SCU_EVENT_SATA_PHY_DETECTED: iphy->protocol = SAS_PROTOCOL_SATA; /* We have received the SATA PHY notification change state */ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); break; case SCU_EVENT_SAS_PHY_DETECTED: /* There has been a change in the phy type before OOB/SN for the * SATA finished start down the SAS link traning path. 
*/ sci_phy_start_sas_link_training(iphy); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SATA_PHY_DETECTED: /* * The hardware reports multiple SATA PHY detected events * ignore the extras */ break; case SCU_EVENT_SATA_15: case SCU_EVENT_SATA_15_SSC: sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_SATA_30: case SCU_EVENT_SATA_30_SSC: sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_SATA_60: case SCU_EVENT_SATA_60_SSC: sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_LINK_FAILURE: /* Change the timeout value to default */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_SAS_PHY_DETECTED: /* * There has been a change in the phy type before OOB/SN for the * SATA finished start down the SAS link traning path. 
*/ sci_phy_start_sas_link_training(iphy); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SATA_PHY_DETECTED: /* Backup the state machine */ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); break; case SCU_EVENT_LINK_FAILURE: /* Change the timeout value to default */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_READY: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: /* Set default timeout */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_BROADCAST_CHANGE: case SCU_EVENT_BROADCAST_SES: case SCU_EVENT_BROADCAST_RESERVED0: case SCU_EVENT_BROADCAST_RESERVED1: case SCU_EVENT_BROADCAST_EXPANDER: case SCU_EVENT_BROADCAST_AEN: /* Broadcast change received. Notify the port. 
*/ if (phy_get_non_dummy_port(iphy) != NULL) sci_port_broadcast_change_received(iphy->owning_port, iphy); else iphy->bcn_received_while_port_unassigned = true; break; case SCU_EVENT_BROADCAST_RESERVED3: case SCU_EVENT_BROADCAST_RESERVED4: default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE_INVALID_STATE; } return SCI_SUCCESS; case SCI_PHY_RESETTING: switch (scu_get_event_code(event_code)) { case SCU_EVENT_HARD_RESET_TRANSMITTED: /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE_INVALID_STATE; break; } return SCI_SUCCESS; default: dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", __func__, phy_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index) { enum sci_phy_states state = iphy->sm.current_state_id; struct isci_host *ihost = iphy->owning_port->owning_controller; enum sci_status result; unsigned long flags; switch (state) { case SCI_PHY_SUB_AWAIT_IAF_UF: { u32 *frame_words; struct sas_identify_frame iaf; result = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_words); if (result != SCI_SUCCESS) return result; sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32)); if (iaf.frame_type == 0) { u32 state; spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf)); spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); if (iaf.smp_tport) { /* We got the IAF for an expander PHY go to the final * state since there are no power requirements for * expander phys. 
*/ state = SCI_PHY_SUB_FINAL; } else { /* We got the IAF we can now go to the await spinup * semaphore state */ state = SCI_PHY_SUB_AWAIT_SAS_POWER; } sci_change_state(&iphy->sm, state); result = SCI_SUCCESS; } else dev_warn(sciphy_to_dev(iphy), "%s: PHY starting substate machine received " "unexpected frame id %x\n", __func__, frame_index); sci_controller_release_frame(ihost, frame_index); return result; } case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: { struct dev_to_host_fis *frame_header; u32 *fis_frame_data; result = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if (result != SCI_SUCCESS) return result; if ((frame_header->fis_type == FIS_REGD2H) && !(frame_header->status & ATA_BUSY)) { sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&fis_frame_data); spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); sci_controller_copy_sata_response(&iphy->frame_rcvd.fis, frame_header, fis_frame_data); spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); /* got IAF we can now go to the await spinup semaphore state */ sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL); result = SCI_SUCCESS; } else dev_warn(sciphy_to_dev(iphy), "%s: PHY starting substate machine received " "unexpected frame id %x\n", __func__, frame_index); /* Regardless of the result we are done with this frame with it */ sci_controller_release_frame(ihost, frame_index); return result; } default: dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", __func__, phy_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); /* This is just an temporary state go off to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN); } static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = 
container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; sci_controller_power_control_queue_insert(ihost, iphy); } static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; sci_controller_power_control_queue_remove(ihost, iphy); } static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; sci_controller_power_control_queue_insert(ihost, iphy); } static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; sci_controller_power_control_queue_remove(ihost, iphy); } static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); } static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_del_timer(&iphy->sata_timer); } static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); } static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_del_timer(&iphy->sata_timer); } static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, 
typeof(*iphy), sm); if (sci_port_link_detected(iphy->owning_port, iphy)) { /* * Clear the PE suspend condition so we can actually * receive SIG FIS * The hardware will not respond to the XRDY until the PE * suspend condition is cleared. */ sci_phy_resume(iphy); sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SIGNATURE_FIS_TIMEOUT); } else iphy->is_in_link_training = false; } static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_del_timer(&iphy->sata_timer); } static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); /* State machine has run to completion so exit out and change * the base state machine to the ready state */ sci_change_state(&iphy->sm, SCI_PHY_READY); } /** * * @sci_phy: This is the struct isci_phy object to stop. * * This method will stop the struct isci_phy object. This does not reset the * protocol engine it just suspends it and places it in a state where it will * not cause the end device to power up. 
none */ static void scu_link_layer_stop_protocol_engine( struct isci_phy *iphy) { u32 scu_sas_pcfg_value; u32 enable_spinup_value; /* Suspend the protocol engine and place it in a sata spinup hold state */ scu_sas_pcfg_value = readl(&iphy->link_layer_registers->phy_configuration); scu_sas_pcfg_value |= (SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) | SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD)); writel(scu_sas_pcfg_value, &iphy->link_layer_registers->phy_configuration); /* Disable the notify enable spinup primitives */ enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control); enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE); writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control); } static void scu_link_layer_start_oob(struct isci_phy *iphy) { struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers; u32 val; /** Reset OOB sequence - start */ val = readl(&ll->phy_configuration); val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE) | SCU_SAS_PCFG_GEN_BIT(HARD_RESET)); writel(val, &ll->phy_configuration); readl(&ll->phy_configuration); /* flush */ /** Reset OOB sequence - end */ /** Start OOB sequence - start */ val = readl(&ll->phy_configuration); val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); writel(val, &ll->phy_configuration); readl(&ll->phy_configuration); /* flush */ /** Start OOB sequence - end */ } /** * * * This method will transmit a hard reset request on the specified phy. The SCU * hardware requires that we reset the OOB state machine and set the hard reset * bit in the phy configuration register. We then must start OOB over with the * hard reset bit set. */ static void scu_link_layer_tx_hard_reset( struct isci_phy *iphy) { u32 phy_configuration_value; /* * SAS Phys must wait for the HARD_RESET_TX event notification to transition * to the starting state. 
*/ phy_configuration_value = readl(&iphy->link_layer_registers->phy_configuration); phy_configuration_value &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE)); phy_configuration_value |= (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) | SCU_SAS_PCFG_GEN_BIT(OOB_RESET)); writel(phy_configuration_value, &iphy->link_layer_registers->phy_configuration); /* Now take the OOB state machine out of reset */ phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET); writel(phy_configuration_value, &iphy->link_layer_registers->phy_configuration); } static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_port *iport = iphy->owning_port; struct isci_host *ihost = iport->owning_controller; /* * @todo We need to get to the controller to place this PE in a * reset state */ sci_del_timer(&iphy->sata_timer); scu_link_layer_stop_protocol_engine(iphy); if (iphy->sm.previous_state_id != SCI_PHY_INITIAL) sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy); } static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_port *iport = iphy->owning_port; struct isci_host *ihost = iport->owning_controller; scu_link_layer_stop_protocol_engine(iphy); scu_link_layer_start_oob(iphy); /* We don't know what kind of phy we are going to be just yet */ iphy->protocol = SAS_PROTOCOL_NONE; iphy->bcn_received_while_port_unassigned = false; if (iphy->sm.previous_state_id == SCI_PHY_READY) sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy); sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL); } static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_port *iport = iphy->owning_port; struct isci_host *ihost = iport->owning_controller; sci_controller_link_up(ihost, 
phy_get_non_dummy_port(iphy), iphy); } static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_phy_suspend(iphy); } static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); /* The phy is being reset, therefore deactivate it from the port. In * the resetting state we don't notify the user regarding link up and * link down notifications */ sci_port_deactivate_phy(iphy->owning_port, iphy, false); if (iphy->protocol == SAS_PROTOCOL_SSP) { scu_link_layer_tx_hard_reset(iphy); } else { /* The SCU does not need to have a discrete reset state so * just go back to the starting state. */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); } } static const struct sci_base_state sci_phy_state_table[] = { [SCI_PHY_INITIAL] = { }, [SCI_PHY_STOPPED] = { .enter_state = sci_phy_stopped_state_enter, }, [SCI_PHY_STARTING] = { .enter_state = sci_phy_starting_state_enter, }, [SCI_PHY_SUB_INITIAL] = { .enter_state = sci_phy_starting_initial_substate_enter, }, [SCI_PHY_SUB_AWAIT_OSSP_EN] = { }, [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { }, [SCI_PHY_SUB_AWAIT_IAF_UF] = { }, [SCI_PHY_SUB_AWAIT_SAS_POWER] = { .enter_state = sci_phy_starting_await_sas_power_substate_enter, .exit_state = sci_phy_starting_await_sas_power_substate_exit, }, [SCI_PHY_SUB_AWAIT_SATA_POWER] = { .enter_state = sci_phy_starting_await_sata_power_substate_enter, .exit_state = sci_phy_starting_await_sata_power_substate_exit }, [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = { .enter_state = sci_phy_starting_await_sata_phy_substate_enter, .exit_state = sci_phy_starting_await_sata_phy_substate_exit }, [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = { .enter_state = sci_phy_starting_await_sata_speed_substate_enter, .exit_state = sci_phy_starting_await_sata_speed_substate_exit }, [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = { .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter, .exit_state = 
sci_phy_starting_await_sig_fis_uf_substate_exit }, [SCI_PHY_SUB_FINAL] = { .enter_state = sci_phy_starting_final_substate_enter, }, [SCI_PHY_READY] = { .enter_state = sci_phy_ready_state_enter, .exit_state = sci_phy_ready_state_exit, }, [SCI_PHY_RESETTING] = { .enter_state = sci_phy_resetting_state_enter, }, [SCI_PHY_FINAL] = { }, }; void sci_phy_construct(struct isci_phy *iphy, struct isci_port *iport, u8 phy_index) { sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL); /* Copy the rest of the input data to our locals */ iphy->owning_port = iport; iphy->phy_index = phy_index; iphy->bcn_received_while_port_unassigned = false; iphy->protocol = SAS_PROTOCOL_NONE; iphy->link_layer_registers = NULL; iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; /* Create the SIGNATURE FIS Timeout timer for this phy */ sci_init_timer(&iphy->sata_timer, phy_sata_timeout); } void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index) { struct sci_oem_params *oem = &ihost->oem_parameters; u64 sci_sas_addr; __be64 sas_addr; sci_sas_addr = oem->phys[index].sas_address.high; sci_sas_addr <<= 32; sci_sas_addr |= oem->phys[index].sas_address.low; sas_addr = cpu_to_be64(sci_sas_addr); memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr)); iphy->sas_phy.enabled = 0; iphy->sas_phy.id = index; iphy->sas_phy.sas_addr = &iphy->sas_addr[0]; iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd; iphy->sas_phy.ha = &ihost->sas_ha; iphy->sas_phy.lldd_phy = iphy; iphy->sas_phy.enabled = 1; iphy->sas_phy.class = SAS; iphy->sas_phy.iproto = SAS_PROTOCOL_ALL; iphy->sas_phy.tproto = 0; iphy->sas_phy.type = PHY_TYPE_PHYSICAL; iphy->sas_phy.role = PHY_ROLE_INITIATOR; iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED; iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN; memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd)); } /** * isci_phy_control() - This function is one of the SAS Domain Template * functions. This is a phy management function. 
* @phy: This parameter specifies the sphy being controlled. * @func: This parameter specifies the phy control function being invoked. * @buf: This parameter is specific to the phy function being invoked. * * status, zero indicates success. */ int isci_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *buf) { int ret = 0; struct isci_phy *iphy = sas_phy->lldd_phy; struct asd_sas_port *port = sas_phy->port; struct isci_host *ihost = sas_phy->ha->lldd_ha; unsigned long flags; dev_dbg(&ihost->pdev->dev, "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n", __func__, sas_phy, func, buf, iphy, port); switch (func) { case PHY_FUNC_DISABLE: spin_lock_irqsave(&ihost->scic_lock, flags); scu_link_layer_start_oob(iphy); sci_phy_stop(iphy); spin_unlock_irqrestore(&ihost->scic_lock, flags); break; case PHY_FUNC_LINK_RESET: spin_lock_irqsave(&ihost->scic_lock, flags); scu_link_layer_start_oob(iphy); sci_phy_stop(iphy); sci_phy_start(iphy); spin_unlock_irqrestore(&ihost->scic_lock, flags); break; case PHY_FUNC_HARD_RESET: if (!port) return -ENODEV; ret = isci_port_perform_hard_reset(ihost, port->lldd_port, iphy); break; case PHY_FUNC_GET_EVENTS: { struct scu_link_layer_registers __iomem *r; struct sas_phy *phy = sas_phy->phy; r = iphy->link_layer_registers; phy->running_disparity_error_count = readl(&r->running_disparity_error_count); phy->loss_of_dword_sync_count = readl(&r->loss_of_sync_error_count); phy->phy_reset_problem_count = readl(&r->phy_reset_problem_count); phy->invalid_dword_count = readl(&r->invalid_dword_counter); break; } default: dev_dbg(&ihost->pdev->dev, "%s: phy %p; func %d NOT IMPLEMENTED!\n", __func__, sas_phy, func); ret = -ENOSYS; break; } return ret; }
gpl-2.0
RepoBackups/Canuck
drivers/scsi/scsi_netlink.c
4929
16136
/* * scsi_netlink.c - SCSI Transport Netlink Interface * * Copyright (C) 2006 James Smart, Emulex Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <linux/jiffies.h> #include <linux/security.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/export.h> #include <net/sock.h> #include <net/netlink.h> #include <scsi/scsi_netlink.h> #include "scsi_priv.h" struct sock *scsi_nl_sock = NULL; EXPORT_SYMBOL_GPL(scsi_nl_sock); static DEFINE_SPINLOCK(scsi_nl_lock); static struct list_head scsi_nl_drivers; static u32 scsi_nl_state; #define STATE_EHANDLER_BSY 0x00000001 struct scsi_nl_transport { int (*msg_handler)(struct sk_buff *); void (*event_handler)(struct notifier_block *, unsigned long, void *); unsigned int refcnt; int flags; }; /* flags values (bit flags) */ #define HANDLER_DELETING 0x1 static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] = { {NULL, }, }; struct scsi_nl_drvr { struct list_head next; int (*dmsg_handler)(struct Scsi_Host *shost, void *payload, u32 len, u32 pid); void (*devt_handler)(struct notifier_block *nb, unsigned long event, void *notify_ptr); struct scsi_host_template *hostt; u64 vendor_id; unsigned int refcnt; int flags; }; /** * scsi_nl_rcv_msg - Receive message handler. 
* @skb: socket receive buffer * * Description: Extracts message from a receive buffer. * Validates message header and calls appropriate transport message handler * * **/ static void scsi_nl_rcv_msg(struct sk_buff *skb) { struct nlmsghdr *nlh; struct scsi_nl_hdr *hdr; unsigned long flags; u32 rlen; int err, tport; while (skb->len >= NLMSG_SPACE(0)) { err = 0; nlh = nlmsg_hdr(skb); if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || (skb->len < nlh->nlmsg_len)) { printk(KERN_WARNING "%s: discarding partial skb\n", __func__); return; } rlen = NLMSG_ALIGN(nlh->nlmsg_len); if (rlen > skb->len) rlen = skb->len; if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) { err = -EBADMSG; goto next_msg; } hdr = NLMSG_DATA(nlh); if ((hdr->version != SCSI_NL_VERSION) || (hdr->magic != SCSI_NL_MAGIC)) { err = -EPROTOTYPE; goto next_msg; } if (!capable(CAP_SYS_ADMIN)) { err = -EPERM; goto next_msg; } if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) { printk(KERN_WARNING "%s: discarding partial message\n", __func__); goto next_msg; } /* * Deliver message to the appropriate transport */ spin_lock_irqsave(&scsi_nl_lock, flags); tport = hdr->transport; if ((tport < SCSI_NL_MAX_TRANSPORTS) && !(transports[tport].flags & HANDLER_DELETING) && (transports[tport].msg_handler)) { transports[tport].refcnt++; spin_unlock_irqrestore(&scsi_nl_lock, flags); err = transports[tport].msg_handler(skb); spin_lock_irqsave(&scsi_nl_lock, flags); transports[tport].refcnt--; } else err = -ENOENT; spin_unlock_irqrestore(&scsi_nl_lock, flags); next_msg: if ((err) || (nlh->nlmsg_flags & NLM_F_ACK)) netlink_ack(skb, nlh, err); skb_pull(skb, rlen); } } /** * scsi_nl_rcv_event - Event handler for a netlink socket. 
* @this: event notifier block * @event: event type * @ptr: event payload * **/ static int scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netlink_notify *n = ptr; struct scsi_nl_drvr *driver; unsigned long flags; int tport; if (n->protocol != NETLINK_SCSITRANSPORT) return NOTIFY_DONE; spin_lock_irqsave(&scsi_nl_lock, flags); scsi_nl_state |= STATE_EHANDLER_BSY; /* * Pass event on to any transports that may be listening */ for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) { if (!(transports[tport].flags & HANDLER_DELETING) && (transports[tport].event_handler)) { spin_unlock_irqrestore(&scsi_nl_lock, flags); transports[tport].event_handler(this, event, ptr); spin_lock_irqsave(&scsi_nl_lock, flags); } } /* * Pass event on to any drivers that may be listening */ list_for_each_entry(driver, &scsi_nl_drivers, next) { if (!(driver->flags & HANDLER_DELETING) && (driver->devt_handler)) { spin_unlock_irqrestore(&scsi_nl_lock, flags); driver->devt_handler(this, event, ptr); spin_lock_irqsave(&scsi_nl_lock, flags); } } scsi_nl_state &= ~STATE_EHANDLER_BSY; spin_unlock_irqrestore(&scsi_nl_lock, flags); return NOTIFY_DONE; } static struct notifier_block scsi_netlink_notifier = { .notifier_call = scsi_nl_rcv_event, }; /* * GENERIC SCSI transport receive and event handlers */ /** * scsi_generic_msg_handler - receive message handler for GENERIC transport messages * @skb: socket receive buffer **/ static int scsi_generic_msg_handler(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh); struct scsi_nl_drvr *driver; struct Scsi_Host *shost; unsigned long flags; int err = 0, match, pid; pid = NETLINK_CREDS(skb)->pid; switch (snlh->msgtype) { case SCSI_NL_SHOST_VENDOR: { struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh); /* Locate the driver that corresponds to the message */ spin_lock_irqsave(&scsi_nl_lock, flags); match = 0; list_for_each_entry(driver, &scsi_nl_drivers, next) { if 
(driver->vendor_id == msg->vendor_id) { match = 1; break; } } if ((!match) || (!driver->dmsg_handler)) { spin_unlock_irqrestore(&scsi_nl_lock, flags); err = -ESRCH; goto rcv_exit; } if (driver->flags & HANDLER_DELETING) { spin_unlock_irqrestore(&scsi_nl_lock, flags); err = -ESHUTDOWN; goto rcv_exit; } driver->refcnt++; spin_unlock_irqrestore(&scsi_nl_lock, flags); /* if successful, scsi_host_lookup takes a shost reference */ shost = scsi_host_lookup(msg->host_no); if (!shost) { err = -ENODEV; goto driver_exit; } /* is this host owned by the vendor ? */ if (shost->hostt != driver->hostt) { err = -EINVAL; goto vendormsg_put; } /* pass message on to the driver */ err = driver->dmsg_handler(shost, (void *)&msg[1], msg->vmsg_datalen, pid); vendormsg_put: /* release reference by scsi_host_lookup */ scsi_host_put(shost); driver_exit: /* release our own reference on the registration object */ spin_lock_irqsave(&scsi_nl_lock, flags); driver->refcnt--; spin_unlock_irqrestore(&scsi_nl_lock, flags); break; } default: err = -EBADR; break; } rcv_exit: if (err) printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n", __func__, snlh->msgtype, err); return err; } /** * scsi_nl_add_transport - * Registers message and event handlers for a transport. Enables * receipt of netlink messages and events to a transport. 
* * @tport: transport registering handlers * @msg_handler: receive message handler callback * @event_handler: receive event handler callback **/ int scsi_nl_add_transport(u8 tport, int (*msg_handler)(struct sk_buff *), void (*event_handler)(struct notifier_block *, unsigned long, void *)) { unsigned long flags; int err = 0; if (tport >= SCSI_NL_MAX_TRANSPORTS) return -EINVAL; spin_lock_irqsave(&scsi_nl_lock, flags); if (scsi_nl_state & STATE_EHANDLER_BSY) { spin_unlock_irqrestore(&scsi_nl_lock, flags); msleep(1); spin_lock_irqsave(&scsi_nl_lock, flags); } if (transports[tport].msg_handler || transports[tport].event_handler) { err = -EALREADY; goto register_out; } transports[tport].msg_handler = msg_handler; transports[tport].event_handler = event_handler; transports[tport].flags = 0; transports[tport].refcnt = 0; register_out: spin_unlock_irqrestore(&scsi_nl_lock, flags); return err; } EXPORT_SYMBOL_GPL(scsi_nl_add_transport); /** * scsi_nl_remove_transport - * Disable transport receiption of messages and events * * @tport: transport deregistering handlers * **/ void scsi_nl_remove_transport(u8 tport) { unsigned long flags; spin_lock_irqsave(&scsi_nl_lock, flags); if (scsi_nl_state & STATE_EHANDLER_BSY) { spin_unlock_irqrestore(&scsi_nl_lock, flags); msleep(1); spin_lock_irqsave(&scsi_nl_lock, flags); } if (tport < SCSI_NL_MAX_TRANSPORTS) { transports[tport].flags |= HANDLER_DELETING; while (transports[tport].refcnt != 0) { spin_unlock_irqrestore(&scsi_nl_lock, flags); schedule_timeout_uninterruptible(HZ/4); spin_lock_irqsave(&scsi_nl_lock, flags); } transports[tport].msg_handler = NULL; transports[tport].event_handler = NULL; transports[tport].flags = 0; } spin_unlock_irqrestore(&scsi_nl_lock, flags); return; } EXPORT_SYMBOL_GPL(scsi_nl_remove_transport); /** * scsi_nl_add_driver - * A driver is registering its interfaces for SCSI netlink messages * * @vendor_id: A unique identification value for the driver. * @hostt: address of the driver's host template. 
Used * to verify an shost is bound to the driver * @nlmsg_handler: receive message handler callback * @nlevt_handler: receive event handler callback * * Returns: * 0 on Success * error result otherwise **/ int scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt, int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload, u32 len, u32 pid), void (*nlevt_handler)(struct notifier_block *nb, unsigned long event, void *notify_ptr)) { struct scsi_nl_drvr *driver; unsigned long flags; driver = kzalloc(sizeof(*driver), GFP_KERNEL); if (unlikely(!driver)) { printk(KERN_ERR "%s: allocation failure\n", __func__); return -ENOMEM; } driver->dmsg_handler = nlmsg_handler; driver->devt_handler = nlevt_handler; driver->hostt = hostt; driver->vendor_id = vendor_id; spin_lock_irqsave(&scsi_nl_lock, flags); if (scsi_nl_state & STATE_EHANDLER_BSY) { spin_unlock_irqrestore(&scsi_nl_lock, flags); msleep(1); spin_lock_irqsave(&scsi_nl_lock, flags); } list_add_tail(&driver->next, &scsi_nl_drivers); spin_unlock_irqrestore(&scsi_nl_lock, flags); return 0; } EXPORT_SYMBOL_GPL(scsi_nl_add_driver); /** * scsi_nl_remove_driver - * An driver is unregistering with the SCSI netlink messages * * @vendor_id: The unique identification value for the driver. 
**/ void scsi_nl_remove_driver(u64 vendor_id) { struct scsi_nl_drvr *driver; unsigned long flags; spin_lock_irqsave(&scsi_nl_lock, flags); if (scsi_nl_state & STATE_EHANDLER_BSY) { spin_unlock_irqrestore(&scsi_nl_lock, flags); msleep(1); spin_lock_irqsave(&scsi_nl_lock, flags); } list_for_each_entry(driver, &scsi_nl_drivers, next) { if (driver->vendor_id == vendor_id) { driver->flags |= HANDLER_DELETING; while (driver->refcnt != 0) { spin_unlock_irqrestore(&scsi_nl_lock, flags); schedule_timeout_uninterruptible(HZ/4); spin_lock_irqsave(&scsi_nl_lock, flags); } list_del(&driver->next); kfree(driver); spin_unlock_irqrestore(&scsi_nl_lock, flags); return; } } spin_unlock_irqrestore(&scsi_nl_lock, flags); printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n", __func__, (unsigned long long)vendor_id); return; } EXPORT_SYMBOL_GPL(scsi_nl_remove_driver); /** * scsi_netlink_init - Called by SCSI subsystem to initialize * the SCSI transport netlink interface * **/ void scsi_netlink_init(void) { int error; INIT_LIST_HEAD(&scsi_nl_drivers); error = netlink_register_notifier(&scsi_netlink_notifier); if (error) { printk(KERN_ERR "%s: register of event handler failed - %d\n", __func__, error); return; } scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT, SCSI_NL_GRP_CNT, scsi_nl_rcv_msg, NULL, THIS_MODULE); if (!scsi_nl_sock) { printk(KERN_ERR "%s: register of receive handler failed\n", __func__); netlink_unregister_notifier(&scsi_netlink_notifier); return; } /* Register the entry points for the generic SCSI transport */ error = scsi_nl_add_transport(SCSI_NL_TRANSPORT, scsi_generic_msg_handler, NULL); if (error) printk(KERN_ERR "%s: register of GENERIC transport handler" " failed - %d\n", __func__, error); return; } /** * scsi_netlink_exit - Called by SCSI subsystem to disable the SCSI transport netlink interface * **/ void scsi_netlink_exit(void) { scsi_nl_remove_transport(SCSI_NL_TRANSPORT); if (scsi_nl_sock) { 
netlink_kernel_release(scsi_nl_sock); netlink_unregister_notifier(&scsi_netlink_notifier); } return; } /* * Exported Interfaces */ /** * scsi_nl_send_transport_msg - * Generic function to send a single message from a SCSI transport to * a single process * * @pid: receiving pid * @hdr: message payload * **/ void scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr) { struct sk_buff *skb; struct nlmsghdr *nlh; const char *fn; char *datab; u32 len, skblen; int err; if (!scsi_nl_sock) { err = -ENOENT; fn = "netlink socket"; goto msg_fail; } len = NLMSG_SPACE(hdr->msglen); skblen = NLMSG_SPACE(len); skb = alloc_skb(skblen, GFP_KERNEL); if (!skb) { err = -ENOBUFS; fn = "alloc_skb"; goto msg_fail; } nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0); if (!nlh) { err = -ENOBUFS; fn = "nlmsg_put"; goto msg_fail_skb; } datab = NLMSG_DATA(nlh); memcpy(datab, hdr, hdr->msglen); err = nlmsg_unicast(scsi_nl_sock, skb, pid); if (err < 0) { fn = "nlmsg_unicast"; /* nlmsg_unicast already kfree_skb'd */ goto msg_fail; } return; msg_fail_skb: kfree_skb(skb); msg_fail: printk(KERN_WARNING "%s: Dropped Message : pid %d Transport %d, msgtype x%x, " "msglen %d: %s : err %d\n", __func__, pid, hdr->transport, hdr->msgtype, hdr->msglen, fn, err); return; } EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg); /** * scsi_nl_send_vendor_msg - called to send a shost vendor unique message * to a specific process id. * * @pid: process id of the receiver * @host_no: host # sending the message * @vendor_id: unique identifier for the driver's vendor * @data_len: amount, in bytes, of vendor unique payload data * @data_buf: pointer to vendor unique data buffer * * Returns: * 0 on successful return * otherwise, failing error code * * Notes: * This routine assumes no locks are held on entry. 
*/ int scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id, char *data_buf, u32 data_len) { struct sk_buff *skb; struct nlmsghdr *nlh; struct scsi_nl_host_vendor_msg *msg; u32 len, skblen; int err; if (!scsi_nl_sock) { err = -ENOENT; goto send_vendor_fail; } len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len); skblen = NLMSG_SPACE(len); skb = alloc_skb(skblen, GFP_KERNEL); if (!skb) { err = -ENOBUFS; goto send_vendor_fail; } nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, skblen - sizeof(*nlh), 0); if (!nlh) { err = -ENOBUFS; goto send_vendor_fail_skb; } msg = NLMSG_DATA(nlh); INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT, SCSI_NL_SHOST_VENDOR, len); msg->vendor_id = vendor_id; msg->host_no = host_no; msg->vmsg_datalen = data_len; /* bytes */ memcpy(&msg[1], data_buf, data_len); err = nlmsg_unicast(scsi_nl_sock, skb, pid); if (err) /* nlmsg_multicast already kfree_skb'd */ goto send_vendor_fail; return 0; send_vendor_fail_skb: kfree_skb(skb); send_vendor_fail: printk(KERN_WARNING "%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n", __func__, host_no, err); return err; } EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
gpl-2.0
SaberMod/lge-kernel-mako
drivers/input/touchscreen/ads7846.c
4929
34257
/* * ADS7846 based touchscreen and sensor driver * * Copyright (c) 2005 David Brownell * Copyright (c) 2006 Nokia Corporation * Various changes: Imre Deak <imre.deak@nokia.com> * * Using code from: * - corgi_ts.c * Copyright (C) 2004-2005 Richard Purdie * - omap_ts.[hc], ads7846.h, ts_osk.c * Copyright (C) 2002 MontaVista Software * Copyright (C) 2004 Texas Instruments * Copyright (C) 2005 Dirk Behme * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/hwmon.h> #include <linux/init.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/pm.h> #include <linux/gpio.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <linux/regulator/consumer.h> #include <linux/module.h> #include <asm/irq.h> /* * This code has been heavily tested on a Nokia 770, and lightly * tested on other ads7846 devices (OSK/Mistral, Lubbock, Spitz). * TSC2046 is just newer ads7846 silicon. * Support for ads7843 tested on Atmel at91sam926x-EK. * Support for ads7845 has only been stubbed in. * Support for Analog Devices AD7873 and AD7843 tested. * * IRQ handling needs a workaround because of a shortcoming in handling * edge triggered IRQs on some platforms like the OMAP1/2. These * platforms don't handle the ARM lazy IRQ disabling properly, thus we * have to maintain our own SW IRQ disabled status. This should be * removed as soon as the affected platform's IRQ handling is fixed. * * App note sbaa036 talks in more detail about accurate sampling... * that ought to help in situations like LCDs inducing noise (which * can also be helped by using synch signals) and more generally. * This driver tries to utilize the measures described in the app * note. 
The strength of filtering can be set in the board-* specific * files. */ #define TS_POLL_DELAY 1 /* ms delay before the first sample */ #define TS_POLL_PERIOD 5 /* ms delay between samples */ /* this driver doesn't aim at the peak continuous sample rate */ #define SAMPLE_BITS (8 /*cmd*/ + 16 /*sample*/ + 2 /* before, after */) struct ts_event { /* * For portability, we can't read 12 bit values using SPI (which * would make the controller deliver them as native byte order u16 * with msbs zeroed). Instead, we read them as two 8-bit values, * *** WHICH NEED BYTESWAPPING *** and range adjustment. */ u16 x; u16 y; u16 z1, z2; bool ignore; u8 x_buf[3]; u8 y_buf[3]; }; /* * We allocate this separately to avoid cache line sharing issues when * driver is used with DMA-based SPI controllers (like atmel_spi) on * systems where main memory is not DMA-coherent (most non-x86 boards). */ struct ads7846_packet { u8 read_x, read_y, read_z1, read_z2, pwrdown; u16 dummy; /* for the pwrdown read */ struct ts_event tc; /* for ads7845 with mpc5121 psc spi we use 3-byte buffers */ u8 read_x_cmd[3], read_y_cmd[3], pwrdown_cmd[3]; }; struct ads7846 { struct input_dev *input; char phys[32]; char name[32]; struct spi_device *spi; struct regulator *reg; #if defined(CONFIG_HWMON) || defined(CONFIG_HWMON_MODULE) struct attribute_group *attr_group; struct device *hwmon; #endif u16 model; u16 vref_mv; u16 vref_delay_usecs; u16 x_plate_ohms; u16 pressure_max; bool swap_xy; bool use_internal; struct ads7846_packet *packet; struct spi_transfer xfer[18]; struct spi_message msg[5]; int msg_count; wait_queue_head_t wait; bool pendown; int read_cnt; int read_rep; int last_read; u16 debounce_max; u16 debounce_tol; u16 debounce_rep; u16 penirq_recheck_delay_usecs; struct mutex lock; bool stopped; /* P: lock */ bool disabled; /* P: lock */ bool suspended; /* P: lock */ int (*filter)(void *data, int data_idx, int *val); void *filter_data; void (*filter_cleanup)(void *data); int (*get_pendown_state)(void); 
int gpio_pendown; void (*wait_for_sync)(void); }; /* leave chip selected when we're done, for quicker re-select? */ #if 0 #define CS_CHANGE(xfer) ((xfer).cs_change = 1) #else #define CS_CHANGE(xfer) ((xfer).cs_change = 0) #endif /*--------------------------------------------------------------------------*/ /* The ADS7846 has touchscreen and other sensors. * Earlier ads784x chips are somewhat compatible. */ #define ADS_START (1 << 7) #define ADS_A2A1A0_d_y (1 << 4) /* differential */ #define ADS_A2A1A0_d_z1 (3 << 4) /* differential */ #define ADS_A2A1A0_d_z2 (4 << 4) /* differential */ #define ADS_A2A1A0_d_x (5 << 4) /* differential */ #define ADS_A2A1A0_temp0 (0 << 4) /* non-differential */ #define ADS_A2A1A0_vbatt (2 << 4) /* non-differential */ #define ADS_A2A1A0_vaux (6 << 4) /* non-differential */ #define ADS_A2A1A0_temp1 (7 << 4) /* non-differential */ #define ADS_8_BIT (1 << 3) #define ADS_12_BIT (0 << 3) #define ADS_SER (1 << 2) /* non-differential */ #define ADS_DFR (0 << 2) /* differential */ #define ADS_PD10_PDOWN (0 << 0) /* low power mode + penirq */ #define ADS_PD10_ADC_ON (1 << 0) /* ADC on */ #define ADS_PD10_REF_ON (2 << 0) /* vREF on + penirq */ #define ADS_PD10_ALL_ON (3 << 0) /* ADC + vREF on */ #define MAX_12BIT ((1<<12)-1) /* leave ADC powered up (disables penirq) between differential samples */ #define READ_12BIT_DFR(x, adc, vref) (ADS_START | ADS_A2A1A0_d_ ## x \ | ADS_12_BIT | ADS_DFR | \ (adc ? ADS_PD10_ADC_ON : 0) | (vref ? 
ADS_PD10_REF_ON : 0)) #define READ_Y(vref) (READ_12BIT_DFR(y, 1, vref)) #define READ_Z1(vref) (READ_12BIT_DFR(z1, 1, vref)) #define READ_Z2(vref) (READ_12BIT_DFR(z2, 1, vref)) #define READ_X(vref) (READ_12BIT_DFR(x, 1, vref)) #define PWRDOWN (READ_12BIT_DFR(y, 0, 0)) /* LAST */ /* single-ended samples need to first power up reference voltage; * we leave both ADC and VREF powered */ #define READ_12BIT_SER(x) (ADS_START | ADS_A2A1A0_ ## x \ | ADS_12_BIT | ADS_SER) #define REF_ON (READ_12BIT_DFR(x, 1, 1)) #define REF_OFF (READ_12BIT_DFR(y, 0, 0)) /* Must be called with ts->lock held */ static void ads7846_stop(struct ads7846 *ts) { if (!ts->disabled && !ts->suspended) { /* Signal IRQ thread to stop polling and disable the handler. */ ts->stopped = true; mb(); wake_up(&ts->wait); disable_irq(ts->spi->irq); } } /* Must be called with ts->lock held */ static void ads7846_restart(struct ads7846 *ts) { if (!ts->disabled && !ts->suspended) { /* Tell IRQ thread that it may poll the device. */ ts->stopped = false; mb(); enable_irq(ts->spi->irq); } } /* Must be called with ts->lock held */ static void __ads7846_disable(struct ads7846 *ts) { ads7846_stop(ts); regulator_disable(ts->reg); /* * We know the chip's in low power mode since we always * leave it that way after every request */ } /* Must be called with ts->lock held */ static void __ads7846_enable(struct ads7846 *ts) { regulator_enable(ts->reg); ads7846_restart(ts); } static void ads7846_disable(struct ads7846 *ts) { mutex_lock(&ts->lock); if (!ts->disabled) { if (!ts->suspended) __ads7846_disable(ts); ts->disabled = true; } mutex_unlock(&ts->lock); } static void ads7846_enable(struct ads7846 *ts) { mutex_lock(&ts->lock); if (ts->disabled) { ts->disabled = false; if (!ts->suspended) __ads7846_enable(ts); } mutex_unlock(&ts->lock); } /*--------------------------------------------------------------------------*/ /* * Non-touchscreen sensors only use single-ended conversions. * The range is GND..vREF. 
The ads7843 and ads7835 must use external vREF; * ads7846 lets that pin be unconnected, to use internal vREF. */ struct ser_req { u8 ref_on; u8 command; u8 ref_off; u16 scratch; struct spi_message msg; struct spi_transfer xfer[6]; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. */ __be16 sample ____cacheline_aligned; }; struct ads7845_ser_req { u8 command[3]; struct spi_message msg; struct spi_transfer xfer[2]; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. */ u8 sample[3] ____cacheline_aligned; }; static int ads7846_read12_ser(struct device *dev, unsigned command) { struct spi_device *spi = to_spi_device(dev); struct ads7846 *ts = dev_get_drvdata(dev); struct ser_req *req; int status; req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) return -ENOMEM; spi_message_init(&req->msg); /* maybe turn on internal vREF, and let it settle */ if (ts->use_internal) { req->ref_on = REF_ON; req->xfer[0].tx_buf = &req->ref_on; req->xfer[0].len = 1; spi_message_add_tail(&req->xfer[0], &req->msg); req->xfer[1].rx_buf = &req->scratch; req->xfer[1].len = 2; /* for 1uF, settle for 800 usec; no cap, 100 usec. */ req->xfer[1].delay_usecs = ts->vref_delay_usecs; spi_message_add_tail(&req->xfer[1], &req->msg); /* Enable reference voltage */ command |= ADS_PD10_REF_ON; } /* Enable ADC in every case */ command |= ADS_PD10_ADC_ON; /* take sample */ req->command = (u8) command; req->xfer[2].tx_buf = &req->command; req->xfer[2].len = 1; spi_message_add_tail(&req->xfer[2], &req->msg); req->xfer[3].rx_buf = &req->sample; req->xfer[3].len = 2; spi_message_add_tail(&req->xfer[3], &req->msg); /* REVISIT: take a few more samples, and compare ... 
*/ /* converter in low power mode & enable PENIRQ */ req->ref_off = PWRDOWN; req->xfer[4].tx_buf = &req->ref_off; req->xfer[4].len = 1; spi_message_add_tail(&req->xfer[4], &req->msg); req->xfer[5].rx_buf = &req->scratch; req->xfer[5].len = 2; CS_CHANGE(req->xfer[5]); spi_message_add_tail(&req->xfer[5], &req->msg); mutex_lock(&ts->lock); ads7846_stop(ts); status = spi_sync(spi, &req->msg); ads7846_restart(ts); mutex_unlock(&ts->lock); if (status == 0) { /* on-wire is a must-ignore bit, a BE12 value, then padding */ status = be16_to_cpu(req->sample); status = status >> 3; status &= 0x0fff; } kfree(req); return status; } static int ads7845_read12_ser(struct device *dev, unsigned command) { struct spi_device *spi = to_spi_device(dev); struct ads7846 *ts = dev_get_drvdata(dev); struct ads7845_ser_req *req; int status; req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) return -ENOMEM; spi_message_init(&req->msg); req->command[0] = (u8) command; req->xfer[0].tx_buf = req->command; req->xfer[0].rx_buf = req->sample; req->xfer[0].len = 3; spi_message_add_tail(&req->xfer[0], &req->msg); mutex_lock(&ts->lock); ads7846_stop(ts); status = spi_sync(spi, &req->msg); ads7846_restart(ts); mutex_unlock(&ts->lock); if (status == 0) { /* BE12 value, then padding */ status = be16_to_cpu(*((u16 *)&req->sample[1])); status = status >> 3; status &= 0x0fff; } kfree(req); return status; } #if defined(CONFIG_HWMON) || defined(CONFIG_HWMON_MODULE) #define SHOW(name, var, adjust) static ssize_t \ name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct ads7846 *ts = dev_get_drvdata(dev); \ ssize_t v = ads7846_read12_ser(dev, \ READ_12BIT_SER(var)); \ if (v < 0) \ return v; \ return sprintf(buf, "%u\n", adjust(ts, v)); \ } \ static DEVICE_ATTR(name, S_IRUGO, name ## _show, NULL); /* Sysfs conventions report temperatures in millidegrees Celsius. * ADS7846 could use the low-accuracy two-sample scheme, but can't do the high * accuracy scheme without calibration data. 
For now we won't try either; * userspace sees raw sensor values, and must scale/calibrate appropriately. */ static inline unsigned null_adjust(struct ads7846 *ts, ssize_t v) { return v; } SHOW(temp0, temp0, null_adjust) /* temp1_input */ SHOW(temp1, temp1, null_adjust) /* temp2_input */ /* sysfs conventions report voltages in millivolts. We can convert voltages * if we know vREF. userspace may need to scale vAUX to match the board's * external resistors; we assume that vBATT only uses the internal ones. */ static inline unsigned vaux_adjust(struct ads7846 *ts, ssize_t v) { unsigned retval = v; /* external resistors may scale vAUX into 0..vREF */ retval *= ts->vref_mv; retval = retval >> 12; return retval; } static inline unsigned vbatt_adjust(struct ads7846 *ts, ssize_t v) { unsigned retval = vaux_adjust(ts, v); /* ads7846 has a resistor ladder to scale this signal down */ if (ts->model == 7846) retval *= 4; return retval; } SHOW(in0_input, vaux, vaux_adjust) SHOW(in1_input, vbatt, vbatt_adjust) static struct attribute *ads7846_attributes[] = { &dev_attr_temp0.attr, &dev_attr_temp1.attr, &dev_attr_in0_input.attr, &dev_attr_in1_input.attr, NULL, }; static struct attribute_group ads7846_attr_group = { .attrs = ads7846_attributes, }; static struct attribute *ads7843_attributes[] = { &dev_attr_in0_input.attr, &dev_attr_in1_input.attr, NULL, }; static struct attribute_group ads7843_attr_group = { .attrs = ads7843_attributes, }; static struct attribute *ads7845_attributes[] = { &dev_attr_in0_input.attr, NULL, }; static struct attribute_group ads7845_attr_group = { .attrs = ads7845_attributes, }; static int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts) { struct device *hwmon; int err; /* hwmon sensors need a reference voltage */ switch (ts->model) { case 7846: if (!ts->vref_mv) { dev_dbg(&spi->dev, "assuming 2.5V internal vREF\n"); ts->vref_mv = 2500; ts->use_internal = true; } break; case 7845: case 7843: if (!ts->vref_mv) { dev_warn(&spi->dev, 
"external vREF for ADS%d not specified\n", ts->model); return 0; } break; } /* different chips have different sensor groups */ switch (ts->model) { case 7846: ts->attr_group = &ads7846_attr_group; break; case 7845: ts->attr_group = &ads7845_attr_group; break; case 7843: ts->attr_group = &ads7843_attr_group; break; default: dev_dbg(&spi->dev, "ADS%d not recognized\n", ts->model); return 0; } err = sysfs_create_group(&spi->dev.kobj, ts->attr_group); if (err) return err; hwmon = hwmon_device_register(&spi->dev); if (IS_ERR(hwmon)) { sysfs_remove_group(&spi->dev.kobj, ts->attr_group); return PTR_ERR(hwmon); } ts->hwmon = hwmon; return 0; } static void ads784x_hwmon_unregister(struct spi_device *spi, struct ads7846 *ts) { if (ts->hwmon) { sysfs_remove_group(&spi->dev.kobj, ts->attr_group); hwmon_device_unregister(ts->hwmon); } } #else static inline int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts) { return 0; } static inline void ads784x_hwmon_unregister(struct spi_device *spi, struct ads7846 *ts) { } #endif static ssize_t ads7846_pen_down_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ads7846 *ts = dev_get_drvdata(dev); return sprintf(buf, "%u\n", ts->pendown); } static DEVICE_ATTR(pen_down, S_IRUGO, ads7846_pen_down_show, NULL); static ssize_t ads7846_disable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ads7846 *ts = dev_get_drvdata(dev); return sprintf(buf, "%u\n", ts->disabled); } static ssize_t ads7846_disable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ads7846 *ts = dev_get_drvdata(dev); unsigned int i; int err; err = kstrtouint(buf, 10, &i); if (err) return err; if (i) ads7846_disable(ts); else ads7846_enable(ts); return count; } static DEVICE_ATTR(disable, 0664, ads7846_disable_show, ads7846_disable_store); static struct attribute *ads784x_attributes[] = { &dev_attr_pen_down.attr, &dev_attr_disable.attr, NULL, }; static struct 
attribute_group ads784x_attr_group = { .attrs = ads784x_attributes, }; /*--------------------------------------------------------------------------*/ static int get_pendown_state(struct ads7846 *ts) { if (ts->get_pendown_state) return ts->get_pendown_state(); return !gpio_get_value(ts->gpio_pendown); } static void null_wait_for_sync(void) { } static int ads7846_debounce_filter(void *ads, int data_idx, int *val) { struct ads7846 *ts = ads; if (!ts->read_cnt || (abs(ts->last_read - *val) > ts->debounce_tol)) { /* Start over collecting consistent readings. */ ts->read_rep = 0; /* * Repeat it, if this was the first read or the read * wasn't consistent enough. */ if (ts->read_cnt < ts->debounce_max) { ts->last_read = *val; ts->read_cnt++; return ADS7846_FILTER_REPEAT; } else { /* * Maximum number of debouncing reached and still * not enough number of consistent readings. Abort * the whole sample, repeat it in the next sampling * period. */ ts->read_cnt = 0; return ADS7846_FILTER_IGNORE; } } else { if (++ts->read_rep > ts->debounce_rep) { /* * Got a good reading for this coordinate, * go for the next one. */ ts->read_cnt = 0; ts->read_rep = 0; return ADS7846_FILTER_OK; } else { /* Read more values that are consistent. */ ts->read_cnt++; return ADS7846_FILTER_REPEAT; } } } static int ads7846_no_filter(void *ads, int data_idx, int *val) { return ADS7846_FILTER_OK; } static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m) { struct spi_transfer *t = list_entry(m->transfers.prev, struct spi_transfer, transfer_list); if (ts->model == 7845) { return be16_to_cpup((__be16 *)&(((char*)t->rx_buf)[1])) >> 3; } else { /* * adjust: on-wire is a must-ignore bit, a BE12 value, then * padding; built from two 8 bit values written msb-first. 
*/ return be16_to_cpup((__be16 *)t->rx_buf) >> 3; } } static void ads7846_update_value(struct spi_message *m, int val) { struct spi_transfer *t = list_entry(m->transfers.prev, struct spi_transfer, transfer_list); *(u16 *)t->rx_buf = val; } static void ads7846_read_state(struct ads7846 *ts) { struct ads7846_packet *packet = ts->packet; struct spi_message *m; int msg_idx = 0; int val; int action; int error; while (msg_idx < ts->msg_count) { ts->wait_for_sync(); m = &ts->msg[msg_idx]; error = spi_sync(ts->spi, m); if (error) { dev_err(&ts->spi->dev, "spi_async --> %d\n", error); packet->tc.ignore = true; return; } /* * Last message is power down request, no need to convert * or filter the value. */ if (msg_idx < ts->msg_count - 1) { val = ads7846_get_value(ts, m); action = ts->filter(ts->filter_data, msg_idx, &val); switch (action) { case ADS7846_FILTER_REPEAT: continue; case ADS7846_FILTER_IGNORE: packet->tc.ignore = true; msg_idx = ts->msg_count - 1; continue; case ADS7846_FILTER_OK: ads7846_update_value(m, val); packet->tc.ignore = false; msg_idx++; break; default: BUG(); } } else { msg_idx++; } } } static void ads7846_report_state(struct ads7846 *ts) { struct ads7846_packet *packet = ts->packet; unsigned int Rt; u16 x, y, z1, z2; /* * ads7846_get_value() does in-place conversion (including byte swap) * from on-the-wire format as part of debouncing to get stable * readings. 
*/ if (ts->model == 7845) { x = *(u16 *)packet->tc.x_buf; y = *(u16 *)packet->tc.y_buf; z1 = 0; z2 = 0; } else { x = packet->tc.x; y = packet->tc.y; z1 = packet->tc.z1; z2 = packet->tc.z2; } /* range filtering */ if (x == MAX_12BIT) x = 0; if (ts->model == 7843) { Rt = ts->pressure_max / 2; } else if (ts->model == 7845) { if (get_pendown_state(ts)) Rt = ts->pressure_max / 2; else Rt = 0; dev_vdbg(&ts->spi->dev, "x/y: %d/%d, PD %d\n", x, y, Rt); } else if (likely(x && z1)) { /* compute touch pressure resistance using equation #2 */ Rt = z2; Rt -= z1; Rt *= x; Rt *= ts->x_plate_ohms; Rt /= z1; Rt = (Rt + 2047) >> 12; } else { Rt = 0; } /* * Sample found inconsistent by debouncing or pressure is beyond * the maximum. Don't report it to user space, repeat at least * once more the measurement */ if (packet->tc.ignore || Rt > ts->pressure_max) { dev_vdbg(&ts->spi->dev, "ignored %d pressure %d\n", packet->tc.ignore, Rt); return; } /* * Maybe check the pendown state before reporting. This discards * false readings when the pen is lifted. */ if (ts->penirq_recheck_delay_usecs) { udelay(ts->penirq_recheck_delay_usecs); if (!get_pendown_state(ts)) Rt = 0; } /* * NOTE: We can't rely on the pressure to determine the pen down * state, even this controller has a pressure sensor. The pressure * value can fluctuate for quite a while after lifting the pen and * in some cases may not even settle at the expected value. * * The only safe way to check for the pen up condition is in the * timer by reading the pen signal state (it's a GPIO _and_ IRQ). 
*/ if (Rt) { struct input_dev *input = ts->input; if (ts->swap_xy) swap(x, y); if (!ts->pendown) { input_report_key(input, BTN_TOUCH, 1); ts->pendown = true; dev_vdbg(&ts->spi->dev, "DOWN\n"); } input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); input_report_abs(input, ABS_PRESSURE, ts->pressure_max - Rt); input_sync(input); dev_vdbg(&ts->spi->dev, "%4d/%4d/%4d\n", x, y, Rt); } } static irqreturn_t ads7846_hard_irq(int irq, void *handle) { struct ads7846 *ts = handle; return get_pendown_state(ts) ? IRQ_WAKE_THREAD : IRQ_HANDLED; } static irqreturn_t ads7846_irq(int irq, void *handle) { struct ads7846 *ts = handle; /* Start with a small delay before checking pendown state */ msleep(TS_POLL_DELAY); while (!ts->stopped && get_pendown_state(ts)) { /* pen is down, continue with the measurement */ ads7846_read_state(ts); if (!ts->stopped) ads7846_report_state(ts); wait_event_timeout(ts->wait, ts->stopped, msecs_to_jiffies(TS_POLL_PERIOD)); } if (ts->pendown) { struct input_dev *input = ts->input; input_report_key(input, BTN_TOUCH, 0); input_report_abs(input, ABS_PRESSURE, 0); input_sync(input); ts->pendown = false; dev_vdbg(&ts->spi->dev, "UP\n"); } return IRQ_HANDLED; } #ifdef CONFIG_PM_SLEEP static int ads7846_suspend(struct device *dev) { struct ads7846 *ts = dev_get_drvdata(dev); mutex_lock(&ts->lock); if (!ts->suspended) { if (!ts->disabled) __ads7846_disable(ts); if (device_may_wakeup(&ts->spi->dev)) enable_irq_wake(ts->spi->irq); ts->suspended = true; } mutex_unlock(&ts->lock); return 0; } static int ads7846_resume(struct device *dev) { struct ads7846 *ts = dev_get_drvdata(dev); mutex_lock(&ts->lock); if (ts->suspended) { ts->suspended = false; if (device_may_wakeup(&ts->spi->dev)) disable_irq_wake(ts->spi->irq); if (!ts->disabled) __ads7846_enable(ts); } mutex_unlock(&ts->lock); return 0; } #endif static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume); static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct 
ads7846 *ts) { struct ads7846_platform_data *pdata = spi->dev.platform_data; int err; /* * REVISIT when the irq can be triggered active-low, or if for some * reason the touchscreen isn't hooked up, we don't need to access * the pendown state. */ if (pdata->get_pendown_state) { ts->get_pendown_state = pdata->get_pendown_state; } else if (gpio_is_valid(pdata->gpio_pendown)) { err = gpio_request_one(pdata->gpio_pendown, GPIOF_IN, "ads7846_pendown"); if (err) { dev_err(&spi->dev, "failed to request/setup pendown GPIO%d: %d\n", pdata->gpio_pendown, err); return err; } ts->gpio_pendown = pdata->gpio_pendown; } else { dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n"); return -EINVAL; } return 0; } /* * Set up the transfers to read touchscreen state; this assumes we * use formula #2 for pressure, not #3. */ static void __devinit ads7846_setup_spi_msg(struct ads7846 *ts, const struct ads7846_platform_data *pdata) { struct spi_message *m = &ts->msg[0]; struct spi_transfer *x = ts->xfer; struct ads7846_packet *packet = ts->packet; int vref = pdata->keep_vref_on; if (ts->model == 7873) { /* * The AD7873 is almost identical to the ADS7846 * keep VREF off during differential/ratiometric * conversion modes. */ ts->model = 7846; vref = 0; } ts->msg_count = 1; spi_message_init(m); m->context = ts; if (ts->model == 7845) { packet->read_y_cmd[0] = READ_Y(vref); packet->read_y_cmd[1] = 0; packet->read_y_cmd[2] = 0; x->tx_buf = &packet->read_y_cmd[0]; x->rx_buf = &packet->tc.y_buf[0]; x->len = 3; spi_message_add_tail(x, m); } else { /* y- still on; turn on only y+ (and ADC) */ packet->read_y = READ_Y(vref); x->tx_buf = &packet->read_y; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.y; x->len = 2; spi_message_add_tail(x, m); } /* * The first sample after switching drivers can be low quality; * optionally discard it, using a second one after the signals * have had enough time to stabilize. 
*/ if (pdata->settle_delay_usecs) { x->delay_usecs = pdata->settle_delay_usecs; x++; x->tx_buf = &packet->read_y; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.y; x->len = 2; spi_message_add_tail(x, m); } ts->msg_count++; m++; spi_message_init(m); m->context = ts; if (ts->model == 7845) { x++; packet->read_x_cmd[0] = READ_X(vref); packet->read_x_cmd[1] = 0; packet->read_x_cmd[2] = 0; x->tx_buf = &packet->read_x_cmd[0]; x->rx_buf = &packet->tc.x_buf[0]; x->len = 3; spi_message_add_tail(x, m); } else { /* turn y- off, x+ on, then leave in lowpower */ x++; packet->read_x = READ_X(vref); x->tx_buf = &packet->read_x; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.x; x->len = 2; spi_message_add_tail(x, m); } /* ... maybe discard first sample ... */ if (pdata->settle_delay_usecs) { x->delay_usecs = pdata->settle_delay_usecs; x++; x->tx_buf = &packet->read_x; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.x; x->len = 2; spi_message_add_tail(x, m); } /* turn y+ off, x- on; we'll use formula #2 */ if (ts->model == 7846) { ts->msg_count++; m++; spi_message_init(m); m->context = ts; x++; packet->read_z1 = READ_Z1(vref); x->tx_buf = &packet->read_z1; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.z1; x->len = 2; spi_message_add_tail(x, m); /* ... maybe discard first sample ... */ if (pdata->settle_delay_usecs) { x->delay_usecs = pdata->settle_delay_usecs; x++; x->tx_buf = &packet->read_z1; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.z1; x->len = 2; spi_message_add_tail(x, m); } ts->msg_count++; m++; spi_message_init(m); m->context = ts; x++; packet->read_z2 = READ_Z2(vref); x->tx_buf = &packet->read_z2; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.z2; x->len = 2; spi_message_add_tail(x, m); /* ... maybe discard first sample ... 
*/ if (pdata->settle_delay_usecs) { x->delay_usecs = pdata->settle_delay_usecs; x++; x->tx_buf = &packet->read_z2; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.z2; x->len = 2; spi_message_add_tail(x, m); } } /* power down */ ts->msg_count++; m++; spi_message_init(m); m->context = ts; if (ts->model == 7845) { x++; packet->pwrdown_cmd[0] = PWRDOWN; packet->pwrdown_cmd[1] = 0; packet->pwrdown_cmd[2] = 0; x->tx_buf = &packet->pwrdown_cmd[0]; x->len = 3; } else { x++; packet->pwrdown = PWRDOWN; x->tx_buf = &packet->pwrdown; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->dummy; x->len = 2; } CS_CHANGE(*x); spi_message_add_tail(x, m); } static int __devinit ads7846_probe(struct spi_device *spi) { struct ads7846 *ts; struct ads7846_packet *packet; struct input_dev *input_dev; struct ads7846_platform_data *pdata = spi->dev.platform_data; unsigned long irq_flags; int err; if (!spi->irq) { dev_dbg(&spi->dev, "no IRQ?\n"); return -ENODEV; } if (!pdata) { dev_dbg(&spi->dev, "no platform data?\n"); return -ENODEV; } /* don't exceed max specified sample rate */ if (spi->max_speed_hz > (125000 * SAMPLE_BITS)) { dev_dbg(&spi->dev, "f(sample) %d KHz?\n", (spi->max_speed_hz/SAMPLE_BITS)/1000); return -EINVAL; } /* We'd set TX word size 8 bits and RX word size to 13 bits ... except * that even if the hardware can do that, the SPI controller driver * may not. So we stick to very-portable 8 bit words, both RX and TX. 
*/ spi->bits_per_word = 8; spi->mode = SPI_MODE_0; err = spi_setup(spi); if (err < 0) return err; ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL); packet = kzalloc(sizeof(struct ads7846_packet), GFP_KERNEL); input_dev = input_allocate_device(); if (!ts || !packet || !input_dev) { err = -ENOMEM; goto err_free_mem; } dev_set_drvdata(&spi->dev, ts); ts->packet = packet; ts->spi = spi; ts->input = input_dev; ts->vref_mv = pdata->vref_mv; ts->swap_xy = pdata->swap_xy; mutex_init(&ts->lock); init_waitqueue_head(&ts->wait); ts->model = pdata->model ? : 7846; ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100; ts->x_plate_ohms = pdata->x_plate_ohms ? : 400; ts->pressure_max = pdata->pressure_max ? : ~0; if (pdata->filter != NULL) { if (pdata->filter_init != NULL) { err = pdata->filter_init(pdata, &ts->filter_data); if (err < 0) goto err_free_mem; } ts->filter = pdata->filter; ts->filter_cleanup = pdata->filter_cleanup; } else if (pdata->debounce_max) { ts->debounce_max = pdata->debounce_max; if (ts->debounce_max < 2) ts->debounce_max = 2; ts->debounce_tol = pdata->debounce_tol; ts->debounce_rep = pdata->debounce_rep; ts->filter = ads7846_debounce_filter; ts->filter_data = ts; } else { ts->filter = ads7846_no_filter; } err = ads7846_setup_pendown(spi, ts); if (err) goto err_cleanup_filter; if (pdata->penirq_recheck_delay_usecs) ts->penirq_recheck_delay_usecs = pdata->penirq_recheck_delay_usecs; ts->wait_for_sync = pdata->wait_for_sync ? : null_wait_for_sync; snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&spi->dev)); snprintf(ts->name, sizeof(ts->name), "ADS%d Touchscreen", ts->model); input_dev->name = ts->name; input_dev->phys = ts->phys; input_dev->dev.parent = &spi->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, pdata->x_min ? : 0, pdata->x_max ? : MAX_12BIT, 0, 0); input_set_abs_params(input_dev, ABS_Y, pdata->y_min ? 
: 0, pdata->y_max ? : MAX_12BIT, 0, 0); input_set_abs_params(input_dev, ABS_PRESSURE, pdata->pressure_min, pdata->pressure_max, 0, 0); ads7846_setup_spi_msg(ts, pdata); ts->reg = regulator_get(&spi->dev, "vcc"); if (IS_ERR(ts->reg)) { err = PTR_ERR(ts->reg); dev_err(&spi->dev, "unable to get regulator: %d\n", err); goto err_free_gpio; } err = regulator_enable(ts->reg); if (err) { dev_err(&spi->dev, "unable to enable regulator: %d\n", err); goto err_put_regulator; } irq_flags = pdata->irq_flags ? : IRQF_TRIGGER_FALLING; irq_flags |= IRQF_ONESHOT; err = request_threaded_irq(spi->irq, ads7846_hard_irq, ads7846_irq, irq_flags, spi->dev.driver->name, ts); if (err && !pdata->irq_flags) { dev_info(&spi->dev, "trying pin change workaround on irq %d\n", spi->irq); irq_flags |= IRQF_TRIGGER_RISING; err = request_threaded_irq(spi->irq, ads7846_hard_irq, ads7846_irq, irq_flags, spi->dev.driver->name, ts); } if (err) { dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq); goto err_disable_regulator; } err = ads784x_hwmon_register(spi, ts); if (err) goto err_free_irq; dev_info(&spi->dev, "touchscreen, irq %d\n", spi->irq); /* * Take a first sample, leaving nPENIRQ active and vREF off; avoid * the touchscreen, in case it's not connected. 
*/ if (ts->model == 7845) ads7845_read12_ser(&spi->dev, PWRDOWN); else (void) ads7846_read12_ser(&spi->dev, READ_12BIT_SER(vaux)); err = sysfs_create_group(&spi->dev.kobj, &ads784x_attr_group); if (err) goto err_remove_hwmon; err = input_register_device(input_dev); if (err) goto err_remove_attr_group; device_init_wakeup(&spi->dev, pdata->wakeup); return 0; err_remove_attr_group: sysfs_remove_group(&spi->dev.kobj, &ads784x_attr_group); err_remove_hwmon: ads784x_hwmon_unregister(spi, ts); err_free_irq: free_irq(spi->irq, ts); err_disable_regulator: regulator_disable(ts->reg); err_put_regulator: regulator_put(ts->reg); err_free_gpio: if (!ts->get_pendown_state) gpio_free(ts->gpio_pendown); err_cleanup_filter: if (ts->filter_cleanup) ts->filter_cleanup(ts->filter_data); err_free_mem: input_free_device(input_dev); kfree(packet); kfree(ts); return err; } static int __devexit ads7846_remove(struct spi_device *spi) { struct ads7846 *ts = dev_get_drvdata(&spi->dev); device_init_wakeup(&spi->dev, false); sysfs_remove_group(&spi->dev.kobj, &ads784x_attr_group); ads7846_disable(ts); free_irq(ts->spi->irq, ts); input_unregister_device(ts->input); ads784x_hwmon_unregister(spi, ts); regulator_disable(ts->reg); regulator_put(ts->reg); if (!ts->get_pendown_state) { /* * If we are not using specialized pendown method we must * have been relying on gpio we set up ourselves. */ gpio_free(ts->gpio_pendown); } if (ts->filter_cleanup) ts->filter_cleanup(ts->filter_data); kfree(ts->packet); kfree(ts); dev_dbg(&spi->dev, "unregistered touchscreen\n"); return 0; } static struct spi_driver ads7846_driver = { .driver = { .name = "ads7846", .owner = THIS_MODULE, .pm = &ads7846_pm, }, .probe = ads7846_probe, .remove = __devexit_p(ads7846_remove), }; module_spi_driver(ads7846_driver); MODULE_DESCRIPTION("ADS7846 TouchScreen Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:ads7846");
gpl-2.0
andip71/boeffla-kernel-oos-bacon
net/ipx/ipx_proc.c
5185
8235
/* * IPX proc routines * * Copyright(C) Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 2002 */ #include <linux/init.h> #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <linux/seq_file.h> #include <linux/export.h> #include <net/net_namespace.h> #include <net/tcp_states.h> #include <net/ipx.h> static void *ipx_seq_interface_start(struct seq_file *seq, loff_t *pos) { spin_lock_bh(&ipx_interfaces_lock); return seq_list_start_head(&ipx_interfaces, *pos); } static void *ipx_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_list_next(v, &ipx_interfaces, pos); } static void ipx_seq_interface_stop(struct seq_file *seq, void *v) { spin_unlock_bh(&ipx_interfaces_lock); } static int ipx_seq_interface_show(struct seq_file *seq, void *v) { struct ipx_interface *i; if (v == &ipx_interfaces) { seq_puts(seq, "Network Node_Address Primary Device " "Frame_Type"); #ifdef IPX_REFCNT_DEBUG seq_puts(seq, " refcnt"); #endif seq_puts(seq, "\n"); goto out; } i = list_entry(v, struct ipx_interface, node); seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum)); seq_printf(seq, "%02X%02X%02X%02X%02X%02X ", i->if_node[0], i->if_node[1], i->if_node[2], i->if_node[3], i->if_node[4], i->if_node[5]); seq_printf(seq, "%-9s", i == ipx_primary_net ? 
"Yes" : "No"); seq_printf(seq, "%-11s", ipx_device_name(i)); seq_printf(seq, "%-9s", ipx_frame_name(i->if_dlink_type)); #ifdef IPX_REFCNT_DEBUG seq_printf(seq, "%6d", atomic_read(&i->refcnt)); #endif seq_puts(seq, "\n"); out: return 0; } static void *ipx_seq_route_start(struct seq_file *seq, loff_t *pos) { read_lock_bh(&ipx_routes_lock); return seq_list_start_head(&ipx_routes, *pos); } static void *ipx_seq_route_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_list_next(v, &ipx_routes, pos); } static void ipx_seq_route_stop(struct seq_file *seq, void *v) { read_unlock_bh(&ipx_routes_lock); } static int ipx_seq_route_show(struct seq_file *seq, void *v) { struct ipx_route *rt; if (v == &ipx_routes) { seq_puts(seq, "Network Router_Net Router_Node\n"); goto out; } rt = list_entry(v, struct ipx_route, node); seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net)); if (rt->ir_routed) seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n", (long unsigned int)ntohl(rt->ir_intrfc->if_netnum), rt->ir_router_node[0], rt->ir_router_node[1], rt->ir_router_node[2], rt->ir_router_node[3], rt->ir_router_node[4], rt->ir_router_node[5]); else seq_puts(seq, "Directly Connected\n"); out: return 0; } static __inline__ struct sock *ipx_get_socket_idx(loff_t pos) { struct sock *s = NULL; struct hlist_node *node; struct ipx_interface *i; list_for_each_entry(i, &ipx_interfaces, node) { spin_lock_bh(&i->if_sklist_lock); sk_for_each(s, node, &i->if_sklist) { if (!pos) break; --pos; } spin_unlock_bh(&i->if_sklist_lock); if (!pos) { if (node) goto found; break; } } s = NULL; found: return s; } static void *ipx_seq_socket_start(struct seq_file *seq, loff_t *pos) { loff_t l = *pos; spin_lock_bh(&ipx_interfaces_lock); return l ? 
ipx_get_socket_idx(--l) : SEQ_START_TOKEN; } static void *ipx_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock* sk, *next; struct ipx_interface *i; struct ipx_sock *ipxs; ++*pos; if (v == SEQ_START_TOKEN) { sk = NULL; i = ipx_interfaces_head(); if (!i) goto out; sk = sk_head(&i->if_sklist); if (sk) spin_lock_bh(&i->if_sklist_lock); goto out; } sk = v; next = sk_next(sk); if (next) { sk = next; goto out; } ipxs = ipx_sk(sk); i = ipxs->intrfc; spin_unlock_bh(&i->if_sklist_lock); sk = NULL; for (;;) { if (i->node.next == &ipx_interfaces) break; i = list_entry(i->node.next, struct ipx_interface, node); spin_lock_bh(&i->if_sklist_lock); if (!hlist_empty(&i->if_sklist)) { sk = sk_head(&i->if_sklist); break; } spin_unlock_bh(&i->if_sklist_lock); } out: return sk; } static int ipx_seq_socket_show(struct seq_file *seq, void *v) { struct sock *s; struct ipx_sock *ipxs; if (v == SEQ_START_TOKEN) { #ifdef CONFIG_IPX_INTERN seq_puts(seq, "Local_Address " "Remote_Address Tx_Queue " "Rx_Queue State Uid\n"); #else seq_puts(seq, "Local_Address Remote_Address " "Tx_Queue Rx_Queue State Uid\n"); #endif goto out; } s = v; ipxs = ipx_sk(s); #ifdef CONFIG_IPX_INTERN seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X ", (unsigned long)ntohl(ipxs->intrfc->if_netnum), ipxs->node[0], ipxs->node[1], ipxs->node[2], ipxs->node[3], ipxs->node[4], ipxs->node[5], ntohs(ipxs->port)); #else seq_printf(seq, "%08lX:%04X ", (unsigned long) ntohl(ipxs->intrfc->if_netnum), ntohs(ipxs->port)); #endif /* CONFIG_IPX_INTERN */ if (s->sk_state != TCP_ESTABLISHED) seq_printf(seq, "%-28s", "Not_Connected"); else { seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X ", (unsigned long)ntohl(ipxs->dest_addr.net), ipxs->dest_addr.node[0], ipxs->dest_addr.node[1], ipxs->dest_addr.node[2], ipxs->dest_addr.node[3], ipxs->dest_addr.node[4], ipxs->dest_addr.node[5], ntohs(ipxs->dest_addr.sock)); } seq_printf(seq, "%08X %08X %02X %03d\n", sk_wmem_alloc_get(s), sk_rmem_alloc_get(s), 
s->sk_state, SOCK_INODE(s->sk_socket)->i_uid); out: return 0; } static const struct seq_operations ipx_seq_interface_ops = { .start = ipx_seq_interface_start, .next = ipx_seq_interface_next, .stop = ipx_seq_interface_stop, .show = ipx_seq_interface_show, }; static const struct seq_operations ipx_seq_route_ops = { .start = ipx_seq_route_start, .next = ipx_seq_route_next, .stop = ipx_seq_route_stop, .show = ipx_seq_route_show, }; static const struct seq_operations ipx_seq_socket_ops = { .start = ipx_seq_socket_start, .next = ipx_seq_socket_next, .stop = ipx_seq_interface_stop, .show = ipx_seq_socket_show, }; static int ipx_seq_route_open(struct inode *inode, struct file *file) { return seq_open(file, &ipx_seq_route_ops); } static int ipx_seq_interface_open(struct inode *inode, struct file *file) { return seq_open(file, &ipx_seq_interface_ops); } static int ipx_seq_socket_open(struct inode *inode, struct file *file) { return seq_open(file, &ipx_seq_socket_ops); } static const struct file_operations ipx_seq_interface_fops = { .owner = THIS_MODULE, .open = ipx_seq_interface_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct file_operations ipx_seq_route_fops = { .owner = THIS_MODULE, .open = ipx_seq_route_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct file_operations ipx_seq_socket_fops = { .owner = THIS_MODULE, .open = ipx_seq_socket_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static struct proc_dir_entry *ipx_proc_dir; int __init ipx_proc_init(void) { struct proc_dir_entry *p; int rc = -ENOMEM; ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net); if (!ipx_proc_dir) goto out; p = proc_create("interface", S_IRUGO, ipx_proc_dir, &ipx_seq_interface_fops); if (!p) goto out_interface; p = proc_create("route", S_IRUGO, ipx_proc_dir, &ipx_seq_route_fops); if (!p) goto out_route; p = proc_create("socket", S_IRUGO, ipx_proc_dir, &ipx_seq_socket_fops); if (!p) goto 
out_socket; rc = 0; out: return rc; out_socket: remove_proc_entry("route", ipx_proc_dir); out_route: remove_proc_entry("interface", ipx_proc_dir); out_interface: remove_proc_entry("ipx", init_net.proc_net); goto out; } void __exit ipx_proc_exit(void) { remove_proc_entry("interface", ipx_proc_dir); remove_proc_entry("route", ipx_proc_dir); remove_proc_entry("socket", ipx_proc_dir); remove_proc_entry("ipx", init_net.proc_net); } #else /* CONFIG_PROC_FS */ int __init ipx_proc_init(void) { return 0; } void __exit ipx_proc_exit(void) { } #endif /* CONFIG_PROC_FS */
gpl-2.0
aapav01/samsung_ms013g_SWA
drivers/watchdog/sb_wdog.c
7233
8770
/* * Watchdog driver for SiByte SB1 SoCs * * Copyright (C) 2007 OnStor, Inc. * Andrew Sharp <andy.sharp@lsi.com> * * This driver is intended to make the second of two hardware watchdogs * on the Sibyte 12XX and 11XX SoCs available to the user. There are two * such devices available on the SoC, but it seems that there isn't an * enumeration class for watchdogs in Linux like there is for RTCs. * The second is used rather than the first because it uses IRQ 1, * thereby avoiding all that IRQ 0 problematic nonsense. * * I have not tried this driver on a 1480 processor; it might work * just well enough to really screw things up. * * It is a simple timer, and there is an interrupt that is raised the * first time the timer expires. The second time it expires, the chip * is reset and there is no way to redirect that NMI. Which could * be problematic in some cases where this chip is sitting on the HT * bus and has just taken responsibility for providing a cache block. * Since the reset can't be redirected to the external reset pin, it is * possible that other HT connected processors might hang and not reset. * For Linux, a soft reset would probably be even worse than a hard reset. * There you have it. * * The timer takes 23 bits of a 64 bit register (?) as a count value, * and decrements the count every microsecond, for a max value of * 0x7fffff usec or about 8.3ish seconds. * * This watchdog borrows some user semantics from the softdog driver, * in that if you close the fd, it leaves the watchdog running, unless * you previously wrote a 'V' to the fd, in which case it disables * the watchdog when you close the fd like some other drivers. * * Based on various other watchdog drivers, which are probably all * loosely based on something Alan Cox wrote years ago. * * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>, * All Rights Reserved. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 1 or 2 as published by the Free Software Foundation. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/fs.h> #include <linux/reboot.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/interrupt.h> #include <asm/sibyte/sb1250.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_int.h> #include <asm/sibyte/sb1250_scd.h> static DEFINE_SPINLOCK(sbwd_lock); /* * set the initial count value of a timer * * wdog is the iomem address of the cfg register */ void sbwdog_set(char __iomem *wdog, unsigned long t) { spin_lock(&sbwd_lock); __raw_writeb(0, wdog); __raw_writeq(t & 0x7fffffUL, wdog - 0x10); spin_unlock(&sbwd_lock); } /* * cause the timer to [re]load it's initial count and start counting * all over again * * wdog is the iomem address of the cfg register */ void sbwdog_pet(char __iomem *wdog) { spin_lock(&sbwd_lock); __raw_writeb(__raw_readb(wdog) | 1, wdog); spin_unlock(&sbwd_lock); } static unsigned long sbwdog_gate; /* keeps it to one thread only */ static char __iomem *kern_dog = (char __iomem *)(IO_BASE + (A_SCD_WDOG_CFG_0)); static char __iomem *user_dog = (char __iomem *)(IO_BASE + (A_SCD_WDOG_CFG_1)); static unsigned long timeout = 0x7fffffUL; /* useconds: 8.3ish secs. 
*/ static int expect_close; static const struct watchdog_info ident = { .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .identity = "SiByte Watchdog", }; /* * Allow only a single thread to walk the dog */ static int sbwdog_open(struct inode *inode, struct file *file) { nonseekable_open(inode, file); if (test_and_set_bit(0, &sbwdog_gate)) return -EBUSY; __module_get(THIS_MODULE); /* * Activate the timer */ sbwdog_set(user_dog, timeout); __raw_writeb(1, user_dog); return 0; } /* * Put the dog back in the kennel. */ static int sbwdog_release(struct inode *inode, struct file *file) { if (expect_close == 42) { __raw_writeb(0, user_dog); module_put(THIS_MODULE); } else { pr_crit("%s: Unexpected close, not stopping watchdog!\n", ident.identity); sbwdog_pet(user_dog); } clear_bit(0, &sbwdog_gate); expect_close = 0; return 0; } /* * 42 - the answer */ static ssize_t sbwdog_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { int i; if (len) { /* * restart the timer */ expect_close = 0; for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_close = 42; } sbwdog_pet(user_dog); } return len; } static long sbwdog_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = -ENOTTY; unsigned long time; void __user *argp = (void __user *)arg; int __user *p = argp; switch (cmd) { case WDIOC_GETSUPPORT: ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: ret = put_user(0, p); break; case WDIOC_KEEPALIVE: sbwdog_pet(user_dog); ret = 0; break; case WDIOC_SETTIMEOUT: ret = get_user(time, p); if (ret) break; time *= 1000000; if (time > 0x7fffffUL) { ret = -EINVAL; break; } timeout = time; sbwdog_set(user_dog, timeout); sbwdog_pet(user_dog); case WDIOC_GETTIMEOUT: /* * get the remaining count from the ... 
count register * which is 1*8 before the config register */ ret = put_user(__raw_readq(user_dog - 8) / 1000000, p); break; } return ret; } /* * Notifier for system down */ static int sbwdog_notify_sys(struct notifier_block *this, unsigned long code, void *erf) { if (code == SYS_DOWN || code == SYS_HALT) { /* * sit and sit */ __raw_writeb(0, user_dog); __raw_writeb(0, kern_dog); } return NOTIFY_DONE; } static const struct file_operations sbwdog_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = sbwdog_write, .unlocked_ioctl = sbwdog_ioctl, .open = sbwdog_open, .release = sbwdog_release, }; static struct miscdevice sbwdog_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &sbwdog_fops, }; static struct notifier_block sbwdog_notifier = { .notifier_call = sbwdog_notify_sys, }; /* * interrupt handler * * doesn't do a whole lot for user, but oh so cleverly written so kernel * code can use it to re-up the watchdog, thereby saving the kernel from * having to create and maintain a timer, just to tickle another timer, * which is just so wrong. 
*/ irqreturn_t sbwdog_interrupt(int irq, void *addr) { unsigned long wd_init; char *wd_cfg_reg = (char *)addr; u8 cfg; cfg = __raw_readb(wd_cfg_reg); wd_init = __raw_readq(wd_cfg_reg - 8) & 0x7fffff; /* * if it's the second watchdog timer, it's for those users */ if (wd_cfg_reg == user_dog) pr_crit("%s in danger of initiating system reset " "in %ld.%01ld seconds\n", ident.identity, wd_init / 1000000, (wd_init / 100000) % 10); else cfg |= 1; __raw_writeb(cfg, wd_cfg_reg); return IRQ_HANDLED; } static int __init sbwdog_init(void) { int ret; /* * register a reboot notifier */ ret = register_reboot_notifier(&sbwdog_notifier); if (ret) { pr_err("%s: cannot register reboot notifier (err=%d)\n", ident.identity, ret); return ret; } /* * get the resources */ ret = request_irq(1, sbwdog_interrupt, IRQF_SHARED, ident.identity, (void *)user_dog); if (ret) { pr_err("%s: failed to request irq 1 - %d\n", ident.identity, ret); goto out; } ret = misc_register(&sbwdog_miscdev); if (ret == 0) { pr_info("%s: timeout is %ld.%ld secs\n", ident.identity, timeout / 1000000, (timeout / 100000) % 10); return 0; } free_irq(1, (void *)user_dog); out: unregister_reboot_notifier(&sbwdog_notifier); return ret; } static void __exit sbwdog_exit(void) { misc_deregister(&sbwdog_miscdev); free_irq(1, (void *)user_dog); unregister_reboot_notifier(&sbwdog_notifier); } module_init(sbwdog_init); module_exit(sbwdog_exit); MODULE_AUTHOR("Andrew Sharp <andy.sharp@lsi.com>"); MODULE_DESCRIPTION("SiByte Watchdog"); module_param(timeout, ulong, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in microseconds (max/default 8388607 or 8.3ish secs)"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); /* * example code that can be put in a platform code area to utilize the * first watchdog timer for the kernels own purpose. 
void platform_wd_setup(void) { int ret; ret = request_irq(1, sbwdog_interrupt, IRQF_SHARED, "Kernel Watchdog", IOADDR(A_SCD_WDOG_CFG_0)); if (ret) { pr_crit("Watchdog IRQ zero(0) failed to be requested - %d\n", ret); } } */
gpl-2.0
ausdim/boeffla-kernel-jb-u8-s3_m3
drivers/media/video/au0828/au0828-dvb.c
8001
11129
/*
 *  Driver for the Auvitek USB bridge
 *
 *  Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/suspend.h>
#include <media/v4l2-common.h>

#include "au0828.h"
#include "au8522.h"
#include "xc5000.h"
#include "mxl5007t.h"
#include "tda18271.h"

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* Bulk IN endpoint carrying the MPEG transport stream. */
#define _AU0828_BULKPIPE 0x83
/* NOTE(review): _BULKPIPESIZE appears unused in this file; URB sizing
 * comes from URB_BUFSIZE (au0828.h). Leading-underscore names are also
 * reserved identifiers — candidates for cleanup. */
#define _BULKPIPESIZE 0xe522

/* LED state bytes written by the au8522 demod driver. */
static u8 hauppauge_hvr950q_led_states[] = {
	0x00, /* off */
	0x02, /* yellow */
	0x04, /* green */
};

/* GPIO/LED wiring and signal-strength thresholds (dB x 10) for the
 * HVR-850/950Q front-panel LED. */
static struct au8522_led_config hauppauge_hvr950q_led_cfg = {
	.gpio_output = 0x00e0,
	.gpio_output_enable = 0x6006,
	.gpio_output_disable = 0x0660,
	.gpio_leds = 0x00e2,
	.led_states = hauppauge_hvr950q_led_states,
	.num_led_states = sizeof(hauppauge_hvr950q_led_states),
	.vsb8_strong = 20 /* dB */ * 10,
	.qam64_strong = 25 /* dB */ * 10,
	.qam256_strong = 32 /* dB */ * 10,
};

/* Per-board au8522 demodulator configurations. */
static struct au8522_config hauppauge_hvr950q_config = {
	.demod_address = 0x8e >> 1,
	.status_mode = AU8522_DEMODLOCKING,
	.qam_if = AU8522_IF_6MHZ,
	.vsb_if = AU8522_IF_6MHZ,
	.led_cfg = &hauppauge_hvr950q_led_cfg,
};

static struct au8522_config fusionhdtv7usb_config = {
	.demod_address = 0x8e >> 1,
	.status_mode = AU8522_DEMODLOCKING,
	.qam_if = AU8522_IF_6MHZ,
	.vsb_if = AU8522_IF_6MHZ,
};

static struct au8522_config hauppauge_woodbury_config = {
	.demod_address = 0x8e >> 1,
	.status_mode = AU8522_DEMODLOCKING,
	.qam_if = AU8522_IF_4MHZ,
	.vsb_if = AU8522_IF_3_25MHZ,
};

/* Tuner configurations. */
static struct xc5000_config hauppauge_hvr950q_tunerconfig = {
	.i2c_address = 0x61,
	.if_khz = 6000,
};

static struct mxl5007t_config mxl5007t_hvr950q_config = {
	.xtal_freq_hz = MxL_XTAL_24_MHZ,
	.if_freq_hz = MxL_IF_6_MHZ,
};

static struct tda18271_config hauppauge_woodbury_tunerconfig = {
	.gate = TDA18271_GATE_DIGITAL,
};

/*-------------------------------------------------------------------*/
/*
 * Bulk URB completion handler: push the received TS packets into the
 * software demux and immediately requeue the URB.  Runs in interrupt
 * context, hence GFP_ATOMIC on resubmission.
 */
static void urb_completion(struct urb *purb)
{
	struct au0828_dev *dev = purb->context;
	int ptype = usb_pipetype(purb->pipe);

	dprintk(2, "%s()\n", __func__);

	if (!dev)
		return;

	/* Streaming already stopped: drop the buffer, do not requeue. */
	if (dev->urb_streaming == 0)
		return;

	if (ptype != PIPE_BULK) {
		printk(KERN_ERR "%s() Unsupported URB type %d\n",
		       __func__, ptype);
		return;
	}

	/* Feed the transport payload into the kernel demux
	 * (188 bytes per TS packet). */
	dvb_dmx_swfilter_packets(&dev->dvb.demux,
		purb->transfer_buffer, purb->actual_length / 188);

	/* Clean the buffer before we requeue */
	memset(purb->transfer_buffer, 0, URB_BUFSIZE);

	/* Requeue URB */
	usb_submit_urb(purb, GFP_ATOMIC);
}

/*
 * Cancel and free all streaming URBs and their transfer buffers, then
 * mark streaming as stopped.  Assumes start_urb_transfer() populated
 * every dev->urbs[] slot.
 */
static int stop_urb_transfer(struct au0828_dev *dev)
{
	int i;

	dprintk(2, "%s()\n", __func__);

	for (i = 0; i < URB_COUNT; i++) {
		usb_kill_urb(dev->urbs[i]);
		kfree(dev->urbs[i]->transfer_buffer);
		usb_free_urb(dev->urbs[i]);
	}

	dev->urb_streaming = 0;

	return 0;
}

/*
 * Allocate, fill and submit URB_COUNT bulk URBs for TS streaming.
 * Returns 0 on success (also when streaming is already running),
 * negative errno otherwise.
 *
 * NOTE(review): if an allocation fails partway through the first loop,
 * the "goto err" path returns -ENOMEM without freeing the URBs/buffers
 * allocated in earlier iterations — a memory leak on this error path
 * (stop_urb_transfer() cannot be used as-is here because it
 * dereferences every dev->urbs[i] unconditionally).
 */
static int start_urb_transfer(struct au0828_dev *dev)
{
	struct urb *purb;
	int i, ret = -ENOMEM;

	dprintk(2, "%s()\n", __func__);

	if (dev->urb_streaming) {
		dprintk(2, "%s: bulk xfer already running!\n", __func__);
		return 0;
	}

	for (i = 0; i < URB_COUNT; i++) {
		dev->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urbs[i])
			goto err;

		purb = dev->urbs[i];

		purb->transfer_buffer = kzalloc(URB_BUFSIZE, GFP_KERNEL);
		if (!purb->transfer_buffer) {
			usb_free_urb(purb);
			dev->urbs[i] = NULL;
			goto err;
		}

		purb->status = -EINPROGRESS;
		usb_fill_bulk_urb(purb,
				  dev->usbdev,
				  usb_rcvbulkpipe(dev->usbdev,
					_AU0828_BULKPIPE),
				  purb->transfer_buffer,
				  URB_BUFSIZE,
				  urb_completion,
				  dev);
	}

	for (i = 0; i < URB_COUNT; i++) {
		ret = usb_submit_urb(dev->urbs[i], GFP_ATOMIC);
		if (ret != 0) {
			/* All slots are populated at this point, so full
			 * teardown via stop_urb_transfer() is safe. */
			stop_urb_transfer(dev);
			printk(KERN_ERR "%s: failed urb submission, "
			       "err = %d\n", __func__, ret);
			return ret;
		}
	}

	dev->urb_streaming = 1;
	ret = 0;

err:
	return ret;
}

/*
 * demux start_feed callback: on the first active feed, enable the
 * bridge's TS port (registers 0x608-0x60b) and start URB streaming.
 * Feeds are refcounted under dvb->lock.
 */
static int au0828_dvb_start_feed(struct dvb_demux_feed *feed)
{
	struct dvb_demux *demux = feed->demux;
	struct au0828_dev *dev = (struct au0828_dev *) demux->priv;
	struct au0828_dvb *dvb = &dev->dvb;
	int ret = 0;

	dprintk(1, "%s()\n", __func__);

	if (!demux->dmx.frontend)
		return -EINVAL;

	if (dvb) {
		mutex_lock(&dvb->lock);
		if (dvb->feeding++ == 0) {
			/* Start transport */
			au0828_write(dev, 0x608, 0x90);
			au0828_write(dev, 0x609, 0x72);
			au0828_write(dev, 0x60a, 0x71);
			au0828_write(dev, 0x60b, 0x01);
			ret = start_urb_transfer(dev);
		}
		mutex_unlock(&dvb->lock);
	}

	return ret;
}

/*
 * demux stop_feed callback: when the last feed goes away, disable the
 * bridge's TS port and stop URB streaming.
 */
static int au0828_dvb_stop_feed(struct dvb_demux_feed *feed)
{
	struct dvb_demux *demux = feed->demux;
	struct au0828_dev *dev = (struct au0828_dev *) demux->priv;
	struct au0828_dvb *dvb = &dev->dvb;
	int ret = 0;

	dprintk(1, "%s()\n", __func__);

	if (dvb) {
		mutex_lock(&dvb->lock);
		if (--dvb->feeding == 0) {
			/* Stop transport */
			au0828_write(dev, 0x608, 0x00);
			au0828_write(dev, 0x609, 0x00);
			au0828_write(dev, 0x60a, 0x00);
			au0828_write(dev, 0x60b, 0x00);
			ret = stop_urb_transfer(dev);
		}
		mutex_unlock(&dvb->lock);
	}

	return ret;
}

/*
 * Register the whole DVB stack for this device: adapter, frontend,
 * demux, dmxdev, demux frontends and dvb_net.  On any failure, the
 * goto ladder unwinds exactly the pieces registered so far (plus the
 * frontend handed in by the caller).
 */
static int dvb_register(struct au0828_dev *dev)
{
	struct au0828_dvb *dvb = &dev->dvb;
	int result;

	dprintk(1, "%s()\n", __func__);

	/* register adapter */
	result = dvb_register_adapter(&dvb->adapter, DRIVER_NAME, THIS_MODULE,
				      &dev->usbdev->dev, adapter_nr);
	if (result < 0) {
		printk(KERN_ERR "%s: dvb_register_adapter failed "
		       "(errno = %d)\n", DRIVER_NAME, result);
		goto fail_adapter;
	}
	dvb->adapter.priv = dev;

	/* register frontend */
	result = dvb_register_frontend(&dvb->adapter, dvb->frontend);
	if (result < 0) {
		printk(KERN_ERR "%s: dvb_register_frontend failed "
		       "(errno = %d)\n", DRIVER_NAME, result);
		goto fail_frontend;
	}

	/* register demux stuff */
	dvb->demux.dmx.capabilities =
		DMX_TS_FILTERING | DMX_SECTION_FILTERING |
		DMX_MEMORY_BASED_FILTERING;
	dvb->demux.priv = dev;
	dvb->demux.filternum = 256;
	dvb->demux.feednum = 256;
	dvb->demux.start_feed = au0828_dvb_start_feed;
	dvb->demux.stop_feed = au0828_dvb_stop_feed;
	result = dvb_dmx_init(&dvb->demux);
	if (result < 0) {
		printk(KERN_ERR "%s: dvb_dmx_init failed (errno = %d)\n",
		       DRIVER_NAME, result);
		goto fail_dmx;
	}

	dvb->dmxdev.filternum = 256;
	dvb->dmxdev.demux = &dvb->demux.dmx;
	dvb->dmxdev.capabilities = 0;
	result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
	if (result < 0) {
		printk(KERN_ERR "%s: dvb_dmxdev_init failed (errno = %d)\n",
		       DRIVER_NAME, result);
		goto fail_dmxdev;
	}

	dvb->fe_hw.source = DMX_FRONTEND_0;
	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
	if (result < 0) {
		printk(KERN_ERR "%s: add_frontend failed "
		       "(DMX_FRONTEND_0, errno = %d)\n", DRIVER_NAME, result);
		goto fail_fe_hw;
	}

	dvb->fe_mem.source = DMX_MEMORY_FE;
	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
	if (result < 0) {
		printk(KERN_ERR "%s: add_frontend failed "
		       "(DMX_MEMORY_FE, errno = %d)\n", DRIVER_NAME, result);
		goto fail_fe_mem;
	}

	result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
	if (result < 0) {
		printk(KERN_ERR "%s: connect_frontend failed (errno = %d)\n",
		       DRIVER_NAME, result);
		goto fail_fe_conn;
	}

	/* register network adapter */
	dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx);
	return 0;

fail_fe_conn:
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
fail_fe_mem:
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
fail_fe_hw:
	dvb_dmxdev_release(&dvb->dmxdev);
fail_dmxdev:
	dvb_dmx_release(&dvb->demux);
fail_dmx:
	dvb_unregister_frontend(dvb->frontend);
fail_frontend:
	dvb_frontend_detach(dvb->frontend);
	dvb_unregister_adapter(&dvb->adapter);
fail_adapter:
	return result;
}

/*
 * Tear down everything dvb_register() set up, in reverse order.
 * A NULL frontend means registration never completed; nothing to do.
 */
void au0828_dvb_unregister(struct au0828_dev *dev)
{
	struct au0828_dvb *dvb = &dev->dvb;

	dprintk(1, "%s()\n", __func__);

	if (dvb->frontend == NULL)
		return;

	dvb_net_release(&dvb->net);
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
	dvb_dmxdev_release(&dvb->dmxdev);
	dvb_dmx_release(&dvb->demux);
	dvb_unregister_frontend(dvb->frontend);
	dvb_frontend_detach(dvb->frontend);
	dvb_unregister_adapter(&dvb->adapter);
}

/* All the DVB attach calls go here, this function gets modified
 * for each new card. No other function in this file needs
 * to change.
 */
int au0828_dvb_register(struct au0828_dev *dev)
{
	struct au0828_dvb *dvb = &dev->dvb;
	int ret;

	dprintk(1, "%s()\n", __func__);

	/* init frontend */
	switch (dev->boardnr) {
	case AU0828_BOARD_HAUPPAUGE_HVR850:
	case AU0828_BOARD_HAUPPAUGE_HVR950Q:
		dvb->frontend = dvb_attach(au8522_attach,
				&hauppauge_hvr950q_config,
				&dev->i2c_adap);
		if (dvb->frontend != NULL)
			dvb_attach(xc5000_attach, dvb->frontend,
				&dev->i2c_adap,
				&hauppauge_hvr950q_tunerconfig);
		break;
	case AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL:
		dvb->frontend = dvb_attach(au8522_attach,
				&hauppauge_hvr950q_config,
				&dev->i2c_adap);
		if (dvb->frontend != NULL)
			dvb_attach(mxl5007t_attach, dvb->frontend,
				   &dev->i2c_adap, 0x60,
				   &mxl5007t_hvr950q_config);
		break;
	case AU0828_BOARD_HAUPPAUGE_WOODBURY:
		dvb->frontend = dvb_attach(au8522_attach,
				&hauppauge_woodbury_config,
				&dev->i2c_adap);
		if (dvb->frontend != NULL)
			dvb_attach(tda18271_attach, dvb->frontend,
				   0x60, &dev->i2c_adap,
				   &hauppauge_woodbury_tunerconfig);
		break;
	case AU0828_BOARD_DVICO_FUSIONHDTV7:
		dvb->frontend = dvb_attach(au8522_attach,
				&fusionhdtv7usb_config,
				&dev->i2c_adap);
		if (dvb->frontend != NULL) {
			dvb_attach(xc5000_attach, dvb->frontend,
				&dev->i2c_adap,
				&hauppauge_hvr950q_tunerconfig);
		}
		break;
	default:
		printk(KERN_WARNING "The frontend of your DVB/ATSC card "
		       "isn't supported yet\n");
		break;
	}
	if (NULL == dvb->frontend) {
		printk(KERN_ERR "%s() Frontend initialization failed\n",
		       __func__);
		return -1;
	}
	/* define general-purpose callback pointer */
	dvb->frontend->callback = au0828_tuner_callback;

	/* register everything */
	ret = dvb_register(dev);
	if (ret < 0) {
		if (dvb->frontend->ops.release)
			dvb->frontend->ops.release(dvb->frontend);
		return ret;
	}

	return 0;
}
gpl-2.0
AKKP/lge-kernel-star
drivers/media/dvb/frontends/tda665x.c
9281
6350
/*
	TDA665x tuner driver
	Copyright (C) Manu Abraham (abraham.manu@gmail.com)

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the Free Software
	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "dvb_frontend.h"
#include "tda665x.h"

/* Per-tuner driver state, hung off fe->tuner_priv. */
struct tda665x_state {
	struct dvb_frontend		*fe;
	struct i2c_adapter		*i2c;
	const struct tda665x_config	*config;
	u32 frequency;	/* last successfully tuned frequency (Hz) */
	u32 bandwidth;
};

/*
 * Read the tuner's 2-byte status.  @buf MUST point to at least 2 bytes
 * (msg.len is fixed at 2).  Returns 1 (completed i2c messages) on
 * success, a negative errno or short count on failure.
 */
static int tda665x_read(struct tda665x_state *state, u8 *buf)
{
	const struct tda665x_config *config = state->config;
	int err = 0;
	struct i2c_msg msg = { .addr = config->addr, .flags = I2C_M_RD, .buf = buf, .len = 2 };

	err = i2c_transfer(state->i2c, &msg, 1);
	if (err != 1)
		goto exit;

	return err;
exit:
	printk(KERN_ERR "%s: I/O Error err=<%d>\n", __func__, err);
	return err;
}

/*
 * Write @length bytes from @buf to the tuner.  Returns 1 on success,
 * a negative errno or short count on failure.
 */
static int tda665x_write(struct tda665x_state *state, u8 *buf, u8 length)
{
	const struct tda665x_config *config = state->config;
	int err = 0;
	struct i2c_msg msg = { .addr = config->addr, .flags = 0, .buf = buf, .len = length };

	err = i2c_transfer(state->i2c, &msg, 1);
	if (err != 1)
		goto exit;

	return err;
exit:
	printk(KERN_ERR "%s: I/O Error err=<%d>\n", __func__, err);
	return err;
}

/*
 * get_state callback: report cached tuner state (frequency only;
 * bandwidth queries are accepted but return nothing).
 */
static int tda665x_get_state(struct dvb_frontend *fe,
			     enum tuner_param param,
			     struct tuner_state *tstate)
{
	struct tda665x_state *state = fe->tuner_priv;
	int err = 0;

	switch (param) {
	case DVBFE_TUNER_FREQUENCY:
		tstate->frequency = state->frequency;
		break;
	case DVBFE_TUNER_BANDWIDTH:
		/* bandwidth is not tracked by this driver */
		break;
	default:
		printk(KERN_ERR "%s: Unknown parameter (param=%d)\n", __func__, param);
		err = -EINVAL;
		break;
	}

	return err;
}

/*
 * get_status callback: *status is set to 1 when the PLL reports phase
 * lock (bit 6 of the first status byte), 0 otherwise.
 */
static int tda665x_get_status(struct dvb_frontend *fe, u32 *status)
{
	struct tda665x_state *state = fe->tuner_priv;
	/* tda665x_read() always transfers 2 bytes: a single-u8 buffer
	 * (as the original code used) overflows the stack. */
	u8 result[2] = { 0, 0 };
	int err = 0;

	*status = 0;

	err = tda665x_read(state, result);
	if (err < 0)
		goto exit;

	if ((result[0] >> 6) & 0x01) {
		printk(KERN_DEBUG "%s: Tuner Phase Locked\n", __func__);
		*status = 1;
	}

	return err;
exit:
	printk(KERN_ERR "%s: I/O Error\n", __func__);
	return err;
}

/*
 * set_state callback: program the PLL divider and charge-pump/band
 * bits for the requested frequency, then poll once for phase lock.
 * The successfully locked frequency is cached in state->frequency.
 */
static int tda665x_set_state(struct dvb_frontend *fe,
			     enum tuner_param param,
			     struct tuner_state *tstate)
{
	struct tda665x_state *state = fe->tuner_priv;
	const struct tda665x_config *config = state->config;
	u32 frequency, status = 0;
	u8 buf[4];
	int err = 0;

	if (param & DVBFE_TUNER_FREQUENCY) {

		frequency = tstate->frequency;
		/* Reject out-of-range requests.  (The original test had
		 * min and max swapped, which rejected every in-range
		 * frequency and accepted out-of-range ones.) */
		if ((frequency < config->frequency_min) ||
		    (frequency > config->frequency_max)) {
			printk(KERN_ERR "%s: Frequency beyond limits, frequency=%d\n", __func__, frequency);
			return -EINVAL;
		}

		/* Convert to PLL divider word, rounding the division. */
		frequency += config->frequency_offst;
		frequency *= config->ref_multiplier;
		frequency += config->ref_divider >> 1;
		frequency /= config->ref_divider;

		buf[0] = (u8) ((frequency & 0x7f00) >> 8);
		buf[1] = (u8) (frequency & 0x00ff) >> 0;
		buf[2] = 0x80 | 0x40 | 0x02;	/* CP on, test mode off */
		buf[3] = 0x00;

		/* restore frequency */
		frequency = tstate->frequency;

		/* Select band and charge-pump current. */
		if (frequency < 153000000) {
			/* VHF-L */
			buf[3] |= 0x01; /* fc, Low Band, 47 - 153 MHz */
			/* NOTE(review): the two thresholds below (1.04 GHz,
			 * 1.25 GHz) can never be exceeded inside this
			 * < 153 MHz branch, so 0x60 and 0x80 are always
			 * OR-ed in.  They look like they were meant to be
			 * 104/125 MHz with else-if chaining — confirm
			 * against the TDA665x datasheet before changing. */
			if (frequency < 68000000)
				buf[3] |= 0x40; /* 83uA */
			if (frequency < 1040000000)
				buf[3] |= 0x60; /* 122uA */
			if (frequency < 1250000000)
				buf[3] |= 0x80; /* 163uA */
			else
				buf[3] |= 0xa0; /* 254uA */
		} else if (frequency < 438000000) {
			/* VHF-H */
			buf[3] |= 0x02; /* fc, Mid Band, 153 - 438 MHz */
			if (frequency < 230000000)
				buf[3] |= 0x40;
			if (frequency < 300000000)
				buf[3] |= 0x60;
			else
				buf[3] |= 0x80;
		} else {
			/* UHF */
			buf[3] |= 0x04; /* fc, High Band, 438 - 862 MHz */
			if (frequency < 470000000)
				buf[3] |= 0x60;
			if (frequency < 526000000)
				buf[3] |= 0x80;
			else
				buf[3] |= 0xa0;
		}

		/* Set params.  (The original passed length 5, reading one
		 * byte past the end of the 4-byte buffer.) */
		err = tda665x_write(state, buf, sizeof(buf));
		if (err < 0)
			goto exit;

		/* sleep for some time */
		printk(KERN_DEBUG "%s: Waiting to Phase LOCK\n", __func__);
		msleep(20);
		/* check status */
		err = tda665x_get_status(fe, &status);
		if (err < 0)
			goto exit;

		if (status == 1) {
			printk(KERN_DEBUG "%s: Tuner Phase locked: status=%d\n", __func__, status);
			state->frequency = frequency; /* cache successful state */
		} else {
			printk(KERN_ERR "%s: No Phase lock: status=%d\n", __func__, status);
		}
	} else {
		printk(KERN_ERR "%s: Unknown parameter (param=%d)\n", __func__, param);
		return -EINVAL;
	}

	return 0;
exit:
	printk(KERN_ERR "%s: I/O Error\n", __func__);
	return err;
}

/* release callback: free the per-tuner state. */
static int tda665x_release(struct dvb_frontend *fe)
{
	struct tda665x_state *state = fe->tuner_priv;

	fe->tuner_priv = NULL;
	kfree(state);
	return 0;
}

static struct dvb_tuner_ops tda665x_ops = {
	.set_state = tda665x_set_state,
	.get_state = tda665x_get_state,
	.get_status = tda665x_get_status,
	.release = tda665x_release
};

/*
 * Attach a TDA665x tuner to @fe.  On success, fe->tuner_priv and
 * fe->ops.tuner_ops are populated and @fe is returned; on allocation
 * failure NULL is returned and @fe is untouched.
 */
struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
				    const struct tda665x_config *config,
				    struct i2c_adapter *i2c)
{
	struct tda665x_state *state = NULL;
	struct dvb_tuner_info *info;

	state = kzalloc(sizeof(struct tda665x_state), GFP_KERNEL);
	if (state == NULL)
		return NULL;

	state->config = config;
	state->i2c = i2c;
	state->fe = fe;
	fe->tuner_priv = state;
	fe->ops.tuner_ops = tda665x_ops;
	info = &fe->ops.tuner_ops.info;

	memcpy(info->name, config->name, sizeof(config->name));
	info->frequency_min = config->frequency_min;
	info->frequency_max = config->frequency_max;
	info->frequency_step = config->frequency_offst;

	printk(KERN_DEBUG "%s: Attaching TDA665x (%s) tuner\n", __func__, info->name);
	return fe;
}
EXPORT_SYMBOL(tda665x_attach);

MODULE_DESCRIPTION("TDA665x driver");
MODULE_AUTHOR("Manu Abraham");
MODULE_LICENSE("GPL");
gpl-2.0
task650/kernel_htc_msm8974
arch/s390/kernel/stacktrace.c
9281
2545
/*
 * arch/s390/kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 * Copyright (C) IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>

/*
 * Walk one stack area [low, high] starting at frame pointer @sp,
 * following the s390 backchain, and record return addresses into
 * @trace (honouring trace->skip and trace->max_entries).
 *
 * When a zero backchain is found, the word after the last frame is
 * probed for a pt_regs (interrupt frame); if one fits, its PSW address
 * is recorded (unless it is a scheduler function and @savesched is 0)
 * and the walk continues on the interrupted context's stack pointer
 * (regs->gprs[15]) — which lets the caller chain across stack areas.
 *
 * Returns the last stack pointer reached; a return value different
 * from the starting @sp means the walk left the given area.
 */
static unsigned long save_context_stack(struct stack_trace *trace,
					unsigned long sp,
					unsigned long low,
					unsigned long high,
					int savesched)
{
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long addr;

	while(1) {
		/* Strip the addressing-mode bits from the pointer. */
		sp &= PSW_ADDR_INSN;
		if (sp < low || sp > high)
			return sp;
		sf = (struct stack_frame *)sp;
		while(1) {
			/* gprs[8] is the saved r14: the return address. */
			addr = sf->gprs[8] & PSW_ADDR_INSN;
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
			if (trace->nr_entries >= trace->max_entries)
				return sp;
			/* Move up one frame; low advances so a backchain
			 * pointing downwards (corrupt/cyclic) bails out. */
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *)sp;
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long)(sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *)sp;
		addr = regs->psw.addr & PSW_ADDR_INSN;
		if (savesched || !in_sched_functions(addr)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
		}
		if (trace->nr_entries >= trace->max_entries)
			return sp;
		low = sp;
		/* Continue with the interrupted context's stack pointer. */
		sp = regs->gprs[15];
	}
}

/*
 * Save a stack trace of the current context, trying the panic stack,
 * then the async (interrupt) stack, then the process stack.  A walk
 * that never left its area (new_sp == orig_sp) means the current
 * stack pointer was not in that area, so the next one is tried.
 */
void save_stack_trace(struct stack_trace *trace)
{
	register unsigned long sp asm ("15");
	unsigned long orig_sp, new_sp;

	orig_sp = sp & PSW_ADDR_INSN;
	new_sp = save_context_stack(trace, orig_sp,
				    S390_lowcore.panic_stack - PAGE_SIZE,
				    S390_lowcore.panic_stack, 1);
	if (new_sp != orig_sp)
		return;
	new_sp = save_context_stack(trace, new_sp,
				    S390_lowcore.async_stack - ASYNC_SIZE,
				    S390_lowcore.async_stack, 1);
	if (new_sp != orig_sp)
		return;
	save_context_stack(trace, new_sp,
			   S390_lowcore.thread_info,
			   S390_lowcore.thread_info + THREAD_SIZE, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

/*
 * Save a stack trace of a (sleeping) task @tsk, walking its kernel
 * stack only and skipping scheduler internals (savesched = 0).  The
 * trace is terminated with ULONG_MAX when space remains.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp, low, high;

	sp = tsk->thread.ksp & PSW_ADDR_INSN;
	low = (unsigned long) task_stack_page(tsk);
	high = (unsigned long) task_pt_regs(tsk);
	save_context_stack(trace, sp, low, high, 0);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
gpl-2.0
ayushtyagi28/android_kernel_cyanogen_msm8994
drivers/net/wan/lmc/lmc_debug.c
14657
1815
#include <linux/types.h> #include <linux/netdevice.h> #include <linux/interrupt.h> #include "lmc_debug.h" /* * Prints out len, max to 80 octets using printk, 20 per line */ #ifdef DEBUG #ifdef LMC_PACKET_LOG void lmcConsoleLog(char *type, unsigned char *ucData, int iLen) { int iNewLine = 1; char str[80], *pstr; sprintf(str, KERN_DEBUG "lmc: %s: ", type); pstr = str+strlen(str); if(iLen > 240){ printk(KERN_DEBUG "lmc: Printing 240 chars... out of: %d\n", iLen); iLen = 240; } else{ printk(KERN_DEBUG "lmc: Printing %d chars\n", iLen); } while(iLen > 0) { sprintf(pstr, "%02x ", *ucData); pstr+=3; ucData++; if( !(iNewLine % 20)) { sprintf(pstr, "\n"); printk(str); sprintf(str, KERN_DEBUG "lmc: %s: ", type); pstr=str+strlen(str); } iNewLine++; iLen--; } sprintf(pstr, "\n"); printk(str); } #endif #endif #ifdef DEBUG u32 lmcEventLogIndex; u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3) { lmcEventLogBuf[lmcEventLogIndex++] = EventNum; lmcEventLogBuf[lmcEventLogIndex++] = arg2; lmcEventLogBuf[lmcEventLogIndex++] = arg3; lmcEventLogBuf[lmcEventLogIndex++] = jiffies; lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1; } #endif /* DEBUG */ void lmc_trace(struct net_device *dev, char *msg){ #ifdef LMC_TRACE unsigned long j = jiffies + 3; /* Wait for 50 ms */ if(in_interrupt()){ printk("%s: * %s\n", dev->name, msg); // while(time_before(jiffies, j+10)) // ; } else { printk("%s: %s\n", dev->name, msg); while(time_before(jiffies, j)) schedule(); } #endif } /* --------------------------- end if_lmc_linux.c ------------------------ */
gpl-2.0
Shimejing/linux
drivers/pinctrl/intel/pinctrl-intel.c
66
29184
/* * Intel pinctrl/GPIO core driver. * * Copyright (C) 2015, Intel Corporation * Authors: Mathias Nyman <mathias.nyman@linux.intel.com> * Mika Westerberg <mika.westerberg@linux.intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/gpio.h> #include <linux/gpio/driver.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinconf-generic.h> #include "pinctrl-intel.h" /* Maximum number of pads in each group */ #define NPADS_IN_GPP 24 /* Offset from regs */ #define PADBAR 0x00c #define GPI_IS 0x100 #define GPI_GPE_STS 0x140 #define GPI_GPE_EN 0x160 #define PADOWN_BITS 4 #define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS) #define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p)) /* Offset from pad_regs */ #define PADCFG0 0x000 #define PADCFG0_RXEVCFG_SHIFT 25 #define PADCFG0_RXEVCFG_MASK (3 << PADCFG0_RXEVCFG_SHIFT) #define PADCFG0_RXEVCFG_LEVEL 0 #define PADCFG0_RXEVCFG_EDGE 1 #define PADCFG0_RXEVCFG_DISABLED 2 #define PADCFG0_RXEVCFG_EDGE_BOTH 3 #define PADCFG0_RXINV BIT(23) #define PADCFG0_GPIROUTIOXAPIC BIT(20) #define PADCFG0_GPIROUTSCI BIT(19) #define PADCFG0_GPIROUTSMI BIT(18) #define PADCFG0_GPIROUTNMI BIT(17) #define PADCFG0_PMODE_SHIFT 10 #define PADCFG0_PMODE_MASK (0xf << PADCFG0_PMODE_SHIFT) #define PADCFG0_GPIORXDIS BIT(9) #define PADCFG0_GPIOTXDIS BIT(8) #define PADCFG0_GPIORXSTATE BIT(1) #define PADCFG0_GPIOTXSTATE BIT(0) #define PADCFG1 0x004 #define PADCFG1_TERM_UP BIT(13) #define PADCFG1_TERM_SHIFT 10 #define PADCFG1_TERM_MASK (7 << PADCFG1_TERM_SHIFT) #define PADCFG1_TERM_20K 4 #define PADCFG1_TERM_2K 3 #define PADCFG1_TERM_5K 2 #define PADCFG1_TERM_1K 1 struct intel_pad_context { u32 padcfg0; u32 
padcfg1; }; struct intel_community_context { u32 *intmask; }; struct intel_pinctrl_context { struct intel_pad_context *pads; struct intel_community_context *communities; }; /** * struct intel_pinctrl - Intel pinctrl private structure * @dev: Pointer to the device structure * @lock: Lock to serialize register access * @pctldesc: Pin controller description * @pctldev: Pointer to the pin controller device * @chip: GPIO chip in this pin controller * @soc: SoC/PCH specific pin configuration data * @communities: All communities in this pin controller * @ncommunities: Number of communities in this pin controller * @context: Configuration saved over system sleep */ struct intel_pinctrl { struct device *dev; spinlock_t lock; struct pinctrl_desc pctldesc; struct pinctrl_dev *pctldev; struct gpio_chip chip; const struct intel_pinctrl_soc_data *soc; struct intel_community *communities; size_t ncommunities; struct intel_pinctrl_context context; }; #define gpiochip_to_pinctrl(c) container_of(c, struct intel_pinctrl, chip) #define pin_to_padno(c, p) ((p) - (c)->pin_base) static struct intel_community *intel_get_community(struct intel_pinctrl *pctrl, unsigned pin) { struct intel_community *community; int i; for (i = 0; i < pctrl->ncommunities; i++) { community = &pctrl->communities[i]; if (pin >= community->pin_base && pin < community->pin_base + community->npins) return community; } dev_warn(pctrl->dev, "failed to find community for pin %u\n", pin); return NULL; } static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin, unsigned reg) { const struct intel_community *community; unsigned padno; community = intel_get_community(pctrl, pin); if (!community) return NULL; padno = pin_to_padno(community, pin); return community->pad_regs + reg + padno * 8; } static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin) { const struct intel_community *community; unsigned padno, gpp, gpp_offset, offset; void __iomem *padown; community = 
intel_get_community(pctrl, pin); if (!community) return false; if (!community->padown_offset) return true; padno = pin_to_padno(community, pin); gpp = padno / NPADS_IN_GPP; gpp_offset = padno % NPADS_IN_GPP; offset = community->padown_offset + gpp * 16 + (gpp_offset / 8) * 4; padown = community->regs + offset; return !(readl(padown) & PADOWN_MASK(padno)); } static bool intel_pad_reserved_for_acpi(struct intel_pinctrl *pctrl, unsigned pin) { const struct intel_community *community; unsigned padno, gpp, offset; void __iomem *hostown; community = intel_get_community(pctrl, pin); if (!community) return true; if (!community->hostown_offset) return false; padno = pin_to_padno(community, pin); gpp = padno / NPADS_IN_GPP; offset = community->hostown_offset + gpp * 4; hostown = community->regs + offset; return !(readl(hostown) & BIT(padno % NPADS_IN_GPP)); } static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin) { struct intel_community *community; unsigned padno, gpp, offset; u32 value; community = intel_get_community(pctrl, pin); if (!community) return true; if (!community->padcfglock_offset) return false; padno = pin_to_padno(community, pin); gpp = padno / NPADS_IN_GPP; /* * If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad, * the pad is considered unlocked. Any other case means that it is * either fully or partially locked and we don't touch it. 
*/ offset = community->padcfglock_offset + gpp * 8; value = readl(community->regs + offset); if (value & BIT(pin % NPADS_IN_GPP)) return true; offset = community->padcfglock_offset + 4 + gpp * 8; value = readl(community->regs + offset); if (value & BIT(pin % NPADS_IN_GPP)) return true; return false; } static bool intel_pad_usable(struct intel_pinctrl *pctrl, unsigned pin) { return intel_pad_owned_by_host(pctrl, pin) && !intel_pad_reserved_for_acpi(pctrl, pin) && !intel_pad_locked(pctrl, pin); } static int intel_get_groups_count(struct pinctrl_dev *pctldev) { struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); return pctrl->soc->ngroups; } static const char *intel_get_group_name(struct pinctrl_dev *pctldev, unsigned group) { struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); return pctrl->soc->groups[group].name; } static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned group, const unsigned **pins, unsigned *npins) { struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); *pins = pctrl->soc->groups[group].pins; *npins = pctrl->soc->groups[group].npins; return 0; } static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned pin) { struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); u32 cfg0, cfg1, mode; bool locked, acpi; if (!intel_pad_owned_by_host(pctrl, pin)) { seq_puts(s, "not available"); return; } cfg0 = readl(intel_get_padcfg(pctrl, pin, PADCFG0)); cfg1 = readl(intel_get_padcfg(pctrl, pin, PADCFG1)); mode = (cfg0 & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT; if (!mode) seq_puts(s, "GPIO "); else seq_printf(s, "mode %d ", mode); seq_printf(s, "0x%08x 0x%08x", cfg0, cfg1); locked = intel_pad_locked(pctrl, pin); acpi = intel_pad_reserved_for_acpi(pctrl, pin); if (locked || acpi) { seq_puts(s, " ["); if (locked) { seq_puts(s, "LOCKED"); if (acpi) seq_puts(s, ", "); } if (acpi) seq_puts(s, "ACPI"); seq_puts(s, "]"); } } static const struct pinctrl_ops intel_pinctrl_ops = { 
.get_groups_count = intel_get_groups_count, .get_group_name = intel_get_group_name, .get_group_pins = intel_get_group_pins, .pin_dbg_show = intel_pin_dbg_show, }; static int intel_get_functions_count(struct pinctrl_dev *pctldev) { struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); return pctrl->soc->nfunctions; } static const char *intel_get_function_name(struct pinctrl_dev *pctldev, unsigned function) { struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); return pctrl->soc->functions[function].name; } static int intel_get_function_groups(struct pinctrl_dev *pctldev, unsigned function, const char * const **groups, unsigned * const ngroups) { struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); *groups = pctrl->soc->functions[function].groups; *ngroups = pctrl->soc->functions[function].ngroups; return 0; } static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, unsigned group) { struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); const struct intel_pingroup *grp = &pctrl->soc->groups[group]; unsigned long flags; int i; spin_lock_irqsave(&pctrl->lock, flags); /* * All pins in the groups needs to be accessible and writable * before we can enable the mux for this group. 
*/
	/*
	 * NOTE(review): the lines below are the tail of a function whose
	 * header lies above this chunk (presumably intel_pinmux_set_mux —
	 * confirm against the full file). First pass: refuse with -EBUSY if
	 * any pin in the group is not usable by the host.
	 */
	for (i = 0; i < grp->npins; i++) {
		if (!intel_pad_usable(pctrl, grp->pins[i])) {
			spin_unlock_irqrestore(&pctrl->lock, flags);
			return -EBUSY;
		}
	}

	/* Now enable the mux setting for each pin in the group */
	for (i = 0; i < grp->npins; i++) {
		void __iomem *padcfg0;
		u32 value;

		padcfg0 = intel_get_padcfg(pctrl, grp->pins[i], PADCFG0);
		value = readl(padcfg0);
		/* Program the pad mode field with the group's mux mode */
		value &= ~PADCFG0_PMODE_MASK;
		value |= grp->mode << PADCFG0_PMODE_SHIFT;
		writel(value, padcfg0);
	}

	spin_unlock_irqrestore(&pctrl->lock, flags);

	return 0;
}

/*
 * Switch a single pad to GPIO mode on behalf of the GPIO subsystem.
 * The pad is left as an input (TX disabled, RX enabled) and all
 * SCI/SMI/NMI/IOxAPIC interrupt routing for it is disabled.
 * Returns -EBUSY if the pad is not usable by the host.
 */
static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
				     struct pinctrl_gpio_range *range,
				     unsigned pin)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
	void __iomem *padcfg0;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&pctrl->lock, flags);

	if (!intel_pad_usable(pctrl, pin)) {
		spin_unlock_irqrestore(&pctrl->lock, flags);
		return -EBUSY;
	}

	padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
	/* Put the pad into GPIO mode */
	value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
	/* Disable SCI/SMI/NMI generation */
	value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
	value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
	/* Disable TX buffer and enable RX (this will be input) */
	value &= ~PADCFG0_GPIORXDIS;
	value |= PADCFG0_GPIOTXDIS;
	writel(value, padcfg0);

	spin_unlock_irqrestore(&pctrl->lock, flags);

	return 0;
}

/*
 * Set the direction of a GPIO-mode pad by gating its TX buffer:
 * TX disabled means input, TX enabled means output.
 */
static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
				    struct pinctrl_gpio_range *range,
				    unsigned pin, bool input)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
	void __iomem *padcfg0;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&pctrl->lock, flags);

	padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);

	value = readl(padcfg0);
	if (input)
		value |= PADCFG0_GPIOTXDIS;
	else
		value &= ~PADCFG0_GPIOTXDIS;
	writel(value, padcfg0);

	spin_unlock_irqrestore(&pctrl->lock, flags);

	return 0;
}

/* pinmux operations exported to the pinctrl core */
static const struct pinmux_ops intel_pinmux_ops = {
	.get_functions_count = intel_get_functions_count,
	.get_function_name = intel_get_function_name,
	.get_function_groups = intel_get_function_groups,
	.set_mux = intel_pinmux_set_mux,
	.gpio_request_enable = intel_gpio_request_enable,
	.gpio_set_direction = intel_gpio_set_direction,
};

/*
 * Read back the bias (termination) configuration of a pad and pack it
 * into *config. Only bias-disable/pull-up/pull-down are supported;
 * -EINVAL is returned when the hardware state does not match the
 * requested parameter, -ENOTSUPP for pads not owned by the host or
 * unsupported parameters.
 */
static int intel_config_get(struct pinctrl_dev *pctldev, unsigned pin,
			    unsigned long *config)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
	enum pin_config_param param = pinconf_to_config_param(*config);
	u32 value, term;
	u16 arg = 0;

	if (!intel_pad_owned_by_host(pctrl, pin))
		return -ENOTSUPP;

	value = readl(intel_get_padcfg(pctrl, pin, PADCFG1));
	term = (value & PADCFG1_TERM_MASK) >> PADCFG1_TERM_SHIFT;

	switch (param) {
	case PIN_CONFIG_BIAS_DISABLE:
		if (term)
			return -EINVAL;
		break;

	case PIN_CONFIG_BIAS_PULL_UP:
		if (!term || !(value & PADCFG1_TERM_UP))
			return -EINVAL;

		/* Translate the hardware termination code to ohms */
		switch (term) {
		case PADCFG1_TERM_1K:
			arg = 1000;
			break;
		case PADCFG1_TERM_2K:
			arg = 2000;
			break;
		case PADCFG1_TERM_5K:
			arg = 5000;
			break;
		case PADCFG1_TERM_20K:
			arg = 20000;
			break;
		}

		break;

	case PIN_CONFIG_BIAS_PULL_DOWN:
		if (!term || value & PADCFG1_TERM_UP)
			return -EINVAL;

		/* Pull-down only supports the 5k and 20k terminations */
		switch (term) {
		case PADCFG1_TERM_5K:
			arg = 5000;
			break;
		case PADCFG1_TERM_20K:
			arg = 20000;
			break;
		}

		break;

	default:
		return -ENOTSUPP;
	}

	*config = pinconf_to_config_packed(param, arg);
	return 0;
}

/*
 * Program the pad termination (bias) from a packed pinconf value.
 * The argument is the pull strength in ohms; unsupported strengths
 * yield -EINVAL and leave the register untouched.
 */
static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin,
				 unsigned long config)
{
	unsigned param = pinconf_to_config_param(config);
	unsigned arg = pinconf_to_config_argument(config);
	void __iomem *padcfg1;
	unsigned long flags;
	int ret = 0;
	u32 value;

	spin_lock_irqsave(&pctrl->lock, flags);

	padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1);
	value = readl(padcfg1);

	switch (param) {
	case PIN_CONFIG_BIAS_DISABLE:
		value &= ~(PADCFG1_TERM_MASK | PADCFG1_TERM_UP);
		break;

	case PIN_CONFIG_BIAS_PULL_UP:
		value &= ~PADCFG1_TERM_MASK;

		value |= PADCFG1_TERM_UP;

		switch (arg) {
		case 20000:
			value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
			break;
		case 5000:
			value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
			break;
		case 2000:
			value |= PADCFG1_TERM_2K << PADCFG1_TERM_SHIFT;
			break;
		case 1000:
			value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
			break;
		default:
			ret = -EINVAL;
		}

		break;

	case PIN_CONFIG_BIAS_PULL_DOWN:
		value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK);

		switch (arg) {
		case 20000:
			value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
			break;
		case 5000:
			value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
			break;
		default:
			ret = -EINVAL;
		}

		break;
	}

	/* Only touch the hardware if the whole request was valid */
	if (!ret)
		writel(value, padcfg1);

	spin_unlock_irqrestore(&pctrl->lock, flags);

	return ret;
}

/*
 * Apply an array of pinconf settings to one pad; bails out on the
 * first failure or on any parameter other than the three bias ones.
 */
static int intel_config_set(struct pinctrl_dev *pctldev, unsigned pin,
			  unsigned long *configs, unsigned nconfigs)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
	int i, ret;

	if (!intel_pad_usable(pctrl, pin))
		return -ENOTSUPP;

	for (i = 0; i < nconfigs; i++) {
		switch (pinconf_to_config_param(configs[i])) {
		case PIN_CONFIG_BIAS_DISABLE:
		case PIN_CONFIG_BIAS_PULL_UP:
		case PIN_CONFIG_BIAS_PULL_DOWN:
			ret = intel_config_set_pull(pctrl, pin, configs[i]);
			if (ret)
				return ret;
			break;

		default:
			return -ENOTSUPP;
		}
	}

	return 0;
}

/* generic pinconf operations exported to the pinctrl core */
static const struct pinconf_ops intel_pinconf_ops = {
	.is_generic = true,
	.pin_config_get = intel_config_get,
	.pin_config_set = intel_config_set,
};

/* Template descriptor; per-device fields are filled in at probe time */
static const struct pinctrl_desc intel_pinctrl_desc = {
	.pctlops = &intel_pinctrl_ops,
	.pmxops = &intel_pinmux_ops,
	.confops = &intel_pinconf_ops,
	.owner = THIS_MODULE,
};

/* gpiolib request hook: route through the pinctrl GPIO request path */
static int intel_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	return pinctrl_request_gpio(chip->base + offset);
}

/* gpiolib free hook: release the pin back to pinctrl */
static void intel_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	pinctrl_free_gpio(chip->base + offset);
}

/* Read the current RX state of a GPIO-mode pad (0 or 1) */
static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
	void __iomem *reg;

	reg = intel_get_padcfg(pctrl, offset, PADCFG0);
	if (!reg)
		return -EINVAL;

	return !!(readl(reg) & PADCFG0_GPIORXSTATE);
}

/* Drive the TX state of a GPIO-mode pad; silently ignores bad offsets */
static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
	void __iomem *reg;

	reg = intel_get_padcfg(pctrl, offset, PADCFG0);
	if (reg) {
		unsigned long flags;
		u32 padcfg0;

		spin_lock_irqsave(&pctrl->lock, flags);
		padcfg0 = readl(reg);
		if (value)
			padcfg0 |= PADCFG0_GPIOTXSTATE;
		else
			padcfg0 &= ~PADCFG0_GPIOTXSTATE;
		writel(padcfg0, reg);
		spin_unlock_irqrestore(&pctrl->lock, flags);
	}
}

static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	return pinctrl_gpio_direction_input(chip->base + offset);
}

/* Set the output value first so the pin does not glitch when enabled */
static int intel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
				       int value)
{
	intel_gpio_set(chip, offset, value);
	return pinctrl_gpio_direction_output(chip->base + offset);
}

/* gpio_chip template copied into the per-device structure at probe */
static const struct gpio_chip intel_gpio_chip = {
	.owner = THIS_MODULE,
	.request = intel_gpio_request,
	.free = intel_gpio_free,
	.direction_input = intel_gpio_direction_input,
	.direction_output = intel_gpio_direction_output,
	.get = intel_gpio_get,
	.set = intel_gpio_set,
};

/* Acknowledge a pad interrupt by writing its bit to the GPI_IS register */
static void intel_gpio_irq_ack(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
	const struct intel_community *community;
	unsigned pin = irqd_to_hwirq(d);

	spin_lock(&pctrl->lock);

	community = intel_get_community(pctrl, pin);
	if (community) {
		unsigned padno = pin_to_padno(community, pin);
		unsigned gpp_offset = padno % NPADS_IN_GPP;
		unsigned gpp = padno / NPADS_IN_GPP;

		writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
	}

	spin_unlock(&pctrl->lock);
}

/* Common mask/unmask path: toggles the pad's bit in its group IE register */
static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
	const struct intel_community *community;
	unsigned pin = irqd_to_hwirq(d);
	unsigned long flags;

	spin_lock_irqsave(&pctrl->lock, flags);

	community = intel_get_community(pctrl, pin);
	if (community) {
		unsigned padno = pin_to_padno(community, pin);
		unsigned gpp_offset = padno % NPADS_IN_GPP;
		unsigned gpp = padno / NPADS_IN_GPP;
		void __iomem *reg;
		u32 value;

		reg = community->regs + community->ie_offset + gpp * 4;
		value = readl(reg);
		if (mask)
			value &= ~BIT(gpp_offset);
		else
			value |= BIT(gpp_offset);
		writel(value, reg);
	}

	spin_unlock_irqrestore(&pctrl->lock, flags);
}

static void intel_gpio_irq_mask(struct irq_data *d)
{
	intel_gpio_irq_mask_unmask(d, true);
}

static void intel_gpio_irq_unmask(struct irq_data *d)
{
	intel_gpio_irq_mask_unmask(d, false);
}

/*
 * Configure the pad's RX event detection (edge/level, inverted or not)
 * from the requested IRQ trigger type, and pick the matching flow
 * handler. Falling edge and low level are implemented via RX inversion.
 */
static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
	unsigned pin = irqd_to_hwirq(d);
	unsigned long flags;
	void __iomem *reg;
	u32 value;

	reg = intel_get_padcfg(pctrl, pin, PADCFG0);
	if (!reg)
		return -EINVAL;

	spin_lock_irqsave(&pctrl->lock, flags);

	value = readl(reg);

	value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);

	if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
		value |= PADCFG0_RXEVCFG_EDGE_BOTH << PADCFG0_RXEVCFG_SHIFT;
	} else if (type & IRQ_TYPE_EDGE_FALLING) {
		value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
		value |= PADCFG0_RXINV;
	} else if (type & IRQ_TYPE_EDGE_RISING) {
		value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
	} else if (type & IRQ_TYPE_LEVEL_LOW) {
		value |= PADCFG0_RXINV;
	} else {
		value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT;
	}

	writel(value, reg);

	if (type & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(d, handle_edge_irq);
	else if (type & IRQ_TYPE_LEVEL_MASK)
		irq_set_handler_locked(d, handle_level_irq);

	spin_unlock_irqrestore(&pctrl->lock, flags);

	return 0;
}

/*
 * Enable or disable wake generation for a pad via its GPE enable bit.
 * Any stale wake status for the pad is cleared first.
 */
static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
	const struct intel_community *community;
	unsigned pin = irqd_to_hwirq(d);
	unsigned padno, gpp, gpp_offset;
	u32 gpe_en;

	community = intel_get_community(pctrl, pin);
	if (!community)
		return -EINVAL;

	padno = pin_to_padno(community, pin);
	gpp = padno / NPADS_IN_GPP;
	gpp_offset = padno % NPADS_IN_GPP;

	/* Clear the existing wake status */
	writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4);

	/*
	 * The controller will generate wake when GPE of the corresponding
	 * pad is enabled and it is not routed to SCI (GPIROUTSCI is not
	 * set).
	 */
	gpe_en = readl(community->regs + GPI_GPE_EN + gpp * 4);
	if (on)
		gpe_en |= BIT(gpp_offset);
	else
		gpe_en &= ~BIT(gpp_offset);
	writel(gpe_en, community->regs + GPI_GPE_EN + gpp * 4);

	dev_dbg(pctrl->dev, "%sable wake for pin %u\n", on ? "en" : "dis", pin);
	return 0;
}

/*
 * Dispatch all pending, enabled pad interrupts of one community to
 * their mapped Linux IRQs.
 */
static void intel_gpio_community_irq_handler(struct gpio_chip *gc,
	const struct intel_community *community)
{
	int gpp;

	for (gpp = 0; gpp < community->ngpps; gpp++) {
		unsigned long pending, enabled, gpp_offset;

		pending = readl(community->regs + GPI_IS + gpp * 4);
		enabled = readl(community->regs + community->ie_offset +
				gpp * 4);

		/* Only interrupts that are enabled */
		pending &= enabled;

		for_each_set_bit(gpp_offset, &pending, NPADS_IN_GPP) {
			unsigned padno, irq;

			/*
			 * The last group in community can have less pins
			 * than NPADS_IN_GPP.
			 */
			padno = gpp_offset + gpp * NPADS_IN_GPP;
			if (padno >= community->npins)
				break;

			irq = irq_find_mapping(gc->irqdomain,
					       community->pin_base + padno);
			generic_handle_irq(irq);
		}
	}
}

/* Chained handler for the parent interrupt shared by all communities */
static void intel_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	int i;

	chained_irq_enter(chip, desc);

	/* Need to check all communities for pending interrupts */
	for (i = 0; i < pctrl->ncommunities; i++)
		intel_gpio_community_irq_handler(gc, &pctrl->communities[i]);

	chained_irq_exit(chip, desc);
}

static struct irq_chip intel_gpio_irqchip = {
	.name = "intel-gpio",
	.irq_ack = intel_gpio_irq_ack,
	.irq_mask = intel_gpio_irq_mask,
	.irq_unmask = intel_gpio_irq_unmask,
	.irq_set_type = intel_gpio_irq_type,
	.irq_set_wake = intel_gpio_irq_wake,
};

/* Mask and acknowledge every pad interrupt in every community */
static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
{
	size_t i;

	for (i = 0; i < pctrl->ncommunities; i++) {
		const struct intel_community *community;
		void __iomem *base;
		unsigned gpp;

		community = &pctrl->communities[i];
		base = community->regs;

		for (gpp = 0; gpp < community->ngpps; gpp++) {
			/* Mask and clear all interrupts */
			writel(0, base + community->ie_offset + gpp * 4);
			writel(0xffff, base + GPI_IS + gpp * 4);
		}
	}
}

/*
 * Register the gpio_chip, map the whole pin space as a GPIO range and
 * hook up the irqchip plus the chained parent interrupt handler.
 * On any failure the partially registered chip is removed again.
 */
static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
{
	int ret;

	pctrl->chip = intel_gpio_chip;

	pctrl->chip.ngpio = pctrl->soc->npins;
	pctrl->chip.label = dev_name(pctrl->dev);
	pctrl->chip.dev = pctrl->dev;
	pctrl->chip.base = -1;

	ret = gpiochip_add(&pctrl->chip);
	if (ret) {
		dev_err(pctrl->dev, "failed to register gpiochip\n");
		return ret;
	}

	ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev),
				     0, 0, pctrl->soc->npins);
	if (ret) {
		dev_err(pctrl->dev, "failed to add GPIO pin range\n");
		gpiochip_remove(&pctrl->chip);
		return ret;
	}

	ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
	if (ret) {
		dev_err(pctrl->dev, "failed to add irqchip\n");
		gpiochip_remove(&pctrl->chip);
		return ret;
	}

	gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq,
				     intel_gpio_irq_handler);
	return 0;
}

/*
 * Allocate the suspend/resume context buffers (per-pad register copies
 * and per-group interrupt masks). Compiles to a no-op without
 * CONFIG_PM_SLEEP. All allocations are devm-managed.
 */
static int intel_pinctrl_pm_init(struct intel_pinctrl *pctrl)
{
#ifdef CONFIG_PM_SLEEP
	const struct intel_pinctrl_soc_data *soc = pctrl->soc;
	struct intel_community_context *communities;
	struct intel_pad_context *pads;
	int i;

	pads = devm_kcalloc(pctrl->dev, soc->npins, sizeof(*pads), GFP_KERNEL);
	if (!pads)
		return -ENOMEM;

	communities = devm_kcalloc(pctrl->dev, pctrl->ncommunities,
				   sizeof(*communities), GFP_KERNEL);
	if (!communities)
		return -ENOMEM;

	for (i = 0; i < pctrl->ncommunities; i++) {
		struct intel_community *community = &pctrl->communities[i];
		u32 *intmask;

		intmask = devm_kcalloc(pctrl->dev, community->ngpps,
				       sizeof(*intmask), GFP_KERNEL);
		if (!intmask)
			return -ENOMEM;

		communities[i].intmask = intmask;
	}

	pctrl->context.pads = pads;
	pctrl->context.communities = communities;
#endif

	return 0;
}

/*
 * Common probe entry used by the SoC-specific front-end drivers:
 * maps each community's MMIO resource, locates the pad config area
 * via PADBAR, registers the pinctrl device and the GPIO side.
 */
int intel_pinctrl_probe(struct platform_device *pdev,
			const struct intel_pinctrl_soc_data *soc_data)
{
	struct intel_pinctrl *pctrl;
	int i, ret, irq;

	if (!soc_data)
		return -EINVAL;

	pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
	if (!pctrl)
		return -ENOMEM;

	pctrl->dev = &pdev->dev;
	pctrl->soc = soc_data;
	spin_lock_init(&pctrl->lock);

	/*
	 * Make a copy of the communities which we can use to hold pointers
	 * to the registers.
	 */
	pctrl->ncommunities = pctrl->soc->ncommunities;
	pctrl->communities = devm_kcalloc(&pdev->dev, pctrl->ncommunities,
				  sizeof(*pctrl->communities), GFP_KERNEL);
	if (!pctrl->communities)
		return -ENOMEM;

	for (i = 0; i < pctrl->ncommunities; i++) {
		struct intel_community *community = &pctrl->communities[i];
		struct resource *res;
		void __iomem *regs;
		u32 padbar;

		*community = pctrl->soc->communities[i];

		res = platform_get_resource(pdev, IORESOURCE_MEM,
					    community->barno);
		regs = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(regs))
			return PTR_ERR(regs);

		/* Read offset of the pad configuration registers */
		padbar = readl(regs + PADBAR);

		community->regs = regs;
		community->pad_regs = regs + padbar;
		community->ngpps = DIV_ROUND_UP(community->npins,
						NPADS_IN_GPP);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get interrupt number\n");
		return irq;
	}

	ret = intel_pinctrl_pm_init(pctrl);
	if (ret)
		return ret;

	pctrl->pctldesc = intel_pinctrl_desc;
	pctrl->pctldesc.name = dev_name(&pdev->dev);
	pctrl->pctldesc.pins = pctrl->soc->pins;
	pctrl->pctldesc.npins = pctrl->soc->npins;

	pctrl->pctldev = pinctrl_register(&pctrl->pctldesc, &pdev->dev, pctrl);
	if (IS_ERR(pctrl->pctldev)) {
		dev_err(&pdev->dev, "failed to register pinctrl driver\n");
		return PTR_ERR(pctrl->pctldev);
	}

	ret = intel_gpio_probe(pctrl, irq);
	if (ret) {
		pinctrl_unregister(pctrl->pctldev);
		return ret;
	}

	platform_set_drvdata(pdev, pctrl);

	return 0;
}
EXPORT_SYMBOL_GPL(intel_pinctrl_probe);

/* Common remove entry: tear down in reverse order of probe */
int intel_pinctrl_remove(struct platform_device *pdev)
{
	struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);

	gpiochip_remove(&pctrl->chip);
	pinctrl_unregister(pctrl->pctldev);

	return 0;
}
EXPORT_SYMBOL_GPL(intel_pinctrl_remove);

#ifdef CONFIG_PM_SLEEP
/*
 * Save PADCFG0/PADCFG1 of every host-usable pad and the per-group
 * interrupt enable masks so they can be restored on resume. The live
 * RX state bit is masked out of the saved PADCFG0 value.
 */
int intel_pinctrl_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
	struct intel_community_context *communities;
	struct intel_pad_context *pads;
	int i;

	pads = pctrl->context.pads;
	for (i = 0; i < pctrl->soc->npins; i++) {
		const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
		u32 val;

		if (!intel_pad_usable(pctrl, desc->number))
			continue;

		val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0));
		pads[i].padcfg0 = val & ~PADCFG0_GPIORXSTATE;
		val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG1));
		pads[i].padcfg1 = val;
	}

	communities = pctrl->context.communities;
	for (i = 0; i < pctrl->ncommunities; i++) {
		struct intel_community *community = &pctrl->communities[i];
		void __iomem *base;
		unsigned gpp;

		base = community->regs + community->ie_offset;
		for (gpp = 0; gpp < community->ngpps; gpp++)
			communities[i].intmask[gpp] = readl(base + gpp * 4);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(intel_pinctrl_suspend);

/*
 * Restore the saved pad configuration and interrupt masks. Registers
 * are only written when their current value differs from the saved
 * one, to avoid disturbing pads the firmware already set up.
 */
int intel_pinctrl_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
	const struct intel_community_context *communities;
	const struct intel_pad_context *pads;
	int i;

	/* Mask all interrupts */
	intel_gpio_irq_init(pctrl);

	pads = pctrl->context.pads;
	for (i = 0; i < pctrl->soc->npins; i++) {
		const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
		void __iomem *padcfg;
		u32 val;

		if (!intel_pad_usable(pctrl, desc->number))
			continue;

		padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
		val = readl(padcfg) & ~PADCFG0_GPIORXSTATE;
		if (val != pads[i].padcfg0) {
			writel(pads[i].padcfg0, padcfg);
			dev_dbg(dev, "restored pin %u padcfg0 %#08x\n",
				desc->number, readl(padcfg));
		}

		padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG1);
		val = readl(padcfg);
		if (val != pads[i].padcfg1) {
			writel(pads[i].padcfg1, padcfg);
			dev_dbg(dev, "restored pin %u padcfg1 %#08x\n",
				desc->number, readl(padcfg));
		}
	}

	communities = pctrl->context.communities;
	for (i = 0; i < pctrl->ncommunities; i++) {
		struct intel_community *community = &pctrl->communities[i];
		void __iomem *base;
		unsigned gpp;

		base = community->regs + community->ie_offset;
		for (gpp = 0; gpp < community->ngpps; gpp++) {
			writel(communities[i].intmask[gpp], base + gpp * 4);
			dev_dbg(dev, "restored mask %d/%u %#08x\n", i, gpp,
				readl(base + gpp * 4));
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(intel_pinctrl_resume);
#endif

MODULE_AUTHOR("Mathias Nyman <mathias.nyman@linux.intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Intel pinctrl/GPIO core driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
joeduong/bideas-openrex-linux-3.14
drivers/input/misc/ixp4xx-beeper.c
322
4089
/* * Generic IXP4xx beeper driver * * Copyright (C) 2005 Tower Technologies * * based on nslu2-io.c * Copyright (C) 2004 Karen Spearel * * Author: Alessandro Zummo <a.zummo@towertech.it> * Maintainers: http://www.nslu2-linux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/input.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <mach/hardware.h> MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); MODULE_DESCRIPTION("ixp4xx beeper driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ixp4xx-beeper"); static DEFINE_SPINLOCK(beep_lock); static void ixp4xx_spkr_control(unsigned int pin, unsigned int count) { unsigned long flags; spin_lock_irqsave(&beep_lock, flags); if (count) { gpio_direction_output(pin, 0); *IXP4XX_OSRT2 = (count & ~IXP4XX_OST_RELOAD_MASK) | IXP4XX_OST_ENABLE; } else { gpio_direction_output(pin, 1); gpio_direction_input(pin); *IXP4XX_OSRT2 = 0; } spin_unlock_irqrestore(&beep_lock, flags); } static int ixp4xx_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { unsigned int pin = (unsigned int) input_get_drvdata(dev); unsigned int count = 0; if (type != EV_SND) return -1; switch (code) { case SND_BELL: if (value) value = 1000; case SND_TONE: break; default: return -1; } if (value > 20 && value < 32767) count = (IXP4XX_TIMER_FREQ / (value * 4)) - 1; ixp4xx_spkr_control(pin, count); return 0; } static irqreturn_t ixp4xx_spkr_interrupt(int irq, void *dev_id) { unsigned int pin = (unsigned int) dev_id; /* clear interrupt */ *IXP4XX_OSST = IXP4XX_OSST_TIMER_2_PEND; /* flip the beeper output */ gpio_set_value(pin, !gpio_get_value(pin)); return IRQ_HANDLED; } static int ixp4xx_spkr_probe(struct platform_device *dev) { struct input_dev *input_dev; int err; 
input_dev = input_allocate_device(); if (!input_dev) return -ENOMEM; input_set_drvdata(input_dev, (void *) dev->id); input_dev->name = "ixp4xx beeper", input_dev->phys = "ixp4xx/gpio"; input_dev->id.bustype = BUS_HOST; input_dev->id.vendor = 0x001f; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; input_dev->dev.parent = &dev->dev; input_dev->evbit[0] = BIT_MASK(EV_SND); input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE); input_dev->event = ixp4xx_spkr_event; err = gpio_request(dev->id, "ixp4-beeper"); if (err) goto err_free_device; err = request_irq(IRQ_IXP4XX_TIMER2, &ixp4xx_spkr_interrupt, IRQF_NO_SUSPEND, "ixp4xx-beeper", (void *) dev->id); if (err) goto err_free_gpio; err = input_register_device(input_dev); if (err) goto err_free_irq; platform_set_drvdata(dev, input_dev); return 0; err_free_irq: free_irq(IRQ_IXP4XX_TIMER2, (void *)dev->id); err_free_gpio: gpio_free(dev->id); err_free_device: input_free_device(input_dev); return err; } static int ixp4xx_spkr_remove(struct platform_device *dev) { struct input_dev *input_dev = platform_get_drvdata(dev); unsigned int pin = (unsigned int) input_get_drvdata(input_dev); input_unregister_device(input_dev); /* turn the speaker off */ disable_irq(IRQ_IXP4XX_TIMER2); ixp4xx_spkr_control(pin, 0); free_irq(IRQ_IXP4XX_TIMER2, (void *)dev->id); gpio_free(dev->id); return 0; } static void ixp4xx_spkr_shutdown(struct platform_device *dev) { struct input_dev *input_dev = platform_get_drvdata(dev); unsigned int pin = (unsigned int) input_get_drvdata(input_dev); /* turn off the speaker */ disable_irq(IRQ_IXP4XX_TIMER2); ixp4xx_spkr_control(pin, 0); } static struct platform_driver ixp4xx_spkr_platform_driver = { .driver = { .name = "ixp4xx-beeper", .owner = THIS_MODULE, }, .probe = ixp4xx_spkr_probe, .remove = ixp4xx_spkr_remove, .shutdown = ixp4xx_spkr_shutdown, }; module_platform_driver(ixp4xx_spkr_platform_driver);
gpl-2.0
SerenityS/android_kernel_samsung_msm8916
fs/sdcardfs/dentry.c
322
4830
/*
 * fs/sdcardfs/dentry.c
 *
 * Copyright (c) 2013 Samsung Electronics Co. Ltd
 *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
 *            Sunghwan Yun, Sungjong Seo
 *
 * This program has been developed as a stackable file system based on
 * the WrapFS which written by
 *
 * Copyright (c) 1998-2011 Erez Zadok
 * Copyright (c) 2009      Shrikar Archak
 * Copyright (c) 2003-2011 Stony Brook University
 * Copyright (c) 2003-2011 The Research Foundation of SUNY
 *
 * This file is dual licensed. It may be redistributed and/or modified
 * under the terms of the Apache 2.0 License OR version 2 of the GNU
 * General Public License.
 */

#include "sdcardfs.h"
#include "linux/ctype.h"

/*
 * Validate a cached sdcardfs dentry against its lower-filesystem dentry:
 * the lower dentry must still be hashed, still have the same parent, and
 * still carry the same (case-insensitively compared) name.
 *
 * returns: -ERRNO if error (returned to user)
 *          0: tell VFS to invalidate dentry
 *          1: dentry is valid
 */
static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int err = 1;
	struct path parent_lower_path, lower_path;
	struct dentry *parent_dentry = NULL;
	struct dentry *parent_lower_dentry = NULL;
	struct dentry *lower_cur_parent_dentry = NULL;
	struct dentry *lower_dentry = NULL;

	/* Cannot take the locks below under RCU-walk; force ref-walk */
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	spin_lock(&dentry->d_lock);
	if (IS_ROOT(dentry)) {
		/* the root dentry is always valid */
		spin_unlock(&dentry->d_lock);
		return 1;
	}
	spin_unlock(&dentry->d_lock);

	/* check uninitialized obb_dentry and
	 * whether the base obbpath has been changed or not */
	if (is_obbpath_invalid(dentry)) {
		d_drop(dentry);
		return 0;
	}

	parent_dentry = dget_parent(dentry);
	sdcardfs_get_lower_path(parent_dentry, &parent_lower_path);
	sdcardfs_get_real_lower(dentry, &lower_path);
	parent_lower_dentry = parent_lower_path.dentry;
	lower_dentry = lower_path.dentry;
	lower_cur_parent_dentry = dget_parent(lower_dentry);

	/* invalid if the lower dentry has been unhashed underneath us */
	spin_lock(&lower_dentry->d_lock);
	if (d_unhashed(lower_dentry)) {
		spin_unlock(&lower_dentry->d_lock);
		d_drop(dentry);
		err = 0;
		goto out;
	}
	spin_unlock(&lower_dentry->d_lock);

	/* invalid if the lower file moved to a different parent */
	if (parent_lower_dentry != lower_cur_parent_dentry) {
		d_drop(dentry);
		err = 0;
		goto out;
	}

	/* upper and lower dentry must never be the same object */
	if (dentry == lower_dentry) {
		err = 0;
		panic("sdcardfs: dentry is equal to lower_dentry\n");
		goto out;
	}

	/*
	 * Take both d_locks in a globally consistent order (by pointer
	 * value) to avoid an ABBA deadlock with a concurrent revalidate.
	 */
	if (dentry < lower_dentry) {
		spin_lock(&dentry->d_lock);
		spin_lock(&lower_dentry->d_lock);
	} else {
		spin_lock(&lower_dentry->d_lock);
		spin_lock(&dentry->d_lock);
	}

	/* name mismatch (case-insensitive) invalidates the dentry */
	if (dentry->d_name.len != lower_dentry->d_name.len) {
		__d_drop(dentry);
		err = 0;
	} else if (strncasecmp(dentry->d_name.name, lower_dentry->d_name.name,
				dentry->d_name.len) != 0) {
		__d_drop(dentry);
		err = 0;
	}

	/* release in the reverse of the acquisition order above */
	if (dentry < lower_dentry) {
		spin_unlock(&lower_dentry->d_lock);
		spin_unlock(&dentry->d_lock);
	} else {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&lower_dentry->d_lock);
	}

out:
	dput(parent_dentry);
	dput(lower_cur_parent_dentry);
	sdcardfs_put_lower_path(parent_dentry, &parent_lower_path);
	sdcardfs_put_real_lower(dentry, &lower_path);
	return err;
}

/*
 * Drop our references to the lower-filesystem paths and free the
 * per-dentry private data when the VFS discards a dentry.
 */
static void sdcardfs_d_release(struct dentry *dentry)
{
	/* release and reset the lower paths */
	if (has_graft_path(dentry)) {
		/* grafted (obb) dentries also hold an original path */
		sdcardfs_put_reset_orig_path(dentry);
	}
	sdcardfs_put_reset_lower_path(dentry);
	free_dentry_private_data(dentry);
	return;
}

/*
 * Case-insensitive name hash so that names differing only in case
 * land in the same hash chain.
 */
static int sdcardfs_hash_ci(const struct dentry *dentry,
		const struct inode *inode, struct qstr *qstr)
{
	/*
	 * This function is copy of vfat_hashi.
	 * FIXME Should we support national language?
	 *   Refer to vfat_hashi()
	 *   struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
	 */
	const unsigned char *name;
	unsigned int len;
	unsigned long hash;

	name = qstr->name;
	//len = vfat_striptail_len(qstr);
	len = qstr->len;

	hash = init_name_hash();
	while (len--)
		//hash = partial_name_hash(nls_tolower(t, *name++), hash);
		hash = partial_name_hash(tolower(*name++), hash);
	qstr->hash = end_name_hash(hash);

	return 0;
}

/*
 * Case insensitive compare of two vfat names.
 * Returns 0 on match, 1 on mismatch (the d_compare convention).
 */
static int sdcardfs_cmp_ci(const struct dentry *parent,
		const struct inode *pinode,
		const struct dentry *dentry, const struct inode *inode,
		unsigned int len, const char *str, const struct qstr *name)
{
	/* This function is copy of vfat_cmpi */
	// FIXME Should we support national language?
	//struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io;
	//unsigned int alen, blen;

	/* A filename cannot end in '.' or we treat it like it has none */
	/*
	alen = vfat_striptail_len(name);
	blen = __vfat_striptail_len(len, str);
	if (alen == blen) {
		if (nls_strnicmp(t, name->name, str, alen) == 0)
			return 0;
	}
	*/
	if (name->len == len) {
		if (strncasecmp(name->name, str, len) == 0)
			return 0;
	}
	return 1;
}

/* case-insensitive dentry operations installed on sdcardfs dentries */
const struct dentry_operations sdcardfs_ci_dops = {
	.d_revalidate	= sdcardfs_d_revalidate,
	.d_release	= sdcardfs_d_release,
	.d_hash		= sdcardfs_hash_ci,
	.d_compare	= sdcardfs_cmp_ci,
};
gpl-2.0
daveti/prov-kernel
drivers/scsi/u14-34f.c
578
67682
/* * u14-34f.c - Low-level driver for UltraStor 14F/34F SCSI host adapters. * * 03 Jun 2003 Rev. 8.10 for linux-2.5.70 * + Update for new IRQ API. * + Use "goto" when appropriate. * + Drop u14-34f.h. * + Update for new module_param API. * + Module parameters can now be specified only in the * same format as the kernel boot options. * * boot option old module param * ----------- ------------------ * addr,... io_port=addr,... * lc:[y|n] linked_comm=[1|0] * mq:xx max_queue_depth=xx * tm:[0|1|2] tag_mode=[0|1|2] * et:[y|n] ext_tran=[1|0] * of:[y|n] have_old_firmware=[1|0] * * A valid example using the new parameter format is: * modprobe u14-34f "u14-34f=0x340,0x330,lc:y,tm:0,mq:4" * * which is equivalent to the old format: * modprobe u14-34f io_port=0x340,0x330 linked_comm=1 tag_mode=0 \ * max_queue_depth=4 * * With actual module code, u14-34f and u14_34f are equivalent * as module parameter names. * * 12 Feb 2003 Rev. 8.04 for linux 2.5.60 * + Release irq before calling scsi_register. * * 12 Nov 2002 Rev. 8.02 for linux 2.5.47 * + Release driver_lock before calling scsi_register. * * 11 Nov 2002 Rev. 8.01 for linux 2.5.47 * + Fixed bios_param and scsicam_bios_param calling parameters. * * 28 Oct 2002 Rev. 8.00 for linux 2.5.44-ac4 * + Use new tcq and adjust_queue_depth api. * + New command line option (tm:[0-2]) to choose the type of tags: * 0 -> disable tagging ; 1 -> simple tags ; 2 -> ordered tags. * Default is tm:0 (tagged commands disabled). * For compatibility the "tc:" option is an alias of the "tm:" * option; tc:n is equivalent to tm:0 and tc:y is equivalent to * tm:1. * * 10 Oct 2002 Rev. 7.70 for linux 2.5.42 * + Foreport from revision 6.70. * * 25 Jun 2002 Rev. 6.70 for linux 2.4.19 * + Fixed endian-ness problem due to bitfields. * * 21 Feb 2002 Rev. 6.52 for linux 2.4.18 * + Backport from rev. 7.22 (use io_request_lock). * * 20 Feb 2002 Rev. 7.22 for linux 2.5.5 * + Remove any reference to virt_to_bus(). * + Fix pio hang while detecting multiple HBAs. 
* * 01 Jan 2002 Rev. 7.20 for linux 2.5.1 * + Use the dynamic DMA mapping API. * * 19 Dec 2001 Rev. 7.02 for linux 2.5.1 * + Use SCpnt->sc_data_direction if set. * + Use sglist.page instead of sglist.address. * * 11 Dec 2001 Rev. 7.00 for linux 2.5.1 * + Use host->host_lock instead of io_request_lock. * * 1 May 2001 Rev. 6.05 for linux 2.4.4 * + Fix data transfer direction for opcode SEND_CUE_SHEET (0x5d) * * 25 Jan 2001 Rev. 6.03 for linux 2.4.0 * + "check_region" call replaced by "request_region". * * 22 Nov 2000 Rev. 6.02 for linux 2.4.0-test11 * + Removed old scsi error handling support. * + The obsolete boot option flag eh:n is silently ignored. * + Removed error messages while a disk drive is powered up at * boot time. * + Improved boot messages: all tagged capable device are * indicated as "tagged". * * 16 Sep 1999 Rev. 5.11 for linux 2.2.12 and 2.3.18 * + Updated to the new __setup interface for boot command line options. * + When loaded as a module, accepts the new parameter boot_options * which value is a string with the same format of the kernel boot * command line options. A valid example is: * modprobe u14-34f 'boot_options="0x230,0x340,lc:y,mq:4"' * * 22 Jul 1999 Rev. 5.00 for linux 2.2.10 and 2.3.11 * + Removed pre-2.2 source code compatibility. * * 26 Jul 1998 Rev. 4.33 for linux 2.0.35 and 2.1.111 * Added command line option (et:[y|n]) to use the existing * translation (returned by scsicam_bios_param) as disk geometry. * The default is et:n, which uses the disk geometry jumpered * on the board. * The default value et:n is compatible with all previous revisions * of this driver. * * 28 May 1998 Rev. 4.32 for linux 2.0.33 and 2.1.104 * Increased busy timeout from 10 msec. to 200 msec. while * processing interrupts. * * 18 May 1998 Rev. 4.31 for linux 2.0.33 and 2.1.102 * Improved abort handling during the eh recovery process. * * 13 May 1998 Rev. 
4.30 for linux 2.0.33 and 2.1.101 * The driver is now fully SMP safe, including the * abort and reset routines. * Added command line options (eh:[y|n]) to choose between * new_eh_code and the old scsi code. * If linux version >= 2.1.101 the default is eh:y, while the eh * option is ignored for previous releases and the old scsi code * is used. * * 18 Apr 1998 Rev. 4.20 for linux 2.0.33 and 2.1.97 * Reworked interrupt handler. * * 11 Apr 1998 rev. 4.05 for linux 2.0.33 and 2.1.95 * Major reliability improvement: when a batch with overlapping * requests is detected, requests are queued one at a time * eliminating any possible board or drive reordering. * * 10 Apr 1998 rev. 4.04 for linux 2.0.33 and 2.1.95 * Improved SMP support (if linux version >= 2.1.95). * * 9 Apr 1998 rev. 4.03 for linux 2.0.33 and 2.1.94 * Performance improvement: when sequential i/o is detected, * always use direct sort instead of reverse sort. * * 4 Apr 1998 rev. 4.02 for linux 2.0.33 and 2.1.92 * io_port is now unsigned long. * * 17 Mar 1998 rev. 4.01 for linux 2.0.33 and 2.1.88 * Use new scsi error handling code (if linux version >= 2.1.88). * Use new interrupt code. * * 12 Sep 1997 rev. 3.11 for linux 2.0.30 and 2.1.55 * Use of udelay inside the wait loops to avoid timeout * problems with fast cpus. * Removed check about useless calls to the interrupt service * routine (reported on SMP systems only). * At initialization time "sorted/unsorted" is displayed instead * of "linked/unlinked" to reinforce the fact that "linking" is * nothing but "elevator sorting" in the actual implementation. * * 17 May 1997 rev. 3.10 for linux 2.0.30 and 2.1.38 * Use of serial_number_at_timeout in abort and reset processing. * Use of the __initfunc and __initdata macro in setup code. * Minor cleanups in the list_statistics code. * * 24 Feb 1997 rev. 3.00 for linux 2.0.29 and 2.1.26 * When loading as a module, parameter passing is now supported * both in 2.0 and in 2.1 style. 
* Fixed data transfer direction for some SCSI opcodes. * Immediate acknowledge to request sense commands. * Linked commands to each disk device are now reordered by elevator * sorting. Rare cases in which reordering of write requests could * cause wrong results are managed. * * 18 Jan 1997 rev. 2.60 for linux 2.1.21 and 2.0.28 * Added command line options to enable/disable linked commands * (lc:[y|n]), old firmware support (of:[y|n]) and to set the max * queue depth (mq:xx). Default is "u14-34f=lc:n,of:n,mq:8". * Improved command linking. * * 8 Jan 1997 rev. 2.50 for linux 2.1.20 and 2.0.27 * Added linked command support. * * 3 Dec 1996 rev. 2.40 for linux 2.1.14 and 2.0.27 * Added queue depth adjustment. * * 22 Nov 1996 rev. 2.30 for linux 2.1.12 and 2.0.26 * The list of i/o ports to be probed can be overwritten by the * "u14-34f=port0,port1,...." boot command line option. * Scatter/gather lists are now allocated by a number of kmalloc * calls, in order to avoid the previous size limit of 64Kb. * * 16 Nov 1996 rev. 2.20 for linux 2.1.10 and 2.0.25 * Added multichannel support. * * 27 Sep 1996 rev. 2.12 for linux 2.1.0 * Portability cleanups (virtual/bus addressing, little/big endian * support). * * 09 Jul 1996 rev. 2.11 for linux 2.0.4 * "Data over/under-run" no longer implies a redo on all targets. * Number of internal retries is now limited. * * 16 Apr 1996 rev. 2.10 for linux 1.3.90 * New argument "reset_flags" to the reset routine. * * 21 Jul 1995 rev. 2.02 for linux 1.3.11 * Fixed Data Transfer Direction for some SCSI commands. * * 13 Jun 1995 rev. 2.01 for linux 1.2.10 * HAVE_OLD_UX4F_FIRMWARE should be defined for U34F boards when * the firmware prom is not the latest one (28008-006). * * 11 Mar 1995 rev. 2.00 for linux 1.2.0 * Fixed a bug which prevented media change detection for removable * disk drives. * * 23 Feb 1995 rev. 1.18 for linux 1.1.94 * Added a check for scsi_register returning NULL. * * 11 Feb 1995 rev. 
1.17 for linux 1.1.91 * U14F qualified to run with 32 sglists. * Now DEBUG_RESET is disabled by default. * * 9 Feb 1995 rev. 1.16 for linux 1.1.90 * Use host->wish_block instead of host->block. * * 8 Feb 1995 rev. 1.15 for linux 1.1.89 * Cleared target_time_out counter while performing a reset. * * 28 Jan 1995 rev. 1.14 for linux 1.1.86 * Added module support. * Log and do a retry when a disk drive returns a target status * different from zero on a recovered error. * Auto detects if U14F boards have an old firmware revision. * Max number of scatter/gather lists set to 16 for all boards * (most installation run fine using 33 sglists, while other * has problems when using more than 16). * * 16 Jan 1995 rev. 1.13 for linux 1.1.81 * Display a message if check_region detects a port address * already in use. * * 15 Dec 1994 rev. 1.12 for linux 1.1.74 * The host->block flag is set for all the detected ISA boards. * * 30 Nov 1994 rev. 1.11 for linux 1.1.68 * Redo i/o on target status CHECK_CONDITION for TYPE_DISK only. * Added optional support for using a single board at a time. * * 14 Nov 1994 rev. 1.10 for linux 1.1.63 * * 28 Oct 1994 rev. 1.09 for linux 1.1.58 Final BETA release. * 16 Jul 1994 rev. 1.00 for linux 1.1.29 Initial ALPHA release. * * This driver is a total replacement of the original UltraStor * scsi driver, but it supports ONLY the 14F and 34F boards. * It can be configured in the same kernel in which the original * ultrastor driver is configured to allow the original U24F * support. * * Multiple U14F and/or U34F host adapters are supported. * * Copyright (C) 1994-2003 Dario Ballabio (ballabio_dario@emc.com) * * Alternate email: dario.ballabio@inwind.it, dario.ballabio@tiscalinet.it * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that redistributions of source * code retain the above copyright notice and this comment without * modification. 
* * WARNING: if your 14/34F board has an old firmware revision (see below) * you must change "#undef" into "#define" in the following * statement. */ #undef HAVE_OLD_UX4F_FIRMWARE /* * The UltraStor 14F, 24F, and 34F are a family of intelligent, high * performance SCSI-2 host adapters. * Here is the scoop on the various models: * * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation. * 24F - EISA Bus Master HA with floppy support and WD1003 emulation. * 34F - VESA Local-Bus Bus Master HA (no WD1003 emulation). * * This code has been tested with up to two U14F boards, using both * firmware 28004-005/38004-004 (BIOS rev. 2.00) and the latest firmware * 28004-006/38004-005 (BIOS rev. 2.01). * * The latest firmware is required in order to get reliable operations when * clustering is enabled. ENABLE_CLUSTERING provides a performance increase * up to 50% on sequential access. * * Since the struct scsi_host_template structure is shared among all 14F and 34F, * the last setting of use_clustering is in effect for all of these boards. * * Here a sample configuration using two U14F boards: * U14F0: ISA 0x330, BIOS 0xc8000, IRQ 11, DMA 5, SG 32, MB 16, of:n, lc:y, mq:8. U14F1: ISA 0x340, BIOS 0x00000, IRQ 10, DMA 6, SG 32, MB 16, of:n, lc:y, mq:8. * * The boot controller must have its BIOS enabled, while other boards can * have their BIOS disabled, or enabled to an higher address. * Boards are named Ux4F0, Ux4F1..., according to the port address order in * the io_port[] array. * * The following facts are based on real testing results (not on * documentation) on the above U14F board. * * - The U14F board should be jumpered for bus on time less or equal to 7 * microseconds, while the default is 11 microseconds. This is order to * get acceptable performance while using floppy drive and hard disk * together. The jumpering for 7 microseconds is: JP13 pin 15-16, * JP14 pin 7-8 and pin 9-10. * The reduction has a little impact on scsi performance. 
* * - If scsi bus length exceeds 3m., the scsi bus speed needs to be reduced * from 10Mhz to 5Mhz (do this by inserting a jumper on JP13 pin 7-8). * * - If U14F on board firmware is older than 28004-006/38004-005, * the U14F board is unable to provide reliable operations if the scsi * request length exceeds 16Kbyte. When this length is exceeded the * behavior is: * - adapter_status equal 0x96 or 0xa3 or 0x93 or 0x94; * - adapter_status equal 0 and target_status equal 2 on for all targets * in the next operation following the reset. * This sequence takes a long time (>3 seconds), so in the meantime * the SD_TIMEOUT in sd.c could expire giving rise to scsi aborts * (SD_TIMEOUT has been increased from 3 to 6 seconds in 1.1.31). * Because of this I had to DISABLE_CLUSTERING and to work around the * bus reset in the interrupt service routine, returning DID_BUS_BUSY * so that the operations are retried without complains from the scsi.c * code. * Any reset of the scsi bus is going to kill tape operations, since * no retry is allowed for tapes. Bus resets are more likely when the * scsi bus is under heavy load. * Requests using scatter/gather have a maximum length of 16 x 1024 bytes * when DISABLE_CLUSTERING is in effect, but unscattered requests could be * larger than 16Kbyte. * * The new firmware has fixed all the above problems. * * For U34F boards the latest bios prom is 38008-002 (BIOS rev. 2.01), * the latest firmware prom is 28008-006. Older firmware 28008-005 has * problems when using more than 16 scatter/gather lists. * * The list of i/o ports to be probed can be totally replaced by the * boot command line option: "u14-34f=port0,port1,port2,...", where the * port0, port1... arguments are ISA/VESA addresses to be probed. * For example using "u14-34f=0x230,0x340", the driver probes only the two * addresses 0x230 and 0x340 in this order; "u14-34f=0" totally disables * this driver. 
* * After the optional list of detection probes, other possible command line * options are: * * et:y use disk geometry returned by scsicam_bios_param; * et:n use disk geometry jumpered on the board; * lc:y enables linked commands; * lc:n disables linked commands; * tm:0 disables tagged commands (same as tc:n); * tm:1 use simple queue tags (same as tc:y); * tm:2 use ordered queue tags (same as tc:2); * of:y enables old firmware support; * of:n disables old firmware support; * mq:xx set the max queue depth to the value xx (2 <= xx <= 8). * * The default value is: "u14-34f=lc:n,of:n,mq:8,tm:0,et:n". * An example using the list of detection probes could be: * "u14-34f=0x230,0x340,lc:y,tm:2,of:n,mq:4,et:n". * * When loading as a module, parameters can be specified as well. * The above example would be (use 1 in place of y and 0 in place of n): * * modprobe u14-34f io_port=0x230,0x340 linked_comm=1 have_old_firmware=0 \ * max_queue_depth=4 ext_tran=0 tag_mode=2 * * ---------------------------------------------------------------------------- * In this implementation, linked commands are designed to work with any DISK * or CD-ROM, since this linking has only the intent of clustering (time-wise) * and reordering by elevator sorting commands directed to each device, * without any relation with the actual SCSI protocol between the controller * and the device. * If Q is the queue depth reported at boot time for each device (also named * cmds/lun) and Q > 2, whenever there is already an active command to the * device all other commands to the same device (up to Q-1) are kept waiting * in the elevator sorting queue. When the active command completes, the * commands in this queue are sorted by sector address. The sort is chosen * between increasing or decreasing by minimizing the seek distance between * the sector of the commands just completed and the sector of the first * command in the list to be sorted. 
 * Trivial math assures that the unsorted average seek distance when doing
 * random seeks over S sectors is S/3.
 * When (Q-1) requests are uniformly distributed over S sectors, the average
 * distance between two adjacent requests is S/((Q-1) + 1), so the sorted
 * average seek distance for (Q-1) random requests over S sectors is S/Q.
 * The elevator sorting hence divides the seek distance by a factor Q/3.
 * The above pure geometric remarks are valid in all cases and the
 * driver effectively reduces the seek distance by the predicted factor
 * when there are Q concurrent read i/o operations on the device, but this
 * does not necessarily result in a noticeable performance improvement:
 * your mileage may vary....
 *
 * Note: command reordering inside a batch of queued commands could cause
 *       wrong results only if there is at least one write request and the
 *       intersection (sector-wise) of all requests is not empty.
 *       When the driver detects a batch including overlapping requests
 *       (a really rare event) strict serial (pid) order is enforced.
 * ----------------------------------------------------------------------------
 *
 * The boards are named Ux4F0, Ux4F1,... according to the detection order.
 *
 * In order to support multiple ISA boards in a reliable way,
 * the driver sets host->wish_block = TRUE for all ISA boards.
*/ #include <linux/string.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/delay.h> #include <asm/io.h> #include <asm/system.h> #include <asm/byteorder.h> #include <linux/proc_fs.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/stat.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ctype.h> #include <linux/spinlock.h> #include <asm/dma.h> #include <asm/irq.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> static int u14_34f_detect(struct scsi_host_template *); static int u14_34f_release(struct Scsi_Host *); static int u14_34f_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); static int u14_34f_eh_abort(struct scsi_cmnd *); static int u14_34f_eh_host_reset(struct scsi_cmnd *); static int u14_34f_bios_param(struct scsi_device *, struct block_device *, sector_t, int *); static int u14_34f_slave_configure(struct scsi_device *); static struct scsi_host_template driver_template = { .name = "UltraStor 14F/34F rev. 
8.10.00 ", .detect = u14_34f_detect, .release = u14_34f_release, .queuecommand = u14_34f_queuecommand, .eh_abort_handler = u14_34f_eh_abort, .eh_host_reset_handler = u14_34f_eh_host_reset, .bios_param = u14_34f_bios_param, .slave_configure = u14_34f_slave_configure, .this_id = 7, .unchecked_isa_dma = 1, .use_clustering = ENABLE_CLUSTERING, }; #if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD) #error "Adjust your <asm/byteorder.h> defines" #endif /* Values for the PRODUCT_ID ports for the 14/34F */ #define PRODUCT_ID1 0x56 #define PRODUCT_ID2 0x40 /* NOTE: Only upper nibble is used */ /* Subversion values */ #define ISA 0 #define ESA 1 #define OP_HOST_ADAPTER 0x1 #define OP_SCSI 0x2 #define OP_RESET 0x4 #define DTD_SCSI 0x0 #define DTD_IN 0x1 #define DTD_OUT 0x2 #define DTD_NONE 0x3 #define HA_CMD_INQUIRY 0x1 #define HA_CMD_SELF_DIAG 0x2 #define HA_CMD_READ_BUFF 0x3 #define HA_CMD_WRITE_BUFF 0x4 #undef DEBUG_LINKED_COMMANDS #undef DEBUG_DETECT #undef DEBUG_INTERRUPT #undef DEBUG_RESET #undef DEBUG_GENERATE_ERRORS #undef DEBUG_GENERATE_ABORTS #undef DEBUG_GEOMETRY #define MAX_ISA 3 #define MAX_VESA 1 #define MAX_EISA 0 #define MAX_PCI 0 #define MAX_BOARDS (MAX_ISA + MAX_VESA + MAX_EISA + MAX_PCI) #define MAX_CHANNEL 1 #define MAX_LUN 8 #define MAX_TARGET 8 #define MAX_MAILBOXES 16 #define MAX_SGLIST 32 #define MAX_SAFE_SGLIST 16 #define MAX_INTERNAL_RETRIES 64 #define MAX_CMD_PER_LUN 2 #define MAX_TAGGED_CMD_PER_LUN (MAX_MAILBOXES - MAX_CMD_PER_LUN) #define SKIP ULONG_MAX #define FALSE 0 #define TRUE 1 #define FREE 0 #define IN_USE 1 #define LOCKED 2 #define IN_RESET 3 #define IGNORE 4 #define READY 5 #define ABORTING 6 #define NO_DMA 0xff #define MAXLOOP 10000 #define TAG_DISABLED 0 #define TAG_SIMPLE 1 #define TAG_ORDERED 2 #define REG_LCL_MASK 0 #define REG_LCL_INTR 1 #define REG_SYS_MASK 2 #define REG_SYS_INTR 3 #define REG_PRODUCT_ID1 4 #define REG_PRODUCT_ID2 5 #define REG_CONFIG1 6 #define REG_CONFIG2 7 #define REG_OGM 8 #define REG_ICM 
12 #define REGION_SIZE 13UL #define BSY_ASSERTED 0x01 #define IRQ_ASSERTED 0x01 #define CMD_RESET 0xc0 #define CMD_OGM_INTR 0x01 #define CMD_CLR_INTR 0x01 #define CMD_ENA_INTR 0x81 #define ASOK 0x00 #define ASST 0x91 #define YESNO(a) ((a) ? 'y' : 'n') #define TLDEV(type) ((type) == TYPE_DISK || (type) == TYPE_ROM) #define PACKED __attribute__((packed)) struct sg_list { unsigned int address; /* Segment Address */ unsigned int num_bytes; /* Segment Length */ }; /* MailBox SCSI Command Packet */ struct mscp { #if defined(__BIG_ENDIAN_BITFIELD) unsigned char sg:1, ca:1, dcn:1, xdir:2, opcode:3; unsigned char lun: 3, channel:2, target:3; #else unsigned char opcode: 3, /* type of command */ xdir: 2, /* data transfer direction */ dcn: 1, /* disable disconnect */ ca: 1, /* use cache (if available) */ sg: 1; /* scatter/gather operation */ unsigned char target: 3, /* SCSI target id */ channel: 2, /* SCSI channel number */ lun: 3; /* SCSI logical unit number */ #endif unsigned int data_address PACKED; /* transfer data pointer */ unsigned int data_len PACKED; /* length in bytes */ unsigned int link_address PACKED; /* for linking command chains */ unsigned char clink_id; /* identifies command in chain */ unsigned char use_sg; /* (if sg is set) 8 bytes per list */ unsigned char sense_len; unsigned char cdb_len; /* 6, 10, or 12 */ unsigned char cdb[12]; /* SCSI Command Descriptor Block */ unsigned char adapter_status; /* non-zero indicates HA error */ unsigned char target_status; /* non-zero indicates target error */ unsigned int sense_addr PACKED; /* Additional fields begin here. 
*/ struct scsi_cmnd *SCpnt; unsigned int cpp_index; /* cp index */ /* All the cp structure is zero filled by queuecommand except the following CP_TAIL_SIZE bytes, initialized by detect */ dma_addr_t cp_dma_addr; /* dma handle for this cp structure */ struct sg_list *sglist; /* pointer to the allocated SG list */ }; #define CP_TAIL_SIZE (sizeof(struct sglist *) + sizeof(dma_addr_t)) struct hostdata { struct mscp cp[MAX_MAILBOXES]; /* Mailboxes for this board */ unsigned int cp_stat[MAX_MAILBOXES]; /* FREE, IN_USE, LOCKED, IN_RESET */ unsigned int last_cp_used; /* Index of last mailbox used */ unsigned int iocount; /* Total i/o done for this board */ int board_number; /* Number of this board */ char board_name[16]; /* Name of this board */ int in_reset; /* True if board is doing a reset */ int target_to[MAX_TARGET][MAX_CHANNEL]; /* N. of timeout errors on target */ int target_redo[MAX_TARGET][MAX_CHANNEL]; /* If TRUE redo i/o on target */ unsigned int retries; /* Number of internal retries */ unsigned long last_retried_pid; /* Pid of last retried command */ unsigned char subversion; /* Bus type, either ISA or ESA */ struct pci_dev *pdev; /* Always NULL */ unsigned char heads; unsigned char sectors; char board_id[256]; /* data from INQUIRY on this board */ }; static struct Scsi_Host *sh[MAX_BOARDS + 1]; static const char *driver_name = "Ux4F"; static char sha[MAX_BOARDS]; static DEFINE_SPINLOCK(driver_lock); /* Initialize num_boards so that ihdlr can work while detect is in progress */ static unsigned int num_boards = MAX_BOARDS; static unsigned long io_port[] = { /* Space for MAX_INT_PARAM ports usable while loading as a module */ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, /* Possible ISA/VESA ports */ 0x330, 0x340, 0x230, 0x240, 0x210, 0x130, 0x140, /* End of list */ 0x0 }; #define HD(board) ((struct hostdata *) &sh[board]->hostdata) #define BN(board) (HD(board)->board_name) /* Device is Little Endian */ #define H2DEV(x) cpu_to_le32(x) #define 
DEV2H(x) le32_to_cpu(x) static irqreturn_t do_interrupt_handler(int, void *); static void flush_dev(struct scsi_device *, unsigned long, unsigned int, unsigned int); static int do_trace = FALSE; static int setup_done = FALSE; static int link_statistics; static int ext_tran = FALSE; #if defined(HAVE_OLD_UX4F_FIRMWARE) static int have_old_firmware = TRUE; #else static int have_old_firmware = FALSE; #endif #if defined(CONFIG_SCSI_U14_34F_TAGGED_QUEUE) static int tag_mode = TAG_SIMPLE; #else static int tag_mode = TAG_DISABLED; #endif #if defined(CONFIG_SCSI_U14_34F_LINKED_COMMANDS) static int linked_comm = TRUE; #else static int linked_comm = FALSE; #endif #if defined(CONFIG_SCSI_U14_34F_MAX_TAGS) static int max_queue_depth = CONFIG_SCSI_U14_34F_MAX_TAGS; #else static int max_queue_depth = MAX_CMD_PER_LUN; #endif #define MAX_INT_PARAM 10 #define MAX_BOOT_OPTIONS_SIZE 256 static char boot_options[MAX_BOOT_OPTIONS_SIZE]; #if defined(MODULE) #include <linux/module.h> #include <linux/moduleparam.h> module_param_string(u14_34f, boot_options, MAX_BOOT_OPTIONS_SIZE, 0); MODULE_PARM_DESC(u14_34f, " equivalent to the \"u14-34f=...\" kernel boot " \ "option." 
\ " Example: modprobe u14-34f \"u14_34f=0x340,0x330,lc:y,tm:0,mq:4\""); MODULE_AUTHOR("Dario Ballabio"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("UltraStor 14F/34F SCSI Driver"); #endif static int u14_34f_slave_configure(struct scsi_device *dev) { int j, tqd, utqd; char *tag_suffix, *link_suffix; struct Scsi_Host *host = dev->host; j = ((struct hostdata *) host->hostdata)->board_number; utqd = MAX_CMD_PER_LUN; tqd = max_queue_depth; if (TLDEV(dev->type) && dev->tagged_supported) if (tag_mode == TAG_SIMPLE) { scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, tqd); tag_suffix = ", simple tags"; } else if (tag_mode == TAG_ORDERED) { scsi_adjust_queue_depth(dev, MSG_ORDERED_TAG, tqd); tag_suffix = ", ordered tags"; } else { scsi_adjust_queue_depth(dev, 0, tqd); tag_suffix = ", no tags"; } else if (TLDEV(dev->type) && linked_comm) { scsi_adjust_queue_depth(dev, 0, tqd); tag_suffix = ", untagged"; } else { scsi_adjust_queue_depth(dev, 0, utqd); tag_suffix = ""; } if (TLDEV(dev->type) && linked_comm && dev->queue_depth > 2) link_suffix = ", sorted"; else if (TLDEV(dev->type)) link_suffix = ", unsorted"; else link_suffix = ""; sdev_printk(KERN_INFO, dev, "cmds/lun %d%s%s.\n", dev->queue_depth, link_suffix, tag_suffix); return FALSE; } static int wait_on_busy(unsigned long iobase, unsigned int loop) { while (inb(iobase + REG_LCL_INTR) & BSY_ASSERTED) { udelay(1L); if (--loop == 0) return TRUE; } return FALSE; } static int board_inquiry(unsigned int j) { struct mscp *cpp; dma_addr_t id_dma_addr; unsigned int limit = 0; unsigned long time; id_dma_addr = pci_map_single(HD(j)->pdev, HD(j)->board_id, sizeof(HD(j)->board_id), PCI_DMA_BIDIRECTIONAL); cpp = &HD(j)->cp[0]; cpp->cp_dma_addr = pci_map_single(HD(j)->pdev, cpp, sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL); memset(cpp, 0, sizeof(struct mscp) - CP_TAIL_SIZE); cpp->opcode = OP_HOST_ADAPTER; cpp->xdir = DTD_IN; cpp->data_address = H2DEV(id_dma_addr); cpp->data_len = H2DEV(sizeof(HD(j)->board_id)); cpp->cdb_len = 6; cpp->cdb[0] 
= HA_CMD_INQUIRY;

   if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
      printk("%s: board_inquiry, adapter busy.\n", BN(j));
      return TRUE;
      }

   /* IGNORE: the interrupt handler must not complete this mailbox */
   HD(j)->cp_stat[0] = IGNORE;

   /* Clear the interrupt indication */
   outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);

   /* Store pointer in OGM address bytes */
   outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);

   /* Issue OGM interrupt */
   outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);

   /* busy-wait up to ~1s (bounded by the limit counter) for completion;
      driver_lock is dropped so the interrupt handler can run */
   spin_unlock_irq(&driver_lock);
   time = jiffies;
   while ((jiffies - time) < HZ && limit++ < 20000) udelay(100L);
   spin_lock_irq(&driver_lock);

   if (cpp->adapter_status || HD(j)->cp_stat[0] != FREE) {
      HD(j)->cp_stat[0] = FREE;
      printk("%s: board_inquiry, err 0x%x.\n", BN(j), cpp->adapter_status);
      return TRUE;
      }

   pci_unmap_single(HD(j)->pdev, cpp->cp_dma_addr, sizeof(struct mscp),
                    PCI_DMA_BIDIRECTIONAL);
   pci_unmap_single(HD(j)->pdev, id_dma_addr, sizeof(HD(j)->board_id),
                    PCI_DMA_BIDIRECTIONAL);
   return FALSE;
}

/*
 * Probe a single I/O port for a 14F/34F board. On success: allocates
 * IRQ (and the cascade DMA channel for ISA boards), registers a
 * Scsi_Host, initializes its hostdata, maps the mailbox array for DMA,
 * allocates per-mailbox scatter/gather lists and returns TRUE.
 * On failure: unwinds via the freedma/freeirq/freelock/fail goto chain
 * and returns FALSE. Called with interrupts enabled; takes driver_lock
 * around the register probing and host setup.
 */
static int port_detect \
      (unsigned long port_base, unsigned int j, struct scsi_host_template *tpnt) {
   unsigned char irq, dma_channel, subversion, i;
   unsigned char in_byte;
   char *bus_type, dma_name[16];

   /* Allowed BIOS base addresses (NULL indicates reserved) */
   unsigned long bios_segment_table[8] = {
      0, 0xc4000, 0xc8000, 0xcc000, 0xd0000, 0xd4000, 0xd8000, 0xdc000
      };

   /* Allowed IRQs */
   unsigned char interrupt_table[4] = { 15, 14, 11, 10 };

   /* Allowed DMA channels for ISA (0 indicates reserved) */
   unsigned char dma_channel_table[4] = { 5, 6, 7, 0 };

   /* Head/sector mappings */
   struct {
      unsigned char heads;
      unsigned char sectors;
      } mapping_table[4] = {
         { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 }
         };

   /* Mirror of the CONFIG1 hardware register (bitfield layout depends
      on host endianness) */
   struct config_1 {
#if defined(__BIG_ENDIAN_BITFIELD)
      unsigned char dma_channel: 2, interrupt:2,
                    removable_disks_as_fixed:1, bios_segment: 3;
#else
      unsigned char bios_segment: 3, removable_disks_as_fixed: 1,
                    interrupt: 2, dma_channel: 2;
#endif
      } config_1;

   /* Mirror of the CONFIG2 hardware register */
   struct config_2 {
#if defined(__BIG_ENDIAN_BITFIELD)
      unsigned char tfr_port: 2, bios_drive_number: 1,
                    mapping_mode: 2, ha_scsi_id: 3;
#else
      unsigned char ha_scsi_id: 3, mapping_mode: 2,
                    bios_drive_number: 1, tfr_port: 2;
#endif
      } config_2;

   char name[16];

   sprintf(name, "%s%d", driver_name, j);

   if (!request_region(port_base, REGION_SIZE, driver_name)) {
#if defined(DEBUG_DETECT)
      printk("%s: address 0x%03lx in use, skipping probe.\n", name, port_base);
#endif
      goto fail;
      }

   spin_lock_irq(&driver_lock);

   /* identify the board by its product-id registers (upper nibble of
      PRODUCT_ID2 only; low nibble encodes the ISA/ESA subversion) */
   if (inb(port_base + REG_PRODUCT_ID1) != PRODUCT_ID1) goto freelock;

   in_byte = inb(port_base + REG_PRODUCT_ID2);

   if ((in_byte & 0xf0) != PRODUCT_ID2) goto freelock;

   *(char *)&config_1 = inb(port_base + REG_CONFIG1);
   *(char *)&config_2 = inb(port_base + REG_CONFIG2);

   irq = interrupt_table[config_1.interrupt];
   dma_channel = dma_channel_table[config_1.dma_channel];
   subversion = (in_byte & 0x0f);

   /* Board detected, allocate its IRQ */
   if (request_irq(irq, do_interrupt_handler,
             IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0),
             driver_name, (void *) &sha[j])) {
      printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
      goto freelock;
      }

   if (subversion == ISA && request_dma(dma_channel, driver_name)) {
      printk("%s: unable to allocate DMA channel %u, detaching.\n",
             name, dma_channel);
      goto freeirq;
      }

   /* old firmware cannot handle clustered requests (see file header) */
   if (have_old_firmware) tpnt->use_clustering = DISABLE_CLUSTERING;

   spin_unlock_irq(&driver_lock);
   sh[j] = scsi_register(tpnt, sizeof(struct hostdata));
   spin_lock_irq(&driver_lock);

   if (sh[j] == NULL) {
      printk("%s: unable to register host, detaching.\n", name);
      goto freedma;
      }

   sh[j]->io_port = port_base;
   sh[j]->unique_id = port_base;
   sh[j]->n_io_port = REGION_SIZE;
   sh[j]->base = bios_segment_table[config_1.bios_segment];
   sh[j]->irq = irq;
   sh[j]->sg_tablesize = MAX_SGLIST;
   sh[j]->this_id = config_2.ha_scsi_id;
   sh[j]->can_queue = MAX_MAILBOXES;
   sh[j]->cmd_per_lun = MAX_CMD_PER_LUN;

#if defined(DEBUG_DETECT)
   {
   unsigned char sys_mask, lcl_mask;

   sys_mask = inb(sh[j]->io_port + REG_SYS_MASK);
   lcl_mask = inb(sh[j]->io_port + REG_LCL_MASK);
   printk("SYS_MASK 0x%x, LCL_MASK 0x%x.\n",
          sys_mask, lcl_mask);
   }
#endif

   /* Probably a bogus host scsi id, set it to the dummy value */
   if (sh[j]->this_id == 0) sh[j]->this_id = -1;

   /* If BIOS is disabled, force enable interrupts */
   if (sh[j]->base == 0) outb(CMD_ENA_INTR, sh[j]->io_port + REG_SYS_MASK);

   memset(HD(j), 0, sizeof(struct hostdata));
   HD(j)->heads = mapping_table[config_2.mapping_mode].heads;
   HD(j)->sectors = mapping_table[config_2.mapping_mode].sectors;
   HD(j)->subversion = subversion;
   HD(j)->pdev = NULL;
   HD(j)->board_number = j;

   if (have_old_firmware) sh[j]->sg_tablesize = MAX_SAFE_SGLIST;

   if (HD(j)->subversion == ESA) {
      /* 34F: VESA local-bus bus master, no ISA DMA channel involved */
      sh[j]->unchecked_isa_dma = FALSE;
      sh[j]->dma_channel = NO_DMA;
      sprintf(BN(j), "U34F%d", j);
      bus_type = "VESA";
      }
   else {
      unsigned long flags;
      /* 14F: ISA first-party DMA; put the channel in cascade mode so
         the board can master the bus itself */
      sh[j]->unchecked_isa_dma = TRUE;
      flags=claim_dma_lock();
      disable_dma(dma_channel);
      clear_dma_ff(dma_channel);
      set_dma_mode(dma_channel, DMA_MODE_CASCADE);
      enable_dma(dma_channel);
      release_dma_lock(flags);
      sh[j]->dma_channel = dma_channel;
      sprintf(BN(j), "U14F%d", j);
      bus_type = "ISA";
      }

   sh[j]->max_channel = MAX_CHANNEL - 1;
   sh[j]->max_id = MAX_TARGET;
   sh[j]->max_lun = MAX_LUN;

   /* auto-detect old U14F firmware: id bytes 32..39 hold the FW PROM
      revision, "06000600" is the fixed (28004-006) release */
   if (HD(j)->subversion == ISA && !board_inquiry(j)) {
      HD(j)->board_id[40] = 0;

      if (strcmp(&HD(j)->board_id[32], "06000600")) {
         printk("%s: %s.\n", BN(j), &HD(j)->board_id[8]);
         printk("%s: firmware %s is outdated, FW PROM should be 28004-006.\n",
                BN(j), &HD(j)->board_id[32]);
         sh[j]->hostt->use_clustering = DISABLE_CLUSTERING;
         sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
         }
      }

   if (dma_channel == NO_DMA) sprintf(dma_name, "%s", "BMST");
   else                       sprintf(dma_name, "DMA %u", dma_channel);

   spin_unlock_irq(&driver_lock);

   /* pre-map every mailbox once; the dma handle lives in the
      CP_TAIL_SIZE bytes preserved across queuecommand's memset */
   for (i = 0; i < sh[j]->can_queue; i++)
      HD(j)->cp[i].cp_dma_addr = pci_map_single(HD(j)->pdev,
            &HD(j)->cp[i], sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL);

   for (i = 0; i < sh[j]->can_queue; i++)
      if (! ((&HD(j)->cp[i])->sglist = kmalloc(
            sh[j]->sg_tablesize * sizeof(struct sg_list),
            (sh[j]->unchecked_isa_dma ?
            GFP_DMA : 0) | GFP_ATOMIC))) {
         printk("%s: kmalloc SGlist failed, mbox %d, detaching.\n", BN(j), i);
         goto release;
         }

   /* clamp the user-selected queue depth into the legal range */
   if (max_queue_depth > MAX_TAGGED_CMD_PER_LUN)
       max_queue_depth = MAX_TAGGED_CMD_PER_LUN;

   if (max_queue_depth < MAX_CMD_PER_LUN) max_queue_depth = MAX_CMD_PER_LUN;

   if (tag_mode != TAG_DISABLED && tag_mode != TAG_SIMPLE)
      tag_mode = TAG_ORDERED;

   if (j == 0) {
      printk("UltraStor 14F/34F: Copyright (C) 1994-2003 Dario Ballabio.\n");
      printk("%s config options -> of:%c, tm:%d, lc:%c, mq:%d, et:%c.\n",
             driver_name, YESNO(have_old_firmware), tag_mode,
             YESNO(linked_comm), max_queue_depth, YESNO(ext_tran));
      }

   printk("%s: %s 0x%03lx, BIOS 0x%05x, IRQ %u, %s, SG %d, MB %d.\n",
          BN(j), bus_type, (unsigned long)sh[j]->io_port, (int)sh[j]->base,
          sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue);

   if (sh[j]->max_id > 8 || sh[j]->max_lun > 8)
      printk("%s: wide SCSI support enabled, max_id %u, max_lun %u.\n",
             BN(j), sh[j]->max_id, sh[j]->max_lun);

   for (i = 0; i <= sh[j]->max_channel; i++)
      printk("%s: SCSI channel %u enabled, host target ID %d.\n",
             BN(j), i, sh[j]->this_id);

   return TRUE;

freedma:
   if (subversion == ISA) free_dma(dma_channel);
freeirq:
   free_irq(irq, &sha[j]);
freelock:
   spin_unlock_irq(&driver_lock);
   release_region(port_base, REGION_SIZE);
fail:
   return FALSE;

release:
   /* host already registered: full teardown via the release hook */
   u14_34f_release(sh[j]);
   return FALSE;
}

/*
 * Parse the "u14-34f=..." option string. The leading integers (already
 * collected in ints[] by option_setup) replace the io_port[] probe
 * list; the remaining "xx:val" pairs set the driver tunables, where
 * val is y/n or a number (see the file header for the full list).
 */
static void internal_setup(char *str, int *ints) {
   int i, argc = ints[0];
   char *cur = str, *pc;

   if (argc > 0) {
      if (argc > MAX_INT_PARAM) argc = MAX_INT_PARAM;

      for (i = 0; i < argc; i++) io_port[i] = ints[i + 1];

      io_port[i] = 0;
      setup_done = TRUE;
      }

   while (cur && (pc = strchr(cur, ':'))) {
      int val = 0, c = *++pc;

      if (c == 'n' || c == 'N') val = FALSE;
      else if (c == 'y' || c == 'Y') val = TRUE;
      else val = (int) simple_strtoul(pc, NULL, 0);

      if (!strncmp(cur, "lc:", 3)) linked_comm = val;
      else if (!strncmp(cur, "of:", 3)) have_old_firmware = val;
      else if (!strncmp(cur, "tm:", 3)) tag_mode = val;
      else if (!strncmp(cur, "tc:", 3)) tag_mode = val;
      else
        if (!strncmp(cur, "mq:", 3)) max_queue_depth = val;
      else if (!strncmp(cur, "ls:", 3)) link_statistics = val;
      else if (!strncmp(cur, "et:", 3)) ext_tran = val;

      if ((cur = strchr(cur, ','))) ++cur;
      }

   return;
}

/*
 * Boot-time "u14-34f=..." handler: collect the leading comma-separated
 * integers into ints[] (ints[0] = count), then hand the remainder of
 * the string to internal_setup for the name:value options.
 */
static int option_setup(char *str) {
   int ints[MAX_INT_PARAM];
   char *cur = str;
   int i = 1;

   while (cur && isdigit(*cur) && i <= MAX_INT_PARAM) {
      ints[i++] = simple_strtoul(cur, NULL, 0);

      if ((cur = strchr(cur, ',')) != NULL) cur++;
      }

   ints[0] = i - 1;
   internal_setup(cur, ints);
   return 1;
}

/*
 * Template detect hook: apply boot/module options, then probe every
 * address in io_port[] (SKIP entries are placeholders) with
 * port_detect. Returns the number of boards found, also recorded in
 * num_boards for the interrupt handler.
 */
static int u14_34f_detect(struct scsi_host_template *tpnt) {
   unsigned int j = 0, k;

   tpnt->proc_name = "u14-34f";

   if(strlen(boot_options)) option_setup(boot_options);

#if defined(MODULE)
   /* io_port could have been modified when loading as a module */
   if(io_port[0] != SKIP) {
      setup_done = TRUE;
      io_port[MAX_INT_PARAM] = 0;
      }
#endif

   for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL;

   for (k = 0; io_port[k]; k++) {

      if (io_port[k] == SKIP) continue;

      if (j < MAX_BOARDS && port_detect(io_port[k], j, tpnt)) j++;
      }

   num_boards = j;
   return j;
}

/*
 * Fill mailbox i of board j with the DMA mappings for its command:
 * maps the sense buffer, the data scatter/gather list (device-format
 * sg_list entries, little-endian via H2DEV) and the sglist itself.
 */
static void map_dma(unsigned int i, unsigned int j) {
   unsigned int data_len = 0;
   unsigned int k, pci_dir;
   int count;
   struct scatterlist *sg;
   struct mscp *cpp;
   struct scsi_cmnd *SCpnt;

   cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
   pci_dir = SCpnt->sc_data_direction;

   if (SCpnt->sense_buffer)
      cpp->sense_addr = H2DEV(pci_map_single(HD(j)->pdev, SCpnt->sense_buffer,
                           SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));

   cpp->sense_len = SCSI_SENSE_BUFFERSIZE;

   if (scsi_bufflen(SCpnt)) {
      count = scsi_dma_map(SCpnt);
      BUG_ON(count < 0);

      /* translate each mapped segment into the board's sg format */
      scsi_for_each_sg(SCpnt, sg, count, k) {
         cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
         cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
         data_len += sg->length;
         }

      cpp->sg = TRUE;
      cpp->use_sg = scsi_sg_count(SCpnt);
      /* the board reads the sg table itself, so map it too */
      cpp->data_address =
         H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
                cpp->use_sg * sizeof(struct sg_list),
                pci_dir));
      cpp->data_len = H2DEV(data_len);

      } else {
      pci_dir = PCI_DMA_BIDIRECTIONAL;
      cpp->data_len = H2DEV(scsi_bufflen(SCpnt));
      }
}

/*
 * Undo map_dma for mailbox i of board j: unmap the sense buffer, the
 * mid-layer sg mapping, and the sg-table mapping (if any).
 */
static void unmap_dma(unsigned int i, unsigned int j) {
   unsigned int pci_dir;
   struct mscp *cpp;
   struct scsi_cmnd *SCpnt;

   cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
   pci_dir = SCpnt->sc_data_direction;

   if (DEV2H(cpp->sense_addr))
      pci_unmap_single(HD(j)->pdev, DEV2H(cpp->sense_addr),
                       DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);

   scsi_dma_unmap(SCpnt);

   if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;

   if (DEV2H(cpp->data_address))
      pci_unmap_single(HD(j)->pdev, DEV2H(cpp->data_address),
                       DEV2H(cpp->data_len), pci_dir);
}

/*
 * Sync the DMA buffers of mailbox i back to the CPU after completion,
 * mirroring the mappings made in map_dma (sense buffer, data sg
 * segments, and the sg table).
 */
static void sync_dma(unsigned int i, unsigned int j) {
   unsigned int pci_dir;
   struct mscp *cpp;
   struct scsi_cmnd *SCpnt;

   cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
   pci_dir = SCpnt->sc_data_direction;

   if (DEV2H(cpp->sense_addr))
      pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->sense_addr),
                          DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);

   if (scsi_sg_count(SCpnt))
      pci_dma_sync_sg_for_cpu(HD(j)->pdev, scsi_sglist(SCpnt),
               scsi_sg_count(SCpnt), pci_dir);

   if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;

   if (DEV2H(cpp->data_address))
      pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->data_address),
                          DEV2H(cpp->data_len), pci_dir);
}

/*
 * Set the board's data transfer direction (cpp->xdir) for mailbox i
 * from SCpnt->sc_data_direction. For DMA_BIDIRECTIONAL the direction
 * is inferred from the SCSI opcode via the data_out/data_none tables;
 * anything unrecognized defaults to DTD_IN.
 */
static void scsi_to_dev_dir(unsigned int i, unsigned int j) {
   unsigned int k;

   /* opcodes that move data to the device */
   static const unsigned char data_out_cmds[] = {
      0x0a, 0x2a, 0x15, 0x55, 0x04, 0x07, 0x18, 0x1d, 0x24, 0x2e,
      0x30, 0x31, 0x32, 0x38, 0x39, 0x3a, 0x3b, 0x3d, 0x3f, 0x40,
      0x41, 0x4c, 0xaa, 0xae, 0xb0, 0xb1, 0xb2, 0xb6, 0xea, 0x1b, 0x5d
      };

   /* opcodes that move no data at all */
   static const unsigned char data_none_cmds[] = {
      0x01, 0x0b, 0x10, 0x11, 0x13, 0x16, 0x17, 0x19, 0x2b, 0x1e,
      0x2c, 0xac, 0x2f, 0xaf, 0x33, 0xb3, 0x35, 0x36, 0x45, 0x47,
      0x48, 0x49, 0xa9, 0x4b, 0xa5, 0xa6, 0xb5, 0x00
      };

   struct mscp *cpp;
   struct scsi_cmnd *SCpnt;

   cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;

   if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
      cpp->xdir = DTD_IN;
      return;
      }
   else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
      cpp->xdir = DTD_OUT;
      return;
      }
   else if
 (SCpnt->sc_data_direction == DMA_NONE) {
      cpp->xdir = DTD_NONE;
      return;
      }

   if (SCpnt->sc_data_direction != DMA_BIDIRECTIONAL)
      panic("%s: qcomm, invalid SCpnt->sc_data_direction.\n", BN(j));

   /* bidirectional: decide from the opcode tables, default DTD_IN */
   cpp->xdir = DTD_IN;

   for (k = 0; k < ARRAY_SIZE(data_out_cmds); k++)
      if (SCpnt->cmnd[0] == data_out_cmds[k]) {
         cpp->xdir = DTD_OUT;
         break;
         }

   if (cpp->xdir == DTD_IN)
      for (k = 0; k < ARRAY_SIZE(data_none_cmds); k++)
         if (SCpnt->cmnd[0] == data_none_cmds[k]) {
            cpp->xdir = DTD_NONE;
            break;
            }

}

/*
 * Queue a SCSI command: grab a free mailbox (round-robin from
 * last_cp_used), build the mscp from the command, map its DMA buffers,
 * then either hand it to the elevator (linked-command mode, state
 * READY via flush_dev) or write it straight to the board's OGM port
 * (state IN_USE). Returns 0 on success, 1 if no mailbox is free or the
 * adapter stays busy.
 */
static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) {
   unsigned int i, j, k;
   struct mscp *cpp;

   /* j is the board number */
   j = ((struct hostdata *) SCpnt->device->host->hostdata)->board_number;

   if (SCpnt->host_scribble)
      panic("%s: qcomm, pid %ld, SCpnt %p already active.\n",
            BN(j), SCpnt->serial_number, SCpnt);

   /* i is the mailbox number, look for the first free mailbox
      starting from last_cp_used */
   i = HD(j)->last_cp_used + 1;

   for (k = 0; k < sh[j]->can_queue; k++, i++) {

      if (i >= sh[j]->can_queue) i = 0;

      if (HD(j)->cp_stat[i] == FREE) {
         HD(j)->last_cp_used = i;
         break;
         }
      }

   if (k == sh[j]->can_queue) {
      printk("%s: qcomm, no free mailbox.\n", BN(j));
      return 1;
      }

   /* Set pointer to control packet structure */
   cpp = &HD(j)->cp[i];

   /* zero all but the CP_TAIL_SIZE bytes initialized by detect */
   memset(cpp, 0, sizeof(struct mscp) - CP_TAIL_SIZE);
   SCpnt->scsi_done = done;
   cpp->cpp_index = i;
   /* host_scribble points at the mailbox index for abort/reset lookup */
   SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index;

   if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d, pid %ld.\n",
                        BN(j), i, SCpnt->device->channel, SCpnt->device->id,
                        SCpnt->device->lun, SCpnt->serial_number);

   cpp->opcode = OP_SCSI;
   cpp->channel = SCpnt->device->channel;
   cpp->target = SCpnt->device->id;
   cpp->lun = SCpnt->device->lun;
   cpp->SCpnt = SCpnt;
   cpp->cdb_len = SCpnt->cmd_len;
   memcpy(cpp->cdb, SCpnt->cmnd, SCpnt->cmd_len);

   /* Use data transfer direction SCpnt->sc_data_direction */
   scsi_to_dev_dir(i, j);

   /* Map DMA buffers and SG list */
   map_dma(i, j);

   if (linked_comm && SCpnt->device->queue_depth > 2
                                     && TLDEV(SCpnt->device->type)) {
      /* defer to the elevator: flush_dev decides when to issue it */
      HD(j)->cp_stat[i] = READY;
      flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
      return 0;
      }

   if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
      unmap_dma(i, j);
      SCpnt->host_scribble = NULL;
      scmd_printk(KERN_INFO, SCpnt,
                  "qcomm, pid %ld, adapter busy.\n", SCpnt->serial_number);
      return 1;
      }

   /* Store pointer in OGM address bytes */
   outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);

   /* Issue OGM interrupt */
   outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);

   HD(j)->cp_stat[i] = IN_USE;
   return 0;
}

/*
 * Error-handler abort hook: locate the command's mailbox through
 * host_scribble and act on its state — SUCCESS for commands that are
 * inactive, locked, or still queued in the driver (READY/ABORTING,
 * completed here with DID_ABORT); FAILED for commands already on the
 * board (IN_USE) or involved in a reset.
 */
static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) {
   unsigned int i, j;

   j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number;

   if (SCarg->host_scribble == NULL) {
      scmd_printk(KERN_INFO, SCarg, "abort, pid %ld inactive.\n",
                  SCarg->serial_number);
      return SUCCESS;
      }

   i = *(unsigned int *)SCarg->host_scribble;
   scmd_printk(KERN_INFO, SCarg, "abort, mbox %d, pid %ld.\n",
               i, SCarg->serial_number);

   if (i >= sh[j]->can_queue)
      panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));

   if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
      printk("%s: abort, timeout error.\n", BN(j));
      return FAILED;
      }

   if (HD(j)->cp_stat[i] == FREE) {
      printk("%s: abort, mbox %d is free.\n", BN(j), i);
      return SUCCESS;
      }

   if (HD(j)->cp_stat[i] == IN_USE) {
      printk("%s: abort, mbox %d is in use.\n", BN(j), i);

      if (SCarg != HD(j)->cp[i].SCpnt)
         panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
               BN(j), i, SCarg, HD(j)->cp[i].SCpnt);

      if (inb(sh[j]->io_port + REG_SYS_INTR) & IRQ_ASSERTED)
         printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i);

      /* command is on the board: cannot be recalled, let EH escalate */
      return FAILED;
      }

   if (HD(j)->cp_stat[i] == IN_RESET) {
      printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
      return FAILED;
      }

   if (HD(j)->cp_stat[i] == LOCKED) {
      printk("%s: abort, mbox %d is locked.\n", BN(j), i);
      return SUCCESS;
      }

   if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
      /* never reached the board: complete it here with DID_ABORT */
      unmap_dma(i, j);
      SCarg->result = DID_ABORT << 16;
      SCarg->host_scribble = NULL;
      HD(j)->cp_stat[i] = FREE;
      printk("%s, abort, 
mbox %d ready, DID_ABORT, pid %ld done.\n", BN(j), i, SCarg->serial_number); SCarg->scsi_done(SCarg); return SUCCESS; } panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i); } static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) { unsigned int i, j, k, c, limit = 0; unsigned long time; int arg_done = FALSE; struct scsi_cmnd *SCpnt; j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number; scmd_printk(KERN_INFO, SCarg, "reset, enter, pid %ld.\n", SCarg->serial_number); spin_lock_irq(sh[j]->host_lock); if (SCarg->host_scribble == NULL) printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->serial_number); if (HD(j)->in_reset) { printk("%s: reset, exit, already in reset.\n", BN(j)); spin_unlock_irq(sh[j]->host_lock); return FAILED; } if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { printk("%s: reset, exit, timeout error.\n", BN(j)); spin_unlock_irq(sh[j]->host_lock); return FAILED; } HD(j)->retries = 0; for (c = 0; c <= sh[j]->max_channel; c++) for (k = 0; k < sh[j]->max_id; k++) { HD(j)->target_redo[k][c] = TRUE; HD(j)->target_to[k][c] = 0; } for (i = 0; i < sh[j]->can_queue; i++) { if (HD(j)->cp_stat[i] == FREE) continue; if (HD(j)->cp_stat[i] == LOCKED) { HD(j)->cp_stat[i] = FREE; printk("%s: reset, locked mbox %d forced free.\n", BN(j), i); continue; } if (!(SCpnt = HD(j)->cp[i].SCpnt)) panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i); if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) { HD(j)->cp_stat[i] = ABORTING; printk("%s: reset, mbox %d aborting, pid %ld.\n", BN(j), i, SCpnt->serial_number); } else { HD(j)->cp_stat[i] = IN_RESET; printk("%s: reset, mbox %d in reset, pid %ld.\n", BN(j), i, SCpnt->serial_number); } if (SCpnt->host_scribble == NULL) panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i); if (*(unsigned int *)SCpnt->host_scribble != i) panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i); if (SCpnt->scsi_done == NULL) panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i); if (SCpnt == 
SCarg) arg_done = TRUE; } if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { printk("%s: reset, cannot reset, timeout error.\n", BN(j)); spin_unlock_irq(sh[j]->host_lock); return FAILED; } outb(CMD_RESET, sh[j]->io_port + REG_LCL_INTR); printk("%s: reset, board reset done, enabling interrupts.\n", BN(j)); #if defined(DEBUG_RESET) do_trace = TRUE; #endif HD(j)->in_reset = TRUE; spin_unlock_irq(sh[j]->host_lock); time = jiffies; while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L); spin_lock_irq(sh[j]->host_lock); printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit); for (i = 0; i < sh[j]->can_queue; i++) { if (HD(j)->cp_stat[i] == IN_RESET) { SCpnt = HD(j)->cp[i].SCpnt; unmap_dma(i, j); SCpnt->result = DID_RESET << 16; SCpnt->host_scribble = NULL; /* This mailbox is still waiting for its interrupt */ HD(j)->cp_stat[i] = LOCKED; printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n", BN(j), i, SCpnt->serial_number); } else if (HD(j)->cp_stat[i] == ABORTING) { SCpnt = HD(j)->cp[i].SCpnt; unmap_dma(i, j); SCpnt->result = DID_RESET << 16; SCpnt->host_scribble = NULL; /* This mailbox was never queued to the adapter */ HD(j)->cp_stat[i] = FREE; printk("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n", BN(j), i, SCpnt->serial_number); } else /* Any other mailbox has already been set free by interrupt */ continue; SCpnt->scsi_done(SCpnt); } HD(j)->in_reset = FALSE; do_trace = FALSE; if (arg_done) printk("%s: reset, exit, pid %ld done.\n", BN(j), SCarg->serial_number); else printk("%s: reset, exit.\n", BN(j)); spin_unlock_irq(sh[j]->host_lock); return SUCCESS; } static int u14_34f_bios_param(struct scsi_device *disk, struct block_device *bdev, sector_t capacity, int *dkinfo) { unsigned int j = 0; unsigned int size = capacity; dkinfo[0] = HD(j)->heads; dkinfo[1] = HD(j)->sectors; dkinfo[2] = size / (HD(j)->heads * HD(j)->sectors); if (ext_tran && (scsicam_bios_param(bdev, capacity, dkinfo) < 0)) { dkinfo[0] = 255; dkinfo[1] = 63; 
dkinfo[2] = size / (dkinfo[0] * dkinfo[1]); } #if defined (DEBUG_GEOMETRY) printk ("%s: bios_param, head=%d, sec=%d, cyl=%d.\n", driver_name, dkinfo[0], dkinfo[1], dkinfo[2]); #endif return FALSE; } static void sort(unsigned long sk[], unsigned int da[], unsigned int n, unsigned int rev) { unsigned int i, j, k, y; unsigned long x; for (i = 0; i < n - 1; i++) { k = i; for (j = k + 1; j < n; j++) if (rev) { if (sk[j] > sk[k]) k = j; } else { if (sk[j] < sk[k]) k = j; } if (k != i) { x = sk[k]; sk[k] = sk[i]; sk[i] = x; y = da[k]; da[k] = da[i]; da[i] = y; } } return; } static int reorder(unsigned int j, unsigned long cursec, unsigned int ihdlr, unsigned int il[], unsigned int n_ready) { struct scsi_cmnd *SCpnt; struct mscp *cpp; unsigned int k, n; unsigned int rev = FALSE, s = TRUE, r = TRUE; unsigned int input_only = TRUE, overlap = FALSE; unsigned long sl[n_ready], pl[n_ready], ll[n_ready]; unsigned long maxsec = 0, minsec = ULONG_MAX, seek = 0, iseek = 0; unsigned long ioseek = 0; static unsigned int flushcount = 0, batchcount = 0, sortcount = 0; static unsigned int readycount = 0, ovlcount = 0, inputcount = 0; static unsigned int readysorted = 0, revcount = 0; static unsigned long seeksorted = 0, seeknosort = 0; if (link_statistics && !(++flushcount % link_statistics)) printk("fc %d bc %d ic %d oc %d rc %d rs %d sc %d re %d"\ " av %ldK as %ldK.\n", flushcount, batchcount, inputcount, ovlcount, readycount, readysorted, sortcount, revcount, seeknosort / (readycount + 1), seeksorted / (readycount + 1)); if (n_ready <= 1) return FALSE; for (n = 0; n < n_ready; n++) { k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; if (!(cpp->xdir == DTD_IN)) input_only = FALSE; if (blk_rq_pos(SCpnt->request) < minsec) minsec = blk_rq_pos(SCpnt->request); if (blk_rq_pos(SCpnt->request) > maxsec) maxsec = blk_rq_pos(SCpnt->request); sl[n] = blk_rq_pos(SCpnt->request); ioseek += blk_rq_sectors(SCpnt->request); if (!n) continue; if (sl[n] < sl[n - 1]) s = FALSE; if (sl[n] > sl[n - 
1]) r = FALSE; if (link_statistics) { if (sl[n] > sl[n - 1]) seek += sl[n] - sl[n - 1]; else seek += sl[n - 1] - sl[n]; } } if (link_statistics) { if (cursec > sl[0]) seek += cursec - sl[0]; else seek += sl[0] - cursec; } if (cursec > ((maxsec + minsec) / 2)) rev = TRUE; if (ioseek > ((maxsec - minsec) / 2)) rev = FALSE; if (!((rev && r) || (!rev && s))) sort(sl, il, n_ready, rev); if (!input_only) for (n = 0; n < n_ready; n++) { k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number; if (!n) continue; if ((sl[n] == sl[n - 1]) || (!rev && ((sl[n - 1] + ll[n - 1]) > sl[n])) || (rev && ((sl[n] + ll[n]) > sl[n - 1]))) overlap = TRUE; } if (overlap) sort(pl, il, n_ready, FALSE); if (link_statistics) { if (cursec > sl[0]) iseek = cursec - sl[0]; else iseek = sl[0] - cursec; batchcount++; readycount += n_ready; seeknosort += seek / 1024; if (input_only) inputcount++; if (overlap) { ovlcount++; seeksorted += iseek / 1024; } else seeksorted += (iseek + maxsec - minsec) / 1024; if (rev && !r) { revcount++; readysorted += n_ready; } if (!rev && !s) { sortcount++; readysorted += n_ready; } } #if defined(DEBUG_LINKED_COMMANDS) if (link_statistics && (overlap || !(flushcount % link_statistics))) for (n = 0; n < n_ready; n++) { k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\ " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", (ihdlr ? 
"ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready, blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request), cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), YESNO(overlap), cpp->xdir); } #endif return overlap; } static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned int j, unsigned int ihdlr) { struct scsi_cmnd *SCpnt; struct mscp *cpp; unsigned int k, n, n_ready = 0, il[MAX_MAILBOXES]; for (k = 0; k < sh[j]->can_queue; k++) { if (HD(j)->cp_stat[k] != READY && HD(j)->cp_stat[k] != IN_USE) continue; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; if (SCpnt->device != dev) continue; if (HD(j)->cp_stat[k] == IN_USE) return; il[n_ready++] = k; } if (reorder(j, cursec, ihdlr, il, n_ready)) n_ready = 1; for (n = 0; n < n_ready; n++) { k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { scmd_printk(KERN_INFO, SCpnt, "%s, pid %ld, mbox %d, adapter" " busy, will abort.\n", (ihdlr ? 
"ihdlr" : "qcomm"), SCpnt->serial_number, k); HD(j)->cp_stat[k] = ABORTING; continue; } outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM); outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR); HD(j)->cp_stat[k] = IN_USE; } } static irqreturn_t ihdlr(unsigned int j) { struct scsi_cmnd *SCpnt; unsigned int i, k, c, status, tstatus, reg, ret; struct mscp *spp, *cpp; int irq = sh[j]->irq; /* Check if this board need to be serviced */ if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none; HD(j)->iocount++; if (do_trace) printk("%s: ihdlr, enter, irq %d, count %d.\n", BN(j), irq, HD(j)->iocount); /* Check if this board is still busy */ if (wait_on_busy(sh[j]->io_port, 20 * MAXLOOP)) { outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR); printk("%s: ihdlr, busy timeout error, irq %d, reg 0x%x, count %d.\n", BN(j), irq, reg, HD(j)->iocount); goto none; } ret = inl(sh[j]->io_port + REG_ICM); /* Clear interrupt pending flag */ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR); /* Find the mailbox to be serviced on this board */ for (i = 0; i < sh[j]->can_queue; i++) if (H2DEV(HD(j)->cp[i].cp_dma_addr) == ret) break; if (i >= sh[j]->can_queue) panic("%s: ihdlr, invalid mscp bus address %p, cp0 %p.\n", BN(j), (void *)ret, (void *)H2DEV(HD(j)->cp[0].cp_dma_addr)); cpp = &(HD(j)->cp[i]); spp = cpp; #if defined(DEBUG_GENERATE_ABORTS) if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 500) < 3)) goto handled; #endif if (HD(j)->cp_stat[i] == IGNORE) { HD(j)->cp_stat[i] = FREE; goto handled; } else if (HD(j)->cp_stat[i] == LOCKED) { HD(j)->cp_stat[i] = FREE; printk("%s: ihdlr, mbox %d unlocked, count %d.\n", BN(j), i, HD(j)->iocount); goto handled; } else if (HD(j)->cp_stat[i] == FREE) { printk("%s: ihdlr, mbox %d is free, count %d.\n", BN(j), i, HD(j)->iocount); goto handled; } else if (HD(j)->cp_stat[i] == IN_RESET) printk("%s: ihdlr, mbox %d is in reset.\n", BN(j), i); else if (HD(j)->cp_stat[i] != IN_USE) panic("%s: ihdlr, mbox %d, invalid cp_stat: %d.\n", 
BN(j), i, HD(j)->cp_stat[i]); HD(j)->cp_stat[i] = FREE; SCpnt = cpp->SCpnt; if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i); if (SCpnt->host_scribble == NULL) panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n", BN(j), i, SCpnt->serial_number, SCpnt); if (*(unsigned int *)SCpnt->host_scribble != i) panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d.\n", BN(j), i, SCpnt->serial_number, *(unsigned int *)SCpnt->host_scribble); sync_dma(i, j); if (linked_comm && SCpnt->device->queue_depth > 2 && TLDEV(SCpnt->device->type)) flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE); tstatus = status_byte(spp->target_status); #if defined(DEBUG_GENERATE_ERRORS) if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 200) < 2)) spp->adapter_status = 0x01; #endif switch (spp->adapter_status) { case ASOK: /* status OK */ /* Forces a reset if a disk drive keeps returning BUSY */ if (tstatus == BUSY && SCpnt->device->type != TYPE_TAPE) status = DID_ERROR << 16; /* If there was a bus reset, redo operation on each target */ else if (tstatus != GOOD && SCpnt->device->type == TYPE_DISK && HD(j)->target_redo[scmd_id(SCpnt)][scmd_channel(SCpnt)]) status = DID_BUS_BUSY << 16; /* Works around a flaw in scsi.c */ else if (tstatus == CHECK_CONDITION && SCpnt->device->type == TYPE_DISK && (SCpnt->sense_buffer[2] & 0xf) == RECOVERED_ERROR) status = DID_BUS_BUSY << 16; else status = DID_OK << 16; if (tstatus == GOOD) HD(j)->target_redo[scmd_id(SCpnt)][scmd_channel(SCpnt)] = FALSE; if (spp->target_status && SCpnt->device->type == TYPE_DISK && (!(tstatus == CHECK_CONDITION && HD(j)->iocount <= 1000 && (SCpnt->sense_buffer[2] & 0xf) == NOT_READY))) scmd_printk(KERN_INFO, SCpnt, "ihdlr, pid %ld, target_status 0x%x, sense key 0x%x.\n", SCpnt->serial_number, spp->target_status, SCpnt->sense_buffer[2]); HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] = 0; if (HD(j)->last_retried_pid == SCpnt->serial_number) HD(j)->retries = 0; break; case ASST: /* Selection 
Time Out */ if (HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] > 1) status = DID_ERROR << 16; else { status = DID_TIME_OUT << 16; HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)]++; } break; /* Perform a limited number of internal retries */ case 0x93: /* Unexpected bus free */ case 0x94: /* Target bus phase sequence failure */ case 0x96: /* Illegal SCSI command */ case 0xa3: /* SCSI bus reset error */ for (c = 0; c <= sh[j]->max_channel; c++) for (k = 0; k < sh[j]->max_id; k++) HD(j)->target_redo[k][c] = TRUE; case 0x92: /* Data over/under-run */ if (SCpnt->device->type != TYPE_TAPE && HD(j)->retries < MAX_INTERNAL_RETRIES) { #if defined(DID_SOFT_ERROR) status = DID_SOFT_ERROR << 16; #else status = DID_BUS_BUSY << 16; #endif HD(j)->retries++; HD(j)->last_retried_pid = SCpnt->serial_number; } else status = DID_ERROR << 16; break; case 0x01: /* Invalid command */ case 0x02: /* Invalid parameters */ case 0x03: /* Invalid data list */ case 0x84: /* SCSI bus abort error */ case 0x9b: /* Auto request sense error */ case 0x9f: /* Unexpected command complete message error */ case 0xff: /* Invalid parameter in the S/G list */ default: status = DID_ERROR << 16; break; } SCpnt->result = status | spp->target_status; #if defined(DEBUG_INTERRUPT) if (SCpnt->result || do_trace) #else if ((spp->adapter_status != ASOK && HD(j)->iocount > 1000) || (spp->adapter_status != ASOK && spp->adapter_status != ASST && HD(j)->iocount <= 1000) || do_trace || msg_byte(spp->target_status)) #endif scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"\ " pid %ld, reg 0x%x, count %d.\n", i, spp->adapter_status, spp->target_status, SCpnt->serial_number, reg, HD(j)->iocount); unmap_dma(i, j); /* Set the command state to inactive */ SCpnt->host_scribble = NULL; SCpnt->scsi_done(SCpnt); if (do_trace) printk("%s: ihdlr, exit, irq %d, count %d.\n", BN(j), irq, HD(j)->iocount); handled: return IRQ_HANDLED; none: return IRQ_NONE; } static irqreturn_t do_interrupt_handler(int irq, void 
*shap) { unsigned int j; unsigned long spin_flags; irqreturn_t ret; /* Check if the interrupt must be processed by this handler */ if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return IRQ_NONE; spin_lock_irqsave(sh[j]->host_lock, spin_flags); ret = ihdlr(j); spin_unlock_irqrestore(sh[j]->host_lock, spin_flags); return ret; } static int u14_34f_release(struct Scsi_Host *shpnt) { unsigned int i, j; for (j = 0; sh[j] != NULL && sh[j] != shpnt; j++); if (sh[j] == NULL) panic("%s: release, invalid Scsi_Host pointer.\n", driver_name); for (i = 0; i < sh[j]->can_queue; i++) kfree((&HD(j)->cp[i])->sglist); for (i = 0; i < sh[j]->can_queue; i++) pci_unmap_single(HD(j)->pdev, HD(j)->cp[i].cp_dma_addr, sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL); free_irq(sh[j]->irq, &sha[j]); if (sh[j]->dma_channel != NO_DMA) free_dma(sh[j]->dma_channel); release_region(sh[j]->io_port, sh[j]->n_io_port); scsi_unregister(sh[j]); return FALSE; } #include "scsi_module.c" #ifndef MODULE __setup("u14-34f=", option_setup); #endif /* end MODULE */
gpl-2.0
vic-nation/kernel_goghvmu
kernel/pm_qos_params.c
834
13405
/*
 * This module exposes the interface to kernel space for specifying
 * QoS dependencies.  It provides infrastructure for registration of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based.  Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 *
 * There are 3 basic classes of QoS parameter: latency, timeout, throughput
 * each have defined units:
 * latency: usec
 * timeout: usec <-- currently not used.
 * throughput: kbs (kilo byte / sec)
 *
 * There are lists of pm_qos_objects each one wrapping requests, notifiers
 *
 * User mode requests on a QOS parameter register themselves to the
 * subsystem by opening the device node /dev/... and writing there request to
 * the node.  As long as the process holds a file handle open to the node the
 * client continues to be accounted for.  Upon file release the usermode
 * request is removed and a new qos target is computed.  This way when the
 * request that the application has is cleaned up when closes the file
 * pointer or exits the pm_qos_object will get an opportunity to clean up.
 *
 * Mark Gross <mgross@linux.intel.com>
 */

/*#define DEBUG*/

#include <linux/pm_qos_params.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/uaccess.h>

/*
 * locking rule: all changes to requests or notifiers lists
 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
 * held, taken with _irqsave.  One lock to rule them all
 */
enum pm_qos_type {
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN		/* return the smallest value */
};

/*
 * Note: The lockless read path depends on the CPU accessing
 * target_value atomically.  Atomic access is only guaranteed on all CPU
 * types linux supports for 32 bit quantites
 */
struct pm_qos_object {
	struct plist_head requests;	/* active requests, keyed by value */
	struct blocking_notifier_head *notifiers;
	struct miscdevice pm_qos_power_miscdev;
	char *name;
	s32 target_value;	/* Do not change to 64 bit */
	s32 default_value;
	enum pm_qos_type type;
};

static DEFINE_SPINLOCK(pm_qos_lock);

/* slot 0 of pm_qos_array is intentionally an unused placeholder */
static struct pm_qos_object null_pm_qos;

static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_object cpu_dma_pm_qos = {
	.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests),
	.notifiers = &cpu_dma_lat_notifier,
	.name = "cpu_dma_latency",
	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
	.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
	.type = PM_QOS_MIN,
};

static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_object network_lat_pm_qos = {
	.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests),
	.notifiers = &network_lat_notifier,
	.name = "network_latency",
	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
	.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
	.type = PM_QOS_MIN
};

static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_object network_throughput_pm_qos = {
	.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests),
	.notifiers = &network_throughput_notifier,
	.name = "network_throughput",
	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
	.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
	.type = PM_QOS_MAX,
};

static struct pm_qos_object *pm_qos_array[] = {
	&null_pm_qos,
	&cpu_dma_pm_qos,
	&network_lat_pm_qos,
	&network_throughput_pm_qos
};

static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos);
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
		size_t count, loff_t *f_pos);
static int pm_qos_power_open(struct inode *inode, struct file *filp);
static int pm_qos_power_release(struct inode *inode, struct file *filp);

static const struct file_operations pm_qos_power_fops = {
	.write = pm_qos_power_write,
	.read = pm_qos_power_read,
	.open = pm_qos_power_open,
	.release = pm_qos_power_release,
	.llseek = noop_llseek,
};

/* unlocked internal variant */
static inline int pm_qos_get_value(struct pm_qos_object *o)
{
	if (plist_head_empty(&o->requests))
		return o->default_value;

	switch (o->type) {
	case PM_QOS_MIN:
		return plist_first(&o->requests)->prio;

	case PM_QOS_MAX:
		return plist_last(&o->requests)->prio;

	default:
		/* runtime check for not using enum */
		BUG();
	}
}

/* lockless read of the cached aggregate; see the atomicity note above */
static inline s32 pm_qos_read_value(struct pm_qos_object *o)
{
	return o->target_value;
}

static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
{
	o->target_value = value;
}

/*
 * Apply an add/update/delete of @node to @o's request list under the lock,
 * recompute the aggregate target, and fire the notifier chain if the
 * target changed.
 */
static void update_target(struct pm_qos_object *o, struct plist_node *node,
			  int del, int value)
{
	unsigned long flags;
	int prev_value, curr_value;

	spin_lock_irqsave(&pm_qos_lock, flags);
	prev_value = pm_qos_get_value(o);
	/* PM_QOS_DEFAULT_VALUE is a signal that the value is unchanged */
	if (value != PM_QOS_DEFAULT_VALUE) {
		/*
		 * to change the list, we atomically remove, reinit
		 * with new value and add, then see if the extremal
		 * changed
		 */
		plist_del(node, &o->requests);
		plist_node_init(node, value);
		plist_add(node, &o->requests);
	} else if (del) {
		plist_del(node, &o->requests);
	} else {
		plist_add(node, &o->requests);
	}
	curr_value = pm_qos_get_value(o);
	pm_qos_set_value(o, curr_value);
	spin_unlock_irqrestore(&pm_qos_lock, flags);

	if (prev_value != curr_value)
		blocking_notifier_call_chain(o->notifiers,
					     (unsigned long)curr_value,
					     NULL);
}

/* register the /dev/<name> misc device for one qos class */
static int register_pm_qos_misc(struct pm_qos_object *qos)
{
	qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
	qos->pm_qos_power_miscdev.name = qos->name;
	qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;

	return misc_register(&qos->pm_qos_power_miscdev);
}

/* map a misc-device minor back to its pm_qos class index, or -1 */
static int find_pm_qos_object_by_minor(int minor)
{
	int pm_qos_class;

	for (pm_qos_class = 0;
		pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
		if (minor ==
			pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
			return pm_qos_class;
	}
	return -1;
}

/**
 * pm_qos_request - returns current system wide qos expectation
 * @pm_qos_class: identification of which qos value is requested
 *
 * This function returns the current target value.
 */
int pm_qos_request(int pm_qos_class)
{
	return pm_qos_read_value(pm_qos_array[pm_qos_class]);
}
EXPORT_SYMBOL_GPL(pm_qos_request);

int pm_qos_request_active(struct pm_qos_request_list *req)
{
	return req->pm_qos_class != 0;
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);

/**
 * pm_qos_add_request - inserts new qos request into the list
 * @dep: pointer to a preallocated handle
 * @pm_qos_class: identifies which list of qos request to use
 * @value: defines the qos request
 *
 * This function inserts a new entry in the pm_qos_class list of requested qos
 * performance characteristics.  It recomputes the aggregate QoS expectations
 * for the pm_qos_class of parameters and initializes the pm_qos_request_list
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 */
void pm_qos_add_request(struct pm_qos_request_list *dep,
			int pm_qos_class, s32 value)
{
	struct pm_qos_object *o =  pm_qos_array[pm_qos_class];
	int new_value;

	if (pm_qos_request_active(dep)) {
		WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
		return;
	}
	if (value == PM_QOS_DEFAULT_VALUE)
		new_value = o->default_value;
	else
		new_value = value;
	plist_node_init(&dep->list, new_value);
	dep->pm_qos_class = pm_qos_class;
	update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE);
}
EXPORT_SYMBOL_GPL(pm_qos_add_request);

/**
 * pm_qos_update_request - modifies an existing qos request
 * @pm_qos_req : handle to list element holding a pm_qos request to use
 * @value: defines the qos request
 *
 * Updates an existing qos request for the pm_qos_class of parameters along
 * with updating the target pm_qos_class value.
 *
 * Attempts are made to make this code callable on hot code paths.
 */
void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
			   s32 new_value)
{
	s32 temp;
	struct pm_qos_object *o;

	if (!pm_qos_req) /*guard against callers passing in null */
		return;

	if (!pm_qos_request_active(pm_qos_req)) {
		WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
		return;
	}

	o = pm_qos_array[pm_qos_req->pm_qos_class];

	if (new_value == PM_QOS_DEFAULT_VALUE)
		temp = o->default_value;
	else
		temp = new_value;

	if (temp != pm_qos_req->list.prio)
		update_target(o, &pm_qos_req->list, 0, temp);
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);

/**
 * pm_qos_remove_request - modifies an existing qos request
 * @pm_qos_req: handle to request list element
 *
 * Will remove pm qos request from the list of requests and
 * recompute the current target value for the pm_qos_class.  Call this
 * on slow code paths.
 */
void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req)
{
	struct pm_qos_object *o;

	if (pm_qos_req == NULL)
		return;
		/* silent return to keep pcm code cleaner */

	if (!pm_qos_request_active(pm_qos_req)) {
		WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
		return;
	}

	o = pm_qos_array[pm_qos_req->pm_qos_class];
	update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE);
	memset(pm_qos_req, 0, sizeof(*pm_qos_req));
}
EXPORT_SYMBOL_GPL(pm_qos_remove_request);

/**
 * pm_qos_add_notifier - sets notification entry for changes to target value
 * @pm_qos_class: identifies which qos target changes should be notified.
 * @notifier: notifier block managed by caller.
 *
 * will register the notifier into a notification chain that gets called
 * upon changes to the pm_qos_class target value.
 */
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
{
	int retval;

	retval = blocking_notifier_chain_register(
			pm_qos_array[pm_qos_class]->notifiers, notifier);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_qos_add_notifier);

/**
 * pm_qos_remove_notifier - deletes notification entry from chain.
 * @pm_qos_class: identifies which qos target changes are notified.
 * @notifier: notifier block to be removed.
 *
 * will remove the notifier from the notification chain that gets called
 * upon changes to the pm_qos_class target value.
 */
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
{
	int retval;

	retval = blocking_notifier_chain_unregister(
			pm_qos_array[pm_qos_class]->notifiers, notifier);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);

/* open: allocate a request handle for this process, default value */
static int pm_qos_power_open(struct inode *inode, struct file *filp)
{
	long pm_qos_class;

	pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
	if (pm_qos_class >= 0) {
		struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
		filp->private_data = req;

		if (filp->private_data)
			return 0;
	}
	return -EPERM;
}

/* release: drop the request made by this file handle */
static int pm_qos_power_release(struct inode *inode, struct file *filp)
{
	struct pm_qos_request_list *req;

	req = filp->private_data;
	pm_qos_remove_request(req);
	kfree(req);

	return 0;
}

/* read: return the current aggregate target as a raw s32 */
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
		size_t count, loff_t *f_pos)
{
	s32 value;
	unsigned long flags;
	struct pm_qos_object *o;
	struct pm_qos_request_list *pm_qos_req = filp->private_data;

	if (!pm_qos_req)
		return -EINVAL;
	if (!pm_qos_request_active(pm_qos_req))
		return -EINVAL;

	o = pm_qos_array[pm_qos_req->pm_qos_class];
	spin_lock_irqsave(&pm_qos_lock, flags);
	value = pm_qos_get_value(o);
	spin_unlock_irqrestore(&pm_qos_lock, flags);

	return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
}

/* write: accept either a raw s32 or an ASCII hex value */
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	s32 value;
	struct pm_qos_request_list *pm_qos_req;

	if (count == sizeof(s32)) {
		if (copy_from_user(&value, buf, sizeof(s32)))
			return -EFAULT;
	} else if (count <= 11) { /* ASCII perhaps? */
		char ascii_value[11];
		unsigned long int ulval;
		int ret;

		if (copy_from_user(ascii_value, buf, count))
			return -EFAULT;

		if (count > 10) {
			if (ascii_value[10] == '\n')
				ascii_value[10] = '\0';
			else
				return -EINVAL;
		} else {
			ascii_value[count] = '\0';
		}
		ret = strict_strtoul(ascii_value, 16, &ulval);
		if (ret) {
			pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
			return -EINVAL;
		}
		value = (s32)lower_32_bits(ulval);
	} else {
		return -EINVAL;
	}

	pm_qos_req = filp->private_data;
	pm_qos_update_request(pm_qos_req, value);

	return count;
}


static int __init pm_qos_power_init(void)
{
	int ret = 0;

	ret = register_pm_qos_misc(&cpu_dma_pm_qos);
	if (ret < 0) {
		printk(KERN_ERR
			"pm_qos_param: cpu_dma_latency setup failed\n");
		return ret;
	}
	ret = register_pm_qos_misc(&network_lat_pm_qos);
	if (ret < 0) {
		printk(KERN_ERR
			"pm_qos_param: network_latency setup failed\n");
		return ret;
	}
	ret = register_pm_qos_misc(&network_throughput_pm_qos);
	if (ret < 0) {
		printk(KERN_ERR
			"pm_qos_param: network_throughput setup failed\n");
		/*
		 * BUGFIX: propagate the registration error; the old code
		 * returned 0 here, reporting success to the initcall
		 * machinery even though the misc device was not registered
		 * (inconsistent with the two branches above).
		 */
		return ret;
	}

	return ret;
}

late_initcall(pm_qos_power_init);
gpl-2.0
fcooper/ti-linux
drivers/video/fbdev/core/fb_cmdline.c
1858
2458
/*
 * linux/drivers/video/fb_cmdline.c
 *
 * Copyright (C) 2014 Intel Corp
 * Copyright (C) 1994 Martin Schaller
 *
 * 2001 - Documented with DocBook
 * - Brad Douglas <brad@neruo.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 *
 * Authors:
 *    Vetter <danie.vetter@ffwll.ch>
 */
#include <linux/init.h>
#include <linux/fb.h>

/* per-device "video=<name>:<opts>" strings captured at boot */
static char *video_options[FB_MAX] __read_mostly;
/* set when "video=ofonly" was given: only the offb driver may bind */
static int ofonly __read_mostly;

const char *fb_mode_option;
EXPORT_SYMBOL_GPL(fb_mode_option);

/**
 * fb_get_options - get kernel boot parameters
 * @name:   framebuffer name as it would appear in
 *          the boot parameter line
 *          (video=<name>:<options>)
 * @option: the option will be stored here
 *
 * Returns 1 when the named driver should stay disabled (either
 * "ofonly" was requested and @name is not offb, or its options begin
 * with "off"); returns 0 otherwise.
 *
 * NOTE(review): on a per-device match *@option points into the static
 * video_options[] table, while the global fallback hands back a
 * kstrdup()'d copy of fb_mode_option — callers must not assume a
 * single ownership rule for the returned string.
 *
 * NOTE: Needed to maintain backwards compatibility
 */
int fb_get_options(const char *name, char **option)
{
	const int name_len = strlen(name);
	char *options = NULL;
	int retval = 0;
	int idx;

	/* "video=ofonly" disables every driver except offb */
	if (name_len && ofonly && strncmp(name, "offb", 4))
		retval = 1;

	if (name_len && !retval) {
		/* scan the recorded options for a "<name>:" prefix match */
		for (idx = 0; idx < FB_MAX; idx++) {
			char *cur = video_options[idx];

			if (cur == NULL || !cur[0])
				continue;
			if (!strncmp(name, cur, name_len) &&
			    cur[name_len] == ':')
				options = cur + name_len + 1;
		}
	}

	/* No match, pass global option */
	if (!options && option && fb_mode_option)
		options = kstrdup(fb_mode_option, GFP_KERNEL);

	if (options && !strncmp(options, "off", 3))
		retval = 1;

	if (option)
		*option = options;

	return retval;
}
EXPORT_SYMBOL(fb_get_options);

/**
 * video_setup - process command line options
 * @options: string of options
 *
 * Process command line options for frame buffer subsystem.
 *
 * NOTE: This function is a __setup and __init function.
 *       It only stores the options.  Drivers have to call
 *       fb_get_options() as necessary.
 *
 * Returns zero.
 *
 */
static int __init video_setup(char *options)
{
	int idx;

	/* empty "video=" — nothing to record */
	if (!options || !*options)
		return 1;

	if (!strncmp(options, "ofonly", 6)) {
		ofonly = 1;
		return 1;
	}

	/* no "<name>:" prefix: treat it as the global mode option */
	if (!strchr(options, ':')) {
		fb_mode_option = options;
		return 1;
	}

	/* per-device option: stash it in the first free slot */
	for (idx = 0; idx < FB_MAX; idx++) {
		if (video_options[idx] == NULL) {
			video_options[idx] = options;
			break;
		}
	}

	return 1;
}
__setup("video=", video_setup);
gpl-2.0