code
stringlengths
4
1.01M
/* * SuperH IrDA Driver * * Copyright (C) 2009 Renesas Solutions Corp. * Kuninori Morimoto <morimoto.kuninori@renesas.com> * * Based on bfin_sir.c * Copyright 2006-2009 Analog Devices Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/io.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <net/irda/wrapper.h> #include <net/irda/irda_device.h> #include <asm/clock.h> #define DRIVER_NAME "sh_sir" #define RX_PHASE (1 << 0) #define TX_PHASE (1 << 1) #define TX_COMP_PHASE (1 << 2) /* tx complete */ #define NONE_PHASE (1 << 31) #define IRIF_RINTCLR 0x0016 /* DMA rx interrupt source clear */ #define IRIF_TINTCLR 0x0018 /* DMA tx interrupt source clear */ #define IRIF_SIR0 0x0020 /* IrDA-SIR10 control */ #define IRIF_SIR1 0x0022 /* IrDA-SIR10 baudrate error correction */ #define IRIF_SIR2 0x0024 /* IrDA-SIR10 baudrate count */ #define IRIF_SIR3 0x0026 /* IrDA-SIR10 status */ #define IRIF_SIR_FRM 0x0028 /* Hardware frame processing set */ #define IRIF_SIR_EOF 0x002A /* EOF value */ #define IRIF_SIR_FLG 0x002C /* Flag clear */ #define IRIF_UART_STS2 0x002E /* UART status 2 */ #define IRIF_UART0 0x0030 /* UART control */ #define IRIF_UART1 0x0032 /* UART status */ #define IRIF_UART2 0x0034 /* UART mode */ #define IRIF_UART3 0x0036 /* UART transmit data */ #define IRIF_UART4 0x0038 /* UART receive data */ #define IRIF_UART5 0x003A /* UART interrupt mask */ #define IRIF_UART6 0x003C /* UART baud rate error correction */ #define IRIF_UART7 0x003E /* UART baud rate count set */ #define IRIF_CRC0 0x0040 /* CRC engine control */ #define IRIF_CRC1 0x0042 /* CRC engine input data */ #define IRIF_CRC2 0x0044 /* CRC engine calculation */ #define IRIF_CRC3 0x0046 /* CRC engine output data 1 */ #define IRIF_CRC4 0x0048 /* CRC engine output data 2 */ /* 
IRIF_SIR0 */ #define IRTPW (1 << 1) /* transmit pulse width select */ #define IRERRC (1 << 0) /* Clear receive pulse width error */ /* IRIF_SIR3 */ #define IRERR (1 << 0) /* received pulse width Error */ /* IRIF_SIR_FRM */ #define EOFD (1 << 9) /* EOF detection flag */ #define FRER (1 << 8) /* Frame Error bit */ #define FRP (1 << 0) /* Frame processing set */ /* IRIF_UART_STS2 */ #define IRSME (1 << 6) /* Receive Sum Error flag */ #define IROVE (1 << 5) /* Receive Overrun Error flag */ #define IRFRE (1 << 4) /* Receive Framing Error flag */ #define IRPRE (1 << 3) /* Receive Parity Error flag */ /* IRIF_UART0_*/ #define TBEC (1 << 2) /* Transmit Data Clear */ #define RIE (1 << 1) /* Receive Enable */ #define TIE (1 << 0) /* Transmit Enable */ /* IRIF_UART1 */ #define URSME (1 << 6) /* Receive Sum Error Flag */ #define UROVE (1 << 5) /* Receive Overrun Error Flag */ #define URFRE (1 << 4) /* Receive Framing Error Flag */ #define URPRE (1 << 3) /* Receive Parity Error Flag */ #define RBF (1 << 2) /* Receive Buffer Full Flag */ #define TSBE (1 << 1) /* Transmit Shift Buffer Empty Flag */ #define TBE (1 << 0) /* Transmit Buffer Empty flag */ #define TBCOMP (TSBE | TBE) /* IRIF_UART5 */ #define RSEIM (1 << 6) /* Receive Sum Error Flag IRQ Mask */ #define RBFIM (1 << 2) /* Receive Buffer Full Flag IRQ Mask */ #define TSBEIM (1 << 1) /* Transmit Shift Buffer Empty Flag IRQ Mask */ #define TBEIM (1 << 0) /* Transmit Buffer Empty Flag IRQ Mask */ #define RX_MASK (RSEIM | RBFIM) /* IRIF_CRC0 */ #define CRC_RST (1 << 15) /* CRC Engine Reset */ #define CRC_CT_MASK 0x0FFF /************************************************************************ structure ************************************************************************/ struct sh_sir_self { void __iomem *membase; unsigned int irq; struct clk *clk; struct net_device *ndev; struct irlap_cb *irlap; struct qos_info qos; iobuff_t tx_buff; iobuff_t rx_buff; }; 
/************************************************************************ common function ************************************************************************/ static void sh_sir_write(struct sh_sir_self *self, u32 offset, u16 data) { iowrite16(data, self->membase + offset); } static u16 sh_sir_read(struct sh_sir_self *self, u32 offset) { return ioread16(self->membase + offset); } static void sh_sir_update_bits(struct sh_sir_self *self, u32 offset, u16 mask, u16 data) { u16 old, new; old = sh_sir_read(self, offset); new = (old & ~mask) | data; if (old != new) sh_sir_write(self, offset, new); } /************************************************************************ CRC function ************************************************************************/ static void sh_sir_crc_reset(struct sh_sir_self *self) { sh_sir_write(self, IRIF_CRC0, CRC_RST); } static void sh_sir_crc_add(struct sh_sir_self *self, u8 data) { sh_sir_write(self, IRIF_CRC1, (u16)data); } static u16 sh_sir_crc_cnt(struct sh_sir_self *self) { return CRC_CT_MASK & sh_sir_read(self, IRIF_CRC0); } static u16 sh_sir_crc_out(struct sh_sir_self *self) { return sh_sir_read(self, IRIF_CRC4); } static int sh_sir_crc_init(struct sh_sir_self *self) { struct device *dev = &self->ndev->dev; int ret = -EIO; u16 val; sh_sir_crc_reset(self); sh_sir_crc_add(self, 0xCC); sh_sir_crc_add(self, 0xF5); sh_sir_crc_add(self, 0xF1); sh_sir_crc_add(self, 0xA7); val = sh_sir_crc_cnt(self); if (4 != val) { dev_err(dev, "CRC count error %x\n", val); goto crc_init_out; } val = sh_sir_crc_out(self); if (0x51DF != val) { dev_err(dev, "CRC result error%x\n", val); goto crc_init_out; } ret = 0; crc_init_out: sh_sir_crc_reset(self); return ret; } /************************************************************************ baud rate functions ************************************************************************/ #define SCLK_BASE 1843200 /* 1.8432MHz */ static u32 sh_sir_find_sclk(struct clk *irda_clk) { struct 
cpufreq_frequency_table *freq_table = irda_clk->freq_table; struct clk *pclk = clk_get(NULL, "peripheral_clk"); u32 limit, min = 0xffffffff, tmp; int i, index = 0; limit = clk_get_rate(pclk); clk_put(pclk); /* IrDA can not set over peripheral_clk */ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { u32 freq = freq_table[i].frequency; if (freq == CPUFREQ_ENTRY_INVALID) continue; /* IrDA should not over peripheral_clk */ if (freq > limit) continue; tmp = freq % SCLK_BASE; if (tmp < min) { min = tmp; index = i; } } return freq_table[index].frequency; } #define ERR_ROUNDING(a) ((a + 5000) / 10000) static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate) { struct clk *clk; struct device *dev = &self->ndev->dev; u32 rate; u16 uabca, uabc; u16 irbca, irbc; u32 min, rerr, tmp; int i; /* Baud Rate Error Correction x 10000 */ u32 rate_err_array[] = { 0, 625, 1250, 1875, 2500, 3125, 3750, 4375, 5000, 5625, 6250, 6875, 7500, 8125, 8750, 9375, }; /* * FIXME * * it support 9600 only now */ switch (baudrate) { case 9600: break; default: dev_err(dev, "un-supported baudrate %d\n", baudrate); return -EIO; } clk = clk_get(NULL, "irda_clk"); if (IS_ERR(clk)) { dev_err(dev, "can not get irda_clk\n"); return -EIO; } clk_set_rate(clk, sh_sir_find_sclk(clk)); rate = clk_get_rate(clk); clk_put(clk); dev_dbg(dev, "selected sclk = %d\n", rate); /* * CALCULATION * * 1843200 = system rate / (irbca + (irbc + 1)) */ irbc = rate / SCLK_BASE; tmp = rate - (SCLK_BASE * irbc); tmp *= 10000; rerr = tmp / SCLK_BASE; min = 0xffffffff; irbca = 0; for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) { tmp = abs(rate_err_array[i] - rerr); if (min > tmp) { min = tmp; irbca = i; } } tmp = rate / (irbc + ERR_ROUNDING(rate_err_array[irbca])); if ((SCLK_BASE / 100) < abs(tmp - SCLK_BASE)) dev_warn(dev, "IrDA freq error margin over %d\n", tmp); dev_dbg(dev, "target = %d, result = %d, infrared = %d.%d\n", SCLK_BASE, tmp, irbc, rate_err_array[irbca]); irbca = (irbca & 0xF) << 4; irbc = 
(irbc - 1) & 0xF; if (!irbc) { dev_err(dev, "sh_sir can not set 0 in IRIF_SIR2\n"); return -EIO; } sh_sir_write(self, IRIF_SIR0, IRTPW | IRERRC); sh_sir_write(self, IRIF_SIR1, irbca); sh_sir_write(self, IRIF_SIR2, irbc); /* * CALCULATION * * BaudRate[bps] = system rate / (uabca + (uabc + 1) x 16) */ uabc = rate / baudrate; uabc = (uabc / 16) - 1; uabc = (uabc + 1) * 16; tmp = rate - (uabc * baudrate); tmp *= 10000; rerr = tmp / baudrate; min = 0xffffffff; uabca = 0; for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) { tmp = abs(rate_err_array[i] - rerr); if (min > tmp) { min = tmp; uabca = i; } } tmp = rate / (uabc + ERR_ROUNDING(rate_err_array[uabca])); if ((baudrate / 100) < abs(tmp - baudrate)) dev_warn(dev, "UART freq error margin over %d\n", tmp); dev_dbg(dev, "target = %d, result = %d, uart = %d.%d\n", baudrate, tmp, uabc, rate_err_array[uabca]); uabca = (uabca & 0xF) << 4; uabc = (uabc / 16) - 1; sh_sir_write(self, IRIF_UART6, uabca); sh_sir_write(self, IRIF_UART7, uabc); return 0; } /************************************************************************ iobuf function ************************************************************************/ static int __sh_sir_init_iobuf(iobuff_t *io, int size) { io->head = kmalloc(size, GFP_KERNEL); if (!io->head) return -ENOMEM; io->truesize = size; io->in_frame = FALSE; io->state = OUTSIDE_FRAME; io->data = io->head; return 0; } static void sh_sir_remove_iobuf(struct sh_sir_self *self) { kfree(self->rx_buff.head); kfree(self->tx_buff.head); self->rx_buff.head = NULL; self->tx_buff.head = NULL; } static int sh_sir_init_iobuf(struct sh_sir_self *self, int rxsize, int txsize) { int err = -ENOMEM; if (self->rx_buff.head || self->tx_buff.head) { dev_err(&self->ndev->dev, "iobuff has already existed."); return err; } err = __sh_sir_init_iobuf(&self->rx_buff, rxsize); if (err) goto iobuf_err; err = __sh_sir_init_iobuf(&self->tx_buff, txsize); iobuf_err: if (err) sh_sir_remove_iobuf(self); return err; } 
/************************************************************************ status function ************************************************************************/ static void sh_sir_clear_all_err(struct sh_sir_self *self) { /* Clear error flag for receive pulse width */ sh_sir_update_bits(self, IRIF_SIR0, IRERRC, IRERRC); /* Clear frame / EOF error flag */ sh_sir_write(self, IRIF_SIR_FLG, 0xffff); /* Clear all status error */ sh_sir_write(self, IRIF_UART_STS2, 0); } static void sh_sir_set_phase(struct sh_sir_self *self, int phase) { u16 uart5 = 0; u16 uart0 = 0; switch (phase) { case TX_PHASE: uart5 = TBEIM; uart0 = TBEC | TIE; break; case TX_COMP_PHASE: uart5 = TSBEIM; uart0 = TIE; break; case RX_PHASE: uart5 = RX_MASK; uart0 = RIE; break; default: break; } sh_sir_write(self, IRIF_UART5, uart5); sh_sir_write(self, IRIF_UART0, uart0); } static int sh_sir_is_which_phase(struct sh_sir_self *self) { u16 val = sh_sir_read(self, IRIF_UART5); if (val & TBEIM) return TX_PHASE; if (val & TSBEIM) return TX_COMP_PHASE; if (val & RX_MASK) return RX_PHASE; return NONE_PHASE; } static void sh_sir_tx(struct sh_sir_self *self, int phase) { switch (phase) { case TX_PHASE: if (0 >= self->tx_buff.len) { sh_sir_set_phase(self, TX_COMP_PHASE); } else { sh_sir_write(self, IRIF_UART3, self->tx_buff.data[0]); self->tx_buff.len--; self->tx_buff.data++; } break; case TX_COMP_PHASE: sh_sir_set_phase(self, RX_PHASE); netif_wake_queue(self->ndev); break; default: dev_err(&self->ndev->dev, "should not happen\n"); break; } } static int sh_sir_read_data(struct sh_sir_self *self) { u16 val = 0; int timeout = 1024; while (timeout--) { val = sh_sir_read(self, IRIF_UART1); /* data get */ if (val & RBF) { if (val & (URSME | UROVE | URFRE | URPRE)) break; return (int)sh_sir_read(self, IRIF_UART4); } udelay(1); } dev_err(&self->ndev->dev, "UART1 %04x : STATUS %04x\n", val, sh_sir_read(self, IRIF_UART_STS2)); /* read data register for clear error */ sh_sir_read(self, IRIF_UART4); return -1; } static 
void sh_sir_rx(struct sh_sir_self *self) { int timeout = 1024; int data; while (timeout--) { data = sh_sir_read_data(self); if (data < 0) break; async_unwrap_char(self->ndev, &self->ndev->stats, &self->rx_buff, (u8)data); self->ndev->last_rx = jiffies; if (EOFD & sh_sir_read(self, IRIF_SIR_FRM)) continue; break; } } static irqreturn_t sh_sir_irq(int irq, void *dev_id) { struct sh_sir_self *self = dev_id; struct device *dev = &self->ndev->dev; int phase = sh_sir_is_which_phase(self); switch (phase) { case TX_COMP_PHASE: case TX_PHASE: sh_sir_tx(self, phase); break; case RX_PHASE: if (sh_sir_read(self, IRIF_SIR3)) dev_err(dev, "rcv pulse width error occurred\n"); sh_sir_rx(self); sh_sir_clear_all_err(self); break; default: dev_err(dev, "unknown interrupt\n"); } return IRQ_HANDLED; } /************************************************************************ net_device_ops function ************************************************************************/ static int sh_sir_hard_xmit(struct sk_buff *skb, struct net_device *ndev) { struct sh_sir_self *self = netdev_priv(ndev); int speed = irda_get_next_speed(skb); if ((0 < speed) && (9600 != speed)) { dev_err(&ndev->dev, "support 9600 only (%d)\n", speed); return -EIO; } netif_stop_queue(ndev); self->tx_buff.data = self->tx_buff.head; self->tx_buff.len = 0; if (skb->len) self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize); sh_sir_set_phase(self, TX_PHASE); dev_kfree_skb(skb); return 0; } static int sh_sir_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd) { /* * FIXME * * This function is needed for irda framework. 
* But nothing to do now */ return 0; } static struct net_device_stats *sh_sir_stats(struct net_device *ndev) { struct sh_sir_self *self = netdev_priv(ndev); return &self->ndev->stats; } static int sh_sir_open(struct net_device *ndev) { struct sh_sir_self *self = netdev_priv(ndev); int err; clk_enable(self->clk); err = sh_sir_crc_init(self); if (err) goto open_err; sh_sir_set_baudrate(self, 9600); self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME); if (!self->irlap) { err = -ENODEV; goto open_err; } /* * Now enable the interrupt then start the queue */ sh_sir_update_bits(self, IRIF_SIR_FRM, FRP, FRP); sh_sir_read(self, IRIF_UART1); /* flag clear */ sh_sir_read(self, IRIF_UART4); /* flag clear */ sh_sir_set_phase(self, RX_PHASE); netif_start_queue(ndev); dev_info(&self->ndev->dev, "opened\n"); return 0; open_err: clk_disable(self->clk); return err; } static int sh_sir_stop(struct net_device *ndev) { struct sh_sir_self *self = netdev_priv(ndev); /* Stop IrLAP */ if (self->irlap) { irlap_close(self->irlap); self->irlap = NULL; } netif_stop_queue(ndev); dev_info(&ndev->dev, "stopped\n"); return 0; } static const struct net_device_ops sh_sir_ndo = { .ndo_open = sh_sir_open, .ndo_stop = sh_sir_stop, .ndo_start_xmit = sh_sir_hard_xmit, .ndo_do_ioctl = sh_sir_ioctl, .ndo_get_stats = sh_sir_stats, }; /************************************************************************ platform_driver function ************************************************************************/ static int __devinit sh_sir_probe(struct platform_device *pdev) { struct net_device *ndev; struct sh_sir_self *self; struct resource *res; char clk_name[8]; int irq; int err = -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || irq < 0) { dev_err(&pdev->dev, "Not enough platform resources.\n"); goto exit; } ndev = alloc_irdadev(sizeof(*self)); if (!ndev) goto exit; self = netdev_priv(ndev); self->membase = ioremap_nocache(res->start, 
resource_size(res)); if (!self->membase) { err = -ENXIO; dev_err(&pdev->dev, "Unable to ioremap.\n"); goto err_mem_1; } err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME); if (err) goto err_mem_2; snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id); self->clk = clk_get(&pdev->dev, clk_name); if (IS_ERR(self->clk)) { dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); goto err_mem_3; } irda_init_max_qos_capabilies(&self->qos); ndev->netdev_ops = &sh_sir_ndo; ndev->irq = irq; self->ndev = ndev; self->qos.baud_rate.bits &= IR_9600; /* FIXME */ self->qos.min_turn_time.bits = 1; /* 10 ms or more */ irda_qos_bits_to_value(&self->qos); err = register_netdev(ndev); if (err) goto err_mem_4; platform_set_drvdata(pdev, ndev); if (request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self)) { dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n"); goto err_mem_4; } dev_info(&pdev->dev, "SuperH IrDA probed\n"); goto exit; err_mem_4: clk_put(self->clk); err_mem_3: sh_sir_remove_iobuf(self); err_mem_2: iounmap(self->membase); err_mem_1: free_netdev(ndev); exit: return err; } static int __devexit sh_sir_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct sh_sir_self *self = netdev_priv(ndev); if (!self) return 0; unregister_netdev(ndev); clk_put(self->clk); sh_sir_remove_iobuf(self); iounmap(self->membase); free_netdev(ndev); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver sh_sir_driver = { .probe = sh_sir_probe, .remove = __devexit_p(sh_sir_remove), .driver = { .name = DRIVER_NAME, }, }; module_platform_driver(sh_sir_driver); MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>"); MODULE_DESCRIPTION("SuperH IrDA driver"); MODULE_LICENSE("GPL");
<?php /** * Drupal_Sniffs_Files_TxtFileLineLengthSniff. * * PHP version 5 * * @category PHP * @package PHP_CodeSniffer * @author Klaus Purer * @link http://pear.php.net/package/PHP_CodeSniffer */ /** * Drupal_Sniffs_Files_TxtFileLineLengthSniff. * * Checks all lines in a *.txt or *.md file and throws warnings if they are over 80 * characters in length. * * @category PHP * @package PHP_CodeSniffer * @author Klaus Purer * @link http://pear.php.net/package/PHP_CodeSniffer */ class Drupal_Sniffs_Files_TxtFileLineLengthSniff implements PHP_CodeSniffer_Sniff { /** * Returns an array of tokens this test wants to listen for. * * @return array */ public function register() { return array(T_INLINE_HTML); }//end register() /** * Processes this test, when one of its tokens is encountered. * * @param PHP_CodeSniffer_File $phpcsFile The file being scanned. * @param int $stackPtr The position of the current token in the * stack passed in $tokens. * * @return void */ public function process(PHP_CodeSniffer_File $phpcsFile, $stackPtr) { $fileExtension = strtolower(substr($phpcsFile->getFilename(), -3)); if ($fileExtension === 'txt' || $fileExtension === '.md') { $tokens = $phpcsFile->getTokens(); $content = rtrim($tokens[$stackPtr]['content']); $lineLength = mb_strlen($content, 'UTF-8'); if ($lineLength > 80) { $data = array( 80, $lineLength, ); $warning = 'Line exceeds %s characters; contains %s characters'; $phpcsFile->addWarning($warning, $stackPtr, 'TooLong', $data); } } }//end process() }//end class ?>
/* * linux/mm/madvise.c * * Copyright (C) 1999 Linus Torvalds * Copyright (C) 2002 Christoph Hellwig */ #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/syscalls.h> #include <linux/mempolicy.h> #include <linux/hugetlb.h> #include <linux/sched.h> #include <linux/ksm.h> #include <linux/file.h> /* * Any behaviour which results in changes to the vma->vm_flags needs to * take mmap_sem for writing. Others, which simply traverse vmas, need * to only take it for reading. */ static int madvise_need_mmap_write(int behavior) { switch (behavior) { case MADV_REMOVE: case MADV_WILLNEED: case MADV_DONTNEED: return 0; default: /* be safe, default to 1. list exceptions explicitly */ return 1; } } /* * We can potentially split a vm area into separate * areas, each area with its own behavior. */ static long madvise_behavior(struct vm_area_struct * vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) { struct mm_struct * mm = vma->vm_mm; int error = 0; pgoff_t pgoff; unsigned long new_flags = vma->vm_flags; switch (behavior) { case MADV_NORMAL: new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; break; case MADV_SEQUENTIAL: new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ; break; case MADV_RANDOM: new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ; break; case MADV_DONTFORK: new_flags |= VM_DONTCOPY; break; case MADV_DOFORK: if (vma->vm_flags & VM_IO) { error = -EINVAL; goto out; } new_flags &= ~VM_DONTCOPY; break; case MADV_MERGEABLE: case MADV_UNMERGEABLE: error = ksm_madvise(vma, start, end, behavior, &new_flags); if (error) goto out; break; } if (new_flags == vma->vm_flags) { *prev = vma; goto out; } pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); if (*prev) { vma = *prev; goto success; } *prev = vma; if (start != vma->vm_start) { error = split_vma(mm, vma, start, 1); if (error) goto out; } if 
(end != vma->vm_end) { error = split_vma(mm, vma, end, 0); if (error) goto out; } success: /* * vm_flags is protected by the mmap_sem held in write mode. */ vma->vm_flags = new_flags; out: if (error == -ENOMEM) error = -EAGAIN; return error; } /* * Schedule all required I/O operations. Do not wait for completion. */ static long madvise_willneed(struct vm_area_struct * vma, struct vm_area_struct ** prev, unsigned long start, unsigned long end) { struct file *file = vma->vm_file; if (!file) return -EBADF; if (file->f_mapping->a_ops->get_xip_mem) { /* no bad return value, but ignore advice */ return 0; } *prev = vma; start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; if (end > vma->vm_end) end = vma->vm_end; end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; force_page_cache_readahead(file->f_mapping, file, start, end - start); return 0; } /* * Application no longer needs these pages. If the pages are dirty, * it's OK to just throw them away. The app will be more careful about * data it wants to keep. Be sure to free swap resources too. The * zap_page_range call sets things up for shrink_active_list to actually free * these pages later if no one else has touched them in the meantime, * although we could add these pages to a global reuse list for * shrink_active_list to pick up before reclaiming other pages. * * NB: This interface discards data rather than pushes it out to swap, * as some implementations do. This has performance implications for * applications like large transactional databases which want to discard * pages in anonymous maps after committing to backing store the data * that was kept in them. There is no reason to write this data out to * the swap area if the application is discarding it. * * An interface that causes the system to free clean pages and flush * dirty pages is already available as msync(MS_INVALIDATE). 
*/ static long madvise_dontneed(struct vm_area_struct * vma, struct vm_area_struct ** prev, unsigned long start, unsigned long end) { *prev = vma; if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) return -EINVAL; if (unlikely(vma->vm_flags & VM_NONLINEAR)) { struct zap_details details = { .nonlinear_vma = vma, .last_index = ULONG_MAX, }; zap_page_range(vma, start, end - start, &details); } else zap_page_range(vma, start, end - start, NULL); return 0; } /* * Application wants to free up the pages and associated backing store. * This is effectively punching a hole into the middle of a file. * * NOTE: Currently, only shmfs/tmpfs is supported for this operation. * Other filesystems return -ENOSYS. */ static long madvise_remove(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) { struct address_space *mapping; loff_t offset, endoff; int error; struct file *f; *prev = NULL; /* tell sys_madvise we drop mmap_sem */ if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB)) return -EINVAL; f = vma->vm_file; if (!f || !f->f_mapping || !f->f_mapping->host) { return -EINVAL; } if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) return -EACCES; mapping = vma->vm_file->f_mapping; offset = (loff_t)(start - vma->vm_start) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); endoff = (loff_t)(end - vma->vm_start - 1) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); /* * vmtruncate_range may need to take i_mutex and i_alloc_sem. * We need to explicitly grab a reference because the vma (and * hence the vma's reference to the file) can go away as soon as * we drop mmap_sem. */ get_file(f); up_read(&current->mm->mmap_sem); error = vmtruncate_range(mapping->host, offset, endoff); fput(f); down_read(&current->mm->mmap_sem); return error; } #ifdef CONFIG_MEMORY_FAILURE /* * Error injection support for memory error handling. 
*/ static int madvise_hwpoison(unsigned long start, unsigned long end) { int ret = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; for (; start < end; start += PAGE_SIZE) { struct page *p; int ret = get_user_pages(current, current->mm, start, 1, 0, 0, &p, NULL); if (ret != 1) return ret; printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n", page_to_pfn(p), start); /* Ignore return value for now */ __memory_failure(page_to_pfn(p), 0, 1); put_page(p); } return ret; } #endif static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) { switch (behavior) { case MADV_REMOVE: return madvise_remove(vma, prev, start, end); case MADV_WILLNEED: return madvise_willneed(vma, prev, start, end); case MADV_DONTNEED: return madvise_dontneed(vma, prev, start, end); default: return madvise_behavior(vma, prev, start, end, behavior); } } static int madvise_behavior_valid(int behavior) { switch (behavior) { case MADV_DOFORK: case MADV_DONTFORK: case MADV_NORMAL: case MADV_SEQUENTIAL: case MADV_RANDOM: case MADV_REMOVE: case MADV_WILLNEED: case MADV_DONTNEED: #ifdef CONFIG_KSM case MADV_MERGEABLE: case MADV_UNMERGEABLE: #endif return 1; default: return 0; } } /* * The madvise(2) system call. * * Applications can use madvise() to advise the kernel how it should * handle paging I/O in this VM area. The idea is to help the kernel * use appropriate read-ahead and caching techniques. The information * provided is advisory only, and can be safely disregarded by the * kernel without affecting the correct operation of the application. * * behavior values: * MADV_NORMAL - the default behavior is to read clusters. This * results in some read-ahead and read-behind. * MADV_RANDOM - the system should read the minimum amount of data * on any access, since it is unlikely that the appli- * cation will need more than what it asks for. 
* MADV_SEQUENTIAL - pages in the given range will probably be accessed * once, so they can be aggressively read ahead, and * can be freed soon after they are accessed. * MADV_WILLNEED - the application is notifying the system to read * some pages ahead. * MADV_DONTNEED - the application is finished with the given range, * so the kernel can free resources associated with it. * MADV_REMOVE - the application wants to free up the given range of * pages and associated backing store. * MADV_DONTFORK - omit this area from child's address space when forking: * typically, to avoid COWing pages pinned by get_user_pages(). * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. * MADV_MERGEABLE - the application recommends that KSM try to merge pages in * this area with pages of identical content from other such areas. * MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others. * * return values: * zero - success * -EINVAL - start + len < 0, start is not page-aligned, * "behavior" is not a valid value, or application * is attempting to release locked or shared pages. * -ENOMEM - addresses in the specified range are not currently * mapped, or are outside the AS of the process. * -EIO - an I/O error occurred while paging in data. * -EBADF - map exists, but area maps something that isn't a file. * -EAGAIN - a kernel resource was temporarily unavailable. 
*/ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) { unsigned long end, tmp; struct vm_area_struct * vma, *prev; int unmapped_error = 0; int error = -EINVAL; int write; size_t len; #ifdef CONFIG_MEMORY_FAILURE if (behavior == MADV_HWPOISON) return madvise_hwpoison(start, start+len_in); #endif if (!madvise_behavior_valid(behavior)) return error; write = madvise_need_mmap_write(behavior); if (write) down_write(&current->mm->mmap_sem); else down_read(&current->mm->mmap_sem); if (start & ~PAGE_MASK) goto out; len = (len_in + ~PAGE_MASK) & PAGE_MASK; /* Check to see whether len was rounded up from small -ve to zero */ if (len_in && !len) goto out; end = start + len; if (end < start) goto out; error = 0; if (end == start) goto out; /* * If the interval [start,end) covers some unmapped address * ranges, just ignore them, but return -ENOMEM at the end. * - different from the way of handling in mlock etc. */ vma = find_vma_prev(current->mm, start, &prev); if (vma && start > vma->vm_start) prev = vma; for (;;) { /* Still start < end. */ error = -ENOMEM; if (!vma) goto out; /* Here start < (end|vma->vm_end). */ if (start < vma->vm_start) { unmapped_error = -ENOMEM; start = vma->vm_start; if (start >= end) goto out; } /* Here vma->vm_start <= start < (end|vma->vm_end) */ tmp = vma->vm_end; if (end < tmp) tmp = end; /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */ error = madvise_vma(vma, &prev, start, tmp, behavior); if (error) goto out; start = tmp; if (prev && start < prev->vm_end) start = prev->vm_end; error = unmapped_error; if (start >= end) goto out; if (prev) vma = prev->vm_next; else /* madvise_remove dropped mmap_sem */ vma = find_vma(current->mm, start); } out: if (write) up_write(&current->mm->mmap_sem); else up_read(&current->mm->mmap_sem); return error; }
/* * Copyright (C) 2011-2014 MediaTek Inc. * * This program is free software: you can redistribute it and/or modify it under the terms of the * GNU General Public License version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along with this program. * If not, see <http://www.gnu.org/licenses/>. */ #ifndef _MT_SYNC_WRITE_H #define _MT_SYNC_WRITE_H #if defined(__KERNEL__) #include <linux/io.h> #include <asm/cacheflush.h> #include <asm/system.h> /* * Define macros. */ #define mt65xx_reg_sync_writel(v, a) \ do { \ __raw_writel((v), IOMEM((a))); \ dsb(); \ } while (0) #define mt65xx_reg_sync_writew(v, a) \ do { \ __raw_writew((v), IOMEM((a))); \ dsb(); \ } while (0) #define mt65xx_reg_sync_writeb(v, a) \ do { \ __raw_writeb((v), IOMEM((a))); \ dsb(); \ } while (0) #define mt_reg_sync_writel(v, a) \ do { \ __raw_writel((v), IOMEM((a))); \ dsb(); \ } while (0) #define mt_reg_sync_writew(v, a) \ do { \ __raw_writew((v), IOMEM((a))); \ dsb(); \ } while (0) #define mt_reg_sync_writeb(v, a) \ do { \ __raw_writeb((v), IOMEM((a))); \ dsb(); \ } while (0) #else /* __KERNEL__ */ #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include <string.h> #define dsb() \ do { \ __asm__ __volatile__ ("dsb" : : : "memory"); \ } while (0) #define mt65xx_reg_sync_writel(v, a) \ do { \ *(volatile unsigned int *)(a) = (v); \ dsb(); \ } while (0) #define mt65xx_reg_sync_writew(v, a) \ do { \ *(volatile unsigned short *)(a) = (v); \ dsb(); \ } while (0) #define mt65xx_reg_sync_writeb(v, a) \ do { \ *(volatile unsigned char *)(a) = (v); \ dsb(); \ } while (0) #define mt_reg_sync_writel(v, a) \ do { \ *(volatile unsigned int *)(a) = (v); \ dsb(); \ } while 
(0) #define mt_reg_sync_writew(v, a) \ do { \ *(volatile unsigned short *)(a) = (v); \ dsb(); \ } while (0) #define mt_reg_sync_writeb(v, a) \ do { \ *(volatile unsigned char *)(a) = (v); \ dsb(); \ } while (0) #endif /* __KERNEL__ */ #endif /* !_MT_SYNC_WRITE_H */
<?php
/////////////////////////////////////////////////////////////////////////////
//                                                                         //
// NOTICE OF COPYRIGHT                                                     //
//                                                                         //
// Moodle - Calendar extension                                             //
//                                                                         //
// Copyright (C) 2003-2004  Greek School Network            www.sch.gr     //
//                                                                         //
// Designed by:                                                            //
//     Avgoustos Tsinakos (tsinakos@teikav.edu.gr)                         //
//     Jon Papaioannou (pj@moodle.org)                                     //
//                                                                         //
// Programming and development:                                            //
//     Jon Papaioannou (pj@moodle.org)                                     //
//                                                                         //
// For bugs, suggestions, etc contact:                                     //
//     Jon Papaioannou (pj@moodle.org)                                     //
//                                                                         //
// The current module was developed at the University of Macedonia         //
// (www.uom.gr) under the funding of the Greek School Network (www.sch.gr) //
// The aim of this project is to provide additional and improved           //
// functionality to the Asynchronous Distance Education service that the   //
// Greek School Network deploys.                                           //
//                                                                         //
// This program is free software; you can redistribute it and/or modify    //
// it under the terms of the GNU General Public License as published by    //
// the Free Software Foundation; either version 2 of the License, or       //
// (at your option) any later version.                                     //
//                                                                         //
// This program is distributed in the hope that it will be useful,         //
// but WITHOUT ANY WARRANTY; without even the implied warranty of          //
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the           //
// GNU General Public License for more details:                            //
//                                                                         //
//          http://www.gnu.org/copyleft/gpl.html                           //
//                                                                         //
/////////////////////////////////////////////////////////////////////////////

/**
 * Calendar export page: lets a user build an iCal export URL (or redirect
 * straight to the export) for site or course calendar events.
 *
 * This file is part of the User section Moodle
 *
 * @copyright 2003-2004 Jon Papaioannou (pj@moodle.org)
 * @license http://www.gnu.org/copyleft/gpl.html GNU GPL v2 or later
 * @package calendar
 */

require_once('../config.php');
require_once($CFG->dirroot.'/course/lib.php');
require_once($CFG->dirroot.'/calendar/lib.php');

// Feature gate: the whole page is unavailable unless the admin enabled export.
if (empty($CFG->enablecalendarexport)) {
    die('no export');
}

$courseid = optional_param('course', SITEID, PARAM_INT);
$action = optional_param('action', '', PARAM_ALPHA);
$day = optional_param('cal_d', 0, PARAM_INT);
$mon = optional_param('cal_m', 0, PARAM_INT);
$year = optional_param('cal_y', 0, PARAM_INT);
$time = optional_param('time', 0, PARAM_INT);
// NOTE(review): $generateurl is read again from the submitted form data below
// ($data->generateurl); this request parameter appears unused after this point.
$generateurl = optional_param('generateurl', 0, PARAM_BOOL);

// If a day, month and year were passed then convert it to a timestamp. If these were passed
// then we can assume the day, month and year are passed as Gregorian, as no where in core
// should we be passing these values rather than the time. This is done for BC.
if (!empty($day) && !empty($mon) && !empty($year)) {
    if (checkdate($mon, $day, $year)) {
        $time = make_timestamp($year, $mon, $day);
    } else {
        // Invalid date supplied — fall back to "now".
        $time = time();
    }
} else if (empty($time)) {
    $time = time();
}

if ($courseid != SITEID && !empty($courseid)) {
    // Course ID must be valid and existing.
    $course = $DB->get_record('course', array('id' => $courseid), '*', MUST_EXIST);
    $courses = array($course->id => $course);
    $issite = false;
} else {
    // Site-level calendar: include the user's default course set.
    $course = get_site();
    $courses = calendar_get_default_courses();
    $issite = true;
}
require_login($course, false);

// Build this page's canonical URL (used by $PAGE for self-links).
$url = new moodle_url('/calendar/export.php', array('time' => $time));
if ($action !== '') {
    $url->param('action', $action);
}
if ($course !== NULL) {
    $url->param('course', $course->id);
}
$PAGE->set_url($url);

$calendar = new calendar_information(0, 0, 0, $time);
$calendar->set_sources($course, $courses);

$pagetitle = get_string('export', 'calendar');

// Print title and header
if ($issite) {
    $PAGE->navbar->add($course->shortname, new moodle_url('/course/view.php', array('id'=>$course->id)));
}

$link = new moodle_url(CALENDAR_URL.'view.php', array('view'=>'upcoming', 'course'=>$calendar->courseid));
$PAGE->navbar->add(get_string('calendar', 'calendar'), calendar_get_link_href($link, 0, 0, 0, $time));
$PAGE->navbar->add($pagetitle);

$PAGE->set_title($course->shortname.': '.get_string('calendar', 'calendar').': '.$pagetitle);
$PAGE->set_heading($course->fullname);
$PAGE->set_pagelayout('standard');

$renderer = $PAGE->get_renderer('core_calendar');
$calendar->add_sidecalendar_blocks($renderer);

// Get the calendar type we are using.
$calendartype = \core_calendar\type_factory::get_calendar_instance();
$now = $calendartype->timestamp_to_date_array($time);

// Weekend days are encoded as a bitmask indexed by weekday number.
$weekend = CALENDAR_DEFAULT_WEEKEND;
if (isset($CFG->calendar_weekend)) {
    $weekend = intval($CFG->calendar_weekend);
}
$numberofdaysinweek = $calendartype->get_num_weekdays();

$formdata = array(
    // Let's populate some vars to let "common tasks" be somewhat smart...
    // If today it's weekend, give the "next week" option.
    'allownextweek' => $weekend & (1 << $now['wday']),
    // If it's the last week of the month, give the "next month" option.
    'allownextmonth' => calendar_days_in_month($now['mon'], $now['year']) - $now['mday'] < $numberofdaysinweek,
    // If today it's weekend but tomorrow it isn't, do NOT give the "this week" option.
    'allowthisweek' => !(($weekend & (1 << $now['wday'])) && !($weekend & (1 << (($now['wday'] + 1) % $numberofdaysinweek))))
);

$exportform = new core_calendar_export_form(null, $formdata);
$calendarurl = '';
if ($data = $exportform->get_data()) {
    // The auth token ties the export URL to this user: it hashes the user id,
    // the current password hash and a site-wide salt, so the URL stops working
    // if the password or salt changes.
    $password = $DB->get_record('user', array('id' => $USER->id), 'password');
    $params = array();
    $params['userid'] = $USER->id;
    $params['authtoken'] = sha1($USER->id . (isset($password->password) ? $password->password : '') . $CFG->calendar_exportsalt);
    $params['preset_what'] = $data->events['exportevents'];
    $params['preset_time'] = $data->period['timeperiod'];

    $link = new moodle_url('/calendar/export_execute.php', $params);
    if (!empty($data->generateurl)) {
        // "Get URL" button: show the export URL instead of redirecting.
        $urlclasses = array('class' => 'generalbox calendarurl');
        $calendarurl = html_writer::tag('div', get_string('calendarurl', 'calendar', $link->out()), $urlclasses);
    }
    if (!empty($data->export)) {
        // "Export" button: send the user straight to the iCal download.
        redirect($link);
    }
}

echo $OUTPUT->header();
echo $renderer->start_layout();
echo $OUTPUT->heading(get_string('exportcalendar', 'calendar'));
if ($action != 'advanced') {
    $exportform->display();
}
echo $calendarurl;
echo $renderer->complete_layout();
echo $OUTPUT->footer();
/* * This file is part of MPlayer. * * MPlayer is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * MPlayer is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with MPlayer; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef MPLAYER_FASTMEMCPY_H #define MPLAYER_FASTMEMCPY_H #include "config.h" #include <inttypes.h> #include <string.h> #include <stddef.h> void * fast_memcpy(void * to, const void * from, size_t len); void * mem2agpcpy(void * to, const void * from, size_t len); #if ! defined(CONFIG_FASTMEMCPY) || ! 
(HAVE_MMX || HAVE_MMX2 || HAVE_AMD3DNOW /* || HAVE_SSE || HAVE_SSE2 */) #define mem2agpcpy(a,b,c) memcpy(a,b,c) #define fast_memcpy(a,b,c) memcpy(a,b,c) #endif static inline void * mem2agpcpy_pic(void * dst, const void * src, int bytesPerLine, int height, int dstStride, int srcStride) { int i; void *retval=dst; if(dstStride == srcStride) { if (srcStride < 0) { src = (const uint8_t*)src + (height-1)*srcStride; dst = (uint8_t*)dst + (height-1)*dstStride; srcStride = -srcStride; } mem2agpcpy(dst, src, srcStride*height); } else { for(i=0; i<height; i++) { mem2agpcpy(dst, src, bytesPerLine); src = (const uint8_t*)src + srcStride; dst = (uint8_t*)dst + dstStride; } } return retval; } #define memcpy_pic(d, s, b, h, ds, ss) memcpy_pic2(d, s, b, h, ds, ss, 0) #define my_memcpy_pic(d, s, b, h, ds, ss) memcpy_pic2(d, s, b, h, ds, ss, 1) /** * \param limit2width always skip data between end of line and start of next * instead of copying the full block when strides are the same */ static inline void * memcpy_pic2(void * dst, const void * src, int bytesPerLine, int height, int dstStride, int srcStride, int limit2width) { int i; void *retval=dst; if(!limit2width && dstStride == srcStride) { if (srcStride < 0) { src = (const uint8_t*)src + (height-1)*srcStride; dst = (uint8_t*)dst + (height-1)*dstStride; srcStride = -srcStride; } fast_memcpy(dst, src, srcStride*height); } else { for(i=0; i<height; i++) { fast_memcpy(dst, src, bytesPerLine); src = (const uint8_t*)src + srcStride; dst = (uint8_t*)dst + dstStride; } } return retval; } #endif /* MPLAYER_FASTMEMCPY_H */
/** * Copyright 2011 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.bitcoin.core; import static com.google.common.base.Preconditions.checkArgument; /** * <p>Represents the "inv" P2P network message. An inv contains a list of hashes of either blocks or transactions. It's * a bandwidth optimization - on receiving some data, a (fully validating) peer sends every connected peer an inv * containing the hash of what it saw. It'll only transmit the full thing if a peer asks for it with a * {@link GetDataMessage}.</p> */ public class InventoryMessage extends ListMessage { private static final long serialVersionUID = -7050246551646107066L; public InventoryMessage(NetworkParameters params, byte[] bytes) throws ProtocolException { super(params, bytes); } /** * Deserializes an 'inv' message. * @param params NetworkParameters object. * @param msg Bitcoin protocol formatted byte array containing message content. * @param parseLazy Whether to perform a full parse immediately or delay until a read is requested. * @param parseRetain Whether to retain the backing byte array for quick reserialization. * If true and the backing byte array is invalidated due to modification of a field then * the cached bytes may be repopulated and retained if the message is serialized again in the future. * @param length The length of message if known. Usually this is provided when deserializing of the wire * as the length will be provided as part of the header. 
If unknown then set to Message.UNKNOWN_LENGTH * @throws ProtocolException */ public InventoryMessage(NetworkParameters params, byte[] msg, boolean parseLazy, boolean parseRetain, int length) throws ProtocolException { super(params, msg, parseLazy, parseRetain, length); } public InventoryMessage(NetworkParameters params) { super(params); } public void addBlock(Block block) { addItem(new InventoryItem(InventoryItem.Type.Block, block.getHash())); } public void addTransaction(Transaction tx) { addItem(new InventoryItem(InventoryItem.Type.Transaction, tx.getHash())); } /** Creates a new inv message for the given transactions. */ public static InventoryMessage with(Transaction... txns) { checkArgument(txns.length > 0); InventoryMessage result = new InventoryMessage(txns[0].getParams()); for (Transaction tx : txns) result.addTransaction(tx); return result; } }
#region License, Terms and Author(s)
//
// ELMAH - Error Logging Modules and Handlers for ASP.NET
// Copyright (c) 2004-9 Atif Aziz. All rights reserved.
//
//  Author(s):
//
//      Atif Aziz, http://www.raboof.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#endregion

namespace Elmah
{
    #region Imports

    using System;
    using System.Data;
    using System.Reflection;

    #endregion

    /// <summary>
    /// Extension methods for <see cref="IDbCommand"/> objects.
    /// </summary>

    static class DbCommandExtensions
    {
        /// <remarks>
        /// Use <see cref="Missing.Value"/> for parameter value to avoid
        /// having it set by the returned function.
        /// </remarks>

        public static Func<string, DbType?, object, IDbDataParameter> ParameterAdder(this IDbCommand command)
        {
            // Delegate to the generic overload, using the command's own
            // factory method to create each parameter.
            return ParameterAdder(command, cmd => cmd.CreateParameter());
        }

        /// <remarks>
        /// Use <see cref="Missing.Value"/> for parameter value to avoid
        /// having it set by the returned function.
        /// </remarks>

        public static Func<string, DbType?, object, TParameter> ParameterAdder<TCommand, TParameter>(this TCommand command, Func<TCommand, TParameter> parameterCreator)
            where TCommand : IDbCommand
            where TParameter : IDataParameter
        {
            // ReSharper disable CompareNonConstrainedGenericWithNull
            if (command == null) throw new ArgumentNullException("command");
            // ReSharper restore CompareNonConstrainedGenericWithNull
            if (parameterCreator == null) throw new ArgumentNullException("parameterCreator");

            // Returned closure: creates a parameter, configures it, attaches
            // it to the command and hands it back to the caller.
            return (name, dbType, value) =>
            {
                var p = parameterCreator(command);
                p.ParameterName = name;

                if (dbType.HasValue)
                    p.DbType = dbType.Value;

                // Missing.Value is the sentinel for "leave the value unset".
                if (Missing.Value != value)
                    p.Value = value;

                command.Parameters.Add(p);
                return p;
            };
        }
    }
}
/*
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 * Copyright (C) 2007 Nikolas Zimmermann <zimmermann@kde.org>
 * Copyright (C) Research In Motion Limited 2010. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB.  If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#ifndef SVGFontElement_h
#define SVGFontElement_h

#if ENABLE(SVG_FONTS)
#include "SVGAnimatedBoolean.h"
#include "SVGExternalResourcesRequired.h"
#include "SVGGlyphElement.h"
#include "SVGGlyphMap.h"
#include "SVGParserUtilities.h"
#include "SVGStyledElement.h"

namespace WebCore {

// Describe an SVG <hkern>/<vkern> element
struct SVGKerningPair {
    float kerning; // kerning amount for this pair (defaults to 0)
    UnicodeRanges unicodeRange1;
    UnicodeRanges unicodeRange2;
    HashSet<String> unicodeName1;
    HashSet<String> unicodeName2;
    HashSet<String> glyphName1;
    HashSet<String> glyphName2;

    SVGKerningPair()
        : kerning(0.0f)
    {
    }
};

typedef Vector<SVGKerningPair> KerningPairVector;

class SVGMissingGlyphElement;

// DOM element for SVG <font>. Holds a glyph map and horizontal/vertical
// kerning tables; the mutable members below suggest these are filled
// lazily via ensureGlyphCache() -- confirm against SVGFontElement.cpp.
class SVGFontElement : public SVGStyledElement
                     , public SVGExternalResourcesRequired {
public:
    static PassRefPtr<SVGFontElement> create(const QualifiedName&, Document*);

    // Marks the cached glyph data stale (see m_isGlyphCacheValid).
    void invalidateGlyphCache();
    void getGlyphIdentifiersForString(const String&, Vector<SVGGlyph>&) const;

    float horizontalKerningForPairOfStringsAndGlyphs(const String& u1, const String& g1, const String& u2, const String& g2) const;
    float verticalKerningForPairOfStringsAndGlyphs(const String& u1, const String& g1, const String& u2, const String& g2) const;

    SVGMissingGlyphElement* firstMissingGlyphElement() const;

private:
    SVGFontElement(const QualifiedName&, Document*);

    virtual void synchronizeProperty(const QualifiedName&);

    // The <font> element itself never gets a renderer.
    virtual bool rendererIsNeeded(RenderStyle*) { return false; }
    virtual void fillAttributeToPropertyTypeMap();
    virtual AttributeToPropertyTypeMap& attributeToPropertyTypeMap();

    void ensureGlyphCache() const;

    // Animated property declarations

    // SVGExternalResourcesRequired
    DECLARE_ANIMATED_BOOLEAN(ExternalResourcesRequired, externalResourcesRequired)

    // mutable: written from const accessors (cache state, not DOM state).
    mutable KerningPairVector m_horizontalKerningPairs;
    mutable KerningPairVector m_verticalKerningPairs;
    mutable SVGGlyphMap m_glyphMap;
    mutable bool m_isGlyphCacheValid;
};

} // namespace WebCore

#endif // ENABLE(SVG_FONTS)
#endif
// [Name] SVGLinearGradientElement-dom-x1-attr.js // [Expected rendering result] green ellipse, no red visible - and a series of PASS messages description("Tests dynamic updates of the 'x1' attribute of the SVGLinearGradientElement object") createSVGTestCase(); var ellipseElement = createSVGElement("ellipse"); ellipseElement.setAttribute("cx", "150"); ellipseElement.setAttribute("cy", "150"); ellipseElement.setAttribute("rx", "100"); ellipseElement.setAttribute("ry", "150"); ellipseElement.setAttribute("fill", "url(#gradient)"); var defsElement = createSVGElement("defs"); rootSVGElement.appendChild(defsElement); var linearGradientElement = createSVGElement("linearGradient"); linearGradientElement.setAttribute("id", "gradient"); linearGradientElement.setAttribute("x1", "100%"); var firstStopElement = createSVGElement("stop"); firstStopElement.setAttribute("offset", "0"); firstStopElement.setAttribute("stop-color", "red"); linearGradientElement.appendChild(firstStopElement); var lastStopElement = createSVGElement("stop"); lastStopElement.setAttribute("offset", "1"); lastStopElement.setAttribute("stop-color", "green"); linearGradientElement.appendChild(lastStopElement); defsElement.appendChild(linearGradientElement); rootSVGElement.appendChild(ellipseElement); shouldBeEqualToString("linearGradientElement.getAttribute('x1')", "100%"); function repaintTest() { linearGradientElement.setAttribute("x1", "200%"); shouldBeEqualToString("linearGradientElement.getAttribute('x1')", "200%"); } var successfullyParsed = true;
<?php
/**
 * Zend Framework
 *
 * LICENSE
 *
 * This source file is subject to the new BSD license that is bundled
 * with this package in the file LICENSE.txt.
 * It is also available through the world-wide-web at this URL:
 * http://framework.zend.com/license/new-bsd
 * If you did not receive a copy of the license and are unable to
 * obtain it through the world-wide-web, please send an email
 * to license@zend.com so we can send you a copy immediately.
 *
 * @category  Zend
 * @package   Zend_Navigation
 * @copyright Copyright (c) 2005-2012 Zend Technologies USA Inc. (http://www.zend.com)
 * @license   http://framework.zend.com/license/new-bsd New BSD License
 * @version   $Id$
 */

/**
 * Zend_Navigation_Container
 *
 * Container class for Zend_Navigation_Page classes. Pages are keyed by their
 * hash code; $_index maps hash => iteration order and is lazily re-sorted.
 *
 * @category  Zend
 * @package   Zend_Navigation
 * @copyright Copyright (c) 2005-2012 Zend Technologies USA Inc. (http://www.zend.com)
 * @license   http://framework.zend.com/license/new-bsd New BSD License
 */
abstract class Zend_Navigation_Container implements RecursiveIterator, Countable
{
    /**
     * Contains sub pages
     *
     * @var array
     */
    protected $_pages = array();

    /**
     * An index that contains the order in which to iterate pages
     *
     * @var array
     */
    protected $_index = array();

    /**
     * Whether index is dirty and needs to be re-arranged
     *
     * @var bool
     */
    protected $_dirtyIndex = false;

    // Internal methods:

    /**
     * Sorts the page index according to page order
     *
     * Pages without an explicit order keep their insertion position.
     *
     * @return void
     */
    protected function _sort()
    {
        if ($this->_dirtyIndex) {
            $newIndex = array();
            $index = 0;

            foreach ($this->_pages as $hash => $page) {
                $order = $page->getOrder();
                if ($order === null) {
                    $newIndex[$hash] = $index;
                    $index++;
                } else {
                    $newIndex[$hash] = $order;
                }
            }

            asort($newIndex);
            $this->_index = $newIndex;
            $this->_dirtyIndex = false;
        }
    }

    // Public methods:

    /**
     * Notifies container that the order of pages are updated
     *
     * @return void
     */
    public function notifyOrderUpdated()
    {
        $this->_dirtyIndex = true;
    }

    /**
     * Adds a page to the container
     *
     * This method will inject the container as the given page's parent by
     * calling {@link Zend_Navigation_Page::setParent()}.
     *
     * @param  Zend_Navigation_Page|array|Zend_Config $page  page to add
     * @return Zend_Navigation_Container                     fluent interface,
     *                                                       returns self
     * @throws Zend_Navigation_Exception                     if page is invalid
     */
    public function addPage($page)
    {
        if ($page === $this) {
            require_once 'Zend/Navigation/Exception.php';
            throw new Zend_Navigation_Exception(
                'A page cannot have itself as a parent');
        }

        if (is_array($page) || $page instanceof Zend_Config) {
            require_once 'Zend/Navigation/Page.php';
            $page = Zend_Navigation_Page::factory($page);
        } elseif (!$page instanceof Zend_Navigation_Page) {
            require_once 'Zend/Navigation/Exception.php';
            throw new Zend_Navigation_Exception(
                'Invalid argument: $page must be an instance of ' .
                'Zend_Navigation_Page or Zend_Config, or an array');
        }

        $hash = $page->hashCode();

        if (array_key_exists($hash, $this->_index)) {
            // page is already in container
            return $this;
        }

        // adds page to container and sets dirty flag
        $this->_pages[$hash] = $page;
        $this->_index[$hash] = $page->getOrder();
        $this->_dirtyIndex = true;

        // inject self as page parent
        $page->setParent($this);
        return $this;
    }

    /**
     * Adds several pages at once
     *
     * @param  array|Zend_Config|Zend_Navigation_Container $pages pages to add
     * @return Zend_Navigation_Container                   fluent interface,
     *                                                     returns self
     * @throws Zend_Navigation_Exception                   if $pages is not
     *                                                     array, Zend_Config or
     *                                                     Zend_Navigation_Container
     */
    public function addPages($pages)
    {
        if ($pages instanceof Zend_Config) {
            $pages = $pages->toArray();
        }

        if ($pages instanceof Zend_Navigation_Container) {
            $pages = iterator_to_array($pages);
        }

        if (!is_array($pages)) {
            require_once 'Zend/Navigation/Exception.php';
            throw new Zend_Navigation_Exception(
                'Invalid argument: $pages must be an array, an ' .
                'instance of Zend_Config or an instance of ' .
                'Zend_Navigation_Container');
        }

        foreach ($pages as $page) {
            $this->addPage($page);
        }

        return $this;
    }

    /**
     * Sets pages this container should have, removing existing pages
     *
     * @param  array $pages pages to set
     * @return Zend_Navigation_Container fluent interface, returns self
     */
    public function setPages(array $pages)
    {
        $this->removePages();
        return $this->addPages($pages);
    }

    /**
     * Returns pages in the container
     *
     * @return array array of Zend_Navigation_Page instances
     */
    public function getPages()
    {
        return $this->_pages;
    }

    /**
     * Removes the given page from the container
     *
     * @param  Zend_Navigation_Page|int $page page to remove, either a page
     *                                        instance or a specific page order
     * @return bool                           whether the removal was
     *                                        successful
     */
    public function removePage($page)
    {
        if ($page instanceof Zend_Navigation_Page) {
            $hash = $page->hashCode();
        } elseif (is_int($page)) {
            $this->_sort();
            // array_search() must be compared strictly against false: a loose
            // "!$hash" check (as previously used here) would also reject any
            // falsy hash key and hid the assignment inside the condition.
            $hash = array_search($page, $this->_index);
            if ($hash === false) {
                return false;
            }
        } else {
            return false;
        }

        if (isset($this->_pages[$hash])) {
            unset($this->_pages[$hash]);
            unset($this->_index[$hash]);
            $this->_dirtyIndex = true;
            return true;
        }

        return false;
    }

    /**
     * Removes all pages in container
     *
     * @return Zend_Navigation_Container fluent interface, returns self
     */
    public function removePages()
    {
        $this->_pages = array();
        $this->_index = array();
        return $this;
    }

    /**
     * Checks if the container has the given page
     *
     * @param  Zend_Navigation_Page $page      page to look for
     * @param  bool                 $recursive [optional] whether to search
     *                                         recursively. Default is false.
     * @return bool                            whether page is in container
     */
    public function hasPage(Zend_Navigation_Page $page, $recursive = false)
    {
        if (array_key_exists($page->hashCode(), $this->_index)) {
            return true;
        } elseif ($recursive) {
            foreach ($this->_pages as $childPage) {
                if ($childPage->hasPage($page, true)) {
                    return true;
                }
            }
        }

        return false;
    }

    /**
     * Returns true if container contains any pages
     *
     * @return bool whether container has any pages
     */
    public function hasPages()
    {
        return count($this->_index) > 0;
    }

    /**
     * Returns a child page matching $property == $value or
     * preg_match($value, $property), or null if not found
     *
     * @param  string $property name of property to match against
     * @param  mixed  $value    value to match property against
     * @param  bool   $useRegex [optional] if true PHP's preg_match
     *                          is used. Default is false.
     * @return Zend_Navigation_Page|null matching page or null
     */
    public function findOneBy($property, $value, $useRegex = false)
    {
        $iterator = new RecursiveIteratorIterator(
            $this,
            RecursiveIteratorIterator::SELF_FIRST
        );

        foreach ($iterator as $page) {
            $pageProperty = $page->get($property);

            // Rel and rev properties are arrays (possibly of arrays).
            if (is_array($pageProperty)) {
                foreach ($pageProperty as $item) {
                    if (is_array($item)) {
                        // Use regex?
                        if (true === $useRegex) {
                            foreach ($item as $item2) {
                                if (0 !== preg_match($value, $item2)) {
                                    return $page;
                                }
                            }
                        } else {
                            if (in_array($value, $item)) {
                                return $page;
                            }
                        }
                    } else {
                        // Use regex?
                        if (true === $useRegex) {
                            if (0 !== preg_match($value, $item)) {
                                return $page;
                            }
                        } else {
                            if ($item == $value) {
                                return $page;
                            }
                        }
                    }
                }

                continue;
            }

            // Use regex?
            if (true === $useRegex) {
                if (preg_match($value, $pageProperty)) {
                    return $page;
                }
            } else {
                if ($pageProperty == $value) {
                    return $page;
                }
            }
        }

        return null;
    }

    /**
     * Returns all child pages matching $property == $value or
     * preg_match($value, $property), or an empty array if no pages are found
     *
     * @param  string $property name of property to match against
     * @param  mixed  $value    value to match property against
     * @param  bool   $useRegex [optional] if true PHP's preg_match is used.
     *                          Default is false.
     * @return array array containing only Zend_Navigation_Page
     *               instances
     */
    public function findAllBy($property, $value, $useRegex = false)
    {
        $found = array();

        $iterator = new RecursiveIteratorIterator(
            $this,
            RecursiveIteratorIterator::SELF_FIRST
        );

        foreach ($iterator as $page) {
            $pageProperty = $page->get($property);

            // Rel and rev properties are arrays (possibly of arrays).
            if (is_array($pageProperty)) {
                foreach ($pageProperty as $item) {
                    if (is_array($item)) {
                        // Use regex?
                        if (true === $useRegex) {
                            foreach ($item as $item2) {
                                if (0 !== preg_match($value, $item2)) {
                                    $found[] = $page;
                                }
                            }
                        } else {
                            if (in_array($value, $item)) {
                                $found[] = $page;
                            }
                        }
                    } else {
                        // Use regex?
                        if (true === $useRegex) {
                            if (0 !== preg_match($value, $item)) {
                                $found[] = $page;
                            }
                        } else {
                            if ($item == $value) {
                                $found[] = $page;
                            }
                        }
                    }
                }

                continue;
            }

            // Use regex?
            if (true === $useRegex) {
                if (0 !== preg_match($value, $pageProperty)) {
                    $found[] = $page;
                }
            } else {
                if ($pageProperty == $value) {
                    $found[] = $page;
                }
            }
        }

        return $found;
    }

    /**
     * Returns page(s) matching $property == $value or
     * preg_match($value, $property)
     *
     * @param  string $property name of property to match against
     * @param  mixed  $value    value to match property against
     * @param  bool   $all      [optional] whether an array of all matching
     *                          pages should be returned, or only the first.
     *                          If true, an array will be returned, even if not
     *                          matching pages are found. If false, null will
     *                          be returned if no matching page is found.
     *                          Default is false.
     * @param  bool   $useRegex [optional] if true PHP's preg_match is used.
     *                          Default is false.
     * @return Zend_Navigation_Page|null matching page or null
     */
    public function findBy($property, $value, $all = false, $useRegex = false)
    {
        if ($all) {
            return $this->findAllBy($property, $value, $useRegex);
        } else {
            return $this->findOneBy($property, $value, $useRegex);
        }
    }

    /**
     * Magic overload: Proxy calls to finder methods
     *
     * Examples of finder calls:
     * <code>
     * // METHOD                         // SAME AS
     * $nav->findByLabel('foo');         // $nav->findOneBy('label', 'foo');
     * $nav->findByLabel('/foo/', true); // $nav->findBy('label', '/foo/', true);
     * $nav->findOneByLabel('foo');      // $nav->findOneBy('label', 'foo');
     * $nav->findAllByClass('foo');      // $nav->findAllBy('class', 'foo');
     * </code>
     *
     * @param  string $method    method name
     * @param  array  $arguments method arguments
     * @return mixed  Zend_Navigation|array|null matching page, array of pages
     *                or null
     * @throws Zend_Navigation_Exception if method does not exist
     */
    public function __call($method, $arguments)
    {
        if (@preg_match('/(find(?:One|All)?By)(.+)/', $method, $match)) {
            return $this->{$match[1]}($match[2], $arguments[0],
                                      !empty($arguments[1]));
        }

        require_once 'Zend/Navigation/Exception.php';
        throw new Zend_Navigation_Exception(
            sprintf(
                'Bad method call: Unknown method %s::%s',
                get_class($this),
                $method
            )
        );
    }

    /**
     * Returns an array representation of all pages in container
     *
     * @return array
     */
    public function toArray()
    {
        $pages = array();

        $this->_dirtyIndex = true;
        $this->_sort();
        $indexes = array_keys($this->_index);
        foreach ($indexes as $hash) {
            $pages[] = $this->_pages[$hash]->toArray();
        }

        return $pages;
    }

    // RecursiveIterator interface:

    /**
     * Returns current page
     *
     * Implements RecursiveIterator interface.
     *
     * @return Zend_Navigation_Page current page or null
     * @throws Zend_Navigation_Exception if the index is invalid
     */
    public function current()
    {
        $this->_sort();

        // (a previous no-op current() call with a discarded result was
        // removed here; key() alone identifies the current entry)
        $hash = key($this->_index);
        if (isset($this->_pages[$hash])) {
            return $this->_pages[$hash];
        }

        require_once 'Zend/Navigation/Exception.php';
        throw new Zend_Navigation_Exception(
            'Corruption detected in container; ' .
            'invalid key found in internal iterator');
    }

    /**
     * Returns hash code of current page
     *
     * Implements RecursiveIterator interface.
     *
     * @return string hash code of current page
     */
    public function key()
    {
        $this->_sort();
        return key($this->_index);
    }

    /**
     * Moves index pointer to next page in the container
     *
     * Implements RecursiveIterator interface.
     *
     * @return void
     */
    public function next()
    {
        $this->_sort();
        next($this->_index);
    }

    /**
     * Sets index pointer to first page in the container
     *
     * Implements RecursiveIterator interface.
     *
     * @return void
     */
    public function rewind()
    {
        $this->_sort();
        reset($this->_index);
    }

    /**
     * Checks if container index is valid
     *
     * Implements RecursiveIterator interface.
     *
     * @return bool
     */
    public function valid()
    {
        $this->_sort();
        return current($this->_index) !== false;
    }

    /**
     * Proxy to hasPages()
     *
     * Implements RecursiveIterator interface.
     *
     * @return bool whether container has any pages
     */
    public function hasChildren()
    {
        return $this->hasPages();
    }

    /**
     * Returns the child container.
     *
     * Implements RecursiveIterator interface.
     *
     * @return Zend_Navigation_Page|null
     */
    public function getChildren()
    {
        $hash = key($this->_index);

        if (isset($this->_pages[$hash])) {
            return $this->_pages[$hash];
        }

        return null;
    }

    // Countable interface:

    /**
     * Returns number of pages in container
     *
     * Implements Countable interface.
     *
     * @return int number of pages in the container
     */
    public function count()
    {
        return count($this->_index);
    }
}
<span>This test passes if the caret is at the start of the first line.</span>
<p id="result"></p>
<!-- Narrow editable region: a short text node followed by a long unbreakable
     run inside a span, so the span's text wraps past the first line.
     Fix: the span's style attribute was malformed ("style=color:blue;"" —
     missing the opening quote), so the declaration could not apply. -->
<div id="edit" contentEditable="true" style="width: 350px; height: 100px; border: 1px solid blue;">a <span style="color:blue;">xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx</span></div>
<script>
if (window.testRunner)
    testRunner.dumpAsText();

var edit = document.getElementById("edit");
// Put the caret two characters into the span's text node (second child of the div).
window.getSelection().collapse(edit.childNodes.item(1).childNodes.item(0), 2);
// Two backward line moves from the wrapped run should land at offset 0 of the first line.
window.getSelection().modify("move", "backward", "line");
window.getSelection().modify("move", "backward", "line");
document.getElementById("result").innerText = getSelection().baseOffset == 0 ? "PASS" : "FAIL";
</script>
<?php /** * CodeIgniter * * An open source application development framework for PHP 5.2.4 or newer * * This content is released under the MIT License (MIT) * * Copyright (c) 2014, British Columbia Institute of Technology * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * * @package CodeIgniter * @author EllisLab Dev Team * @copyright Copyright (c) 2008 - 2014, EllisLab, Inc. (http://ellislab.com/) * @copyright Copyright (c) 2014, British Columbia Institute of Technology (http://bcit.ca/) * @license http://opensource.org/licenses/MIT MIT License * @link http://codeigniter.com * @since Version 2.1.0 * @filesource */ defined('BASEPATH') OR exit('No direct script access allowed'); /** * PDO Database Adapter Class * * Note: _DB is an extender class that the app controller * creates dynamically based on whether the query builder * class is being used or not. 
 *
 * @package		CodeIgniter
 * @subpackage	Drivers
 * @category	Database
 * @author		EllisLab Dev Team
 * @link		http://codeigniter.com/user_guide/database/
 */

/**
 * PDO Database Adapter Class
 *
 * Generic PDO front-end: the concrete behavior is delegated to a
 * subdriver ('mysql', 'pgsql', 'sqlite', ...) detected from the DSN
 * prefix in the constructor.
 */
class CI_DB_pdo_driver extends CI_DB {

	/**
	 * Database driver
	 *
	 * @var	string
	 */
	public $dbdriver = 'pdo';

	/**
	 * PDO Options
	 *
	 * Driver options array passed verbatim as the 4th argument of the
	 * PDO constructor in db_connect().
	 *
	 * @var	array
	 */
	public $options = array();

	// --------------------------------------------------------------------

	/**
	 * Class constructor
	 *
	 * Validates the DSN string and/or detects the subdriver.
	 *
	 * Detection order: full DSN in $this->dsn, then a legacy DSN stored
	 * in the hostname field, then legacy subdriver aliases, and finally
	 * a whitelist check of the configured subdriver name.
	 *
	 * @param	array	$params
	 * @return	void
	 */
	public function __construct($params)
	{
		parent::__construct($params);

		if (preg_match('/([^:]+):/', $this->dsn, $match) && count($match) === 2)
		{
			// If there is a minimum valid dsn string pattern found, we're done
			// This is for general PDO users, who tend to have a full DSN string.
			$this->subdriver = $match[1];
			return;
		}
		// Legacy support for DSN specified in the hostname field
		elseif (preg_match('/([^:]+):/', $this->hostname, $match) && count($match) === 2)
		{
			$this->dsn = $this->hostname;
			$this->hostname = NULL;
			$this->subdriver = $match[1];
			return;
		}
		// PDO spells the mssql/sybase driver 'dblib'
		elseif (in_array($this->subdriver, array('mssql', 'sybase'), TRUE))
		{
			$this->subdriver = 'dblib';
		}
		elseif ($this->subdriver === '4D')
		{
			$this->subdriver = '4d';
		}
		elseif ( ! in_array($this->subdriver, array('4d', 'cubrid', 'dblib', 'firebird', 'ibm', 'informix', 'mysql', 'oci', 'odbc', 'pgsql', 'sqlite', 'sqlsrv'), TRUE))
		{
			log_message('error', 'PDO: Invalid or non-existent subdriver');

			if ($this->db_debug)
			{
				show_error('Invalid or non-existent PDO subdriver');
			}
		}

		// No usable DSN was found above; clear it so the subdriver
		// class builds its own from the individual connection params.
		$this->dsn = NULL;
	}

	// --------------------------------------------------------------------

	/**
	 * Database connection
	 *
	 * @param	bool	$persistent	whether to request a persistent connection
	 * @return	object	PDO instance on success, FALSE on failure
	 */
	public function db_connect($persistent = FALSE)
	{
		$this->options[PDO::ATTR_PERSISTENT] = $persistent;

		try
		{
			return new PDO($this->dsn, $this->username, $this->password, $this->options);
		}
		catch (PDOException $e)
		{
			// Only surface the error when debugging and no failover
			// connection is configured to try next.
			if ($this->db_debug && empty($this->failover))
			{
				$this->display_error($e->getMessage(), '', TRUE);
			}

			return FALSE;
		}
	}

	// --------------------------------------------------------------------

	/**
	 * Database version number
	 *
	 * @return	string
	 */
	public function version()
	{
		if (isset($this->data_cache['version']))
		{
			return $this->data_cache['version'];
		}
		elseif ( ! $this->conn_id)
		{
			$this->initialize();
		}

		// Not all subdrivers support the getAttribute() method
		try
		{
			return $this->data_cache['version'] = $this->conn_id->getAttribute(PDO::ATTR_SERVER_VERSION);
		}
		catch (PDOException $e)
		{
			return parent::version();
		}
	}

	// --------------------------------------------------------------------

	/**
	 * Execute the query
	 *
	 * @param	string	$sql	SQL query
	 * @return	mixed	PDOStatement on success, FALSE on failure
	 */
	protected function _execute($sql)
	{
		return $this->conn_id->query($sql);
	}

	// --------------------------------------------------------------------

	/**
	 * Begin Transaction
	 *
	 * @param	bool	$test_mode	roll back even on success when TRUE
	 * @return	bool
	 */
	public function trans_begin($test_mode = FALSE)
	{
		// When transactions are nested we only begin/commit/rollback the outermost ones
		if ( ! $this->trans_enabled OR $this->_trans_depth > 0)
		{
			return TRUE;
		}

		// Reset the transaction failure flag.
		// If the $test_mode flag is set to TRUE transactions will be rolled back
		// even if the queries produce a successful result.
		$this->_trans_failure = ($test_mode === TRUE);

		return $this->conn_id->beginTransaction();
	}

	// --------------------------------------------------------------------

	/**
	 * Commit Transaction
	 *
	 * @return	bool
	 */
	public function trans_commit()
	{
		// When transactions are nested we only begin/commit/rollback the outermost ones
		if ( ! $this->trans_enabled OR $this->_trans_depth > 0)
		{
			return TRUE;
		}

		return $this->conn_id->commit();
	}

	// --------------------------------------------------------------------

	/**
	 * Rollback Transaction
	 *
	 * @return	bool
	 */
	public function trans_rollback()
	{
		// When transactions are nested we only begin/commit/rollback the outermost ones
		if ( ! $this->trans_enabled OR $this->_trans_depth > 0)
		{
			return TRUE;
		}

		return $this->conn_id->rollBack();
	}

	// --------------------------------------------------------------------

	/**
	 * Platform-dependant string escape
	 *
	 * Uses PDO::quote(), then strips the surrounding quotes it adds,
	 * because callers of _escape_str() expect the bare escaped value.
	 *
	 * @param	string
	 * @return	string
	 */
	protected function _escape_str($str)
	{
		// Escape the string
		$str = $this->conn_id->quote($str);

		// If there are duplicated quotes, trim them away
		return ($str[0] === "'")
			? substr($str, 1, -1)
			: $str;
	}

	// --------------------------------------------------------------------

	/**
	 * Affected Rows
	 *
	 * @return	int	rows affected by the last statement, 0 if no result object
	 */
	public function affected_rows()
	{
		return is_object($this->result_id) ? $this->result_id->rowCount() : 0;
	}

	// --------------------------------------------------------------------

	/**
	 * Insert ID
	 *
	 * @param	string	$name	sequence name, for drivers that need one (e.g. pgsql)
	 * @return	int
	 */
	public function insert_id($name = NULL)
	{
		return $this->conn_id->lastInsertId($name);
	}

	// --------------------------------------------------------------------

	/**
	 * Field data query
	 *
	 * Generates a platform-specific query so that the column data can be retrieved
	 *
	 * @param	string	$table
	 * @return	string
	 */
	protected function _field_data($table)
	{
		return 'SELECT TOP 1 * FROM '.$this->protect_identifiers($table);
	}

	// --------------------------------------------------------------------

	/**
	 * Error
	 *
	 * Returns an array containing code and message of the last
	 * database error that has occured.
	 *
	 * @return	array	keys: 'code' (SQLSTATE[/driver code]) and 'message'
	 */
	public function error()
	{
		// '00000' is the SQLSTATE success code
		$error = array('code' => '00000', 'message' => '');

		$pdo_error = $this->conn_id->errorInfo();

		if (empty($pdo_error[0]))
		{
			return $error;
		}

		// errorInfo(): [0] SQLSTATE, [1] driver-specific code, [2] driver message
		$error['code'] = isset($pdo_error[1]) ? $pdo_error[0].'/'.$pdo_error[1] : $pdo_error[0];
		if (isset($pdo_error[2]))
		{
			$error['message'] = $pdo_error[2];
		}

		return $error;
	}

	// --------------------------------------------------------------------

	/**
	 * Update_Batch statement
	 *
	 * Generates a platform-specific batch update string from the supplied data
	 * using a CASE ... WHEN expression per updated column.
	 *
	 * NOTE(review): $final is used without being initialized first, and the
	 * $val[$index] / $val[$field] values are concatenated into the SQL without
	 * escaping — presumably the query builder escapes them upstream; verify
	 * against the callers before relying on this with raw input.
	 *
	 * @param	string	$table	Table name
	 * @param	array	$values	Update data
	 * @param	string	$index	WHERE key
	 * @return	string
	 */
	protected function _update_batch($table, $values, $index)
	{
		$ids = array();
		foreach ($values as $key => $val)
		{
			$ids[] = $val[$index];

			foreach (array_keys($val) as $field)
			{
				if ($field !== $index)
				{
					$final[$field][] = 'WHEN '.$index.' = '.$val[$index].' THEN '.$val[$field];
				}
			}
		}

		$cases = '';
		foreach ($final as $k => $v)
		{
			$cases .= $k.' = CASE '."\n";

			foreach ($v as $row)
			{
				$cases .= $row."\n";
			}

			$cases .= 'ELSE '.$k.' END, ';
		}

		$this->where($index.' IN('.implode(',', $ids).')', NULL, FALSE);

		// Trailing ', ' is trimmed off the CASE list by substr()
		return 'UPDATE '.$table.' SET '.substr($cases, 0, -2).$this->_compile_wh('qb_where');
	}

	// --------------------------------------------------------------------

	/**
	 * Truncate statement
	 *
	 * Generates a platform-specific truncate string from the supplied data
	 *
	 * If the database does not support the TRUNCATE statement,
	 * then this method maps to 'DELETE FROM table'
	 *
	 * @param	string	$table
	 * @return	string
	 */
	protected function _truncate($table)
	{
		return 'TRUNCATE TABLE '.$table;
	}

}

/* End of file pdo_driver.php */
/* Location: ./system/database/drivers/pdo/pdo_driver.php */
/*
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Tracepoint definitions for the UFS host controller driver (ufshcd).
 * This header is included multiple times by the trace machinery, hence
 * the TRACE_HEADER_MULTI_READ clause in the include guard below.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ufs

#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_UFS_H

#include <linux/tracepoint.h>

/*
 * Template for simple "state changed to <state>" events; used for clock
 * gating, hibern8-on-idle and auto-BKOPS state transitions below.
 */
DECLARE_EVENT_CLASS(ufshcd_state_change_template,
	TP_PROTO(const char *dev_name, const char *state),

	TP_ARGS(dev_name, state),

	TP_STRUCT__entry(
		__string(dev_name, dev_name)
		__string(state, state)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name);
		__assign_str(state, state);
	),

	TP_printk("%s: state changed to %s",
		__get_str(dev_name), __get_str(state))
);

DEFINE_EVENT(ufshcd_state_change_template, ufshcd_clk_gating,
	TP_PROTO(const char *dev_name, const char *state),
	TP_ARGS(dev_name, state));
DEFINE_EVENT(ufshcd_state_change_template, ufshcd_hibern8_on_idle,
	TP_PROTO(const char *dev_name, const char *state),
	TP_ARGS(dev_name, state));
DEFINE_EVENT(ufshcd_state_change_template, ufshcd_auto_bkops_state,
	TP_PROTO(const char *dev_name, const char *state),
	TP_ARGS(dev_name, state));

/* Clock scaling event: records a clock's frequency transition in Hz. */
TRACE_EVENT(ufshcd_clk_scaling,

	TP_PROTO(const char *dev_name, const char *state, const char *clk,
		u32 prev_state, u32 curr_state),

	TP_ARGS(dev_name, state, clk, prev_state, curr_state),

	TP_STRUCT__entry(
		__string(dev_name, dev_name)
		__string(state, state)
		__string(clk, clk)
		__field(u32, prev_state)
		__field(u32, curr_state)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name);
		__assign_str(state, state);
		__assign_str(clk, clk);
		__entry->prev_state = prev_state;
		__entry->curr_state = curr_state;
	),

	TP_printk("%s: %s %s from %u to %u Hz",
		__get_str(dev_name), __get_str(state), __get_str(clk),
		__entry->prev_state, __entry->curr_state)
);

/*
 * Template for profiling events: how long an operation took (in usecs)
 * and its error code, if any.
 */
DECLARE_EVENT_CLASS(ufshcd_profiling_template,
	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
		 int err),

	TP_ARGS(dev_name, profile_info, time_us, err),

	TP_STRUCT__entry(
		__string(dev_name, dev_name)
		__string(profile_info, profile_info)
		__field(s64, time_us)
		__field(int, err)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name);
		__assign_str(profile_info, profile_info);
		__entry->time_us = time_us;
		__entry->err = err;
	),

	TP_printk("%s: %s: took %lld usecs, err %d",
		__get_str(dev_name), __get_str(profile_info),
		__entry->time_us, __entry->err)
);

DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
		 int err),
	TP_ARGS(dev_name, profile_info, time_us, err));

DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
		 int err),
	TP_ARGS(dev_name, profile_info, time_us, err));

DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
		 int err),
	TP_ARGS(dev_name, profile_info, time_us, err));

/*
 * Template for PM transitions (suspend/resume/init): duration, error
 * code, and the resulting device and link power states.
 */
DECLARE_EVENT_CLASS(ufshcd_template,
	TP_PROTO(const char *dev_name, int err, s64 usecs,
		 const char *dev_state, const char *link_state),

	TP_ARGS(dev_name, err, usecs, dev_state, link_state),

	TP_STRUCT__entry(
		__field(s64, usecs)
		__field(int, err)
		__string(dev_name, dev_name)
		__string(dev_state, dev_state)
		__string(link_state, link_state)
	),

	TP_fast_assign(
		__entry->usecs = usecs;
		__entry->err = err;
		__assign_str(dev_name, dev_name);
		__assign_str(dev_state, dev_state);
		__assign_str(link_state, link_state);
	),

	TP_printk(
		"%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
		__get_str(dev_name),
		__entry->usecs,
		__get_str(dev_state),
		__get_str(link_state),
		__entry->err
	)
);

DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
	TP_PROTO(const char *dev_name, int err, s64 usecs,
		 const char *dev_state, const char *link_state),
	TP_ARGS(dev_name, err, usecs, dev_state, link_state));

DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
	TP_PROTO(const char *dev_name, int err, s64 usecs,
		 const char *dev_state, const char *link_state),
	TP_ARGS(dev_name, err, usecs, dev_state, link_state));

DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
	TP_PROTO(const char *dev_name, int err, s64 usecs,
		 const char *dev_state, const char *link_state),
	TP_ARGS(dev_name, err, usecs, dev_state, link_state));

DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
	TP_PROTO(const char *dev_name, int err, s64 usecs,
		 const char *dev_state, const char *link_state),
	TP_ARGS(dev_name, err, usecs, dev_state, link_state));

DEFINE_EVENT(ufshcd_template, ufshcd_init,
	TP_PROTO(const char *dev_name, int err, s64 usecs,
		 const char *dev_state, const char *link_state),
	TP_ARGS(dev_name, err, usecs, dev_state, link_state));

/*
 * Per-command trace: request tag, doorbell snapshot, transfer size,
 * interrupt status, LBA and SCSI opcode for a send/complete event
 * (distinguished by the 'str' argument).
 */
TRACE_EVENT(ufshcd_command,
	TP_PROTO(const char *dev_name, const char *str, unsigned int tag,
			u32 doorbell, int transfer_len, u32 intr, u64 lba,
			u8 opcode),

	TP_ARGS(dev_name, str, tag, doorbell, transfer_len, intr, lba, opcode),

	TP_STRUCT__entry(
		__string(dev_name, dev_name)
		__string(str, str)
		__field(unsigned int, tag)
		__field(u32, doorbell)
		__field(int, transfer_len)
		__field(u32, intr)
		__field(u64, lba)
		__field(u8, opcode)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name);
		__assign_str(str, str);
		__entry->tag = tag;
		__entry->doorbell = doorbell;
		__entry->transfer_len = transfer_len;
		__entry->intr = intr;
		__entry->lba = lba;
		__entry->opcode = opcode;
	),

	TP_printk(
		"%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x",
		__get_str(str), __get_str(dev_name), __entry->tag,
		__entry->doorbell, __entry->transfer_len,
		__entry->intr, __entry->lba, (u32)__entry->opcode
	)
);

#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */

/* This part must be outside protection */
#include <trace/define_trace.h>
/*
 *      Copyright (C) 2005-2013 Team XBMC
 *      http://xbmc.org
 *
 *  This Program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This Program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with XBMC; see the file COPYING.  If not, see
 *  <http://www.gnu.org/licenses/>.
 *
 */

#ifndef RENDER_SYSTEM_GLES_H
#define RENDER_SYSTEM_GLES_H

#pragma once

#include "system.h"
#include "system_gl.h"

#include "rendering/RenderSystem.h"
#include "xbmc/guilib/GUIShader.h"

// GUI shader selection: one entry per shader program compiled by
// InitialiseGUIShader(); passed to EnableGUIShader() before drawing.
enum ESHADERMETHOD
{
  SM_DEFAULT,
  SM_TEXTURE,
  SM_MULTI,
  SM_FONTS,
  SM_TEXTURE_NOBLEND,
  SM_MULTI_BLENDCOLOR,
  SM_TEXTURE_RGBA,
  SM_TEXTURE_RGBA_OES,
  SM_TEXTURE_RGBA_BLENDCOLOR,
  SM_TEXTURE_RGBA_BOB,
  SM_TEXTURE_RGBA_BOB_OES,
  SM_ESHADERCOUNT  // sentinel: number of shader methods, keep last
};

/**
 * OpenGL ES implementation of the XBMC render system.
 *
 * Abstract: platform backends must still supply SetVSyncImpl() and
 * PresentRenderImpl().
 */
class CRenderSystemGLES : public CRenderSystemBase
{
public:
  CRenderSystemGLES();
  virtual ~CRenderSystemGLES();

  bool InitRenderSystem() override;
  bool DestroyRenderSystem() override;
  bool ResetRenderSystem(int width, int height, bool fullScreen, float refreshRate) override;

  bool BeginRender() override;
  bool EndRender() override;
  void PresentRender(bool rendered, bool videoLayer) override;
  bool ClearBuffers(color_t color) override;
  bool IsExtSupported(const char* extension) override;

  void SetVSync(bool vsync);
  // Force re-evaluation of the vsync mode on the next SetVSync() call
  void ResetVSync() { m_bVsyncInit = false; }

  void SetViewPort(CRect& viewPort) override;
  void GetViewPort(CRect& viewPort) override;

  bool ScissorsCanEffectClipping() override;
  CRect ClipRectToScissorRect(const CRect &rect) override;
  void SetScissors(const CRect& rect) override;
  void ResetScissors() override;

  void CaptureStateBlock() override;
  void ApplyStateBlock() override;

  void SetCameraPosition(const CPoint &camera, int screenWidth, int screenHeight, float stereoFactor = 0.0f) override;

  void ApplyHardwareTransform(const TransformMatrix &matrix) override;
  void RestoreHardwareTransform() override;
  bool SupportsStereo(RENDER_STEREO_MODE mode) const override;

  bool TestRender() override;

  void Project(float &x, float &y, float &z) override;

  // GUI shader management: compile all programs, then bracket draw
  // calls with Enable/DisableGUIShader().
  void InitialiseGUIShader();
  void EnableGUIShader(ESHADERMETHOD method);
  void DisableGUIShader();

  // Attribute/uniform locations of the currently enabled GUI shader
  GLint GUIShaderGetPos();
  GLint GUIShaderGetCol();
  GLint GUIShaderGetCoord0();
  GLint GUIShaderGetCoord1();
  GLint GUIShaderGetUniCol();
  GLint GUIShaderGetCoord0Matrix();
  GLint GUIShaderGetField();
  GLint GUIShaderGetStep();
  GLint GUIShaderGetContrast();
  GLint GUIShaderGetBrightness();
  GLint GUIShaderGetModel();

protected:
  virtual void SetVSyncImpl(bool enable) = 0;
  virtual void PresentRenderImpl(bool rendered) = 0;
  void CalculateMaxTexturesize();

  int        m_iVSyncMode;
  int        m_iVSyncErrors;
  bool       m_bVsyncInit;
  int        m_width;
  int        m_height;

  std::string m_RenderExtensions;

  CGUIShader  **m_pGUIshader = nullptr;  // One GUI shader for each method
  ESHADERMETHOD m_method = SM_DEFAULT;   // Current GUI Shader method

  GLint      m_viewPort[4];
};

#endif // RENDER_SYSTEM_GLES_H
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.itest.springboot; import org.apache.camel.itest.springboot.util.ArquillianPackager; import org.jboss.arquillian.container.test.api.Deployment; import org.jboss.arquillian.junit.Arquillian; import org.jboss.shrinkwrap.api.Archive; import org.junit.Test; import org.junit.runner.RunWith; @RunWith(Arquillian.class) public class CamelUnivocityParsersTest extends AbstractSpringBootTestSupport { @Deployment public static Archive<?> createSpringBootPackage() throws Exception { return ArquillianPackager.springBootPackage(createTestConfig()); } public static ITestConfig createTestConfig() { return new ITestConfigBuilder() .module(inferModuleName(CamelUnivocityParsersTest.class)) .build(); } @Test public void componentTests() throws Exception { this.runDataformatTest(config, "univocity-csv"); this.runDataformatTest(config, "univocity-fixed"); this.runDataformatTest(config, "univocity-tsv"); this.runModuleUnitTestsIfEnabled(config); } }
package com.marshalchen.ultimaterecyclerview.demo;

import android.graphics.Color;
import android.os.Bundle;
import android.os.Handler;
import android.support.v4.widget.SwipeRefreshLayout;
import android.support.v7.app.ActionBarActivity;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.ActionMode;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;

import com.google.android.gms.ads.AdRequest;
import com.google.android.gms.ads.AdSize;
import com.google.android.gms.ads.AdView;
import com.marshalchen.ultimaterecyclerview.AdmobAdapter;
import com.marshalchen.ultimaterecyclerview.URLogs;
import com.marshalchen.ultimaterecyclerview.UltimateRecyclerView;
import com.marshalchen.ultimaterecyclerview.demo.modules.FastBinding;
import com.marshalchen.ultimaterecyclerview.demo.modules.SampleDataboxset;
import com.marshalchen.ultimaterecyclerview.demo.modules.admobdfpadapter;

import java.util.ArrayList;
import java.util.List;

/**
 * Demo activity showing AdMob banner ads interleaved into an
 * UltimateRecyclerView list via the admobdfpadapter.
 *
 * Created by hesk on 20/5/15.
 */
public class TestAdMob extends AppCompatActivity {
    UltimateRecyclerView ultimateRecyclerView;
    admobdfpadapter simpleRecyclerViewAdapter = null;
    LinearLayoutManager linearLayoutManager;
    // Counter used to label items inserted by pull-to-refresh
    int moreNum = 2;
    private ActionMode actionMode;
    Toolbar toolbar;
    boolean isDrag = true;
    // When true, ad requests target the emulator test device only
    private boolean admob_test_mode = false;

    /**
     * Builds a medium-rectangle AdView, kicks off an ad request for it,
     * and returns it ready to be placed into the list.
     */
    private AdView createadmob() {
        AdView mAdView = new AdView(this);
        mAdView.setAdSize(AdSize.MEDIUM_RECTANGLE);
        mAdView.setAdUnitId("/1015938/Hypebeast_App_320x50");
        mAdView.setLayoutParams(new ViewGroup.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT));
        // Create an ad request.
        AdRequest.Builder adRequestBuilder = new AdRequest.Builder();
        if (admob_test_mode)
            // Optionally populate the ad request builder.
            adRequestBuilder.addTestDevice(AdRequest.DEVICE_ID_EMULATOR);
        // Start loading the ad.
        mAdView.loadAd(adRequestBuilder.build());
        return mAdView;
    }

    // Intentionally empty: swipe-to-dismiss is not used in this demo
    private void enableSwipe() {

    }

    /**
     * Wires pull-to-refresh (inserts one item after a 1s delay) and
     * load-more (appends one item after a 5s delay) behaviors, and
     * installs a custom load-more progress view.
     */
    private void enableRefreshAndLoadMore() {
        ultimateRecyclerView.setDefaultOnRefreshListener(new SwipeRefreshLayout.OnRefreshListener() {
            @Override
            public void onRefresh() {
                new Handler().postDelayed(new Runnable() {
                    @Override
                    public void run() {
                        simpleRecyclerViewAdapter.insert(moreNum++ + "  Refresh things");
                        ultimateRecyclerView.setRefreshing(false);
                        //  ultimateRecyclerView.scrollBy(0, -50);
                        // Jump back to the top so the new item is visible
                        linearLayoutManager.scrollToPosition(0);
                        //   ultimateRecyclerView.setAdapter(simpleRecyclerViewAdapter);
                        //  simpleRecyclerViewAdapter.notifyDataSetChanged();
                    }
                }, 1000);
            }
        });

        ultimateRecyclerView.setOnLoadMoreListener(new UltimateRecyclerView.OnLoadMoreListener() {
            @Override
            public void loadMore(int itemsCount, final int maxLastVisiblePosition) {
                Handler handler = new Handler();
                handler.postDelayed(new Runnable() {
                    public void run() {
                        Log.d("loadmore", maxLastVisiblePosition + " position");
                        SampleDataboxset.insertMore(simpleRecyclerViewAdapter, 1);
                        //  linearLayoutManager.scrollToPosition(linearLayoutManager.getChildCount() - 1);
                    }
                }, 5000);
            }
        });
        simpleRecyclerViewAdapter.setCustomLoadMoreView(LayoutInflater.from(this).inflate(R.layout.custom_bottom_progressbar, null));
        ultimateRecyclerView.enableLoadmore();
    }

    // Intentionally empty: item-click handling is not used in this demo
    private void enableClick() {

    }

    /**
     * Hooks the demo's add/del buttons: add appends one generated item,
     * del removes the item at position 3.
     */
    private void impleAddDrop() {
        findViewById(R.id.add).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                SampleDataboxset.insertMore(simpleRecyclerViewAdapter, 1);
            }
        });
        findViewById(R.id.del).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                simpleRecyclerViewAdapter.remove(3);
            }
        });
    }

    /**
     * Standard activity setup: toolbar, recycler view, the ad-aware
     * adapter (one AdView generated per ad slot), then the demo's
     * refresh/load-more/click/add-drop wiring.
     */
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        toolbar = (Toolbar) findViewById(R.id.tool_bar);
        setSupportActionBar(toolbar);
        getSupportActionBar().setDisplayShowTitleEnabled(false);
        ultimateRecyclerView = (UltimateRecyclerView) findViewById(R.id.ultimate_recycler_view);
        ultimateRecyclerView.setHasFixedSize(false);
        /**
         * wokring example 1 implementation of Admob banner with static Adview
         */
        // simpleRecyclerViewAdapter = new admobdfpadapter(createadmob(), 5, stringList);
        /**
         * working example 2 with multiple called Adviews
         */
        simpleRecyclerViewAdapter = new admobdfpadapter(createadmob(), 3, SampleDataboxset.newListFromGen(), new AdmobAdapter.AdviewListener() {
            @Override
            public AdView onGenerateAdview() {
                return createadmob();
            }
        });
        linearLayoutManager = new LinearLayoutManager(this);
        ultimateRecyclerView.setLayoutManager(linearLayoutManager);
        ultimateRecyclerView.setAdapter(simpleRecyclerViewAdapter);
        ultimateRecyclerView.setRecylerViewBackgroundColor(Color.parseColor("#ffffff"));
        enableRefreshAndLoadMore();
        enableClick();
        impleAddDrop();
    }

    // NOTE(review): actionMode is only assigned via the (unused) action
    // mode flow — if toggleSelection() is ever called before an action
    // mode starts, actionMode is null here; verify before wiring it up.
    private void toggleSelection(int position) {
        simpleRecyclerViewAdapter.toggleSelection(position);
        actionMode.setTitle("Selected " + "1");
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
    }

    public int getScreenHeight() {
        return findViewById(android.R.id.content).getHeight();
    }

    //
    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.menu_main, menu);
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        // Delegate menu navigation to the demo's activity launcher
        FastBinding.startactivity(this, item.getItemId());
        return super.onOptionsItemSelected(item);
    }
}
<?php
/**
 * Zend Framework
 *
 * LICENSE
 *
 * This source file is subject to the new BSD license that is bundled
 * with this package in the file LICENSE.txt.
 * It is also available through the world-wide-web at this URL:
 * http://framework.zend.com/license/new-bsd
 * If you did not receive a copy of the license and are unable to
 * obtain it through the world-wide-web, please send an email
 * to license@zend.com so we can send you a copy immediately.
 *
 * @category   Zend
 * @package    Zend_InfoCard
 * @subpackage Zend_InfoCard_Xml
 * @copyright  Copyright (c) 2005-2012 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd     New BSD License
 * @version    $Id$
 */

/**
 * Zend_InfoCard_Xml_Element
 */
require_once 'Zend/InfoCard/Xml/Element.php';

/**
 * Represents a SecurityTokenReference XML block
 *
 * @category   Zend
 * @package    Zend_InfoCard
 * @subpackage Zend_InfoCard_Xml
 * @copyright  Copyright (c) 2005-2012 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd     New BSD License
 */
class Zend_InfoCard_Xml_SecurityTokenReference extends Zend_InfoCard_Xml_Element
{
    /**
     * Base64 Binary Encoding URI
     */
    const ENCODING_BASE64BIN = 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary';

    /**
     * Return an instance of the object based on the input XML
     *
     * Accepts either a raw XML string or an existing element object;
     * validates that the root element is SecurityTokenReference before
     * re-parsing the XML into this class.
     *
     * @param string $xmlData The SecurityTokenReference XML Block
     * @return Zend_InfoCard_Xml_SecurityTokenReference
     * @throws Zend_InfoCard_Xml_Exception
     */
    static public function getInstance($xmlData)
    {
        if($xmlData instanceof Zend_InfoCard_Xml_Element) {
            $strXmlData = $xmlData->asXML();
        } else if (is_string($xmlData)) {
            $strXmlData = $xmlData;
        } else {
            throw new Zend_InfoCard_Xml_Exception("Invalid Data provided to create instance");
        }

        $sxe = simplexml_load_string($strXmlData);

        if($sxe->getName() != "SecurityTokenReference") {
            throw new Zend_InfoCard_Xml_Exception("Invalid XML Block provided for SecurityTokenReference");
        }

        return simplexml_load_string($strXmlData, "Zend_InfoCard_Xml_SecurityTokenReference");
    }

    /**
     * Return the Key Identifier XML Object
     *
     * Looks up the o:KeyIdentifier element via XPath in the WS-Security
     * extension namespace.
     *
     * @return Zend_InfoCard_Xml_Element
     * @throws Zend_InfoCard_Xml_Exception
     */
    protected function _getKeyIdentifier()
    {
        $this->registerXPathNamespace('o', 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd');
        list($keyident) = $this->xpath('//o:KeyIdentifier');

        if(!($keyident instanceof Zend_InfoCard_Xml_Element)) {
            throw new Zend_InfoCard_Xml_Exception("Failed to retrieve Key Identifier");
        }

        return $keyident;
    }

    /**
     * Return the Key URI identifying the thumbprint type used
     *
     * @return string The thumbprint type URI
     * @throws Zend_InfoCard_Xml_Exception
     */
    public function getKeyThumbprintType()
    {

        $keyident = $this->_getKeyIdentifier();

        $dom = self::convertToDOM($keyident);

        if(!$dom->hasAttribute('ValueType')) {
            throw new Zend_InfoCard_Xml_Exception("Key Identifier did not provide a type for the value");
        }

        return $dom->getAttribute('ValueType');
    }


    /**
     * Return the thumbprint encoding type used as a URI
     *
     * @return string the URI of the thumbprint encoding used
     * @throws Zend_InfoCard_Xml_Exception
     */
    public function getKeyThumbprintEncodingType()
    {

        $keyident = $this->_getKeyIdentifier();

        $dom = self::convertToDOM($keyident);

        if(!$dom->hasAttribute('EncodingType')) {
            throw new Zend_InfoCard_Xml_Exception("Unable to determine the encoding type for the key identifier");
        }

        return $dom->getAttribute('EncodingType');
    }

    /**
     * Get the key reference data used to identify the public key
     *
     * Only base64-binary encoding is supported for decoding; any other
     * EncodingType URI raises an exception.
     *
     * @param bool $decode if true, will return a decoded version of the key
     * @return string the key reference thumbprint, either in binary or encoded form
     * @throws Zend_InfoCard_Xml_Exception
     */
    public function getKeyReference($decode = true)
    {
        $keyIdentifier = $this->_getKeyIdentifier();

        $dom = self::convertToDOM($keyIdentifier);

        $encoded = $dom->nodeValue;

        if(empty($encoded)) {
            throw new Zend_InfoCard_Xml_Exception("Could not find the Key Reference Encoded Value");
        }

        if($decode) {

            $decoded = "";
            switch($this->getKeyThumbprintEncodingType()) {
                case self::ENCODING_BASE64BIN:
                    // Strict base64 decoding (reject invalid chars) is
                    // only available from PHP 5.2.0 onward
                    if(version_compare(PHP_VERSION, "5.2.0", ">=")) {
                        $decoded = base64_decode($encoded, true);
                    } else {
                        $decoded = base64_decode($encoded);
                    }
                    break;
                default:
                    throw new Zend_InfoCard_Xml_Exception("Unknown Key Reference Encoding Type: {$this->getKeyThumbprintEncodingType()}");
            }

            if(!$decoded || empty($decoded)) {
                throw new Zend_InfoCard_Xml_Exception("Failed to decode key reference");
            }

            return $decoded;
        }

        return $encoded;
    }
}
<?php
/**
 * Zend Framework
 *
 * LICENSE
 *
 * This source file is subject to the new BSD license that is bundled
 * with this package in the file LICENSE.txt.
 * It is also available through the world-wide-web at this URL:
 * http://framework.zend.com/license/new-bsd
 * If you did not receive a copy of the license and are unable to
 * obtain it through the world-wide-web, please send an email
 * to license@zend.com so we can send you a copy immediately.
 *
 * @category   Zend
 * @package    Zend_Db
 * @subpackage Profiler
 * @copyright  Copyright (c) 2005-2012 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd     New BSD License
 * @version    $Id$
 */

/**
 * @see Zend_Db_Exception
 */
require_once 'Zend/Db/Exception.php';

/**
 * Marker exception for errors raised by the Zend_Db profiler; carries
 * no behavior of its own and exists so callers can catch profiler
 * failures separately from other Zend_Db exceptions.
 *
 * @category   Zend
 * @package    Zend_Db
 * @subpackage Profiler
 * @copyright  Copyright (c) 2005-2012 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd     New BSD License
 */
class Zend_Db_Profiler_Exception extends Zend_Db_Exception
{
}
/**
 * Spanish (es) pluralization rule for MessageFormat.
 * CLDR plural categories for Spanish: exactly 1 is "one",
 * everything else (including 0 and fractions) is "other".
 */
MessageFormat.locale.es = function (n) {
  return (n === 1) ? "one" : "other";
};
/* binder.c * * Android IPC Subsystem * * Copyright (C) 2007-2008 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <asm/cacheflush.h> #include <linux/fdtable.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/miscdevice.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/nsproxy.h> #include <linux/poll.h> #include <linux/debugfs.h> #include <linux/rbtree.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/pid_namespace.h> #include "binder.h" #include "binder_trace.h" static DEFINE_MUTEX(binder_main_lock); static DEFINE_MUTEX(binder_deferred_lock); static DEFINE_MUTEX(binder_mmap_lock); static HLIST_HEAD(binder_procs); static HLIST_HEAD(binder_deferred_list); static HLIST_HEAD(binder_dead_nodes); static struct dentry *binder_debugfs_dir_entry_root; static struct dentry *binder_debugfs_dir_entry_proc; static struct binder_node *binder_context_mgr_node; static kuid_t binder_context_mgr_uid = INVALID_UID; static int binder_last_id; static struct workqueue_struct *binder_deferred_workqueue; #define BINDER_DEBUG_ENTRY(name) \ static int binder_##name##_open(struct inode *inode, struct file *file) \ { \ return single_open(file, binder_##name##_show, inode->i_private); \ } \ \ static const struct file_operations binder_##name##_fops = { \ .owner = THIS_MODULE, \ .open = binder_##name##_open, \ .read = seq_read, \ .llseek = seq_lseek, \ 
.release = single_release, \ } static int binder_proc_show(struct seq_file *m, void *unused); BINDER_DEBUG_ENTRY(proc); /* This is only defined in include/asm-arm/sizes.h */ #ifndef SZ_1K #define SZ_1K 0x400 #endif #ifndef SZ_4M #define SZ_4M 0x400000 #endif #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) enum { BINDER_DEBUG_USER_ERROR = 1U << 0, BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, BINDER_DEBUG_OPEN_CLOSE = 1U << 3, BINDER_DEBUG_DEAD_BINDER = 1U << 4, BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, BINDER_DEBUG_READ_WRITE = 1U << 6, BINDER_DEBUG_USER_REFS = 1U << 7, BINDER_DEBUG_THREADS = 1U << 8, BINDER_DEBUG_TRANSACTION = 1U << 9, BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, BINDER_DEBUG_FREE_BUFFER = 1U << 11, BINDER_DEBUG_INTERNAL_REFS = 1U << 12, BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, BINDER_DEBUG_PRIORITY_CAP = 1U << 14, BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, }; static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); static bool binder_debug_no_lock; module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); static int binder_stop_on_user_error; static int binder_set_stop_on_user_error(const char *val, struct kernel_param *kp) { int ret; ret = param_set_int(val, kp); if (binder_stop_on_user_error < 2) wake_up(&binder_user_error_wait); return ret; } module_param_call(stop_on_user_error, binder_set_stop_on_user_error, param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); #define binder_debug(mask, x...) \ do { \ if (binder_debug_mask & mask) \ pr_info(x); \ } while (0) #define binder_user_error(x...) 
\ do { \ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ pr_info(x); \ if (binder_stop_on_user_error) \ binder_stop_on_user_error = 2; \ } while (0) enum binder_stat_types { BINDER_STAT_PROC, BINDER_STAT_THREAD, BINDER_STAT_NODE, BINDER_STAT_REF, BINDER_STAT_DEATH, BINDER_STAT_TRANSACTION, BINDER_STAT_TRANSACTION_COMPLETE, BINDER_STAT_COUNT }; struct binder_stats { int br[_IOC_NR(BR_FAILED_REPLY) + 1]; int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; int obj_created[BINDER_STAT_COUNT]; int obj_deleted[BINDER_STAT_COUNT]; }; static struct binder_stats binder_stats; static inline void binder_stats_deleted(enum binder_stat_types type) { binder_stats.obj_deleted[type]++; } static inline void binder_stats_created(enum binder_stat_types type) { binder_stats.obj_created[type]++; } struct binder_transaction_log_entry { int debug_id; int call_type; int from_proc; int from_thread; int target_handle; int to_proc; int to_thread; int to_node; int data_size; int offsets_size; }; struct binder_transaction_log { int next; int full; struct binder_transaction_log_entry entry[32]; }; static struct binder_transaction_log binder_transaction_log; static struct binder_transaction_log binder_transaction_log_failed; static struct binder_transaction_log_entry *binder_transaction_log_add( struct binder_transaction_log *log) { struct binder_transaction_log_entry *e; e = &log->entry[log->next]; memset(e, 0, sizeof(*e)); log->next++; if (log->next == ARRAY_SIZE(log->entry)) { log->next = 0; log->full = 1; } return e; } struct binder_work { struct list_head entry; enum { BINDER_WORK_TRANSACTION = 1, BINDER_WORK_TRANSACTION_COMPLETE, BINDER_WORK_NODE, BINDER_WORK_DEAD_BINDER, BINDER_WORK_DEAD_BINDER_AND_CLEAR, BINDER_WORK_CLEAR_DEATH_NOTIFICATION, } type; }; struct binder_node { int debug_id; struct binder_work work; union { struct rb_node rb_node; struct hlist_node dead_node; }; struct binder_proc *proc; struct hlist_head refs; int internal_strong_refs; int local_weak_refs; int local_strong_refs; 
void __user *ptr;	/* userspace address identifying this object */
	void __user *cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};

/* pending death notification for one ref; cookie echoes userspace's */
struct binder_ref_death {
	struct binder_work work;
	void __user *cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;	/* in proc->refs_by_desc */
	struct rb_node rb_node_node;	/* in proc->refs_by_node */
	struct hlist_node node_entry;	/* in node->refs */
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;	/* the handle value userspace sees */
	int strong;
	int weak;
	struct binder_ref_death *death;
};

struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];	/* payload follows this header in-line */
};

/* bitmask of reasons a proc has deferred work queued */
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

/* per-process binder state */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer;		/* kernel mapping of the transaction pool */
	ptrdiff_t user_buffer_offset;	/* delta from kernel to user address */

	struct list_head buffers;		/* all buffers, by address */
	struct rb_root free_buffers;		/* free buffers, keyed by size */
	struct rb_root allocated_buffers;	/* in-use buffers, by address */
	size_t free_async_space;

	struct page **pages;
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;		/* in proc->threads */
	int pid;
	int looper;			/* BINDER_LOOPER_STATE_* bits */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;	/* sender; NULL for one-way calls */
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
};

static void binder_defer_work(struct binder_proc *proc,
			      enum binder_deferred_state defer);

/*
 * Reserve an unused fd in the *target* process, honouring its
 * RLIMIT_NOFILE.  Returns the fd or a negative errno (-ESRCH when the
 * proc's files_struct is already gone).
 */
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;
if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

/* take the file-scope binder_main_lock (defined outside this chunk) */
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

/*
 * Set current's nice value; when RLIMIT_NICE forbids the requested
 * value, fall back to the best value the rlimit still allows.
 */
static void binder_set_nice(long nice)
{
	long min_nice;
	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	/* min_nice < 20 means the rlimit permitted at least some value */
	if (min_nice < 20)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

/*
 * Usable size of @buffer: distance from its data[] to the next buffer
 * header in the pool (or to the end of the pool for the last buffer).
 */
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	else
		return (size_t)list_entry(buffer->entry.next,
			struct binder_buffer, entry) - (size_t)buffer->data;
}

/* insert a free buffer into proc->free_buffers, ordered by size */
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %p\n",
		      proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

/* insert an in-use buffer into proc->allocated_buffers, by address */
static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}

/*
 * Translate a userspace buffer pointer back to its binder_buffer header
 * (via user_buffer_offset) and look it up among the proc's allocated
 * buffers; NULL if unknown.
 */
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  void __user *user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}

/*
 * Allocate (allocate != 0) or free physical pages backing [start, end)
 * of the buffer pool, (un)mapping them in both the kernel and the
 * process's binder vma.  Returns 0 or -ENOMEM.  (continues on the next
 * chunk)
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ?
"allocate" : "free", start, end); if (end <= start) return 0; trace_binder_update_page_range(proc, allocate, start, end); if (vma) mm = NULL; else mm = get_task_mm(proc->tsk); if (mm) { down_write(&mm->mmap_sem); vma = proc->vma; if (vma && mm != proc->vma_vm_mm) { pr_err("%d: vma mm and task mm mismatch\n", proc->pid); vma = NULL; } } if (allocate == 0) goto free_range; if (vma == NULL) { pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", proc->pid); goto err_no_vma; } for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { int ret; struct page **page_array_ptr; page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; BUG_ON(*page); *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); if (*page == NULL) { pr_err("%d: binder_alloc_buf failed for page at %p\n", proc->pid, page_addr); goto err_alloc_page_failed; } tmp_area.addr = page_addr; tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */; page_array_ptr = page; ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr); if (ret) { pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", proc->pid, page_addr); goto err_map_kernel_failed; } user_page_addr = (uintptr_t)page_addr + proc->user_buffer_offset; ret = vm_insert_page(vma, user_page_addr, page[0]); if (ret) { pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", proc->pid, user_page_addr); goto err_vm_insert_page_failed; } /* vm_insert_page does not seem to increment the refcount */ } if (mm) { up_write(&mm->mmap_sem); mmput(mm); } return 0; free_range: for (page_addr = end - PAGE_SIZE; page_addr >= start; page_addr -= PAGE_SIZE) { page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; if (vma) zap_page_range(vma, (uintptr_t)page_addr + proc->user_buffer_offset, PAGE_SIZE, NULL); err_vm_insert_page_failed: unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); err_map_kernel_failed: __free_page(*page); *page = NULL; err_alloc_page_failed: ; } err_no_vma: if (mm) { 
up_write(&mm->mmap_sem); mmput(mm); } return -ENOMEM; } static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, size_t data_size, size_t offsets_size, int is_async) { struct rb_node *n = proc->free_buffers.rb_node; struct binder_buffer *buffer; size_t buffer_size; struct rb_node *best_fit = NULL; void *has_page_addr; void *end_page_addr; size_t size; if (proc->vma == NULL) { pr_err("%d: binder_alloc_buf, no vma\n", proc->pid); return NULL; } size = ALIGN(data_size, sizeof(void *)) + ALIGN(offsets_size, sizeof(void *)); if (size < data_size || size < offsets_size) { binder_user_error("%d: got transaction with invalid size %zd-%zd\n", proc->pid, data_size, offsets_size); return NULL; } if (is_async && proc->free_async_space < size + sizeof(struct binder_buffer)) { binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd failed, no async space left\n", proc->pid, size); return NULL; } while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(!buffer->free); buffer_size = binder_buffer_size(proc, buffer); if (size < buffer_size) { best_fit = n; n = n->rb_left; } else if (size > buffer_size) n = n->rb_right; else { best_fit = n; break; } } if (best_fit == NULL) { pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", proc->pid, size); return NULL; } if (n == NULL) { buffer = rb_entry(best_fit, struct binder_buffer, rb_node); buffer_size = binder_buffer_size(proc, buffer); } binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd got buffer %p size %zd\n", proc->pid, size, buffer, buffer_size); has_page_addr = (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); if (n == NULL) { if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) buffer_size = size; /* no room for other buffers */ else buffer_size = size + sizeof(struct binder_buffer); } end_page_addr = (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); if (end_page_addr > has_page_addr) end_page_addr = has_page_addr; if 
(binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		/* split: the unused remainder becomes a new free buffer */
		struct binder_buffer *new_buffer = (void *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %p\n",
		      proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	return buffer;
}

/* page containing the start of the buffer header */
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

/* page containing the last byte of the buffer header */
static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

/*
 * Remove a free buffer whose header may share pages with its list
 * neighbours, releasing only the page(s) no neighbour still needs.
 */
static void binder_delete_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(proc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p share page with %p\n",
			      proc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			/* NOTE(review): this is the next-neighbour case but
			 * the message prints prev; next looks intended. */
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%d: merge free, buffer %p share page with %p\n",
				      proc->pid, buffer, prev);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p do not share page%s%s with with %p or %p\n",
			     proc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(proc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

/*
 * Return @buffer to the pool: release its backing pages, mark it free
 * and coalesce with adjacent free buffers.  (continues on the next
 * chunk)
 */
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		      proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	/* coalesce with a free successor */
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);
		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	/* coalesce with a free predecessor */
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer,
entry);
		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}

/* find the proc's node for a userspace binder pointer, or NULL */
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   void __user *ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Create and insert a node for (ptr, cookie) into proc->nodes.
 * Returns NULL if a node for ptr already exists or allocation fails.
 */
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   void __user *ptr,
					   void __user *cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%p c%p created\n",
		     proc->pid, current->pid, node->debug_id,
		     node->ptr, node->cookie);
	return node;
}

/*
 * Take a strong or weak reference on @node.  internal distinguishes
 * remote-ref counts from local (proc-side) counts.  When this is the
 * first ref of its kind and userspace must be told, the node's work
 * item is queued on target_list.  Returns 0, or -EINVAL for an invalid
 * increment.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

/*
 * Drop a strong/weak reference.  When the relevant counts reach zero,
 * either queue the node's work on its proc (so userspace can drop its
 * side) or, once nothing references it at all, free the node.
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				/* proc already died; node was on dead list */
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}

/* look up a ref in this proc by its userspace handle (desc), or NULL */
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 uint32_t desc)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc)
			n = n->rb_left;
		else if (desc > ref->desc)
			n = n->rb_right;
		else
			return ref;
	}
	return NULL;
}

/*
 * Return proc's existing ref for @node, or create one with the lowest
 * unused descriptor (0 is reserved for the context manager).
 * (continues on the next chunk)
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p =
&(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/* pick the smallest unused descriptor, scanning in desc order */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			      proc->pid, new_ref->debug_id, new_ref->desc,
			      node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			      proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}

/*
 * Unlink @ref from both rb-trees and its node, dropping the node
 * references it held, and cancel any pending death notification.
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->debug_id, ref->desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}

/*
 * Bump a ref's strong/weak count; the first increment of each kind
 * also takes the corresponding reference on the underlying node.
 */
static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;
	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}

/*
 * Drop a ref's strong/weak count (userspace error if already zero);
 * the ref is deleted outright once both counts reach zero.
 */
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;
			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}

/* pop @t off target_thread's transaction stack (if any) and free it */
static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

/*
 * Deliver error_code to the sender of @t, walking up the from_parent
 * chain when intermediate senders are already gone.  (continues on the
 * next chunk)
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			/* preserve an earlier pending error in slot 2 */
			if (target_thread->return_error != BR_OK &&
			   target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if
(target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					      t->debug_id, target_thread->proc->pid,
					      target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		} else {
			struct binder_transaction *next = t->from_parent;

			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d, target dead\n",
				     t->debug_id);

			binder_pop_transaction(target_thread, t);
			if (next == NULL) {
				binder_debug(BINDER_DEBUG_DEAD_BINDER,
					     "reply failed, no target thread at root\n");
				return;
			}
			/* retry one level up the call chain */
			t = next;
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread -- retry %d\n",
				      t->debug_id);
		}
	}
}

/*
 * Undo the side effects of the flat_binder_objects embedded in a
 * transaction buffer (node/ref counts, installed fds).  failed_at,
 * when non-NULL, limits the walk to the offsets processed before a
 * mid-flight failure.
 */
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      size_t *failed_at)
{
	size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	/* the offsets array follows the aligned data payload */
	offp = (size_t *)(buffer->data + ALIGN(buffer->data_size,
					       sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;
		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(void *))) {
			pr_err("transaction release %d bad offset %zd, size %zd\n",
			 debug_id, *offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_node *node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %p\n",
					debug_id, fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " node %d u%p\n",
				     node->debug_id, node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
			if (ref == NULL) {
				pr_err("transaction release %d bad handle %ld\n",
				 debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD:
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " fd %ld\n", fp->handle);
			/* close target-side fds only when unwinding failure */
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;

		default:
			pr_err("transaction release %d bad object type %lx\n",
				debug_id, fp->type);
			break;
		}
	}
}

/*
 * Core of the BC_TRANSACTION / BC_REPLY path.  NOTE: this function
 * continues beyond this chunk; only its opening is visible here.
 */
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	size_t *offp, *off_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ?
2 : !!(tr->flags & TF_ONE_WAY); e->from_proc = proc->pid; e->from_thread = thread->pid; e->target_handle = tr->target.handle; e->data_size = tr->data_size; e->offsets_size = tr->offsets_size; if (reply) { in_reply_to = thread->transaction_stack; if (in_reply_to == NULL) { binder_user_error("%d:%d got reply transaction with no transaction stack\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_empty_call_stack; } binder_set_nice(in_reply_to->saved_priority); if (in_reply_to->to_thread != thread) { binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", proc->pid, thread->pid, in_reply_to->debug_id, in_reply_to->to_proc ? in_reply_to->to_proc->pid : 0, in_reply_to->to_thread ? in_reply_to->to_thread->pid : 0); return_error = BR_FAILED_REPLY; in_reply_to = NULL; goto err_bad_call_stack; } thread->transaction_stack = in_reply_to->to_parent; target_thread = in_reply_to->from; if (target_thread == NULL) { return_error = BR_DEAD_REPLY; goto err_dead_binder; } if (target_thread->transaction_stack != in_reply_to) { binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", proc->pid, thread->pid, target_thread->transaction_stack ? 
target_thread->transaction_stack->debug_id : 0, in_reply_to->debug_id); return_error = BR_FAILED_REPLY; in_reply_to = NULL; target_thread = NULL; goto err_dead_binder; } target_proc = target_thread->proc; } else { if (tr->target.handle) { struct binder_ref *ref; ref = binder_get_ref(proc, tr->target.handle); if (ref == NULL) { binder_user_error("%d:%d got transaction to invalid handle\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_invalid_target_handle; } target_node = ref->node; } else { target_node = binder_context_mgr_node; if (target_node == NULL) { return_error = BR_DEAD_REPLY; goto err_no_context_mgr_node; } } e->to_node = target_node->debug_id; target_proc = target_node->proc; if (target_proc == NULL) { return_error = BR_DEAD_REPLY; goto err_dead_binder; } if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { struct binder_transaction *tmp; tmp = thread->transaction_stack; if (tmp->to_thread != thread) { binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", proc->pid, thread->pid, tmp->debug_id, tmp->to_proc ? tmp->to_proc->pid : 0, tmp->to_thread ? 
tmp->to_thread->pid : 0); return_error = BR_FAILED_REPLY; goto err_bad_call_stack; } while (tmp) { if (tmp->from && tmp->from->proc == target_proc) target_thread = tmp->from; tmp = tmp->from_parent; } } } if (target_thread) { e->to_thread = target_thread->pid; target_list = &target_thread->todo; target_wait = &target_thread->wait; } else { target_list = &target_proc->todo; target_wait = &target_proc->wait; } e->to_proc = target_proc->pid; /* TODO: reuse incoming transaction for reply */ t = kzalloc(sizeof(*t), GFP_KERNEL); if (t == NULL) { return_error = BR_FAILED_REPLY; goto err_alloc_t_failed; } binder_stats_created(BINDER_STAT_TRANSACTION); tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); if (tcomplete == NULL) { return_error = BR_FAILED_REPLY; goto err_alloc_tcomplete_failed; } binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); t->debug_id = ++binder_last_id; e->debug_id = t->debug_id; if (reply) binder_debug(BINDER_DEBUG_TRANSACTION, "%d:%d BC_REPLY %d -> %d:%d, data %p-%p size %zd-%zd\n", proc->pid, thread->pid, t->debug_id, target_proc->pid, target_thread->pid, tr->data.ptr.buffer, tr->data.ptr.offsets, tr->data_size, tr->offsets_size); else binder_debug(BINDER_DEBUG_TRANSACTION, "%d:%d BC_TRANSACTION %d -> %d - node %d, data %p-%p size %zd-%zd\n", proc->pid, thread->pid, t->debug_id, target_proc->pid, target_node->debug_id, tr->data.ptr.buffer, tr->data.ptr.offsets, tr->data_size, tr->offsets_size); if (!reply && !(tr->flags & TF_ONE_WAY)) t->from = thread; else t->from = NULL; t->sender_euid = proc->tsk->cred->euid; t->to_proc = target_proc; t->to_thread = target_thread; t->code = tr->code; t->flags = tr->flags; t->priority = task_nice(current); trace_binder_transaction(reply, t, target_node); t->buffer = binder_alloc_buf(target_proc, tr->data_size, tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); if (t->buffer == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_alloc_buf_failed; } t->buffer->allow_user_free = 0; t->buffer->debug_id = 
t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; trace_binder_transaction_alloc_buf(t->buffer); if (target_node) binder_inc_node(target_node, 1, 0, NULL); offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) { binder_user_error("%d:%d got transaction with invalid data ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_copy_data_failed; } if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) { binder_user_error("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_copy_data_failed; } if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) { binder_user_error("%d:%d got transaction with invalid offsets size, %zd\n", proc->pid, thread->pid, tr->offsets_size); return_error = BR_FAILED_REPLY; goto err_bad_offset; } off_end = (void *)offp + tr->offsets_size; for (; offp < off_end; offp++) { struct flat_binder_object *fp; if (*offp > t->buffer->data_size - sizeof(*fp) || t->buffer->data_size < sizeof(*fp) || !IS_ALIGNED(*offp, sizeof(void *))) { binder_user_error("%d:%d got transaction with invalid offset, %zd\n", proc->pid, thread->pid, *offp); return_error = BR_FAILED_REPLY; goto err_bad_offset; } fp = (struct flat_binder_object *)(t->buffer->data + *offp); switch (fp->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { struct binder_ref *ref; struct binder_node *node = binder_get_node(proc, fp->binder); if (node == NULL) { node = binder_new_node(proc, fp->binder, fp->cookie); if (node == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_new_node_failed; } node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); } if (fp->cookie != node->cookie) { binder_user_error("%d:%d sending u%p node %d, cookie mismatch %p != %p\n", proc->pid, thread->pid, fp->binder, 
node->debug_id, fp->cookie, node->cookie); goto err_binder_get_ref_for_node_failed; } ref = binder_get_ref_for_node(target_proc, node); if (ref == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } if (fp->type == BINDER_TYPE_BINDER) fp->type = BINDER_TYPE_HANDLE; else fp->type = BINDER_TYPE_WEAK_HANDLE; fp->handle = ref->desc; binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo); trace_binder_transaction_node_to_ref(t, node, ref); binder_debug(BINDER_DEBUG_TRANSACTION, " node %d u%p -> ref %d desc %d\n", node->debug_id, node->ptr, ref->debug_id, ref->desc); } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { struct binder_ref *ref = binder_get_ref(proc, fp->handle); if (ref == NULL) { binder_user_error("%d:%d got transaction with invalid handle, %ld\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_binder_get_ref_failed; } if (ref->node->proc == target_proc) { if (fp->type == BINDER_TYPE_HANDLE) fp->type = BINDER_TYPE_BINDER; else fp->type = BINDER_TYPE_WEAK_BINDER; fp->binder = ref->node->ptr; fp->cookie = ref->node->cookie; binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); trace_binder_transaction_ref_to_node(t, ref); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> node %d u%p\n", ref->debug_id, ref->desc, ref->node->debug_id, ref->node->ptr); } else { struct binder_ref *new_ref; new_ref = binder_get_ref_for_node(target_proc, ref->node); if (new_ref == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } fp->handle = new_ref->desc; binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); trace_binder_transaction_ref_to_ref(t, ref, new_ref); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> ref %d desc %d (node %d)\n", ref->debug_id, ref->desc, new_ref->debug_id, new_ref->desc, ref->node->debug_id); } } break; case BINDER_TYPE_FD: { int target_fd; struct file *file; if (reply) { if 
(!(in_reply_to->flags & TF_ACCEPT_FDS)) { binder_user_error("%d:%d got reply with fd, %ld, but target does not allow fds\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_fd_not_allowed; } } else if (!target_node->accept_fds) { binder_user_error("%d:%d got transaction with fd, %ld, but target does not allow fds\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_fd_not_allowed; } file = fget(fp->handle); if (file == NULL) { binder_user_error("%d:%d got transaction with invalid fd, %ld\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_fget_failed; } target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); if (target_fd < 0) { fput(file); return_error = BR_FAILED_REPLY; goto err_get_unused_fd_failed; } task_fd_install(target_proc, target_fd, file); trace_binder_transaction_fd(t, fp->handle, target_fd); binder_debug(BINDER_DEBUG_TRANSACTION, " fd %ld -> %d\n", fp->handle, target_fd); /* TODO: fput? 
*/ fp->handle = target_fd; } break; default: binder_user_error("%d:%d got transaction with invalid object type, %lx\n", proc->pid, thread->pid, fp->type); return_error = BR_FAILED_REPLY; goto err_bad_object_type; } } if (reply) { BUG_ON(t->buffer->async_transaction != 0); binder_pop_transaction(target_thread, in_reply_to); } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; } else { BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); if (target_node->has_async_transaction) { target_list = &target_node->async_todo; target_wait = NULL; } else target_node->has_async_transaction = 1; } t->work.type = BINDER_WORK_TRANSACTION; list_add_tail(&t->work.entry, target_list); tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; list_add_tail(&tcomplete->entry, &thread->todo); if (target_wait) wake_up_interruptible(target_wait); return; err_get_unused_fd_failed: err_fget_failed: err_fd_not_allowed: err_binder_get_ref_for_node_failed: err_binder_get_ref_failed: err_binder_new_node_failed: err_bad_object_type: err_bad_offset: err_copy_data_failed: trace_binder_transaction_failed_buffer_release(t->buffer); binder_transaction_buffer_release(target_proc, t->buffer, offp); t->buffer->transaction = NULL; binder_free_buf(target_proc, t->buffer); err_binder_alloc_buf_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); err_alloc_tcomplete_failed: kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); err_alloc_t_failed: err_bad_call_stack: err_empty_call_stack: err_dead_binder: err_invalid_target_handle: err_no_context_mgr_node: binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "%d:%d transaction failed %d, size %zd-%zd\n", proc->pid, thread->pid, return_error, tr->data_size, tr->offsets_size); { struct binder_transaction_log_entry *fe; fe = binder_transaction_log_add(&binder_transaction_log_failed); *fe = *e; } 
	/* Tail of binder_transaction() error path: report the failure either
	 * back down the reply chain or directly to the calling thread. */
	BUG_ON(thread->return_error != BR_OK);
	if (in_reply_to) {
		thread->return_error = BR_TRANSACTION_COMPLETE;
		binder_send_failed_reply(in_reply_to, return_error);
	} else
		thread->return_error = return_error;
}

/*
 * binder_thread_write - consume BC_* commands from a userspace buffer.
 * @proc:     binder process the commands belong to
 * @thread:   calling thread's binder state
 * @buffer:   userspace command buffer
 * @size:     total size of @buffer in bytes
 * @consumed: in/out: offset already processed; updated after each command
 *
 * Processes commands one at a time until the buffer is exhausted or a
 * pending return error stops the loop.  Returns 0 on success or -EFAULT /
 * -EINVAL; @consumed is only advanced after a command completes, so a
 * failed copy can be retried by userspace.
 */
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
			void __user *buffer, int size, signed long *consumed)
{
	uint32_t cmd;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	/* Stop early if a previous command left an error to report. */
	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		/* Per-command statistics: global, per-process, per-thread. */
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			binder_stats.bc[_IOC_NR(cmd)]++;
			proc->stats.bc[_IOC_NR(cmd)]++;
			thread->stats.bc[_IOC_NR(cmd)]++;
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			/* Reference-count changes on a handle (ref desc). */
			uint32_t target;
			struct binder_ref *ref;
			const char *debug_string;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			/* Handle 0 is the context manager; only strong/weak
			 * increments may create the ref implicitly. */
			if (target == 0 && binder_context_mgr_node &&
			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
				ref = binder_get_ref_for_node(proc,
					       binder_context_mgr_node);
				if (ref->desc != target) {
					binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
						proc->pid, thread->pid,
						ref->desc);
				}
			} else
				ref = binder_get_ref(proc, target);
			if (ref == NULL) {
				binder_user_error("%d:%d refcount change on invalid ref %d\n",
					proc->pid, thread->pid, target);
				break;
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				binder_inc_ref(ref, 0, NULL);
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				binder_inc_ref(ref, 1, NULL);
				break;
			case BC_RELEASE:
				debug_string = "Release";
				binder_dec_ref(ref, 1);
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				binder_dec_ref(ref, 0);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid, debug_string,
				     ref->debug_id, ref->desc, ref->strong,
				     ref->weak, ref->node->debug_id);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			/* Userspace acknowledges a BR_INCREFS/BR_ACQUIRE it
			 * received earlier; clears the pending flag and drops
			 * the temporary node reference taken when the BR_*
			 * was sent. */
			void __user *node_ptr;
			void *cookie;
			struct binder_node *node;

			if (get_user(node_ptr, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			if (get_user(cookie, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%p no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%p node %d cookie mismatch %p != %p\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					node_ptr, node->debug_id,
					cookie, node->cookie);
				break;
			}
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_weak_ref = 0;
			}
			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			/* Userspace is done with a transaction buffer; detach
			 * it from any live transaction, release queued async
			 * work, and return it to the allocator. */
			void __user *data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);

			buffer = binder_buffer_lookup(proc, data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%p no match\n",
					proc->pid, thread->pid, data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%p matched unreturned buffer\n",
					proc->pid, thread->pid, data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				/* Freeing an async buffer unthrottles the next
				 * queued async transaction for this node. */
				BUG_ON(!buffer->target_node->has_async_transaction);
				if (list_empty(&buffer->target_node->async_todo))
					buffer->target_node->has_async_transaction = 0;
				else
					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_free_buf(proc, buffer);
			break;
		}

		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
			break;
		}

		case BC_REGISTER_LOOPER:
			/* A thread spawned in response to BR_SPAWN_LOOPER;
			 * must not also have called BC_ENTER_LOOPER, and a
			 * spawn must actually have been requested. */
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			/* Register or remove a death notification on a remote
			 * node, identified by (handle, cookie). */
			uint32_t target;
			void __user *cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (void __user * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			ref = binder_get_ref(proc, target);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     cookie, ref->debug_id, ref->desc,
				     ref->strong, ref->weak, ref->node->debug_id);

			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					/* Only one death notification per ref. */
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					break;
				}
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					thread->return_error = BR_ERROR;
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						     proc->pid, thread->pid);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					/* Target already dead: deliver the
					 * notification immediately. */
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&ref->death->work.entry, &thread->todo);
					} else {
						list_add_tail(&ref->death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n",
						proc->pid, thread->pid,
						death->cookie, cookie);
					break;
				}
				ref->death = NULL;
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&death->work.entry, &thread->todo);
					} else {
						list_add_tail(&death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				} else {
					/* Death already queued for delivery:
					 * combine the clear into it. */
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
			}
		} break;
		case BC_DEAD_BINDER_DONE: {
			/* Userspace finished handling a BR_DEAD_BINDER; find
			 * the matching entry on the delivered_death list and
			 * complete (or re-queue the deferred clear). */
			struct binder_work *w;
			void __user *cookie;
			struct binder_ref_death *death = NULL;
			if (get_user(cookie, (void __user * __user *)ptr))
				return -EFAULT;

			ptr += sizeof(void *);
			list_for_each_entry(w, &proc->delivered_death, entry) {
				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %p found %p\n",
				     proc->pid, thread->pid, cookie, death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %p not found\n",
					proc->pid, thread->pid, cookie);
				break;
			}

			list_del_init(&death->work.entry);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
					list_add_tail(&death->work.entry, &thread->todo);
				} else {
					list_add_tail(&death->work.entry, &proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			}
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		/* Commit progress only after the command fully executed. */
		*consumed = ptr - buffer;
	}
	return 0;
}

/* Account a BR_* return code in global, per-process and per-thread stats. */
void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
		    uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		binder_stats.br[_IOC_NR(cmd)]++;
		proc->stats.br[_IOC_NR(cmd)]++;
		thread->stats.br[_IOC_NR(cmd)]++;
	}
}

/* Work available on the process-wide queue (or a forced return pending)? */
static int binder_has_proc_work(struct binder_proc *proc,
				struct binder_thread *thread)
{
	return !list_empty(&proc->todo) ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

/* Work available for this specific thread? */
static int binder_has_thread_work(struct binder_thread *thread)
{
	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

/*
 * binder_thread_read - fill a userspace buffer with BR_* work items.
 * Blocks (unless @non_block) until thread- or process-level work arrives,
 * then translates queued binder_work entries into BR_* return codes.
 */
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      void __user *buffer, int size,
			      signed long *consumed, int non_block)
{
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	/* A fresh read always starts with a BR_NOOP for userspace's parser. */
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user
		       *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	/* No private work and no open transaction: this thread may service
	 * the shared process queue. */
	wait_for_proc_work = thread->transaction_stack == NULL &&
				list_empty(&thread->todo);

	/* Deliver pending error codes (error2 first) before any other work. */
	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			binder_stat_br(proc, thread, thread->return_error2);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		binder_stat_br(proc, thread, thread->return_error);
		thread->return_error = BR_OK;
		goto done;
	}

	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;

	/* Drop the global binder lock while sleeping. */
	binder_unlock(__func__);

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !list_empty(&thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			/* Exclusive wait: wake only one ready thread per
			 * item queued to the process. */
			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
	}

	binder_lock(__func__);

	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		/* Thread-local work takes priority over the process queue. */
		if (!list_empty(&thread->todo))
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		else if (!list_empty(&proc->todo) && wait_for_proc_work)
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		else {
			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
				goto retry;
			break;
		}

		/* Need room for at least one command + transaction_data. */
		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			/* Reconcile the node's kernel-side refcounts with the
			 * weak/strong references userspace knows about, and
			 * emit the corresponding BR_INCREFS/ACQUIRE/RELEASE/
			 * DECREFS command if they differ. */
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);
				if (put_user(node->cookie, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "%d:%d %s %d u%p c%p\n",
					     proc->pid, thread->pid, cmd_name,
					     node->debug_id, node->ptr, node->cookie);
			} else {
				list_del_init(&w->entry);
				if (!weak && !strong) {
					/* No references left at all: the node
					 * can be destroyed. */
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%p c%p deleted\n",
						     proc->pid, thread->pid,
						     node->debug_id, node->ptr,
						     node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%p c%p state unchanged\n",
						     proc->pid, thread->pid,
						     node->debug_id, node->ptr,
						     node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %p\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				/* Keep the work around until userspace sends
				 * BC_DEAD_BINDER_DONE. */
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		/* Translate the pending transaction into a BR_TRANSACTION or
		 * BR_REPLY record for userspace. */
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			/* Inherit the caller's priority for synchronous
			 * calls, clamped by the node's minimum. */
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = NULL;
			tr.cookie = NULL;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;
			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		/* Hand userspace pointers into the mmap'ed buffer area. */
		tr.data.ptr.buffer = (void *)t->buffer->data +
					proc->user_buffer_offset;
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %p-%p\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
			     t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     tr.data.ptr.buffer, tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			/* Synchronous call: push it onto this thread's
			 * transaction stack so the reply can find its way
			 * back. */
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			/* One-way or reply: the transaction is finished. */
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	/* Ask userspace to spawn another looper thread if all started
	 * threads are busy and the configured maximum is not reached. */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		/* BR_SPAWN_LOOPER overwrites the BR_NOOP at buffer start. */
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
	return 0;
}

/*
 * binder_release_work - discard all work still queued on @list.
 * Undelivered synchronous transactions get a BR_DEAD_REPLY sent back;
 * everything else is simply freed.
 */
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;
	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				t->buffer->transaction = NULL;
				kfree(t);
				binder_stats_deleted(BINDER_STAT_TRANSACTION);
			}
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %p\n",
				death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}

/*
 * binder_get_thread - look up (or lazily create) the binder_thread for the
 * current task in @proc's thread rbtree, keyed by pid.  Returns NULL only
 * on allocation failure.  New threads start with the NEED_RETURN flag set.
 */
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	/* Standard rbtree descent; remembers insertion point in parent/p. */
	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			break;
	}
	if (*p == NULL) {
		/* First time this task touched the binder fd: create state. */
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (thread == NULL)
			return NULL;
		binder_stats_created(BINDER_STAT_THREAD);
		thread->proc = proc;
		thread->pid = current->pid;
		init_waitqueue_head(&thread->wait);
		INIT_LIST_HEAD(&thread->todo);
		rb_link_node(&thread->rb_node, parent, p);
		rb_insert_color(&thread->rb_node, &proc->threads);
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		thread->return_error = BR_OK;
		thread->return_error2 = BR_OK;
	}
	return thread;
}

/*
 * binder_free_thread - tear down a binder_thread on exit/release.
 * Unwinds the thread's transaction stack (failing incoming transactions
 * with BR_DEAD_REPLY where a caller is still waiting), releases queued
 * work, and frees the thread.  Returns the number of transactions that
 * were still active.
 */
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	/* Only the innermost incoming transaction gets a failed reply. */
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			      proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ?
			     "in" : "out");
		/* Detach this thread from each stacked transaction, walking
		 * toward the stack bottom on the appropriate parent link. */
		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats_deleted(BINDER_STAT_THREAD);
	return active_transactions;
}

/*
 * binder_poll - poll/select support.  Registers on the process wait queue
 * when the thread is idle (no stack, no private work, no pending error),
 * otherwise on the thread's own queue.
 */
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	int wait_for_proc_work;

	binder_lock(__func__);

	thread = binder_get_thread(proc);

	wait_for_proc_work = thread->transaction_stack == NULL &&
		list_empty(&thread->todo) && thread->return_error == BR_OK;

	binder_unlock(__func__);

	if (wait_for_proc_work) {
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
		poll_wait(filp, &proc->wait, wait);
		/* Re-check after registering to avoid a missed wakeup. */
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
	} else {
		if (binder_has_thread_work(thread))
			return POLLIN;
		poll_wait(filp, &thread->wait, wait);
		if (binder_has_thread_work(thread))
			return POLLIN;
	}
	return 0;
}

/*
 * binder_ioctl - main userspace entry point for the binder protocol
 * (BINDER_WRITE_READ, thread management, context-manager registration,
 * version query).  Runs under the global binder lock.
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/

	trace_binder_ioctl(cmd, arg);

	/* Debug aid: stall all ioctls while a user-error stop is active. */
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ: {
		struct binder_write_read bwr;
		if (size != sizeof(struct binder_write_read)) {
			ret = -EINVAL;
			goto err;
		}
		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		binder_debug(BINDER_DEBUG_READ_WRITE,
			     "%d:%d write %ld at %08lx, read %ld at %08lx\n",
			     proc->pid, thread->pid, bwr.write_size,
			     bwr.write_buffer, bwr.read_size, bwr.read_buffer);

		/* Write side first, then read; on failure copy the (partly
		 * updated) bwr back so userspace can see how far we got. */
		if (bwr.write_size > 0) {
			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
			trace_binder_write_done(ret);
			if (ret < 0) {
				bwr.read_consumed = 0;
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		if (bwr.read_size > 0) {
			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
			trace_binder_read_done(ret);
			/* We may have consumed process work; wake another
			 * waiter if the process queue is still non-empty. */
			if (!list_empty(&proc->todo))
				wake_up_interruptible(&proc->wait);
			if (ret < 0) {
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		binder_debug(BINDER_DEBUG_READ_WRITE,
			     "%d:%d wrote %ld of %ld, read return %ld of %ld\n",
			     proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
			     bwr.read_consumed, bwr.read_size);
		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_SET_MAX_THREADS:
		/* NOTE(review): historic behavior returns -EINVAL (not
		 * -EFAULT) when copy_from_user faults here — kept as-is. */
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		/* Only one context manager per system; once a uid has
		 * claimed it, only that euid may re-register. */
		if (binder_context_mgr_node != NULL) {
			pr_err("BINDER_SET_CONTEXT_MGR already set\n");
			ret = -EBUSY;
			goto err;
		}
		if (uid_valid(binder_context_mgr_uid)) {
			if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) {
				pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
				       from_kuid(&init_user_ns, current->cred->euid),
				       from_kuid(&init_user_ns, binder_context_mgr_uid));
				ret = -EPERM;
				goto err;
			}
		} else
			binder_context_mgr_uid = current->cred->euid;
		binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
		if (binder_context_mgr_node == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		/* Pin the context manager node so it can never be released
		 * by ordinary refcounting. */
		binder_context_mgr_node->local_weak_refs++;
		binder_context_mgr_node->local_strong_refs++;
		binder_context_mgr_node->has_strong_ref = 1;
		binder_context_mgr_node->has_weak_ref = 1;
		break;
	case
	     BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		/* thread is freed; clear it so the err path won't touch it. */
		thread = NULL;
		break;
	case BINDER_VERSION:
		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

/* vm_ops open hook: debug logging only. */
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

/*
 * vm_ops close hook: the shared buffer mapping is going away, so forget
 * the vma and schedule deferred release of the cached files_struct.
 */
static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	proc->vma = NULL;
	proc->vma_vm_mm = NULL;
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
};

/* Map the shared transaction buffer area (continues past this chunk). */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	if (proc->tsk != current)
		return -EINVAL;

	/* Cap the mapping at 4 MB. */
	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { ret = -EPERM; failure_string = "bad vm_flags"; goto err_bad_arg; } vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; mutex_lock(&binder_mmap_lock); if (proc->buffer) { ret = -EBUSY; failure_string = "already mapped"; goto err_already_mapped; } area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); if (area == NULL) { ret = -ENOMEM; failure_string = "get_vm_area"; goto err_get_vm_area_failed; } proc->buffer = area->addr; proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; mutex_unlock(&binder_mmap_lock); #ifdef CONFIG_CPU_CACHE_VIPT if (cache_is_vipt_aliasing()) { while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); vma->vm_start += PAGE_SIZE; } } #endif proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); if (proc->pages == NULL) { ret = -ENOMEM; failure_string = "alloc page array"; goto err_alloc_pages_failed; } proc->buffer_size = vma->vm_end - vma->vm_start; vma->vm_ops = &binder_vm_ops; vma->vm_private_data = proc; if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { ret = -ENOMEM; failure_string = "alloc small buf"; goto err_alloc_small_buf_failed; } buffer = proc->buffer; INIT_LIST_HEAD(&proc->buffers); list_add(&buffer->entry, &proc->buffers); buffer->free = 1; binder_insert_free_buffer(proc, buffer); proc->free_async_space = proc->buffer_size / 2; barrier(); proc->files = get_files_struct(current); proc->vma = vma; proc->vma_vm_mm = vma->vm_mm; /*pr_info("binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ 
	return 0;

/* Error unwind for binder_mmap(): release resources in reverse order of acquisition. */
err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

/*
 * open() handler for /dev/binder: allocate a binder_proc for the opening
 * process, register it on the global binder_procs list, and expose its
 * state via a per-pid debugfs file.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	get_task_struct(current);	/* pin the task; released in binder_deferred_release() */
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];	/* fits a decimal 32-bit pid plus NUL */

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
	}

	return 0;
}

/* flush() hook: defer the actual flush to the binder workqueue. */
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

/*
 * Kick every thread of @proc out of the driver: mark each looper
 * NEED_RETURN and wake any thread currently blocked waiting for work.
 * Runs from the deferred-work path with the global binder lock held.
 */
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
wake_count); } static int binder_release(struct inode *nodp, struct file *filp) { struct binder_proc *proc = filp->private_data; debugfs_remove(proc->debugfs_entry); binder_defer_work(proc, BINDER_DEFERRED_RELEASE); return 0; } static void binder_deferred_release(struct binder_proc *proc) { struct binder_transaction *t; struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; BUG_ON(proc->vma); BUG_ON(proc->files); hlist_del(&proc->proc_node); if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "binder_release: %d context_mgr_node gone\n", proc->pid); binder_context_mgr_node = NULL; } threads = 0; active_transactions = 0; while ((n = rb_first(&proc->threads))) { struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); threads++; active_transactions += binder_free_thread(proc, thread); } nodes = 0; incoming_refs = 0; while ((n = rb_first(&proc->nodes))) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); nodes++; rb_erase(&node->rb_node, &proc->nodes); list_del_init(&node->work.entry); binder_release_work(&node->async_todo); if (hlist_empty(&node->refs)) { kfree(node); binder_stats_deleted(BINDER_STAT_NODE); } else { struct binder_ref *ref; int death = 0; node->proc = NULL; node->local_strong_refs = 0; node->local_weak_refs = 0; hlist_add_head(&node->dead_node, &binder_dead_nodes); hlist_for_each_entry(ref, &node->refs, node_entry) { incoming_refs++; if (ref->death) { death++; if (list_empty(&ref->death->work.entry)) { ref->death->work.type = BINDER_WORK_DEAD_BINDER; list_add_tail(&ref->death->work.entry, &ref->proc->todo); wake_up_interruptible(&ref->proc->wait); } else BUG(); } } binder_debug(BINDER_DEBUG_DEAD_BINDER, "node %d now dead, refs %d, death %d\n", node->debug_id, incoming_refs, death); } } outgoing_refs = 0; while ((n = rb_first(&proc->refs_by_desc))) { struct binder_ref *ref = rb_entry(n, struct 
binder_ref, rb_node_desc); outgoing_refs++; binder_delete_ref(ref); } binder_release_work(&proc->todo); binder_release_work(&proc->delivered_death); buffers = 0; while ((n = rb_first(&proc->allocated_buffers))) { struct binder_buffer *buffer = rb_entry(n, struct binder_buffer, rb_node); t = buffer->transaction; if (t) { t->buffer = NULL; buffer->transaction = NULL; pr_err("release proc %d, transaction %d, not freed\n", proc->pid, t->debug_id); /*BUG();*/ } binder_free_buf(proc, buffer); buffers++; } binder_stats_deleted(BINDER_STAT_PROC); page_count = 0; if (proc->pages) { int i; for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { if (proc->pages[i]) { void *page_addr = proc->buffer + i * PAGE_SIZE; binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder_release: %d: page %d at %p not freed\n", proc->pid, i, page_addr); unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); __free_page(proc->pages[i]); page_count++; } } kfree(proc->pages); vfree(proc->buffer); } put_task_struct(proc->tsk); binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_release: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions, buffers, page_count); kfree(proc); } static void binder_deferred_func(struct work_struct *work) { struct binder_proc *proc; struct files_struct *files; int defer; do { binder_lock(__func__); mutex_lock(&binder_deferred_lock); if (!hlist_empty(&binder_deferred_list)) { proc = hlist_entry(binder_deferred_list.first, struct binder_proc, deferred_work_node); hlist_del_init(&proc->deferred_work_node); defer = proc->deferred_work; proc->deferred_work = 0; } else { proc = NULL; defer = 0; } mutex_unlock(&binder_deferred_lock); files = NULL; if (defer & BINDER_DEFERRED_PUT_FILES) { files = proc->files; if (files) proc->files = NULL; } if (defer & BINDER_DEFERRED_FLUSH) binder_deferred_flush(proc); if (defer & BINDER_DEFERRED_RELEASE) binder_deferred_release(proc); /* 
frees proc */ binder_unlock(__func__); if (files) put_files_struct(files); } while (proc); } static DECLARE_WORK(binder_deferred_work, binder_deferred_func); static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) { mutex_lock(&binder_deferred_lock); proc->deferred_work |= defer; if (hlist_unhashed(&proc->deferred_work_node)) { hlist_add_head(&proc->deferred_work_node, &binder_deferred_list); queue_work(binder_deferred_workqueue, &binder_deferred_work); } mutex_unlock(&binder_deferred_lock); } static void print_binder_transaction(struct seq_file *m, const char *prefix, struct binder_transaction *t) { seq_printf(m, "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0, t->from ? t->from->pid : 0, t->to_proc ? t->to_proc->pid : 0, t->to_thread ? t->to_thread->pid : 0, t->code, t->flags, t->priority, t->need_reply); if (t->buffer == NULL) { seq_puts(m, " buffer free\n"); return; } if (t->buffer->target_node) seq_printf(m, " node %d", t->buffer->target_node->debug_id); seq_printf(m, " size %zd:%zd data %p\n", t->buffer->data_size, t->buffer->offsets_size, t->buffer->data); } static void print_binder_buffer(struct seq_file *m, const char *prefix, struct binder_buffer *buffer) { seq_printf(m, "%s %d: %p size %zd:%zd %s\n", prefix, buffer->debug_id, buffer->data, buffer->data_size, buffer->offsets_size, buffer->transaction ? 
"active" : "delivered"); } static void print_binder_work(struct seq_file *m, const char *prefix, const char *transaction_prefix, struct binder_work *w) { struct binder_node *node; struct binder_transaction *t; switch (w->type) { case BINDER_WORK_TRANSACTION: t = container_of(w, struct binder_transaction, work); print_binder_transaction(m, transaction_prefix, t); break; case BINDER_WORK_TRANSACTION_COMPLETE: seq_printf(m, "%stransaction complete\n", prefix); break; case BINDER_WORK_NODE: node = container_of(w, struct binder_node, work); seq_printf(m, "%snode work %d: u%p c%p\n", prefix, node->debug_id, node->ptr, node->cookie); break; case BINDER_WORK_DEAD_BINDER: seq_printf(m, "%shas dead binder\n", prefix); break; case BINDER_WORK_DEAD_BINDER_AND_CLEAR: seq_printf(m, "%shas cleared dead binder\n", prefix); break; case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: seq_printf(m, "%shas cleared death notification\n", prefix); break; default: seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); break; } } static void print_binder_thread(struct seq_file *m, struct binder_thread *thread, int print_always) { struct binder_transaction *t; struct binder_work *w; size_t start_pos = m->count; size_t header_pos; seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); header_pos = m->count; t = thread->transaction_stack; while (t) { if (t->from == thread) { print_binder_transaction(m, " outgoing transaction", t); t = t->from_parent; } else if (t->to_thread == thread) { print_binder_transaction(m, " incoming transaction", t); t = t->to_parent; } else { print_binder_transaction(m, " bad transaction", t); t = NULL; } } list_for_each_entry(w, &thread->todo, entry) { print_binder_work(m, " ", " pending transaction", w); } if (!print_always && m->count == header_pos) m->count = start_pos; } static void print_binder_node(struct seq_file *m, struct binder_node *node) { struct binder_ref *ref; struct binder_work *w; int count; count = 0; hlist_for_each_entry(ref, 
&node->refs, node_entry) count++; seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d", node->debug_id, node->ptr, node->cookie, node->has_strong_ref, node->has_weak_ref, node->local_strong_refs, node->local_weak_refs, node->internal_strong_refs, count); if (count) { seq_puts(m, " proc"); hlist_for_each_entry(ref, &node->refs, node_entry) seq_printf(m, " %d", ref->proc->pid); } seq_puts(m, "\n"); list_for_each_entry(w, &node->async_todo, entry) print_binder_work(m, " ", " pending async transaction", w); } static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) { seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", ref->node->debug_id, ref->strong, ref->weak, ref->death); } static void print_binder_proc(struct seq_file *m, struct binder_proc *proc, int print_all) { struct binder_work *w; struct rb_node *n; size_t start_pos = m->count; size_t header_pos; seq_printf(m, "proc %d\n", proc->pid); header_pos = m->count; for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) print_binder_thread(m, rb_entry(n, struct binder_thread, rb_node), print_all); for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); if (print_all || node->has_async_transaction) print_binder_node(m, node); } if (print_all) { for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) print_binder_ref(m, rb_entry(n, struct binder_ref, rb_node_desc)); } for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) print_binder_buffer(m, " buffer", rb_entry(n, struct binder_buffer, rb_node)); list_for_each_entry(w, &proc->todo, entry) print_binder_work(m, " ", " pending transaction", w); list_for_each_entry(w, &proc->delivered_death, entry) { seq_puts(m, " has delivered dead binder\n"); break; } if (!print_all && m->count == header_pos) m->count = start_pos; } static const char * const 
binder_return_strings[] = { "BR_ERROR", "BR_OK", "BR_TRANSACTION", "BR_REPLY", "BR_ACQUIRE_RESULT", "BR_DEAD_REPLY", "BR_TRANSACTION_COMPLETE", "BR_INCREFS", "BR_ACQUIRE", "BR_RELEASE", "BR_DECREFS", "BR_ATTEMPT_ACQUIRE", "BR_NOOP", "BR_SPAWN_LOOPER", "BR_FINISHED", "BR_DEAD_BINDER", "BR_CLEAR_DEATH_NOTIFICATION_DONE", "BR_FAILED_REPLY" }; static const char * const binder_command_strings[] = { "BC_TRANSACTION", "BC_REPLY", "BC_ACQUIRE_RESULT", "BC_FREE_BUFFER", "BC_INCREFS", "BC_ACQUIRE", "BC_RELEASE", "BC_DECREFS", "BC_INCREFS_DONE", "BC_ACQUIRE_DONE", "BC_ATTEMPT_ACQUIRE", "BC_REGISTER_LOOPER", "BC_ENTER_LOOPER", "BC_EXIT_LOOPER", "BC_REQUEST_DEATH_NOTIFICATION", "BC_CLEAR_DEATH_NOTIFICATION", "BC_DEAD_BINDER_DONE" }; static const char * const binder_objstat_strings[] = { "proc", "thread", "node", "ref", "death", "transaction", "transaction_complete" }; static void print_binder_stats(struct seq_file *m, const char *prefix, struct binder_stats *stats) { int i; BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != ARRAY_SIZE(binder_command_strings)); for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { if (stats->bc[i]) seq_printf(m, "%s%s: %d\n", prefix, binder_command_strings[i], stats->bc[i]); } BUILD_BUG_ON(ARRAY_SIZE(stats->br) != ARRAY_SIZE(binder_return_strings)); for (i = 0; i < ARRAY_SIZE(stats->br); i++) { if (stats->br[i]) seq_printf(m, "%s%s: %d\n", prefix, binder_return_strings[i], stats->br[i]); } BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(binder_objstat_strings)); BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(stats->obj_deleted)); for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { if (stats->obj_created[i] || stats->obj_deleted[i]) seq_printf(m, "%s%s: active %d total %d\n", prefix, binder_objstat_strings[i], stats->obj_created[i] - stats->obj_deleted[i], stats->obj_created[i]); } } static void print_binder_proc_stats(struct seq_file *m, struct binder_proc *proc) { struct binder_work *w; struct rb_node *n; int count, strong, weak; 
seq_printf(m, "proc %d\n", proc->pid); count = 0; for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) count++; seq_printf(m, " threads: %d\n", count); seq_printf(m, " requested threads: %d+%d/%d\n" " ready threads %d\n" " free async space %zd\n", proc->requested_threads, proc->requested_threads_started, proc->max_threads, proc->ready_threads, proc->free_async_space); count = 0; for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) count++; seq_printf(m, " nodes: %d\n", count); count = 0; strong = 0; weak = 0; for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc); count++; strong += ref->strong; weak += ref->weak; } seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); count = 0; for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) count++; seq_printf(m, " buffers: %d\n", count); count = 0; list_for_each_entry(w, &proc->todo, entry) { switch (w->type) { case BINDER_WORK_TRANSACTION: count++; break; default: break; } } seq_printf(m, " pending transactions: %d\n", count); print_binder_stats(m, " ", &proc->stats); } static int binder_state_show(struct seq_file *m, void *unused) { struct binder_proc *proc; struct binder_node *node; int do_lock = !binder_debug_no_lock; if (do_lock) binder_lock(__func__); seq_puts(m, "binder state:\n"); if (!hlist_empty(&binder_dead_nodes)) seq_puts(m, "dead nodes:\n"); hlist_for_each_entry(node, &binder_dead_nodes, dead_node) print_binder_node(m, node); hlist_for_each_entry(proc, &binder_procs, proc_node) print_binder_proc(m, proc, 1); if (do_lock) binder_unlock(__func__); return 0; } static int binder_stats_show(struct seq_file *m, void *unused) { struct binder_proc *proc; int do_lock = !binder_debug_no_lock; if (do_lock) binder_lock(__func__); seq_puts(m, "binder stats:\n"); print_binder_stats(m, "", &binder_stats); hlist_for_each_entry(proc, &binder_procs, proc_node) print_binder_proc_stats(m, proc); if 
(do_lock) binder_unlock(__func__); return 0; } static int binder_transactions_show(struct seq_file *m, void *unused) { struct binder_proc *proc; int do_lock = !binder_debug_no_lock; if (do_lock) binder_lock(__func__); seq_puts(m, "binder transactions:\n"); hlist_for_each_entry(proc, &binder_procs, proc_node) print_binder_proc(m, proc, 0); if (do_lock) binder_unlock(__func__); return 0; } static int binder_proc_show(struct seq_file *m, void *unused) { struct binder_proc *proc = m->private; int do_lock = !binder_debug_no_lock; if (do_lock) binder_lock(__func__); seq_puts(m, "binder proc state:\n"); print_binder_proc(m, proc, 1); if (do_lock) binder_unlock(__func__); return 0; } static void print_binder_transaction_log_entry(struct seq_file *m, struct binder_transaction_log_entry *e) { seq_printf(m, "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", e->debug_id, (e->call_type == 2) ? "reply" : ((e->call_type == 1) ? "async" : "call "), e->from_proc, e->from_thread, e->to_proc, e->to_thread, e->to_node, e->target_handle, e->data_size, e->offsets_size); } static int binder_transaction_log_show(struct seq_file *m, void *unused) { struct binder_transaction_log *log = m->private; int i; if (log->full) { for (i = log->next; i < ARRAY_SIZE(log->entry); i++) print_binder_transaction_log_entry(m, &log->entry[i]); } for (i = 0; i < log->next; i++) print_binder_transaction_log_entry(m, &log->entry[i]); return 0; } static const struct file_operations binder_fops = { .owner = THIS_MODULE, .poll = binder_poll, .unlocked_ioctl = binder_ioctl, .mmap = binder_mmap, .open = binder_open, .flush = binder_flush, .release = binder_release, }; static struct miscdevice binder_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "binder", .fops = &binder_fops }; BINDER_DEBUG_ENTRY(state); BINDER_DEBUG_ENTRY(stats); BINDER_DEBUG_ENTRY(transactions); BINDER_DEBUG_ENTRY(transaction_log); static int __init binder_init(void) { int ret; binder_deferred_workqueue = 
create_singlethread_workqueue("binder"); if (!binder_deferred_workqueue) return -ENOMEM; binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); if (binder_debugfs_dir_entry_root) binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", binder_debugfs_dir_entry_root); ret = misc_register(&binder_miscdev); if (binder_debugfs_dir_entry_root) { debugfs_create_file("state", S_IRUGO, binder_debugfs_dir_entry_root, NULL, &binder_state_fops); debugfs_create_file("stats", S_IRUGO, binder_debugfs_dir_entry_root, NULL, &binder_stats_fops); debugfs_create_file("transactions", S_IRUGO, binder_debugfs_dir_entry_root, NULL, &binder_transactions_fops); debugfs_create_file("transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, &binder_transaction_log, &binder_transaction_log_fops); debugfs_create_file("failed_transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, &binder_transaction_log_failed, &binder_transaction_log_fops); } return ret; } device_initcall(binder_init); #define CREATE_TRACE_POINTS #include "binder_trace.h" MODULE_LICENSE("GPL v2");
def func(a1):
    """Placeholder operation; currently performs no work and returns ``None``.

    Parameters:
        a1 (:class:`MyClass`): used to call :func:`my_function` and access
            :attr:`my_attr`

    Raises:
        :class:`MyException`: thrown in case of any error
    """
    # NOTE: ``:def:`` is not a valid Sphinx cross-reference role; the Python
    # domain role for callables is ``:func:`` (fixed above).
package main import ( "strings" "time" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/cli" "github.com/go-check/check" "github.com/gotestyourself/gotestyourself/icmd" ) func (s *DockerSuite) TestUpdateRestartPolicy(c *check.C) { out := cli.DockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "sh", "-c", "sleep 1 && false").Combined() timeout := 60 * time.Second if testEnv.DaemonPlatform() == "windows" { timeout = 180 * time.Second } id := strings.TrimSpace(string(out)) // update restart policy to on-failure:5 cli.DockerCmd(c, "update", "--restart=on-failure:5", id) cli.WaitExited(c, id, timeout) count := inspectField(c, id, "RestartCount") c.Assert(count, checker.Equals, "5") maximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") c.Assert(maximumRetryCount, checker.Equals, "5") } func (s *DockerSuite) TestUpdateRestartWithAutoRemoveFlag(c *check.C) { out := runSleepingContainer(c, "--rm") id := strings.TrimSpace(out) // update restart policy for an AutoRemove container cli.Docker(cli.Args("update", "--restart=always", id)).Assert(c, icmd.Expected{ ExitCode: 1, Err: "Restart policy cannot be updated because AutoRemove is enabled for the container", }) }
/* YUI container skin: overlays, panels, dialogs, tooltips.
 * Note: "* property" and "_property" declarations are the IE7/IE6 star and
 * underscore hacks and are intentional; do not "clean" them up. */

/* Overlay / panel base positioning and scrollbar management */
.yui-overlay,.yui-panel-container { visibility: hidden; position: absolute; z-index: 2; }
.yui-panel { position: relative; }
.yui-panel-container form { margin: 0; }
.mask { z-index: 1; display: none; position: absolute; top: 0; left: 0; right: 0; bottom: 0; }
.mask.block-scrollbars { overflow: auto; }
.masked select,.drag select,.hide-select select { _visibility: hidden; }
.yui-panel-container select { _visibility: inherit; }
.hide-scrollbars,.hide-scrollbars * { overflow: hidden; }
.hide-scrollbars select { display: none; }
.show-scrollbars { overflow: auto; }
.yui-panel-container.show-scrollbars,.yui-tt.show-scrollbars { overflow: visible; }
.yui-panel-container.show-scrollbars .underlay,.yui-tt.show-scrollbars .yui-tt-shadow { overflow: auto; }
.yui-panel-container.shadow .underlay.yui-force-redraw { padding-bottom: 1px; }
.yui-effect-fade .underlay,.yui-effect-fade .yui-tt-shadow { display: none; }
.yui-tt-shadow { position: absolute; }
.yui-override-padding { padding: 0 !important; }
.yui-panel-container .container-close { overflow: hidden; text-indent: -10000em; text-decoration: none; }
.yui-overlay.yui-force-redraw,.yui-panel-container.yui-force-redraw { margin-bottom: 1px; }

/* Modal mask appearance (filter: is the legacy IE opacity equivalent) */
.mask { background-color: #000; opacity: .25; filter: alpha(opacity = 25); }

/* Panel chrome: borders, header/body/footer */
.yui-panel-container { padding: 0 1px; * padding: 2px; }
.yui-panel { position: relative; left: 0; top: 0; border-style: solid; border-width: 1px 0; border-color: #808080; z-index: 1; * border-width: 1px; * zoom: 1; _zoom: normal; }
.yui-panel .hd,.yui-panel .bd,.yui-panel .ft { border-style: solid; border-width: 0 1px; border-color: #808080; margin: 0 -1px; * margin: 0; * border: 0; }
.yui-panel .hd { border-bottom: solid 1px #ccc; }
.yui-panel .bd,.yui-panel .ft { background-color: #F2F2F2; }
.yui-panel .hd { padding: 0 10px; font-size: 93%; line-height: 2; * line-height: 1.9; font-weight: bold; color: #000; background: url("../../images/2/yui_sprite.png") repeat-x 0 -200px; }
.yui-panel .bd { padding: 10px 15px; }
.yui-panel .ft { border-top: solid 1px #808080; padding: 5px 10px; font-size: 77%; }
.container-close { position: absolute; top: 5px; right: 6px; width: 25px; height: 15px; background: url("../../images/2/yui_sprite.png") no-repeat 0 -300px; cursor: pointer; }

/* Panel drop shadow / matte variants */
.yui-panel-container .underlay { right: -1px; left: -1px; }
.yui-panel-container.matte { padding: 9px 10px; background-color: #fff; }
.yui-panel-container.shadow { _padding: 2px 4px 0 2px; }
.yui-panel-container.shadow .underlay { position: absolute; top: 2px; left: -3px; right: -3px; bottom: -3px; * top: 4px; * left: -1px; * right: -1px; * bottom: -1px; _top: 0; _left: 0; _right: 0; _bottom: 0; _margin-top: 3px; _margin-left: -1px; background-color: #000; opacity: .12; filter: alpha(opacity = 12); }

/* Dialog footer button styling (default / disabled states) */
.yui-dialog .ft { border-top: none; padding: 10px; font-size: 100%; }
.yui-dialog .ft .button-group { display: block; text-align: right; }
.yui-dialog .ft button.default { font-weight: bold; }
.yui-dialog .ft span.default { border-color: #304369; background-position: 0 -1400px; }
.yui-dialog .ft span.default .first-child { border-color: #304369; }
.yui-dialog .ft span.default button { color: #fff; }
.yui-dialog .ft span.yui-button-disabled { background-position: 0 -1500px; border-color: #ccc; }
.yui-dialog .ft span.yui-button-disabled .first-child { border-color: #ccc; }
.yui-dialog .ft span.yui-button-disabled button { color: #a6a6a6; }

/* Simple-dialog icons: one sprite sheet, offsets select the glyph */
.yui-simple-dialog .bd .yui-icon { background: url("../../images/2/yui_sprite.png") no-repeat 0 0; width: 16px; height: 16px; margin-right: 10px; float: left; }
.yui-simple-dialog .bd span.blckicon { background-position: 0 -1100px; }
.yui-simple-dialog .bd span.alrticon { background-position: 0 -1050px; }
.yui-simple-dialog .bd span.hlpicon { background-position: 0 -1150px; }
.yui-simple-dialog .bd span.infoicon { background-position: 0 -1200px; }
.yui-simple-dialog .bd span.warnicon { background-position: 0 -1900px; }
.yui-simple-dialog .bd span.tipicon { background-position: 0 -1250px; }

/* Tooltip body and shadow */
.yui-tt .bd { position: relative; top: 0; left: 0; z-index: 1; color: #000; padding: 2px 5px; border-color: #D4C237 #A6982B #A6982B #A6982B; border-width: 1px; border-style: solid; background-color: #FFEE69; }
.yui-tt.show-scrollbars .bd { overflow: auto; }
.yui-tt-shadow { top: 2px; right: -3px; left: -3px; bottom: -3px; background-color: #000; }
.yui-tt-shadow-visible { opacity: .12; filter: alpha(opacity = 12); }

/* Table-options helper layout (app-specific additions) */
.table-options-row { clear: both; }
.table-options-key { float: left; }
.table-options-value { float: right; }
# Build rules for the tblgen-generated PCH attribute serialization headers
# (AttrPCHRead.inc / AttrPCHWrite.inc) used by clang's Serialization library.
CLANG_LEVEL := ../../..
# Attr.td lives next to the Basic headers, one directory up from here.
TD_SRC_DIR = $(PROJ_SRC_DIR)/../Basic
BUILT_SOURCES = AttrPCHRead.inc AttrPCHWrite.inc

TABLEGEN_INC_FILES_COMMON = 1

include $(CLANG_LEVEL)/Makefile

# Regenerate the PCH reader table whenever Attr.td or clang-tblgen changes.
$(ObjDir)/AttrPCHRead.inc.tmp : $(TD_SRC_DIR)/Attr.td $(CLANG_TBLGEN) \
                              $(ObjDir)/.dir
	$(Echo) "Building Clang PCH reader with tblgen"
	$(Verb) $(ClangTableGen) -gen-clang-attr-pch-read -o $(call SYSPATH, $@) \
		-I $(PROJ_SRC_DIR)/../../ $<

# Regenerate the PCH writer table whenever Attr.td or clang-tblgen changes.
$(ObjDir)/AttrPCHWrite.inc.tmp : $(TD_SRC_DIR)/Attr.td $(CLANG_TBLGEN) \
                              $(ObjDir)/.dir
	$(Echo) "Building Clang PCH writer with tblgen"
	$(Verb) $(ClangTableGen) -gen-clang-attr-pch-write -o $(call SYSPATH, $@) \
		-I $(PROJ_SRC_DIR)/../../ $<
// SPDX-License-Identifier: GPL-2.0-only /* * tboot.c: main implementation of helper functions used by kernel for * runtime support of Intel(R) Trusted Execution Technology * * Copyright (c) 2006-2009, Intel Corporation */ #include <linux/intel-iommu.h> #include <linux/init_task.h> #include <linux/spinlock.h> #include <linux/export.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/dmar.h> #include <linux/cpu.h> #include <linux/pfn.h> #include <linux/mm.h> #include <linux/tboot.h> #include <linux/debugfs.h> #include <asm/realmode.h> #include <asm/processor.h> #include <asm/bootparam.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/swiotlb.h> #include <asm/fixmap.h> #include <asm/proto.h> #include <asm/setup.h> #include <asm/e820/api.h> #include <asm/io.h> #include "../realmode/rm/wakeup.h" /* Global pointer to shared data; NULL means no measured launch. */ struct tboot *tboot __read_mostly; EXPORT_SYMBOL(tboot); /* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */ #define AP_WAIT_TIMEOUT 1 #undef pr_fmt #define pr_fmt(fmt) "tboot: " fmt static u8 tboot_uuid[16] __initdata = TBOOT_UUID; void __init tboot_probe(void) { /* Look for valid page-aligned address for shared page. */ if (!boot_params.tboot_addr) return; /* * also verify that it is mapped as we expect it before calling * set_fixmap(), to reduce chance of garbage value causing crash */ if (!e820__mapped_any(boot_params.tboot_addr, boot_params.tboot_addr, E820_TYPE_RESERVED)) { pr_warning("non-0 tboot_addr but it is not of type E820_TYPE_RESERVED\n"); return; } /* Map and check for tboot UUID. 
*/ set_fixmap(FIX_TBOOT_BASE, boot_params.tboot_addr); tboot = (struct tboot *)fix_to_virt(FIX_TBOOT_BASE); if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) { pr_warning("tboot at 0x%llx is invalid\n", boot_params.tboot_addr); tboot = NULL; return; } if (tboot->version < 5) { pr_warning("tboot version is invalid: %u\n", tboot->version); tboot = NULL; return; } pr_info("found shared page at phys addr 0x%llx:\n", boot_params.tboot_addr); pr_debug("version: %d\n", tboot->version); pr_debug("log_addr: 0x%08x\n", tboot->log_addr); pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry); pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base); pr_debug("tboot_size: 0x%x\n", tboot->tboot_size); } static pgd_t *tboot_pg_dir; static struct mm_struct tboot_mm = { .mm_rb = RB_ROOT, .pgd = swapper_pg_dir, .mm_users = ATOMIC_INIT(2), .mm_count = ATOMIC_INIT(1), .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem), .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), .mmlist = LIST_HEAD_INIT(init_mm.mmlist), }; static inline void switch_to_tboot_pt(void) { write_cr3(virt_to_phys(tboot_pg_dir)); } static int map_tboot_page(unsigned long vaddr, unsigned long pfn, pgprot_t prot) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; pgd = pgd_offset(&tboot_mm, vaddr); p4d = p4d_alloc(&tboot_mm, pgd, vaddr); if (!p4d) return -1; pud = pud_alloc(&tboot_mm, p4d, vaddr); if (!pud) return -1; pmd = pmd_alloc(&tboot_mm, pud, vaddr); if (!pmd) return -1; pte = pte_alloc_map(&tboot_mm, pmd, vaddr); if (!pte) return -1; set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); pte_unmap(pte); /* * PTI poisons low addresses in the kernel page tables in the * name of making them unusable for userspace. To execute * code at such a low address, the poison must be cleared. * * Note: 'pgd' actually gets set in p4d_alloc() _or_ * pud_alloc() depending on 4/5-level paging. 
*/ pgd->pgd &= ~_PAGE_NX; return 0; } static int map_tboot_pages(unsigned long vaddr, unsigned long start_pfn, unsigned long nr) { /* Reuse the original kernel mapping */ tboot_pg_dir = pgd_alloc(&tboot_mm); if (!tboot_pg_dir) return -1; for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) { if (map_tboot_page(vaddr, start_pfn, PAGE_KERNEL_EXEC)) return -1; } return 0; } static void tboot_create_trampoline(void) { u32 map_base, map_size; /* Create identity map for tboot shutdown code. */ map_base = PFN_DOWN(tboot->tboot_base); map_size = PFN_UP(tboot->tboot_size); if (map_tboot_pages(map_base << PAGE_SHIFT, map_base, map_size)) panic("tboot: Error mapping tboot pages (mfns) @ 0x%x, 0x%x\n", map_base, map_size); } #ifdef CONFIG_ACPI_SLEEP static void add_mac_region(phys_addr_t start, unsigned long size) { struct tboot_mac_region *mr; phys_addr_t end = start + size; if (tboot->num_mac_regions >= MAX_TB_MAC_REGIONS) panic("tboot: Too many MAC regions\n"); if (start && size) { mr = &tboot->mac_regions[tboot->num_mac_regions++]; mr->start = round_down(start, PAGE_SIZE); mr->size = round_up(end, PAGE_SIZE) - mr->start; } } static int tboot_setup_sleep(void) { int i; tboot->num_mac_regions = 0; for (i = 0; i < e820_table->nr_entries; i++) { if ((e820_table->entries[i].type != E820_TYPE_RAM) && (e820_table->entries[i].type != E820_TYPE_RESERVED_KERN)) continue; add_mac_region(e820_table->entries[i].addr, e820_table->entries[i].size); } tboot->acpi_sinfo.kernel_s3_resume_vector = real_mode_header->wakeup_start; return 0; } #else /* no CONFIG_ACPI_SLEEP */ static int tboot_setup_sleep(void) { /* S3 shutdown requested, but S3 not supported by the kernel... 
*/ BUG(); return -1; } #endif void tboot_shutdown(u32 shutdown_type) { void (*shutdown)(void); if (!tboot_enabled()) return; /* * if we're being called before the 1:1 mapping is set up then just * return and let the normal shutdown happen; this should only be * due to very early panic() */ if (!tboot_pg_dir) return; /* if this is S3 then set regions to MAC */ if (shutdown_type == TB_SHUTDOWN_S3) if (tboot_setup_sleep()) return; tboot->shutdown_type = shutdown_type; switch_to_tboot_pt(); shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry; shutdown(); /* should not reach here */ while (1) halt(); } static void tboot_copy_fadt(const struct acpi_table_fadt *fadt) { #define TB_COPY_GAS(tbg, g) \ tbg.space_id = g.space_id; \ tbg.bit_width = g.bit_width; \ tbg.bit_offset = g.bit_offset; \ tbg.access_width = g.access_width; \ tbg.address = g.address; TB_COPY_GAS(tboot->acpi_sinfo.pm1a_cnt_blk, fadt->xpm1a_control_block); TB_COPY_GAS(tboot->acpi_sinfo.pm1b_cnt_blk, fadt->xpm1b_control_block); TB_COPY_GAS(tboot->acpi_sinfo.pm1a_evt_blk, fadt->xpm1a_event_block); TB_COPY_GAS(tboot->acpi_sinfo.pm1b_evt_blk, fadt->xpm1b_event_block); /* * We need phys addr of waking vector, but can't use virt_to_phys() on * &acpi_gbl_FACS because it is ioremap'ed, so calc from FACS phys * addr. 
*/ tboot->acpi_sinfo.wakeup_vector = fadt->facs + offsetof(struct acpi_table_facs, firmware_waking_vector); } static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control) { static u32 acpi_shutdown_map[ACPI_S_STATE_COUNT] = { /* S0,1,2: */ -1, -1, -1, /* S3: */ TB_SHUTDOWN_S3, /* S4: */ TB_SHUTDOWN_S4, /* S5: */ TB_SHUTDOWN_S5 }; if (!tboot_enabled()) return 0; tboot_copy_fadt(&acpi_gbl_FADT); tboot->acpi_sinfo.pm1a_cnt_val = pm1a_control; tboot->acpi_sinfo.pm1b_cnt_val = pm1b_control; /* we always use the 32b wakeup vector */ tboot->acpi_sinfo.vector_width = 32; if (sleep_state >= ACPI_S_STATE_COUNT || acpi_shutdown_map[sleep_state] == -1) { pr_warning("unsupported sleep state 0x%x\n", sleep_state); return -1; } tboot_shutdown(acpi_shutdown_map[sleep_state]); return 0; } static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b) { if (!tboot_enabled()) return 0; pr_warning("tboot is not able to suspend on platforms with reduced hardware sleep (ACPIv5)"); return -ENODEV; } static atomic_t ap_wfs_count; static int tboot_wait_for_aps(int num_aps) { unsigned long timeout; timeout = AP_WAIT_TIMEOUT*HZ; while (atomic_read((atomic_t *)&tboot->num_in_wfs) != num_aps && timeout) { mdelay(1); timeout--; } if (timeout) pr_warning("tboot wait for APs timeout\n"); return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps); } static int tboot_dying_cpu(unsigned int cpu) { atomic_inc(&ap_wfs_count); if (num_online_cpus() == 1) { if (tboot_wait_for_aps(atomic_read(&ap_wfs_count))) return -EBUSY; } return 0; } #ifdef CONFIG_DEBUG_FS #define TBOOT_LOG_UUID { 0x26, 0x25, 0x19, 0xc0, 0x30, 0x6b, 0xb4, 0x4d, \ 0x4c, 0x84, 0xa3, 0xe9, 0x53, 0xb8, 0x81, 0x74 } #define TBOOT_SERIAL_LOG_ADDR 0x60000 #define TBOOT_SERIAL_LOG_SIZE 0x08000 #define LOG_MAX_SIZE_OFF 16 #define LOG_BUF_OFF 24 static uint8_t tboot_log_uuid[16] = TBOOT_LOG_UUID; static ssize_t tboot_log_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { void __iomem 
*log_base; u8 log_uuid[16]; u32 max_size; void *kbuf; int ret = -EFAULT; log_base = ioremap_nocache(TBOOT_SERIAL_LOG_ADDR, TBOOT_SERIAL_LOG_SIZE); if (!log_base) return ret; memcpy_fromio(log_uuid, log_base, sizeof(log_uuid)); if (memcmp(&tboot_log_uuid, log_uuid, sizeof(log_uuid))) goto err_iounmap; max_size = readl(log_base + LOG_MAX_SIZE_OFF); if (*ppos >= max_size) { ret = 0; goto err_iounmap; } if (*ppos + count > max_size) count = max_size - *ppos; kbuf = kmalloc(count, GFP_KERNEL); if (!kbuf) { ret = -ENOMEM; goto err_iounmap; } memcpy_fromio(kbuf, log_base + LOG_BUF_OFF + *ppos, count); if (copy_to_user(user_buf, kbuf, count)) goto err_kfree; *ppos += count; ret = count; err_kfree: kfree(kbuf); err_iounmap: iounmap(log_base); return ret; } static const struct file_operations tboot_log_fops = { .read = tboot_log_read, .llseek = default_llseek, }; #endif /* CONFIG_DEBUG_FS */ static __init int tboot_late_init(void) { if (!tboot_enabled()) return 0; tboot_create_trampoline(); atomic_set(&ap_wfs_count, 0); cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "x86/tboot:dying", NULL, tboot_dying_cpu); #ifdef CONFIG_DEBUG_FS debugfs_create_file("tboot_log", S_IRUSR, arch_debugfs_dir, NULL, &tboot_log_fops); #endif acpi_os_set_prepare_sleep(&tboot_sleep); acpi_os_set_prepare_extended_sleep(&tboot_extended_sleep); return 0; } late_initcall(tboot_late_init); /* * TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE) */ #define TXT_PUB_CONFIG_REGS_BASE 0xfed30000 #define TXT_PRIV_CONFIG_REGS_BASE 0xfed20000 /* # pages for each config regs space - used by fixmap */ #define NR_TXT_CONFIG_PAGES ((TXT_PUB_CONFIG_REGS_BASE - \ TXT_PRIV_CONFIG_REGS_BASE) >> PAGE_SHIFT) /* offsets from pub/priv config space */ #define TXTCR_HEAP_BASE 0x0300 #define TXTCR_HEAP_SIZE 0x0308 #define SHA1_SIZE 20 struct sha1_hash { u8 hash[SHA1_SIZE]; }; struct sinit_mle_data { u32 version; /* currently 6 */ struct sha1_hash bios_acm_id; u32 edx_senter_flags; u64 mseg_valid; struct 
sha1_hash sinit_hash; struct sha1_hash mle_hash; struct sha1_hash stm_hash; struct sha1_hash lcp_policy_hash; u32 lcp_policy_control; u32 rlp_wakeup_addr; u32 reserved; u32 num_mdrs; u32 mdrs_off; u32 num_vtd_dmars; u32 vtd_dmars_off; } __packed; struct acpi_table_header *tboot_get_dmar_table(struct acpi_table_header *dmar_tbl) { void *heap_base, *heap_ptr, *config; if (!tboot_enabled()) return dmar_tbl; /* * ACPI tables may not be DMA protected by tboot, so use DMAR copy * SINIT saved in SinitMleData in TXT heap (which is DMA protected) */ /* map config space in order to get heap addr */ config = ioremap(TXT_PUB_CONFIG_REGS_BASE, NR_TXT_CONFIG_PAGES * PAGE_SIZE); if (!config) return NULL; /* now map TXT heap */ heap_base = ioremap(*(u64 *)(config + TXTCR_HEAP_BASE), *(u64 *)(config + TXTCR_HEAP_SIZE)); iounmap(config); if (!heap_base) return NULL; /* walk heap to SinitMleData */ /* skip BiosData */ heap_ptr = heap_base + *(u64 *)heap_base; /* skip OsMleData */ heap_ptr += *(u64 *)heap_ptr; /* skip OsSinitData */ heap_ptr += *(u64 *)heap_ptr; /* now points to SinitMleDataSize; set to SinitMleData */ heap_ptr += sizeof(u64); /* get addr of DMAR table */ dmar_tbl = (struct acpi_table_header *)(heap_ptr + ((struct sinit_mle_data *)heap_ptr)->vtd_dmars_off - sizeof(u64)); /* don't unmap heap because dmar.c needs access to this */ return dmar_tbl; } int tboot_force_iommu(void) { if (!tboot_enabled()) return 0; if (intel_iommu_tboot_noforce) return 1; if (no_iommu || swiotlb || dmar_disabled) pr_warning("Forcing Intel-IOMMU to enabled\n"); dmar_disabled = 0; #ifdef CONFIG_SWIOTLB swiotlb = 0; #endif no_iommu = 0; return 1; }
// Unit tests for the v1 registry session/endpoint code, exercised against
// the in-process mock registry started by this package's test helpers
// (makeURL, makeIndex, assertEqual, ...).
package registry

import (
	"fmt"
	"net/http"
	"net/http/httputil"
	"net/url"
	"strings"
	"testing"

	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/reference"
	"github.com/docker/engine-api/types"
	registrytypes "github.com/docker/engine-api/types/registry"
)

// token is the fake auth token the mock v1 registry accepts; it is injected
// directly into the session's transport by spawnTestRegistrySession.
var (
	token = []string{"fake-token"}
)

const (
	imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d"
	REPO    = "foo42/bar"
)

// spawnTestRegistrySession builds a Session against the mock v1 registry,
// with request/response dumping (debugTransport) and the fake token
// pre-seeded so authenticated endpoints work.
func spawnTestRegistrySession(t *testing.T) *Session {
	authConfig := &types.AuthConfig{}
	endpoint, err := NewV1Endpoint(makeIndex("/v1/"), "", nil)
	if err != nil {
		t.Fatal(err)
	}
	userAgent := "docker test client"
	var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log}
	tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(userAgent, nil)...)
	client := HTTPClient(tr)
	r, err := NewSession(client, authConfig, endpoint)
	if err != nil {
		t.Fatal(err)
	}
	// In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true`
	// header while authenticating, in order to retrieve a token that can be later used to
	// perform authenticated actions.
	//
	// The mock v1 registry does not support that, (TODO(tiborvass): support it), instead,
	// it will consider authenticated any request with the header `X-Docker-Token: fake-token`.
	//
	// Because we know that the client's transport is an `*authTransport` we simply cast it,
	// in order to set the internal cached token to the fake token, and thus send that fake token
	// upon every subsequent requests.
	r.client.Transport.(*authTransport).token = token
	return r
}

// TestPingRegistryEndpoint checks the Standalone flag reported by Ping for
// mock HTTP/HTTPS indexes versus the public index.
func TestPingRegistryEndpoint(t *testing.T) {
	testPing := func(index *registrytypes.IndexInfo, expectedStandalone bool, assertMessage string) {
		ep, err := NewV1Endpoint(index, "", nil)
		if err != nil {
			t.Fatal(err)
		}
		regInfo, err := ep.Ping()
		if err != nil {
			t.Fatal(err)
		}

		assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage)
	}

	testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)")
	testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)")
	testPing(makePublicIndex(), false, "Expected standalone to be false for public index")
}

// TestEndpoint verifies V1 endpoint URL expansion ("/v1/" suffixing) and the
// secure/insecure validation errors for HTTP vs HTTPS indexes.
func TestEndpoint(t *testing.T) {
	// Simple wrapper to fail test if err != nil
	expandEndpoint := func(index *registrytypes.IndexInfo) *V1Endpoint {
		endpoint, err := NewV1Endpoint(index, "", nil)
		if err != nil {
			t.Fatal(err)
		}
		return endpoint
	}

	assertInsecureIndex := func(index *registrytypes.IndexInfo) {
		index.Secure = true
		_, err := NewV1Endpoint(index, "", nil)
		assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index")
		assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index")
		index.Secure = false
	}

	assertSecureIndex := func(index *registrytypes.IndexInfo) {
		index.Secure = true
		_, err := NewV1Endpoint(index, "", nil)
		assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index")
		assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index")
		index.Secure = false
	}

	index := &registrytypes.IndexInfo{}
	index.Name = makeURL("/v1/")
	endpoint := expandEndpoint(index)
	assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name)
	assertInsecureIndex(index)

	index.Name = makeURL("")
	endpoint = expandEndpoint(index)
	assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/")
	assertInsecureIndex(index)

	httpURL := makeURL("")
	index.Name = strings.SplitN(httpURL, "://", 2)[1]
	endpoint = expandEndpoint(index)
	assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/")
	assertInsecureIndex(index)

	index.Name = makeHTTPSURL("/v1/")
	endpoint = expandEndpoint(index)
	assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name)
	assertSecureIndex(index)

	index.Name = makeHTTPSURL("")
	endpoint = expandEndpoint(index)
	assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/")
	assertSecureIndex(index)

	httpsURL := makeHTTPSURL("")
	index.Name = strings.SplitN(httpsURL, "://", 2)[1]
	endpoint = expandEndpoint(index)
	assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/")
	assertSecureIndex(index)

	// Addresses that should be rejected outright by endpoint validation.
	badEndpoints := []string{
		"http://127.0.0.1/v1/",
		"https://127.0.0.1/v1/",
		"http://127.0.0.1",
		"https://127.0.0.1",
		"127.0.0.1",
	}
	for _, address := range badEndpoints {
		index.Name = address
		_, err := NewV1Endpoint(index, "", nil)
		checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint")
	}
}

// TestGetRemoteHistory checks the fixed two-image ancestry served by the mock.
func TestGetRemoteHistory(t *testing.T) {
	r := spawnTestRegistrySession(t)
	hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/"))
	if err != nil {
		t.Fatal(err)
	}
	assertEqual(t, len(hist), 2, "Expected 2 images in history")
	// NOTE(review): message is missing a space before "as" — cosmetic only.
	assertEqual(t, hist[0], imageID, "Expected "+imageID+"as first ancestry")
	assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
		"Unexpected second ancestry")
}

// TestLookupRemoteImage: known image resolves, unknown image errors.
func TestLookupRemoteImage(t *testing.T) {
	r := spawnTestRegistrySession(t)
	err := r.LookupRemoteImage(imageID, makeURL("/v1/"))
	assertEqual(t, err, nil, "Expected error of remote lookup to nil")
	if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil {
		t.Fatal("Expected error of remote lookup to not nil")
	}
}

// TestGetRemoteImageJSON checks the mock image's JSON payload and size.
func TestGetRemoteImageJSON(t *testing.T) {
	r := spawnTestRegistrySession(t)
	json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/"))
	if err != nil {
		t.Fatal(err)
	}
	assertEqual(t, size, int64(154), "Expected size 154")
	if len(json) == 0 {
		t.Fatal("Expected non-empty json")
	}

	_, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"))
	if err == nil {
		t.Fatal("Expected image not found error")
	}
}

// TestGetRemoteImageLayer: layer download succeeds for a known image only.
func TestGetRemoteImageLayer(t *testing.T) {
	r := spawnTestRegistrySession(t)
	data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0)
	if err != nil {
		t.Fatal(err)
	}
	if data == nil {
		t.Fatal("Expected non-nil data result")
	}

	_, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0)
	if err == nil {
		t.Fatal("Expected image not found error")
	}
}

// TestGetRemoteTag resolves a single tag; a bogus repo yields ErrRepoNotFound.
func TestGetRemoteTag(t *testing.T) {
	r := spawnTestRegistrySession(t)
	repoRef, err := reference.ParseNamed(REPO)
	if err != nil {
		t.Fatal(err)
	}
	tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, repoRef, "test")
	if err != nil {
		t.Fatal(err)
	}
	assertEqual(t, tag, imageID, "Expected tag test to map to "+imageID)

	bazRef, err := reference.ParseNamed("foo42/baz")
	if err != nil {
		t.Fatal(err)
	}
	_, err = r.GetRemoteTag([]string{makeURL("/v1/")}, bazRef, "foo")
	if err != ErrRepoNotFound {
		t.Fatal("Expected ErrRepoNotFound error when fetching tag for bogus repo")
	}
}

// TestGetRemoteTags lists all tags; a bogus repo yields ErrRepoNotFound.
func TestGetRemoteTags(t *testing.T) {
	r := spawnTestRegistrySession(t)
	repoRef, err := reference.ParseNamed(REPO)
	if err != nil {
		t.Fatal(err)
	}
	tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, repoRef)
	if err != nil {
		t.Fatal(err)
	}
	assertEqual(t, len(tags), 2, "Expected two tags")
	assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID)
	assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID)

	bazRef, err := reference.ParseNamed("foo42/baz")
	if err != nil {
		t.Fatal(err)
	}
	_, err = r.GetRemoteTags([]string{makeURL("/v1/")}, bazRef)
	if err != ErrRepoNotFound {
		t.Fatal("Expected ErrRepoNotFound error when fetching tags for bogus repo")
	}
}

// TestGetRepositoryData checks the image list and the two endpoints the mock
// registry advertises for the test repository.
func TestGetRepositoryData(t *testing.T) {
	r := spawnTestRegistrySession(t)
	parsedURL, err := url.Parse(makeURL("/v1/"))
	if err != nil {
		t.Fatal(err)
	}
	host := "http://" + parsedURL.Host + "/v1/"
	repoRef, err := reference.ParseNamed(REPO)
	if err != nil {
		t.Fatal(err)
	}
	data, err := r.GetRepositoryData(repoRef)
	if err != nil {
		t.Fatal(err)
	}
	assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList")
	assertEqual(t, len(data.Endpoints), 2,
		fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints)))
	assertEqual(t, data.Endpoints[0], host,
		fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0]))
	// NOTE(review): message says "first endpoint" but this asserts the second
	// endpoint — cosmetic only.
	assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/",
		fmt.Sprintf("Expected first endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1]))
}

// TestPushImageJSONRegistry pushes image metadata to the mock registry.
func TestPushImageJSONRegistry(t *testing.T) {
	r := spawnTestRegistrySession(t)
	imgData := &ImgData{
		ID:       "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
		Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37",
	}

	err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"))
	if err != nil {
		t.Fatal(err)
	}
}

// TestPushImageLayerRegistry pushes an (empty) layer to the mock registry.
func TestPushImageLayerRegistry(t *testing.T) {
	r := spawnTestRegistrySession(t)
	layer := strings.NewReader("")
	_, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{})
	if err != nil {
		t.Fatal(err)
	}
}

// TestParseRepositoryInfo covers official/library shorthand, explicit index
// hostnames, ports, and IP-based registries.
func TestParseRepositoryInfo(t *testing.T) {
	type staticRepositoryInfo struct {
		Index         *registrytypes.IndexInfo
		RemoteName    string
		CanonicalName string
		LocalName     string
		Official      bool
	}

	expectedRepoInfos := map[string]staticRepositoryInfo{
		"fooo/bar": {
			Index: &registrytypes.IndexInfo{
				Name:     IndexName,
				Official: true,
			},
			RemoteName:    "fooo/bar",
			LocalName:     "fooo/bar",
			CanonicalName: "docker.io/fooo/bar",
			Official:      false,
		},
		"library/ubuntu": {
			Index: &registrytypes.IndexInfo{
				Name:     IndexName,
				Official: true,
			},
			RemoteName:    "library/ubuntu",
			LocalName:     "ubuntu",
			CanonicalName: "docker.io/library/ubuntu",
			Official:      true,
		},
		"nonlibrary/ubuntu": {
			Index: &registrytypes.IndexInfo{
				Name:     IndexName,
				Official: true,
			},
			RemoteName:    "nonlibrary/ubuntu",
			LocalName:     "nonlibrary/ubuntu",
			CanonicalName: "docker.io/nonlibrary/ubuntu",
			Official:      false,
		},
		"ubuntu": {
			Index: &registrytypes.IndexInfo{
				Name:     IndexName,
				Official: true,
			},
			RemoteName:    "library/ubuntu",
			LocalName:     "ubuntu",
			CanonicalName: "docker.io/library/ubuntu",
			Official:      true,
		},
		"other/library": {
			Index: &registrytypes.IndexInfo{
				Name:     IndexName,
				Official: true,
			},
			RemoteName:    "other/library",
			LocalName:     "other/library",
			CanonicalName: "docker.io/other/library",
			Official:      false,
		},
		"127.0.0.1:8000/private/moonbase": {
			Index: &registrytypes.IndexInfo{
				Name:     "127.0.0.1:8000",
				Official: false,
			},
			RemoteName:    "private/moonbase",
			LocalName:     "127.0.0.1:8000/private/moonbase",
			CanonicalName: "127.0.0.1:8000/private/moonbase",
			Official:      false,
		},
		"127.0.0.1:8000/privatebase": {
			Index: &registrytypes.IndexInfo{
				Name:     "127.0.0.1:8000",
				Official: false,
			},
			RemoteName:    "privatebase",
			LocalName:     "127.0.0.1:8000/privatebase",
			CanonicalName: "127.0.0.1:8000/privatebase",
			Official:      false,
		},
		"localhost:8000/private/moonbase": {
			Index: &registrytypes.IndexInfo{
				Name:     "localhost:8000",
				Official: false,
			},
			RemoteName:    "private/moonbase",
			LocalName:     "localhost:8000/private/moonbase",
			CanonicalName: "localhost:8000/private/moonbase",
			Official:      false,
		},
		"localhost:8000/privatebase": {
			Index: &registrytypes.IndexInfo{
				Name:     "localhost:8000",
				Official: false,
			},
			RemoteName:    "privatebase",
			LocalName:     "localhost:8000/privatebase",
			CanonicalName: "localhost:8000/privatebase",
			Official:      false,
		},
		"example.com/private/moonbase": {
			Index: &registrytypes.IndexInfo{
				Name:     "example.com",
				Official: false,
			},
			RemoteName:    "private/moonbase",
			LocalName:     "example.com/private/moonbase",
			CanonicalName: "example.com/private/moonbase",
			Official:      false,
		},
		"example.com/privatebase": {
			Index: &registrytypes.IndexInfo{
				Name:     "example.com",
				Official: false,
			},
			RemoteName:    "privatebase",
			LocalName:     "example.com/privatebase",
			CanonicalName: "example.com/privatebase",
			Official:      false,
		},
		"example.com:8000/private/moonbase": {
			Index: &registrytypes.IndexInfo{
				Name:     "example.com:8000",
				Official: false,
			},
			RemoteName:    "private/moonbase",
			LocalName:     "example.com:8000/private/moonbase",
			CanonicalName: "example.com:8000/private/moonbase",
			Official:      false,
		},
		"example.com:8000/privatebase": {
			Index: &registrytypes.IndexInfo{
				Name:     "example.com:8000",
				Official: false,
			},
			RemoteName:    "privatebase",
			LocalName:     "example.com:8000/privatebase",
			CanonicalName: "example.com:8000/privatebase",
			Official:      false,
		},
		"localhost/private/moonbase": {
			Index: &registrytypes.IndexInfo{
				Name:     "localhost",
				Official: false,
			},
			RemoteName:    "private/moonbase",
			LocalName:     "localhost/private/moonbase",
			CanonicalName: "localhost/private/moonbase",
			Official:      false,
		},
		"localhost/privatebase": {
			Index: &registrytypes.IndexInfo{
				Name:     "localhost",
				Official: false,
			},
			RemoteName:    "privatebase",
			LocalName:     "localhost/privatebase",
			CanonicalName: "localhost/privatebase",
			Official:      false,
		},
		IndexName + "/public/moonbase": {
			Index: &registrytypes.IndexInfo{
				Name:     IndexName,
				Official: true,
			},
			RemoteName:    "public/moonbase",
			LocalName:     "public/moonbase",
			CanonicalName: "docker.io/public/moonbase",
			Official:      false,
		},
		"index." + IndexName + "/public/moonbase": {
			Index: &registrytypes.IndexInfo{
				Name:     IndexName,
				Official: true,
			},
			RemoteName:    "public/moonbase",
			LocalName:     "public/moonbase",
			CanonicalName: "docker.io/public/moonbase",
			Official:      false,
		},
		"ubuntu-12.04-base": {
			Index: &registrytypes.IndexInfo{
				Name:     IndexName,
				Official: true,
			},
			RemoteName:    "library/ubuntu-12.04-base",
			LocalName:     "ubuntu-12.04-base",
			CanonicalName: "docker.io/library/ubuntu-12.04-base",
			Official:      true,
		},
		IndexName + "/ubuntu-12.04-base": {
			Index: &registrytypes.IndexInfo{
				Name:     IndexName,
				Official: true,
			},
			RemoteName:    "library/ubuntu-12.04-base",
			LocalName:     "ubuntu-12.04-base",
			CanonicalName: "docker.io/library/ubuntu-12.04-base",
			Official:      true,
		},
		"index." + IndexName + "/ubuntu-12.04-base": {
			Index: &registrytypes.IndexInfo{
				Name:     IndexName,
				Official: true,
			},
			RemoteName:    "library/ubuntu-12.04-base",
			LocalName:     "ubuntu-12.04-base",
			CanonicalName: "docker.io/library/ubuntu-12.04-base",
			Official:      true,
		},
	}

	for reposName, expectedRepoInfo := range expectedRepoInfos {
		named, err := reference.WithName(reposName)
		if err != nil {
			t.Error(err)
		}

		repoInfo, err := ParseRepositoryInfo(named)
		if err != nil {
			t.Error(err)
		} else {
			checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName)
			checkEqual(t, repoInfo.RemoteName(), expectedRepoInfo.RemoteName, reposName)
			checkEqual(t, repoInfo.Name(), expectedRepoInfo.LocalName, reposName)
			checkEqual(t, repoInfo.FullName(), expectedRepoInfo.CanonicalName, reposName)
			checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName)
			checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName)
		}
	}
}

// TestNewIndexInfo checks index resolution (official flag, secure flag,
// mirrors) across default, mirrored, and CIDR-insecure configurations.
func TestNewIndexInfo(t *testing.T) {
	testIndexInfo := func(config *serviceConfig, expectedIndexInfos map[string]*registrytypes.IndexInfo) {
		for indexName, expectedIndexInfo := range expectedIndexInfos {
			index, err := newIndexInfo(config, indexName)
			if err != nil {
				t.Fatal(err)
			} else {
				checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name")
				checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official")
				checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure")
				checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors")
			}
		}
	}

	config := newServiceConfig(ServiceOptions{})
	noMirrors := []string{}
	expectedIndexInfos := map[string]*registrytypes.IndexInfo{
		IndexName: {
			Name:     IndexName,
			Official: true,
			Secure:   true,
			Mirrors:  noMirrors,
		},
		"index." + IndexName: {
			Name:     IndexName,
			Official: true,
			Secure:   true,
			Mirrors:  noMirrors,
		},
		"example.com": {
			Name:     "example.com",
			Official: false,
			Secure:   true,
			Mirrors:  noMirrors,
		},
		"127.0.0.1:5000": {
			Name:     "127.0.0.1:5000",
			Official: false,
			Secure:   false,
			Mirrors:  noMirrors,
		},
	}
	testIndexInfo(config, expectedIndexInfos)

	publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"}
	config = makeServiceConfig(publicMirrors, []string{"example.com"})

	expectedIndexInfos = map[string]*registrytypes.IndexInfo{
		IndexName: {
			Name:     IndexName,
			Official: true,
			Secure:   true,
			Mirrors:  publicMirrors,
		},
		"index." + IndexName: {
			Name:     IndexName,
			Official: true,
			Secure:   true,
			Mirrors:  publicMirrors,
		},
		"example.com": {
			Name:     "example.com",
			Official: false,
			Secure:   false,
			Mirrors:  noMirrors,
		},
		"example.com:5000": {
			Name:     "example.com:5000",
			Official: false,
			Secure:   true,
			Mirrors:  noMirrors,
		},
		"127.0.0.1": {
			Name:     "127.0.0.1",
			Official: false,
			Secure:   false,
			Mirrors:  noMirrors,
		},
		"127.0.0.1:5000": {
			Name:     "127.0.0.1:5000",
			Official: false,
			Secure:   false,
			Mirrors:  noMirrors,
		},
		"other.com": {
			Name:     "other.com",
			Official: false,
			Secure:   true,
			Mirrors:  noMirrors,
		},
	}
	testIndexInfo(config, expectedIndexInfos)

	config = makeServiceConfig(nil, []string{"42.42.0.0/16"})
	expectedIndexInfos = map[string]*registrytypes.IndexInfo{
		"example.com": {
			Name:     "example.com",
			Official: false,
			Secure:   false,
			Mirrors:  noMirrors,
		},
		"example.com:5000": {
			Name:     "example.com:5000",
			Official: false,
			Secure:   false,
			Mirrors:  noMirrors,
		},
		"127.0.0.1": {
			Name:     "127.0.0.1",
			Official: false,
			Secure:   false,
			Mirrors:  noMirrors,
		},
		"127.0.0.1:5000": {
			Name:     "127.0.0.1:5000",
			Official: false,
			Secure:   false,
			Mirrors:  noMirrors,
		},
		"other.com": {
			Name:     "other.com",
			Official: false,
			Secure:   true,
			Mirrors:  noMirrors,
		},
	}
	testIndexInfo(config, expectedIndexInfos)
}

// TestMirrorEndpointLookup: mirrors are consulted for pulls but never pushes.
func TestMirrorEndpointLookup(t *testing.T) {
	containsMirror := func(endpoints []APIEndpoint) bool {
		for _, pe := range endpoints {
			if pe.URL.Host == "my.mirror" {
				return true
			}
		}
		return false
	}
	s := DefaultService{config: makeServiceConfig([]string{"my.mirror"}, nil)}

	imageName, err := reference.WithName(IndexName + "/test/image")
	if err != nil {
		t.Error(err)
	}
	pushAPIEndpoints, err := s.LookupPushEndpoints(imageName.Hostname())
	if err != nil {
		t.Fatal(err)
	}
	if containsMirror(pushAPIEndpoints) {
		t.Fatal("Push endpoint should not contain mirror")
	}

	pullAPIEndpoints, err := s.LookupPullEndpoints(imageName.Hostname())
	if err != nil {
		t.Fatal(err)
	}
	if !containsMirror(pullAPIEndpoints) {
		t.Fatal("Pull endpoint should contain mirror")
	}
}

// TestPushRegistryTag tags an image on the mock registry.
func TestPushRegistryTag(t *testing.T) {
	r := spawnTestRegistrySession(t)
	repoRef, err := reference.ParseNamed(REPO)
	if err != nil {
		t.Fatal(err)
	}
	err = r.PushRegistryTag(repoRef, imageID, "stable", makeURL("/v1/"))
	if err != nil {
		t.Fatal(err)
	}
}

// TestPushImageJSONIndex exercises both the initial index push and the
// validation push (validate=true with known registry endpoints).
func TestPushImageJSONIndex(t *testing.T) {
	r := spawnTestRegistrySession(t)
	imgData := []*ImgData{
		{
			ID:       "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
			Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37",
		},
		{
			ID:       "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d",
			Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2",
		},
	}
	repoRef, err := reference.ParseNamed(REPO)
	if err != nil {
		t.Fatal(err)
	}
	repoData, err := r.PushImageJSONIndex(repoRef, imgData, false, nil)
	if err != nil {
		t.Fatal(err)
	}
	if repoData == nil {
		t.Fatal("Expected RepositoryData object")
	}
	repoData, err = r.PushImageJSONIndex(repoRef, imgData, true, []string{r.indexEndpoint.String()})
	if err != nil {
		t.Fatal(err)
	}
	if repoData == nil {
		t.Fatal("Expected RepositoryData object")
	}
}

// TestSearchRepositories checks the canned search result from the mock.
func TestSearchRepositories(t *testing.T) {
	r := spawnTestRegistrySession(t)
	results, err := r.SearchRepositories("fakequery", 25)
	if err != nil {
		t.Fatal(err)
	}
	if results == nil {
		t.Fatal("Expected non-nil SearchResults object")
	}
	assertEqual(t, results.NumResults, 1, "Expected 1 search results")
	assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query")
	assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars")
}

// TestTrustedLocation: only HTTPS docker.com/docker.io hosts are trusted.
func TestTrustedLocation(t *testing.T) {
	for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} {
		req, _ := http.NewRequest("GET", url, nil)
		if trustedLocation(req) == true {
			t.Fatalf("'%s' shouldn't be detected as a trusted location", url)
		}
	}

	for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} {
		req, _ := http.NewRequest("GET", url, nil)
		if trustedLocation(req) == false {
			t.Fatalf("'%s' should be detected as a trusted location", url)
		}
	}
}

// TestAddRequiredHeadersToRedirectedRequests: Authorization is stripped when
// redirecting to an untrusted location, preserved between trusted ones.
func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) {
	for _, urls := range [][]string{
		{"http://docker.io", "https://docker.com"},
		{"https://foo.docker.io:7777", "http://bar.docker.com"},
		{"https://foo.docker.io", "https://example.com"},
	} {
		reqFrom, _ := http.NewRequest("GET", urls[0], nil)
		reqFrom.Header.Add("Content-Type", "application/json")
		reqFrom.Header.Add("Authorization", "super_secret")
		reqTo, _ := http.NewRequest("GET", urls[1], nil)

		addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom})

		if len(reqTo.Header) != 1 {
			t.Fatalf("Expected 1 headers, got %d", len(reqTo.Header))
		}

		if reqTo.Header.Get("Content-Type") != "application/json" {
			t.Fatal("'Content-Type' should be 'application/json'")
		}

		if reqTo.Header.Get("Authorization") != "" {
			t.Fatal("'Authorization' should be empty")
		}
	}

	for _, urls := range [][]string{
		{"https://docker.io", "https://docker.com"},
		{"https://foo.docker.io:7777", "https://bar.docker.com"},
	} {
		reqFrom, _ := http.NewRequest("GET", urls[0], nil)
		reqFrom.Header.Add("Content-Type", "application/json")
		reqFrom.Header.Add("Authorization", "super_secret")
		reqTo, _ := http.NewRequest("GET", urls[1], nil)

		addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom})

		if len(reqTo.Header) != 2 {
			t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header))
		}

		if reqTo.Header.Get("Content-Type") != "application/json" {
			t.Fatal("'Content-Type' should be 'application/json'")
		}

		if reqTo.Header.Get("Authorization") != "super_secret" {
			t.Fatal("'Authorization' should be 'super_secret'")
		}
	}
}

// TestIsSecureIndex: table-driven check of hostname/IP/CIDR insecure-registry
// matching, including localhost defaults and unresolvable hostnames.
func TestIsSecureIndex(t *testing.T) {
	tests := []struct {
		addr               string
		insecureRegistries []string
		expected           bool
	}{
		{IndexName, nil, true},
		{"example.com", []string{}, true},
		{"example.com", []string{"example.com"}, false},
		{"localhost", []string{"localhost:5000"}, false},
		{"localhost:5000", []string{"localhost:5000"}, false},
		{"localhost", []string{"example.com"}, false},
		{"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false},
		{"localhost", nil, false},
		{"localhost:5000", nil, false},
		{"127.0.0.1", nil, false},
		{"localhost", []string{"example.com"}, false},
		{"127.0.0.1", []string{"example.com"}, false},
		{"example.com", nil, true},
		{"example.com", []string{"example.com"}, false},
		{"127.0.0.1", []string{"example.com"}, false},
		{"127.0.0.1:5000", []string{"example.com"}, false},
		{"example.com:5000", []string{"42.42.0.0/16"}, false},
		{"example.com", []string{"42.42.0.0/16"}, false},
		{"example.com:5000", []string{"42.42.42.42/8"}, false},
		{"127.0.0.1:5000", []string{"127.0.0.0/8"}, false},
		{"42.42.42.42:5000", []string{"42.1.1.1/8"}, false},
		{"invalid.domain.com", []string{"42.42.0.0/16"}, true},
		{"invalid.domain.com", []string{"invalid.domain.com"}, false},
		{"invalid.domain.com:5000", []string{"invalid.domain.com"}, true},
		{"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false},
	}
	for _, tt := range tests {
		config := makeServiceConfig(nil, tt.insecureRegistries)
		if sec := isSecureIndex(config, tt.addr); sec != tt.expected {
			t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec)
		}
	}
}

// debugTransport wraps a RoundTripper and logs wire-level request/response
// dumps (headers only) through the provided log function.
type debugTransport struct {
	http.RoundTripper
	log func(...interface{})
}

func (tr debugTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	dump, err := httputil.DumpRequestOut(req, false)
	if err != nil {
		tr.log("could not dump request")
	}
	tr.log(string(dump))

	resp, err := tr.RoundTripper.RoundTrip(req)
	if err != nil {
		return nil, err
	}

	dump, err = httputil.DumpResponse(resp, false)
	if err != nil {
		tr.log("could not dump response")
	}
	tr.log(string(dump))
	return resp, err
}
/* Generated SystemJS (System.register) bundle of Angular2 mock/testing modules — machine-produced transpiler output; edit the TypeScript sources, not this file. */ "format register"; /* MockAnimationBuilder / MockAnimation: animations never auto-complete; tests drive them via wait()/flush(). */ System.register("angular2/src/mock/animation_builder_mock", ["angular2/src/core/di", "angular2/src/animate/animation_builder", "angular2/src/animate/css_animation_builder", "angular2/src/animate/animation", "angular2/src/animate/browser_details"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var __extends = (this && this.__extends) || function(d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; function __() { this.constructor = d; } d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); }; var __decorate = (this && this.__decorate) || function(decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? 
d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function(k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var di_1 = require("angular2/src/core/di"); var animation_builder_1 = require("angular2/src/animate/animation_builder"); var css_animation_builder_1 = require("angular2/src/animate/css_animation_builder"); var animation_1 = require("angular2/src/animate/animation"); var browser_details_1 = require("angular2/src/animate/browser_details"); var MockAnimationBuilder = (function(_super) { __extends(MockAnimationBuilder, _super); function MockAnimationBuilder() { _super.call(this, null); } MockAnimationBuilder.prototype.css = function() { return new MockCssAnimationBuilder(); }; MockAnimationBuilder = __decorate([di_1.Injectable(), __metadata('design:paramtypes', [])], MockAnimationBuilder); return MockAnimationBuilder; }(animation_builder_1.AnimationBuilder)); exports.MockAnimationBuilder = MockAnimationBuilder; var MockCssAnimationBuilder = (function(_super) { __extends(MockCssAnimationBuilder, _super); function MockCssAnimationBuilder() { _super.call(this, null); } MockCssAnimationBuilder.prototype.start = function(element) { return new MockAnimation(element, this.data); }; return MockCssAnimationBuilder; }(css_animation_builder_1.CssAnimationBuilder)); var MockBrowserAbstraction = (function(_super) { __extends(MockBrowserAbstraction, _super); function MockBrowserAbstraction() { _super.apply(this, arguments); } MockBrowserAbstraction.prototype.doesElapsedTimeIncludesDelay = function() { this.elapsedTimeIncludesDelay = false; }; return MockBrowserAbstraction; }(browser_details_1.BrowserDetails)); var MockAnimation = (function(_super) { __extends(MockAnimation, _super); function MockAnimation(element, data) { _super.call(this, element, data, new MockBrowserAbstraction()); } 
MockAnimation.prototype.wait = function(callback) { this._callback = callback; }; MockAnimation.prototype.flush = function() { this._callback(0); this._callback = null; }; return MockAnimation; }(animation_1.Animation)); global.define = __define; return module.exports; }); /* MockDirectiveResolver: lets tests override a directive's providers/viewProviders before metadata resolution. */ System.register("angular2/src/mock/directive_resolver_mock", ["angular2/src/core/di", "angular2/src/facade/collection", "angular2/src/facade/lang", "angular2/src/core/metadata", "angular2/src/core/linker/directive_resolver"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var __extends = (this && this.__extends) || function(d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; function __() { this.constructor = d; } d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); }; var __decorate = (this && this.__decorate) || function(decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? 
d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function(k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var di_1 = require("angular2/src/core/di"); var collection_1 = require("angular2/src/facade/collection"); var lang_1 = require("angular2/src/facade/lang"); var metadata_1 = require("angular2/src/core/metadata"); var directive_resolver_1 = require("angular2/src/core/linker/directive_resolver"); var MockDirectiveResolver = (function(_super) { __extends(MockDirectiveResolver, _super); function MockDirectiveResolver() { _super.apply(this, arguments); this._providerOverrides = new collection_1.Map(); this.viewProviderOverrides = new collection_1.Map(); } MockDirectiveResolver.prototype.resolve = function(type) { var dm = _super.prototype.resolve.call(this, type); var providerOverrides = this._providerOverrides.get(type); var viewProviderOverrides = this.viewProviderOverrides.get(type); var providers = dm.providers; if (lang_1.isPresent(providerOverrides)) { providers = dm.providers.concat(providerOverrides); } if (dm instanceof metadata_1.ComponentMetadata) { var viewProviders = dm.viewProviders; if (lang_1.isPresent(viewProviderOverrides)) { viewProviders = dm.viewProviders.concat(viewProviderOverrides); } return new metadata_1.ComponentMetadata({ selector: dm.selector, inputs: dm.inputs, outputs: dm.outputs, host: dm.host, exportAs: dm.exportAs, moduleId: dm.moduleId, queries: dm.queries, changeDetection: dm.changeDetection, providers: providers, viewProviders: viewProviders }); } return new metadata_1.DirectiveMetadata({ selector: dm.selector, inputs: dm.inputs, outputs: dm.outputs, host: dm.host, providers: providers, exportAs: dm.exportAs, queries: dm.queries }); }; MockDirectiveResolver.prototype.setBindingsOverride = function(type, bindings) { this._providerOverrides.set(type, bindings); 
}; MockDirectiveResolver.prototype.setViewBindingsOverride = function(type, viewBindings) { this.viewProviderOverrides.set(type, viewBindings); }; MockDirectiveResolver.prototype.setProvidersOverride = function(type, providers) { this._providerOverrides.set(type, providers); }; MockDirectiveResolver.prototype.setViewProvidersOverride = function(type, viewProviders) { this.viewProviderOverrides.set(type, viewProviders); }; MockDirectiveResolver = __decorate([di_1.Injectable(), __metadata('design:paramtypes', [])], MockDirectiveResolver); return MockDirectiveResolver; }(directive_resolver_1.DirectiveResolver)); exports.MockDirectiveResolver = MockDirectiveResolver; global.define = __define; return module.exports; }); /* MockViewResolver: per-component view/template/directive overrides; throws once a component's view has been cached (compiled). */ System.register("angular2/src/mock/view_resolver_mock", ["angular2/src/core/di", "angular2/src/facade/collection", "angular2/src/facade/lang", "angular2/src/facade/exceptions", "angular2/src/core/metadata", "angular2/src/core/linker/view_resolver"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var __extends = (this && this.__extends) || function(d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; function __() { this.constructor = d; } d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); }; var __decorate = (this && this.__decorate) || function(decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? 
d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function(k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var di_1 = require("angular2/src/core/di"); var collection_1 = require("angular2/src/facade/collection"); var lang_1 = require("angular2/src/facade/lang"); var exceptions_1 = require("angular2/src/facade/exceptions"); var metadata_1 = require("angular2/src/core/metadata"); var view_resolver_1 = require("angular2/src/core/linker/view_resolver"); var MockViewResolver = (function(_super) { __extends(MockViewResolver, _super); function MockViewResolver() { _super.call(this); this._views = new collection_1.Map(); this._inlineTemplates = new collection_1.Map(); this._viewCache = new collection_1.Map(); this._directiveOverrides = new collection_1.Map(); } MockViewResolver.prototype.setView = function(component, view) { this._checkOverrideable(component); this._views.set(component, view); }; MockViewResolver.prototype.setInlineTemplate = function(component, template) { this._checkOverrideable(component); this._inlineTemplates.set(component, template); }; MockViewResolver.prototype.overrideViewDirective = function(component, from, to) { this._checkOverrideable(component); var overrides = this._directiveOverrides.get(component); if (lang_1.isBlank(overrides)) { overrides = new collection_1.Map(); this._directiveOverrides.set(component, overrides); } overrides.set(from, to); }; MockViewResolver.prototype.resolve = function(component) { var view = this._viewCache.get(component); if (lang_1.isPresent(view)) return view; view = this._views.get(component); if (lang_1.isBlank(view)) { view = _super.prototype.resolve.call(this, component); } var directives = view.directives; var overrides = this._directiveOverrides.get(component); if (lang_1.isPresent(overrides) && lang_1.isPresent(directives)) { directives 
= collection_1.ListWrapper.clone(view.directives); overrides.forEach(function(to, from) { var srcIndex = directives.indexOf(from); if (srcIndex == -1) { throw new exceptions_1.BaseException("Overriden directive " + lang_1.stringify(from) + " not found in the template of " + lang_1.stringify(component)); } directives[srcIndex] = to; }); view = new metadata_1.ViewMetadata({ template: view.template, templateUrl: view.templateUrl, directives: directives }); } var inlineTemplate = this._inlineTemplates.get(component); if (lang_1.isPresent(inlineTemplate)) { view = new metadata_1.ViewMetadata({ template: inlineTemplate, templateUrl: null, directives: view.directives }); } this._viewCache.set(component, view); return view; }; MockViewResolver.prototype._checkOverrideable = function(component) { var cached = this._viewCache.get(component); if (lang_1.isPresent(cached)) { throw new exceptions_1.BaseException("The component " + lang_1.stringify(component) + " has already been compiled, its configuration can not be changed"); } }; MockViewResolver = __decorate([di_1.Injectable(), __metadata('design:paramtypes', [])], MockViewResolver); return MockViewResolver; }(view_resolver_1.ViewResolver)); exports.MockViewResolver = MockViewResolver; global.define = __define; return module.exports; }); /* LocationStrategy abstract base plus the APP_BASE_HREF token and URL helpers (normalizeQueryParams, joinWithSlash). */ System.register("angular2/src/router/location/location_strategy", ["angular2/src/facade/lang", "angular2/core"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var lang_1 = require("angular2/src/facade/lang"); var core_1 = require("angular2/core"); var LocationStrategy = (function() { function LocationStrategy() {} return LocationStrategy; }()); exports.LocationStrategy = LocationStrategy; exports.APP_BASE_HREF = lang_1.CONST_EXPR(new core_1.OpaqueToken('appBaseHref')); function normalizeQueryParams(params) { return (params.length > 0 && params.substring(0, 1) != '?') ? ('?' 
+ params) : params; } exports.normalizeQueryParams = normalizeQueryParams; function joinWithSlash(start, end) { if (start.length == 0) { return end; } if (end.length == 0) { return start; } var slashes = 0; if (start.endsWith('/')) { slashes++; } if (end.startsWith('/')) { slashes++; } if (slashes == 2) { return start + end.substring(1); } if (slashes == 1) { return start + end; } return start + '/' + end; } exports.joinWithSlash = joinWithSlash; global.define = __define; return module.exports; }); /* MockNgZone: runs work synchronously (no zone) and lets tests emit onStable via simulateZoneExit(). */ System.register("angular2/src/mock/ng_zone_mock", ["angular2/src/core/di", "angular2/src/core/zone/ng_zone", "angular2/src/facade/async"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var __extends = (this && this.__extends) || function(d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; function __() { this.constructor = d; } d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); }; var __decorate = (this && this.__decorate) || function(decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? 
d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function(k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var di_1 = require("angular2/src/core/di"); var ng_zone_1 = require("angular2/src/core/zone/ng_zone"); var async_1 = require("angular2/src/facade/async"); var MockNgZone = (function(_super) { __extends(MockNgZone, _super); function MockNgZone() { _super.call(this, {enableLongStackTrace: false}); this._mockOnStable = new async_1.EventEmitter(false); } Object.defineProperty(MockNgZone.prototype, "onStable", { get: function() { return this._mockOnStable; }, enumerable: true, configurable: true }); MockNgZone.prototype.run = function(fn) { return fn(); }; MockNgZone.prototype.runOutsideAngular = function(fn) { return fn(); }; MockNgZone.prototype.simulateZoneExit = function() { async_1.ObservableWrapper.callNext(this.onStable, null); }; MockNgZone = __decorate([di_1.Injectable(), __metadata('design:paramtypes', [])], MockNgZone); return MockNgZone; }(ng_zone_1.NgZone)); exports.MockNgZone = MockNgZone; global.define = __define; return module.exports; }); /* Testing utils: Log call recorder, BrowserDetection user-agent sniffing, and DOM/event/CSS string helpers. */ System.register("angular2/src/testing/utils", ["angular2/core", "angular2/src/facade/collection", "angular2/src/platform/dom/dom_adapter", "angular2/src/facade/lang"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var __decorate = (this && this.__decorate) || function(decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? 
d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function(k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var core_1 = require("angular2/core"); var collection_1 = require("angular2/src/facade/collection"); var dom_adapter_1 = require("angular2/src/platform/dom/dom_adapter"); var lang_1 = require("angular2/src/facade/lang"); var Log = (function() { function Log() { this.logItems = []; } Log.prototype.add = function(value) { this.logItems.push(value); }; Log.prototype.fn = function(value) { var _this = this; return function(a1, a2, a3, a4, a5) { if (a1 === void 0) { a1 = null; } if (a2 === void 0) { a2 = null; } if (a3 === void 0) { a3 = null; } if (a4 === void 0) { a4 = null; } if (a5 === void 0) { a5 = null; } _this.logItems.push(value); }; }; Log.prototype.clear = function() { this.logItems = []; }; Log.prototype.result = function() { return this.logItems.join("; "); }; Log = __decorate([core_1.Injectable(), __metadata('design:paramtypes', [])], Log); return Log; }()); exports.Log = Log; exports.browserDetection = null; var BrowserDetection = (function() { function BrowserDetection(ua) { if (lang_1.isPresent(ua)) { this._ua = ua; } else { this._ua = lang_1.isPresent(dom_adapter_1.DOM) ? 
dom_adapter_1.DOM.getUserAgent() : ''; } } BrowserDetection.setup = function() { exports.browserDetection = new BrowserDetection(null); }; Object.defineProperty(BrowserDetection.prototype, "isFirefox", { get: function() { return this._ua.indexOf('Firefox') > -1; }, enumerable: true, configurable: true }); Object.defineProperty(BrowserDetection.prototype, "isAndroid", { get: function() { return this._ua.indexOf('Mozilla/5.0') > -1 && this._ua.indexOf('Android') > -1 && this._ua.indexOf('AppleWebKit') > -1 && this._ua.indexOf('Chrome') == -1; }, enumerable: true, configurable: true }); Object.defineProperty(BrowserDetection.prototype, "isEdge", { get: function() { return this._ua.indexOf('Edge') > -1; }, enumerable: true, configurable: true }); Object.defineProperty(BrowserDetection.prototype, "isIE", { get: function() { return this._ua.indexOf('Trident') > -1; }, enumerable: true, configurable: true }); Object.defineProperty(BrowserDetection.prototype, "isWebkit", { get: function() { return this._ua.indexOf('AppleWebKit') > -1 && this._ua.indexOf('Edge') == -1; }, enumerable: true, configurable: true }); Object.defineProperty(BrowserDetection.prototype, "isIOS7", { get: function() { return this._ua.indexOf('iPhone OS 7') > -1 || this._ua.indexOf('iPad OS 7') > -1; }, enumerable: true, configurable: true }); Object.defineProperty(BrowserDetection.prototype, "isSlow", { get: function() { return this.isAndroid || this.isIE || this.isIOS7; }, enumerable: true, configurable: true }); Object.defineProperty(BrowserDetection.prototype, "supportsIntlApi", { get: function() { return this._ua.indexOf('Chrome/4') > -1 && this._ua.indexOf('Edge') == -1; }, enumerable: true, configurable: true }); return BrowserDetection; }()); exports.BrowserDetection = BrowserDetection; function dispatchEvent(element, eventType) { dom_adapter_1.DOM.dispatchEvent(element, dom_adapter_1.DOM.createEvent(eventType)); } exports.dispatchEvent = dispatchEvent; function el(html) { return 
dom_adapter_1.DOM.firstChild(dom_adapter_1.DOM.content(dom_adapter_1.DOM.createTemplate(html))); } exports.el = el; var _RE_SPECIAL_CHARS = ['-', '[', ']', '/', '{', '}', '\\', '(', ')', '*', '+', '?', '.', '^', '$', '|']; var _ESCAPE_RE = lang_1.RegExpWrapper.create("[\\" + _RE_SPECIAL_CHARS.join('\\') + "]"); function containsRegexp(input) { return lang_1.RegExpWrapper.create(lang_1.StringWrapper.replaceAllMapped(input, _ESCAPE_RE, function(match) { return ("\\" + match[0]); })); } exports.containsRegexp = containsRegexp; function normalizeCSS(css) { css = lang_1.StringWrapper.replaceAll(css, /\s+/g, ' '); css = lang_1.StringWrapper.replaceAll(css, /:\s/g, ':'); css = lang_1.StringWrapper.replaceAll(css, /'/g, '"'); css = lang_1.StringWrapper.replaceAll(css, / }/g, '}'); css = lang_1.StringWrapper.replaceAllMapped(css, /url\((\"|\s)(.+)(\"|\s)\)(\s*)/g, function(match) { return ("url(\"" + match[2] + "\")"); }); css = lang_1.StringWrapper.replaceAllMapped(css, /\[(.+)=([^"\]]+)\]/g, function(match) { return ("[" + match[1] + "=\"" + match[2] + "\"]"); }); return css; } exports.normalizeCSS = normalizeCSS; var _singleTagWhitelist = ['br', 'hr', 'input']; function stringifyElement(el) { var result = ''; if (dom_adapter_1.DOM.isElementNode(el)) { var tagName = dom_adapter_1.DOM.tagName(el).toLowerCase(); result += "<" + tagName; var attributeMap = dom_adapter_1.DOM.attributeMap(el); var keys = []; attributeMap.forEach(function(v, k) { return keys.push(k); }); collection_1.ListWrapper.sort(keys); for (var i = 0; i < keys.length; i++) { var key = keys[i]; var attValue = attributeMap.get(key); if (!lang_1.isString(attValue)) { result += " " + key; } else { result += " " + key + "=\"" + attValue + "\""; } } result += '>'; var childrenRoot = dom_adapter_1.DOM.templateAwareRoot(el); var children = lang_1.isPresent(childrenRoot) ? 
dom_adapter_1.DOM.childNodes(childrenRoot) : []; for (var j = 0; j < children.length; j++) { result += stringifyElement(children[j]); } if (!collection_1.ListWrapper.contains(_singleTagWhitelist, tagName)) { result += "</" + tagName + ">"; } } else if (dom_adapter_1.DOM.isCommentNode(el)) { result += "<!--" + dom_adapter_1.DOM.nodeValue(el) + "-->"; } else { result += dom_adapter_1.DOM.getText(el); } return result; } exports.stringifyElement = stringifyElement; global.define = __define; return module.exports; }); /* MockLocationStrategy: records pushState/replaceState URLs in urlChanges and can simulate popstate events for router tests. */ System.register("angular2/src/mock/mock_location_strategy", ["angular2/src/core/di", "angular2/src/facade/async", "angular2/src/router/location/location_strategy"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var __extends = (this && this.__extends) || function(d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; function __() { this.constructor = d; } d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); }; var __decorate = (this && this.__decorate) || function(decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? 
d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function(k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var di_1 = require("angular2/src/core/di"); var async_1 = require("angular2/src/facade/async"); var location_strategy_1 = require("angular2/src/router/location/location_strategy"); var MockLocationStrategy = (function(_super) { __extends(MockLocationStrategy, _super); function MockLocationStrategy() { _super.call(this); this.internalBaseHref = '/'; this.internalPath = '/'; this.internalTitle = ''; this.urlChanges = []; this._subject = new async_1.EventEmitter(); } MockLocationStrategy.prototype.simulatePopState = function(url) { this.internalPath = url; async_1.ObservableWrapper.callEmit(this._subject, new _MockPopStateEvent(this.path())); }; MockLocationStrategy.prototype.path = function() { return this.internalPath; }; MockLocationStrategy.prototype.prepareExternalUrl = function(internal) { if (internal.startsWith('/') && this.internalBaseHref.endsWith('/')) { return this.internalBaseHref + internal.substring(1); } return this.internalBaseHref + internal; }; MockLocationStrategy.prototype.pushState = function(ctx, title, path, query) { this.internalTitle = title; var url = path + (query.length > 0 ? ('?' + query) : ''); this.internalPath = url; var externalUrl = this.prepareExternalUrl(url); this.urlChanges.push(externalUrl); }; MockLocationStrategy.prototype.replaceState = function(ctx, title, path, query) { this.internalTitle = title; var url = path + (query.length > 0 ? ('?' 
+ query) : ''); this.internalPath = url; var externalUrl = this.prepareExternalUrl(url); this.urlChanges.push('replace: ' + externalUrl); }; MockLocationStrategy.prototype.onPopState = function(fn) { async_1.ObservableWrapper.subscribe(this._subject, fn); }; MockLocationStrategy.prototype.getBaseHref = function() { return this.internalBaseHref; }; MockLocationStrategy.prototype.back = function() { if (this.urlChanges.length > 0) { this.urlChanges.pop(); var nextUrl = this.urlChanges.length > 0 ? this.urlChanges[this.urlChanges.length - 1] : ''; this.simulatePopState(nextUrl); } }; MockLocationStrategy.prototype.forward = function() { throw 'not implemented'; }; MockLocationStrategy = __decorate([di_1.Injectable(), __metadata('design:paramtypes', [])], MockLocationStrategy); return MockLocationStrategy; }(location_strategy_1.LocationStrategy)); exports.MockLocationStrategy = MockLocationStrategy; var _MockPopStateEvent = (function() { function _MockPopStateEvent(newUrl) { this.newUrl = newUrl; this.pop = true; this.type = 'popstate'; } return _MockPopStateEvent; }()); global.define = __define; return module.exports; }); /* TestComponentBuilder / ComponentFixture: builds a root test component with per-test view, template, directive and provider overrides. */ System.register("angular2/src/testing/test_component_builder", ["angular2/core", "angular2/src/facade/lang", "angular2/src/facade/collection", "angular2/src/testing/utils", "angular2/src/platform/dom/dom_tokens", "angular2/src/platform/dom/dom_adapter", "angular2/src/core/debug/debug_node"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var __extends = (this && this.__extends) || function(d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; function __() { this.constructor = d; } d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); }; var __decorate = (this && this.__decorate) || function(decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? 
desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function(k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var core_1 = require("angular2/core"); var lang_1 = require("angular2/src/facade/lang"); var collection_1 = require("angular2/src/facade/collection"); var utils_1 = require("angular2/src/testing/utils"); var dom_tokens_1 = require("angular2/src/platform/dom/dom_tokens"); var dom_adapter_1 = require("angular2/src/platform/dom/dom_adapter"); var debug_node_1 = require("angular2/src/core/debug/debug_node"); var ComponentFixture = (function() { function ComponentFixture() {} return ComponentFixture; }()); exports.ComponentFixture = ComponentFixture; var ComponentFixture_ = (function(_super) { __extends(ComponentFixture_, _super); function ComponentFixture_(componentRef) { _super.call(this); this._componentParentView = componentRef.hostView.internalView; this.elementRef = this._componentParentView.appElements[0].ref; this.debugElement = debug_node_1.getDebugNode(this._componentParentView.rootNodesOrAppElements[0].nativeElement); this.componentInstance = this.debugElement.componentInstance; this.nativeElement = this.debugElement.nativeElement; this._componentRef = componentRef; } ComponentFixture_.prototype.detectChanges = function() { this._componentParentView.changeDetector.detectChanges(); this._componentParentView.changeDetector.checkNoChanges(); }; ComponentFixture_.prototype.destroy = function() { this._componentRef.dispose(); }; return ComponentFixture_; }(ComponentFixture)); 
exports.ComponentFixture_ = ComponentFixture_; var _nextRootElementId = 0; var TestComponentBuilder = (function() { function TestComponentBuilder(_injector) { this._injector = _injector; this._bindingsOverrides = new Map(); this._directiveOverrides = new Map(); this._templateOverrides = new Map(); this._viewBindingsOverrides = new Map(); this._viewOverrides = new Map(); } TestComponentBuilder.prototype._clone = function() { var clone = new TestComponentBuilder(this._injector); clone._viewOverrides = collection_1.MapWrapper.clone(this._viewOverrides); clone._directiveOverrides = collection_1.MapWrapper.clone(this._directiveOverrides); clone._templateOverrides = collection_1.MapWrapper.clone(this._templateOverrides); return clone; }; TestComponentBuilder.prototype.overrideTemplate = function(componentType, template) { var clone = this._clone(); clone._templateOverrides.set(componentType, template); return clone; }; TestComponentBuilder.prototype.overrideView = function(componentType, view) { var clone = this._clone(); clone._viewOverrides.set(componentType, view); return clone; }; TestComponentBuilder.prototype.overrideDirective = function(componentType, from, to) { var clone = this._clone(); var overridesForComponent = clone._directiveOverrides.get(componentType); if (!lang_1.isPresent(overridesForComponent)) { clone._directiveOverrides.set(componentType, new Map()); overridesForComponent = clone._directiveOverrides.get(componentType); } overridesForComponent.set(from, to); return clone; }; TestComponentBuilder.prototype.overrideProviders = function(type, providers) { var clone = this._clone(); clone._bindingsOverrides.set(type, providers); return clone; }; TestComponentBuilder.prototype.overrideBindings = function(type, providers) { return this.overrideProviders(type, providers); }; TestComponentBuilder.prototype.overrideViewProviders = function(type, providers) { var clone = this._clone(); clone._viewBindingsOverrides.set(type, providers); return clone; }; 
TestComponentBuilder.prototype.overrideViewBindings = function(type, providers) { return this.overrideViewProviders(type, providers); }; TestComponentBuilder.prototype.createAsync = function(rootComponentType) { var mockDirectiveResolver = this._injector.get(core_1.DirectiveResolver); var mockViewResolver = this._injector.get(core_1.ViewResolver); this._viewOverrides.forEach(function(view, type) { return mockViewResolver.setView(type, view); }); this._templateOverrides.forEach(function(template, type) { return mockViewResolver.setInlineTemplate(type, template); }); this._directiveOverrides.forEach(function(overrides, component) { overrides.forEach(function(to, from) { mockViewResolver.overrideViewDirective(component, from, to); }); }); this._bindingsOverrides.forEach(function(bindings, type) { return mockDirectiveResolver.setBindingsOverride(type, bindings); }); this._viewBindingsOverrides.forEach(function(bindings, type) { return mockDirectiveResolver.setViewBindingsOverride(type, bindings); }); var rootElId = "root" + _nextRootElementId++; var rootEl = utils_1.el("<div id=\"" + rootElId + "\"></div>"); var doc = this._injector.get(dom_tokens_1.DOCUMENT); var oldRoots = dom_adapter_1.DOM.querySelectorAll(doc, '[id^=root]'); for (var i = 0; i < oldRoots.length; i++) { dom_adapter_1.DOM.remove(oldRoots[i]); } dom_adapter_1.DOM.appendChild(doc.body, rootEl); var promise = this._injector.get(core_1.DynamicComponentLoader).loadAsRoot(rootComponentType, "#" + rootElId, this._injector); return promise.then(function(componentRef) { return new ComponentFixture_(componentRef); }); }; TestComponentBuilder = __decorate([core_1.Injectable(), __metadata('design:paramtypes', [core_1.Injector])], TestComponentBuilder); return TestComponentBuilder; }()); exports.TestComponentBuilder = TestComponentBuilder; global.define = __define; return module.exports; }); /* browser_static: test-platform providers wiring the mocks above into a static browser setup (module continues past this chunk). */ System.register("angular2/platform/testing/browser_static", ["angular2/core", "angular2/src/platform/browser_common", 
"angular2/src/platform/browser/browser_adapter", "angular2/src/animate/animation_builder", "angular2/src/mock/animation_builder_mock", "angular2/src/mock/directive_resolver_mock", "angular2/src/mock/view_resolver_mock", "angular2/src/mock/mock_location_strategy", "angular2/src/router/location/location_strategy", "angular2/src/mock/ng_zone_mock", "angular2/src/platform/browser/xhr_impl", "angular2/compiler", "angular2/src/testing/test_component_builder", "angular2/src/testing/utils", "angular2/platform/common_dom", "angular2/src/facade/lang", "angular2/src/testing/utils"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var core_1 = require("angular2/core"); var browser_common_1 = require("angular2/src/platform/browser_common"); var browser_adapter_1 = require("angular2/src/platform/browser/browser_adapter"); var animation_builder_1 = require("angular2/src/animate/animation_builder"); var animation_builder_mock_1 = require("angular2/src/mock/animation_builder_mock"); var directive_resolver_mock_1 = require("angular2/src/mock/directive_resolver_mock"); var view_resolver_mock_1 = require("angular2/src/mock/view_resolver_mock"); var mock_location_strategy_1 = require("angular2/src/mock/mock_location_strategy"); var location_strategy_1 = require("angular2/src/router/location/location_strategy"); var ng_zone_mock_1 = require("angular2/src/mock/ng_zone_mock"); var xhr_impl_1 = require("angular2/src/platform/browser/xhr_impl"); var compiler_1 = require("angular2/compiler"); var test_component_builder_1 = require("angular2/src/testing/test_component_builder"); var utils_1 = require("angular2/src/testing/utils"); var common_dom_1 = require("angular2/platform/common_dom"); var lang_1 = require("angular2/src/facade/lang"); var utils_2 = require("angular2/src/testing/utils"); function initBrowserTests() { browser_adapter_1.BrowserDomAdapter.makeCurrent(); utils_1.BrowserDetection.setup(); 
} exports.TEST_BROWSER_STATIC_PLATFORM_PROVIDERS = lang_1.CONST_EXPR([core_1.PLATFORM_COMMON_PROVIDERS, new core_1.Provider(core_1.PLATFORM_INITIALIZER, { useValue: initBrowserTests, multi: true })]); exports.ADDITIONAL_TEST_BROWSER_PROVIDERS = lang_1.CONST_EXPR([new core_1.Provider(core_1.APP_ID, {useValue: 'a'}), common_dom_1.ELEMENT_PROBE_PROVIDERS, new core_1.Provider(core_1.DirectiveResolver, {useClass: directive_resolver_mock_1.MockDirectiveResolver}), new core_1.Provider(core_1.ViewResolver, {useClass: view_resolver_mock_1.MockViewResolver}), utils_2.Log, test_component_builder_1.TestComponentBuilder, new core_1.Provider(core_1.NgZone, {useClass: ng_zone_mock_1.MockNgZone}), new core_1.Provider(location_strategy_1.LocationStrategy, {useClass: mock_location_strategy_1.MockLocationStrategy}), new core_1.Provider(animation_builder_1.AnimationBuilder, {useClass: animation_builder_mock_1.MockAnimationBuilder})]); exports.TEST_BROWSER_STATIC_APPLICATION_PROVIDERS = lang_1.CONST_EXPR([browser_common_1.BROWSER_APP_COMMON_PROVIDERS, new core_1.Provider(compiler_1.XHR, {useClass: xhr_impl_1.XHRImpl}), exports.ADDITIONAL_TEST_BROWSER_PROVIDERS]); global.define = __define; return module.exports; }); System.register("angular2/platform/testing/browser", ["angular2/platform/testing/browser_static", "angular2/platform/browser", "angular2/src/facade/lang", "angular2/platform/browser"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var browser_static_1 = require("angular2/platform/testing/browser_static"); var browser_1 = require("angular2/platform/browser"); var lang_1 = require("angular2/src/facade/lang"); var browser_2 = require("angular2/platform/browser"); exports.CACHED_TEMPLATE_PROVIDER = browser_2.CACHED_TEMPLATE_PROVIDER; exports.TEST_BROWSER_PLATFORM_PROVIDERS = lang_1.CONST_EXPR([browser_static_1.TEST_BROWSER_STATIC_PLATFORM_PROVIDERS]); 
exports.TEST_BROWSER_APPLICATION_PROVIDERS = lang_1.CONST_EXPR([browser_1.BROWSER_APP_PROVIDERS, browser_static_1.ADDITIONAL_TEST_BROWSER_PROVIDERS]); global.define = __define; return module.exports; }); System.register("angular2/src/mock/location_mock", ["angular2/src/core/di", "angular2/src/facade/async"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var __decorate = (this && this.__decorate) || function(decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function(k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var di_1 = require("angular2/src/core/di"); var async_1 = require("angular2/src/facade/async"); var SpyLocation = (function() { function SpyLocation() { this.urlChanges = []; this._path = ''; this._query = ''; this._subject = new async_1.EventEmitter(); this._baseHref = ''; this.platformStrategy = null; } SpyLocation.prototype.setInitialPath = function(url) { this._path = url; }; SpyLocation.prototype.setBaseHref = function(url) { this._baseHref = url; }; SpyLocation.prototype.path = function() { return this._path; }; SpyLocation.prototype.simulateUrlPop = function(pathname) { async_1.ObservableWrapper.callEmit(this._subject, { 'url': pathname, 'pop': true }); }; SpyLocation.prototype.simulateHashChange = function(pathname) { this.setInitialPath(pathname); this.urlChanges.push('hash: ' + pathname); 
async_1.ObservableWrapper.callEmit(this._subject, { 'url': pathname, 'pop': true, 'type': 'hashchange' }); }; SpyLocation.prototype.prepareExternalUrl = function(url) { if (url.length > 0 && !url.startsWith('/')) { url = '/' + url; } return this._baseHref + url; }; SpyLocation.prototype.go = function(path, query) { if (query === void 0) { query = ''; } path = this.prepareExternalUrl(path); if (this._path == path && this._query == query) { return ; } this._path = path; this._query = query; var url = path + (query.length > 0 ? ('?' + query) : ''); this.urlChanges.push(url); }; SpyLocation.prototype.replaceState = function(path, query) { if (query === void 0) { query = ''; } path = this.prepareExternalUrl(path); this._path = path; this._query = query; var url = path + (query.length > 0 ? ('?' + query) : ''); this.urlChanges.push('replace: ' + url); }; SpyLocation.prototype.forward = function() {}; SpyLocation.prototype.back = function() {}; SpyLocation.prototype.subscribe = function(onNext, onThrow, onReturn) { if (onThrow === void 0) { onThrow = null; } if (onReturn === void 0) { onReturn = null; } return async_1.ObservableWrapper.subscribe(this._subject, onNext, onThrow, onReturn); }; SpyLocation.prototype.normalize = function(url) { return null; }; SpyLocation = __decorate([di_1.Injectable(), __metadata('design:paramtypes', [])], SpyLocation); return SpyLocation; }()); exports.SpyLocation = SpyLocation; global.define = __define; return module.exports; }); System.register("angular2/router/testing", ["angular2/src/mock/mock_location_strategy", "angular2/src/mock/location_mock"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; function __export(m) { for (var p in m) if (!exports.hasOwnProperty(p)) exports[p] = m[p]; } __export(require("angular2/src/mock/mock_location_strategy")); __export(require("angular2/src/mock/location_mock")); global.define = __define; return 
module.exports; }); System.register("angular2/src/http/headers", ["angular2/src/facade/lang", "angular2/src/facade/exceptions", "angular2/src/facade/collection"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var lang_1 = require("angular2/src/facade/lang"); var exceptions_1 = require("angular2/src/facade/exceptions"); var collection_1 = require("angular2/src/facade/collection"); var Headers = (function() { function Headers(headers) { var _this = this; if (headers instanceof Headers) { this._headersMap = headers._headersMap; return ; } this._headersMap = new collection_1.Map(); if (lang_1.isBlank(headers)) { return ; } collection_1.StringMapWrapper.forEach(headers, function(v, k) { _this._headersMap.set(k, collection_1.isListLikeIterable(v) ? v : [v]); }); } Headers.fromResponseHeaderString = function(headersString) { return headersString.trim().split('\n').map(function(val) { return val.split(':'); }).map(function(_a) { var key = _a[0], parts = _a.slice(1); return ([key.trim(), parts.join(':').trim()]); }).reduce(function(headers, _a) { var key = _a[0], value = _a[1]; return !headers.set(key, value) && headers; }, new Headers()); }; Headers.prototype.append = function(name, value) { var mapName = this._headersMap.get(name); var list = collection_1.isListLikeIterable(mapName) ? 
mapName : []; list.push(value); this._headersMap.set(name, list); }; Headers.prototype.delete = function(name) { this._headersMap.delete(name); }; Headers.prototype.forEach = function(fn) { this._headersMap.forEach(fn); }; Headers.prototype.get = function(header) { return collection_1.ListWrapper.first(this._headersMap.get(header)); }; Headers.prototype.has = function(header) { return this._headersMap.has(header); }; Headers.prototype.keys = function() { return collection_1.MapWrapper.keys(this._headersMap); }; Headers.prototype.set = function(header, value) { var list = []; if (collection_1.isListLikeIterable(value)) { var pushValue = value.join(','); list.push(pushValue); } else { list.push(value); } this._headersMap.set(header, list); }; Headers.prototype.values = function() { return collection_1.MapWrapper.values(this._headersMap); }; Headers.prototype.toJSON = function() { var serializableHeaders = {}; this._headersMap.forEach(function(values, name) { var list = []; collection_1.iterateListLike(values, function(val) { return list = collection_1.ListWrapper.concat(list, val.split(',')); }); serializableHeaders[name] = list; }); return serializableHeaders; }; Headers.prototype.getAll = function(header) { var headers = this._headersMap.get(header); return collection_1.isListLikeIterable(headers) ? 
headers : []; }; Headers.prototype.entries = function() { throw new exceptions_1.BaseException('"entries" method is not implemented on Headers class'); }; return Headers; }()); exports.Headers = Headers; global.define = __define; return module.exports; }); System.register("angular2/src/http/enums", [], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; (function(RequestMethod) { RequestMethod[RequestMethod["Get"] = 0] = "Get"; RequestMethod[RequestMethod["Post"] = 1] = "Post"; RequestMethod[RequestMethod["Put"] = 2] = "Put"; RequestMethod[RequestMethod["Delete"] = 3] = "Delete"; RequestMethod[RequestMethod["Options"] = 4] = "Options"; RequestMethod[RequestMethod["Head"] = 5] = "Head"; RequestMethod[RequestMethod["Patch"] = 6] = "Patch"; })(exports.RequestMethod || (exports.RequestMethod = {})); var RequestMethod = exports.RequestMethod; (function(ReadyState) { ReadyState[ReadyState["Unsent"] = 0] = "Unsent"; ReadyState[ReadyState["Open"] = 1] = "Open"; ReadyState[ReadyState["HeadersReceived"] = 2] = "HeadersReceived"; ReadyState[ReadyState["Loading"] = 3] = "Loading"; ReadyState[ReadyState["Done"] = 4] = "Done"; ReadyState[ReadyState["Cancelled"] = 5] = "Cancelled"; })(exports.ReadyState || (exports.ReadyState = {})); var ReadyState = exports.ReadyState; (function(ResponseType) { ResponseType[ResponseType["Basic"] = 0] = "Basic"; ResponseType[ResponseType["Cors"] = 1] = "Cors"; ResponseType[ResponseType["Default"] = 2] = "Default"; ResponseType[ResponseType["Error"] = 3] = "Error"; ResponseType[ResponseType["Opaque"] = 4] = "Opaque"; })(exports.ResponseType || (exports.ResponseType = {})); var ResponseType = exports.ResponseType; global.define = __define; return module.exports; }); System.register("angular2/src/http/http_utils", ["angular2/src/facade/lang", "angular2/src/http/enums", "angular2/src/facade/exceptions", "angular2/src/facade/lang"], true, function(require, exports, 
module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var lang_1 = require("angular2/src/facade/lang"); var enums_1 = require("angular2/src/http/enums"); var exceptions_1 = require("angular2/src/facade/exceptions"); function normalizeMethodName(method) { if (lang_1.isString(method)) { var originalMethod = method; method = method.replace(/(\w)(\w*)/g, function(g0, g1, g2) { return g1.toUpperCase() + g2.toLowerCase(); }); method = enums_1.RequestMethod[method]; if (typeof method !== 'number') throw exceptions_1.makeTypeError("Invalid request method. The method \"" + originalMethod + "\" is not supported."); } return method; } exports.normalizeMethodName = normalizeMethodName; exports.isSuccess = function(status) { return (status >= 200 && status < 300); }; function getResponseURL(xhr) { if ('responseURL' in xhr) { return xhr.responseURL; } if (/^X-Request-URL:/m.test(xhr.getAllResponseHeaders())) { return xhr.getResponseHeader('X-Request-URL'); } return ; } exports.getResponseURL = getResponseURL; var lang_2 = require("angular2/src/facade/lang"); exports.isJsObject = lang_2.isJsObject; global.define = __define; return module.exports; }); System.register("angular2/src/http/static_request", ["angular2/src/http/headers", "angular2/src/http/http_utils", "angular2/src/facade/lang"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var headers_1 = require("angular2/src/http/headers"); var http_utils_1 = require("angular2/src/http/http_utils"); var lang_1 = require("angular2/src/facade/lang"); var Request = (function() { function Request(requestOptions) { var url = requestOptions.url; this.url = requestOptions.url; if (lang_1.isPresent(requestOptions.search)) { var search = requestOptions.search.toString(); if (search.length > 0) { var prefix = '?'; if (lang_1.StringWrapper.contains(this.url, '?')) { prefix = (this.url[this.url.length - 
1] == '&') ? '' : '&'; } this.url = url + prefix + search; } } this._body = requestOptions.body; this.method = http_utils_1.normalizeMethodName(requestOptions.method); this.headers = new headers_1.Headers(requestOptions.headers); } Request.prototype.text = function() { return lang_1.isPresent(this._body) ? this._body.toString() : ''; }; return Request; }()); exports.Request = Request; global.define = __define; return module.exports; }); System.register("angular2/src/http/backends/mock_backend", ["angular2/core", "angular2/src/http/static_request", "angular2/src/http/enums", "angular2/src/facade/lang", "angular2/src/facade/exceptions", "rxjs/Subject", "rxjs/subject/ReplaySubject", "rxjs/operator/take"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var __decorate = (this && this.__decorate) || function(decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? 
d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function(k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var core_1 = require("angular2/core"); var static_request_1 = require("angular2/src/http/static_request"); var enums_1 = require("angular2/src/http/enums"); var lang_1 = require("angular2/src/facade/lang"); var exceptions_1 = require("angular2/src/facade/exceptions"); var Subject_1 = require("rxjs/Subject"); var ReplaySubject_1 = require("rxjs/subject/ReplaySubject"); var take_1 = require("rxjs/operator/take"); var MockConnection = (function() { function MockConnection(req) { this.response = take_1.take.call(new ReplaySubject_1.ReplaySubject(1), 1); this.readyState = enums_1.ReadyState.Open; this.request = req; } MockConnection.prototype.mockRespond = function(res) { if (this.readyState === enums_1.ReadyState.Done || this.readyState === enums_1.ReadyState.Cancelled) { throw new exceptions_1.BaseException('Connection has already been resolved'); } this.readyState = enums_1.ReadyState.Done; this.response.next(res); this.response.complete(); }; MockConnection.prototype.mockDownload = function(res) {}; MockConnection.prototype.mockError = function(err) { this.readyState = enums_1.ReadyState.Done; this.response.error(err); }; return MockConnection; }()); exports.MockConnection = MockConnection; var MockBackend = (function() { function MockBackend() { var _this = this; this.connectionsArray = []; this.connections = new Subject_1.Subject(); this.connections.subscribe(function(connection) { return _this.connectionsArray.push(connection); }); this.pendingConnections = new Subject_1.Subject(); } MockBackend.prototype.verifyNoPendingRequests = function() { var pending = 0; this.pendingConnections.subscribe(function(c) { return pending++; }); if (pending > 0) throw new 
exceptions_1.BaseException(pending + " pending connections to be resolved"); }; MockBackend.prototype.resolveAllConnections = function() { this.connections.subscribe(function(c) { return c.readyState = 4; }); }; MockBackend.prototype.createConnection = function(req) { if (!lang_1.isPresent(req) || !(req instanceof static_request_1.Request)) { throw new exceptions_1.BaseException("createConnection requires an instance of Request, got " + req); } var connection = new MockConnection(req); this.connections.next(connection); return connection; }; MockBackend = __decorate([core_1.Injectable(), __metadata('design:paramtypes', [])], MockBackend); return MockBackend; }()); exports.MockBackend = MockBackend; global.define = __define; return module.exports; }); System.register("angular2/http/testing", ["angular2/src/http/backends/mock_backend"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; function __export(m) { for (var p in m) if (!exports.hasOwnProperty(p)) exports[p] = m[p]; } __export(require("angular2/src/http/backends/mock_backend")); global.define = __define; return module.exports; }); System.register("angular2/src/testing/fake_async", ["angular2/src/facade/lang", "angular2/src/facade/exceptions", "angular2/src/facade/collection"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var lang_1 = require("angular2/src/facade/lang"); var exceptions_1 = require("angular2/src/facade/exceptions"); var collection_1 = require("angular2/src/facade/collection"); var _scheduler; var _microtasks = []; var _pendingPeriodicTimers = []; var _pendingTimers = []; var FakeAsyncZoneSpec = (function() { function FakeAsyncZoneSpec() { this.name = 'fakeAsync'; this.properties = {'inFakeAsyncZone': true}; } FakeAsyncZoneSpec.assertInZone = function() { if (!Zone.current.get('inFakeAsyncZone')) { throw new 
Error('The code should be running in the fakeAsync zone to call this function'); } }; FakeAsyncZoneSpec.prototype.onScheduleTask = function(delegate, current, target, task) { switch (task.type) { case 'microTask': _microtasks.push(task.invoke); break; case 'macroTask': switch (task.source) { case 'setTimeout': task.data['handleId'] = _setTimeout(task.invoke, task.data['delay'], task.data['args']); break; case 'setInterval': task.data['handleId'] = _setInterval(task.invoke, task.data['delay'], task.data['args']); break; default: task = delegate.scheduleTask(target, task); } break; case 'eventTask': task = delegate.scheduleTask(target, task); break; } return task; }; FakeAsyncZoneSpec.prototype.onCancelTask = function(delegate, current, target, task) { switch (task.source) { case 'setTimeout': return _clearTimeout(task.data['handleId']); case 'setInterval': return _clearInterval(task.data['handleId']); default: return delegate.scheduleTask(target, task); } }; return FakeAsyncZoneSpec; }()); function fakeAsync(fn) { if (Zone.current.get('inFakeAsyncZone')) { throw new Error('fakeAsync() calls can not be nested'); } var fakeAsyncZone = Zone.current.fork(new FakeAsyncZoneSpec()); return function() { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i - 0] = arguments[_i]; } _scheduler = new jasmine.DelayedFunctionScheduler(); clearPendingTimers(); var res = fakeAsyncZone.run(function() { var res = fn.apply(void 0, args); flushMicrotasks(); return res; }); if (_pendingPeriodicTimers.length > 0) { throw new exceptions_1.BaseException(_pendingPeriodicTimers.length + " periodic timer(s) still in the queue."); } if (_pendingTimers.length > 0) { throw new exceptions_1.BaseException(_pendingTimers.length + " timer(s) still in the queue."); } _scheduler = null; collection_1.ListWrapper.clear(_microtasks); return res; }; } exports.fakeAsync = fakeAsync; function clearPendingTimers() { collection_1.ListWrapper.clear(_microtasks); 
collection_1.ListWrapper.clear(_pendingPeriodicTimers); collection_1.ListWrapper.clear(_pendingTimers); } exports.clearPendingTimers = clearPendingTimers; function tick(millis) { if (millis === void 0) { millis = 0; } FakeAsyncZoneSpec.assertInZone(); flushMicrotasks(); _scheduler.tick(millis); } exports.tick = tick; function flushMicrotasks() { FakeAsyncZoneSpec.assertInZone(); while (_microtasks.length > 0) { var microtask = collection_1.ListWrapper.removeAt(_microtasks, 0); microtask(); } } exports.flushMicrotasks = flushMicrotasks; function _setTimeout(fn, delay, args) { var cb = _fnAndFlush(fn); var id = _scheduler.scheduleFunction(cb, delay, args); _pendingTimers.push(id); _scheduler.scheduleFunction(_dequeueTimer(id), delay); return id; } function _clearTimeout(id) { _dequeueTimer(id); return _scheduler.removeFunctionWithId(id); } function _setInterval(fn, interval) { var args = []; for (var _i = 2; _i < arguments.length; _i++) { args[_i - 2] = arguments[_i]; } var cb = _fnAndFlush(fn); var id = _scheduler.scheduleFunction(cb, interval, args, true); _pendingPeriodicTimers.push(id); return id; } function _clearInterval(id) { collection_1.ListWrapper.remove(_pendingPeriodicTimers, id); return _scheduler.removeFunctionWithId(id); } function _fnAndFlush(fn) { return function() { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i - 0] = arguments[_i]; } fn.apply(lang_1.global, args); flushMicrotasks(); }; } function _dequeueTimer(id) { return function() { collection_1.ListWrapper.remove(_pendingTimers, id); }; } global.define = __define; return module.exports; }); System.register("angular2/src/mock/mock_application_ref", ["angular2/src/core/application_ref", "angular2/src/core/di"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var __extends = (this && this.__extends) || function(d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; function 
__() { this.constructor = d; } d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); }; var __decorate = (this && this.__decorate) || function(decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function(k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var application_ref_1 = require("angular2/src/core/application_ref"); var di_1 = require("angular2/src/core/di"); var MockApplicationRef = (function(_super) { __extends(MockApplicationRef, _super); function MockApplicationRef() { _super.apply(this, arguments); } MockApplicationRef.prototype.registerBootstrapListener = function(listener) {}; MockApplicationRef.prototype.registerDisposeListener = function(dispose) {}; MockApplicationRef.prototype.bootstrap = function(componentType, bindings) { return null; }; Object.defineProperty(MockApplicationRef.prototype, "injector", { get: function() { return null; }, enumerable: true, configurable: true }); ; Object.defineProperty(MockApplicationRef.prototype, "zone", { get: function() { return null; }, enumerable: true, configurable: true }); ; MockApplicationRef.prototype.dispose = function() {}; MockApplicationRef.prototype.tick = function() {}; Object.defineProperty(MockApplicationRef.prototype, "componentTypes", { get: function() { return null; }, enumerable: true, configurable: true }); ; MockApplicationRef = __decorate([di_1.Injectable(), __metadata('design:paramtypes', [])], 
MockApplicationRef); return MockApplicationRef; }(application_ref_1.ApplicationRef)); exports.MockApplicationRef = MockApplicationRef; global.define = __define; return module.exports; }); System.register("angular2/src/testing/matchers", ["angular2/src/platform/dom/dom_adapter", "angular2/src/facade/lang", "angular2/src/facade/collection"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var dom_adapter_1 = require("angular2/src/platform/dom/dom_adapter"); var lang_1 = require("angular2/src/facade/lang"); var collection_1 = require("angular2/src/facade/collection"); var _global = (typeof window === 'undefined' ? lang_1.global : window); exports.expect = _global.expect; Map.prototype['jasmineToString'] = function() { var m = this; if (!m) { return '' + m; } var res = []; m.forEach(function(v, k) { res.push(k + ":" + v); }); return "{ " + res.join(',') + " }"; }; _global.beforeEach(function() { jasmine.addMatchers({ toEqual: function(util, customEqualityTesters) { return {compare: function(actual, expected) { return {pass: util.equals(actual, expected, [compareMap])}; }}; function compareMap(actual, expected) { if (actual instanceof Map) { var pass = actual.size === expected.size; if (pass) { actual.forEach(function(v, k) { pass = pass && util.equals(v, expected.get(k)); }); } return pass; } else { return undefined; } } }, toBePromise: function() { return {compare: function(actual, expectedClass) { var pass = typeof actual === 'object' && typeof actual.then === 'function'; return { pass: pass, get message() { return 'Expected ' + actual + ' to be a promise'; } }; }}; }, toBeAnInstanceOf: function() { return {compare: function(actual, expectedClass) { var pass = typeof actual === 'object' && actual instanceof expectedClass; return { pass: pass, get message() { return 'Expected ' + actual + ' to be an instance of ' + expectedClass; } }; }}; }, toHaveText: function() { return 
{compare: function(actual, expectedText) { var actualText = elementText(actual); return { pass: actualText == expectedText, get message() { return 'Expected ' + actualText + ' to be equal to ' + expectedText; } }; }}; }, toHaveCssClass: function() { return { compare: buildError(false), negativeCompare: buildError(true) }; function buildError(isNot) { return function(actual, className) { return { pass: dom_adapter_1.DOM.hasClass(actual, className) == !isNot, get message() { return "Expected " + actual.outerHTML + " " + (isNot ? 'not ' : '') + "to contain the CSS class \"" + className + "\""; } }; }; } }, toHaveCssStyle: function() { return {compare: function(actual, styles) { var allPassed; if (lang_1.isString(styles)) { allPassed = dom_adapter_1.DOM.hasStyle(actual, styles); } else { allPassed = !collection_1.StringMapWrapper.isEmpty(styles); collection_1.StringMapWrapper.forEach(styles, function(style, prop) { allPassed = allPassed && dom_adapter_1.DOM.hasStyle(actual, prop, style); }); } return { pass: allPassed, get message() { var expectedValueStr = lang_1.isString(styles) ? styles : JSON.stringify(styles); return "Expected " + actual.outerHTML + " " + (!allPassed ? ' ' : 'not ') + "to contain the\n CSS " + (lang_1.isString(styles) ? 
'property' : 'styles') + " \"" + expectedValueStr + "\""; } }; }}; }, toContainError: function() { return {compare: function(actual, expectedText) { var errorMessage = actual.toString(); return { pass: errorMessage.indexOf(expectedText) > -1, get message() { return 'Expected ' + errorMessage + ' to contain ' + expectedText; } }; }}; }, toThrowErrorWith: function() { return {compare: function(actual, expectedText) { try { actual(); return { pass: false, get message() { return "Was expected to throw, but did not throw"; } }; } catch (e) { var errorMessage = e.toString(); return { pass: errorMessage.indexOf(expectedText) > -1, get message() { return 'Expected ' + errorMessage + ' to contain ' + expectedText; } }; } }}; }, toMatchPattern: function() { return { compare: buildError(false), negativeCompare: buildError(true) }; function buildError(isNot) { return function(actual, regex) { return { pass: regex.test(actual) == !isNot, get message() { return "Expected " + actual + " " + (isNot ? 'not ' : '') + "to match " + regex.toString(); } }; }; } }, toImplement: function() { return {compare: function(actualObject, expectedInterface) { var objProps = Object.keys(actualObject.constructor.prototype); var intProps = Object.keys(expectedInterface.prototype); var missedMethods = []; intProps.forEach(function(k) { if (!actualObject.constructor.prototype[k]) missedMethods.push(k); }); return { pass: missedMethods.length == 0, get message() { return 'Expected ' + actualObject + ' to have the following methods: ' + missedMethods.join(", "); } }; }}; } }); }); function elementText(n) { var hasNodes = function(n) { var children = dom_adapter_1.DOM.childNodes(n); return children && children.length > 0; }; if (n instanceof Array) { return n.map(elementText).join(""); } if (dom_adapter_1.DOM.isCommentNode(n)) { return ''; } if (dom_adapter_1.DOM.isElementNode(n) && dom_adapter_1.DOM.tagName(n) == 'CONTENT') { return 
elementText(Array.prototype.slice.apply(dom_adapter_1.DOM.getDistributedNodes(n))); } if (dom_adapter_1.DOM.hasShadowRoot(n)) { return elementText(dom_adapter_1.DOM.childNodesAsList(dom_adapter_1.DOM.getShadowRoot(n))); } if (hasNodes(n)) { return elementText(dom_adapter_1.DOM.childNodesAsList(n)); } return dom_adapter_1.DOM.getText(n); } global.define = __define; return module.exports; }); System.register("angular2/src/compiler/xhr_mock", ["angular2/src/compiler/xhr", "angular2/src/facade/collection", "angular2/src/facade/lang", "angular2/src/facade/exceptions", "angular2/src/facade/async"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var __extends = (this && this.__extends) || function(d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; function __() { this.constructor = d; } d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); }; var xhr_1 = require("angular2/src/compiler/xhr"); var collection_1 = require("angular2/src/facade/collection"); var lang_1 = require("angular2/src/facade/lang"); var exceptions_1 = require("angular2/src/facade/exceptions"); var async_1 = require("angular2/src/facade/async"); var MockXHR = (function(_super) { __extends(MockXHR, _super); function MockXHR() { _super.apply(this, arguments); this._expectations = []; this._definitions = new collection_1.Map(); this._requests = []; } MockXHR.prototype.get = function(url) { var request = new _PendingRequest(url); this._requests.push(request); return request.getPromise(); }; MockXHR.prototype.expect = function(url, response) { var expectation = new _Expectation(url, response); this._expectations.push(expectation); }; MockXHR.prototype.when = function(url, response) { this._definitions.set(url, response); }; MockXHR.prototype.flush = function() { if (this._requests.length === 0) { throw new exceptions_1.BaseException('No pending requests to flush'); } do { 
this._processRequest(this._requests.shift()); } while (this._requests.length > 0); this.verifyNoOutstandingExpectations(); }; MockXHR.prototype.verifyNoOutstandingExpectations = function() { if (this._expectations.length === 0) return ; var urls = []; for (var i = 0; i < this._expectations.length; i++) { var expectation = this._expectations[i]; urls.push(expectation.url); } throw new exceptions_1.BaseException("Unsatisfied requests: " + urls.join(', ')); }; MockXHR.prototype._processRequest = function(request) { var url = request.url; if (this._expectations.length > 0) { var expectation = this._expectations[0]; if (expectation.url == url) { collection_1.ListWrapper.remove(this._expectations, expectation); request.complete(expectation.response); return ; } } if (this._definitions.has(url)) { var response = this._definitions.get(url); request.complete(lang_1.normalizeBlank(response)); return ; } throw new exceptions_1.BaseException("Unexpected request " + url); }; return MockXHR; }(xhr_1.XHR)); exports.MockXHR = MockXHR; var _PendingRequest = (function() { function _PendingRequest(url) { this.url = url; this.completer = async_1.PromiseWrapper.completer(); } _PendingRequest.prototype.complete = function(response) { if (lang_1.isBlank(response)) { this.completer.reject("Failed to load " + this.url, null); } else { this.completer.resolve(response); } }; _PendingRequest.prototype.getPromise = function() { return this.completer.promise; }; return _PendingRequest; }()); var _Expectation = (function() { function _Expectation(url, response) { this.url = url; this.response = response; } return _Expectation; }()); global.define = __define; return module.exports; }); System.register("angular2/src/testing/test_injector", ["angular2/core", "angular2/src/facade/exceptions", "angular2/src/facade/collection", "angular2/src/facade/lang"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var 
core_1 = require("angular2/core"); var exceptions_1 = require("angular2/src/facade/exceptions"); var collection_1 = require("angular2/src/facade/collection"); var lang_1 = require("angular2/src/facade/lang"); var TestInjector = (function() { function TestInjector() { this._instantiated = false; this._injector = null; this._providers = []; this.platformProviders = []; this.applicationProviders = []; } TestInjector.prototype.reset = function() { this._injector = null; this._providers = []; this._instantiated = false; }; TestInjector.prototype.addProviders = function(providers) { if (this._instantiated) { throw new exceptions_1.BaseException('Cannot add providers after test injector is instantiated'); } this._providers = collection_1.ListWrapper.concat(this._providers, providers); }; TestInjector.prototype.createInjector = function() { var rootInjector = core_1.Injector.resolveAndCreate(this.platformProviders); this._injector = rootInjector.resolveAndCreateChild(collection_1.ListWrapper.concat(this.applicationProviders, this._providers)); this._instantiated = true; return this._injector; }; TestInjector.prototype.execute = function(fn) { var additionalProviders = fn.additionalProviders(); if (additionalProviders.length > 0) { this.addProviders(additionalProviders); } if (!this._instantiated) { this.createInjector(); } return fn.execute(this._injector); }; return TestInjector; }()); exports.TestInjector = TestInjector; var _testInjector = null; function getTestInjector() { if (_testInjector == null) { _testInjector = new TestInjector(); } return _testInjector; } exports.getTestInjector = getTestInjector; function setBaseTestProviders(platformProviders, applicationProviders) { var testInjector = getTestInjector(); if (testInjector.platformProviders.length > 0 || testInjector.applicationProviders.length > 0) { throw new exceptions_1.BaseException('Cannot set base providers because it has already been called'); } testInjector.platformProviders = platformProviders; 
testInjector.applicationProviders = applicationProviders; var injector = testInjector.createInjector(); var inits = injector.getOptional(core_1.PLATFORM_INITIALIZER); if (lang_1.isPresent(inits)) { inits.forEach(function(init) { return init(); }); } testInjector.reset(); } exports.setBaseTestProviders = setBaseTestProviders; function resetBaseTestProviders() { var testInjector = getTestInjector(); testInjector.platformProviders = []; testInjector.applicationProviders = []; testInjector.reset(); } exports.resetBaseTestProviders = resetBaseTestProviders; function inject(tokens, fn) { return new FunctionWithParamTokens(tokens, fn, false); } exports.inject = inject; var InjectSetupWrapper = (function() { function InjectSetupWrapper(_providers) { this._providers = _providers; } InjectSetupWrapper.prototype.inject = function(tokens, fn) { return new FunctionWithParamTokens(tokens, fn, false, this._providers); }; InjectSetupWrapper.prototype.injectAsync = function(tokens, fn) { return new FunctionWithParamTokens(tokens, fn, true, this._providers); }; return InjectSetupWrapper; }()); exports.InjectSetupWrapper = InjectSetupWrapper; function withProviders(providers) { return new InjectSetupWrapper(providers); } exports.withProviders = withProviders; function injectAsync(tokens, fn) { return new FunctionWithParamTokens(tokens, fn, true); } exports.injectAsync = injectAsync; function emptyArray() { return []; } var FunctionWithParamTokens = (function() { function FunctionWithParamTokens(_tokens, _fn, isAsync, additionalProviders) { if (additionalProviders === void 0) { additionalProviders = emptyArray; } this._tokens = _tokens; this._fn = _fn; this.isAsync = isAsync; this.additionalProviders = additionalProviders; } FunctionWithParamTokens.prototype.execute = function(injector) { var params = this._tokens.map(function(t) { return injector.get(t); }); return lang_1.FunctionWrapper.apply(this._fn, params); }; FunctionWithParamTokens.prototype.hasToken = function(token) { return 
this._tokens.indexOf(token) > -1; }; return FunctionWithParamTokens; }()); exports.FunctionWithParamTokens = FunctionWithParamTokens; global.define = __define; return module.exports; }); System.register("angular2/src/testing/testing", ["angular2/src/facade/lang", "angular2/src/testing/test_injector", "angular2/src/testing/test_injector", "angular2/src/testing/matchers"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; var lang_1 = require("angular2/src/facade/lang"); var test_injector_1 = require("angular2/src/testing/test_injector"); var test_injector_2 = require("angular2/src/testing/test_injector"); exports.inject = test_injector_2.inject; exports.injectAsync = test_injector_2.injectAsync; var matchers_1 = require("angular2/src/testing/matchers"); exports.expect = matchers_1.expect; var _global = (typeof window === 'undefined' ? lang_1.global : window); exports.afterEach = _global.afterEach; exports.describe = _global.describe; exports.ddescribe = _global.fdescribe; exports.fdescribe = _global.fdescribe; exports.xdescribe = _global.xdescribe; var jsmBeforeEach = _global.beforeEach; var jsmIt = _global.it; var jsmIIt = _global.fit; var jsmXIt = _global.xit; var testInjector = test_injector_1.getTestInjector(); jsmBeforeEach(function() { testInjector.reset(); }); function beforeEachProviders(fn) { jsmBeforeEach(function() { var providers = fn(); if (!providers) return ; try { testInjector.addProviders(providers); } catch (e) { throw new Error('beforeEachProviders was called after the injector had ' + 'been used in a beforeEach or it block. 
This invalidates the ' + 'test injector'); } }); } exports.beforeEachProviders = beforeEachProviders; function _isPromiseLike(input) { return input && !!(input.then); } function _it(jsmFn, name, testFn, testTimeOut) { var timeOut = testTimeOut; if (testFn instanceof test_injector_1.FunctionWithParamTokens) { var testFnT_1 = testFn; jsmFn(name, function(done) { var returnedTestValue; try { returnedTestValue = testInjector.execute(testFnT_1); } catch (err) { done.fail(err); return ; } if (testFnT_1.isAsync) { if (_isPromiseLike(returnedTestValue)) { returnedTestValue.then(function() { done(); }, function(err) { done.fail(err); }); } else { done.fail('Error: injectAsync was expected to return a promise, but the ' + ' returned value was: ' + returnedTestValue); } } else { if (!(returnedTestValue === undefined)) { done.fail('Error: inject returned a value. Did you mean to use injectAsync? Returned ' + 'value was: ' + returnedTestValue); } done(); } }, timeOut); } else { jsmFn(name, testFn, timeOut); } } function beforeEach(fn) { if (fn instanceof test_injector_1.FunctionWithParamTokens) { var fnT_1 = fn; jsmBeforeEach(function(done) { var returnedTestValue; try { returnedTestValue = testInjector.execute(fnT_1); } catch (err) { done.fail(err); return ; } if (fnT_1.isAsync) { if (_isPromiseLike(returnedTestValue)) { returnedTestValue.then(function() { done(); }, function(err) { done.fail(err); }); } else { done.fail('Error: injectAsync was expected to return a promise, but the ' + ' returned value was: ' + returnedTestValue); } } else { if (!(returnedTestValue === undefined)) { done.fail('Error: inject returned a value. Did you mean to use injectAsync? 
Returned ' + 'value was: ' + returnedTestValue); } done(); } }); } else { if (fn.length === 0) { jsmBeforeEach(function() { fn(); }); } else { jsmBeforeEach(function(done) { fn(done); }); } } } exports.beforeEach = beforeEach; function it(name, fn, timeOut) { if (timeOut === void 0) { timeOut = null; } return _it(jsmIt, name, fn, timeOut); } exports.it = it; function xit(name, fn, timeOut) { if (timeOut === void 0) { timeOut = null; } return _it(jsmXIt, name, fn, timeOut); } exports.xit = xit; function iit(name, fn, timeOut) { if (timeOut === void 0) { timeOut = null; } return _it(jsmIIt, name, fn, timeOut); } exports.iit = iit; function fit(name, fn, timeOut) { if (timeOut === void 0) { timeOut = null; } return _it(jsmIIt, name, fn, timeOut); } exports.fit = fit; global.define = __define; return module.exports; }); System.register("angular2/testing", ["angular2/src/testing/testing", "angular2/src/testing/test_component_builder", "angular2/src/testing/test_injector", "angular2/src/testing/fake_async", "angular2/src/mock/view_resolver_mock", "angular2/src/compiler/xhr_mock", "angular2/src/mock/ng_zone_mock", "angular2/src/mock/mock_application_ref", "angular2/src/mock/directive_resolver_mock"], true, function(require, exports, module) { var global = System.global, __define = global.define; global.define = undefined; "use strict"; function __export(m) { for (var p in m) if (!exports.hasOwnProperty(p)) exports[p] = m[p]; } __export(require("angular2/src/testing/testing")); var test_component_builder_1 = require("angular2/src/testing/test_component_builder"); exports.ComponentFixture = test_component_builder_1.ComponentFixture; exports.TestComponentBuilder = test_component_builder_1.TestComponentBuilder; __export(require("angular2/src/testing/test_injector")); __export(require("angular2/src/testing/fake_async")); var view_resolver_mock_1 = require("angular2/src/mock/view_resolver_mock"); exports.MockViewResolver = view_resolver_mock_1.MockViewResolver; var xhr_mock_1 = 
require("angular2/src/compiler/xhr_mock"); exports.MockXHR = xhr_mock_1.MockXHR; var ng_zone_mock_1 = require("angular2/src/mock/ng_zone_mock"); exports.MockNgZone = ng_zone_mock_1.MockNgZone; var mock_application_ref_1 = require("angular2/src/mock/mock_application_ref"); exports.MockApplicationRef = mock_application_ref_1.MockApplicationRef; var directive_resolver_mock_1 = require("angular2/src/mock/directive_resolver_mock"); exports.MockDirectiveResolver = directive_resolver_mock_1.MockDirectiveResolver; global.define = __define; return module.exports; }); //# sourceMappingURL=testing.dev.js.map
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package rest import ( "testing" "k8s.io/apimachinery/pkg/api/errors" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/registrytest" ) func TestPodLogValidates(t *testing.T) { config, server := registrytest.NewEtcdStorage(t, "") defer server.Terminate(t) s, destroyFunc, err := generic.NewRawStorage(config) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer destroyFunc() store := &genericregistry.Store{ Storage: genericregistry.DryRunnableStorage{Storage: s}, } logRest := &LogREST{Store: store, KubeletConn: nil} negativeOne := int64(-1) testCases := []*api.PodLogOptions{ {SinceSeconds: &negativeOne}, {TailLines: &negativeOne}, } for _, tc := range testCases { _, err := logRest.Get(genericapirequest.NewDefaultContext(), "test", tc) if !errors.IsInvalid(err) { t.Fatalf("Unexpected error: %v", err) } } }
/* -*- Mode: C; indent-tabs-mode:t ; c-basic-offset:8 -*- */
/*
 * Core functions for libusbx
 * Copyright © 2012-2013 Nathan Hjelm <hjelmn@cs.unm.edu>
 * Copyright © 2007-2008 Daniel Drake <dsd@gentoo.org>
 * Copyright © 2001 Johannes Erdfelt <johannes@erdfelt.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef __ANDROID__
#include <android/log.h>
#endif

#include "libusbi.h"
#include "hotplug.h"

/* Exactly one OS backend is compiled in; anything else is a build error. */
#if defined(OS_LINUX)
const struct usbi_os_backend * const usbi_backend = &linux_usbfs_backend;
#elif defined(OS_DARWIN)
const struct usbi_os_backend * const usbi_backend = &darwin_backend;
#elif defined(OS_OPENBSD)
const struct usbi_os_backend * const usbi_backend = &openbsd_backend;
#elif defined(OS_WINDOWS)
const struct usbi_os_backend * const usbi_backend = &windows_backend;
#elif defined(OS_WINCE)
const struct usbi_os_backend * const usbi_backend = &wince_backend;
#else
#error "Unsupported OS"
#endif

/* Context used when callers pass NULL; shared and reference-counted
 * (see default_context_refcnt below). */
struct libusb_context *usbi_default_context = NULL;

const struct libusb_version libusb_version_internal = {
	LIBUSB_MAJOR, LIBUSB_MINOR, LIBUSB_MICRO, LIBUSB_NANO, LIBUSB_RC,
	"http://libusbx.org"
};

/* Guarded by default_context_lock. */
static int default_context_refcnt = 0;
static usbi_mutex_static_t default_context_lock = USBI_MUTEX_INITIALIZER;
static struct timeval timestamp_origin = { 0, 0 };

/* All live contexts, protected by active_contexts_lock. */
usbi_mutex_static_t active_contexts_lock = USBI_MUTEX_INITIALIZER;
struct list_head active_contexts_list;

/**
 * \mainpage libusbx-1.0 API Reference
 *
 * \section intro Introduction
 *
 * libusbx is an open source library that allows you to communicate with USB
 * devices from userspace. For more info, see the
 * <a href="http://libusbx.org">libusbx homepage</a>.
 *
 * This documentation is aimed at application developers wishing to
 * communicate with USB peripherals from their own software. After reviewing
 * this documentation, feedback and questions can be sent to the
 * <a href="http://mailing-list.libusbx.org">libusbx-devel mailing list</a>.
 *
 * This documentation assumes knowledge of how to operate USB devices from
 * a software standpoint (descriptors, configurations, interfaces, endpoints,
 * control/bulk/interrupt/isochronous transfers, etc). Full information
 * can be found in the <a href="http://www.usb.org/developers/docs/">USB 3.0
 * Specification</a> which is available for free download. You can probably
 * find less verbose introductions by searching the web.
 *
 * \section features Library features
 *
 * - All transfer types supported (control/bulk/interrupt/isochronous)
 * - 2 transfer interfaces:
 *    -# Synchronous (simple)
 *    -# Asynchronous (more complicated, but more powerful)
 * - Thread safe (although the asynchronous interface means that you
 *   usually won't need to thread)
 * - Lightweight with lean API
 * - Compatible with libusb-0.1 through the libusb-compat-0.1 translation layer
 * - Hotplug support (on some platforms). See \ref hotplug.
 *
 * \section gettingstarted Getting Started
 *
 * To begin reading the API documentation, start with the Modules page which
 * links to the different categories of libusbx's functionality.
 *
 * One decision you will have to make is whether to use the synchronous
 * or the asynchronous data transfer interface. The \ref io documentation
 * provides some insight into this topic.
 *
 * Some example programs can be found in the libusbx source distribution under
 * the "examples" subdirectory. The libusbx homepage includes a list of
 * real-life project examples which use libusbx.
 *
 * \section errorhandling Error handling
 *
 * libusbx functions typically return 0 on success or a negative error code
 * on failure. These negative error codes relate to LIBUSB_ERROR constants
 * which are listed on the \ref misc "miscellaneous" documentation page.
 *
 * \section msglog Debug message logging
 *
 * libusbx uses stderr for all logging. By default, logging is set to NONE,
 * which means that no output will be produced. However, unless the library
 * has been compiled with logging disabled, then any application calls to
 * libusb_set_debug(), or the setting of the environment variable
 * LIBUSB_DEBUG outside of the application, can result in logging being
 * produced. Your application should therefore not close stderr, but instead
 * direct it to the null device if its output is undesirable.
 *
 * The libusb_set_debug() function can be used to enable logging of certain
 * messages. Under standard configuration, libusbx doesn't really log much
 * so you are advised to use this function to enable all error/warning/
 * informational messages. It will help debug problems with your software.
 *
 * The logged messages are unstructured. There is no one-to-one correspondence
 * between messages being logged and success or failure return codes from
 * libusbx functions. There is no format to the messages, so you should not
 * try to capture or parse them. They are not and will not be localized.
 * These messages are not intended to be passed to your application user;
 * instead, you should interpret the error codes returned from libusbx functions
 * and provide appropriate notification to the user.
The messages are simply * there to aid you as a programmer, and if you're confused because you're * getting a strange error code from a libusbx function, enabling message * logging may give you a suitable explanation. * * The LIBUSB_DEBUG environment variable can be used to enable message logging * at run-time. This environment variable should be set to a log level number, * which is interpreted the same as the libusb_set_debug() parameter. When this * environment variable is set, the message logging verbosity level is fixed * and libusb_set_debug() effectively does nothing. * * libusbx can be compiled without any logging functions, useful for embedded * systems. In this case, libusb_set_debug() and the LIBUSB_DEBUG environment * variable have no effects. * * libusbx can also be compiled with verbose debugging messages always. When * the library is compiled in this way, all messages of all verbosities are * always logged. libusb_set_debug() and the LIBUSB_DEBUG environment variable * have no effects. * * \section remarks Other remarks * * libusbx does have imperfections. The \ref caveats "caveats" page attempts * to document these. */ /** * \page caveats Caveats * * \section devresets Device resets * * The libusb_reset_device() function allows you to reset a device. If your * program has to call such a function, it should obviously be aware that * the reset will cause device state to change (e.g. register values may be * reset). * * The problem is that any other program could reset the device your program * is working with, at any time. libusbx does not offer a mechanism to inform * you when this has happened, so if someone else resets your device it will * not be clear to your own program why the device state has changed. * * Ultimately, this is a limitation of writing drivers in userspace. * Separation from the USB stack in the underlying kernel makes it difficult * for the operating system to deliver such notifications to your program. 
* The Linux kernel USB stack allows such reset notifications to be delivered * to in-kernel USB drivers, but it is not clear how such notifications could * be delivered to second-class drivers that live in userspace. * * \section blockonly Blocking-only functionality * * The functionality listed below is only available through synchronous, * blocking functions. There are no asynchronous/non-blocking alternatives, * and no clear ways of implementing these. * * - Configuration activation (libusb_set_configuration()) * - Interface/alternate setting activation (libusb_set_interface_alt_setting()) * - Releasing of interfaces (libusb_release_interface()) * - Clearing of halt/stall condition (libusb_clear_halt()) * - Device resets (libusb_reset_device()) * * \section configsel Configuration selection and handling * * When libusbx presents a device handle to an application, there is a chance * that the corresponding device may be in unconfigured state. For devices * with multiple configurations, there is also a chance that the configuration * currently selected is not the one that the application wants to use. * * The obvious solution is to add a call to libusb_set_configuration() early * on during your device initialization routines, but there are caveats to * be aware of: * -# If the device is already in the desired configuration, calling * libusb_set_configuration() using the same configuration value will cause * a lightweight device reset. This may not be desirable behaviour. * -# libusbx will be unable to change configuration if the device is in * another configuration and other programs or drivers have claimed * interfaces under that configuration. * -# In the case where the desired configuration is already active, libusbx * may not even be able to perform a lightweight device reset. 
For example, * take my USB keyboard with fingerprint reader: I'm interested in driving * the fingerprint reader interface through libusbx, but the kernel's * USB-HID driver will almost always have claimed the keyboard interface. * Because the kernel has claimed an interface, it is not even possible to * perform the lightweight device reset, so libusb_set_configuration() will * fail. (Luckily the device in question only has a single configuration.) * * One solution to some of the above problems is to consider the currently * active configuration. If the configuration we want is already active, then * we don't have to select any configuration: \code cfg = libusb_get_configuration(dev); if (cfg != desired) libusb_set_configuration(dev, desired); \endcode * * This is probably suitable for most scenarios, but is inherently racy: * another application or driver may change the selected configuration * <em>after</em> the libusb_get_configuration() call. * * Even in cases where libusb_set_configuration() succeeds, consider that other * applications or drivers may change configuration after your application * calls libusb_set_configuration(). * * One possible way to lock your device into a specific configuration is as * follows: * -# Set the desired configuration (or use the logic above to realise that * it is already in the desired configuration) * -# Claim the interface that you wish to use * -# Check that the currently active configuration is the one that you want * to use. * * The above method works because once an interface is claimed, no application * or driver is able to select another configuration. * * \section earlycomp Early transfer completion * * NOTE: This section is currently Linux-centric. I am not sure if any of these * considerations apply to Darwin or other platforms. * * When a transfer completes early (i.e. 
when less data is received/sent in * any one packet than the transfer buffer allows for) then libusbx is designed * to terminate the transfer immediately, not transferring or receiving any * more data unless other transfers have been queued by the user. * * On legacy platforms, libusbx is unable to do this in all situations. After * the incomplete packet occurs, "surplus" data may be transferred. For recent * versions of libusbx, this information is kept (the data length of the * transfer is updated) and, for device-to-host transfers, any surplus data was * added to the buffer. Still, this is not a nice solution because it loses the * information about the end of the short packet, and the user probably wanted * that surplus data to arrive in the next logical transfer. * * * \section zlp Zero length packets * * - libusbx is able to send a packet of zero length to an endpoint simply by * submitting a transfer of zero length. * - The \ref libusb_transfer_flags::LIBUSB_TRANSFER_ADD_ZERO_PACKET * "LIBUSB_TRANSFER_ADD_ZERO_PACKET" flag is currently only supported on Linux. */ /** * \page contexts Contexts * * It is possible that libusbx may be used simultaneously from two independent * libraries linked into the same executable. For example, if your application * has a plugin-like system which allows the user to dynamically load a range * of modules into your program, it is feasible that two independently * developed modules may both use libusbx. * * libusbx is written to allow for these multiple user scenarios. The two * "instances" of libusbx will not interfere: libusb_set_debug() calls * from one user will not affect the same settings for other users, other * users can continue using libusbx after one of them calls libusb_exit(), etc. * * This is made possible through libusbx's <em>context</em> concept. When you * call libusb_init(), you are (optionally) given a context. You can then pass * this context pointer back into future libusbx functions. 
* * In order to keep things simple for more simplistic applications, it is * legal to pass NULL to all functions requiring a context pointer (as long as * you're sure no other code will attempt to use libusbx from the same process). * When you pass NULL, the default context will be used. The default context * is created the first time a process calls libusb_init() when no other * context is alive. Contexts are destroyed during libusb_exit(). * * The default context is reference-counted and can be shared. That means that * if libusb_init(NULL) is called twice within the same process, the two * users end up sharing the same context. The deinitialization and freeing of * the default context will only happen when the last user calls libusb_exit(). * In other words, the default context is created and initialized when its * reference count goes from 0 to 1, and is deinitialized and destroyed when * its reference count goes from 1 to 0. * * You may be wondering why only a subset of libusbx functions require a * context pointer in their function definition. Internally, libusbx stores * context pointers in other objects (e.g. libusb_device instances) and hence * can infer the context from those objects. */ /** * @defgroup lib Library initialization/deinitialization * This page details how to initialize and deinitialize libusbx. Initialization * must be performed before using any libusbx functionality, and similarly you * must not call any libusbx functions after deinitialization. */ /** * @defgroup dev Device handling and enumeration * The functionality documented below is designed to help with the following * operations: * - Enumerating the USB devices currently attached to the system * - Choosing a device to operate from your software * - Opening and closing the chosen device * * \section nutshell In a nutshell... * * The description below really makes things sound more complicated than they * actually are. 
The following sequence of function calls will be suitable * for almost all scenarios and does not require you to have such a deep * understanding of the resource management issues: * \code // discover devices libusb_device **list; libusb_device *found = NULL; ssize_t cnt = libusb_get_device_list(NULL, &list); ssize_t i = 0; int err = 0; if (cnt < 0) error(); for (i = 0; i < cnt; i++) { libusb_device *device = list[i]; if (is_interesting(device)) { found = device; break; } } if (found) { libusb_device_handle *handle; err = libusb_open(found, &handle); if (err) error(); // etc } libusb_free_device_list(list, 1); \endcode * * The two important points: * - You asked libusb_free_device_list() to unreference the devices (2nd * parameter) * - You opened the device before freeing the list and unreferencing the * devices * * If you ended up with a handle, you can now proceed to perform I/O on the * device. * * \section devshandles Devices and device handles * libusbx has a concept of a USB device, represented by the * \ref libusb_device opaque type. A device represents a USB device that * is currently or was previously connected to the system. Using a reference * to a device, you can determine certain information about the device (e.g. * you can read the descriptor data). * * The libusb_get_device_list() function can be used to obtain a list of * devices currently connected to the system. This is known as device * discovery. * * Just because you have a reference to a device does not mean it is * necessarily usable. The device may have been unplugged, you may not have * permission to operate such device, or another program or driver may be * using the device. * * When you've found a device that you'd like to operate, you must ask * libusbx to open the device using the libusb_open() function. Assuming * success, libusbx then returns you a <em>device handle</em> * (a \ref libusb_device_handle pointer). 
All "real" I/O operations then * operate on the handle rather than the original device pointer. * * \section devref Device discovery and reference counting * * Device discovery (i.e. calling libusb_get_device_list()) returns a * freshly-allocated list of devices. The list itself must be freed when * you are done with it. libusbx also needs to know when it is OK to free * the contents of the list - the devices themselves. * * To handle these issues, libusbx provides you with two separate items: * - A function to free the list itself * - A reference counting system for the devices inside * * New devices presented by the libusb_get_device_list() function all have a * reference count of 1. You can increase and decrease reference count using * libusb_ref_device() and libusb_unref_device(). A device is destroyed when * its reference count reaches 0. * * With the above information in mind, the process of opening a device can * be viewed as follows: * -# Discover devices using libusb_get_device_list(). * -# Choose the device that you want to operate, and call libusb_open(). * -# Unref all devices in the discovered device list. * -# Free the discovered device list. * * The order is important - you must not unreference the device before * attempting to open it, because unreferencing it may destroy the device. * * For convenience, the libusb_free_device_list() function includes a * parameter to optionally unreference all the devices in the list before * freeing the list itself. This combines steps 3 and 4 above. * * As an implementation detail, libusb_open() actually adds a reference to * the device in question. This is because the device remains available * through the handle via libusb_get_device(). The reference is deleted during * libusb_close(). */ /** @defgroup misc Miscellaneous */ /* we traverse usbfs without knowing how many devices we are going to find. * so we create this discovered_devs model which is similar to a linked-list * which grows when required. 
it can be freed once discovery has completed,
 * eliminating the need for a list node in the libusb_device structure
 * itself. */

/* Growth increment (in device slots) for the discovered_devs array. */
#define DISCOVERED_DEVICES_SIZE_STEP 8

/* Allocate an empty discovered_devs collection with capacity for
 * DISCOVERED_DEVICES_SIZE_STEP devices. Returns NULL on allocation failure. */
static struct discovered_devs *discovered_devs_alloc(void)
{
	struct discovered_devs *ret =
		malloc(sizeof(*ret) + (sizeof(void *) * DISCOVERED_DEVICES_SIZE_STEP));

	if (ret) {
		ret->len = 0;
		ret->capacity = DISCOVERED_DEVICES_SIZE_STEP;
	}
	return ret;
}

/* append a device to the discovered devices collection. may realloc itself,
 * returning new discdevs. returns NULL on realloc failure. */
struct discovered_devs *discovered_devs_append(
	struct discovered_devs *discdevs, struct libusb_device *dev)
{
	size_t len = discdevs->len;
	size_t capacity;

	/* if there is space, just append the device */
	if (len < discdevs->capacity) {
		discdevs->devices[len] = libusb_ref_device(dev);
		discdevs->len++;
		return discdevs;
	}

	/* exceeded capacity, need to grow */
	usbi_dbg("need to increase capacity");
	capacity = discdevs->capacity + DISCOVERED_DEVICES_SIZE_STEP;
	/* usbi_reallocf frees the original block on failure, so discdevs is
	 * not leaked when we return NULL here */
	discdevs = usbi_reallocf(discdevs,
		sizeof(*discdevs) + (sizeof(void *) * capacity));
	if (discdevs) {
		discdevs->capacity = capacity;
		discdevs->devices[len] = libusb_ref_device(dev);
		discdevs->len++;
	}

	return discdevs;
}

/* Drop the collection's reference on every contained device, then free the
 * collection itself. */
static void discovered_devs_free(struct discovered_devs *discdevs)
{
	size_t i;

	for (i = 0; i < discdevs->len; i++)
		libusb_unref_device(discdevs->devices[i]);

	free(discdevs);
}

/* Allocate a new device with a specific session ID. The returned device has
 * a reference count of 1.
*/
struct libusb_device *usbi_alloc_device(struct libusb_context *ctx,
	unsigned long session_id)
{
	/* the backend's private data lives in the same allocation,
	 * immediately after the public struct */
	size_t priv_size = usbi_backend->device_priv_size;
	struct libusb_device *dev = calloc(1, sizeof(*dev) + priv_size);
	int r;

	if (!dev)
		return NULL;

	r = usbi_mutex_init(&dev->lock, NULL);
	if (r) {
		free(dev);
		return NULL;
	}

	dev->ctx = ctx;
	dev->refcnt = 1;
	dev->session_data = session_id;
	dev->speed = LIBUSB_SPEED_UNKNOWN;

	/* without hotplug support the backend won't call usbi_connect_device()
	 * itself, so register the device on the context's list here */
	if (!libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG)) {
		usbi_connect_device (dev);
	}

	return dev;
}

/* Mark a device as attached, add it to the context's device list, and (when
 * hotplug is supported) notify the event thread of the arrival. */
void usbi_connect_device(struct libusb_device *dev)
{
	libusb_hotplug_message message;
	ssize_t ret;

	memset(&message, 0, sizeof(message));
	message.event = LIBUSB_HOTPLUG_EVENT_DEVICE_ARRIVED;
	message.device = dev;
	dev->attached = 1;

	usbi_mutex_lock(&dev->ctx->usb_devs_lock);
	list_add(&dev->list, &dev->ctx->usb_devs);
	usbi_mutex_unlock(&dev->ctx->usb_devs_lock);

	/* Signal that an event has occurred for this device if we support hotplug AND
	 * the hotplug pipe is ready. This prevents an event from getting raised during
	 * initial enumeration. */
	if (libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG) && dev->ctx->hotplug_pipe[1] > 0) {
		ret = usbi_write(dev->ctx->hotplug_pipe[1], &message, sizeof(message));
		if (sizeof (message) != ret) {
			usbi_err(DEVICE_CTX(dev), "error writing hotplug message");
		}
	}
}

/* Mark a device as detached, notify the event thread (when hotplug is
 * supported), and remove the device from the context's device list. */
void usbi_disconnect_device(struct libusb_device *dev)
{
	libusb_hotplug_message message;
	struct libusb_context *ctx = dev->ctx;
	ssize_t ret;

	memset(&message, 0, sizeof(message));
	message.event = LIBUSB_HOTPLUG_EVENT_DEVICE_LEFT;
	message.device = dev;
	usbi_mutex_lock(&dev->lock);
	dev->attached = 0;
	usbi_mutex_unlock(&dev->lock);

	/* Signal that an event has occurred for this device if we support hotplug AND
	 * the hotplug pipe is ready. This prevents an event from getting raised during
	 * initial enumeration. libusb_handle_events will take care of dereferencing the
	 * device. */
	if (libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG) && dev->ctx->hotplug_pipe[1] > 0) {
		ret = usbi_write(dev->ctx->hotplug_pipe[1], &message, sizeof(message));
		if (sizeof(message) != ret) {
			usbi_err(DEVICE_CTX(dev), "error writing hotplug message");
		}
	}

	usbi_mutex_lock(&ctx->usb_devs_lock);
	list_del(&dev->list);
	usbi_mutex_unlock(&ctx->usb_devs_lock);
}

/* Perform some final sanity checks on a newly discovered device. If this
 * function fails (negative return code), the device should not be added
 * to the discovered device list. */
int usbi_sanitize_device(struct libusb_device *dev)
{
	int r;
	uint8_t num_configurations;

	r = usbi_device_cache_descriptor(dev);
	if (r < 0)
		return r;

	num_configurations = dev->device_descriptor.bNumConfigurations;
	if (num_configurations > USB_MAXCONFIG) {
		usbi_err(DEVICE_CTX(dev), "too many configurations");
		return LIBUSB_ERROR_IO;
	} else if (0 == num_configurations)
		/* not fatal: some devices report zero configurations */
		usbi_dbg("zero configurations, maybe an unauthorized device");

	dev->num_configurations = num_configurations;
	return 0;
}

/* Examine libusbx's internal list of known devices, looking for one with
 * a specific session ID. Returns the matching device if it was found, and
 * NULL otherwise. Note: does not add a reference to the returned device. */
struct libusb_device *usbi_get_device_by_session_id(struct libusb_context *ctx,
	unsigned long session_id)
{
	struct libusb_device *dev;
	struct libusb_device *ret = NULL;

	usbi_mutex_lock(&ctx->usb_devs_lock);
	list_for_each_entry(dev, &ctx->usb_devs, list, struct libusb_device)
		if (dev->session_data == session_id) {
			ret = dev;
			break;
		}
	usbi_mutex_unlock(&ctx->usb_devs_lock);

	return ret;
}

/** @ingroup dev
 * Returns a list of USB devices currently attached to the system. This is
 * your entry point into finding a USB device to operate.
 *
 * You are expected to unreference all the devices when you are done with
 * them, and then free the list with libusb_free_device_list(). Note that
 * libusb_free_device_list() can unref all the devices for you.
Be careful
 * not to unreference a device you are about to open until after you have
 * opened it.
 *
 * The return value of this function indicates the number of devices in
 * the resultant list. The list is actually one element larger, as it is
 * NULL-terminated.
 *
 * \param ctx the context to operate on, or NULL for the default context
 * \param list output location for a list of devices. Must be later freed with
 * libusb_free_device_list().
 * \returns the number of devices in the outputted list, or any
 * \ref libusb_error according to errors encountered by the backend.
 */
ssize_t API_EXPORTED libusb_get_device_list(libusb_context *ctx,
	libusb_device ***list)
{
	struct discovered_devs *discdevs = discovered_devs_alloc();
	struct libusb_device **ret;
	int r = 0;
	ssize_t i, len;
	USBI_GET_CONTEXT(ctx);
	usbi_dbg("");

	if (!discdevs)
		return LIBUSB_ERROR_NO_MEM;

	if (libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG)) {
		/* backend provides hotplug support: snapshot the context's
		 * live device list instead of re-scanning the bus */
		struct libusb_device *dev;

		if (usbi_backend->hotplug_poll)
			usbi_backend->hotplug_poll();

		usbi_mutex_lock(&ctx->usb_devs_lock);
		list_for_each_entry(dev, &ctx->usb_devs, list, struct libusb_device) {
			discdevs = discovered_devs_append(discdevs, dev);

			if (!discdevs) {
				r = LIBUSB_ERROR_NO_MEM;
				break;
			}
		}
		usbi_mutex_unlock(&ctx->usb_devs_lock);
	} else {
		/* backend does not provide hotplug support */
		r = usbi_backend->get_device_list(ctx, &discdevs);
	}

	if (r < 0) {
		len = r;
		goto out;
	}

	/* convert discovered_devs into a NULL-terminated list, taking a
	 * reference on each device for the caller */
	len = discdevs->len;
	ret = calloc(len + 1, sizeof(struct libusb_device *));
	if (!ret) {
		len = LIBUSB_ERROR_NO_MEM;
		goto out;
	}

	ret[len] = NULL;
	for (i = 0; i < len; i++) {
		struct libusb_device *dev = discdevs->devices[i];
		ret[i] = libusb_ref_device(dev);
	}
	*list = ret;

out:
	/* drops the collection's own references; the caller keeps the ones
	 * taken above */
	discovered_devs_free(discdevs);
	return len;
}

/** \ingroup dev
 * Frees a list of devices previously discovered using
 * libusb_get_device_list().
If the unref_devices parameter is set, the
 * reference count of each device in the list is decremented by 1.
 * \param list the list to free
 * \param unref_devices whether to unref the devices in the list
 */
void API_EXPORTED libusb_free_device_list(libusb_device **list,
	int unref_devices)
{
	if (!list)
		return;

	if (unref_devices) {
		int i = 0;
		struct libusb_device *dev;

		/* the list is NULL-terminated (see libusb_get_device_list) */
		while ((dev = list[i++]) != NULL)
			libusb_unref_device(dev);
	}
	free(list);
}

/** \ingroup dev
 * Get the number of the bus that a device is connected to.
 * \param dev a device
 * \returns the bus number
 */
uint8_t API_EXPORTED libusb_get_bus_number(libusb_device *dev)
{
	return dev->bus_number;
}

/** \ingroup dev
 * Get the number of the port that a device is connected to.
 * Unless the OS does something funky, or you are hot-plugging USB extension cards,
 * the port number returned by this call is usually guaranteed to be uniquely tied
 * to a physical port, meaning that different devices plugged on the same physical
 * port should return the same port number.
 *
 * But outside of this, there is no guarantee that the port number returned by this
 * call will remain the same, or even match the order in which ports have been
 * numbered by the HUB/HCD manufacturer.
 *
 * \param dev a device
 * \returns the port number (0 if not available)
 */
uint8_t API_EXPORTED libusb_get_port_number(libusb_device *dev)
{
	return dev->port_number;
}

/** \ingroup dev
 * Get the list of all port numbers from root for the specified device
 *
 * Since version 1.0.16, \ref LIBUSBX_API_VERSION >= 0x01000102
 * \param dev a device
 * \param port_numbers the array that should contain the port numbers
 * \param port_numbers_len the maximum length of the array. As per the USB 3.0
 * specs, the current maximum limit for the depth is 7.
 * \returns the number of elements filled
 * \returns LIBUSB_ERROR_OVERFLOW if the array is too small
 */
int API_EXPORTED libusb_get_port_numbers(libusb_device *dev,
	uint8_t* port_numbers, int port_numbers_len)
{
	/* fill the array from the tail backwards while walking up the
	 * parent chain, then shift the result down to index 0 */
	int i = port_numbers_len;

	while(dev) {
		// HCDs can be listed as devices and would have port #0
		// TODO: see how the other backends want to implement HCDs as parents
		if (dev->port_number == 0)
			break;
		i--;
		if (i < 0) {
			usbi_warn(DEVICE_CTX(dev),
				"port numbers array too small");
			return LIBUSB_ERROR_OVERFLOW;
		}
		port_numbers[i] = dev->port_number;
		dev = dev->parent_dev;
	}
	memmove(port_numbers, &port_numbers[i], port_numbers_len - i);
	return port_numbers_len - i;
}

/** \ingroup dev
 * Deprecated: please use libusb_get_port_numbers instead.
 */
int API_EXPORTED libusb_get_port_path(libusb_context *ctx, libusb_device *dev,
	uint8_t* port_numbers, uint8_t port_numbers_len)
{
	UNUSED(ctx);

	return libusb_get_port_numbers(dev, port_numbers, port_numbers_len);
}

/** \ingroup dev
 * Get the parent from the specified device.
 * \param dev a device
 * \returns the device parent or NULL if not available
 * You should issue a \ref libusb_get_device_list() before calling this
 * function and make sure that you only access the parent before issuing
 * \ref libusb_free_device_list(). The reason is that libusbx currently does
 * not maintain a permanent list of device instances, and therefore can
 * only guarantee that parents are fully instantiated within a
 * libusb_get_device_list() - libusb_free_device_list() block.
 */
DEFAULT_VISIBILITY
libusb_device * LIBUSB_CALL libusb_get_parent(libusb_device *dev)
{
	return dev->parent_dev;
}

/** \ingroup dev
 * Get the address of the device on the bus it is connected to.
 * \param dev a device
 * \returns the device address
 */
uint8_t API_EXPORTED libusb_get_device_address(libusb_device *dev)
{
	return dev->device_address;
}

/** \ingroup dev
 * Get the negotiated connection speed for a device.
 * \param dev a device
 * \returns a \ref libusb_speed code, where LIBUSB_SPEED_UNKNOWN means that
 * the OS doesn't know or doesn't support returning the negotiated speed.
 */
int API_EXPORTED libusb_get_device_speed(libusb_device *dev)
{
	return dev->speed;
}

/* Scan every altsetting of every interface in the given configuration for an
 * endpoint descriptor whose bEndpointAddress matches the requested endpoint.
 * Returns NULL when no such endpoint exists in the configuration. */
static const struct libusb_endpoint_descriptor *find_endpoint(
	struct libusb_config_descriptor *config, unsigned char endpoint)
{
	int iface_idx;
	for (iface_idx = 0; iface_idx < config->bNumInterfaces; iface_idx++) {
		const struct libusb_interface *iface = &config->interface[iface_idx];
		int altsetting_idx;

		for (altsetting_idx = 0; altsetting_idx < iface->num_altsetting;
				altsetting_idx++) {
			const struct libusb_interface_descriptor *altsetting
				= &iface->altsetting[altsetting_idx];
			int ep_idx;

			for (ep_idx = 0; ep_idx < altsetting->bNumEndpoints; ep_idx++) {
				const struct libusb_endpoint_descriptor *ep =
					&altsetting->endpoint[ep_idx];
				if (ep->bEndpointAddress == endpoint)
					return ep;
			}
		}
	}
	return NULL;
}

/** \ingroup dev
 * Convenience function to retrieve the wMaxPacketSize value for a particular
 * endpoint in the active device configuration.
 *
 * This function was originally intended to be of assistance when setting up
 * isochronous transfers, but a design mistake resulted in this function
 * instead. It simply returns the wMaxPacketSize value without considering
 * its contents. If you're dealing with isochronous transfers, you probably
 * want libusb_get_max_iso_packet_size() instead.
* * \param dev a device * \param endpoint address of the endpoint in question * \returns the wMaxPacketSize value * \returns LIBUSB_ERROR_NOT_FOUND if the endpoint does not exist * \returns LIBUSB_ERROR_OTHER on other failure */ int API_EXPORTED libusb_get_max_packet_size(libusb_device *dev, unsigned char endpoint) { struct libusb_config_descriptor *config; const struct libusb_endpoint_descriptor *ep; int r; r = libusb_get_active_config_descriptor(dev, &config); if (r < 0) { usbi_err(DEVICE_CTX(dev), "could not retrieve active config descriptor"); return LIBUSB_ERROR_OTHER; } ep = find_endpoint(config, endpoint); if (!ep) return LIBUSB_ERROR_NOT_FOUND; r = ep->wMaxPacketSize; libusb_free_config_descriptor(config); return r; } /** \ingroup dev * Calculate the maximum packet size which a specific endpoint is capable is * sending or receiving in the duration of 1 microframe * * Only the active configuration is examined. The calculation is based on the * wMaxPacketSize field in the endpoint descriptor as described in section * 9.6.6 in the USB 2.0 specifications. * * If acting on an isochronous or interrupt endpoint, this function will * multiply the value found in bits 0:10 by the number of transactions per * microframe (determined by bits 11:12). Otherwise, this function just * returns the numeric value found in bits 0:10. * * This function is useful for setting up isochronous transfers, for example * you might pass the return value from this function to * libusb_set_iso_packet_lengths() in order to set the length field of every * isochronous packet in a transfer. * * Since v1.0.3. 
* * \param dev a device * \param endpoint address of the endpoint in question * \returns the maximum packet size which can be sent/received on this endpoint * \returns LIBUSB_ERROR_NOT_FOUND if the endpoint does not exist * \returns LIBUSB_ERROR_OTHER on other failure */ int API_EXPORTED libusb_get_max_iso_packet_size(libusb_device *dev, unsigned char endpoint) { struct libusb_config_descriptor *config; const struct libusb_endpoint_descriptor *ep; enum libusb_transfer_type ep_type; uint16_t val; int r; r = libusb_get_active_config_descriptor(dev, &config); if (r < 0) { usbi_err(DEVICE_CTX(dev), "could not retrieve active config descriptor"); return LIBUSB_ERROR_OTHER; } ep = find_endpoint(config, endpoint); if (!ep) return LIBUSB_ERROR_NOT_FOUND; val = ep->wMaxPacketSize; ep_type = (enum libusb_transfer_type) (ep->bmAttributes & 0x3); libusb_free_config_descriptor(config); r = val & 0x07ff; if (ep_type == LIBUSB_TRANSFER_TYPE_ISOCHRONOUS || ep_type == LIBUSB_TRANSFER_TYPE_INTERRUPT) r *= (1 + ((val >> 11) & 3)); return r; } /** \ingroup dev * Increment the reference count of a device. * \param dev the device to reference * \returns the same device */ DEFAULT_VISIBILITY libusb_device * LIBUSB_CALL libusb_ref_device(libusb_device *dev) { usbi_mutex_lock(&dev->lock); dev->refcnt++; usbi_mutex_unlock(&dev->lock); return dev; } /** \ingroup dev * Decrement the reference count of a device. If the decrement operation * causes the reference count to reach zero, the device shall be destroyed. 
 * \param dev the device to unreference
 */
void API_EXPORTED libusb_unref_device(libusb_device *dev)
{
	int refcnt;

	if (!dev)
		return;

	usbi_mutex_lock(&dev->lock);
	refcnt = --dev->refcnt;
	usbi_mutex_unlock(&dev->lock);

	if (refcnt == 0) {
		usbi_dbg("destroy device %d.%d", dev->bus_number, dev->device_address);

		/* recursively drop the reference this device held on its
		 * parent (NULL parent is a no-op) */
		libusb_unref_device(dev->parent_dev);

		if (usbi_backend->destroy_device)
			usbi_backend->destroy_device(dev);

		if (!libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG)) {
			/* backend does not support hotplug */
			usbi_disconnect_device(dev);
		}

		usbi_mutex_destroy(&dev->lock);
		free(dev);
	}
}

/*
 * Interrupt the iteration of the event handling thread, so that it picks
 * up the new fd.
 */
void usbi_fd_notification(struct libusb_context *ctx)
{
	unsigned char dummy = 1;
	ssize_t r;

	if (ctx == NULL)
		return;

	/* record that we are messing with poll fds */
	usbi_mutex_lock(&ctx->pollfd_modify_lock);
	ctx->pollfd_modify++;
	usbi_mutex_unlock(&ctx->pollfd_modify_lock);

	/* write some data on control pipe to interrupt event handlers */
	r = usbi_write(ctx->ctrl_pipe[1], &dummy, sizeof(dummy));
	if (r <= 0) {
		usbi_warn(ctx, "internal signalling write failed");
		/* undo the pollfd_modify increment before bailing out */
		usbi_mutex_lock(&ctx->pollfd_modify_lock);
		ctx->pollfd_modify--;
		usbi_mutex_unlock(&ctx->pollfd_modify_lock);
		return;
	}

	/* take event handling lock */
	libusb_lock_events(ctx);

	/* read the dummy data */
	r = usbi_read(ctx->ctrl_pipe[0], &dummy, sizeof(dummy));
	if (r <= 0)
		usbi_warn(ctx, "internal signalling read failed");

	/* we're done with modifying poll fds */
	usbi_mutex_lock(&ctx->pollfd_modify_lock);
	ctx->pollfd_modify--;
	usbi_mutex_unlock(&ctx->pollfd_modify_lock);

	/* Release event handling lock and wake up event waiters */
	libusb_unlock_events(ctx);
}

/** \ingroup dev
 * Open a device and obtain a device handle. A handle allows you to perform
 * I/O on the device in question.
 *
 * Internally, this function adds a reference to the device and makes it
 * available to you through libusb_get_device().
This reference is removed
 * during libusb_close().
 *
 * This is a non-blocking function; no requests are sent over the bus.
 *
 * \param dev the device to open
 * \param handle output location for the returned device handle pointer. Only
 * populated when the return code is 0.
 * \returns 0 on success
 * \returns LIBUSB_ERROR_NO_MEM on memory allocation failure
 * \returns LIBUSB_ERROR_ACCESS if the user has insufficient permissions
 * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected
 * \returns another LIBUSB_ERROR code on other failure
 */
int API_EXPORTED libusb_open(libusb_device *dev,
	libusb_device_handle **handle)
{
	struct libusb_context *ctx = DEVICE_CTX(dev);
	struct libusb_device_handle *_handle;
	/* backend's per-handle private data is co-allocated after the
	 * public struct */
	size_t priv_size = usbi_backend->device_handle_priv_size;
	int r;
	usbi_dbg("open %d.%d", dev->bus_number, dev->device_address);

	if (!dev->attached) {
		return LIBUSB_ERROR_NO_DEVICE;
	}

	_handle = malloc(sizeof(*_handle) + priv_size);
	if (!_handle)
		return LIBUSB_ERROR_NO_MEM;

	r = usbi_mutex_init(&_handle->lock, NULL);
	if (r) {
		free(_handle);
		return LIBUSB_ERROR_OTHER;
	}

	/* the handle owns a reference on the device; released in
	 * libusb_close() via do_close() */
	_handle->dev = libusb_ref_device(dev);
	_handle->auto_detach_kernel_driver = 0;
	_handle->claimed_interfaces = 0;
	memset(&_handle->os_priv, 0, priv_size);

	r = usbi_backend->open(_handle);
	if (r < 0) {
		usbi_dbg("open %d.%d returns %d", dev->bus_number, dev->device_address, r);
		libusb_unref_device(dev);
		usbi_mutex_destroy(&_handle->lock);
		free(_handle);
		return r;
	}

	usbi_mutex_lock(&ctx->open_devs_lock);
	list_add(&_handle->list, &ctx->open_devs);
	usbi_mutex_unlock(&ctx->open_devs_lock);
	*handle = _handle;

	/* At this point, we want to interrupt any existing event handlers so
	 * that they realise the addition of the new device's poll fd. One
	 * example when this is desirable is if the user is running a separate
	 * dedicated libusbx events handling thread, which is running with a long
	 * or infinite timeout. We want to interrupt that iteration of the loop,
	 * so that it picks up the new fd, and then continues. */
	usbi_fd_notification(ctx);

	return 0;
}

/** \ingroup dev
 * Convenience function for finding a device with a particular
 * <tt>idVendor</tt>/<tt>idProduct</tt> combination. This function is intended
 * for those scenarios where you are using libusbx to knock up a quick test
 * application - it allows you to avoid calling libusb_get_device_list() and
 * worrying about traversing/freeing the list.
 *
 * This function has limitations and is hence not intended for use in real
 * applications: if multiple devices have the same IDs it will only
 * give you the first one, etc.
 *
 * \param ctx the context to operate on, or NULL for the default context
 * \param vendor_id the idVendor value to search for
 * \param product_id the idProduct value to search for
 * \returns a handle for the first found device, or NULL on error or if the
 * device could not be found. */
DEFAULT_VISIBILITY
libusb_device_handle * LIBUSB_CALL libusb_open_device_with_vid_pid(
	libusb_context *ctx, uint16_t vendor_id, uint16_t product_id)
{
	struct libusb_device **devs;
	struct libusb_device *found = NULL;
	struct libusb_device *dev;
	struct libusb_device_handle *handle = NULL;
	size_t i = 0;
	int r;

	if (libusb_get_device_list(ctx, &devs) < 0)
		return NULL;

	while ((dev = devs[i++]) != NULL) {
		struct libusb_device_descriptor desc;
		r = libusb_get_device_descriptor(dev, &desc);
		if (r < 0)
			goto out;
		if (desc.idVendor == vendor_id && desc.idProduct == product_id) {
			found = dev;
			break;
		}
	}

	if (found) {
		r = libusb_open(found, &handle);
		if (r < 0)
			handle = NULL;
	}

out:
	/* unrefs every listed device; libusb_open took its own reference on
	 * the device behind the returned handle */
	libusb_free_device_list(devs, 1);
	return handle;
}

/* Tear down an open device handle: detach any in-flight transfers that still
 * point at it, remove it from the context's open-device list, close it at the
 * backend, drop its device reference, and free it. */
static void do_close(struct libusb_context *ctx,
	struct libusb_device_handle *dev_handle)
{
	struct usbi_transfer *itransfer;
	struct usbi_transfer *tmp;

	libusb_lock_events(ctx);

	/* remove any transfers in flight that are for this device */
	usbi_mutex_lock(&ctx->flying_transfers_lock);

	/* safe iteration because transfers may be being deleted */
	list_for_each_entry_safe(itransfer, tmp, &ctx->flying_transfers, list, struct usbi_transfer) {
		struct libusb_transfer *transfer =
			USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer);

		if (transfer->dev_handle != dev_handle)
			continue;

		if (!(itransfer->flags & USBI_TRANSFER_DEVICE_DISAPPEARED)) {
			usbi_err(ctx, "Device handle closed while transfer was still being processed, but the device is still connected as far as we know");

			if (itransfer->flags & USBI_TRANSFER_CANCELLING)
				usbi_warn(ctx, "A cancellation for an in-flight transfer hasn't completed but closing the device handle");
			else
				usbi_err(ctx, "A cancellation hasn't even been scheduled on the transfer for which the device is closing");
		}

		/* remove from the list of in-flight transfers and make sure
		 * we don't accidentally use the device handle in the future
		 * (or that such accesses will be easily caught and identified as a crash)
		 */
		usbi_mutex_lock(&itransfer->lock);
		list_del(&itransfer->list);
		transfer->dev_handle = NULL;
		usbi_mutex_unlock(&itransfer->lock);

		/* it is up to the user to free up the actual transfer struct. this is
		 * just making sure that we don't attempt to process the transfer after
		 * the device handle is invalid
		 */
		usbi_dbg("Removed transfer %p from the in-flight list because device handle %p closed",
			transfer, dev_handle);
	}
	usbi_mutex_unlock(&ctx->flying_transfers_lock);

	libusb_unlock_events(ctx);

	usbi_mutex_lock(&ctx->open_devs_lock);
	list_del(&dev_handle->list);
	usbi_mutex_unlock(&ctx->open_devs_lock);

	usbi_backend->close(dev_handle);
	libusb_unref_device(dev_handle->dev);
	usbi_mutex_destroy(&dev_handle->lock);
	free(dev_handle);
}

/** \ingroup dev
 * Close a device handle. Should be called on all open handles before your
 * application exits.
 *
 * Internally, this function destroys the reference that was added by
 * libusb_open() on the given device.
 *
 * This is a non-blocking function; no requests are sent over the bus.
 *
 * \param dev_handle the handle to close
 */
void API_EXPORTED libusb_close(libusb_device_handle *dev_handle)
{
	struct libusb_context *ctx;
	unsigned char dummy = 1;
	ssize_t r;

	if (!dev_handle)
		return;
	usbi_dbg("");

	ctx = HANDLE_CTX(dev_handle);

	/* Similarly to libusb_open(), we want to interrupt all event handlers
	 * at this point. More importantly, we want to perform the actual close of
	 * the device while holding the event handling lock (preventing any other
	 * thread from doing event handling) because we will be removing a file
	 * descriptor from the polling loop. */

	/* record that we are messing with poll fds */
	usbi_mutex_lock(&ctx->pollfd_modify_lock);
	ctx->pollfd_modify++;
	usbi_mutex_unlock(&ctx->pollfd_modify_lock);

	/* write some data on control pipe to interrupt event handlers */
	r = usbi_write(ctx->ctrl_pipe[1], &dummy, sizeof(dummy));
	if (r <= 0) {
		/* signalling failed; close without the event lock rather than
		 * risk blocking forever */
		usbi_warn(ctx, "internal signalling write failed, closing anyway");
		do_close(ctx, dev_handle);
		usbi_mutex_lock(&ctx->pollfd_modify_lock);
		ctx->pollfd_modify--;
		usbi_mutex_unlock(&ctx->pollfd_modify_lock);
		return;
	}

	/* take event handling lock */
	libusb_lock_events(ctx);

	/* read the dummy data */
	r = usbi_read(ctx->ctrl_pipe[0], &dummy, sizeof(dummy));
	if (r <= 0)
		usbi_warn(ctx, "internal signalling read failed, closing anyway");

	/* Close the device */
	do_close(ctx, dev_handle);

	/* we're done with modifying poll fds */
	usbi_mutex_lock(&ctx->pollfd_modify_lock);
	ctx->pollfd_modify--;
	usbi_mutex_unlock(&ctx->pollfd_modify_lock);

	/* Release event handling lock and wake up event waiters */
	libusb_unlock_events(ctx);
}

/** \ingroup dev
 * Get the underlying device for a handle. This function does not modify
 * the reference count of the returned device, so do not feel compelled to
 * unreference it when you are done.
 * \param dev_handle a device handle
 * \returns the underlying device
 */
DEFAULT_VISIBILITY
libusb_device * LIBUSB_CALL libusb_get_device(libusb_device_handle *dev_handle)
{
	return dev_handle->dev;
}

/** \ingroup dev
 * Determine the bConfigurationValue of the currently active configuration.
 *
 * You could formulate your own control request to obtain this information,
 * but this function has the advantage that it may be able to retrieve the
 * information from operating system caches (no I/O involved).
 *
 * If the OS does not cache this information, then this function will block
 * while a control transfer is submitted to retrieve the information.
 *
 * This function will return a value of 0 in the <tt>config</tt> output
 * parameter if the device is in unconfigured state.
 *
 * \param dev a device handle
 * \param config output location for the bConfigurationValue of the active
 * configuration (only valid for return code 0)
 * \returns 0 on success
 * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected
 * \returns another LIBUSB_ERROR code on other failure
 */
int API_EXPORTED libusb_get_configuration(libusb_device_handle *dev,
	int *config)
{
	int r = LIBUSB_ERROR_NOT_SUPPORTED;

	usbi_dbg("");
	/* prefer the backend's (possibly cached, I/O-free) answer */
	if (usbi_backend->get_configuration)
		r = usbi_backend->get_configuration(dev, config);

	if (r == LIBUSB_ERROR_NOT_SUPPORTED) {
		uint8_t tmp = 0;
		usbi_dbg("falling back to control message");
		r = libusb_control_transfer(dev, LIBUSB_ENDPOINT_IN,
			LIBUSB_REQUEST_GET_CONFIGURATION, 0, 0, &tmp, 1, 1000);
		if (r == 0) {
			/* the transfer succeeded but returned no data */
			usbi_err(HANDLE_CTX(dev), "zero bytes returned in ctrl transfer?");
			r = LIBUSB_ERROR_IO;
		} else if (r == 1) {
			r = 0;
			*config = tmp;
		} else {
			usbi_dbg("control failed, error %d", r);
		}
	}

	if (r == 0)
		usbi_dbg("active config %d", *config);

	return r;
}

/** \ingroup dev
 * Set the active configuration for a device.
 *
 * The operating system may or may not have already set an active
 * configuration on the device.
It is up to your application to ensure the
 * correct configuration is selected before you attempt to claim interfaces
 * and perform other operations.
 *
 * If you call this function on a device already configured with the selected
 * configuration, then this function will act as a lightweight device reset:
 * it will issue a SET_CONFIGURATION request using the current configuration,
 * causing most USB-related device state to be reset (altsetting reset to zero,
 * endpoint halts cleared, toggles reset).
 *
 * You cannot change/reset configuration if your application has claimed
 * interfaces. It is advised to set the desired configuration before claiming
 * interfaces.
 *
 * Alternatively you can call libusb_release_interface() first. Note if you
 * do things this way you must ensure that auto_detach_kernel_driver for
 * <tt>dev</tt> is 0, otherwise the kernel driver will be re-attached when you
 * release the interface(s).
 *
 * You cannot change/reset configuration if other applications or drivers have
 * claimed interfaces.
 *
 * A configuration value of -1 will put the device in unconfigured state.
 * The USB specifications state that a configuration value of 0 does this,
 * however buggy devices exist which actually have a configuration 0.
 *
 * You should always use this function rather than formulating your own
 * SET_CONFIGURATION control request. This is because the underlying operating
 * system needs to know when such changes happen.
 *
 * This is a blocking function.
 *
 * \param dev a device handle
 * \param configuration the bConfigurationValue of the configuration you
 * wish to activate, or -1 if you wish to put the device in unconfigured state
 * \returns 0 on success
 * \returns LIBUSB_ERROR_NOT_FOUND if the requested configuration does not exist
 * \returns LIBUSB_ERROR_BUSY if interfaces are currently claimed
 * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected
 * \returns another LIBUSB_ERROR code on other failure
 * \see libusb_set_auto_detach_kernel_driver()
 */
int API_EXPORTED libusb_set_configuration(libusb_device_handle *dev,
	int configuration)
{
	/* thin wrapper: the backend performs the actual SET_CONFIGURATION */
	usbi_dbg("configuration %d", configuration);
	return usbi_backend->set_configuration(dev, configuration);
}

/** \ingroup dev
 * Claim an interface on a given device handle. You must claim the interface
 * you wish to use before you can perform I/O on any of its endpoints.
 *
 * It is legal to attempt to claim an already-claimed interface, in which
 * case libusbx just returns 0 without doing anything.
 *
 * If auto_detach_kernel_driver is set to 1 for <tt>dev</tt>, the kernel driver
 * will be detached if necessary, on failure the detach error is returned.
 *
 * Claiming of interfaces is a purely logical operation; it does not cause
 * any requests to be sent over the bus. Interface claiming is used to
 * instruct the underlying operating system that your application wishes
 * to take ownership of the interface.
 *
 * This is a non-blocking function.
* * \param dev a device handle * \param interface_number the <tt>bInterfaceNumber</tt> of the interface you * wish to claim * \returns 0 on success * \returns LIBUSB_ERROR_NOT_FOUND if the requested interface does not exist * \returns LIBUSB_ERROR_BUSY if another program or driver has claimed the * interface * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected * \returns a LIBUSB_ERROR code on other failure * \see libusb_set_auto_detach_kernel_driver() */ int API_EXPORTED libusb_claim_interface(libusb_device_handle *dev, int interface_number) { int r = 0; usbi_dbg("interface %d", interface_number); if (interface_number >= USB_MAXINTERFACES) return LIBUSB_ERROR_INVALID_PARAM; if (!dev->dev->attached) return LIBUSB_ERROR_NO_DEVICE; usbi_mutex_lock(&dev->lock); if (dev->claimed_interfaces & (1 << interface_number)) goto out; r = usbi_backend->claim_interface(dev, interface_number); if (r == 0) dev->claimed_interfaces |= 1 << interface_number; out: usbi_mutex_unlock(&dev->lock); return r; } /** \ingroup dev * Release an interface previously claimed with libusb_claim_interface(). You * should release all claimed interfaces before closing a device handle. * * This is a blocking function. A SET_INTERFACE control request will be sent * to the device, resetting interface state to the first alternate setting. * * If auto_detach_kernel_driver is set to 1 for <tt>dev</tt>, the kernel * driver will be re-attached after releasing the interface. 
* * \param dev a device handle * \param interface_number the <tt>bInterfaceNumber</tt> of the * previously-claimed interface * \returns 0 on success * \returns LIBUSB_ERROR_NOT_FOUND if the interface was not claimed * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected * \returns another LIBUSB_ERROR code on other failure * \see libusb_set_auto_detach_kernel_driver() */ int API_EXPORTED libusb_release_interface(libusb_device_handle *dev, int interface_number) { int r; usbi_dbg("interface %d", interface_number); if (interface_number >= USB_MAXINTERFACES) return LIBUSB_ERROR_INVALID_PARAM; usbi_mutex_lock(&dev->lock); if (!(dev->claimed_interfaces & (1 << interface_number))) { r = LIBUSB_ERROR_NOT_FOUND; goto out; } r = usbi_backend->release_interface(dev, interface_number); if (r == 0) dev->claimed_interfaces &= ~(1 << interface_number); out: usbi_mutex_unlock(&dev->lock); return r; } /** \ingroup dev * Activate an alternate setting for an interface. The interface must have * been previously claimed with libusb_claim_interface(). * * You should always use this function rather than formulating your own * SET_INTERFACE control request. This is because the underlying operating * system needs to know when such changes happen. * * This is a blocking function. 
* * \param dev a device handle * \param interface_number the <tt>bInterfaceNumber</tt> of the * previously-claimed interface * \param alternate_setting the <tt>bAlternateSetting</tt> of the alternate * setting to activate * \returns 0 on success * \returns LIBUSB_ERROR_NOT_FOUND if the interface was not claimed, or the * requested alternate setting does not exist * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected * \returns another LIBUSB_ERROR code on other failure */ int API_EXPORTED libusb_set_interface_alt_setting(libusb_device_handle *dev, int interface_number, int alternate_setting) { usbi_dbg("interface %d altsetting %d", interface_number, alternate_setting); if (interface_number >= USB_MAXINTERFACES) return LIBUSB_ERROR_INVALID_PARAM; usbi_mutex_lock(&dev->lock); if (!dev->dev->attached) { usbi_mutex_unlock(&dev->lock); return LIBUSB_ERROR_NO_DEVICE; } if (!(dev->claimed_interfaces & (1 << interface_number))) { usbi_mutex_unlock(&dev->lock); return LIBUSB_ERROR_NOT_FOUND; } usbi_mutex_unlock(&dev->lock); return usbi_backend->set_interface_altsetting(dev, interface_number, alternate_setting); } /** \ingroup dev * Clear the halt/stall condition for an endpoint. Endpoints with halt status * are unable to receive or transmit data until the halt condition is stalled. * * You should cancel all pending transfers before attempting to clear the halt * condition. * * This is a blocking function. 
* * \param dev a device handle * \param endpoint the endpoint to clear halt status * \returns 0 on success * \returns LIBUSB_ERROR_NOT_FOUND if the endpoint does not exist * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected * \returns another LIBUSB_ERROR code on other failure */ int API_EXPORTED libusb_clear_halt(libusb_device_handle *dev, unsigned char endpoint) { usbi_dbg("endpoint %x", endpoint); if (!dev->dev->attached) return LIBUSB_ERROR_NO_DEVICE; return usbi_backend->clear_halt(dev, endpoint); } /** \ingroup dev * Perform a USB port reset to reinitialize a device. The system will attempt * to restore the previous configuration and alternate settings after the * reset has completed. * * If the reset fails, the descriptors change, or the previous state cannot be * restored, the device will appear to be disconnected and reconnected. This * means that the device handle is no longer valid (you should close it) and * rediscover the device. A return code of LIBUSB_ERROR_NOT_FOUND indicates * when this is the case. * * This is a blocking function which usually incurs a noticeable delay. * * \param dev a handle of the device to reset * \returns 0 on success * \returns LIBUSB_ERROR_NOT_FOUND if re-enumeration is required, or if the * device has been disconnected * \returns another LIBUSB_ERROR code on other failure */ int API_EXPORTED libusb_reset_device(libusb_device_handle *dev) { usbi_dbg(""); if (!dev->dev->attached) return LIBUSB_ERROR_NO_DEVICE; return usbi_backend->reset_device(dev); } /** \ingroup dev * Determine if a kernel driver is active on an interface. If a kernel driver * is active, you cannot claim the interface, and libusbx will be unable to * perform I/O. * * This functionality is not available on Windows. 
 *
 * \param dev a device handle
 * \param interface_number the interface to check
 * \returns 0 if no kernel driver is active
 * \returns 1 if a kernel driver is active
 * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected
 * \returns LIBUSB_ERROR_NOT_SUPPORTED on platforms where the functionality
 * is not available
 * \returns another LIBUSB_ERROR code on other failure
 * \see libusb_detach_kernel_driver()
 */
int API_EXPORTED libusb_kernel_driver_active(libusb_device_handle *dev,
	int interface_number)
{
	usbi_dbg("interface %d", interface_number);

	if (!dev->dev->attached)
		return LIBUSB_ERROR_NO_DEVICE;

	/* optional backend hook: absent on platforms without the concept */
	if (usbi_backend->kernel_driver_active)
		return usbi_backend->kernel_driver_active(dev, interface_number);
	else
		return LIBUSB_ERROR_NOT_SUPPORTED;
}

/** \ingroup dev
 * Detach a kernel driver from an interface. If successful, you will then be
 * able to claim the interface and perform I/O.
 *
 * This functionality is not available on Darwin or Windows.
 *
 * Note that libusbx itself also talks to the device through a special kernel
 * driver, if this driver is already attached to the device, this call will
 * not detach it and return LIBUSB_ERROR_NOT_FOUND.
 *
 * \param dev a device handle
 * \param interface_number the interface to detach the driver from
 * \returns 0 on success
 * \returns LIBUSB_ERROR_NOT_FOUND if no kernel driver was active
 * \returns LIBUSB_ERROR_INVALID_PARAM if the interface does not exist
 * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected
 * \returns LIBUSB_ERROR_NOT_SUPPORTED on platforms where the functionality
 * is not available
 * \returns another LIBUSB_ERROR code on other failure
 * \see libusb_kernel_driver_active()
 */
int API_EXPORTED libusb_detach_kernel_driver(libusb_device_handle *dev,
	int interface_number)
{
	usbi_dbg("interface %d", interface_number);

	if (!dev->dev->attached)
		return LIBUSB_ERROR_NO_DEVICE;

	if (usbi_backend->detach_kernel_driver)
		return usbi_backend->detach_kernel_driver(dev, interface_number);
	else
		return LIBUSB_ERROR_NOT_SUPPORTED;
}

/** \ingroup dev
 * Re-attach an interface's kernel driver, which was previously detached
 * using libusb_detach_kernel_driver(). This call is only effective on
 * Linux and returns LIBUSB_ERROR_NOT_SUPPORTED on all other platforms.
 *
 * This functionality is not available on Darwin or Windows.
 *
 * \param dev a device handle
 * \param interface_number the interface to attach the driver to
 * \returns 0 on success
 * \returns LIBUSB_ERROR_NOT_FOUND if no kernel driver was active
 * \returns LIBUSB_ERROR_INVALID_PARAM if the interface does not exist
 * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected
 * \returns LIBUSB_ERROR_NOT_SUPPORTED on platforms where the functionality
 * is not available
 * \returns LIBUSB_ERROR_BUSY if the driver cannot be attached because the
 * interface is claimed by a program or driver
 * \returns another LIBUSB_ERROR code on other failure
 * \see libusb_kernel_driver_active()
 */
int API_EXPORTED libusb_attach_kernel_driver(libusb_device_handle *dev,
	int interface_number)
{
	usbi_dbg("interface %d", interface_number);

	if (!dev->dev->attached)
		return LIBUSB_ERROR_NO_DEVICE;

	if (usbi_backend->attach_kernel_driver)
		return usbi_backend->attach_kernel_driver(dev, interface_number);
	else
		return LIBUSB_ERROR_NOT_SUPPORTED;
}

/** \ingroup dev
 * Enable/disable libusbx's automatic kernel driver detachment. When this is
 * enabled libusbx will automatically detach the kernel driver on an interface
 * when claiming the interface, and attach it when releasing the interface.
 *
 * Automatic kernel driver detachment is disabled on newly opened device
 * handles by default.
 *
 * On platforms which do not have LIBUSB_CAP_SUPPORTS_DETACH_KERNEL_DRIVER
 * this function will return LIBUSB_ERROR_NOT_SUPPORTED, and libusbx will
 * continue as if this function was never called.
 *
 * \param dev a device handle
 * \param enable whether to enable or disable auto kernel driver detachment
 *
 * \returns LIBUSB_SUCCESS on success
 * \returns LIBUSB_ERROR_NOT_SUPPORTED on platforms where the functionality
 * is not available
 * \see libusb_claim_interface()
 * \see libusb_release_interface()
 * \see libusb_set_configuration()
 */
int API_EXPORTED libusb_set_auto_detach_kernel_driver(
	libusb_device_handle *dev, int enable)
{
	/* reject early so callers on unsupported platforms get a clear error
	 * instead of a silently ignored flag */
	if (!(usbi_backend->caps & USBI_CAP_SUPPORTS_DETACH_KERNEL_DRIVER))
		return LIBUSB_ERROR_NOT_SUPPORTED;

	dev->auto_detach_kernel_driver = enable;
	return LIBUSB_SUCCESS;
}

/** \ingroup lib
 * Set log message verbosity.
 *
 * The default level is LIBUSB_LOG_LEVEL_NONE, which means no messages are ever
 * printed. If you choose to increase the message verbosity level, ensure
 * that your application does not close the stdout/stderr file descriptors.
 *
 * You are advised to use level LIBUSB_LOG_LEVEL_WARNING. libusbx is conservative
 * with its message logging and most of the time, will only log messages that
 * explain error conditions and other oddities. This will help you debug
 * your software.
 *
 * If the LIBUSB_DEBUG environment variable was set when libusbx was
 * initialized, this function does nothing: the message verbosity is fixed
 * to the value in the environment variable.
 *
 * If libusbx was compiled without any message logging, this function does
 * nothing: you'll never get any messages.
 *
 * If libusbx was compiled with verbose debug message logging, this function
 * does nothing: you'll always get messages from all levels.
 *
 * \param ctx the context to operate on, or NULL for the default context
 * \param level debug level to set
 */
void API_EXPORTED libusb_set_debug(libusb_context *ctx, int level)
{
	USBI_GET_CONTEXT(ctx);
	/* debug_fixed is set at init time when LIBUSB_DEBUG was present in the
	 * environment; in that case the requested level is ignored */
	if (!ctx->debug_fixed)
		ctx->debug = level;
}

/** \ingroup lib
 * Initialize libusb. This function must be called before calling any other
 * libusbx function.
 *
 * If you do not provide an output location for a context pointer, a default
 * context will be created. If there was already a default context, it will
 * be reused (and nothing will be initialized/reinitialized).
 *
 * \param context Optional output location for context pointer.
 * Only valid on return code 0.
 * \returns 0 on success, or a LIBUSB_ERROR code on failure
 * \see contexts
 */
int API_EXPORTED libusb_init(libusb_context **context)
{
	struct libusb_device *dev, *next;
	char *dbg = getenv("LIBUSB_DEBUG");
	struct libusb_context *ctx;
	static int first_init = 1;
	int r = 0;

	usbi_mutex_static_lock(&default_context_lock);

	/* record the process-wide log timestamp origin on first init */
	if (!timestamp_origin.tv_sec) {
		usbi_gettimeofday(&timestamp_origin, NULL);
	}

	/* NULL context request + existing default context: just bump refcount */
	if (!context && usbi_default_context) {
		usbi_dbg("reusing default context");
		default_context_refcnt++;
		usbi_mutex_static_unlock(&default_context_lock);
		return 0;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		r = LIBUSB_ERROR_NO_MEM;
		goto err_unlock;
	}

#ifdef ENABLE_DEBUG_LOGGING
	ctx->debug = LIBUSB_LOG_LEVEL_DEBUG;
#endif

	if (dbg) {
		ctx->debug = atoi(dbg);
		if (ctx->debug)
			ctx->debug_fixed = 1;
	}

	/* default context should be initialized before calling usbi_dbg */
	if (!usbi_default_context) {
		usbi_default_context = ctx;
		default_context_refcnt++;
		usbi_dbg("created default context");
	}

	usbi_dbg("libusbx v%d.%d.%d.%d", libusb_version_internal.major,
		libusb_version_internal.minor, libusb_version_internal.micro,
		libusb_version_internal.nano);

	usbi_mutex_init(&ctx->usb_devs_lock, NULL);
	usbi_mutex_init(&ctx->open_devs_lock, NULL);
	usbi_mutex_init(&ctx->hotplug_cbs_lock, NULL);
	list_init(&ctx->usb_devs);
	list_init(&ctx->open_devs);
	list_init(&ctx->hotplug_cbs);

	usbi_mutex_static_lock(&active_contexts_lock);
	if (first_init) {
		first_init = 0;
		list_init (&active_contexts_list);
	}
	list_add (&ctx->list, &active_contexts_list);
	usbi_mutex_static_unlock(&active_contexts_lock);

	if (usbi_backend->init) {
		r = usbi_backend->init(ctx);
		if (r)
			goto err_free_ctx;
	}

	r = usbi_io_init(ctx);
	if (r < 0)
		goto err_backend_exit;

	usbi_mutex_static_unlock(&default_context_lock);

	if (context)
		*context = ctx;

	return 0;

	/* unwind in reverse order of construction */
err_backend_exit:
	if (usbi_backend->exit)
		usbi_backend->exit();
err_free_ctx:
	/* NOTE(review): if this ctx became the default context above, the
	 * pointer is cleared here but default_context_refcnt is not
	 * decremented -- verify against upstream. */
	if (ctx == usbi_default_context)
		usbi_default_context = NULL;

	usbi_mutex_destroy(&ctx->open_devs_lock);
	usbi_mutex_destroy(&ctx->usb_devs_lock);
	usbi_mutex_destroy(&ctx->hotplug_cbs_lock);

	usbi_mutex_static_lock(&active_contexts_lock);
	list_del (&ctx->list);
	usbi_mutex_static_unlock(&active_contexts_lock);

	usbi_mutex_lock(&ctx->usb_devs_lock);
	list_for_each_entry_safe(dev, next, &ctx->usb_devs, list, struct libusb_device) {
		list_del(&dev->list);
		libusb_unref_device(dev);
	}
	usbi_mutex_unlock(&ctx->usb_devs_lock);

	free(ctx);
err_unlock:
	usbi_mutex_static_unlock(&default_context_lock);
	return r;
}

/** \ingroup lib
 * Deinitialize libusb. Should be called after closing all open devices and
 * before your application terminates.
 * \param ctx the context to deinitialize, or NULL for the default context
 */
void API_EXPORTED libusb_exit(struct libusb_context *ctx)
{
	struct libusb_device *dev, *next;

	usbi_dbg("");
	USBI_GET_CONTEXT(ctx);

	/* if working with default context, only actually do the deinitialization
	 * if we're the last user */
	usbi_mutex_static_lock(&default_context_lock);
	if (ctx == usbi_default_context) {
		if (--default_context_refcnt > 0) {
			usbi_dbg("not destroying default context");
			usbi_mutex_static_unlock(&default_context_lock);
			return;
		}
		usbi_dbg("destroying default context");
		usbi_default_context = NULL;
	}
	usbi_mutex_static_unlock(&default_context_lock);

	usbi_mutex_static_lock(&active_contexts_lock);
	list_del (&ctx->list);
	usbi_mutex_static_unlock(&active_contexts_lock);

	if (libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG)) {
		usbi_hotplug_deregister_all(ctx);
		/* hotplug backends keep devices in usb_devs; drop our references */
		usbi_mutex_lock(&ctx->usb_devs_lock);
		list_for_each_entry_safe(dev, next, &ctx->usb_devs, list, struct libusb_device) {
			list_del(&dev->list);
			libusb_unref_device(dev);
		}
		usbi_mutex_unlock(&ctx->usb_devs_lock);
	}

	/* a few sanity checks. don't bother with locking because unless
	 * there is an application bug, nobody will be accessing these. */
	if (!list_empty(&ctx->usb_devs))
		usbi_warn(ctx, "some libusb_devices were leaked");
	if (!list_empty(&ctx->open_devs))
		usbi_warn(ctx, "application left some devices open");

	usbi_io_exit(ctx);
	if (usbi_backend->exit)
		usbi_backend->exit();

	usbi_mutex_destroy(&ctx->open_devs_lock);
	usbi_mutex_destroy(&ctx->usb_devs_lock);
	usbi_mutex_destroy(&ctx->hotplug_cbs_lock);
	free(ctx);
}

/** \ingroup misc
 * Check at runtime if the loaded library has a given capability.
 * This call should be performed after \ref libusb_init(), to ensure the
 * backend has updated its capability set.
 *
 * \param capability the \ref libusb_capability to check for
 * \returns nonzero if the running library has the capability, 0 otherwise
 */
int API_EXPORTED libusb_has_capability(uint32_t capability)
{
	switch (capability) {
	case LIBUSB_CAP_HAS_CAPABILITY:
		return 1;
	case LIBUSB_CAP_HAS_HOTPLUG:
		/* NOTE(review): hotplug is reported when the backend does NOT
		 * provide get_device_list -- presumably hotplug-capable backends
		 * enumerate internally; confirm against the backend contract. */
		return !(usbi_backend->get_device_list);
	case LIBUSB_CAP_HAS_HID_ACCESS:
		return (usbi_backend->caps & USBI_CAP_HAS_HID_ACCESS);
	case LIBUSB_CAP_SUPPORTS_DETACH_KERNEL_DRIVER:
		return (usbi_backend->caps & USBI_CAP_SUPPORTS_DETACH_KERNEL_DRIVER);
	}
	return 0;
}

/* this is defined in libusbi.h if needed */
#ifdef LIBUSB_GETTIMEOFDAY_WIN32
/*
 * gettimeofday
 * Implementation according to:
 * The Open Group Base Specifications Issue 6
 * IEEE Std 1003.1, 2004 Edition
 */

/*
 * THIS SOFTWARE IS NOT COPYRIGHTED
 *
 * This source code is offered for use in the public domain. You may
 * use, modify or distribute it freely.
 *
 * This code is distributed in the hope that it will be useful but
 * WITHOUT ANY WARRANTY. ALL WARRANTIES, EXPRESS OR IMPLIED ARE HEREBY
 * DISCLAIMED. This includes but is not limited to warranties of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Contributed by:
 * Danny Smith <dannysmith@users.sourceforge.net>
 */

/* Offset between 1/1/1601 and 1/1/1970 in 100 nanosec units */
#define _W32_FT_OFFSET (116444736000000000)

int usbi_gettimeofday(struct timeval *tp, void *tzp)
{
	union {
		unsigned __int64 ns100; /* Time since 1 Jan 1601, in 100ns units */
		FILETIME ft;
	} _now;
	UNUSED(tzp);

	if(tp) {
#if defined(OS_WINCE)
		SYSTEMTIME st;
		GetSystemTime(&st);
		SystemTimeToFileTime(&st, &_now.ft);
#else
		GetSystemTimeAsFileTime (&_now.ft);
#endif
		/* convert 100ns ticks since 1601 to seconds/microseconds since 1970 */
		tp->tv_usec=(long)((_now.ns100 / 10) % 1000000 );
		tp->tv_sec= (long)((_now.ns100 - _W32_FT_OFFSET) / 10000000);
	}
	/* Always return 0 as per Open Group Base Specifications Issue 6.
	   Do not set errno on error.  */
	return 0;
}
#endif

/* sink for formatted log lines; everything goes to stderr */
static void usbi_log_str(struct libusb_context *ctx, const char * str)
{
	UNUSED(ctx);
	fputs(str, stderr);
}

/* Core log formatter: applies the context's verbosity filter, then builds
 * "[timestamp] [tid] libusbx: level [function] message" into a fixed-size
 * buffer with truncation handling, and hands it to usbi_log_str(). */
void usbi_log_v(struct libusb_context *ctx, enum libusb_log_level level,
	const char *function, const char *format, va_list args)
{
	const char *prefix = "";
	char buf[USBI_MAX_LOG_LEN];
	struct timeval now;
	int global_debug, header_len, text_len;
	static int has_debug_header_been_displayed = 0;

#ifdef ENABLE_DEBUG_LOGGING
	global_debug = 1;
	UNUSED(ctx);
#else
	USBI_GET_CONTEXT(ctx);
	if (ctx == NULL)
		return;
	global_debug = (ctx->debug == LIBUSB_LOG_LEVEL_DEBUG);
	if (!ctx->debug)
		return;
	if (level == LIBUSB_LOG_LEVEL_WARNING && ctx->debug < LIBUSB_LOG_LEVEL_WARNING)
		return;
	if (level == LIBUSB_LOG_LEVEL_INFO && ctx->debug < LIBUSB_LOG_LEVEL_INFO)
		return;
	if (level == LIBUSB_LOG_LEVEL_DEBUG && ctx->debug < LIBUSB_LOG_LEVEL_DEBUG)
		return;
#endif

#ifdef __ANDROID__
	int prio;
	switch (level) {
	case LOG_LEVEL_INFO: prio = ANDROID_LOG_INFO; break;
	case LOG_LEVEL_WARNING: prio = ANDROID_LOG_WARN; break;
	case LOG_LEVEL_ERROR: prio = ANDROID_LOG_ERROR; break;
	case LOG_LEVEL_DEBUG: prio = ANDROID_LOG_DEBUG; break;
	default: prio = ANDROID_LOG_UNKNOWN; break;
	}
	__android_log_vprint(prio, "LibUsb", format, args);
#else
	usbi_gettimeofday(&now, NULL);
	if ((global_debug) && (!has_debug_header_been_displayed)) {
		has_debug_header_been_displayed = 1;
		usbi_log_str(ctx, "[timestamp] [threadID] facility level [function call] <message>\n");
		usbi_log_str(ctx, "--------------------------------------------------------------------------------\n");
	}
	/* timestamps are relative to the origin recorded at libusb_init time */
	if (now.tv_usec < timestamp_origin.tv_usec) {
		now.tv_sec--;
		now.tv_usec += 1000000;
	}
	now.tv_sec -= timestamp_origin.tv_sec;
	now.tv_usec -= timestamp_origin.tv_usec;

	switch (level) {
	case LIBUSB_LOG_LEVEL_INFO:
		prefix = "info";
		break;
	case LIBUSB_LOG_LEVEL_WARNING:
		prefix = "warning";
		break;
	case LIBUSB_LOG_LEVEL_ERROR:
		prefix = "error";
		break;
	case LIBUSB_LOG_LEVEL_DEBUG:
		prefix = "debug";
		break;
	case LIBUSB_LOG_LEVEL_NONE:
		break;
	default:
		prefix = "unknown";
		break;
	}

	if (global_debug) {
		header_len = snprintf(buf, sizeof(buf),
			"[%2d.%06d] [%08x] libusbx: %s [%s] ",
			(int)now.tv_sec, (int)now.tv_usec, usbi_get_tid(), prefix, function);
	} else {
		header_len = snprintf(buf, sizeof(buf),
			"libusbx: %s [%s] ", prefix, function);
	}

	/* NOTE(review): header_len/text_len are int, compared against size_t
	 * sizeof(buf) -- relies on both being small and non-negative here */
	if (header_len < 0 || header_len >= sizeof(buf)) {
		/* Somehow snprintf failed to write to the buffer,
		 * remove the header so something useful is output. */
		header_len = 0;
	}
	/* Make sure buffer is NUL terminated */
	buf[header_len] = '\0';
	text_len = vsnprintf(buf + header_len, sizeof(buf) - header_len,
		format, args);
	if (text_len < 0 || text_len + header_len >= sizeof(buf)) {
		/* Truncated log output. On some platforms a -1 return value means
		 * that the output was truncated. */
		text_len = sizeof(buf) - header_len;
	}
	if (header_len + text_len + sizeof(USBI_LOG_LINE_END) >= sizeof(buf)) {
		/* Need to truncate the text slightly to fit on the terminator. */
		text_len -= (header_len + text_len + sizeof(USBI_LOG_LINE_END)) - sizeof(buf);
	}
	strcpy(buf + header_len + text_len, USBI_LOG_LINE_END);

	usbi_log_str(ctx, buf);
#endif
}

/* varargs front-end for usbi_log_v() */
void usbi_log(struct libusb_context *ctx, enum libusb_log_level level,
	const char *function, const char *format, ...)
{
	va_list args;

	va_start (args, format);
	usbi_log_v(ctx, level, function, format, args);
	va_end (args);
}

/** \ingroup misc
 * Returns a constant NULL-terminated string with the ASCII name of a libusbx
 * error or transfer status code. The caller must not free() the returned
 * string.
 *
 * \param error_code The \ref libusb_error or libusb_transfer_status code to
 * return the name of.
 * \returns The error name, or the string **UNKNOWN** if the value of
 * error_code is not a known error / status code.
 */
DEFAULT_VISIBILITY const char * LIBUSB_CALL libusb_error_name(int error_code)
{
	switch (error_code) {
	case LIBUSB_ERROR_IO:
		return "LIBUSB_ERROR_IO";
	case LIBUSB_ERROR_INVALID_PARAM:
		return "LIBUSB_ERROR_INVALID_PARAM";
	case LIBUSB_ERROR_ACCESS:
		return "LIBUSB_ERROR_ACCESS";
	case LIBUSB_ERROR_NO_DEVICE:
		return "LIBUSB_ERROR_NO_DEVICE";
	case LIBUSB_ERROR_NOT_FOUND:
		return "LIBUSB_ERROR_NOT_FOUND";
	case LIBUSB_ERROR_BUSY:
		return "LIBUSB_ERROR_BUSY";
	case LIBUSB_ERROR_TIMEOUT:
		return "LIBUSB_ERROR_TIMEOUT";
	case LIBUSB_ERROR_OVERFLOW:
		return "LIBUSB_ERROR_OVERFLOW";
	case LIBUSB_ERROR_PIPE:
		return "LIBUSB_ERROR_PIPE";
	case LIBUSB_ERROR_INTERRUPTED:
		return "LIBUSB_ERROR_INTERRUPTED";
	case LIBUSB_ERROR_NO_MEM:
		return "LIBUSB_ERROR_NO_MEM";
	case LIBUSB_ERROR_NOT_SUPPORTED:
		return "LIBUSB_ERROR_NOT_SUPPORTED";
	case LIBUSB_ERROR_OTHER:
		return "LIBUSB_ERROR_OTHER";
	case LIBUSB_TRANSFER_ERROR:
		return "LIBUSB_TRANSFER_ERROR";
	case LIBUSB_TRANSFER_TIMED_OUT:
		return "LIBUSB_TRANSFER_TIMED_OUT";
	case LIBUSB_TRANSFER_CANCELLED:
		return "LIBUSB_TRANSFER_CANCELLED";
	case LIBUSB_TRANSFER_STALL:
		return "LIBUSB_TRANSFER_STALL";
	case LIBUSB_TRANSFER_NO_DEVICE:
		return "LIBUSB_TRANSFER_NO_DEVICE";
	case LIBUSB_TRANSFER_OVERFLOW:
		return "LIBUSB_TRANSFER_OVERFLOW";
	/* 0 is shared by both enums */
	case 0:
		return "LIBUSB_SUCCESS / LIBUSB_TRANSFER_COMPLETED";
	default:
		return "**UNKNOWN**";
	}
}

/** \ingroup misc
 * Returns a pointer to const struct libusb_version with the version
 * (major, minor, micro, nano and rc) of the running library.
 */
DEFAULT_VISIBILITY
const struct libusb_version * LIBUSB_CALL libusb_get_version(void)
{
	return &libusb_version_internal;
}
'use strict';

exports.__esModule = true;

/* NOTE(review): this file appears to be Babel-compiled output (es-module
 * interop markers, _createClass/_inherits helpers) -- edit the source
 * module rather than this artifact. */

// Babel helper: defines prototype/static properties via Object.defineProperty.
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();

var _warnOnce = require('./warn-once');

var _warnOnce2 = _interopRequireDefault(_warnOnce);

var _node = require('./node');

var _node2 = _interopRequireDefault(_node);

// Babel helper: wraps CommonJS exports so `.default` access is uniform.
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }

// Babel helper: enforces `new`-only construction of compiled classes.
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }

// Babel helper: implements the `super()` return-value semantics.
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }

// Babel helper: sets up the prototype chain for `extends`.
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }

// Declaration node: a Node subclass tagged with type 'decl'.
var Declaration = function (_Node) {
    _inherits(Declaration, _Node);

    function Declaration(defaults) {
        _classCallCheck(this, Declaration);

        var _this = _possibleConstructorReturn(this, _Node.call(this, defaults));

        _this.type = 'decl';
        return _this;
    }

    /* istanbul ignore next */


    _createClass(Declaration, [{
        key: '_value',
        // Deprecated accessor kept for backwards compatibility; warns once
        // and forwards to raws.value.
        get: function get() {
            (0, _warnOnce2.default)('Node#_value was deprecated. Use Node#raws.value');
            return this.raws.value;
        }

        /* istanbul ignore next */
        ,
        set: function set(val) {
            (0, _warnOnce2.default)('Node#_value was deprecated. Use Node#raws.value');
            this.raws.value = val;
        }

        /* istanbul ignore next */

    }, {
        key: '_important',
        // Deprecated accessor; warns once and forwards to raws.important.
        get: function get() {
            (0, _warnOnce2.default)('Node#_important was deprecated. Use Node#raws.important');
            return this.raws.important;
        }

        /* istanbul ignore next */
        ,
        set: function set(val) {
            (0, _warnOnce2.default)('Node#_important was deprecated. Use Node#raws.important');
            this.raws.important = val;
        }
    }]);

    return Declaration;
}(_node2.default);

exports.default = Declaration;
module.exports = exports['default'];
/* * TCP Vegas congestion control interface */ #ifndef __TCP_VEGAS_H #define __TCP_VEGAS_H 1 /* Vegas variables */ struct vegas { u32 beg_snd_nxt; /* right edge during last RTT */ u32 beg_snd_una; /* left edge during last RTT */ u32 beg_snd_cwnd; /* saves the size of the cwnd */ u8 doing_vegas_now;/* if true, do vegas for this RTT */ u16 cntRTT; /* # of RTTs measured within last RTT */ u32 minRTT; /* min of RTTs measured within last RTT (in usec) */ u32 baseRTT; /* the min of all Vegas RTT measurements seen (in usec) */ }; void tcp_vegas_init(struct sock *sk); void tcp_vegas_state(struct sock *sk, u8 ca_state); void tcp_vegas_pkts_acked(struct sock *sk, const struct ack_sample *sample); void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event); size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr, union tcp_cc_info *info); #endif /* __TCP_VEGAS_H */
<?php

/*
 * This file is part of the Symfony package.
 *
 * (c) Fabien Potencier <fabien@symfony.com>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

namespace Symfony\Component\PropertyInfo\DependencyInjection;

use Symfony\Component\DependencyInjection\Argument\IteratorArgument;
use Symfony\Component\DependencyInjection\Compiler\CompilerPassInterface;
use Symfony\Component\DependencyInjection\Compiler\PriorityTaggedServiceTrait;
use Symfony\Component\DependencyInjection\ContainerBuilder;

/**
 * Adds extractors to the property_info service.
 *
 * Collects services tagged with the configured tag names (sorted by
 * priority) and injects them as lazy iterators into the property_info
 * service definition's constructor arguments.
 *
 * @author Kévin Dunglas <dunglas@gmail.com>
 */
class PropertyInfoPass implements CompilerPassInterface
{
    use PriorityTaggedServiceTrait;

    // Service id of the property_info extractor aggregate.
    private $propertyInfoService;
    // Tag names, one per extractor category / constructor argument slot.
    private $listExtractorTag;
    private $typeExtractorTag;
    private $descriptionExtractorTag;
    private $accessExtractorTag;
    private $initializableExtractorTag;

    public function __construct(string $propertyInfoService = 'property_info', string $listExtractorTag = 'property_info.list_extractor', string $typeExtractorTag = 'property_info.type_extractor', string $descriptionExtractorTag = 'property_info.description_extractor', string $accessExtractorTag = 'property_info.access_extractor', string $initializableExtractorTag = 'property_info.initializable_extractor')
    {
        $this->propertyInfoService = $propertyInfoService;
        $this->listExtractorTag = $listExtractorTag;
        $this->typeExtractorTag = $typeExtractorTag;
        $this->descriptionExtractorTag = $descriptionExtractorTag;
        $this->accessExtractorTag = $accessExtractorTag;
        $this->initializableExtractorTag = $initializableExtractorTag;
    }

    /**
     * {@inheritdoc}
     */
    public function process(ContainerBuilder $container)
    {
        // Nothing to do when the aggregate service is not registered.
        if (!$container->hasDefinition($this->propertyInfoService)) {
            return;
        }

        $definition = $container->getDefinition($this->propertyInfoService);

        $listExtractors = $this->findAndSortTaggedServices($this->listExtractorTag, $container);
        $definition->replaceArgument(0, new IteratorArgument($listExtractors));

        $typeExtractors = $this->findAndSortTaggedServices($this->typeExtractorTag, $container);
        $definition->replaceArgument(1, new IteratorArgument($typeExtractors));

        $descriptionExtractors = $this->findAndSortTaggedServices($this->descriptionExtractorTag, $container);
        $definition->replaceArgument(2, new IteratorArgument($descriptionExtractors));

        $accessExtractors = $this->findAndSortTaggedServices($this->accessExtractorTag, $container);
        $definition->replaceArgument(3, new IteratorArgument($accessExtractors));

        $initializableExtractors = $this->findAndSortTaggedServices($this->initializableExtractorTag, $container);
        // NOTE(review): setArgument(4) (not replaceArgument) -- presumably
        // the fifth argument may be absent from the base definition; confirm
        // against the property_info service definition.
        $definition->setArgument(4, new IteratorArgument($initializableExtractors));
    }
}
"""Tests for parabolic cylinder functions. """ import numpy as np from numpy.testing import assert_allclose, assert_equal import scipy.special as sc def test_pbwa_segfault(): # Regression test for https://github.com/scipy/scipy/issues/6208. # # Data generated by mpmath. # w = 1.02276567211316867161 wp = -0.48887053372346189882 assert_allclose(sc.pbwa(0, 0), (w, wp), rtol=1e-13, atol=0) def test_pbwa_nan(): # Check that NaN's are returned outside of the range in which the # implementation is accurate. pts = [(-6, -6), (-6, 6), (6, -6), (6, 6)] for p in pts: assert_equal(sc.pbwa(*p), (np.nan, np.nan))
/**
 * Require unassigned functions to be named inline
 *
 * Types: `Boolean` or `Object`
 *
 * Values:
 *  - `true`
 *  - `Object`:
 *     - `allExcept`: array of quoted identifiers
 *
 * #### Example
 *
 * ```js
 * "requireNamedUnassignedFunctions": { "allExcept": ["describe", "it"] }
 * ```
 *
 * ##### Valid
 *
 * ```js
 * [].forEach(function x() {});
 * var y = function() {};
 * function z() {}
 * it(function () {});
 * ```
 *
 * ##### Invalid
 *
 * ```js
 * [].forEach(function () {});
 * before(function () {});
 * ```
 */

var assert = require('assert');
var pathval = require('pathval');

// Resolve the textual name of an Identifier or Literal AST node.
function getNodeName(node) {
    if (node.type === 'Identifier') {
        return node.name;
    } else {
        return node.value;
    }
}

module.exports = function() {};

module.exports.prototype = {
    configure: function(options) {
        assert(
            options === true || typeof options === 'object',
            this.getOptionName() + ' option requires true value ' +
            'or an object with String[] `allExcept` property'
        );

        // verify first item in `allExcept` property in object (if it's an object)
        assert(
            typeof options !== 'object' ||
            Array.isArray(options.allExcept) &&
            typeof options.allExcept[0] === 'string',
            'Property `allExcept` in ' + this.getOptionName() + ' should be an array of strings'
        );

        if (options.allExcept) {
            // Pre-parse each exception into a JSON-encoded path so call-site
            // paths can be compared by string equality in check().
            this._allExceptItems = options.allExcept.map(function(item) {
                var parts = pathval.parse(item).map(function extractPart(part) {
                    return part.i !== undefined ? part.i : part.p;
                });
                return JSON.stringify(parts);
            });
        }
    },

    getOptionName: function() {
        return 'requireNamedUnassignedFunctions';
    },

    check: function(file, errors) {
        var _this = this;
        file.iterateNodesByType('FunctionExpression', function(node) {
            var parentElement = node.parentElement;
            // If the function has been named via left hand assignment, skip it
            //   e.g. `var hello = function() {`, `foo.bar = function() {`
            if (parentElement.type.match(/VariableDeclarator|Property|AssignmentExpression/)) {
                return;
            }

            // If the function has been named, skip it
            //   e.g. `[].forEach(function hello() {`
            if (node.id !== null) {
                return;
            }

            // If we have exceptions and the function is being invoked, detect whether we excepted it
            if (_this._allExceptItems && parentElement.type === 'CallExpression') {
                // Determine the path that resolves to our call expression
                // We must cover both direct calls (e.g. `it(function() {`) and
                // member expressions (e.g. `foo.bar(function() {`)
                var memberNode = parentElement.callee;
                var canBeRepresented = true;
                var fullpathParts = [];
                // Walk from the callee towards its root object, collecting
                // static name segments; bail on anything dynamic.
                while (memberNode) {
                    if (memberNode.type.match(/Identifier|Literal/)) {
                        fullpathParts.unshift(getNodeName(memberNode));
                    } else if (memberNode.type === 'MemberExpression') {
                        fullpathParts.unshift(getNodeName(memberNode.property));
                    } else {
                        canBeRepresented = false;
                        break;
                    }
                    memberNode = memberNode.object;
                }

                // If the path is not-dynamic (i.e. can be represented by static parts),
                // then check it against our exceptions
                if (canBeRepresented) {
                    var fullpath = JSON.stringify(fullpathParts);
                    for (var i = 0, l = _this._allExceptItems.length; i < l; i++) {
                        if (fullpath === _this._allExceptItems[i]) {
                            return;
                        }
                    }
                }
            }

            // Complain that this function must be named
            errors.add('Inline functions need to be named', node);
        });
    }
};
/******************************************************************************* comedi/drivers/pci1723.c COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *******************************************************************************/ /* Driver: adv_pci1723 Description: Advantech PCI-1723 Author: yonggang <rsmgnu@gmail.com>, Ian Abbott <abbotti@mev.co.uk> Devices: [Advantech] PCI-1723 (adv_pci1723) Updated: Mon, 14 Apr 2008 15:12:56 +0100 Status: works Configuration Options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first supported PCI device found will be used. Subdevice 0 is 8-channel AO, 16-bit, range +/- 10 V. Subdevice 1 is 16-channel DIO. The channels are configurable as input or output in 2 groups (0 to 7, 8 to 15). Configuring any channel implicitly configures all channels in the same group. TODO: 1. Add the two milliamp ranges to the AO subdevice (0 to 20 mA, 4 to 20 mA). 2. Read the initial ranges and values of the AO subdevice at start-up instead of reinitializing them. 3. Implement calibration. 
*/ #include <linux/pci.h> #include "../comedidev.h" /* all the registers for the pci1723 board */ #define PCI1723_DA(N) ((N)<<1) /* W: D/A register N (0 to 7) */ #define PCI1723_SYN_SET 0x12 /* synchronized set register */ #define PCI1723_ALL_CHNNELE_SYN_STROBE 0x12 /* synchronized status register */ #define PCI1723_RANGE_CALIBRATION_MODE 0x14 /* range and calibration mode */ #define PCI1723_RANGE_CALIBRATION_STATUS 0x14 /* range and calibration status */ #define PCI1723_CONTROL_CMD_CALIBRATION_FUN 0x16 /* * SADC control command for * calibration function */ #define PCI1723_STATUS_CMD_CALIBRATION_FUN 0x16 /* * SADC control status for * calibration function */ #define PCI1723_CALIBRATION_PARA_STROBE 0x18 /* Calibration parameter strobe */ #define PCI1723_DIGITAL_IO_PORT_SET 0x1A /* Digital I/O port setting */ #define PCI1723_DIGITAL_IO_PORT_MODE 0x1A /* Digital I/O port mode */ #define PCI1723_WRITE_DIGITAL_OUTPUT_CMD 0x1C /* Write digital output command */ #define PCI1723_READ_DIGITAL_INPUT_DATA 0x1C /* Read digital input data */ #define PCI1723_WRITE_CAL_CMD 0x1E /* Write calibration command */ #define PCI1723_READ_CAL_STATUS 0x1E /* Read calibration status */ #define PCI1723_SYN_STROBE 0x20 /* Synchronized strobe */ #define PCI1723_RESET_ALL_CHN_STROBE 0x22 /* Reset all D/A channels strobe */ #define PCI1723_RESET_CAL_CONTROL_STROBE 0x24 /* * Reset the calibration * controller strobe */ #define PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE 0x26 /* * Change D/A channels output * type strobe */ #define PCI1723_SELECT_CALIBRATION 0x28 /* Select the calibration Ref_V */ struct pci1723_private { unsigned char da_range[8]; /* D/A output range for each channel */ short ao_data[8]; /* data output buffer */ }; /* * The pci1723 card reset; */ static int pci1723_reset(struct comedi_device *dev) { struct pci1723_private *devpriv = dev->private; int i; outw(0x01, dev->iobase + PCI1723_SYN_SET); /* set synchronous output mode */ for (i = 0; i < 8; i++) { /* set all outputs to 0V */ 
devpriv->ao_data[i] = 0x8000; outw(devpriv->ao_data[i], dev->iobase + PCI1723_DA(i)); /* set all ranges to +/- 10V */ devpriv->da_range[i] = 0; outw(((devpriv->da_range[i] << 4) | i), PCI1723_RANGE_CALIBRATION_MODE); } outw(0, dev->iobase + PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE); /* update ranges */ outw(0, dev->iobase + PCI1723_SYN_STROBE); /* update outputs */ /* set asynchronous output mode */ outw(0, dev->iobase + PCI1723_SYN_SET); return 0; } static int pci1723_insn_read_ao(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct pci1723_private *devpriv = dev->private; int n, chan; chan = CR_CHAN(insn->chanspec); for (n = 0; n < insn->n; n++) data[n] = devpriv->ao_data[chan]; return n; } /* analog data output; */ static int pci1723_ao_write_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct pci1723_private *devpriv = dev->private; int n, chan; chan = CR_CHAN(insn->chanspec); for (n = 0; n < insn->n; n++) { devpriv->ao_data[chan] = data[n]; outw(data[n], dev->iobase + PCI1723_DA(chan)); } return n; } /* digital i/o config/query */ static int pci1723_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int mask; unsigned int bits; unsigned short dio_mode; mask = 1 << CR_CHAN(insn->chanspec); if (mask & 0x00FF) bits = 0x00FF; else bits = 0xFF00; switch (data[0]) { case INSN_CONFIG_DIO_INPUT: s->io_bits &= ~bits; break; case INSN_CONFIG_DIO_OUTPUT: s->io_bits |= bits; break; case INSN_CONFIG_DIO_QUERY: data[1] = (s->io_bits & bits) ? 
COMEDI_OUTPUT : COMEDI_INPUT; return insn->n; default: return -EINVAL; } /* update hardware DIO mode */ dio_mode = 0x0000; /* low byte output, high byte output */ if ((s->io_bits & 0x00FF) == 0) dio_mode |= 0x0001; /* low byte input */ if ((s->io_bits & 0xFF00) == 0) dio_mode |= 0x0002; /* high byte input */ outw(dio_mode, dev->iobase + PCI1723_DIGITAL_IO_PORT_SET); return 1; } /* digital i/o bits read/write */ static int pci1723_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (data[0]) { s->state &= ~data[0]; s->state |= (data[0] & data[1]); outw(s->state, dev->iobase + PCI1723_WRITE_DIGITAL_OUTPUT_CMD); } data[1] = inw(dev->iobase + PCI1723_READ_DIGITAL_INPUT_DATA); return insn->n; } static int pci1723_auto_attach(struct comedi_device *dev, unsigned long context_unused) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); struct pci1723_private *devpriv; struct comedi_subdevice *s; int ret; dev->board_name = dev->driver->driver_name; devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL); if (!devpriv) return -ENOMEM; dev->private = devpriv; ret = comedi_pci_enable(pcidev, dev->board_name); if (ret) return ret; dev->iobase = pci_resource_start(pcidev, 2); ret = comedi_alloc_subdevices(dev, 2); if (ret) return ret; s = &dev->subdevices[0]; dev->write_subdev = s; s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = 8; s->maxdata = 0xffff; s->len_chanlist = 8; s->range_table = &range_bipolar10; s->insn_write = pci1723_ao_write_winsn; s->insn_read = pci1723_insn_read_ao; s = &dev->subdevices[1]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 16; s->maxdata = 1; s->len_chanlist = 16; s->range_table = &range_digital; s->insn_config = pci1723_dio_insn_config; s->insn_bits = pci1723_dio_insn_bits; /* read DIO config */ switch (inw(dev->iobase + PCI1723_DIGITAL_IO_PORT_MODE) & 0x03) { case 0x00: /* low byte output, high byte 
output */ s->io_bits = 0xFFFF; break; case 0x01: /* low byte input, high byte output */ s->io_bits = 0xFF00; break; case 0x02: /* low byte output, high byte input */ s->io_bits = 0x00FF; break; case 0x03: /* low byte input, high byte input */ s->io_bits = 0x0000; break; } /* read DIO port state */ s->state = inw(dev->iobase + PCI1723_READ_DIGITAL_INPUT_DATA); pci1723_reset(dev); dev_info(dev->class_dev, "%s attached\n", dev->board_name); return 0; } static void pci1723_detach(struct comedi_device *dev) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); if (pcidev) { if (dev->iobase) { pci1723_reset(dev); comedi_pci_disable(pcidev); } } } static struct comedi_driver adv_pci1723_driver = { .driver_name = "adv_pci1723", .module = THIS_MODULE, .auto_attach = pci1723_auto_attach, .detach = pci1723_detach, }; static int adv_pci1723_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, &adv_pci1723_driver); } static DEFINE_PCI_DEVICE_TABLE(adv_pci1723_pci_table) = { { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1723) }, { 0 } }; MODULE_DEVICE_TABLE(pci, adv_pci1723_pci_table); static struct pci_driver adv_pci1723_pci_driver = { .name = "adv_pci1723", .id_table = adv_pci1723_pci_table, .probe = adv_pci1723_pci_probe, .remove = comedi_pci_auto_unconfig, }; module_comedi_pci_driver(adv_pci1723_driver, adv_pci1723_pci_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
<?php
if (!defined('_JEXEC')) die('Direct Access to ' . basename(__FILE__) . ' is not allowed.');

/**
 * IsAuthorized.class.php
 *
 * Plain data container for the Avalara "IsAuthorized" batch request:
 * carries the list of operations to check authorization for.
 *
 * @author    Avalara
 * @copyright 2004 - 2011 Avalara, Inc. All rights reserved.
 * @package   Batch
 */
class IsAuthorized
{
    /** @var string Operations to query (comma-separated). */
    private $Operations;

    /**
     * Set the operations string.
     *
     * @param string $value
     */
    public function setOperations($value)
    {
        $this->Operations = $value;
    }

    /**
     * Get the operations string.
     *
     * @return string
     */
    public function getOperations()
    {
        return $this->Operations;
    }
}
?>
<?php
// displays the settings tab in Polylang settings
// View included by the settings controller; relies on the caller providing
// $this->model, $this->options, $this->links_model and $listlanguages
// (and presumably $post_types / $taxonomies — TODO confirm against caller).

$content_with_no_languages = $this->model->get_objects_with_no_lang() && $this->options['default_lang'];
$page_on_front = 'page' == get_option('show_on_front') ? get_option('page_on_front') : 0;
?>
<form id="options-lang" method="post" action="admin.php?page=mlang&amp;tab=settings&amp;noheader=true" class="validate">
<?php wp_nonce_field('options-lang', '_wpnonce_options-lang');?>
<input type="hidden" name="pll_action" value="options" />
<table class="form-table">
	<tr>
		<th <?php echo $content_with_no_languages ? 'rowspan=2' : ''; ?>>
			<label for='default_lang'><?php _e('Default language', 'polylang');?></label>
		</th>
		<td><?php
			$dropdown = new PLL_Walker_Dropdown;
			echo $dropdown->walk($listlanguages, array('name' => 'default_lang', 'selected' => $this->options['default_lang']));?>
		</td>
	</tr><?php

	// posts or terms without language set
	if ($content_with_no_languages) {?>
		<tr>
			<td>
				<label style="color: red"><?php
					printf(
						'<input name="fill_languages" type="checkbox" value="1" /> %s',
						__('There are posts, pages, categories or tags without language set. Do you want to set them all to default language ?', 'polylang')
					);?>
				</label>
			</td>
		</tr><?php
	}?>

	<tr>
		<th rowspan = <?php echo ($page_on_front ? 3 : 2) + $this->links_model->using_permalinks; ?>><?php _e('URL modifications', 'polylang') ?></th>
		<td><fieldset id='pll-force-lang'>
			<label><?php
				printf(
					'<input name="force_lang" type="radio" value="0" %s /> %s',
					$this->options['force_lang'] ? '' : 'checked="checked"',
					__('The language is set from content', 'polylang')
				);?>
			</label>
			<p class="description"><?php _e('Posts, pages, categories and tags urls are not modified.', 'polylang');?></p>
			<label><?php
				printf(
					'<input name="force_lang" type="radio" value="1" %s/> %s',
					1 == $this->options['force_lang'] ? 'checked="checked"' : '',
					$this->links_model->using_permalinks ?
						__('The language is set from the directory name in pretty permalinks', 'polylang') :
						__('The language is set from the code in the URL', 'polylang')
				);?>
			</label>
			<p class="description"><?php echo __('Example:', 'polylang') . ' <code>'.esc_html(home_url($this->links_model->using_permalinks ? 'en/my-post/' : '?lang=en&p=1')).'</code>';?></p>
			<label><?php
				/* options 2 and 3 require pretty permalinks */
				printf(
					'<input name="force_lang" type="radio" value="2" %s %s/> %s',
					$this->links_model->using_permalinks ? '' : 'disabled="disabled"',
					2 == $this->options['force_lang'] ? 'checked="checked"' : '',
					__('The language is set from the subdomain name in pretty permalinks', 'polylang')
				);?>
			</label>
			<p class="description"><?php echo __('Example:', 'polylang') . ' <code>'.esc_html(str_replace(array('://', 'www.'), array('://en.', ''), home_url('my-post/'))).'</code>';?></p>
			<label><?php
				printf(
					'<input name="force_lang" type="radio" value="3" %s %s/> %s',
					$this->links_model->using_permalinks ? '' : 'disabled="disabled"',
					3 == $this->options['force_lang'] ? 'checked="checked"' : '',
					__('The language is set from different domains', 'polylang')
				);?>
			</label>
			<table id="pll-domains-table" <?php echo 3 == $this->options['force_lang'] ? '' : 'style="display: none;"'; ?>><?php
				// one text input per language; defaults to the site home for the default language
				foreach ($listlanguages as $lg) {
					printf(
						'<tr><td><label for="pll-domain[%1$s]">%2$s</label></td>' .
						'<td><input name="domains[%1$s]" id="pll-domain[%1$s]" type="text" value="%3$s" size="40" aria-required="true" /></td></tr>',
						esc_attr($lg->slug),
						esc_attr($lg->name),
						esc_url(isset($this->options['domains'][$lg->slug]) ? $this->options['domains'][$lg->slug] : ($lg->slug == $this->options['default_lang'] ? $this->links_model->home : ''))
					);
				}?>
			</table>
		</fieldset></td>
	</tr>

	<tr>
		<td id="pll-hide-default" <?php echo 3 > $this->options['force_lang'] ? '' : 'style="display: none;"'; ?>><fieldset>
			<label><?php
				printf(
					'<input name="hide_default" type="checkbox" value="1" %s /> %s',
					$this->options['hide_default'] ? 'checked="checked"' :'',
					__('Hide URL language information for default language', 'polylang')
				);?>
			</label>
		</fieldset></td>
	</tr><?php

	if ($this->links_model->using_permalinks) { ?>
		<tr>
			<td id="pll-rewrite" <?php echo 2 > $this->options['force_lang'] ? '' : 'style="display: none;"'; ?>><fieldset>
				<label><?php
					printf(
						'<input name="rewrite" type="radio" value="1" %s %s/> %s',
						$this->links_model->using_permalinks ? '' : 'disabled="disabled"',
						$this->options['rewrite'] ? 'checked="checked"' : '',
						__('Remove /language/ in pretty permalinks', 'polylang')
					);?>
				</label>
				<p class="description"><?php echo __('Example:', 'polylang') . ' <code>'.esc_html(home_url('en/')).'</code>';?></p>
				<label><?php
					printf(
						'<input name="rewrite" type="radio" value="0" %s %s/> %s',
						$this->links_model->using_permalinks ? '' : 'disabled="disabled"',
						$this->options['rewrite'] ? '' : 'checked="checked"',
						__('Keep /language/ in pretty permalinks', 'polylang')
					);?>
				</label>
				<p class="description"><?php echo __('Example:', 'polylang') . ' <code>'.esc_html(home_url('language/en/')).'</code>';?></p>
			</fieldset></td>
		</tr><?php
	}

	if ($page_on_front) { ?>
		<tr>
			<td><fieldset>
				<label><?php
					printf(
						'<input name="redirect_lang" type="checkbox" value="1" %s/> %s',
						$this->options['redirect_lang'] ? 'checked="checked"' :'',
						__('The front page url contains the language code instead of the page name or page id', 'polylang')
					);?>
				</label>
				<p class="description"><?php
					// that's nice to display the right home urls but don't forget that the page on front may have no language yet
					$lang = $this->model->get_post_language($page_on_front);
					$lang = $lang ? $lang : $this->model->get_language($this->options['default_lang']);
					printf(
						__('Example: %s instead of %s', 'polylang'),
						'<code>' . esc_html($this->links_model->home_url($lang)) . '</code>',
						'<code>' . esc_html(_get_page_link($page_on_front)) . '</code>'
					);
				?>
				</p>
			</fieldset></td>
		</tr><?php
	} ?>

	<tr id="pll-detect-browser" <?php echo 3 > $this->options['force_lang'] ? '' : 'style="display: none;"'; ?>>
		<th><?php _e('Detect browser language', 'polylang');?></th>
		<td>
			<label><?php
				printf(
					'<input name="browser" type="checkbox" value="1" %s /> %s',
					$this->options['browser'] ? 'checked="checked"' :'',
					__('When the front page is visited, set the language according to the browser preference', 'polylang')
				);?>
			</label>
		</td>
	</tr>

	<tr>
		<th scope="row"><?php _e('Media', 'polylang') ?></th>
		<td>
			<label><?php
				printf(
					'<input name="media_support" type="checkbox" value="1" %s /> %s',
					$this->options['media_support'] ? 'checked="checked"' :'',
					__('Activate languages and translations for media', 'polylang')
				);?>
			</label>
		</td>
	</tr><?php

	if (!empty($post_types)) {?>
		<tr>
			<th scope="row"><?php _e('Custom post types', 'polylang') ?></th>
			<td>
				<ul class="pll_inline_block"><?php
					foreach ($post_types as $post_type) {
						$pt = get_post_type_object($post_type);
						printf(
							'<li><label><input name="post_types[%s]" type="checkbox" value="1" %s /> %s</label></li>',
							esc_attr($post_type),
							in_array($post_type, $this->options['post_types']) ? 'checked="checked"' :'',
							esc_html($pt->labels->name)
						);
					}?>
				</ul>
				<p class="description"><?php _e('Activate languages and translations for custom post types.', 'polylang');?></p>
			</td>
		</tr><?php
	}

	if (!empty($taxonomies)) {?>
		<tr>
			<th scope="row"><?php _e('Custom taxonomies', 'polylang') ?></th>
			<td>
				<ul class="pll_inline_block"><?php
					foreach ($taxonomies as $taxonomy) {
						$tax = get_taxonomy($taxonomy);
						printf(
							'<li><label><input name="taxonomies[%s]" type="checkbox" value="1" %s /> %s</label></li>',
							esc_attr($taxonomy),
							in_array($taxonomy, $this->options['taxonomies']) ? 'checked="checked"' :'',
							esc_html($tax->labels->name)
						);
					}?>
				</ul>
				<p class="description"><?php _e('Activate languages and translations for custom taxonomies.', 'polylang');?></p>
			</td>
		</tr><?php
	}?>

	<tr>
		<th scope="row"><?php _e('Synchronization', 'polylang') ?></th>
		<td>
			<ul class="pll_inline_block"><?php
				foreach (self::list_metas_to_sync() as $key => $str)
					printf(
						'<li><label><input name="sync[%s]" type="checkbox" value="1" %s /> %s</label></li>',
						esc_attr($key),
						in_array($key, $this->options['sync']) ? 'checked="checked"' :'',
						esc_html($str)
					);?>
			</ul>
			<p class="description"><?php _e('The synchronization options allow to maintain exact same values (or translations in the case of taxonomies and page parent) of meta content between the translations of a post or page.', 'polylang');?></p>
		</td>
	</tr>
</table>

<?php submit_button(); // since WP 3.1 ?>

</form>
<?php if(!defined('sugarEntry') || !sugarEntry) die('Not A Valid Entry Point'); /********************************************************************************* * SugarCRM Community Edition is a customer relationship management program developed by * SugarCRM, Inc. Copyright (C) 2004-2013 SugarCRM Inc. * SuiteCRM is an extension to SugarCRM Community Edition developed by Salesagility Ltd. * Copyright (C) 2011 - 2014 Salesagility Ltd. * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU Affero General Public License version 3 as published by the * Free Software Foundation with the addition of the following permission added * to Section 15 as permitted in Section 7(a): FOR ANY PART OF THE COVERED WORK * IN WHICH THE COPYRIGHT IS OWNED BY SUGARCRM, SUGARCRM DISCLAIMS THE WARRANTY * OF NON INFRINGEMENT OF THIRD PARTY RIGHTS. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more * details. * * You should have received a copy of the GNU Affero General Public License along with * this program; if not, see http://www.gnu.org/licenses or write to the Free * Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301 USA. * * You can contact SugarCRM, Inc. headquarters at 10050 North Wolfe Road, * SW2-130, Cupertino, CA 95014, USA. or at email address contact@sugarcrm.com. * * The interactive user interfaces in modified source and object code versions * of this program must display Appropriate Legal Notices, as required under * Section 5 of the GNU Affero General Public License version 3. * * In accordance with Section 7(b) of the GNU Affero General Public License version 3, * these Appropriate Legal Notices must retain the display of the "Powered by * SugarCRM" logo and "Supercharged by SuiteCRM" logo. 
If the display of the logos is not
 * reasonably feasible for technical reasons, the Appropriate Legal Notices must
 * display the words "Powered by SugarCRM" and "Supercharged by SuiteCRM".
 ********************************************************************************/

/**
 * Builds the "additional details" pop-up body for a Campaign list-view row.
 *
 * @param array $fields Row data keyed by upper-case field name
 *                      (START_DATE, TRACKER_TEXT, REFER_URL, OBJECTIVE,
 *                      CONTENT, ID).
 * @return array fieldToAddTo / string / editLink / viewLink consumed by the
 *               additional-details popup helper.
 */
function additionalDetailsCampaign($fields)
{
	// Module strings are cached across calls (one lookup per request).
	static $mod_strings;
	if(empty($mod_strings))
	{
		global $current_language;
		$mod_strings = return_module_language($current_language, 'Campaigns');
	}

	$overlib_string = '';
	if(!empty($fields['START_DATE']))
		$overlib_string .= '<b>'. $mod_strings['LBL_CAMPAIGN_START_DATE'] . '</b> ' . $fields['START_DATE'] . '<br>';
	if(!empty($fields['TRACKER_TEXT']))
		$overlib_string .= '<b>'. $mod_strings['LBL_TRACKER_TEXT'] . '</b> ' . $fields['TRACKER_TEXT'] . '<br>';
	if(!empty($fields['REFER_URL']))
		// NOTE(review): REFER_URL is emitted into an unquoted href without escaping —
		// confirm it is sanitized upstream before this point.
		$overlib_string .= '<a target=_blank href='. $fields['REFER_URL'] . '>' . $fields['REFER_URL'] . '</a><br>';
	if(!empty($fields['OBJECTIVE'])) {
		// show at most 300 characters, appending an ellipsis when truncated
		$overlib_string .= '<b>'. $mod_strings['LBL_CAMPAIGN_OBJECTIVE'] . '</b> ' . substr($fields['OBJECTIVE'], 0, 300);
		if(strlen($fields['OBJECTIVE']) > 300)
			$overlib_string .= '...';
		$overlib_string .= '<br>';
	}
	if(!empty($fields['CONTENT'])) {
		// same 300-character truncation as OBJECTIVE above
		$overlib_string .= '<b>'. $mod_strings['LBL_CAMPAIGN_CONTENT'] . '</b> ' . substr($fields['CONTENT'], 0, 300);
		if(strlen($fields['CONTENT']) > 300)
			$overlib_string .= '...';
	}

	return array('fieldToAddTo' => 'NAME',
				 'string' => $overlib_string,
				 'editLink' => "index.php?action=EditView&module=Campaigns&return_module=Campaigns&record={$fields['ID']}",
				 'viewLink' => "index.php?action=DetailView&module=Campaigns&return_module=Campaigns&record={$fields['ID']}");
}
?>
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Usage: convert_computation <txt2bin|bin2txt> serialized_computation_proto // // bin2txt spits out the result to stdout. txt2bin modifies the file in place. #include <stdio.h> #include <unistd.h> #include <string> #include "tensorflow/compiler/xla/service/hlo.pb.h" #include "tensorflow/compiler/xla/statusor.h" #include "tensorflow/compiler/xla/types.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" namespace xla { namespace tools { void RealMain(const string& mode, const string& path) { HloSnapshot module; tensorflow::Env* env = tensorflow::Env::Default(); if (mode == "txt2bin") { TF_CHECK_OK(tensorflow::ReadTextProto(env, path, &module)); TF_CHECK_OK(tensorflow::WriteBinaryProto(env, path, module)); } else if (mode == "bin2txt") { TF_CHECK_OK(tensorflow::ReadBinaryProto(env, path, &module)); string out; tensorflow::protobuf::TextFormat::PrintToString(module, &out); fprintf(stdout, "%s", out.c_str()); } else { LOG(QFATAL) << "unknown mode for computation conversion: " << mode; } } } // namespace tools } // namespace xla int main(int argc, char** argv) { tensorflow::port::InitMain(argv[0], &argc, &argv); QCHECK_EQ(argc, 3) << "usage: " << argv[0] << " <txt2bin|bin2txt> <path>"; 
xla::tools::RealMain(argv[1], argv[2]); return 0; }
/**
 * Copyright 1999-2014 dangdang.com.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.alibaba.dubbo.common.serialize.support.fst;

import com.alibaba.dubbo.common.serialize.ObjectOutput;
import de.ruedigermoeller.serialization.FSTObjectOutput;

import java.io.IOException;
import java.io.OutputStream;

/**
 * Dubbo {@link ObjectOutput} backed by FST serialization.  All primitive
 * writes delegate directly to the underlying {@link FSTObjectOutput}.
 *
 * @author lishen
 */
public class FstObjectOutput implements ObjectOutput {

    /** Underlying FST stream obtained from the shared factory. */
    private FSTObjectOutput output;

    public FstObjectOutput(OutputStream outputStream) {
        output = FstFactory.getDefaultFactory().getObjectOutput(outputStream);
    }

    public void writeBool(boolean v) throws IOException {
        output.writeBoolean(v);
    }

    public void writeByte(byte v) throws IOException {
        output.writeByte(v);
    }

    public void writeShort(short v) throws IOException {
        output.writeShort(v);
    }

    public void writeInt(int v) throws IOException {
        output.writeInt(v);
    }

    public void writeLong(long v) throws IOException {
        output.writeLong(v);
    }

    public void writeFloat(float v) throws IOException {
        output.writeFloat(v);
    }

    public void writeDouble(double v) throws IOException {
        output.writeDouble(v);
    }

    /**
     * Writes a byte array as a length prefix followed by the raw bytes.
     * A null array is encoded as length -1 so the reader can restore null.
     */
    public void writeBytes(byte[] v) throws IOException {
        if (v == null) {
            output.writeInt(-1);
        } else {
            writeBytes(v, 0, v.length);
        }
    }

    public void writeBytes(byte[] v, int off, int len) throws IOException {
        if (v == null) {
            output.writeInt(-1);
        } else {
            output.writeInt(len);
            output.write(v, off, len);
        }
    }

    public void writeUTF(String v) throws IOException {
        output.writeUTF(v);
    }

    public void writeObject(Object v) throws IOException {
        output.writeObject(v);
    }

    /** Flushes buffered data to the wrapped output stream. */
    public void flushBuffer() throws IOException {
        output.flush();
    }
}
/* * Copyright (c) 1997 - 2008 Kungliga Tekniska Högskolan * (Royal Institute of Technology, Stockholm, Sweden). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Institute nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/

#include "krb5_locl.h"

/*
 * Build the encrypt and decrypt EVP cipher contexts for the key in `kd'
 * and store them in the key's schedule.
 */
void
_krb5_evp_schedule(krb5_context context,
		   struct _krb5_key_type *kt,
		   struct _krb5_key_data *kd)
{
    struct _krb5_evp_schedule *key = kd->schedule->data;
    const EVP_CIPHER *c = (*kt->evp)();

    EVP_CIPHER_CTX_init(&key->ectx);
    EVP_CIPHER_CTX_init(&key->dctx);

    /* One context per direction: last argument 1 = encrypt, 0 = decrypt. */
    EVP_CipherInit_ex(&key->ectx, c, NULL, kd->key->keyvalue.data, NULL, 1);
    EVP_CipherInit_ex(&key->dctx, c, NULL, kd->key->keyvalue.data, NULL, 0);
}

/* Release the EVP contexts created by _krb5_evp_schedule(). */
void
_krb5_evp_cleanup(krb5_context context, struct _krb5_key_data *kd)
{
    struct _krb5_evp_schedule *key = kd->schedule->data;

    EVP_CIPHER_CTX_cleanup(&key->ectx);
    EVP_CIPHER_CTX_cleanup(&key->dctx);
}

/*
 * Encrypt or decrypt `data' in place.  A NULL `ivec' means "use an
 * all-zero IV".  `usage' is not used by this backend.
 */
krb5_error_code
_krb5_evp_encrypt(krb5_context context,
		  struct _krb5_key_data *key,
		  void *data,
		  size_t len,
		  krb5_boolean encryptp,
		  int usage,
		  void *ivec)
{
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    EVP_CIPHER_CTX *c;
    c = encryptp ? &ctx->ectx : &ctx->dctx;
    if (ivec == NULL) {
	/* alloca ? */
	size_t len2 = EVP_CIPHER_CTX_iv_length(c);
	void *loiv = malloc(len2);
	if (loiv == NULL) {
	    krb5_clear_error_message(context);
	    return ENOMEM;
	}
	memset(loiv, 0, len2);
	EVP_CipherInit_ex(c, NULL, NULL, NULL, loiv, -1);
	free(loiv);
    } else
	EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    EVP_Cipher(c, data, data, len);
    return 0;
}

static const unsigned char zero_ivec[EVP_MAX_BLOCK_LENGTH] = { 0 };

/*
 * Encrypt or decrypt `data' in place using CBC with ciphertext stealing
 * (CTS): the final partial block is handled by swapping/stealing from the
 * last full block, so no padding is required.  Statement order below is
 * load-bearing — do not reorder.
 */
krb5_error_code
_krb5_evp_encrypt_cts(krb5_context context,
		      struct _krb5_key_data *key,
		      void *data,
		      size_t len,
		      krb5_boolean encryptp,
		      int usage,
		      void *ivec)
{
    size_t i, blocksize;
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    unsigned char tmp[EVP_MAX_BLOCK_LENGTH], ivec2[EVP_MAX_BLOCK_LENGTH];
    EVP_CIPHER_CTX *c;
    unsigned char *p;

    c = encryptp ? &ctx->ectx : &ctx->dctx;

    blocksize = EVP_CIPHER_CTX_block_size(c);

    if (len < blocksize) {
	krb5_set_error_message(context, EINVAL,
			       "message block too short");
	return EINVAL;
    } else if (len == blocksize) {
	/* Exactly one block: plain CBC with a zero IV, no stealing needed. */
	EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
	EVP_Cipher(c, data, data, len);
	return 0;
    }

    if (ivec)
	EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    else
	EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);

    if (encryptp) {

	p = data;
	i = ((len - 1) / blocksize) * blocksize;
	/* CBC-encrypt everything except the final partial block. */
	EVP_Cipher(c, p, p, i);
	p += i - blocksize;
	len -= i;
	memcpy(ivec2, p, blocksize);

	/* Pad the tail with zeros, XOR against the last ciphertext block. */
	for (i = 0; i < len; i++)
	    tmp[i] = p[i + blocksize] ^ ivec2[i];
	for (; i < blocksize; i++)
	    tmp[i] = 0 ^ ivec2[i];

	EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
	EVP_Cipher(c, p, tmp, blocksize);

	/* Swap: the stolen ciphertext becomes the (short) final block. */
	memcpy(p + blocksize, ivec2, len);
	if (ivec)
	    memcpy(ivec, p, blocksize);
    } else {
	unsigned char tmp2[EVP_MAX_BLOCK_LENGTH], tmp3[EVP_MAX_BLOCK_LENGTH];

	p = data;
	if (len > blocksize * 2) {
	    /* remove last two blocks and round up, decrypt this with cbc, then do cts dance */
	    i = ((((len - blocksize * 2) + blocksize - 1) / blocksize) * blocksize);
	    memcpy(ivec2, p + i - blocksize, blocksize);
	    EVP_Cipher(c, p, p, i);
	    p += i;
	    len -= i + blocksize;
	} else {
	    if (ivec)
		memcpy(ivec2, ivec, blocksize);
	    else
		memcpy(ivec2, zero_ivec, blocksize);
	    len -= blocksize;
	}

	memcpy(tmp, p, blocksize);
	EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
	EVP_Cipher(c, tmp2, p, blocksize);

	/* Rebuild the full final block from the short tail plus stolen bits. */
	memcpy(tmp3, p + blocksize, len);
	memcpy(tmp3 + len, tmp2 + len, blocksize - len); /* xor 0 */

	for (i = 0; i < len; i++)
	    p[i + blocksize] = tmp2[i] ^ tmp3[i];

	EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
	EVP_Cipher(c, p, tmp3, blocksize);

	for (i = 0; i < blocksize; i++)
	    p[i] ^= ivec2[i];
	if (ivec)
	    memcpy(ivec, tmp, blocksize);
    }
    return 0;
}
/****************************************************************************** ** ** FILE NAME : ifxmips_atm_ar9.c ** PROJECT : UEIP ** MODULES : ATM ** ** DATE : 7 Jul 2009 ** AUTHOR : Xu Liang ** DESCRIPTION : ATM driver common source file (core functions) ** COPYRIGHT : Copyright (c) 2006 ** Infineon Technologies AG ** Am Campeon 1-12, 85579 Neubiberg, Germany ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** HISTORY ** $Date $Author $Comment ** 07 JUL 2009 Xu Liang Init Version *******************************************************************************/ /* * #################################### * Head File * #################################### */ /* * Common Head File */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/version.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/ioctl.h> #include <asm/delay.h> /* * Chip Specific Head File */ #include "ifxmips_atm_core.h" #include "ifxmips_atm_fw_ar9.h" #include "ifxmips_atm_fw_regs_ar9.h" #include <lantiq_soc.h> /* * #################################### * Definition * #################################### */ /* * EMA Settings */ #define EMA_CMD_BUF_LEN 0x0040 #define EMA_CMD_BASE_ADDR (0x00003B80 << 2) #define EMA_DATA_BUF_LEN 0x0100 #define EMA_DATA_BASE_ADDR (0x00003C00 << 2) #define EMA_WRITE_BURST 0x2 #define EMA_READ_BURST 0x2 /* * #################################### * Declaration * #################################### */ /* * Hardware Init/Uninit Functions */ static inline void init_pmu(void); static inline void uninit_pmu(void); static inline void reset_ppe(void); static inline void init_ema(void); static inline void init_mailbox(void); static inline void clear_share_buffer(void); /* * 
 * ####################################
 *              Local Variable
 * ####################################
 */

/*
 * ####################################
 *              Local Function
 * ####################################
 */

/* PMU clock-gating bits for the PPE/DSL blocks used by the ATM firmware. */
#define IFX_PMU_MODULE_PPE_SLL01	BIT(19)
#define IFX_PMU_MODULE_PPE_TC	BIT(21)
#define IFX_PMU_MODULE_PPE_EMA	BIT(22)
#define IFX_PMU_MODULE_PPE_QSB	BIT(18)
#define IFX_PMU_MODULE_TPE	BIT(13)
#define IFX_PMU_MODULE_DSL_DFE	BIT(9)

/*
 * init_pmu - power up every PPE sub-block (SLL01, TC, EMA, QSB, TPE,
 * DSL DFE) in one PMU call.  Mirrored by ar9_shutdown(), which disables
 * the same mask.
 */
static inline void init_pmu(void)
{
	ltq_pmu_enable(IFX_PMU_MODULE_PPE_SLL01 |
		IFX_PMU_MODULE_PPE_TC |
		IFX_PMU_MODULE_PPE_EMA |
		IFX_PMU_MODULE_PPE_QSB |
		IFX_PMU_MODULE_TPE |
		IFX_PMU_MODULE_DSL_DFE);
}

/* No PMU teardown is done here; ar9_shutdown() disables the clocks. */
static inline void uninit_pmu(void)
{
}

/*
 * reset_ppe - reset the PPE block.  The actual RCU reset call is
 * commented out; when not built as a module this is a no-op.
 */
static inline void reset_ppe(void)
{
#ifdef MODULE
	// reset PPE
	// ifx_rcu_rst(IFX_RCU_DOMAIN_PPE, IFX_RCU_MODULE_ATM);
#endif
}

/*
 * init_ema - program the External Memory Access engine: command and
 * data buffer base/length (base addresses are stored as word
 * addresses, hence the >> 2), unmask all eight EMA interrupts, and set
 * the read/write burst lengths.
 */
static inline void init_ema(void)
{
	IFX_REG_W32((EMA_CMD_BUF_LEN << 16) | (EMA_CMD_BASE_ADDR >> 2), EMA_CMDCFG);
	IFX_REG_W32((EMA_DATA_BUF_LEN << 16) | (EMA_DATA_BASE_ADDR >> 2), EMA_DATACFG);
	IFX_REG_W32(0x000000FF, EMA_IER);
	IFX_REG_W32(EMA_READ_BURST | (EMA_WRITE_BURST << 2), EMA_CFG);
}

/*
 * init_mailbox - acknowledge any pending mailbox interrupts (write-1-
 * to-clear the ISRC registers) and mask all sources on IGU1 and IGU3.
 */
static inline void init_mailbox(void)
{
	IFX_REG_W32(0xFFFFFFFF, MBOX_IGU1_ISRC);
	IFX_REG_W32(0x00000000, MBOX_IGU1_IER);
	IFX_REG_W32(0xFFFFFFFF, MBOX_IGU3_ISRC);
	IFX_REG_W32(0x00000000, MBOX_IGU3_IER);
}

/*
 * clear_share_buffer - zero the five contiguous shared SRAM banks
 * (SB_RAM0..SB_RAM4) word by word before firmware download.
 */
static inline void clear_share_buffer(void)
{
	volatile u32 *p = SB_RAM0_ADDR(0);
	unsigned int i;

	for ( i = 0; i < SB_RAM0_DWLEN + SB_RAM1_DWLEN + SB_RAM2_DWLEN + SB_RAM3_DWLEN + SB_RAM4_DWLEN; i++ )
		IFX_REG_W32(0, p++);
}

/*
 * pp32_download_code - copy PP32 firmware code and data images into the
 * CDM memories.
 *
 * @code_src:       word-aligned source buffer for the code image
 * @code_dword_len: code length in 32-bit words
 * @data_src:       word-aligned source buffer for the data image
 * @data_dword_len: data length in 32-bit words
 *
 * Returns 0 on success, -1 if either pointer is NULL or not 4-byte
 * aligned.  CDM_CFG is set to 0x04 when the code image does not fit in
 * code memory bank 0 alone — presumably this switches the memory split;
 * TODO confirm against the AR9 PPE datasheet.
 */
static inline int pp32_download_code(u32 *code_src, unsigned int code_dword_len, u32 *data_src, unsigned int data_dword_len)
{
	volatile u32 *dest;

	if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0
		|| data_src == 0 || ((unsigned long)data_src & 0x03) != 0 )
		return -1;

	if ( code_dword_len <= CDM_CODE_MEMORYn_DWLEN(0) )
		IFX_REG_W32(0x00, CDM_CFG);
	else
		IFX_REG_W32(0x04, CDM_CFG);

	/* copy code */
	dest = CDM_CODE_MEMORY(0, 0);
	while ( code_dword_len-- > 0 )
		IFX_REG_W32(*code_src++, dest++);

	/* copy data */
	dest = CDM_DATA_MEMORY(0, 0);
	while ( data_dword_len-- > 0 )
		IFX_REG_W32(*data_src++, dest++);

	return 0;
}

/*
 * ar9_fw_ver - report the running firmware version as read from the
 * FW_VER_ID register block.  Both pointers must be non-NULL.
 */
void ar9_fw_ver(unsigned int *major, unsigned int *minor)
{
	ASSERT(major != NULL, "pointer is NULL");
	ASSERT(minor != NULL, "pointer is NULL");

	*major = FW_VER_ID->major;
	*minor = FW_VER_ID->minor;
}

/* ar9_init - full hardware bring-up sequence; order matters. */
void ar9_init(void)
{
	init_pmu();
	reset_ppe();
	init_ema();
	init_mailbox();
	clear_share_buffer();
}

/* ar9_shutdown - gate the clocks enabled by init_pmu(). */
void ar9_shutdown(void)
{
	ltq_pmu_disable(IFX_PMU_MODULE_PPE_SLL01 |
		IFX_PMU_MODULE_PPE_TC |
		IFX_PMU_MODULE_PPE_EMA |
		IFX_PMU_MODULE_PPE_QSB |
		IFX_PMU_MODULE_TPE |
		IFX_PMU_MODULE_DSL_DFE);
}

/*
 * ar9_start - download the built-in firmware images and restart PP32
 * core 0.  The @pp32 argument is unused: only core 0 is driven here.
 * Returns 0 on success or the pp32_download_code() error code.
 */
int ar9_start(int pp32)
{
	int ret;

	ret = pp32_download_code(ar9_fw_bin, sizeof(ar9_fw_bin) / sizeof(*ar9_fw_bin),
		ar9_fw_data, sizeof(ar9_fw_data) / sizeof(*ar9_fw_data));
	if ( ret != 0 )
		return ret;

	IFX_REG_W32(DBG_CTRL_RESTART, PP32_DBG_CTRL(0));

	/* give the core a moment to come out of reset */
	udelay(10);

	return 0;
}

/* ar9_stop - halt PP32 core 0 via the debug control register (@pp32 unused). */
void ar9_stop(int pp32)
{
	IFX_REG_W32(DBG_CTRL_STOP, PP32_DBG_CTRL(0));
}

/* Ops table exported to the common ATM core. */
struct ltq_atm_ops ar9_ops = {
	.init = ar9_init,
	.shutdown = ar9_shutdown,
	.start = ar9_start,
	.stop = ar9_stop,
	.fw_ver = ar9_fw_ver,
};
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) STMicroelectronics 2009 * Copyright (C) ST-Ericsson SA 2010 * * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> * Author: Sundar Iyer <sundar.iyer@stericsson.com> * * PRCM Unit registers */ #ifndef __DB8500_PRCMU_REGS_H #define __DB8500_PRCMU_REGS_H #define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end)) #define PRCM_ACLK_MGT (0x004) #define PRCM_SVAMMCSPCLK_MGT (0x008) #define PRCM_SIAMMDSPCLK_MGT (0x00C) #define PRCM_SGACLK_MGT (0x014) #define PRCM_UARTCLK_MGT (0x018) #define PRCM_MSP02CLK_MGT (0x01C) #define PRCM_I2CCLK_MGT (0x020) #define PRCM_SDMMCCLK_MGT (0x024) #define PRCM_SLIMCLK_MGT (0x028) #define PRCM_PER1CLK_MGT (0x02C) #define PRCM_PER2CLK_MGT (0x030) #define PRCM_PER3CLK_MGT (0x034) #define PRCM_PER5CLK_MGT (0x038) #define PRCM_PER6CLK_MGT (0x03C) #define PRCM_PER7CLK_MGT (0x040) #define PRCM_LCDCLK_MGT (0x044) #define PRCM_BMLCLK_MGT (0x04C) #define PRCM_HSITXCLK_MGT (0x050) #define PRCM_HSIRXCLK_MGT (0x054) #define PRCM_HDMICLK_MGT (0x058) #define PRCM_APEATCLK_MGT (0x05C) #define PRCM_APETRACECLK_MGT (0x060) #define PRCM_MCDECLK_MGT (0x064) #define PRCM_IPI2CCLK_MGT (0x068) #define PRCM_DSIALTCLK_MGT (0x06C) #define PRCM_DMACLK_MGT (0x074) #define PRCM_B2R2CLK_MGT (0x078) #define PRCM_TVCLK_MGT (0x07C) #define PRCM_UNIPROCLK_MGT (0x278) #define PRCM_SSPCLK_MGT (0x280) #define PRCM_RNGCLK_MGT (0x284) #define PRCM_UICCCLK_MGT (0x27C) #define PRCM_MSP1CLK_MGT (0x288) #define PRCM_ARM_PLLDIVPS (prcmu_base + 0x118) #define PRCM_ARM_PLLDIVPS_ARM_BRM_RATE 0x3f #define PRCM_ARM_PLLDIVPS_MAX_MASK 0xf #define PRCM_PLLARM_LOCKP (prcmu_base + 0x0a8) #define PRCM_PLLARM_LOCKP_PRCM_PLLARM_LOCKP3 0x2 #define PRCM_ARM_CHGCLKREQ (prcmu_base + 0x114) #define PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ BIT(0) #define PRCM_ARM_CHGCLKREQ_PRCM_ARM_DIVSEL BIT(16) #define PRCM_PLLARM_ENABLE (prcmu_base + 0x98) #define PRCM_PLLARM_ENABLE_PRCM_PLLARM_ENABLE 0x1 #define 
PRCM_PLLARM_ENABLE_PRCM_PLLARM_COUNTON 0x100 #define PRCM_ARMCLKFIX_MGT (prcmu_base + 0x0) #define PRCM_A9PL_FORCE_CLKEN (prcmu_base + 0x19C) #define PRCM_A9_RESETN_CLR (prcmu_base + 0x1f4) #define PRCM_A9_RESETN_SET (prcmu_base + 0x1f0) #define PRCM_ARM_LS_CLAMP (prcmu_base + 0x30c) #define PRCM_SRAM_A9 (prcmu_base + 0x308) #define PRCM_A9PL_FORCE_CLKEN_PRCM_A9PL_FORCE_CLKEN BIT(0) #define PRCM_A9PL_FORCE_CLKEN_PRCM_A9AXI_FORCE_CLKEN BIT(1) /* CPU mailbox registers */ #define PRCM_MBOX_CPU_VAL (prcmu_base + 0x0fc) #define PRCM_MBOX_CPU_SET (prcmu_base + 0x100) #define PRCM_MBOX_CPU_CLR (prcmu_base + 0x104) #define PRCM_HOSTACCESS_REQ (prcmu_base + 0x334) #define PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ 0x1 #define PRCM_HOSTACCESS_REQ_WAKE_REQ BIT(16) #define ARM_WAKEUP_MODEM 0x1 #define PRCM_ARM_IT1_CLR (prcmu_base + 0x48C) #define PRCM_ARM_IT1_VAL (prcmu_base + 0x494) #define PRCM_HOLD_EVT (prcmu_base + 0x174) #define PRCM_MOD_AWAKE_STATUS (prcmu_base + 0x4A0) #define PRCM_MOD_AWAKE_STATUS_PRCM_MOD_COREPD_AWAKE BIT(0) #define PRCM_MOD_AWAKE_STATUS_PRCM_MOD_AAPD_AWAKE BIT(1) #define PRCM_MOD_AWAKE_STATUS_PRCM_MOD_VMODEM_OFF_ISO BIT(2) #define PRCM_ITSTATUS0 (prcmu_base + 0x148) #define PRCM_ITSTATUS1 (prcmu_base + 0x150) #define PRCM_ITSTATUS2 (prcmu_base + 0x158) #define PRCM_ITSTATUS3 (prcmu_base + 0x160) #define PRCM_ITSTATUS4 (prcmu_base + 0x168) #define PRCM_ITSTATUS5 (prcmu_base + 0x484) #define PRCM_ITCLEAR5 (prcmu_base + 0x488) #define PRCM_ARMIT_MASKXP70_IT (prcmu_base + 0x1018) /* System reset register */ #define PRCM_APE_SOFTRST (prcmu_base + 0x228) /* Level shifter and clamp control registers */ #define PRCM_MMIP_LS_CLAMP_SET (prcmu_base + 0x420) #define PRCM_MMIP_LS_CLAMP_CLR (prcmu_base + 0x424) #define PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP BIT(11) #define PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI BIT(22) /* PRCMU clock/PLL/reset registers */ #define PRCM_PLLSOC0_FREQ (prcmu_base + 0x080) #define PRCM_PLLSOC1_FREQ (prcmu_base + 0x084) #define PRCM_PLLARM_FREQ (prcmu_base 
+ 0x088) #define PRCM_PLLDDR_FREQ (prcmu_base + 0x08C) #define PRCM_PLL_FREQ_D_SHIFT 0 #define PRCM_PLL_FREQ_D_MASK BITS(0, 7) #define PRCM_PLL_FREQ_N_SHIFT 8 #define PRCM_PLL_FREQ_N_MASK BITS(8, 13) #define PRCM_PLL_FREQ_R_SHIFT 16 #define PRCM_PLL_FREQ_R_MASK BITS(16, 18) #define PRCM_PLL_FREQ_SELDIV2 BIT(24) #define PRCM_PLL_FREQ_DIV2EN BIT(25) #define PRCM_PLLDSI_FREQ (prcmu_base + 0x500) #define PRCM_PLLDSI_ENABLE (prcmu_base + 0x504) #define PRCM_PLLDSI_LOCKP (prcmu_base + 0x508) #define PRCM_DSI_PLLOUT_SEL (prcmu_base + 0x530) #define PRCM_DSITVCLK_DIV (prcmu_base + 0x52C) #define PRCM_PLLDSI_LOCKP (prcmu_base + 0x508) #define PRCM_APE_RESETN_SET (prcmu_base + 0x1E4) #define PRCM_APE_RESETN_CLR (prcmu_base + 0x1E8) #define PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE BIT(0) #define PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 BIT(0) #define PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3 BIT(1) #define PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_SHIFT 0 #define PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_MASK BITS(0, 2) #define PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_SHIFT 8 #define PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_MASK BITS(8, 10) #define PRCM_DSI_PLLOUT_SEL_OFF 0 #define PRCM_DSI_PLLOUT_SEL_PHI 1 #define PRCM_DSI_PLLOUT_SEL_PHI_2 2 #define PRCM_DSI_PLLOUT_SEL_PHI_4 3 #define PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_SHIFT 0 #define PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_MASK BITS(0, 7) #define PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_SHIFT 8 #define PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_MASK BITS(8, 15) #define PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_SHIFT 16 #define PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_MASK BITS(16, 23) #define PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_EN BIT(24) #define PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_EN BIT(25) #define PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_EN BIT(26) #define PRCM_APE_RESETN_DSIPLL_RESETN BIT(14) #define PRCM_CLKOCR (prcmu_base + 0x1CC) #define PRCM_CLKOCR_CLKOUT0_REF_CLK (1 << 0) #define PRCM_CLKOCR_CLKOUT0_MASK BITS(0, 13) #define PRCM_CLKOCR_CLKOUT1_REF_CLK (1 << 16) #define 
PRCM_CLKOCR_CLKOUT1_MASK BITS(16, 29) /* ePOD and memory power signal control registers */ #define PRCM_EPOD_C_SET (prcmu_base + 0x410) #define PRCM_SRAM_LS_SLEEP (prcmu_base + 0x304) /* Debug power control unit registers */ #define PRCM_POWER_STATE_SET (prcmu_base + 0x254) /* Miscellaneous unit registers */ #define PRCM_DSI_SW_RESET (prcmu_base + 0x324) #define PRCM_GPIOCR (prcmu_base + 0x138) #define PRCM_GPIOCR_DBG_STM_MOD_CMD1 0x800 #define PRCM_GPIOCR_DBG_UARTMOD_CMD0 0x1 /* PRCMU HW semaphore */ #define PRCM_SEM (prcmu_base + 0x400) #define PRCM_SEM_PRCM_SEM BIT(0) #define PRCM_TCR (prcmu_base + 0x1C8) #define PRCM_TCR_TENSEL_MASK BITS(0, 7) #define PRCM_TCR_STOP_TIMERS BIT(16) #define PRCM_TCR_DOZE_MODE BIT(17) #define PRCM_CLKOCR_CLKODIV0_SHIFT 0 #define PRCM_CLKOCR_CLKODIV0_MASK BITS(0, 5) #define PRCM_CLKOCR_CLKOSEL0_SHIFT 6 #define PRCM_CLKOCR_CLKOSEL0_MASK BITS(6, 8) #define PRCM_CLKOCR_CLKODIV1_SHIFT 16 #define PRCM_CLKOCR_CLKODIV1_MASK BITS(16, 21) #define PRCM_CLKOCR_CLKOSEL1_SHIFT 22 #define PRCM_CLKOCR_CLKOSEL1_MASK BITS(22, 24) #define PRCM_CLKOCR_CLK1TYPE BIT(28) #define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4) #define PRCM_CLK_MGT_CLKPLLSW_SOC0 BIT(5) #define PRCM_CLK_MGT_CLKPLLSW_SOC1 BIT(6) #define PRCM_CLK_MGT_CLKPLLSW_DDR BIT(7) #define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7) #define PRCM_CLK_MGT_CLKEN BIT(8) #define PRCM_CLK_MGT_CLK38 BIT(9) #define PRCM_CLK_MGT_CLK38DIV BIT(11) #define PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN BIT(12) /* GPIOCR register */ #define PRCM_GPIOCR_SPI2_SELECT BIT(23) #define PRCM_DDR_SUBSYS_APE_MINBW (prcmu_base + 0x438) #define PRCM_CGATING_BYPASS (prcmu_base + 0x134) #define PRCM_CGATING_BYPASS_ICN2 BIT(6) /* Miscellaneous unit registers */ #define PRCM_RESOUTN_SET (prcmu_base + 0x214) #define PRCM_RESOUTN_CLR (prcmu_base + 0x218) /* System reset register */ #define PRCM_APE_SOFTRST (prcmu_base + 0x228) #endif /* __DB8500_PRCMU_REGS_H */
#ifndef _ASM_GENERIC_RWSEM_H
#define _ASM_GENERIC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 *
 * sem->count encoding: the low half holds the number of active
 * holders (readers count +1 each; a writer adds ACTIVE_BIAS plus
 * WAITING_BIAS), the upper half goes negative whenever any task is
 * waiting.  The fast paths below only inspect sign/bias results; the
 * slow paths live in lib/rwsem.c.
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * lock for reading
 *
 * A non-positive result means a writer holds the lock or writers are
 * queued, so fall into the slow path and sleep.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
		rwsem_down_read_failed(sem);
}

/*
 * Try-lock for reading: loop on cmpxchg as long as the count is
 * non-negative (no writer active/waiting).  Returns 1 on success,
 * 0 if a writer got in the way.
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 *
 * Only if the result is exactly ACTIVE_WRITE_BIAS was the semaphore
 * previously unlocked; any other value means contention and we block.
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	long tmp;

	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				     (atomic_long_t *)&sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

/*
 * Try-lock for writing: single cmpxchg from the unlocked value.
 * Returns 1 on success, 0 otherwise.
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 *
 * Wake waiters only when we were the last active reader (active mask
 * hit zero) and there is at least one waiter (count went below -1).
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 *
 * A negative result after removing the write bias means tasks are
 * still queued; hand the lock over to them.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
				 (atomic_long_t *)&sem->count) < 0))
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic_long_add(delta, (atomic_long_t *)&sem->count);
}

/*
 * downgrade write lock to read lock
 *
 * Converting the write bias into a single read bias leaves the count
 * negative only when waiters are queued; wake the readers among them.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
				     (atomic_long_t *)&sem->count);
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}

#endif	/* __KERNEL__ */
#endif	/* _ASM_GENERIC_RWSEM_H */
import * as React from 'react'; import { CSSModule } from '../index'; export type FormTextProps<T = {}> = React.HTMLAttributes<HTMLElement> & { inline?: boolean; tag?: React.ReactType; color?: string; className?: string; cssModule?: CSSModule; } & T; declare class FormText<T = {[key: string]: any}> extends React.Component<FormTextProps<T>> {} export default FormText;
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<body>
<h1>MuseScore Regression Tests</h1>
<h2>09i-MidmeasureBarline</h2><hr/>
<h3>Lilypond</h3>
<hr/>
<img src="lilypond/09i-MidmeasureBarline.png"/><br/>
<hr/>
<h3>MuseScore</h3>
<hr/>
<img src="mscore/09i-MidmeasureBarline.png"/><br/>
</body>
</html>
/* * GRUB -- GRand Unified Bootloader * Copyright (C) 2009 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #ifndef GRUB_MEMORY_MACHINE_HEADER #define GRUB_MEMORY_MACHINE_HEADER 1 #ifdef GRUB_CPU_MIPSEL #define GRUB_MACHINE_MEMORY_STACK_HIGH 0x806ffff0 #else #define GRUB_MACHINE_MEMORY_STACK_HIGH 0x881ffff0 #endif #ifndef ASM_FILE static inline grub_err_t grub_machine_mmap_register (grub_uint64_t start __attribute__ ((unused)), grub_uint64_t size __attribute__ ((unused)), int type __attribute__ ((unused)), int handle __attribute__ ((unused))) { return GRUB_ERR_NONE; } static inline grub_err_t grub_machine_mmap_unregister (int handle __attribute__ ((unused))) { return GRUB_ERR_NONE; } #endif #endif
# Default layout This is the default layout for the Laptreus ## Layout Some special things: * Lower/Raise are on the F/J keys. Tap to type the character, hold to activate the layer * Enter and Backspace also activate the Lift layer. * -- To get repeated backspace, hold BkLft and then press Shift ### Qwerty ,-----------------------------------------------------------------------------------. | Tab | Q | W | E | R | T | Y | U | I | O | P | Bksp | |------+------+------+------+------+-------------+------+------+------+------+------| | Esc | A | S | D | Lwr/F| G | H | Rse/J| K | L | ; | " | |------+------+------+------+------+------|------+------+------+------+------+------| | Shift| Z | X | C | V | B | N | M | , | . | / | Enter| |------+------+------+------+------+------+------+------+------+------+------+------| | Hyper|Ctrl/~|AltTab| Cmd | BkLft| Shift| Space|EntLft| Left | Down | Up | Right| `-----------------------------------------------------------------------------------' ### Dvorak ,-----------------------------------------------------------------------------------. | Tab | ' | , | . | P | Y | F | G | C | R | L | Bksp | |------+------+------+------+------+-------------+------+------+------+------+------| | Esc | A | O | E | Lwr/U| I | D | Rse/H| T | N | S | / | |------+------+------+------+------+------|------+------+------+------+------+------| | Shift| ; | Q | J | K | X | B | M | W | V | Z | Enter| |------+------+------+------+------+------+------+------+------+------+------+------| | Hyper|Ctrl/~|AltTab| Cmd | BkLft| Shift| Space|EntLft| Left | Down | Up | Right| `-----------------------------------------------------------------------------------' ### Raise ,-----------------------------------------------------------------------------------. 
| | < | > | { | } | | | | | | | | |------+------+------+------+------+-------------+------+------+------+------+------| | | ( | ) | [ | ] | | | | | | | | |------+------+------+------+------+------|------+------+------+------+------+------| | | | | | | | | | | | | | |------+------+------+------+------+------+------+------+------+------+------+------| | | | | | BkLft| Shift| Space|EntLft| | | | | `-----------------------------------------------------------------------------------' ### Lower ,-----------------------------------------------------------------------------------. | | | | | | | | 7 | 8 | 9 | | | |------+------+------+------+------+-------------+------+------+------+------+------| | | | | | | | | 4 | 5 | 6 | | | |------+------+------+------+------+------|------+------+------+------+------+------| | | | | | | | | 1 | 2 | 3 | | | |------+------+------+------+------+------+------+------+------+------+------+------| | | | | | BkLft| Shift| Space| 0 | 0 | . | | | `-----------------------------------------------------------------------------------' ### Lift ,-----------------------------------------------------------------------------------. 
| |Hist ←| Tab ←| ↑ | Tab →|Hist →| Mute | |ScrLft|ScrFul|ScrRgt| | |------+------+------+------+------+-------------+------+------+------+------+------| | | | ← | ↓ | → | Pg Up|Vol up|Slk up| | |Qwerty| | |------+------+------+------+------+------|------+------+------+------+------+------| | | | | | | Pg Dn|Vol dn|Slk dn| | |Dvorak| | |------+------+------+------+------+------+------+------+------+------+------+------| | | Reset| | | | Bksp | | Shift| Cmd | Opt | Ctrl | | `-----------------------------------------------------------------------------------' Special things: * Hist: Back button in your browser * Tab: Move one tab left/right in most mac apps * Slk: Go to the previous/next unread conversation in Slack * Scr: Make this app fill the left/right side of your monitor (or full screen) * Since arrows are on this layer, the command keys are moved to the right side
package digitalocean import ( "fmt" "log" "strings" "time" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "github.com/pearkes/digitalocean" ) func resourceDigitalOceanDroplet() *schema.Resource { return &schema.Resource{ Create: resourceDigitalOceanDropletCreate, Read: resourceDigitalOceanDropletRead, Update: resourceDigitalOceanDropletUpdate, Delete: resourceDigitalOceanDropletDelete, Schema: map[string]*schema.Schema{ "image": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, "name": &schema.Schema{ Type: schema.TypeString, Required: true, }, "region": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, "size": &schema.Schema{ Type: schema.TypeString, Required: true, }, "status": &schema.Schema{ Type: schema.TypeString, Computed: true, }, "locked": &schema.Schema{ Type: schema.TypeString, Computed: true, }, "backups": &schema.Schema{ Type: schema.TypeBool, Optional: true, }, "ipv6": &schema.Schema{ Type: schema.TypeBool, Optional: true, }, "ipv6_address": &schema.Schema{ Type: schema.TypeString, Computed: true, }, "ipv6_address_private": &schema.Schema{ Type: schema.TypeString, Computed: true, }, "private_networking": &schema.Schema{ Type: schema.TypeBool, Optional: true, }, "ipv4_address": &schema.Schema{ Type: schema.TypeString, Computed: true, }, "ipv4_address_private": &schema.Schema{ Type: schema.TypeString, Computed: true, }, "ssh_keys": &schema.Schema{ Type: schema.TypeList, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, "user_data": &schema.Schema{ Type: schema.TypeString, Optional: true, }, }, } } func resourceDigitalOceanDropletCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*digitalocean.Client) // Build up our creation options opts := &digitalocean.CreateDroplet{ Image: d.Get("image").(string), Name: d.Get("name").(string), Region: d.Get("region").(string), Size: d.Get("size").(string), } if attr, ok := 
d.GetOk("backups"); ok { opts.Backups = attr.(bool) } if attr, ok := d.GetOk("ipv6"); ok { opts.IPV6 = attr.(bool) } if attr, ok := d.GetOk("private_networking"); ok { opts.PrivateNetworking = attr.(bool) } if attr, ok := d.GetOk("user_data"); ok { opts.UserData = attr.(string) } // Get configured ssh_keys ssh_keys := d.Get("ssh_keys.#").(int) if ssh_keys > 0 { opts.SSHKeys = make([]string, 0, ssh_keys) for i := 0; i < ssh_keys; i++ { key := fmt.Sprintf("ssh_keys.%d", i) opts.SSHKeys = append(opts.SSHKeys, d.Get(key).(string)) } } log.Printf("[DEBUG] Droplet create configuration: %#v", opts) id, err := client.CreateDroplet(opts) if err != nil { return fmt.Errorf("Error creating droplet: %s", err) } // Assign the droplets id d.SetId(id) log.Printf("[INFO] Droplet ID: %s", d.Id()) _, err = WaitForDropletAttribute(d, "active", []string{"new"}, "status", meta) if err != nil { return fmt.Errorf( "Error waiting for droplet (%s) to become ready: %s", d.Id(), err) } return resourceDigitalOceanDropletRead(d, meta) } func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*digitalocean.Client) // Retrieve the droplet properties for updating the state droplet, err := client.RetrieveDroplet(d.Id()) if err != nil { // check if the droplet no longer exists. 
if err.Error() == "Error retrieving droplet: API Error: 404 Not Found" { d.SetId("") return nil } return fmt.Errorf("Error retrieving droplet: %s", err) } if droplet.ImageSlug() != "" { d.Set("image", droplet.ImageSlug()) } else { d.Set("image", droplet.ImageId()) } d.Set("name", droplet.Name) d.Set("region", droplet.RegionSlug()) d.Set("size", droplet.SizeSlug) d.Set("status", droplet.Status) d.Set("locked", droplet.IsLocked()) if droplet.IPV6Address("public") != "" { d.Set("ipv6", true) d.Set("ipv6_address", droplet.IPV6Address("public")) d.Set("ipv6_address_private", droplet.IPV6Address("private")) } d.Set("ipv4_address", droplet.IPV4Address("public")) if droplet.NetworkingType() == "private" { d.Set("private_networking", true) d.Set("ipv4_address_private", droplet.IPV4Address("private")) } // Initialize the connection info d.SetConnInfo(map[string]string{ "type": "ssh", "host": droplet.IPV4Address("public"), }) return nil } func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) error { client := meta.(*digitalocean.Client) if d.HasChange("size") { oldSize, newSize := d.GetChange("size") err := client.PowerOff(d.Id()) if err != nil && !strings.Contains(err.Error(), "Droplet is already powered off") { return fmt.Errorf( "Error powering off droplet (%s): %s", d.Id(), err) } // Wait for power off _, err = WaitForDropletAttribute(d, "off", []string{"active"}, "status", client) if err != nil { return fmt.Errorf( "Error waiting for droplet (%s) to become powered off: %s", d.Id(), err) } // Resize the droplet err = client.Resize(d.Id(), newSize.(string)) if err != nil { newErr := powerOnAndWait(d, meta) if newErr != nil { return fmt.Errorf( "Error powering on droplet (%s) after failed resize: %s", d.Id(), err) } return fmt.Errorf( "Error resizing droplet (%s): %s", d.Id(), err) } // Wait for the size to change _, err = WaitForDropletAttribute( d, newSize.(string), []string{"", oldSize.(string)}, "size", meta) if err != nil { newErr := 
powerOnAndWait(d, meta) if newErr != nil { return fmt.Errorf( "Error powering on droplet (%s) after waiting for resize to finish: %s", d.Id(), err) } return fmt.Errorf( "Error waiting for resize droplet (%s) to finish: %s", d.Id(), err) } err = client.PowerOn(d.Id()) if err != nil { return fmt.Errorf( "Error powering on droplet (%s) after resize: %s", d.Id(), err) } // Wait for power off _, err = WaitForDropletAttribute(d, "active", []string{"off"}, "status", meta) if err != nil { return err } } if d.HasChange("name") { oldName, newName := d.GetChange("name") // Rename the droplet err := client.Rename(d.Id(), newName.(string)) if err != nil { return fmt.Errorf( "Error renaming droplet (%s): %s", d.Id(), err) } // Wait for the name to change _, err = WaitForDropletAttribute( d, newName.(string), []string{"", oldName.(string)}, "name", meta) if err != nil { return fmt.Errorf( "Error waiting for rename droplet (%s) to finish: %s", d.Id(), err) } } // As there is no way to disable private networking, // we only check if it needs to be enabled if d.HasChange("private_networking") && d.Get("private_networking").(bool) { err := client.EnablePrivateNetworking(d.Id()) if err != nil { return fmt.Errorf( "Error enabling private networking for droplet (%s): %s", d.Id(), err) } // Wait for the private_networking to turn on _, err = WaitForDropletAttribute( d, "true", []string{"", "false"}, "private_networking", meta) return fmt.Errorf( "Error waiting for private networking to be enabled on for droplet (%s): %s", d.Id(), err) } // As there is no way to disable IPv6, we only check if it needs to be enabled if d.HasChange("ipv6") && d.Get("ipv6").(bool) { err := client.EnableIPV6s(d.Id()) if err != nil { return fmt.Errorf( "Error turning on ipv6 for droplet (%s): %s", d.Id(), err) } // Wait for ipv6 to turn on _, err = WaitForDropletAttribute( d, "true", []string{"", "false"}, "ipv6", meta) if err != nil { return fmt.Errorf( "Error waiting for ipv6 to be turned on for droplet 
(%s): %s", d.Id(), err) } } return resourceDigitalOceanDropletRead(d, meta) } func resourceDigitalOceanDropletDelete(d *schema.ResourceData, meta interface{}) error { client := meta.(*digitalocean.Client) _, err := WaitForDropletAttribute( d, "false", []string{"", "true"}, "locked", meta) if err != nil { return fmt.Errorf( "Error waiting for droplet to be unlocked for destroy (%s): %s", d.Id(), err) } log.Printf("[INFO] Deleting droplet: %s", d.Id()) // Destroy the droplet err = client.DestroyDroplet(d.Id()) // Handle remotely destroyed droplets if err != nil && strings.Contains(err.Error(), "404 Not Found") { return nil } if err != nil { return fmt.Errorf("Error deleting droplet: %s", err) } return nil } func WaitForDropletAttribute( d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) { // Wait for the droplet so we can get the networking attributes // that show up after a while log.Printf( "[INFO] Waiting for droplet (%s) to have %s of %s", d.Id(), attribute, target) stateConf := &resource.StateChangeConf{ Pending: pending, Target: target, Refresh: newDropletStateRefreshFunc(d, attribute, meta), Timeout: 60 * time.Minute, Delay: 10 * time.Second, MinTimeout: 3 * time.Second, // This is a hack around DO API strangeness. // https://github.com/hashicorp/terraform/issues/481 // NotFoundChecks: 60, } return stateConf.WaitForState() } // TODO This function still needs a little more refactoring to make it // cleaner and more efficient func newDropletStateRefreshFunc( d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { client := meta.(*digitalocean.Client) return func() (interface{}, string, error) { err := resourceDigitalOceanDropletRead(d, meta) if err != nil { return nil, "", err } // If the droplet is locked, continue waiting. 
We can // only perform actions on unlocked droplets, so it's // pointless to look at that status if d.Get("locked").(string) == "true" { log.Println("[DEBUG] Droplet is locked, skipping status check and retrying") return nil, "", nil } // See if we can access our attribute if attr, ok := d.GetOk(attribute); ok { // Retrieve the droplet properties droplet, err := client.RetrieveDroplet(d.Id()) if err != nil { return nil, "", fmt.Errorf("Error retrieving droplet: %s", err) } return &droplet, attr.(string), nil } return nil, "", nil } } // Powers on the droplet and waits for it to be active func powerOnAndWait(d *schema.ResourceData, meta interface{}) error { client := meta.(*digitalocean.Client) err := client.PowerOn(d.Id()) if err != nil { return err } // Wait for power on _, err = WaitForDropletAttribute(d, "active", []string{"off"}, "status", client) if err != nil { return err } return nil }
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.presto.operator.aggregation; import com.facebook.presto.operator.aggregation.state.RegressionState; import com.facebook.presto.spi.block.BlockBuilder; import com.facebook.presto.spi.type.StandardTypes; import com.facebook.presto.type.SqlType; import static com.facebook.presto.operator.aggregation.AggregationUtils.mergeRegressionState; import static com.facebook.presto.operator.aggregation.AggregationUtils.updateRegressionState; import static com.facebook.presto.spi.type.DoubleType.DOUBLE; @AggregationFunction("") // Names are on output methods public class RegressionAggregation { private RegressionAggregation() {} @InputFunction public static void input(RegressionState state, @SqlType(StandardTypes.DOUBLE) double dependentValue, @SqlType(StandardTypes.DOUBLE) double independentValue) { updateRegressionState(state, independentValue, dependentValue); } @CombineFunction public static void combine(RegressionState state, RegressionState otherState) { mergeRegressionState(state, otherState); } @AggregationFunction("regr_slope") @OutputFunction(StandardTypes.DOUBLE) public static void regrSlope(RegressionState state, BlockBuilder out) { // Math comes from ISO9075-2:2011(E) 10.9 General Rules 7 c xii double dividend = state.getCount() * state.getSumXY() - state.getSumX() * state.getSumY(); double divisor = state.getCount() * state.getSumXSquare() - state.getSumX() * state.getSumX(); // divisor deliberately not 
checked for zero because the result can be Infty or NaN even if it is not zero double result = dividend / divisor; if (Double.isFinite(result)) { DOUBLE.writeDouble(out, result); } else { out.appendNull(); } } @AggregationFunction("regr_intercept") @OutputFunction(StandardTypes.DOUBLE) public static void regrIntercept(RegressionState state, BlockBuilder out) { // Math comes from ISO9075-2:2011(E) 10.9 General Rules 7 c xiii double dividend = state.getSumY() * state.getSumXSquare() - state.getSumX() * state.getSumXY(); double divisor = state.getCount() * state.getSumXSquare() - state.getSumX() * state.getSumX(); // divisor deliberately not checked for zero because the result can be Infty or NaN even if it is not zero double result = dividend / divisor; if (Double.isFinite(result)) { DOUBLE.writeDouble(out, result); } else { out.appendNull(); } } }
package com.intellij.codeInsight.daemon.quickFix;

import com.intellij.codeInspection.LocalInspectionTool;
import com.intellij.codeInspection.uncheckedWarnings.UncheckedWarningLocalInspection;
import org.jetbrains.annotations.NotNull;

/**
 * Availability tests for the "generify file" quick fix, driven by the
 * test-data directory returned from {@link #getBasePath()}.
 */
public class GenerifyFileTest extends LightQuickFixAvailabilityTestCase {
  @NotNull
  @Override
  protected LocalInspectionTool[] configureLocalInspectionTools() {
    // The generify fix is offered on unchecked-warning highlights, so that
    // inspection must be enabled for these tests.
    final UncheckedWarningLocalInspection inspection = new UncheckedWarningLocalInspection();
    return new LocalInspectionTool[]{inspection};
  }

  public void test() throws Exception {
    doAllTests();
  }

  @Override
  protected String getBasePath() {
    return "/codeInsight/daemonCodeAnalyzer/quickFix/generifyFile";
  }
}
/* { dg-do run { target { ia32 } } } */ /* { dg-options "-O0 -mtune=pentium" } */ #include "readeflags-1.c"
// (C) Copyright 2008 CodeRage, LLC (turkanis at coderage dot com)
// (C) Copyright 2003-2007 Jonathan Turkanis
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.)

// See http://www.boost.org/libs/iostreams for documentation.

#ifndef BOOST_IOSTREAMS_STREAM_HPP_INCLUDED
#define BOOST_IOSTREAMS_STREAM_HPP_INCLUDED

#if defined(_MSC_VER)
# pragma once
#endif

#include <boost/iostreams/constants.hpp>
#include <boost/iostreams/detail/char_traits.hpp>
#include <boost/iostreams/detail/config/overload_resolution.hpp>
#include <boost/iostreams/detail/forward.hpp>
#include <boost/iostreams/detail/iostream.hpp>  // standard streams.
#include <boost/iostreams/detail/select.hpp>
#include <boost/iostreams/stream_buffer.hpp>
#include <boost/mpl/and.hpp>
#include <boost/type_traits/is_convertible.hpp>
#include <boost/utility/base_from_member.hpp>

namespace boost { namespace iostreams { namespace detail {

// Maps a Device (plus its char traits) to the standard stream base class it
// should expose: basic_iostream when the device's mode is convertible to both
// input and output, basic_istream for input-only modes, basic_ostream
// otherwise. stream_tag mirrors the same three-way choice as a tag type.
template<typename Device, typename Tr>
struct stream_traits {
    typedef typename char_type_of<Device>::type                char_type;
    typedef Tr                                                 traits_type;
    typedef typename category_of<Device>::type                 mode;
    typedef typename
            iostreams::select< // Disambiguation required for Tru64.
                mpl::and_<
                    is_convertible<mode, input>,
                    is_convertible<mode, output>
                >,
                BOOST_IOSTREAMS_BASIC_IOSTREAM(char_type, traits_type),
                is_convertible<mode, input>,
                BOOST_IOSTREAMS_BASIC_ISTREAM(char_type, traits_type),
                else_,
                BOOST_IOSTREAMS_BASIC_OSTREAM(char_type, traits_type)
            >::type stream_type;
    typedef typename
            iostreams::select< // Disambiguation required for Tru64.
                mpl::and_<
                    is_convertible<mode, input>,
                    is_convertible<mode, output>
                >,
                iostream_tag,
                is_convertible<mode, input>,
                istream_tag,
                else_,
                ostream_tag
            >::type stream_tag;
};

#if defined(BOOST_MSVC) && (BOOST_MSVC == 1700)
# pragma warning(push)
// https://connect.microsoft.com/VisualStudio/feedback/details/733720/
# pragma warning(disable: 4250)
#endif

// By encapsulating initialization in a base, we can define the macro
// BOOST_IOSTREAMS_DEFINE_FORWARDING_FUNCTIONS to generate constructors
// without base member initializer lists.
template< typename Device,
          typename Tr =
              BOOST_IOSTREAMS_CHAR_TRAITS(
                  BOOST_DEDUCED_TYPENAME char_type_of<Device>::type
              ),
          typename Alloc =
              std::allocator<
                  BOOST_DEDUCED_TYPENAME char_type_of<Device>::type
              >,
          typename Base = // VC6 Workaround.
              BOOST_DEDUCED_TYPENAME
              detail::stream_traits<Device, Tr>::stream_type >
class stream_base
    : protected base_from_member< stream_buffer<Device, Tr, Alloc> >,
      public Base
{
private:
    typedef base_from_member< stream_buffer<Device, Tr, Alloc> >  pbase_type;
    typedef typename stream_traits<Device, Tr>::stream_type       stream_type;
protected:
    using pbase_type::member; // Avoid warning about 'this' in initializer list.
public:
    // base_from_member guarantees the stream buffer is constructed before the
    // std stream base, so passing &member here is safe.
    stream_base() : pbase_type(), stream_type(&member) { }
};

#if defined(BOOST_MSVC) && (BOOST_MSVC == 1700)
# pragma warning(pop)
#endif

} } } // End namespaces detail, iostreams, boost.

#ifdef BOOST_IOSTREAMS_BROKEN_OVERLOAD_RESOLUTION
# include <boost/iostreams/detail/broken_overload_resolution/stream.hpp>
#else

namespace boost { namespace iostreams {

#if defined(BOOST_MSVC) && (BOOST_MSVC == 1700)
# pragma warning(push)
// https://connect.microsoft.com/VisualStudio/feedback/details/733720/
# pragma warning(disable: 4250)
#endif

//
// Template name: stream.
// Description: An iostream which reads from and writes to an instance of a
//      designated device type.
// Template parameters:
//      Device - A device type.
//      Alloc - The allocator type.
//
template< typename Device,
          typename Tr =
              BOOST_IOSTREAMS_CHAR_TRAITS(
                  BOOST_DEDUCED_TYPENAME char_type_of<Device>::type
              ),
          typename Alloc =
              std::allocator<
                  BOOST_DEDUCED_TYPENAME char_type_of<Device>::type
              > >
struct stream : detail::stream_base<Device, Tr, Alloc> {
public:
    typedef typename char_type_of<Device>::type  char_type;
    struct category
        : mode_of<Device>::type,
          closable_tag,
          detail::stream_traits<Device, Tr>::stream_tag
        { };
    BOOST_IOSTREAMS_STREAMBUF_TYPEDEFS(Tr)
private:
    typedef typename
            detail::stream_traits<
                Device, Tr
            >::stream_type                       stream_type;
public:
    stream() { }
    // Generates forwarding constructors and open() overloads that accept the
    // same argument lists as the Device's constructors.
    BOOST_IOSTREAMS_FORWARD( stream, open_impl, Device,
                             BOOST_IOSTREAMS_PUSH_PARAMS,
                             BOOST_IOSTREAMS_PUSH_ARGS )
    bool is_open() const { return this->member.is_open(); }
    void close() { this->member.close(); }
    bool auto_close() const { return this->member.auto_close(); }
    void set_auto_close(bool close) { this->member.set_auto_close(close); }
    bool strict_sync() { return this->member.strict_sync(); }
    Device& operator*() { return *this->member; }
    Device* operator->() { return &*this->member; }
    Device* component() { return this->member.component(); }
private:
    // Clears stream state before delegating to the buffer, so that reopening
    // a previously failed stream works.
    void open_impl(const Device& dev BOOST_IOSTREAMS_PUSH_PARAMS()) // For forwarding.
    {
        this->clear();
        this->member.open(dev BOOST_IOSTREAMS_PUSH_ARGS());
    }
};

#if defined(BOOST_MSVC) && (BOOST_MSVC == 1700)
# pragma warning(pop)
#endif

} } // End namespaces iostreams, boost.

#endif // #ifdef BOOST_IOSTREAMS_BROKEN_OVERLOAD_RESOLUTION

#endif // #ifndef BOOST_IOSTREAMS_STREAM_HPP_INCLUDED
<!doctype html> <html> <head> <title>Sample App</title> </head> <body> <div id='root'> </div> <script src="/static/bundle.js"></script> </body> </html>
require 'rails_helper'

# Model spec for Comment — rspec-rails scaffold placeholder.
# The `pending` call keeps the suite green while flagging that real
# examples still need to be written (or this file deleted).
RSpec.describe Comment, type: :model do
  pending "add some examples to (or delete) #{__FILE__}"
end
<!doctype html>
<html lang="en">
<head>
	<meta charset="utf-8">
	<title>jQuery UI Sortable - Drop placeholder</title>
	<link rel="stylesheet" href="../../themes/base/jquery.ui.all.css">
	<script src="../../jquery-1.10.2.js"></script>
	<script src="../../ui/jquery.ui.core.js"></script>
	<script src="../../ui/jquery.ui.widget.js"></script>
	<script src="../../ui/jquery.ui.mouse.js"></script>
	<script src="../../ui/jquery.ui.sortable.js"></script>
	<link rel="stylesheet" href="../demos.css">
	<style>
	#sortable { list-style-type: none; margin: 0; padding: 0; width: 60%; }
	#sortable li { margin: 0 5px 5px 5px; padding: 5px; font-size: 1.2em; height: 1.5em; }
	html>body #sortable li { height: 1.5em; line-height: 1.2em; }
	.ui-state-highlight { height: 1.5em; line-height: 1.2em; }
	</style>
	<script>
	$(function() {
		$( "#sortable" ).sortable({
			placeholder: "ui-state-highlight"
		});
		$( "#sortable" ).disableSelection();
	});
	</script>
</head>
<body>

<ul id="sortable">
	<li class="ui-state-default">Item 1</li>
	<li class="ui-state-default">Item 2</li>
	<li class="ui-state-default">Item 3</li>
	<li class="ui-state-default">Item 4</li>
	<li class="ui-state-default">Item 5</li>
	<li class="ui-state-default">Item 6</li>
	<li class="ui-state-default">Item 7</li>
</ul>

<div class="demo-description">
<p>
	When dragging a sortable item to a new location, other items will make room
	for that item by shifting to allow white space between them.
	Pass a class into the <code>placeholder</code> option to style that space to
	be visible. Use the boolean <code>forcePlaceholderSize</code>
	option to set dimensions on the placeholder.
</p>
</div>
</body>
</html>
<?php

/*
 * This file is part of the Symfony package.
 *
 * (c) Fabien Potencier <fabien@symfony.com>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

namespace Symfony\Component\Routing\Generator\Dumper;

/**
 * PhpGeneratorDumper creates a PHP class able to generate URLs for a given set of routes.
 *
 * @author Fabien Potencier <fabien@symfony.com>
 * @author Tobias Schultze <http://tobion.de>
 *
 * @api
 */
class PhpGeneratorDumper extends GeneratorDumper
{
    /**
     * Dumps a set of routes to a PHP class.
     *
     * Available options:
     *
     *  * class:      The class name
     *  * base_class: The base class name
     *
     * @param array $options An array of options
     *
     * @return string A PHP class representing the generator class
     *
     * @api
     */
    public function dump(array $options = array())
    {
        // Caller-supplied options override these defaults.
        $options = array_merge(array(
            'class'      => 'ProjectUrlGenerator',
            'base_class' => 'Symfony\\Component\\Routing\\Generator\\UrlGenerator',
        ), $options);

        // The heredoc below is the source of the generated class; dollar signs
        // that belong to the *generated* code are escaped (\$), while
        // {$options[...]} and {$this->...} are interpolated at dump time.
        return <<<EOF
<?php

use Symfony\Component\Routing\RequestContext;
use Symfony\Component\Routing\Exception\RouteNotFoundException;
use Psr\Log\LoggerInterface;

/**
 * {$options['class']}
 *
 * This class has been auto-generated
 * by the Symfony Routing Component.
 */
class {$options['class']} extends {$options['base_class']}
{
    private static \$declaredRoutes = {$this->generateDeclaredRoutes()};

    /**
     * Constructor.
     */
    public function __construct(RequestContext \$context, LoggerInterface \$logger = null)
    {
        \$this->context = \$context;
        \$this->logger = \$logger;
    }

{$this->generateGenerateMethod()}
}

EOF;
    }

    /**
     * Generates PHP code representing an array of defined routes
     * together with the routes properties (e.g. requirements).
     *
     * @return string PHP code
     */
    private function generateDeclaredRoutes()
    {
        $routes = "array(\n";
        foreach ($this->getRoutes()->all() as $name => $route) {
            $compiledRoute = $route->compile();

            // Property order must match the list() destructuring in the
            // generated generate() method below.
            $properties = array();
            $properties[] = $compiledRoute->getVariables();
            $properties[] = $route->getDefaults();
            $properties[] = $route->getRequirements();
            $properties[] = $compiledRoute->getTokens();
            $properties[] = $compiledRoute->getHostTokens();

            // var_export output is flattened onto one line per route.
            $routes .= sprintf("        '%s' => %s,\n", $name, str_replace("\n", '', var_export($properties, true)));
        }
        $routes .= '    )';

        return $routes;
    }

    /**
     * Generates PHP code representing the `generate` method that implements the UrlGeneratorInterface.
     *
     * @return string PHP code
     */
    private function generateGenerateMethod()
    {
        return <<<EOF
    public function generate(\$name, \$parameters = array(), \$referenceType = self::ABSOLUTE_PATH)
    {
        if (!isset(self::\$declaredRoutes[\$name])) {
            throw new RouteNotFoundException(sprintf('Unable to generate a URL for the named route "%s" as such route does not exist.', \$name));
        }

        list(\$variables, \$defaults, \$requirements, \$tokens, \$hostTokens) = self::\$declaredRoutes[\$name];

        return \$this->doGenerate(\$variables, \$defaults, \$requirements, \$tokens, \$parameters, \$name, \$referenceType, \$hostTokens);
    }
EOF;
    }
}
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved. * Copyright (c) 2020, Intel Corporation. All rights reserved. */ #include <linux/debugfs.h> #include <linux/highmem.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/bitmap.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/task.h> #include <linux/delay.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> #include <linux/mlx5/port.h> #include <linux/mlx5/vport.h> #include <linux/mlx5/fs.h> #include <linux/mlx5/eswitch.h> #include <linux/list.h> #include <rdma/ib_smi.h> #include <rdma/ib_umem.h> #include <rdma/lag.h> #include <linux/in.h> #include <linux/etherdevice.h> #include "mlx5_ib.h" #include "ib_rep.h" #include "cmd.h" #include "devx.h" #include "dm.h" #include "fs.h" #include "srq.h" #include "qp.h" #include "wr.h" #include "restrack.h" #include "counters.h" #include <linux/mlx5/accel.h> #include <rdma/uverbs_std_types.h> #include <rdma/uverbs_ioctl.h> #include <rdma/mlx5_user_ioctl_verbs.h> #include <rdma/mlx5_user_ioctl_cmds.h> #include <rdma/ib_umem_odp.h> #define UVERBS_MODULE_NAME mlx5_ib #include <rdma/uverbs_named_ioctl.h> MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver"); MODULE_LICENSE("Dual BSD/GPL"); struct mlx5_ib_event_work { struct work_struct work; union { struct mlx5_ib_dev *dev; struct mlx5_ib_multiport_info *mpi; }; bool is_slave; unsigned int event; void *param; }; enum { MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3, }; static struct workqueue_struct *mlx5_ib_event_wq; static LIST_HEAD(mlx5_ib_unaffiliated_port_list); static LIST_HEAD(mlx5_ib_dev_list); /* * This mutex should be held when accessing either of the above lists */ static 
DEFINE_MUTEX(mlx5_ib_multiport_mutex); struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi) { struct mlx5_ib_dev *dev; mutex_lock(&mlx5_ib_multiport_mutex); dev = mpi->ibdev; mutex_unlock(&mlx5_ib_multiport_mutex); return dev; } static enum rdma_link_layer mlx5_port_type_cap_to_rdma_ll(int port_type_cap) { switch (port_type_cap) { case MLX5_CAP_PORT_TYPE_IB: return IB_LINK_LAYER_INFINIBAND; case MLX5_CAP_PORT_TYPE_ETH: return IB_LINK_LAYER_ETHERNET; default: return IB_LINK_LAYER_UNSPECIFIED; } } static enum rdma_link_layer mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num) { struct mlx5_ib_dev *dev = to_mdev(device); int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type); return mlx5_port_type_cap_to_rdma_ll(port_type_cap); } static int get_port_state(struct ib_device *ibdev, u32 port_num, enum ib_port_state *state) { struct ib_port_attr attr; int ret; memset(&attr, 0, sizeof(attr)); ret = ibdev->ops.query_port(ibdev, port_num, &attr); if (!ret) *state = attr.state; return ret; } static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev, struct net_device *ndev, struct net_device *upper, u32 *port_num) { struct net_device *rep_ndev; struct mlx5_ib_port *port; int i; for (i = 0; i < dev->num_ports; i++) { port = &dev->port[i]; if (!port->rep) continue; if (upper == ndev && port->rep->vport == MLX5_VPORT_UPLINK) { *port_num = i + 1; return &port->roce; } if (upper && port->rep->vport == MLX5_VPORT_UPLINK) continue; read_lock(&port->roce.netdev_lock); rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw, port->rep->vport); if (rep_ndev == ndev) { read_unlock(&port->roce.netdev_lock); *port_num = i + 1; return &port->roce; } read_unlock(&port->roce.netdev_lock); } return NULL; } static int mlx5_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb); struct net_device *ndev = netdev_notifier_info_to_dev(ptr); u32 port_num = 
roce->native_port_num; struct mlx5_core_dev *mdev; struct mlx5_ib_dev *ibdev; ibdev = roce->dev; mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL); if (!mdev) return NOTIFY_DONE; switch (event) { case NETDEV_REGISTER: /* Should already be registered during the load */ if (ibdev->is_rep) break; write_lock(&roce->netdev_lock); if (ndev->dev.parent == mdev->device) roce->netdev = ndev; write_unlock(&roce->netdev_lock); break; case NETDEV_UNREGISTER: /* In case of reps, ib device goes away before the netdevs */ write_lock(&roce->netdev_lock); if (roce->netdev == ndev) roce->netdev = NULL; write_unlock(&roce->netdev_lock); break; case NETDEV_CHANGE: case NETDEV_UP: case NETDEV_DOWN: { struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev); struct net_device *upper = NULL; if (lag_ndev) { upper = netdev_master_upper_dev_get(lag_ndev); dev_put(lag_ndev); } if (ibdev->is_rep) roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num); if (!roce) return NOTIFY_DONE; if ((upper == ndev || ((!upper || ibdev->is_rep) && ndev == roce->netdev)) && ibdev->ib_active) { struct ib_event ibev = { }; enum ib_port_state port_state; if (get_port_state(&ibdev->ib_dev, port_num, &port_state)) goto done; if (roce->last_port_state == port_state) goto done; roce->last_port_state = port_state; ibev.device = &ibdev->ib_dev; if (port_state == IB_PORT_DOWN) ibev.event = IB_EVENT_PORT_ERR; else if (port_state == IB_PORT_ACTIVE) ibev.event = IB_EVENT_PORT_ACTIVE; else goto done; ibev.element.port_num = port_num; ib_dispatch_event(&ibev); } break; } default: break; } done: mlx5_ib_put_native_port_mdev(ibdev, port_num); return NOTIFY_DONE; } static struct net_device *mlx5_ib_get_netdev(struct ib_device *device, u32 port_num) { struct mlx5_ib_dev *ibdev = to_mdev(device); struct net_device *ndev; struct mlx5_core_dev *mdev; mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL); if (!mdev) return NULL; ndev = mlx5_lag_get_roce_netdev(mdev); if (ndev) goto out; /* Ensure ndev does not 
disappear before we invoke dev_hold() */ read_lock(&ibdev->port[port_num - 1].roce.netdev_lock); ndev = ibdev->port[port_num - 1].roce.netdev; if (ndev) dev_hold(ndev); read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock); out: mlx5_ib_put_native_port_mdev(ibdev, port_num); return ndev; } struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 ib_port_num, u32 *native_port_num) { enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev, ib_port_num); struct mlx5_core_dev *mdev = NULL; struct mlx5_ib_multiport_info *mpi; struct mlx5_ib_port *port; if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET) { if (native_port_num) *native_port_num = ib_port_num; return ibdev->mdev; } if (native_port_num) *native_port_num = 1; port = &ibdev->port[ib_port_num - 1]; spin_lock(&port->mp.mpi_lock); mpi = ibdev->port[ib_port_num - 1].mp.mpi; if (mpi && !mpi->unaffiliate) { mdev = mpi->mdev; /* If it's the master no need to refcount, it'll exist * as long as the ib_dev exists. 
*/ if (!mpi->is_master) mpi->mdev_refcnt++; } spin_unlock(&port->mp.mpi_lock); return mdev; } void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num) { enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev, port_num); struct mlx5_ib_multiport_info *mpi; struct mlx5_ib_port *port; if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET) return; port = &ibdev->port[port_num - 1]; spin_lock(&port->mp.mpi_lock); mpi = ibdev->port[port_num - 1].mp.mpi; if (mpi->is_master) goto out; mpi->mdev_refcnt--; if (mpi->unaffiliate) complete(&mpi->unref_comp); out: spin_unlock(&port->mp.mpi_lock); } static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u16 *active_speed, u8 *active_width) { switch (eth_proto_oper) { case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII): case MLX5E_PROT_MASK(MLX5E_1000BASE_KX): case MLX5E_PROT_MASK(MLX5E_100BASE_TX): case MLX5E_PROT_MASK(MLX5E_1000BASE_T): *active_width = IB_WIDTH_1X; *active_speed = IB_SPEED_SDR; break; case MLX5E_PROT_MASK(MLX5E_10GBASE_T): case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4): case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4): case MLX5E_PROT_MASK(MLX5E_10GBASE_KR): case MLX5E_PROT_MASK(MLX5E_10GBASE_CR): case MLX5E_PROT_MASK(MLX5E_10GBASE_SR): case MLX5E_PROT_MASK(MLX5E_10GBASE_ER): *active_width = IB_WIDTH_1X; *active_speed = IB_SPEED_QDR; break; case MLX5E_PROT_MASK(MLX5E_25GBASE_CR): case MLX5E_PROT_MASK(MLX5E_25GBASE_KR): case MLX5E_PROT_MASK(MLX5E_25GBASE_SR): *active_width = IB_WIDTH_1X; *active_speed = IB_SPEED_EDR; break; case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4): case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4): case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4): case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4): *active_width = IB_WIDTH_4X; *active_speed = IB_SPEED_QDR; break; case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2): case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2): case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2): *active_width = IB_WIDTH_1X; *active_speed = IB_SPEED_HDR; break; case MLX5E_PROT_MASK(MLX5E_56GBASE_R4): 
*active_width = IB_WIDTH_4X; *active_speed = IB_SPEED_FDR; break; case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4): case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4): case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4): case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4): *active_width = IB_WIDTH_4X; *active_speed = IB_SPEED_EDR; break; default: return -EINVAL; } return 0; } static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed, u8 *active_width) { switch (eth_proto_oper) { case MLX5E_PROT_MASK(MLX5E_SGMII_100M): case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII): *active_width = IB_WIDTH_1X; *active_speed = IB_SPEED_SDR; break; case MLX5E_PROT_MASK(MLX5E_5GBASE_R): *active_width = IB_WIDTH_1X; *active_speed = IB_SPEED_DDR; break; case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1): *active_width = IB_WIDTH_1X; *active_speed = IB_SPEED_QDR; break; case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4): *active_width = IB_WIDTH_4X; *active_speed = IB_SPEED_QDR; break; case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR): *active_width = IB_WIDTH_1X; *active_speed = IB_SPEED_EDR; break; case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2): *active_width = IB_WIDTH_2X; *active_speed = IB_SPEED_EDR; break; case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR): *active_width = IB_WIDTH_1X; *active_speed = IB_SPEED_HDR; break; case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4): *active_width = IB_WIDTH_4X; *active_speed = IB_SPEED_EDR; break; case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2): *active_width = IB_WIDTH_2X; *active_speed = IB_SPEED_HDR; break; case MLX5E_PROT_MASK(MLX5E_100GAUI_1_100GBASE_CR_KR): *active_width = IB_WIDTH_1X; *active_speed = IB_SPEED_NDR; break; case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4): *active_width = IB_WIDTH_4X; *active_speed = IB_SPEED_HDR; break; case MLX5E_PROT_MASK(MLX5E_200GAUI_2_200GBASE_CR2_KR2): *active_width = IB_WIDTH_2X; *active_speed = IB_SPEED_NDR; break; case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4): *active_width = 
IB_WIDTH_4X; *active_speed = IB_SPEED_NDR; break; default: return -EINVAL; } return 0; } static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed, u8 *active_width, bool ext) { return ext ? translate_eth_ext_proto_oper(eth_proto_oper, active_speed, active_width) : translate_eth_legacy_proto_oper(eth_proto_oper, active_speed, active_width); } static int mlx5_query_port_roce(struct ib_device *device, u32 port_num, struct ib_port_attr *props) { struct mlx5_ib_dev *dev = to_mdev(device); u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; struct mlx5_core_dev *mdev; struct net_device *ndev, *upper; enum ib_mtu ndev_ib_mtu; bool put_mdev = true; u32 eth_prot_oper; u32 mdev_port_num; bool ext; int err; mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num); if (!mdev) { /* This means the port isn't affiliated yet. Get the * info for the master port instead. */ put_mdev = false; mdev = dev->mdev; mdev_port_num = 1; port_num = 1; } /* Possible bad flows are checked before filling out props so in case * of an error it will still be zeroed out. 
* Use native port in case of reps */ if (dev->is_rep) err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); else err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, mdev_port_num); if (err) goto out; ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability); eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper); props->active_width = IB_WIDTH_4X; props->active_speed = IB_SPEED_QDR; translate_eth_proto_oper(eth_prot_oper, &props->active_speed, &props->active_width, ext); if (!dev->is_rep && dev->mdev->roce.roce_en) { u16 qkey_viol_cntr; props->port_cap_flags |= IB_PORT_CM_SUP; props->ip_gids = true; props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size); mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr); props->qkey_viol_cntr = qkey_viol_cntr; } props->max_mtu = IB_MTU_4096; props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg); props->pkey_tbl_len = 1; props->state = IB_PORT_DOWN; props->phys_state = IB_PORT_PHYS_STATE_DISABLED; /* If this is a stub query for an unaffiliated port stop here */ if (!put_mdev) goto out; ndev = mlx5_ib_get_netdev(device, port_num); if (!ndev) goto out; if (dev->lag_active) { rcu_read_lock(); upper = netdev_master_upper_dev_get_rcu(ndev); if (upper) { dev_put(ndev); ndev = upper; dev_hold(ndev); } rcu_read_unlock(); } if (netif_running(ndev) && netif_carrier_ok(ndev)) { props->state = IB_PORT_ACTIVE; props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; } ndev_ib_mtu = iboe_get_mtu(ndev->mtu); dev_put(ndev); props->active_mtu = min(props->max_mtu, ndev_ib_mtu); out: if (put_mdev) mlx5_ib_put_native_port_mdev(dev, port_num); return err; } static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num, unsigned int index, const union ib_gid *gid, const struct ib_gid_attr *attr) { enum ib_gid_type gid_type; u16 vlan_id = 0xffff; u8 roce_version = 0; u8 roce_l3_type = 0; u8 mac[ETH_ALEN]; int ret; gid_type = attr->gid_type; if (gid) { ret = 
rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]); if (ret) return ret; } switch (gid_type) { case IB_GID_TYPE_ROCE: roce_version = MLX5_ROCE_VERSION_1; break; case IB_GID_TYPE_ROCE_UDP_ENCAP: roce_version = MLX5_ROCE_VERSION_2; if (gid && ipv6_addr_v4mapped((void *)gid)) roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4; else roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6; break; default: mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type); } return mlx5_core_roce_gid_set(dev->mdev, index, roce_version, roce_l3_type, gid->raw, mac, vlan_id < VLAN_CFI_MASK, vlan_id, port_num); } static int mlx5_ib_add_gid(const struct ib_gid_attr *attr, __always_unused void **context) { return set_roce_addr(to_mdev(attr->device), attr->port_num, attr->index, &attr->gid, attr); } static int mlx5_ib_del_gid(const struct ib_gid_attr *attr, __always_unused void **context) { return set_roce_addr(to_mdev(attr->device), attr->port_num, attr->index, NULL, attr); } __be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev, const struct ib_gid_attr *attr) { if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) return 0; return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port)); } static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev) { if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB) return !MLX5_CAP_GEN(dev->mdev, ib_virt); return 0; } enum { MLX5_VPORT_ACCESS_METHOD_MAD, MLX5_VPORT_ACCESS_METHOD_HCA, MLX5_VPORT_ACCESS_METHOD_NIC, }; static int mlx5_get_vport_access_method(struct ib_device *ibdev) { if (mlx5_use_mad_ifc(to_mdev(ibdev))) return MLX5_VPORT_ACCESS_METHOD_MAD; if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) return MLX5_VPORT_ACCESS_METHOD_NIC; return MLX5_VPORT_ACCESS_METHOD_HCA; } static void get_atomic_caps(struct mlx5_ib_dev *dev, u8 atomic_size_qp, struct ib_device_attr *props) { u8 tmp; u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations); u8 atomic_req_8B_endianness_mode = MLX5_CAP_ATOMIC(dev->mdev, 
atomic_req_8B_endianness_mode); /* Check if HW supports 8 bytes standard atomic operations and capable * of host endianness respond */ tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD; if (((atomic_operations & tmp) == tmp) && (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) && (atomic_req_8B_endianness_mode)) { props->atomic_cap = IB_ATOMIC_HCA; } else { props->atomic_cap = IB_ATOMIC_NONE; } } static void get_atomic_caps_qp(struct mlx5_ib_dev *dev, struct ib_device_attr *props) { u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp); get_atomic_caps(dev, atomic_size_qp, props); } static int mlx5_query_system_image_guid(struct ib_device *ibdev, __be64 *sys_image_guid) { struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_core_dev *mdev = dev->mdev; u64 tmp; int err; switch (mlx5_get_vport_access_method(ibdev)) { case MLX5_VPORT_ACCESS_METHOD_MAD: return mlx5_query_mad_ifc_system_image_guid(ibdev, sys_image_guid); case MLX5_VPORT_ACCESS_METHOD_HCA: err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp); break; case MLX5_VPORT_ACCESS_METHOD_NIC: err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp); break; default: return -EINVAL; } if (!err) *sys_image_guid = cpu_to_be64(tmp); return err; } static int mlx5_query_max_pkeys(struct ib_device *ibdev, u16 *max_pkeys) { struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_core_dev *mdev = dev->mdev; switch (mlx5_get_vport_access_method(ibdev)) { case MLX5_VPORT_ACCESS_METHOD_MAD: return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys); case MLX5_VPORT_ACCESS_METHOD_HCA: case MLX5_VPORT_ACCESS_METHOD_NIC: *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size)); return 0; default: return -EINVAL; } } static int mlx5_query_vendor_id(struct ib_device *ibdev, u32 *vendor_id) { struct mlx5_ib_dev *dev = to_mdev(ibdev); switch (mlx5_get_vport_access_method(ibdev)) { case MLX5_VPORT_ACCESS_METHOD_MAD: return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id); case MLX5_VPORT_ACCESS_METHOD_HCA: case 
MLX5_VPORT_ACCESS_METHOD_NIC: return mlx5_core_query_vendor_id(dev->mdev, vendor_id); default: return -EINVAL; } } static int mlx5_query_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid) { u64 tmp; int err; switch (mlx5_get_vport_access_method(&dev->ib_dev)) { case MLX5_VPORT_ACCESS_METHOD_MAD: return mlx5_query_mad_ifc_node_guid(dev, node_guid); case MLX5_VPORT_ACCESS_METHOD_HCA: err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp); break; case MLX5_VPORT_ACCESS_METHOD_NIC: err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp); break; default: return -EINVAL; } if (!err) *node_guid = cpu_to_be64(tmp); return err; } struct mlx5_reg_node_desc { u8 desc[IB_DEVICE_NODE_DESC_MAX]; }; static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc) { struct mlx5_reg_node_desc in; if (mlx5_use_mad_ifc(dev)) return mlx5_query_mad_ifc_node_desc(dev, node_desc); memset(&in, 0, sizeof(in)); return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc, sizeof(struct mlx5_reg_node_desc), MLX5_REG_NODE_DESC, 0, 0); } static int mlx5_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) { size_t uhw_outlen = (uhw) ? 
uhw->outlen : 0; struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_core_dev *mdev = dev->mdev; int err = -ENOMEM; int max_sq_desc; int max_rq_sg; int max_sq_sg; u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz); bool raw_support = !mlx5_core_mp_enabled(mdev); struct mlx5_ib_query_device_resp resp = {}; size_t resp_len; u64 max_tso; resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length); if (uhw_outlen && uhw_outlen < resp_len) return -EINVAL; resp.response_length = resp_len; if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen)) return -EINVAL; memset(props, 0, sizeof(*props)); err = mlx5_query_system_image_guid(ibdev, &props->sys_image_guid); if (err) return err; props->max_pkeys = dev->pkey_table_len; err = mlx5_query_vendor_id(ibdev, &props->vendor_id); if (err) return err; props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) | (fw_rev_min(dev->mdev) << 16) | fw_rev_sub(dev->mdev); props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN; if (MLX5_CAP_GEN(mdev, pkv)) props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; if (MLX5_CAP_GEN(mdev, qkv)) props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; if (MLX5_CAP_GEN(mdev, apm)) props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; if (MLX5_CAP_GEN(mdev, xrc)) props->device_cap_flags |= IB_DEVICE_XRC; if (MLX5_CAP_GEN(mdev, imaicl)) { props->device_cap_flags |= IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_WINDOW_TYPE_2B; props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); /* We support 'Gappy' memory registration too */ props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG; } /* IB_WR_REG_MR always requires changing the entity size with UMR */ if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; if (MLX5_CAP_GEN(mdev, sho)) { props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER; /* At this stage no support for signature handover */ 
props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 | IB_PROT_T10DIF_TYPE_2 | IB_PROT_T10DIF_TYPE_3; props->sig_guard_cap = IB_GUARD_T10DIF_CRC | IB_GUARD_T10DIF_CSUM; } if (MLX5_CAP_GEN(mdev, block_lb_mc)) props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) { if (MLX5_CAP_ETH(mdev, csum_cap)) { /* Legacy bit to support old userspace libraries */ props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM; } if (MLX5_CAP_ETH(dev->mdev, vlan_cap)) props->raw_packet_caps |= IB_RAW_PACKET_CAP_CVLAN_STRIPPING; if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) { max_tso = MLX5_CAP_ETH(mdev, max_lso_cap); if (max_tso) { resp.tso_caps.max_tso = 1 << max_tso; resp.tso_caps.supported_qpts |= 1 << IB_QPT_RAW_PACKET; resp.response_length += sizeof(resp.tso_caps); } } if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) { resp.rss_caps.rx_hash_function = MLX5_RX_HASH_FUNC_TOEPLITZ; resp.rss_caps.rx_hash_fields_mask = MLX5_RX_HASH_SRC_IPV4 | MLX5_RX_HASH_DST_IPV4 | MLX5_RX_HASH_SRC_IPV6 | MLX5_RX_HASH_DST_IPV6 | MLX5_RX_HASH_SRC_PORT_TCP | MLX5_RX_HASH_DST_PORT_TCP | MLX5_RX_HASH_SRC_PORT_UDP | MLX5_RX_HASH_DST_PORT_UDP | MLX5_RX_HASH_INNER; if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) resp.rss_caps.rx_hash_fields_mask |= MLX5_RX_HASH_IPSEC_SPI; resp.response_length += sizeof(resp.rss_caps); } } else { if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) resp.response_length += sizeof(resp.tso_caps); if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) resp.response_length += sizeof(resp.rss_caps); } if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) { props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; props->device_cap_flags |= IB_DEVICE_UD_TSO; } if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) && MLX5_CAP_GEN(dev->mdev, general_notification_event) && raw_support) props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP; if (MLX5_CAP_GEN(mdev, 
ipoib_enhanced_offloads) && MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap)) props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && MLX5_CAP_ETH(dev->mdev, scatter_fcs) && raw_support) { /* Legacy bit to support old userspace libraries */ props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS; props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS; } if (MLX5_CAP_DEV_MEM(mdev, memic)) { props->max_dm_size = MLX5_CAP_DEV_MEM(mdev, max_memic_size); } if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; if (MLX5_CAP_GEN(mdev, end_pad)) props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING; props->vendor_part_id = mdev->pdev->device; props->hw_ver = mdev->pdev->revision; props->max_mr_size = ~0ull; props->page_size_cap = ~(min_page_size - 1); props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp); props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) / sizeof(struct mlx5_wqe_data_seg); max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512); max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) - sizeof(struct mlx5_wqe_raddr_seg)) / sizeof(struct mlx5_wqe_data_seg); props->max_send_sge = max_sq_sg; props->max_recv_sge = max_rq_sg; props->max_sge_rd = MLX5_MAX_SGE_RD; props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1; props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd); props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp); props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp); props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq); props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1; props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay); props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; props->max_srq_sge = 
max_rq_sg - 1; props->max_fast_reg_page_list_len = 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size); props->max_pi_fast_reg_page_list_len = props->max_fast_reg_page_list_len / 2; props->max_sgl_rd = MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance); get_atomic_caps_qp(dev, props); props->masked_atomic_cap = IB_ATOMIC_NONE; props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg); props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg); props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; props->max_ah = INT_MAX; props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz); props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL; if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { if (dev->odp_caps.general_caps & IB_ODP_SUPPORT) props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; props->odp_caps = dev->odp_caps; if (!uhw) { /* ODP for kernel QPs is not implemented for receive * WQEs and SRQ WQEs */ props->odp_caps.per_transport_caps.rc_odp_caps &= ~(IB_ODP_SUPPORT_READ | IB_ODP_SUPPORT_SRQ_RECV); props->odp_caps.per_transport_caps.uc_odp_caps &= ~(IB_ODP_SUPPORT_READ | IB_ODP_SUPPORT_SRQ_RECV); props->odp_caps.per_transport_caps.ud_odp_caps &= ~(IB_ODP_SUPPORT_READ | IB_ODP_SUPPORT_SRQ_RECV); props->odp_caps.per_transport_caps.xrc_odp_caps &= ~(IB_ODP_SUPPORT_READ | IB_ODP_SUPPORT_SRQ_RECV); } } if (MLX5_CAP_GEN(mdev, cd)) props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL; if (mlx5_core_is_vf(mdev)) props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION; if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET && raw_support) { props->rss_caps.max_rwq_indirection_tables = 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt); props->rss_caps.max_rwq_indirection_table_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size); props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET; props->max_wq_type_rq = 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq); } if (MLX5_CAP_GEN(mdev, tag_matching)) { props->tm_caps.max_num_tags = (1 << 
MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1; props->tm_caps.max_ops = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); props->tm_caps.max_sge = MLX5_TM_MAX_SGE; } if (MLX5_CAP_GEN(mdev, tag_matching) && MLX5_CAP_GEN(mdev, rndv_offload_rc)) { props->tm_caps.flags = IB_TM_CAP_RNDV_RC; props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; } if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) { props->cq_caps.max_cq_moderation_count = MLX5_MAX_CQ_COUNT; props->cq_caps.max_cq_moderation_period = MLX5_MAX_CQ_PERIOD; } if (offsetofend(typeof(resp), cqe_comp_caps) <= uhw_outlen) { resp.response_length += sizeof(resp.cqe_comp_caps); if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) { resp.cqe_comp_caps.max_num = MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num); resp.cqe_comp_caps.supported_format = MLX5_IB_CQE_RES_FORMAT_HASH | MLX5_IB_CQE_RES_FORMAT_CSUM; if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index)) resp.cqe_comp_caps.supported_format |= MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX; } } if (offsetofend(typeof(resp), packet_pacing_caps) <= uhw_outlen && raw_support) { if (MLX5_CAP_QOS(mdev, packet_pacing) && MLX5_CAP_GEN(mdev, qos)) { resp.packet_pacing_caps.qp_rate_limit_max = MLX5_CAP_QOS(mdev, packet_pacing_max_rate); resp.packet_pacing_caps.qp_rate_limit_min = MLX5_CAP_QOS(mdev, packet_pacing_min_rate); resp.packet_pacing_caps.supported_qpts |= 1 << IB_QPT_RAW_PACKET; if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) && MLX5_CAP_QOS(mdev, packet_pacing_typical_size)) resp.packet_pacing_caps.cap_flags |= MLX5_IB_PP_SUPPORT_BURST; } resp.response_length += sizeof(resp.packet_pacing_caps); } if (offsetofend(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes) <= uhw_outlen) { if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe)) resp.mlx5_ib_support_multi_pkt_send_wqes = MLX5_IB_ALLOW_MPW; if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)) resp.mlx5_ib_support_multi_pkt_send_wqes |= MLX5_IB_SUPPORT_EMPW; resp.response_length += sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes); } if 
(offsetofend(typeof(resp), flags) <= uhw_outlen) { resp.response_length += sizeof(resp.flags); if (MLX5_CAP_GEN(mdev, cqe_compression_128)) resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP; if (MLX5_CAP_GEN(mdev, cqe_128_always)) resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD; if (MLX5_CAP_GEN(mdev, qp_packet_based)) resp.flags |= MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE; resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT; } if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) { resp.response_length += sizeof(resp.sw_parsing_caps); if (MLX5_CAP_ETH(mdev, swp)) { resp.sw_parsing_caps.sw_parsing_offloads |= MLX5_IB_SW_PARSING; if (MLX5_CAP_ETH(mdev, swp_csum)) resp.sw_parsing_caps.sw_parsing_offloads |= MLX5_IB_SW_PARSING_CSUM; if (MLX5_CAP_ETH(mdev, swp_lso)) resp.sw_parsing_caps.sw_parsing_offloads |= MLX5_IB_SW_PARSING_LSO; if (resp.sw_parsing_caps.sw_parsing_offloads) resp.sw_parsing_caps.supported_qpts = BIT(IB_QPT_RAW_PACKET); } } if (offsetofend(typeof(resp), striding_rq_caps) <= uhw_outlen && raw_support) { resp.response_length += sizeof(resp.striding_rq_caps); if (MLX5_CAP_GEN(mdev, striding_rq)) { resp.striding_rq_caps.min_single_stride_log_num_of_bytes = MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES; resp.striding_rq_caps.max_single_stride_log_num_of_bytes = MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES; if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range)) resp.striding_rq_caps .min_single_wqe_log_num_of_strides = MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES; else resp.striding_rq_caps .min_single_wqe_log_num_of_strides = MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES; resp.striding_rq_caps.max_single_wqe_log_num_of_strides = MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES; resp.striding_rq_caps.supported_qpts = BIT(IB_QPT_RAW_PACKET); } } if (offsetofend(typeof(resp), tunnel_offloads_caps) <= uhw_outlen) { resp.response_length += sizeof(resp.tunnel_offloads_caps); if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan)) resp.tunnel_offloads_caps |= 
MLX5_IB_TUNNELED_OFFLOADS_VXLAN; if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_GENEVE; if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_GRE; if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE; if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP; } if (offsetofend(typeof(resp), dci_streams_caps) <= uhw_outlen) { resp.response_length += sizeof(resp.dci_streams_caps); resp.dci_streams_caps.max_log_num_concurent = MLX5_CAP_GEN(mdev, log_max_dci_stream_channels); resp.dci_streams_caps.max_log_num_errored = MLX5_CAP_GEN(mdev, log_max_dci_errored_streams); } if (uhw_outlen) { err = ib_copy_to_udata(uhw, &resp, resp.response_length); if (err) return err; } return 0; } static void translate_active_width(struct ib_device *ibdev, u16 active_width, u8 *ib_width) { struct mlx5_ib_dev *dev = to_mdev(ibdev); if (active_width & MLX5_PTYS_WIDTH_1X) *ib_width = IB_WIDTH_1X; else if (active_width & MLX5_PTYS_WIDTH_2X) *ib_width = IB_WIDTH_2X; else if (active_width & MLX5_PTYS_WIDTH_4X) *ib_width = IB_WIDTH_4X; else if (active_width & MLX5_PTYS_WIDTH_8X) *ib_width = IB_WIDTH_8X; else if (active_width & MLX5_PTYS_WIDTH_12X) *ib_width = IB_WIDTH_12X; else { mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n", active_width); *ib_width = IB_WIDTH_4X; } return; } static int mlx5_mtu_to_ib_mtu(int mtu) { switch (mtu) { case 256: return 1; case 512: return 2; case 1024: return 3; case 2048: return 4; case 4096: return 5; default: pr_warn("invalid mtu\n"); return -1; } } enum ib_max_vl_num { __IB_MAX_VL_0 = 1, __IB_MAX_VL_0_1 = 2, __IB_MAX_VL_0_3 = 3, __IB_MAX_VL_0_7 = 4, __IB_MAX_VL_0_14 = 5, }; enum mlx5_vl_hw_cap { MLX5_VL_HW_0 = 1, MLX5_VL_HW_0_1 = 2, MLX5_VL_HW_0_2 = 3, MLX5_VL_HW_0_3 = 4, MLX5_VL_HW_0_4 = 5, 
	/* (continuation) remaining MLX5 hardware VL-capability encodings */
	MLX5_VL_HW_0_5	= 6,
	MLX5_VL_HW_0_6	= 7,
	MLX5_VL_HW_0_7	= 8,
	MLX5_VL_HW_0_14	= 15
};

/*
 * translate_max_vl_num() - convert the MLX5 hardware VL-capability
 * encoding into the corresponding __IB_MAX_VL_* value in *max_vl_num.
 *
 * Only a subset of the MLX5_VL_HW_* encodings has an IB counterpart;
 * anything else yields -EINVAL.  Returns 0 on success.
 */
static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * mlx5_query_hca_port() - fill *props for a port using the HCA vport
 * access method: query the vport context for link-layer attributes,
 * then the operational registers for width/speed, MTU and VL caps.
 *
 * The caller is expected to have zeroed *props (see comment below).
 * Returns 0 on success or a negative errno.
 */
static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	u16 max_mtu;
	u16 oper_mtu;
	int err;
	u16 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		/* kfree(NULL) at the shared "out" label is a no-op */
		err = -ENOMEM;
		goto out;
	}

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	/* copy link-layer attributes out of the queried vport context */
	props->lid = rep->lid;
	props->lmc = rep->lmc;
	props->sm_lid = rep->sm_lid;
	props->sm_sl = rep->sm_sl;
	props->state = rep->vport_state;
	props->phys_state = rep->port_physical_state;
	props->port_cap_flags = rep->cap_mask1;
	props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr = rep->pkey_violation_counter;
	props->qkey_viol_cntr = rep->qkey_violation_counter;
	props->subnet_timeout = rep->subnet_timeout;
	props->init_type_reply = rep->init_type_reply;

	/* cap_mask2 is only meaningful when the CapabilityMask2 bit is set */
	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
		props->port_cap_flags2 = rep->cap_mask2;

	err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper,
				      &props->active_speed, port);
	if (err)
		goto out;

	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);
	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap, &props->max_vl_num);

out:
	/* shared exit; rep is NULL here only on the alloc-failure path */
	kfree(rep);
	return err;
}

/*
 * mlx5_ib_query_port() - ib_device port-query entry point.
 *
 * Dispatches on the vport access method (MAD / HCA vport / NIC-RoCE)
 * and, on success, subtracts the GIDs reserved by the core from
 * gid_tbl_len.  If the port is not yet affiliated, the master device
 * (native port 1) is consulted for the reserved-GID count instead.
 */
int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props)
{
	unsigned int count;
	int ret;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
		break;
	case MLX5_VPORT_ACCESS_METHOD_HCA:
		ret = mlx5_query_hca_port(ibdev, port, props);
		break;
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		ret = mlx5_query_port_roce(ibdev, port, props);
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret && props) {
		struct mlx5_ib_dev *dev = to_mdev(ibdev);
		struct mlx5_core_dev *mdev;
		bool put_mdev = true;

		mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
		if (!mdev) {
			/* If the port isn't affiliated yet query the master.
			 * The master and slave will have the same values.
			 */
			mdev = dev->mdev;
			port = 1;
			put_mdev = false;
		}
		count = mlx5_core_reserved_gids_count(mdev);
		if (put_mdev)
			mlx5_ib_put_native_port_mdev(dev, port);
		props->gid_tbl_len -= count;
	}
	return ret;
}

/* Representor ports always report RoCE (NIC) port attributes. */
static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port,
				  struct ib_port_attr *props)
{
	return mlx5_query_port_roce(ibdev, port, props);
}

static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port,
				  u16 index, u16 *pkey)
{
	/* Default special Pkey for representor device port as per the
	 * IB specification 1.3 section 10.9.1.2.
*/ *pkey = 0xffff; return 0; } static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index, union ib_gid *gid) { struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_core_dev *mdev = dev->mdev; switch (mlx5_get_vport_access_method(ibdev)) { case MLX5_VPORT_ACCESS_METHOD_MAD: return mlx5_query_mad_ifc_gids(ibdev, port, index, gid); case MLX5_VPORT_ACCESS_METHOD_HCA: return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid); default: return -EINVAL; } } static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey) { struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_core_dev *mdev; bool put_mdev = true; u32 mdev_port_num; int err; mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num); if (!mdev) { /* The port isn't affiliated yet, get the PKey from the master * port. For RoCE the PKey tables will be the same. */ put_mdev = false; mdev = dev->mdev; mdev_port_num = 1; } err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0, index, pkey); if (put_mdev) mlx5_ib_put_native_port_mdev(dev, port); return err; } static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey) { switch (mlx5_get_vport_access_method(ibdev)) { case MLX5_VPORT_ACCESS_METHOD_MAD: return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey); case MLX5_VPORT_ACCESS_METHOD_HCA: case MLX5_VPORT_ACCESS_METHOD_NIC: return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey); default: return -EINVAL; } } static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *props) { struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_reg_node_desc in; struct mlx5_reg_node_desc out; int err; if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) return -EOPNOTSUPP; if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) return 0; /* * If possible, pass node desc to FW, so it can generate * a 144 trap. If cmd fails, just ignore. 
*/ memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX); err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out, sizeof(out), MLX5_REG_NODE_DESC, 0, 1); if (err) return err; memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX); return err; } static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask, u32 value) { struct mlx5_hca_vport_context ctx = {}; struct mlx5_core_dev *mdev; u32 mdev_port_num; int err; mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num); if (!mdev) return -ENODEV; err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx); if (err) goto out; if (~ctx.cap_mask1_perm & mask) { mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n", mask, ctx.cap_mask1_perm); err = -EINVAL; goto out; } ctx.cap_mask1 = value; ctx.cap_mask1_perm = mask; err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx); out: mlx5_ib_put_native_port_mdev(dev, port_num); return err; } static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask, struct ib_port_modify *props) { struct mlx5_ib_dev *dev = to_mdev(ibdev); struct ib_port_attr attr; u32 tmp; int err; u32 change_mask; u32 value; bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND); /* CM layer calls ib_modify_port() regardless of the link layer. For * Ethernet ports, qkey violation and Port capabilities are meaningless. 
*/ if (!is_ib) return 0; if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) { change_mask = props->clr_port_cap_mask | props->set_port_cap_mask; value = ~props->clr_port_cap_mask | props->set_port_cap_mask; return set_port_caps_atomic(dev, port, change_mask, value); } mutex_lock(&dev->cap_mask_mutex); err = ib_query_port(ibdev, port, &attr); if (err) goto out; tmp = (attr.port_cap_flags | props->set_port_cap_mask) & ~props->clr_port_cap_mask; err = mlx5_set_port_caps(dev->mdev, port, tmp); out: mutex_unlock(&dev->cap_mask_mutex); return err; } static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps) { mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n", caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n"); } static u16 calc_dynamic_bfregs(int uars_per_sys_page) { /* Large page with non 4k uar support might limit the dynamic size */ if (uars_per_sys_page == 1 && PAGE_SIZE > 4096) return MLX5_MIN_DYN_BFREGS; return MLX5_MAX_DYN_BFREGS; } static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k, struct mlx5_ib_alloc_ucontext_req_v2 *req, struct mlx5_bfreg_info *bfregi) { int uars_per_sys_page; int bfregs_per_sys_page; int ref_bfregs = req->total_num_bfregs; if (req->total_num_bfregs == 0) return -EINVAL; BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE); BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE); if (req->total_num_bfregs > MLX5_MAX_BFREGS) return -ENOMEM; uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k); bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR; /* This holds the required static allocation asked by the user */ req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page); if (req->num_low_latency_bfregs > req->total_num_bfregs - 1) return -EINVAL; bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page; bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page); bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs; 
	/* total pages = static pages + pages reserved for dynamic bfregs */
	bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;

	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
		    lib_uar_4k ? "yes" : "no", ref_bfregs,
		    req->total_num_bfregs, bfregi->total_num_bfregs,
		    bfregi->num_sys_pages);

	return 0;
}

/*
 * allocate_uars() - allocate the static UAR system pages for a user
 * context and mark the remaining (dynamic) slots as unallocated.
 *
 * On failure all pages allocated so far are released before returning
 * the error.
 */
static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int err;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
		err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
					 context->devx_uid);
		if (err)
			goto error;

		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
	}

	/* dynamic slots are filled in lazily (at mmap time); mark them free */
	for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
		bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;

	return 0;

error:
	/* unwind: free every page allocated before the failure */
	for (--i; i >= 0; i--)
		if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
					 context->devx_uid))
			mlx5_ib_warn(dev, "failed to free uar %d\n", i);

	return err;
}

/*
 * deallocate_uars() - release every UAR page owned by the context:
 * all static pages, plus any dynamic page that was actually allocated
 * (i.e. whose slot is not MLX5_IB_INVALID_UAR_INDEX).
 */
static void deallocate_uars(struct mlx5_ib_dev *dev,
			    struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_sys_pages; i++)
		if (i < bfregi->num_static_sys_pages ||
		    bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
			mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
					     context->devx_uid);
}

/*
 * mlx5_ib_enable_lb() - take loopback references (transport-domain
 * and/or QP) under dev->lb.mutex, and enable NIC-vport local loopback
 * the first time the enable condition (user_td == 2 || qps == 1) is
 * met while it is still disabled.  Returns the firmware call's errno
 * or 0.
 */
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
	int err = 0;

	mutex_lock(&dev->lb.mutex);
	if (td)
		dev->lb.user_td++;
	if (qp)
		dev->lb.qps++;

	if (dev->lb.user_td == 2 ||
	    dev->lb.qps == 1) {
		if (!dev->lb.enabled) {
			err = mlx5_nic_vport_update_local_lb(dev->mdev,
							     true);
			dev->lb.enabled = true;
		}
	}

	mutex_unlock(&dev->lb.mutex);

	return err;
}

/*
 * mlx5_ib_disable_lb() - counterpart of mlx5_ib_enable_lb(): drop the
 * references and disable local loopback once the last user is gone.
 */
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
	mutex_lock(&dev->lb.mutex);
	if (td)
		dev->lb.user_td--;
	if (qp)
		dev->lb.qps--;

	if (dev->lb.user_td == 1 &&
	    dev->lb.qps == 0) {
		if (dev->lb.enabled)
		{
			mlx5_nic_vport_update_local_lb(dev->mdev, false);
			dev->lb.enabled = false;
		}
	}

	mutex_unlock(&dev->lb.mutex);
}

/*
 * mlx5_ib_alloc_transport_domain() - allocate a transport domain for a
 * user context.  On Ethernet ports that cannot selectively disable
 * local loopback (neither UC nor MC disable caps), also take a
 * loopback "td" reference via mlx5_ib_enable_lb().
 *
 * Returns 0 when transport domains are unsupported, otherwise the
 * firmware/loopback result.
 */
static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
					  u16 uid)
{
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		return 0;

	err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
	if (err)
		return err;

	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		return err;

	return mlx5_ib_enable_lb(dev, true, false);
}

/* Counterpart of mlx5_ib_alloc_transport_domain(): free the domain and
 * drop the loopback reference under the same capability conditions.
 */
static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
					     u16 uid)
{
	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		return;

	mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);

	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		return;

	mlx5_ib_disable_lb(dev, true, false);
}

/*
 * set_ucontext_resp() - populate the alloc_ucontext response from
 * device capabilities and the context's bfreg bookkeeping.  Shared by
 * alloc_ucontext and the query_ucontext uverbs method.
 */
static int set_ucontext_resp(struct ib_ucontext *uctx,
			     struct mlx5_ib_alloc_ucontext_resp *resp)
{
	struct ib_device *ibdev = uctx->device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
	struct mlx5_bfreg_info *bfregi = &context->bfregi;
	int err;

	if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
		err = mlx5_cmd_dump_fill_mkey(dev->mdev,
					      &resp->dump_fill_mkey);
		if (err)
			return err;
		resp->comp_mask |=
			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
	}

	/* size/limit fields derived from the general capability log2 values */
	resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	if (dev->wc_support)
		resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
						      log_bf_reg_size);
	resp->cache_line_size = cache_line_size();
	resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp->max_srq_recv_wr = 1 <<
MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); resp->cqe_version = context->cqe_version; resp->log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT; resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1; if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) { if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS)) resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM; if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA) resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA; if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING; if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN) resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN; /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */ } resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 : bfregi->total_num_bfregs - bfregi->num_dyn_bfregs; resp->num_ports = dev->num_ports; resp->cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE | MLX5_USER_CMDS_SUPP_UHW_CREATE_AH; if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) { mlx5_query_min_inline(dev->mdev, &resp->eth_min_inline); resp->eth_min_inline++; } if (dev->mdev->clock_info) resp->clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1); /* * We don't want to expose information from the PCI bar that is located * after 4096 bytes, so if the arch only supports larger pages, let's * pretend we don't support reading the HCA's core clock. This is also * forced by mmap function. 
*/ if (PAGE_SIZE <= 4096) { resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; resp->hca_core_clock_offset = offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE; } if (MLX5_CAP_GEN(dev->mdev, ece_support)) resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE; if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) && rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) && rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format))) resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS; resp->num_dyn_bfregs = bfregi->num_dyn_bfregs; if (MLX5_CAP_GEN(dev->mdev, drain_sigerr)) resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS; return 0; } static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) { struct ib_device *ibdev = uctx->device; struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_ib_alloc_ucontext_req_v2 req = {}; struct mlx5_ib_alloc_ucontext_resp resp = {}; struct mlx5_ib_ucontext *context = to_mucontext(uctx); struct mlx5_bfreg_info *bfregi; int ver; int err; size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2, max_cqe_version); bool lib_uar_4k; bool lib_uar_dyn; if (!dev->ib_active) return -EAGAIN; if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req)) ver = 0; else if (udata->inlen >= min_req_v2) ver = 2; else return -EINVAL; err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); if (err) return err; if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX) return -EOPNOTSUPP; if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2) return -EOPNOTSUPP; req.total_num_bfregs = ALIGN(req.total_num_bfregs, MLX5_NON_FP_BFREGS_PER_UAR); if (req.num_low_latency_bfregs > req.total_num_bfregs - 1) return -EINVAL; if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) { err = mlx5_ib_devx_create(dev, true); if (err < 0) goto out_ctx; context->devx_uid = err; } lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR; lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR; bfregi = 
&context->bfregi; if (lib_uar_dyn) { bfregi->lib_uar_dyn = lib_uar_dyn; goto uar_done; } /* updates req->total_num_bfregs */ err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi); if (err) goto out_devx; mutex_init(&bfregi->lock); bfregi->lib_uar_4k = lib_uar_4k; bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count), GFP_KERNEL); if (!bfregi->count) { err = -ENOMEM; goto out_devx; } bfregi->sys_pages = kcalloc(bfregi->num_sys_pages, sizeof(*bfregi->sys_pages), GFP_KERNEL); if (!bfregi->sys_pages) { err = -ENOMEM; goto out_count; } err = allocate_uars(dev, context); if (err) goto out_sys_pages; uar_done: err = mlx5_ib_alloc_transport_domain(dev, &context->tdn, context->devx_uid); if (err) goto out_uars; INIT_LIST_HEAD(&context->db_page_list); mutex_init(&context->db_page_mutex); context->cqe_version = min_t(__u8, (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version), req.max_cqe_version); err = set_ucontext_resp(uctx, &resp); if (err) goto out_mdev; resp.response_length = min(udata->outlen, sizeof(resp)); err = ib_copy_to_udata(udata, &resp, resp.response_length); if (err) goto out_mdev; bfregi->ver = ver; bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs; context->lib_caps = req.lib_caps; print_lib_caps(dev, context->lib_caps); if (mlx5_ib_lag_should_assign_affinity(dev)) { u32 port = mlx5_core_native_port_num(dev->mdev) - 1; atomic_set(&context->tx_port_affinity, atomic_add_return( 1, &dev->port[port].roce.tx_port_affinity)); } return 0; out_mdev: mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); out_uars: deallocate_uars(dev, context); out_sys_pages: kfree(bfregi->sys_pages); out_count: kfree(bfregi->count); out_devx: if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) mlx5_ib_devx_destroy(dev, context->devx_uid); out_ctx: return err; } static int mlx5_ib_query_ucontext(struct ib_ucontext *ibcontext, struct uverbs_attr_bundle *attrs) { struct mlx5_ib_alloc_ucontext_resp uctx_resp = {}; int ret; ret = set_ucontext_resp(ibcontext, 
&uctx_resp); if (ret) return ret; uctx_resp.response_length = min_t(size_t, uverbs_attr_get_len(attrs, MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX), sizeof(uctx_resp)); ret = uverbs_copy_to_struct_or_zero(attrs, MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX, &uctx_resp, sizeof(uctx_resp)); return ret; } static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) { struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); struct mlx5_bfreg_info *bfregi; bfregi = &context->bfregi; mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); deallocate_uars(dev, context); kfree(bfregi->sys_pages); kfree(bfregi->count); if (context->devx_uid) mlx5_ib_devx_destroy(dev, context->devx_uid); } static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int uar_idx) { int fw_uars_per_page; fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1; return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page; } static u64 uar_index2paddress(struct mlx5_ib_dev *dev, int uar_idx) { unsigned int fw_uars_per_page; fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? 
			MLX5_UARS_IN_PAGE : 1;

	return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
}

/* Decode the command field from an mmap offset (vm_pgoff). */
static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

/* Decode the argument field (bits below the command) from vm_pgoff. */
static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}

/* Index resides in an extra byte to enable larger values than 255 */
static int get_extended_index(unsigned long offset)
{
	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
}


/* Nothing to do on disassociation; mmap teardown is handled elsewhere. */
static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

/* Human-readable name of an mmap command, for log messages. */
static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
{
	switch (cmd) {
	case MLX5_IB_MMAP_WC_PAGE:
		return "WC";
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return "best effort WC";
	case MLX5_IB_MMAP_NC_PAGE:
		return "NC";
	case MLX5_IB_MMAP_DEVICE_MEM:
		return "Device Memory";
	default:
		return NULL;
	}
}

/*
 * mlx5_ib_mmap_clock_info_page() - map the read-only clock-info page
 * into user space.
 *
 * Requires a single-page shared mapping of the V1 clock-info index.
 * Writable or executable mappings are rejected, and VM_MAYWRITE is
 * cleared so the mapping cannot later be mprotect()ed to writable.
 */
static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
					struct vm_area_struct *vma,
					struct mlx5_ib_ucontext *context)
{
	if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
	    !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
		return -EOPNOTSUPP;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;
	vma->vm_flags &= ~VM_MAYWRITE;

	if (!dev->mdev->clock_info)
		return -EOPNOTSUPP;

	return vm_insert_page(vma, vma->vm_start,
			      virt_to_page(dev->mdev->clock_info));
}

/*
 * mlx5_ib_mmap_free() - release driver resources attached to an
 * rdma_user_mmap_entry, dispatching on the mapping type (MEMIC, VAR
 * bitmap slot, or dynamically allocated UAR page).
 */
static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
{
	struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
	struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
	struct mlx5_var_table *var_table = &dev->var_table;
	struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext);

	switch (mentry->mmap_flag) {
	case MLX5_IB_MMAP_TYPE_MEMIC:
	case MLX5_IB_MMAP_TYPE_MEMIC_OP:
		mlx5_ib_dm_mmap_free(dev, mentry);
		break;
	case MLX5_IB_MMAP_TYPE_VAR:
		mutex_lock(&var_table->bitmap_lock);
		clear_bit(mentry->page_idx,
var_table->bitmap); mutex_unlock(&var_table->bitmap_lock); kfree(mentry); break; case MLX5_IB_MMAP_TYPE_UAR_WC: case MLX5_IB_MMAP_TYPE_UAR_NC: mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx, context->devx_uid); kfree(mentry); break; default: WARN_ON(true); } } static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, struct vm_area_struct *vma, struct mlx5_ib_ucontext *context) { struct mlx5_bfreg_info *bfregi = &context->bfregi; int err; unsigned long idx; phys_addr_t pfn; pgprot_t prot; u32 bfreg_dyn_idx = 0; u32 uar_index; int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC); int max_valid_idx = dyn_uar ? bfregi->num_sys_pages : bfregi->num_static_sys_pages; if (bfregi->lib_uar_dyn) return -EINVAL; if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; if (dyn_uar) idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages; else idx = get_index(vma->vm_pgoff); if (idx >= max_valid_idx) { mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n", idx, max_valid_idx); return -EINVAL; } switch (cmd) { case MLX5_IB_MMAP_WC_PAGE: case MLX5_IB_MMAP_ALLOC_WC: case MLX5_IB_MMAP_REGULAR_PAGE: /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */ prot = pgprot_writecombine(vma->vm_page_prot); break; case MLX5_IB_MMAP_NC_PAGE: prot = pgprot_noncached(vma->vm_page_prot); break; default: return -EINVAL; } if (dyn_uar) { int uars_per_page; uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k); bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR); if (bfreg_dyn_idx >= bfregi->total_num_bfregs) { mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n", bfreg_dyn_idx, bfregi->total_num_bfregs); return -EINVAL; } mutex_lock(&bfregi->lock); /* Fail if uar already allocated, first bfreg index of each * page holds its count. 
		 */
		if (bfregi->count[bfreg_dyn_idx]) {
			mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n",
				     idx, bfreg_dyn_idx);
			mutex_unlock(&bfregi->lock);
			return -EINVAL;
		}

		/* Reserve the bfreg before talking to firmware. */
		bfregi->count[bfreg_dyn_idx]++;
		mutex_unlock(&bfregi->lock);

		err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
					 context->devx_uid);
		if (err) {
			mlx5_ib_warn(dev, "UAR alloc failed\n");
			goto free_bfreg;
		}
	} else {
		uar_index = bfregi->sys_pages[idx];
	}

	pfn = uar_index2pfn(dev, uar_index);
	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);

	err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
				prot, NULL);
	if (err) {
		mlx5_ib_err(dev,
			    "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
			    err, mmap_cmd2str(cmd));
		goto err;
	}

	if (dyn_uar)
		bfregi->sys_pages[idx] = uar_index;
	return 0;

err:
	if (!dyn_uar)
		return err;

	/* NOTE(review): deallocates 'idx' rather than the 'uar_index'
	 * obtained from mlx5_cmd_uar_alloc() above — confirm against the
	 * firmware interface that this is intentional.
	 */
	mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid);

free_bfreg:
	mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);

	return err;
}

/* Rebuild the (command << 16 | extended index) key used to look up an
 * rdma_user_mmap entry from a VMA's page offset.
 */
static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
{
	unsigned long idx;
	u8 command;

	command = get_command(vma->vm_pgoff);
	idx = get_extended_index(vma->vm_pgoff);

	return (command << 16 | idx);
}

/* Generic entry-based mmap path: find the registered mmap entry for this
 * offset and map its pages with caching attributes chosen by entry type.
 */
static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
			       struct vm_area_struct *vma,
			       struct ib_ucontext *ucontext)
{
	struct mlx5_user_mmap_entry *mentry;
	struct rdma_user_mmap_entry *entry;
	unsigned long pgoff;
	pgprot_t prot;
	phys_addr_t pfn;
	int ret;

	pgoff = mlx5_vma_to_pgoff(vma);
	entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
	if (!entry)
		return -EINVAL;

	mentry = to_mmmap(entry);
	pfn = (mentry->address >> PAGE_SHIFT);
	/* VAR and NC UAR pages must be non-cached; everything else gets WC. */
	if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR ||
	    mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC)
		prot = pgprot_noncached(vma->vm_page_prot);
	else
		prot = pgprot_writecombine(vma->vm_page_prot);
	ret = rdma_user_mmap_io(ucontext, vma, pfn,
				entry->npages * PAGE_SIZE,
				prot,
				entry);
	rdma_user_mmap_entry_put(&mentry->rdma_entry);
	return ret;
}

/* Convert an mmap entry's internal pgoff key back into the user-visible
 * mmap offset (head only; body continues in the next chunk line).
 */
static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
{
	u64
/* Continuation of mlx5_entry_to_mmap_offset(): split the 16-bit extended
 * index back into its low byte and extra byte, recombine with the command,
 * and scale to a byte offset.
 */
	    cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF;
	u64 index = entry->rdma_entry.start_pgoff & 0xFFFF;

	return (((index >> 8) << 16) |
		(cmd << MLX5_IB_MMAP_CMD_SHIFT) | (index & 0xFF))
		<< PAGE_SHIFT;
}

/* Top-level ->mmap verb: dispatch on the command encoded in vm_pgoff. */
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext,
			struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	unsigned long command;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_WC_PAGE:
	case MLX5_IB_MMAP_ALLOC_WC:
		/* WC mappings require device write-combining support. */
		if (!dev->wc_support)
			return -EPERM;
		fallthrough;
	case MLX5_IB_MMAP_NC_PAGE:
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return uar_mmap(dev, command, vma, context);

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -ENOSYS;

	case MLX5_IB_MMAP_CORE_CLOCK:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		if (vma->vm_flags & VM_WRITE)
			return -EPERM;
		vma->vm_flags &= ~VM_MAYWRITE;

		/* Don't expose to user-space information it shouldn't have */
		if (PAGE_SIZE > 4096)
			return -EOPNOTSUPP;

		pfn = (dev->mdev->iseg_base +
		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
			PAGE_SHIFT;
		return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);
	case MLX5_IB_MMAP_CLOCK_INFO:
		return mlx5_ib_mmap_clock_info_page(dev, vma, context);

	default:
		return mlx5_ib_mmap_offset(dev, vma, ibcontext);
	}

	return 0;
}

/* Allocate a protection domain in firmware and report the PDN to user
 * space when called through the uverbs path (tail continues in the next
 * chunk line).
 */
static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mlx5_ib_pd *pd = to_mpd(ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct mlx5_ib_alloc_pd_resp resp;
	int err;
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
	u16 uid = 0;
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	uid = context ?
/* Continuation of mlx5_ib_alloc_pd(): issue the ALLOC_PD command under the
 * caller's devx uid and copy the PDN back to user space if requested.
 */
	      context->devx_uid : 0;

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	MLX5_SET(alloc_pd_in, in, uid, uid);
	err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
	if (err)
		return err;

	pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
	pd->uid = uid;
	if (udata) {
		resp.pdn = pd->pdn;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			/* Undo the firmware allocation on copyout failure. */
			mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
			return -EFAULT;
		}
	}

	return 0;
}

/* Release a protection domain previously allocated by mlx5_ib_alloc_pd(). */
static int mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
}

/* Attach a QP to a multicast group. Not supported for underlay
 * (SOURCE_QPN) QPs.
 */
static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *mqp = to_mqp(ibqp);
	int err;
	u16 uid;

	uid = ibqp->pd ? to_mpd(ibqp->pd)->uid : 0;
	if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
		mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
		return -EOPNOTSUPP;
	}

	err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
	if (err)
		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

/* Detach a QP from a multicast group (tail continues in the next chunk
 * line).
 */
static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;
	u16 uid;

	uid = ibqp->pd ?
/* Continuation of mlx5_ib_mcg_detach(): issue the firmware detach. */
	      to_mpd(ibqp->pd)->uid : 0;
	err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
	if (err)
		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

/* Populate node description, revision id and node GUID on the IB device. */
static int init_node_data(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
	if (err)
		return err;

	dev->mdev->rev_id = dev->mdev->pdev->revision;

	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}

/* sysfs: number of pages given to firmware. */
static ssize_t fw_pages_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages);
}
static DEVICE_ATTR_RO(fw_pages);

/* sysfs: number of registered pages. */
static ssize_t reg_pages_show(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}
static DEVICE_ATTR_RO(reg_pages);

/* sysfs: HCA type derived from the PCI device id. */
static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device);
}
static DEVICE_ATTR_RO(hca_type);

/* sysfs: hardware revision. */
static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sysfs_emit(buf, "%x\n", dev->mdev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);

/* sysfs: board id string. */
static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sysfs_emit(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
			  dev->mdev->board_id);
}
static DEVICE_ATTR_RO(board_id);

/* Attribute table exported through the device's sysfs group (tail of the
 * initializer continues in the next chunk line).
 */
static struct attribute *mlx5_class_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	/* Continuation of mlx5_class_attributes[]. */
	&dev_attr_fw_pages.attr,
	&dev_attr_reg_pages.attr,
	NULL,
};

static const struct attribute_group mlx5_attr_group = {
	.attrs = mlx5_class_attributes,
};

/* Deferred P_Key-change work: propagate the change to the GSI QP layer. */
static void pkey_change_handler(struct work_struct *work)
{
	struct mlx5_ib_port_resources *ports =
		container_of(work, struct mlx5_ib_port_resources,
			     pkey_change_work);

	if (!ports->gsi)
		/*
		 * We got this event before device was fully configured
		 * and MAD registration code wasn't called/finished yet.
		 */
		return;

	mlx5_ib_gsi_pkey_change(ports->gsi);
}

/* On a fatal device error, walk every QP and collect the CQs that still
 * have outstanding work so they can be armed/completed below; synchronizes
 * against QP create/destroy through reset_flow_resource_lock.
 */
static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
{
	struct mlx5_ib_qp *mqp;
	struct mlx5_ib_cq *send_mcq, *recv_mcq;
	struct mlx5_core_cq *mcq;
	struct list_head cq_armed_list;
	unsigned long flags_qp;
	unsigned long flags_cq;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_armed_list);

	/* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_armed_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_armed_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}
	/*At that point
	   all inflight post send were put to be executed as of we
	 * lock/unlock above locks Now need to arm all involved CQs.
	 */
	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
		mcq->comp(mcq, NULL);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}

/* Deferred delay-drop work: (re)program the delay-drop timeout in firmware;
 * on failure mark the feature inactive.
 */
static void delay_drop_handler(struct work_struct *work)
{
	int err;
	struct mlx5_ib_delay_drop *delay_drop =
		container_of(work, struct mlx5_ib_delay_drop,
			     delay_drop_work);

	atomic_inc(&delay_drop->events_cnt);

	mutex_lock(&delay_drop->lock);
	err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
	if (err) {
		mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
			     delay_drop->timeout);
		delay_drop->activate = false;
	}
	mutex_unlock(&delay_drop->lock);
}

/* Handle a GENERAL_EVENT EQE. Only the delay-drop timeout subtype does
 * anything, and only on Ethernet ports; note that ibev is not filled in —
 * general events are consumed here, not dispatched.
 */
static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
				 struct ib_event *ibev)
{
	u32 port = (eqe->data.port.port >> 4) & 0xf;

	switch (eqe->sub_type) {
	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
					    IB_LINK_LAYER_ETHERNET)
			schedule_work(&ibdev->delay_drop.delay_drop_work);
		break;
	default: /* do nothing */
		return;
	}
}

/* Translate a PORT_CHANGE EQE into an ib_event. Returns non-zero when the
 * event must not be dispatched (e.g. RoCE link events handled elsewhere, or
 * unknown subtypes). Body continues in the next chunk line.
 */
static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
			      struct ib_event *ibev)
{
	u32 port = (eqe->data.port.port >> 4) & 0xf;

	ibev->element.port_num = port;

	switch (eqe->sub_type) {
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		/* In RoCE, port up/down events are handled in
		 * mlx5_netdev_event().
		 */
		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
					    IB_LINK_LAYER_ETHERNET)
			return -EINVAL;

		ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
			      IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
		break;

	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		ibev->event = IB_EVENT_LID_CHANGE;
		break;

	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		ibev->event = IB_EVENT_PKEY_CHANGE;
		/* GSI P_Key update is deferred to the port's workqueue item. */
		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
		break;

	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		ibev->event = IB_EVENT_GID_CHANGE;
		break;

	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		ibev->event = IB_EVENT_CLIENT_REREGISTER;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Workqueue handler for core-device notifications: translate the low-level
 * event to an ib_event and dispatch it to consumers. Slave-port events are
 * resolved to their owning ibdev first.
 */
static void mlx5_ib_handle_event(struct work_struct *_work)
{
	struct mlx5_ib_event_work *work =
		container_of(_work, struct mlx5_ib_event_work, work);
	struct mlx5_ib_dev *ibdev;
	struct ib_event ibev;
	bool fatal = false;

	if (work->is_slave) {
		ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
		if (!ibdev)
			goto out;
	} else {
		ibdev = work->dev;
	}

	switch (work->event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx5_ib_handle_internal_error(ibdev);
		ibev.element.port_num  = (u8)(unsigned long)work->param;
		fatal = true;
		break;
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		if (handle_port_change(ibdev, work->param, &ibev))
			goto out;
		break;
	case MLX5_EVENT_TYPE_GENERAL_EVENT:
		handle_general_event(ibdev, work->param, &ibev);
		/* General events are fully handled above — fall through to
		 * the non-dispatching default.
		 */
		fallthrough;
	default:
		goto out;
	}

	ibev.device = &ibdev->ib_dev;

	if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n",
			     ibev.element.port_num);
		goto out;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);

	if (fatal)
		ibdev->ib_active = false;
out:
	kfree(work);
}

/* Notifier callback for the native device: defer handling to a workqueue
 * (called in atomic context, hence GFP_ATOMIC).
 */
static int mlx5_ib_event(struct notifier_block *nb,
			 unsigned long event, void *param)
{
	struct mlx5_ib_event_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5_ib_handle_event);
	work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
	work->is_slave = false;
	work->param = param;
	work->event = event;

	queue_work(mlx5_ib_event_wq, &work->work);

	return NOTIFY_OK;
}

/* Head of the slave-port notifier; body continues in the next chunk line. */
static int
/* Notifier callback for slave (multiport) devices: same deferral as
 * mlx5_ib_event() but records the multiport info instead of the ibdev.
 */
	   mlx5_ib_event_slave_port(struct notifier_block *nb,
				    unsigned long event, void *param)
{
	struct mlx5_ib_event_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5_ib_handle_event);
	work->mpi = container_of(nb, struct mlx5_ib_multiport_info,
				 mdev_events);
	work->is_slave = true;
	work->param = param;
	work->event = event;
	queue_work(mlx5_ib_event_wq, &work->work);

	return NOTIFY_OK;
}

/* Determine, per port, whether the device exposes an SMI. Virtualized IB
 * ports must query the vport context; otherwise IB ports always have SMI.
 */
static int set_has_smi_cap(struct mlx5_ib_dev *dev)
{
	struct mlx5_hca_vport_context vport_ctx;
	int err;
	int port;

	for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) {
		dev->port_caps[port - 1].has_smi = false;
		if (MLX5_CAP_GEN(dev->mdev, port_type) ==
		    MLX5_CAP_PORT_TYPE_IB) {
			if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
				err = mlx5_query_hca_vport_context(dev->mdev, 0,
								   port, 0,
								   &vport_ctx);
				if (err) {
					mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
						    port, err);
					return err;
				}
				dev->port_caps[port - 1].has_smi =
					vport_ctx.has_smi;
			} else {
				dev->port_caps[port - 1].has_smi = true;
			}
		}
	}
	return 0;
}

/* Query extended port capabilities for every port. */
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
	unsigned int port;

	rdma_for_each_port (&dev->ib_dev, port)
		mlx5_query_ext_port_caps(dev, port);
}

/* Map the firmware UMR fence capability to the fence mode the driver uses. */
static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
{
	switch (umr_fence_cap) {
	case MLX5_CAP_UMR_FENCE_NONE:
		return MLX5_FENCE_MODE_NONE;
	case MLX5_CAP_UMR_FENCE_SMALL:
		return MLX5_FENCE_MODE_INITIATOR_SMALL;
	default:
		return MLX5_FENCE_MODE_STRONG_ORDERING;
	}
}

/* Create the device-level resources (PD, CQ, XRCDs, SRQs) shared by
 * internal consumers; requires XRC support (tail continues in the next
 * chunk line).
 */
static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct ib_srq_init_attr attr;
	struct ib_device *ibdev;
	struct ib_cq_init_attr cq_attr = {.cqe = 1};
	int port;
	int ret = 0;

	ibdev = &dev->ib_dev;

	if (!MLX5_CAP_GEN(dev->mdev, xrc))
		return -EOPNOTSUPP;

	devr->p0 = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(devr->p0))
		return PTR_ERR(devr->p0);

	devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}

	ret =
/* Continuation of mlx5_ib_dev_res_init(): allocate both XRC domains, an
 * XRC SRQ and a basic SRQ, then arm the per-port P_Key-change work items.
 * Unwinds in reverse order on any failure.
 */
	      mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
	if (ret)
		goto error2;

	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
	if (ret)
		goto error3;

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.cq = devr->c0;

	devr->s0 = ib_create_srq(devr->p0, &attr);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto err_create;
	}

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;

	devr->s1 = ib_create_srq(devr->p0, &attr);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto error6;
	}

	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
		INIT_WORK(&devr->ports[port].pkey_change_work,
			  pkey_change_handler);

	return 0;

error6:
	ib_destroy_srq(devr->s0);
err_create:
	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
error3:
	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
error2:
	ib_destroy_cq(devr->c0);
error1:
	ib_dealloc_pd(devr->p0);
	return ret;
}

/* Tear down the device-level resources created by mlx5_ib_dev_res_init()
 * (tail continues in the next chunk line).
 */
static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	int port;

	/*
	 * Make sure no change P_Key work items are still executing.
	 *
	 * At this stage, the mlx5_ib_event should be unregistered
	 * and it ensures that no new works are added.
	 */
	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
		cancel_work_sync(&devr->ports[port].pkey_change_work);

	/* Reverse order of mlx5_ib_dev_res_init(). */
	ib_destroy_srq(devr->s1);
	ib_destroy_srq(devr->s0);
	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
	ib_destroy_cq(devr->c0);
	ib_dealloc_pd(devr->p0);
}

/* Compute the rdma-core capability flags for a port from the link layer,
 * RoCE L3/version capabilities and multiport state.
 */
static u32 get_core_cap_flags(struct ib_device *ibdev,
			      struct mlx5_hca_vport_context *rep)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
	/* Raw packet QPs are not exposed on multiport slaves. */
	bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
	u32 ret = 0;

	if (rep->grh_required)
		ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;

	if (ll == IB_LINK_LAYER_INFINIBAND)
		return ret | RDMA_CORE_PORT_IBA_IB;

	if (raw_support)
		ret |= RDMA_CORE_PORT_RAW_PACKET;

	/* RoCE flags require both IPv4 and IPv6 L3 support. */
	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
		return ret;

	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
		return ret;

	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE;

	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return ret;
}

/* ->get_port_immutable for the regular (non-representor) profile. */
static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
	struct mlx5_hca_vport_context rep = {0};
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	if (ll == IB_LINK_LAYER_INFINIBAND) {
		err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
						   &rep);
		if (err)
			return err;
	}

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

/* ->get_port_immutable for the switchdev representor profile (body
 * continues in the next chunk line).
 */
static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	/* Set before ib_query_port(); the core consults core_cap_flags while
	 * querying. NOTE(review): it is assigned again below — the second
	 * store looks redundant; confirm before removing.
	 */
	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;

	return 0;
}

/* Format the firmware version string (major.minor.subminor). */
static void get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct mlx5_ib_dev *dev =
		container_of(ibdev, struct mlx5_ib_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d", fw_rev_maj(dev->mdev),
		 fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}

/* Set up LAG demux steering when the device is part of an active LAG:
 * creates the vport LAG in firmware and a demux flow table.
 */
static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
								 MLX5_FLOW_NAMESPACE_LAG);
	struct mlx5_flow_table *ft;
	int err;

	/* Nothing to do without a LAG namespace or when LAG is inactive. */
	if (!ns || !mlx5_lag_is_active(mdev))
		return 0;

	err = mlx5_cmd_create_vport_lag(mdev);
	if (err)
		return err;

	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_destroy_vport_lag;
	}

	dev->flow_db->lag_demux_ft = ft;
	dev->lag_active = true;
	return 0;

err_destroy_vport_lag:
	mlx5_cmd_destroy_vport_lag(mdev);
	return err;
}

/* Undo mlx5_eth_lag_init(): destroy the demux table and the vport LAG. */
static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;

	if (dev->lag_active) {
		dev->lag_active = false;

		mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
		dev->flow_db->lag_demux_ft = NULL;

		mlx5_cmd_destroy_vport_lag(mdev);
	}
}

/* Register the per-port netdevice notifier; notifier_call doubles as the
 * "registered" flag (cleared on failure and by the remove path).
 */
static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
{
	int err;

	dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
	err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
	if (err) {
		dev->port[port_num].roce.nb.notifier_call = NULL;
		return err;
	}

	return 0;
}

/* Unregister the per-port netdevice notifier if it was registered. */
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
{
	if (dev->port[port_num].roce.nb.notifier_call) {
		unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
		dev->port[port_num].roce.nb.notifier_call = NULL;
	}
}

/* Head of mlx5_enable_eth(); body continues in the next chunk line. */
static int
/* Enable RoCE on the NIC vport (unless a representor or the raw-eth
 * profile) and set up LAG steering; undoes RoCE enable on LAG failure.
 */
	   mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
	int err;

	if (!dev->is_rep && dev->profile != &raw_eth_profile) {
		err = mlx5_nic_vport_enable_roce(dev->mdev);
		if (err)
			return err;
	}

	err = mlx5_eth_lag_init(dev);
	if (err)
		goto err_disable_roce;

	return 0;

err_disable_roce:
	if (!dev->is_rep && dev->profile != &raw_eth_profile)
		mlx5_nic_vport_disable_roce(dev->mdev);

	return err;
}

/* Reverse of mlx5_enable_eth(). */
static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
	mlx5_eth_lag_cleanup(dev);
	if (!dev->is_rep && dev->profile != &raw_eth_profile)
		mlx5_nic_vport_disable_roce(dev->mdev);
}

/* rdma_netdev allocation parameters; only IPoIB is supported. */
static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
				 enum rdma_netdev_t type,
				 struct rdma_netdev_alloc_params *params)
{
	if (type != RDMA_NETDEV_IPOIB)
		return -EOPNOTSUPP;

	return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
}

/* debugfs read: current delay-drop timeout in microseconds. */
static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
				       size_t count, loff_t *pos)
{
	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
	char lbuf[20];
	int len;

	len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
	return simple_read_from_buffer(buf, count, pos, lbuf, len);
}

/* debugfs write: set the delay-drop timeout, rounded up to 100us units and
 * clamped to the maximum.
 */
static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
					size_t count, loff_t *pos)
{
	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
	u32 timeout;
	u32 var;

	if (kstrtouint_from_user(buf, count, 0, &var))
		return -EFAULT;

	timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
			1000);
	if (timeout != var)
		mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
			    timeout);

	delay_drop->timeout = timeout;

	return count;
}

static const struct file_operations fops_delay_drop_timeout = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= delay_drop_timeout_write,
	.read	= delay_drop_timeout_read,
};

/* Detach a slave mlx5 core device from the multiport IB device (head only;
 * body continues in the next chunk line). Caller holds
 * mlx5_ib_multiport_mutex.
 */
static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
				      struct mlx5_ib_multiport_info *mpi)
{
	u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
	struct mlx5_ib_port *port =
/* Continuation of mlx5_ib_unbind_slave_port(): clear the mpi<->ibdev link
 * under mpi_lock, tear down notifiers, wait for outstanding references to
 * drain, then unaffiliate the vport in firmware.
 */
		&ibdev->port[port_num];
	int comps;
	int err;
	int i;

	lockdep_assert_held(&mlx5_ib_multiport_mutex);

	mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);

	spin_lock(&port->mp.mpi_lock);
	if (!mpi->ibdev) {
		/* Already unbound. */
		spin_unlock(&port->mp.mpi_lock);
		return;
	}

	mpi->ibdev = NULL;

	spin_unlock(&port->mp.mpi_lock);
	if (mpi->mdev_events.notifier_call)
		mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
	mpi->mdev_events.notifier_call = NULL;
	mlx5_remove_netdev_notifier(ibdev, port_num);
	spin_lock(&port->mp.mpi_lock);

	comps = mpi->mdev_refcnt;
	if (comps) {
		/* Wait (unlocked) for each outstanding reference holder to
		 * signal completion before finishing the unbind.
		 */
		mpi->unaffiliate = true;
		init_completion(&mpi->unref_comp);
		spin_unlock(&port->mp.mpi_lock);

		for (i = 0; i < comps; i++)
			wait_for_completion(&mpi->unref_comp);

		spin_lock(&port->mp.mpi_lock);
		mpi->unaffiliate = false;
	}

	port->mp.mpi = NULL;

	spin_unlock(&port->mp.mpi_lock);

	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);

	mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
	/* Log an error, still needed to cleanup the pointers and add
	 * it back to the list.
	 */
	if (err)
		mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
			    port_num + 1);

	ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
}

/* Attach a slave mlx5 core device to this multiport IB device. Returns
 * true on success; on any failure the partial state is unwound via
 * mlx5_ib_unbind_slave_port(). Caller holds mlx5_ib_multiport_mutex.
 */
static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
				    struct mlx5_ib_multiport_info *mpi)
{
	u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
	int err;

	lockdep_assert_held(&mlx5_ib_multiport_mutex);

	spin_lock(&ibdev->port[port_num].mp.mpi_lock);
	if (ibdev->port[port_num].mp.mpi) {
		mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
			    port_num + 1);
		spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
		return false;
	}

	ibdev->port[port_num].mp.mpi = mpi;
	mpi->ibdev = ibdev;
	mpi->mdev_events.notifier_call = NULL;
	spin_unlock(&ibdev->port[port_num].mp.mpi_lock);

	err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
	if (err)
		goto unbind;

	err = mlx5_add_netdev_notifier(ibdev, port_num);
	if (err) {
		mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
			    port_num + 1);
		goto unbind;
	}

	mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
	mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);

	mlx5_ib_init_cong_debugfs(ibdev, port_num);

	return true;

unbind:
	mlx5_ib_unbind_slave_port(ibdev, mpi);
	return false;
}

/* Initialize multiport-master mode: enable RoCE, then bind any already
 * discovered unaffiliated slave ports to this device (head only; body
 * continues in the next chunk line).
 */
static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
{
	u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
							  port_num + 1);
	struct mlx5_ib_multiport_info *mpi;
	int err;
	u32 i;

	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return 0;

	err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
						     &dev->sys_image_guid);
	if (err)
		return err;

	err = mlx5_nic_vport_enable_roce(dev->mdev);
	if (err)
		return err;

	mutex_lock(&mlx5_ib_multiport_mutex);
	for (i = 0; i < dev->num_ports; i++) {
		bool bound = false;

		/* build a stub multiport info struct for the native port.
		 */
		if (i == port_num) {
			mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
			if (!mpi) {
				mutex_unlock(&mlx5_ib_multiport_mutex);
				mlx5_nic_vport_disable_roce(dev->mdev);
				return -ENOMEM;
			}

			mpi->is_master = true;
			mpi->mdev = dev->mdev;
			mpi->sys_image_guid = dev->sys_image_guid;
			dev->port[i].mp.mpi = mpi;
			mpi->ibdev = dev;
			mpi = NULL;
			continue;
		}

		/* Find an unaffiliated mpi with matching system image GUID
		 * whose native port number corresponds to this port.
		 */
		list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
				    list) {
			if (dev->sys_image_guid == mpi->sys_image_guid &&
			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
				bound = mlx5_ib_bind_slave_port(dev, mpi);
			}

			if (bound) {
				dev_dbg(mpi->mdev->device,
					"removing port from unaffiliated list.\n");
				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
				list_del(&mpi->list);
				break;
			}
		}
		if (!bound)
			mlx5_ib_dbg(dev, "no free port found for port %d\n",
				    i + 1);
	}

	list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return err;
}

/* Reverse of mlx5_ib_init_multiport_master(): free the native-port stub,
 * return bound slaves to the unaffiliated list, and disable RoCE.
 */
static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
{
	u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
							  port_num + 1);
	u32 i;

	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	mutex_lock(&mlx5_ib_multiport_mutex);
	for (i = 0; i < dev->num_ports; i++) {
		if (dev->port[i].mp.mpi) {
			/* Destroy the native port stub */
			if (i == port_num) {
				kfree(dev->port[i].mp.mpi);
				dev->port[i].mp.mpi = NULL;
			} else {
				mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
					    i + 1);
				list_add_tail(&dev->port[i].mp.mpi->list,
					      &mlx5_ib_unaffiliated_port_list);
				mlx5_ib_unbind_slave_port(dev,
							  dev->port[i].mp.mpi);
			}
		}
	}

	mlx5_ib_dbg(dev, "removing from devlist\n");
	list_del(&dev->ib_dev_list);
	mutex_unlock(&mlx5_ib_multiport_mutex);

	mlx5_nic_vport_disable_roce(dev->mdev);
}

/* uverbs cleanup hook for mmap-entry backed objects: drop the entry from
 * the mmap xarray (the final free happens via mlx5_ib_mmap_free()).
 */
static int mmap_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	struct mlx5_user_mmap_entry *obj = uobject->object;

	rdma_user_mmap_entry_remove(&obj->rdma_entry);
	return 0;
}

/* Head of mlx5_rdma_user_mmap_entry_insert(); body continues in the next
 * chunk line.
 */
static int
/* Insert a user mmap entry into the reserved driver offset range. */
	   mlx5_rdma_user_mmap_entry_insert(struct mlx5_ib_ucontext *c,
					    struct mlx5_user_mmap_entry *entry,
					    size_t length)
{
	return rdma_user_mmap_entry_insert_range(
		&c->ibucontext, &entry->rdma_entry, length,
		(MLX5_IB_MMAP_OFFSET_START << 16),
		((MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1));
}

/* Allocate a VAR (virtio emulation doorbell) page: claim a free slot in
 * the VAR bitmap and register an mmap entry for it.
 */
static struct mlx5_user_mmap_entry *
alloc_var_entry(struct mlx5_ib_ucontext *c)
{
	struct mlx5_user_mmap_entry *entry;
	struct mlx5_var_table *var_table;
	u32 page_idx;
	int err;

	var_table = &to_mdev(c->ibucontext.device)->var_table;
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&var_table->bitmap_lock);
	page_idx = find_first_zero_bit(var_table->bitmap,
				       var_table->num_var_hw_entries);
	if (page_idx >= var_table->num_var_hw_entries) {
		err = -ENOSPC;
		mutex_unlock(&var_table->bitmap_lock);
		goto end;
	}

	set_bit(page_idx, var_table->bitmap);
	mutex_unlock(&var_table->bitmap_lock);

	entry->address = var_table->hw_start_addr +
				(page_idx * var_table->stride_size);
	entry->page_idx = page_idx;
	entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR;

	err = mlx5_rdma_user_mmap_entry_insert(c, entry,
					       var_table->stride_size);
	if (err)
		goto err_insert;

	return entry;

err_insert:
	mutex_lock(&var_table->bitmap_lock);
	clear_bit(page_idx, var_table->bitmap);
	mutex_unlock(&var_table->bitmap_lock);
end:
	kfree(entry);
	return ERR_PTR(err);
}

/* uverbs method: allocate a VAR object and report its mmap offset, page id
 * and length to user space (tail continues in the next chunk line).
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
	struct mlx5_ib_ucontext *c;
	struct mlx5_user_mmap_entry *entry;
	u64 mmap_offset;
	u32 length;
	int err;

	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
	if (IS_ERR(c))
		return PTR_ERR(c);

	entry = alloc_var_entry(c);
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	mmap_offset = mlx5_entry_to_mmap_offset(entry);
	length = entry->rdma_entry.npages * PAGE_SIZE;
	uobj->object = entry;
	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);

	err =
/* Continuation of the VAR_OBJ_ALLOC handler: copy offset, page id and
 * length back to user space.
 */
	      uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
			     &mmap_offset, sizeof(mmap_offset));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
			     &entry->page_idx, sizeof(entry->page_idx));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
			     &length, sizeof(length));
	return err;
}

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_VAR_OBJ_ALLOC,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE,
			MLX5_IB_OBJECT_VAR,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
			    UVERBS_ATTR_TYPE(u64),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_VAR_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_VAR,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
			    UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
			    &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));

/* The VAR object is exposed only when the device supports virtio-net
 * queue emulation objects.
 */
static bool var_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
			MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q);
}

/* Allocate a UAR via firmware and register an mmap entry for it; BF
 * allocations map write-combining, others non-cached (tail continues in
 * the next chunk line).
 */
static struct mlx5_user_mmap_entry *
alloc_uar_entry(struct mlx5_ib_ucontext *c,
		enum mlx5_ib_uapi_uar_alloc_type alloc_type)
{
	struct mlx5_user_mmap_entry *entry;
	struct mlx5_ib_dev *dev;
	u32 uar_index;
	int err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	dev = to_mdev(c->ibucontext.device);
	err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
	if (err)
		goto end;

	entry->page_idx = uar_index;
	entry->address = uar_index2paddress(dev, uar_index);
	if (alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
		entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_WC;
	else
		entry->mmap_flag =
/* Continuation of alloc_uar_entry(): register the mmap entry, releasing
 * the UAR on failure.
 */
			MLX5_IB_MMAP_TYPE_UAR_NC;

	err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE);
	if (err)
		goto err_insert;

	return entry;

err_insert:
	mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
end:
	kfree(entry);
	return ERR_PTR(err);
}

/* uverbs method: allocate a UAR (BF/WC or NC) and report its mmap offset,
 * page id and length to user space.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
	enum mlx5_ib_uapi_uar_alloc_type alloc_type;
	struct mlx5_ib_ucontext *c;
	struct mlx5_user_mmap_entry *entry;
	u64 mmap_offset;
	u32 length;
	int err;

	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
	if (IS_ERR(c))
		return PTR_ERR(c);

	err = uverbs_get_const(&alloc_type, attrs,
			       MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE);
	if (err)
		return err;

	if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF &&
	    alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC)
		return -EOPNOTSUPP;

	/* BlueFlame UARs need write-combining support. */
	if (!to_mdev(c->ibucontext.device)->wc_support &&
	    alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
		return -EOPNOTSUPP;

	entry = alloc_uar_entry(c, alloc_type);
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	mmap_offset = mlx5_entry_to_mmap_offset(entry);
	length = entry->rdma_entry.npages * PAGE_SIZE;
	uobj->object = entry;
	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
			     &mmap_offset, sizeof(mmap_offset));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
			     &entry->page_idx, sizeof(entry->page_idx));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
			     &length, sizeof(length));
	return err;
}

/* UAR object method declarations (continue in the next chunk line). */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_UAR_OBJ_ALLOC,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE,
			MLX5_IB_OBJECT_UAR,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE,
			     enum mlx5_ib_uapi_uar_alloc_type,
			     UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH, UVERBS_ATTR_TYPE(u32), UA_MANDATORY), UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET, UVERBS_ATTR_TYPE(u64), UA_MANDATORY)); DECLARE_UVERBS_NAMED_METHOD_DESTROY( MLX5_IB_METHOD_UAR_OBJ_DESTROY, UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE, MLX5_IB_OBJECT_UAR, UVERBS_ACCESS_DESTROY, UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR, UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup), &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC), &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY)); ADD_UVERBS_ATTRIBUTES_SIMPLE( mlx5_ib_flow_action, UVERBS_OBJECT_FLOW_ACTION, UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, enum mlx5_ib_uapi_flow_action_flags)); ADD_UVERBS_ATTRIBUTES_SIMPLE( mlx5_ib_query_context, UVERBS_OBJECT_DEVICE, UVERBS_METHOD_QUERY_CONTEXT, UVERBS_ATTR_PTR_OUT( MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX, UVERBS_ATTR_STRUCT(struct mlx5_ib_alloc_ucontext_resp, dump_fill_mkey), UA_MANDATORY)); static const struct uapi_definition mlx5_ib_defs[] = { UAPI_DEF_CHAIN(mlx5_ib_devx_defs), UAPI_DEF_CHAIN(mlx5_ib_flow_defs), UAPI_DEF_CHAIN(mlx5_ib_qos_defs), UAPI_DEF_CHAIN(mlx5_ib_std_types_defs), UAPI_DEF_CHAIN(mlx5_ib_dm_defs), UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, &mlx5_ib_flow_action), UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context), UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR, UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)), UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR), {} }; static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) { mlx5_ib_cleanup_multiport_master(dev); WARN_ON(!xa_empty(&dev->odp_mkeys)); mutex_destroy(&dev->cap_mask_mutex); WARN_ON(!xa_empty(&dev->sig_mrs)); WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES)); } static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; int err; int i; dev->ib_dev.node_type = 
RDMA_NODE_IB_CA; dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.dev.parent = mdev->device; dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES; for (i = 0; i < dev->num_ports; i++) { spin_lock_init(&dev->port[i].mp.mpi_lock); rwlock_init(&dev->port[i].roce.netdev_lock); dev->port[i].roce.dev = dev; dev->port[i].roce.native_port_num = i + 1; dev->port[i].roce.last_port_state = IB_PORT_DOWN; } err = mlx5_ib_init_multiport_master(dev); if (err) return err; err = set_has_smi_cap(dev); if (err) goto err_mp; err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len); if (err) goto err_mp; if (mlx5_use_mad_ifc(dev)) get_ext_port_caps(dev); dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev); mutex_init(&dev->cap_mask_mutex); INIT_LIST_HEAD(&dev->qp_list); spin_lock_init(&dev->reset_flow_resource_lock); xa_init(&dev->odp_mkeys); xa_init(&dev->sig_mrs); atomic_set(&dev->mkey_var, 0); spin_lock_init(&dev->dm.lock); dev->dm.dev = mdev; return 0; err_mp: mlx5_ib_cleanup_multiport_master(dev); return err; } static int mlx5_ib_enable_driver(struct ib_device *dev) { struct mlx5_ib_dev *mdev = to_mdev(dev); int ret; ret = mlx5_ib_test_wc(mdev); mlx5_ib_dbg(mdev, "Write-Combining %s", mdev->wc_support ? 
"supported" : "not supported"); return ret; } static const struct ib_device_ops mlx5_ib_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_MLX5, .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION, .add_gid = mlx5_ib_add_gid, .alloc_mr = mlx5_ib_alloc_mr, .alloc_mr_integrity = mlx5_ib_alloc_mr_integrity, .alloc_pd = mlx5_ib_alloc_pd, .alloc_ucontext = mlx5_ib_alloc_ucontext, .attach_mcast = mlx5_ib_mcg_attach, .check_mr_status = mlx5_ib_check_mr_status, .create_ah = mlx5_ib_create_ah, .create_cq = mlx5_ib_create_cq, .create_qp = mlx5_ib_create_qp, .create_srq = mlx5_ib_create_srq, .create_user_ah = mlx5_ib_create_ah, .dealloc_pd = mlx5_ib_dealloc_pd, .dealloc_ucontext = mlx5_ib_dealloc_ucontext, .del_gid = mlx5_ib_del_gid, .dereg_mr = mlx5_ib_dereg_mr, .destroy_ah = mlx5_ib_destroy_ah, .destroy_cq = mlx5_ib_destroy_cq, .destroy_qp = mlx5_ib_destroy_qp, .destroy_srq = mlx5_ib_destroy_srq, .detach_mcast = mlx5_ib_mcg_detach, .disassociate_ucontext = mlx5_ib_disassociate_ucontext, .drain_rq = mlx5_ib_drain_rq, .drain_sq = mlx5_ib_drain_sq, .device_group = &mlx5_attr_group, .enable_driver = mlx5_ib_enable_driver, .get_dev_fw_str = get_dev_fw_str, .get_dma_mr = mlx5_ib_get_dma_mr, .get_link_layer = mlx5_ib_port_link_layer, .map_mr_sg = mlx5_ib_map_mr_sg, .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi, .mmap = mlx5_ib_mmap, .mmap_free = mlx5_ib_mmap_free, .modify_cq = mlx5_ib_modify_cq, .modify_device = mlx5_ib_modify_device, .modify_port = mlx5_ib_modify_port, .modify_qp = mlx5_ib_modify_qp, .modify_srq = mlx5_ib_modify_srq, .poll_cq = mlx5_ib_poll_cq, .post_recv = mlx5_ib_post_recv_nodrain, .post_send = mlx5_ib_post_send_nodrain, .post_srq_recv = mlx5_ib_post_srq_recv, .process_mad = mlx5_ib_process_mad, .query_ah = mlx5_ib_query_ah, .query_device = mlx5_ib_query_device, .query_gid = mlx5_ib_query_gid, .query_pkey = mlx5_ib_query_pkey, .query_qp = mlx5_ib_query_qp, .query_srq = mlx5_ib_query_srq, .query_ucontext = mlx5_ib_query_ucontext, .reg_user_mr = mlx5_ib_reg_user_mr, 
.reg_user_mr_dmabuf = mlx5_ib_reg_user_mr_dmabuf, .req_notify_cq = mlx5_ib_arm_cq, .rereg_user_mr = mlx5_ib_rereg_user_mr, .resize_cq = mlx5_ib_resize_cq, INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs), INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp), INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext), }; static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = { .rdma_netdev_get_params = mlx5_ib_rn_get_params, }; static const struct ib_device_ops mlx5_ib_dev_sriov_ops = { .get_vf_config = mlx5_ib_get_vf_config, .get_vf_guid = mlx5_ib_get_vf_guid, .get_vf_stats = mlx5_ib_get_vf_stats, .set_vf_guid = mlx5_ib_set_vf_guid, .set_vf_link_state = mlx5_ib_set_vf_link_state, }; static const struct ib_device_ops mlx5_ib_dev_mw_ops = { .alloc_mw = mlx5_ib_alloc_mw, .dealloc_mw = mlx5_ib_dealloc_mw, INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw), }; static const struct ib_device_ops mlx5_ib_dev_xrc_ops = { .alloc_xrcd = mlx5_ib_alloc_xrcd, .dealloc_xrcd = mlx5_ib_dealloc_xrcd, INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd), }; static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; struct mlx5_var_table *var_table = &dev->var_table; u8 log_doorbell_bar_size; u8 log_doorbell_stride; u64 bar_size; log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev, log_doorbell_bar_size); log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev, log_doorbell_stride); var_table->hw_start_addr = dev->mdev->bar_addr + MLX5_CAP64_DEV_VDPA_EMULATION(mdev, doorbell_bar_offset); bar_size = (1ULL << log_doorbell_bar_size) * 4096; var_table->stride_size = 1ULL << log_doorbell_stride; var_table->num_var_hw_entries = div_u64(bar_size, var_table->stride_size); mutex_init(&var_table->bitmap_lock); var_table->bitmap = 
bitmap_zalloc(var_table->num_var_hw_entries, GFP_KERNEL); return (var_table->bitmap) ? 0 : -ENOMEM; } static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev) { bitmap_free(dev->var_table.bitmap); } static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; int err; if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) && IS_ENABLED(CONFIG_MLX5_CORE_IPOIB)) ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ipoib_enhanced_ops); if (mlx5_core_is_pf(mdev)) ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops); dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence)); if (MLX5_CAP_GEN(mdev, imaicl)) ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops); if (MLX5_CAP_GEN(mdev, xrc)) ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops); if (MLX5_CAP_DEV_MEM(mdev, memic) || MLX5_CAP_GEN_64(dev->mdev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops); ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops); if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)) dev->ib_dev.driver_def = mlx5_ib_defs; err = init_node_data(dev); if (err) return err; if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) || MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) mutex_init(&dev->lb.mutex); if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) { err = mlx5_ib_init_var_table(dev); if (err) return err; } dev->ib_dev.use_cq_dim = true; return 0; } static const struct ib_device_ops mlx5_ib_dev_port_ops = { .get_port_immutable = mlx5_port_immutable, .query_port = mlx5_ib_query_port, }; static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev) { ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops); return 0; } static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = { .get_port_immutable = mlx5_port_rep_immutable, .query_port = mlx5_ib_rep_query_port, .query_pkey = 
mlx5_ib_rep_query_pkey, }; static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev) { ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops); return 0; } static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = { .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table, .create_wq = mlx5_ib_create_wq, .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table, .destroy_wq = mlx5_ib_destroy_wq, .get_netdev = mlx5_ib_get_netdev, .modify_wq = mlx5_ib_modify_wq, INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl), }; static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; enum rdma_link_layer ll; int port_type_cap; u32 port_num = 0; int err; port_type_cap = MLX5_CAP_GEN(mdev, port_type); ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); if (ll == IB_LINK_LAYER_ETHERNET) { ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops); port_num = mlx5_core_native_port_num(dev->mdev) - 1; /* Register only for native ports */ err = mlx5_add_netdev_notifier(dev, port_num); if (err) return err; err = mlx5_enable_eth(dev); if (err) goto cleanup; } return 0; cleanup: mlx5_remove_netdev_notifier(dev, port_num); return err; } static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; enum rdma_link_layer ll; int port_type_cap; u32 port_num; port_type_cap = MLX5_CAP_GEN(mdev, port_type); ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); if (ll == IB_LINK_LAYER_ETHERNET) { mlx5_disable_eth(dev); port_num = mlx5_core_native_port_num(dev->mdev) - 1; mlx5_remove_netdev_notifier(dev, port_num); } } static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev) { mlx5_ib_init_cong_debugfs(dev, mlx5_core_native_port_num(dev->mdev) - 1); return 0; } static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev) { mlx5_ib_cleanup_cong_debugfs(dev, mlx5_core_native_port_num(dev->mdev) - 1); } static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev) { 
dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev); return PTR_ERR_OR_ZERO(dev->mdev->priv.uar); } static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev) { mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); } static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev) { int err; err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false); if (err) return err; err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true); if (err) mlx5_free_bfreg(dev->mdev, &dev->bfreg); return err; } static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev) { mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); mlx5_free_bfreg(dev->mdev, &dev->bfreg); } static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) { const char *name; if (!mlx5_lag_is_active(dev->mdev)) name = "mlx5_%d"; else name = "mlx5_bond_%d"; return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev); } static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev) { int err; err = mlx5_mr_cache_cleanup(dev); if (err) mlx5_ib_warn(dev, "mr cache cleanup failed\n"); if (dev->umrc.qp) ib_destroy_qp(dev->umrc.qp); if (dev->umrc.cq) ib_free_cq(dev->umrc.cq); if (dev->umrc.pd) ib_dealloc_pd(dev->umrc.pd); } static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) { ib_unregister_device(&dev->ib_dev); } enum { MAX_UMR_WR = 128, }; static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev) { struct ib_qp_init_attr *init_attr = NULL; struct ib_qp_attr *attr = NULL; struct ib_pd *pd; struct ib_cq *cq; struct ib_qp *qp; int ret; attr = kzalloc(sizeof(*attr), GFP_KERNEL); init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); if (!attr || !init_attr) { ret = -ENOMEM; goto error_0; } pd = ib_alloc_pd(&dev->ib_dev, 0); if (IS_ERR(pd)) { mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n"); ret = PTR_ERR(pd); goto error_0; } cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ); if (IS_ERR(cq)) { mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); ret = 
PTR_ERR(cq); goto error_2; } init_attr->send_cq = cq; init_attr->recv_cq = cq; init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; init_attr->cap.max_send_wr = MAX_UMR_WR; init_attr->cap.max_send_sge = 1; init_attr->qp_type = MLX5_IB_QPT_REG_UMR; init_attr->port_num = 1; qp = ib_create_qp(pd, init_attr); if (IS_ERR(qp)) { mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n"); ret = PTR_ERR(qp); goto error_3; } attr->qp_state = IB_QPS_INIT; attr->port_num = 1; ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT); if (ret) { mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); goto error_4; } memset(attr, 0, sizeof(*attr)); attr->qp_state = IB_QPS_RTR; attr->path_mtu = IB_MTU_256; ret = ib_modify_qp(qp, attr, IB_QP_STATE); if (ret) { mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n"); goto error_4; } memset(attr, 0, sizeof(*attr)); attr->qp_state = IB_QPS_RTS; ret = ib_modify_qp(qp, attr, IB_QP_STATE); if (ret) { mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n"); goto error_4; } dev->umrc.qp = qp; dev->umrc.cq = cq; dev->umrc.pd = pd; sema_init(&dev->umrc.sem, MAX_UMR_WR); ret = mlx5_mr_cache_init(dev); if (ret) { mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); goto error_4; } kfree(attr); kfree(init_attr); return 0; error_4: ib_destroy_qp(qp); dev->umrc.qp = NULL; error_3: ib_free_cq(cq); dev->umrc.cq = NULL; error_2: ib_dealloc_pd(pd); dev->umrc.pd = NULL; error_0: kfree(attr); kfree(init_attr); return ret; } static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev) { struct dentry *root; if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)) return 0; mutex_init(&dev->delay_drop.lock); dev->delay_drop.dev = dev; dev->delay_drop.activate = false; dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000; INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler); atomic_set(&dev->delay_drop.rqs_cnt, 0); atomic_set(&dev->delay_drop.events_cnt, 0); if (!mlx5_debugfs_root) return 0; root = debugfs_create_dir("delay_drop", 
dev->mdev->priv.dbg_root); dev->delay_drop.dir_debugfs = root; debugfs_create_atomic_t("num_timeout_events", 0400, root, &dev->delay_drop.events_cnt); debugfs_create_atomic_t("num_rqs", 0400, root, &dev->delay_drop.rqs_cnt); debugfs_create_file("timeout", 0600, root, &dev->delay_drop, &fops_delay_drop_timeout); return 0; } static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev) { if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)) return; cancel_work_sync(&dev->delay_drop.delay_drop_work); if (!dev->delay_drop.dir_debugfs) return; debugfs_remove_recursive(dev->delay_drop.dir_debugfs); dev->delay_drop.dir_debugfs = NULL; } static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev) { dev->mdev_events.notifier_call = mlx5_ib_event; mlx5_notifier_register(dev->mdev, &dev->mdev_events); return 0; } static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev) { mlx5_notifier_unregister(dev->mdev, &dev->mdev_events); } void __mlx5_ib_remove(struct mlx5_ib_dev *dev, const struct mlx5_ib_profile *profile, int stage) { dev->ib_active = false; /* Number of stages to cleanup */ while (stage) { stage--; if (profile->stage[stage].cleanup) profile->stage[stage].cleanup(dev); } kfree(dev->port); ib_dealloc_device(&dev->ib_dev); } int __mlx5_ib_add(struct mlx5_ib_dev *dev, const struct mlx5_ib_profile *profile) { int err; int i; dev->profile = profile; for (i = 0; i < MLX5_IB_STAGE_MAX; i++) { if (profile->stage[i].init) { err = profile->stage[i].init(dev); if (err) goto err_out; } } dev->ib_active = true; return 0; err_out: /* Clean up stages which were initialized */ while (i) { i--; if (profile->stage[i].cleanup) profile->stage[i].cleanup(dev); } return -ENOMEM; } static const struct mlx5_ib_profile pf_profile = { STAGE_CREATE(MLX5_IB_STAGE_INIT, mlx5_ib_stage_init_init, mlx5_ib_stage_init_cleanup), STAGE_CREATE(MLX5_IB_STAGE_FS, mlx5_ib_fs_init, mlx5_ib_fs_cleanup), STAGE_CREATE(MLX5_IB_STAGE_CAPS, mlx5_ib_stage_caps_init, 
mlx5_ib_stage_caps_cleanup), STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, mlx5_ib_stage_non_default_cb, NULL), STAGE_CREATE(MLX5_IB_STAGE_ROCE, mlx5_ib_roce_init, mlx5_ib_roce_cleanup), STAGE_CREATE(MLX5_IB_STAGE_QP, mlx5_init_qp_table, mlx5_cleanup_qp_table), STAGE_CREATE(MLX5_IB_STAGE_SRQ, mlx5_init_srq_table, mlx5_cleanup_srq_table), STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, mlx5_ib_dev_res_init, mlx5_ib_dev_res_cleanup), STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, mlx5_ib_stage_dev_notifier_init, mlx5_ib_stage_dev_notifier_cleanup), STAGE_CREATE(MLX5_IB_STAGE_ODP, mlx5_ib_odp_init_one, mlx5_ib_odp_cleanup_one), STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, mlx5_ib_counters_init, mlx5_ib_counters_cleanup), STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS, mlx5_ib_stage_cong_debugfs_init, mlx5_ib_stage_cong_debugfs_cleanup), STAGE_CREATE(MLX5_IB_STAGE_UAR, mlx5_ib_stage_uar_init, mlx5_ib_stage_uar_cleanup), STAGE_CREATE(MLX5_IB_STAGE_BFREG, mlx5_ib_stage_bfrag_init, mlx5_ib_stage_bfrag_cleanup), STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, NULL, mlx5_ib_stage_pre_ib_reg_umr_cleanup), STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID, mlx5_ib_devx_init, mlx5_ib_devx_cleanup), STAGE_CREATE(MLX5_IB_STAGE_IB_REG, mlx5_ib_stage_ib_reg_init, mlx5_ib_stage_ib_reg_cleanup), STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, mlx5_ib_stage_post_ib_reg_umr_init, NULL), STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, mlx5_ib_stage_delay_drop_init, mlx5_ib_stage_delay_drop_cleanup), STAGE_CREATE(MLX5_IB_STAGE_RESTRACK, mlx5_ib_restrack_init, NULL), }; const struct mlx5_ib_profile raw_eth_profile = { STAGE_CREATE(MLX5_IB_STAGE_INIT, mlx5_ib_stage_init_init, mlx5_ib_stage_init_cleanup), STAGE_CREATE(MLX5_IB_STAGE_FS, mlx5_ib_fs_init, mlx5_ib_fs_cleanup), STAGE_CREATE(MLX5_IB_STAGE_CAPS, mlx5_ib_stage_caps_init, mlx5_ib_stage_caps_cleanup), STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, mlx5_ib_stage_raw_eth_non_default_cb, NULL), STAGE_CREATE(MLX5_IB_STAGE_ROCE, mlx5_ib_roce_init, mlx5_ib_roce_cleanup), 
STAGE_CREATE(MLX5_IB_STAGE_QP, mlx5_init_qp_table, mlx5_cleanup_qp_table), STAGE_CREATE(MLX5_IB_STAGE_SRQ, mlx5_init_srq_table, mlx5_cleanup_srq_table), STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, mlx5_ib_dev_res_init, mlx5_ib_dev_res_cleanup), STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, mlx5_ib_stage_dev_notifier_init, mlx5_ib_stage_dev_notifier_cleanup), STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, mlx5_ib_counters_init, mlx5_ib_counters_cleanup), STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS, mlx5_ib_stage_cong_debugfs_init, mlx5_ib_stage_cong_debugfs_cleanup), STAGE_CREATE(MLX5_IB_STAGE_UAR, mlx5_ib_stage_uar_init, mlx5_ib_stage_uar_cleanup), STAGE_CREATE(MLX5_IB_STAGE_BFREG, mlx5_ib_stage_bfrag_init, mlx5_ib_stage_bfrag_cleanup), STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, NULL, mlx5_ib_stage_pre_ib_reg_umr_cleanup), STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID, mlx5_ib_devx_init, mlx5_ib_devx_cleanup), STAGE_CREATE(MLX5_IB_STAGE_IB_REG, mlx5_ib_stage_ib_reg_init, mlx5_ib_stage_ib_reg_cleanup), STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, mlx5_ib_stage_post_ib_reg_umr_init, NULL), STAGE_CREATE(MLX5_IB_STAGE_RESTRACK, mlx5_ib_restrack_init, NULL), }; static int mlx5r_mp_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev); struct mlx5_core_dev *mdev = idev->mdev; struct mlx5_ib_multiport_info *mpi; struct mlx5_ib_dev *dev; bool bound = false; int err; mpi = kzalloc(sizeof(*mpi), GFP_KERNEL); if (!mpi) return -ENOMEM; mpi->mdev = mdev; err = mlx5_query_nic_vport_system_image_guid(mdev, &mpi->sys_image_guid); if (err) { kfree(mpi); return err; } mutex_lock(&mlx5_ib_multiport_mutex); list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) { if (dev->sys_image_guid == mpi->sys_image_guid) bound = mlx5_ib_bind_slave_port(dev, mpi); if (bound) { rdma_roce_rescan_device(&dev->ib_dev); mpi->ibdev->ib_active = true; break; } } if (!bound) { list_add_tail(&mpi->list, 
&mlx5_ib_unaffiliated_port_list); dev_dbg(mdev->device, "no suitable IB device found to bind to, added to unaffiliated list.\n"); } mutex_unlock(&mlx5_ib_multiport_mutex); dev_set_drvdata(&adev->dev, mpi); return 0; } static void mlx5r_mp_remove(struct auxiliary_device *adev) { struct mlx5_ib_multiport_info *mpi; mpi = dev_get_drvdata(&adev->dev); mutex_lock(&mlx5_ib_multiport_mutex); if (mpi->ibdev) mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); else list_del(&mpi->list); mutex_unlock(&mlx5_ib_multiport_mutex); kfree(mpi); } static int mlx5r_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev); struct mlx5_core_dev *mdev = idev->mdev; const struct mlx5_ib_profile *profile; int port_type_cap, num_ports, ret; enum rdma_link_layer ll; struct mlx5_ib_dev *dev; port_type_cap = MLX5_CAP_GEN(mdev, port_type); ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); num_ports = max(MLX5_CAP_GEN(mdev, num_ports), MLX5_CAP_GEN(mdev, num_vhca_ports)); dev = ib_alloc_device(mlx5_ib_dev, ib_dev); if (!dev) return -ENOMEM; dev->port = kcalloc(num_ports, sizeof(*dev->port), GFP_KERNEL); if (!dev->port) { ib_dealloc_device(&dev->ib_dev); return -ENOMEM; } dev->mdev = mdev; dev->num_ports = num_ports; if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev)) profile = &raw_eth_profile; else profile = &pf_profile; ret = __mlx5_ib_add(dev, profile); if (ret) { kfree(dev->port); ib_dealloc_device(&dev->ib_dev); return ret; } dev_set_drvdata(&adev->dev, dev); return 0; } static void mlx5r_remove(struct auxiliary_device *adev) { struct mlx5_ib_dev *dev; dev = dev_get_drvdata(&adev->dev); __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); } static const struct auxiliary_device_id mlx5r_mp_id_table[] = { { .name = MLX5_ADEV_NAME ".multiport", }, {}, }; static const struct auxiliary_device_id mlx5r_id_table[] = { { .name = MLX5_ADEV_NAME ".rdma", }, {}, }; MODULE_DEVICE_TABLE(auxiliary, 
mlx5r_mp_id_table); MODULE_DEVICE_TABLE(auxiliary, mlx5r_id_table); static struct auxiliary_driver mlx5r_mp_driver = { .name = "multiport", .probe = mlx5r_mp_probe, .remove = mlx5r_mp_remove, .id_table = mlx5r_mp_id_table, }; static struct auxiliary_driver mlx5r_driver = { .name = "rdma", .probe = mlx5r_probe, .remove = mlx5r_remove, .id_table = mlx5r_id_table, }; static int __init mlx5_ib_init(void) { int ret; xlt_emergency_page = (void *)__get_free_page(GFP_KERNEL); if (!xlt_emergency_page) return -ENOMEM; mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0); if (!mlx5_ib_event_wq) { free_page((unsigned long)xlt_emergency_page); return -ENOMEM; } mlx5_ib_odp_init(); ret = mlx5r_rep_init(); if (ret) goto rep_err; ret = auxiliary_driver_register(&mlx5r_mp_driver); if (ret) goto mp_err; ret = auxiliary_driver_register(&mlx5r_driver); if (ret) goto drv_err; return 0; drv_err: auxiliary_driver_unregister(&mlx5r_mp_driver); mp_err: mlx5r_rep_cleanup(); rep_err: destroy_workqueue(mlx5_ib_event_wq); free_page((unsigned long)xlt_emergency_page); return ret; } static void __exit mlx5_ib_cleanup(void) { auxiliary_driver_unregister(&mlx5r_driver); auxiliary_driver_unregister(&mlx5r_mp_driver); mlx5r_rep_cleanup(); destroy_workqueue(mlx5_ib_event_wq); free_page((unsigned long)xlt_emergency_page); } module_init(mlx5_ib_init); module_exit(mlx5_ib_cleanup);
/* Pager ("flippy") navigation styles. */

/* Center the pager list inside the region content area. */
.region-content ul.flippy {
  margin: auto;
  padding: 0px 20px;
  text-align: center;
}

/* Render pager items inline, without bullets, and keep each label on one line. */
ul.flippy li {
  margin: 0;
  padding: 10px;
  display: inline;
  width: auto;
  list-style-type: none;
  list-style-image: none;
  background: none;
  white-space: nowrap;
}

/* Hide the first/last links entirely when they have no target page. */
.flippy li.first.empty,
.flippy li.last.empty {
  display: none;
}

/* Grey out (but keep visible) previous/next links with no target page. */
.flippy li.previous.empty,
.flippy li.next.empty {
  color: LightGrey;
}
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs; import static org.junit.Assert.*; import java.io.IOException; import java.net.InetSocketAddress; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; import 
org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.ipc.RemoteException; import org.junit.Test; /** * This class tests if getblocks request works correctly. */ public class TestGetBlocks { private static final int blockSize = 8192; private static final String racks[] = new String[] { "/d1/r1", "/d1/r1", "/d1/r2", "/d1/r2", "/d1/r2", "/d2/r3", "/d2/r3" }; private static final int numDatanodes = racks.length; /** * Stop the heartbeat of a datanode in the MiniDFSCluster * * @param cluster * The MiniDFSCluster * @param hostName * The hostName of the datanode to be stopped * @return The DataNode whose heartbeat has been stopped */ private DataNode stopDataNodeHeartbeat(MiniDFSCluster cluster, String hostName) { for (DataNode dn : cluster.getDataNodes()) { if (dn.getDatanodeId().getHostName().equals(hostName)) { DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true); return dn; } } return null; } /** * Test if the datanodes returned by * {@link ClientProtocol#getBlockLocations(String, long, long)} is correct * when stale nodes checking is enabled. 
Also test during the scenario when 1) * stale nodes checking is enabled, 2) a writing is going on, 3) a datanode * becomes stale happen simultaneously * * @throws Exception */ @Test public void testReadSelectNonStaleDatanode() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true); long staleInterval = 30 * 1000 * 60; conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, staleInterval); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(numDatanodes).racks(racks).build(); cluster.waitActive(); InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort()); DFSClient client = new DFSClient(addr, conf); List<DatanodeDescriptor> nodeInfoList = cluster.getNameNode() .getNamesystem().getBlockManager().getDatanodeManager() .getDatanodeListForReport(DatanodeReportType.LIVE); assertEquals("Unexpected number of datanodes", numDatanodes, nodeInfoList.size()); FileSystem fileSys = cluster.getFileSystem(); FSDataOutputStream stm = null; try { // do the writing but do not close the FSDataOutputStream // in order to mimic the ongoing writing final Path fileName = new Path("/file1"); stm = fileSys.create(fileName, true, fileSys.getConf().getInt( CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) 3, blockSize); stm.write(new byte[(blockSize * 3) / 2]); // We do not close the stream so that // the writing seems to be still ongoing stm.hflush(); LocatedBlocks blocks = client.getNamenode().getBlockLocations( fileName.toString(), 0, blockSize); DatanodeInfo[] nodes = blocks.get(0).getLocations(); assertEquals(nodes.length, 3); DataNode staleNode = null; DatanodeDescriptor staleNodeInfo = null; // stop the heartbeat of the first node staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName()); assertNotNull(staleNode); // set the first node as stale staleNodeInfo = 
cluster.getNameNode().getNamesystem().getBlockManager() .getDatanodeManager() .getDatanode(staleNode.getDatanodeId()); DFSTestUtil.resetLastUpdatesWithOffset(staleNodeInfo, -(staleInterval + 1)); LocatedBlocks blocksAfterStale = client.getNamenode().getBlockLocations( fileName.toString(), 0, blockSize); DatanodeInfo[] nodesAfterStale = blocksAfterStale.get(0).getLocations(); assertEquals(nodesAfterStale.length, 3); assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName()); // restart the staleNode's heartbeat DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode, false); // reset the first node as non-stale, so as to avoid two stale nodes DFSTestUtil.resetLastUpdatesWithOffset(staleNodeInfo, 0); LocatedBlock lastBlock = client.getLocatedBlocks(fileName.toString(), 0, Long.MAX_VALUE).getLastLocatedBlock(); nodes = lastBlock.getLocations(); assertEquals(nodes.length, 3); // stop the heartbeat of the first node for the last block staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName()); assertNotNull(staleNode); // set the node as stale DatanodeDescriptor dnDesc = cluster.getNameNode().getNamesystem() .getBlockManager().getDatanodeManager() .getDatanode(staleNode.getDatanodeId()); DFSTestUtil.resetLastUpdatesWithOffset(dnDesc, -(staleInterval + 1)); LocatedBlock lastBlockAfterStale = client.getLocatedBlocks( fileName.toString(), 0, Long.MAX_VALUE).getLastLocatedBlock(); nodesAfterStale = lastBlockAfterStale.getLocations(); assertEquals(nodesAfterStale.length, 3); assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName()); } finally { if (stm != null) { stm.close(); } client.close(); cluster.shutdown(); } } /** test getBlocks */ @Test public void testGetBlocks() throws Exception { final Configuration CONF = new HdfsConfiguration(); final short REPLICATION_FACTOR = (short) 2; final int DEFAULT_BLOCK_SIZE = 1024; CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE); MiniDFSCluster cluster = new 
MiniDFSCluster.Builder(CONF).numDataNodes( REPLICATION_FACTOR).build(); try { cluster.waitActive(); long fileLen = 2 * DEFAULT_BLOCK_SIZE; DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"), fileLen, REPLICATION_FACTOR, 0L); // get blocks & data nodes List<LocatedBlock> locatedBlocks; DatanodeInfo[] dataNodes = null; boolean notWritten; do { final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF); locatedBlocks = dfsclient.getNamenode() .getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks(); assertEquals(2, locatedBlocks.size()); notWritten = false; for (int i = 0; i < 2; i++) { dataNodes = locatedBlocks.get(i).getLocations(); if (dataNodes.length != REPLICATION_FACTOR) { notWritten = true; try { Thread.sleep(10); } catch (InterruptedException e) { } break; } } } while (notWritten); // get RPC client to namenode InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort()); NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF, NameNode.getUri(addr), NamenodeProtocol.class).getProxy(); // get blocks of size fileLen from dataNodes[0] BlockWithLocations[] locs; locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks(); assertEquals(locs.length, 2); assertEquals(locs[0].getStorageIDs().length, 2); assertEquals(locs[1].getStorageIDs().length, 2); // get blocks of size BlockSize from dataNodes[0] locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks(); assertEquals(locs.length, 1); assertEquals(locs[0].getStorageIDs().length, 2); // get blocks of size 1 from dataNodes[0] locs = namenode.getBlocks(dataNodes[0], 1).getBlocks(); assertEquals(locs.length, 1); assertEquals(locs[0].getStorageIDs().length, 2); // get blocks of size 0 from dataNodes[0] getBlocksWithException(namenode, dataNodes[0], 0); // get blocks of size -1 from dataNodes[0] getBlocksWithException(namenode, dataNodes[0], -1); // get blocks of size BlockSize from a non-existent datanode DatanodeInfo info = 
DFSTestUtil.getDatanodeInfo("1.2.3.4"); getBlocksWithException(namenode, info, 2); } finally { cluster.shutdown(); } } private void getBlocksWithException(NamenodeProtocol namenode, DatanodeInfo datanode, long size) throws IOException { boolean getException = false; try { namenode.getBlocks(DFSTestUtil.getLocalDatanodeInfo(), 2); } catch (RemoteException e) { getException = true; assertTrue(e.getClassName().contains("HadoopIllegalArgumentException")); } assertTrue(getException); } @Test public void testBlockKey() { Map<Block, Long> map = new HashMap<Block, Long>(); final Random RAN = new Random(); final long seed = RAN.nextLong(); System.out.println("seed=" + seed); RAN.setSeed(seed); long[] blkids = new long[10]; for (int i = 0; i < blkids.length; i++) { blkids[i] = 1000L + RAN.nextInt(100000); map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]); } System.out.println("map=" + map.toString().replace(",", "\n ")); for (int i = 0; i < blkids.length; i++) { Block b = new Block(blkids[i], 0, HdfsConstants.GRANDFATHER_GENERATION_STAMP); Long v = map.get(b); System.out.println(b + " => " + v); assertEquals(blkids[i], v.longValue()); } } }
/**************************************************************************//** * @file efm32zg_romtable.h * @brief EFM32ZG_ROMTABLE register and bit field definitions * @version 5.1.2 ****************************************************************************** * @section License * <b>Copyright 2017 Silicon Laboratories, Inc. http://www.silabs.com</b> ****************************************************************************** * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software.@n * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software.@n * 3. This notice may not be removed or altered from any source distribution. * * DISCLAIMER OF WARRANTY/LIMITATION OF REMEDIES: Silicon Laboratories, Inc. * has no obligation to support this Software. Silicon Laboratories, Inc. is * providing the Software "AS IS", with no express or implied warranties of any * kind, including, but not limited to, any implied warranties of * merchantability or fitness for any particular purpose or warranties against * infringement of any proprietary rights of a third party. * * Silicon Laboratories, Inc. will not be liable for any consequential, * incidental, or special damages, or any other relief, or for any claim by * any third party, arising from your use of this Software. 
* *****************************************************************************/

/**************************************************************************//**
 * @addtogroup Parts
 * @{
 ******************************************************************************/

/**************************************************************************//**
 * @defgroup EFM32ZG_ROMTABLE
 * @{
 * @brief Chip Information, Revision numbers
 *****************************************************************************/
/* Memory-mapped CoreSight ROM table: read-only peripheral/component ID
 * registers from which the chip family and revision are decoded. */
typedef struct
{
  __IM uint32_t PID4; /**< JEP_106_BANK */
  __IM uint32_t PID5; /**< Unused */
  __IM uint32_t PID6; /**< Unused */
  __IM uint32_t PID7; /**< Unused */
  __IM uint32_t PID0; /**< Chip family LSB, chip major revision */
  __IM uint32_t PID1; /**< JEP_106_NO, Chip family MSB */
  __IM uint32_t PID2; /**< Chip minor rev MSB, JEP_106_PRESENT, JEP_106_NO */
  __IM uint32_t PID3; /**< Chip minor rev LSB */
  __IM uint32_t CID0; /**< Unused */
} ROMTABLE_TypeDef;  /** @} */

/**************************************************************************//**
 * @defgroup EFM32ZG_ROMTABLE_BitFields
 * @{
 *****************************************************************************/

/* Bit fields for EFM32ZG_ROMTABLE */
#define _ROMTABLE_PID0_FAMILYLSB_MASK    0x000000C0UL /**< Least Significant Bits [1:0] of CHIP FAMILY, mask */
#define _ROMTABLE_PID0_FAMILYLSB_SHIFT   6            /**< Least Significant Bits [1:0] of CHIP FAMILY, shift */
#define _ROMTABLE_PID0_REVMAJOR_MASK     0x0000003FUL /**< CHIP MAJOR Revision, mask */
#define _ROMTABLE_PID0_REVMAJOR_SHIFT    0            /**< CHIP MAJOR Revision, shift */
#define _ROMTABLE_PID1_FAMILYMSB_MASK    0x0000000FUL /**< Most Significant Bits [5:2] of CHIP FAMILY, mask */
#define _ROMTABLE_PID1_FAMILYMSB_SHIFT   0            /**< Most Significant Bits [5:2] of CHIP FAMILY, shift */
#define _ROMTABLE_PID2_REVMINORMSB_MASK  0x000000F0UL /**< Most Significant Bits [7:4] of CHIP MINOR revision, mask */
#define _ROMTABLE_PID2_REVMINORMSB_SHIFT 4            /**< Most Significant Bits [7:4] of CHIP MINOR revision, shift */
#define _ROMTABLE_PID3_REVMINORLSB_MASK  0x000000F0UL /**< Least Significant Bits [3:0] of CHIP MINOR revision, mask */
#define _ROMTABLE_PID3_REVMINORLSB_SHIFT 4            /**< Least Significant Bits [3:0] of CHIP MINOR revision, shift */

/** @} End of group EFM32ZG_ROMTABLE */
/** @} End of group Parts */
/* * Copyright 2000-2011 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.openapi.roots.ui.configuration.projectRoot.daemon; import com.intellij.openapi.project.Project; import com.intellij.openapi.roots.ui.configuration.ProjectStructureConfigurable; import com.intellij.openapi.util.ActionCallback; import com.intellij.ui.navigation.Place; import org.jetbrains.annotations.NotNull; /** * @author nik */ public class PlaceInProjectStructureBase extends PlaceInProjectStructure { private final Project myProject; private final Place myPlace; private final ProjectStructureElement myElement; public PlaceInProjectStructureBase(Project project, Place place, ProjectStructureElement element) { myProject = project; myPlace = place; myElement = element; } @Override public String getPlacePath() { return null; } @NotNull @Override public ProjectStructureElement getContainingElement() { return myElement; } @NotNull @Override public ActionCallback navigate() { return ProjectStructureConfigurable.getInstance(myProject).navigateTo(myPlace, true); } }
<?php

namespace Doctrine\Tests\ORM\Tools\Console\Command;

use Doctrine\ORM\Tools\Console\Command\ClearCache\CollectionRegionCommand;
use Doctrine\ORM\Tools\Console\Helper\EntityManagerHelper;
use Symfony\Component\Console\Tester\CommandTester;
use Symfony\Component\Console\Helper\HelperSet;
use Symfony\Component\Console\Application;
use Doctrine\Tests\OrmFunctionalTestCase;
use Doctrine\ORM\Tools\Console\Command\InfoCommand;

/**
 * Functional test for the "orm:info" console command.
 */
class InfoCommandTest extends OrmFunctionalTestCase
{
    /**
     * @var \Symfony\Component\Console\Application
     */
    private $application;

    /**
     * @var \Doctrine\ORM\Tools\Console\Command\InfoCommand
     */
    private $command;

    /**
     * @var \Symfony\Component\Console\Tester\CommandTester
     */
    private $tester;

    /**
     * Wires an Application with an EntityManager helper and registers
     * the InfoCommand under test.
     */
    protected function setUp()
    {
        parent::setUp();

        $infoCommand = new InfoCommand();
        $helperSet   = new HelperSet(array(
            'em' => new EntityManagerHelper($this->_em)
        ));

        $this->application = new Application();
        $this->application->setHelperSet($helperSet);
        $this->application->add($infoCommand);

        $this->command = $this->application->find('orm:info');
        $this->tester  = new CommandTester($infoCommand);
    }

    /**
     * Running the command must list all mapped entity classes.
     */
    public function testListAllClasses()
    {
        $this->tester->execute(array(
            'command' => $this->command->getName(),
        ));

        $display = $this->tester->getDisplay();

        $this->assertContains('Doctrine\Tests\Models\Cache\AttractionInfo', $display);
        $this->assertContains('Doctrine\Tests\Models\Cache\City', $display);
    }
}
function X2JS(_1){ "use strict"; var _2="1.1.2"; _1=_1||{}; _3(); function _3(){ if(_1.escapeMode===undefined){ _1.escapeMode=true; } if(_1.attributePrefix===undefined){ _1.attributePrefix="_"; } if(_1.arrayAccessForm===undefined){ _1.arrayAccessForm="none"; } if(_1.emptyNodeForm===undefined){ _1.emptyNodeForm="text"; } }; var _4={ELEMENT_NODE:1,TEXT_NODE:3,CDATA_SECTION_NODE:4,DOCUMENT_NODE:9}; function _5(_6){ var _7=_6.localName; if(_7==null){ _7=_6.baseName; } if(_7==null||_7==""){ _7=_6.nodeName; } return _7; }; function _8(_9){ return _9.prefix; }; function _a(_b){ if(typeof (_b)=="string"){ return _b.replace(/&/g,"&amp;").replace(/</g,"&lt;").replace(/>/g,"&gt;").replace(/"/g,"&quot;").replace(/'/g,"&#x27;").replace(/\//g,"&#x2F;"); }else{ return _b; } }; function _c(_d){ return _d.replace(/&amp;/g,"&").replace(/&lt;/g,"<").replace(/&gt;/g,">").replace(/&quot;/g,"\"").replace(/&#x27;/g,"'").replace(/&#x2F;/g,"/"); }; function _e(_f,_10){ switch(_1.arrayAccessForm){ case "property": if(!(_f[_10] instanceof Array)){ _f[_10+"_asArray"]=[_f[_10]]; }else{ _f[_10+"_asArray"]=_f[_10]; } break; } }; function _11(_12){ if(_12.nodeType==_4.DOCUMENT_NODE){ var _13=new Object; var _14=_12.firstChild; var _15=_5(_14); _13[_15]=_11(_14); return _13; }else{ if(_12.nodeType==_4.ELEMENT_NODE){ var _13=new Object; _13.__cnt=0; var _16=_12.childNodes; for(var _17=0;_17<_16.length;_17++){ var _14=_16.item(_17); var _15=_5(_14); _13.__cnt++; if(_13[_15]==null){ _13[_15]=_11(_14); _e(_13,_15); }else{ if(_13[_15]!=null){ if(!(_13[_15] instanceof Array)){ _13[_15]=[_13[_15]]; _e(_13,_15); } } var _18=0; while(_13[_15][_18]!=null){ _18++; } (_13[_15])[_18]=_11(_14); } } for(var _19=0;_19<_12.attributes.length;_19++){ var _1a=_12.attributes.item(_19); _13.__cnt++; _13[_1.attributePrefix+_1a.name]=_1a.value; } var _1b=_8(_12); if(_1b!=null&&_1b!=""){ _13.__cnt++; _13.__prefix=_1b; } if(_13["#text"]!=null){ _13.__text=_13["#text"]; if(_13.__text instanceof Array){ 
_13.__text=_13.__text.join("\n"); } if(_1.escapeMode){ _13.__text=_c(_13.__text); } delete _13["#text"]; if(_1.arrayAccessForm=="property"){ delete _13["#text_asArray"]; } } if(_13["#cdata-section"]!=null){ _13.__cdata=_13["#cdata-section"]; delete _13["#cdata-section"]; if(_1.arrayAccessForm=="property"){ delete _13["#cdata-section_asArray"]; } } if(_13.__cnt==1&&_13.__text!=null){ _13=_13.__text; }else{ if(_13.__cnt==0&&_1.emptyNodeForm=="text"){ _13=""; } } delete _13.__cnt; if(_13.__text!=null||_13.__cdata!=null){ _13.toString=function(){ return (this.__text!=null?this.__text:"")+(this.__cdata!=null?this.__cdata:""); }; } return _13; }else{ if(_12.nodeType==_4.TEXT_NODE||_12.nodeType==_4.CDATA_SECTION_NODE){ return _12.nodeValue; } } } }; function _1c(_1d,_1e,_1f,_20){ var _21="<"+((_1d!=null&&_1d.__prefix!=null)?(_1d.__prefix+":"):"")+_1e; if(_1f!=null){ for(var _22=0;_22<_1f.length;_22++){ var _23=_1f[_22]; var _24=_1d[_23]; _21+=" "+_23.substr(_1.attributePrefix.length)+"='"+_24+"'"; } } if(!_20){ _21+=">"; }else{ _21+="/>"; } return _21; }; function _25(_26,_27){ return "</"+(_26.__prefix!=null?(_26.__prefix+":"):"")+_27+">"; }; function _28(str,_29){ return str.indexOf(_29,str.length-_29.length)!==-1; }; function _2a(_2b,_2c){ if((_1.arrayAccessForm=="property"&&_28(_2c.toString(),("_asArray")))||_2c.toString().indexOf(_1.attributePrefix)==0||_2c.toString().indexOf("__")==0||(_2b[_2c] instanceof Function)){ return true; }else{ return false; } }; function _2d(_2e){ var _2f=0; if(_2e instanceof Object){ for(var it in _2e){ if(_2a(_2e,it)){ continue; } _2f++; } } return _2f; }; function _30(_31){ var _32=[]; if(_31 instanceof Object){ for(var ait in _31){ if(ait.toString().indexOf("__")==-1&&ait.toString().indexOf(_1.attributePrefix)==0){ _32.push(ait); } } } return _32; }; function _33(_34){ var _35=""; if(_34.__cdata!=null){ _35+="<![CDATA["+_34.__cdata+"]]>"; } if(_34.__text!=null){ if(_1.escapeMode){ _35+=_a(_34.__text); }else{ _35+=_34.__text; } } return 
_35; }; function _36(_37){ var _38=""; if(_37 instanceof Object){ _38+=_33(_37); }else{ if(_37!=null){ if(_1.escapeMode){ _38+=_a(_37); }else{ _38+=_37; } } } return _38; }; function _39(_3a,_3b,_3c){ var _3d=""; if(_3a.length==0){ _3d+=_1c(_3a,_3b,_3c,true); }else{ for(var _3e=0;_3e<_3a.length;_3e++){ _3d+=_1c(_3a[_3e],_3b,_30(_3a[_3e]),false); _3d+=_3f(_3a[_3e]); _3d+=_25(_3a[_3e],_3b); } } return _3d; }; function _3f(_40){ var _41=""; var _42=_2d(_40); if(_42>0){ for(var it in _40){ if(_2a(_40,it)){ continue; } var _43=_40[it]; var _44=_30(_43); if(_43==null||_43==undefined){ _41+=_1c(_43,it,_44,true); }else{ if(_43 instanceof Object){ if(_43 instanceof Array){ _41+=_39(_43,it,_44); }else{ var _45=_2d(_43); if(_45>0||_43.__text!=null||_43.__cdata!=null){ _41+=_1c(_43,it,_44,false); _41+=_3f(_43); _41+=_25(_43,it); }else{ _41+=_1c(_43,it,_44,true); } } }else{ _41+=_1c(_43,it,_44,false); _41+=_36(_43); _41+=_25(_43,it); } } } } _41+=_36(_40); return _41; }; this.parseXmlString=function(_46){ if(_46===undefined){ return null; } var _47; if(window.DOMParser){ var _48=new window.DOMParser(); _47=_48.parseFromString(_46,"text/xml"); }else{ if(_46.indexOf("<?")==0){ _46=_46.substr(_46.indexOf("?>")+2); } _47=new ActiveXObject("Microsoft.XMLDOM"); _47.async="false"; _47.loadXML(_46); } return _47; }; this.asArray=function(_49){ if(_49 instanceof Array){ return _49; }else{ return [_49]; } }; this.xml2json=function(_4a){ return _11(_4a); }; this.xml_str2json=function(_4b){ var _4c=this.parseXmlString(_4b); return this.xml2json(_4c); }; this.json2xml_str=function(_4d){ return _3f(_4d); }; this.json2xml=function(_4e){ var _4f=this.json2xml_str(_4e); return this.parseXmlString(_4f); }; this.getVersion=function(){ return _2; }; };
'use strict'; var path = require('path'); exports.name = 'cssmin'; // // Output a config for the furnished block // The context variable is used both to take the files to be treated // (inFiles) and to output the one(s) created (outFiles). // It aslo conveys whether or not the current process is the last of the pipe // exports.createConfig = function(context, block) { var cfg = {files: []}; // FIXME: check context has all the needed info var outfile = path.join(context.outDir, block.dest); // Depending whether or not we're the last of the step we're not going to output the same thing var files = {}; files.dest = outfile; files.src = []; context.inFiles.forEach(function(f) { files.src.push(path.join(context.inDir, f));} ); cfg.files.push(files); context.outFiles = [block.dest]; return cfg; };
import * as React from 'react';
import { IconBaseProps } from 'react-icon-base';

// Type declaration for the Ionicons "ios-refresh" icon component; it accepts
// the common react-icon-base props (size, color, style, ...) and renders the
// glyph. Exported CommonJS-style (`export =`) to match the JS implementation.
declare class IoIosRefresh extends React.Component<IconBaseProps> { }
export = IoIosRefresh;
/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"

/* private */
/* One pre-allocated slot of a queue's pool; lives on exactly one of the
 * queue->queue (waiting), queue->pending (handed to firmware) or
 * queue->free_pool lists at any time. */
struct cw1200_queue_item {
	struct list_head	head;
	struct sk_buff		*skb;
	u32			packet_id;
	unsigned long		queue_timestamp;	/* when queued (jiffies) */
	unsigned long		xmit_timestamp;		/* when handed to fw (jiffies) */
	struct cw1200_txpriv	txpriv;
	u8			generation;		/* bumped on every reuse of the slot */
};

/* Stop the matching mac80211 queue the first time the lock count rises
 * above zero; nested calls only increment the counter. */
static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	if (queue->tx_locked_cnt++ == 0) {
		pr_debug("[TX] Queue %d is locked.\n",
			 queue->queue_id);
		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
	}
}

/* Wake the mac80211 queue when the last nested lock is released. */
static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	BUG_ON(!queue->tx_locked_cnt);
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n",
			 queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}

/* Unpack a 32-bit packet id into its four byte-wide fields
 * (layout mirrors cw1200_queue_mk_packet_id below). */
static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
					 u8 *queue_id, u8 *item_generation,
					 u8 *item_id)
{
	*item_id		= (packet_id >>  0) & 0xFF;
	*item_generation	= (packet_id >>  8) & 0xFF;
	*queue_id		= (packet_id >> 16) & 0xFF;
	*queue_generation	= (packet_id >> 24) & 0xFF;
}

/* Pack queue/item generations and ids into one u32 packet id. */
static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
					    u8 item_generation, u8 item_id)
{
	return ((u32)item_id << 0) |
		((u32)item_generation << 8) |
		((u32)queue_id << 16) |
		((u32)queue_generation << 24);
}

/* Destroy every item collected on gc_list: run the skb destructor and free
 * the heap copies made by cw1200_queue_register_post_gc(). Called without
 * the queue lock so the dtor may sleep/do mac80211 work. */
static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
				 struct list_head *gc_list)
{
	struct cw1200_queue_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, gc_list, head) {
		list_del(&item->head);
		stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
		kfree(item);
	}
}

/* Snapshot an item onto gc_list (heap copy, GFP_ATOMIC) so the skb can be
 * destroyed later, outside the queue lock, by cw1200_queue_post_gc(). */
static void cw1200_queue_register_post_gc(struct list_head *gc_list,
					  struct cw1200_queue_item *item)
{
	struct cw1200_queue_item *gc_item;
	gc_item = kmalloc(sizeof(struct cw1200_queue_item),
			GFP_ATOMIC);
	BUG_ON(!gc_item);
	memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
	list_add_tail(&gc_item->head, gc_list);
}

/* Expire queued items older than queue->ttl, moving them to the free pool
 * and registering them on @head for post-GC destruction. Re-arms the gc
 * timer while the queue stays overfull. Caller holds queue->lock
 * (see cw1200_queue_gc). */
static void __cw1200_queue_gc(struct cw1200_queue *queue,
			      struct list_head *head,
			      bool unlock)
{
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item = NULL, *tmp;
	bool wakeup_stats = false;

	list_for_each_entry_safe(item, tmp, &queue->queue, head) {
		/* Queue is ordered by age, so stop at the first fresh item. */
		if (jiffies - item->queue_timestamp < queue->ttl)
			break;
		--queue->num_queued;
		--queue->link_map_cache[item->txpriv.link_id];
		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
		cw1200_debug_tx_ttl(stats->priv);
		cw1200_queue_register_post_gc(head, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}

	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);

	if (queue->overfull) {
		if (queue->num_queued <= (queue->capacity >> 1)) {
			/* Drained to half capacity: clear overfull state. */
			queue->overfull = false;
			if (unlock)
				__cw1200_queue_unlock(queue);
		} else if (item) {
			/* Still overfull: fire again when the oldest
			 * remaining item expires. */
			unsigned long tmo = item->queue_timestamp + queue->ttl;
			mod_timer(&queue->gc, tmo);
			cw1200_pm_stay_awake(&stats->priv->pm_state,
					     tmo - jiffies);
		}
	}
}

/* GC timer callback: collect expired items under the lock, then destroy
 * them after dropping it. */
static void cw1200_queue_gc(struct timer_list *t)
{
	LIST_HEAD(list);
	struct cw1200_queue *queue =
		from_timer(queue, t, gc);

	spin_lock_bh(&queue->lock);
	__cw1200_queue_gc(queue, &list, true);
	spin_unlock_bh(&queue->lock);
	cw1200_queue_post_gc(queue->stats, &list);
}

/* Initialize the shared per-device stats block, including the per-link
 * queued-frame counters. Returns 0 or -ENOMEM. */
int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
			    size_t map_capacity,
			    cw1200_queue_skb_dtor_t skb_dtor,
			    struct cw1200_common *priv)
{
	memset(stats, 0, sizeof(*stats));
	stats->map_capacity = map_capacity;
	stats->skb_dtor = skb_dtor;
	stats->priv = priv;
	spin_lock_init(&stats->lock);
	init_waitqueue_head(&stats->wait_link_id_empty);

	stats->link_map_cache = kzalloc(sizeof(int) * map_capacity,
					GFP_KERNEL);
	if (!stats->link_map_cache)
		return -ENOMEM;

	return 0;
}

/* Initialize one TX queue: allocate its item pool and per-link counters
 * and park every item on the free pool. Returns 0 or -ENOMEM. */
int cw1200_queue_init(struct cw1200_queue *queue,
		      struct cw1200_queue_stats *stats,
		      u8 queue_id,
		      size_t capacity,
		      unsigned long ttl)
{
	size_t i;

	memset(queue, 0, sizeof(*queue));
	queue->stats = stats;
	queue->capacity = capacity;
	queue->queue_id = queue_id;
	queue->ttl = ttl;
	INIT_LIST_HEAD(&queue->queue);
	INIT_LIST_HEAD(&queue->pending);
	INIT_LIST_HEAD(&queue->free_pool);
	spin_lock_init(&queue->lock);
	timer_setup(&queue->gc, cw1200_queue_gc, 0);

	queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity,
			      GFP_KERNEL);
	if (!queue->pool)
		return -ENOMEM;

	queue->link_map_cache = kzalloc(sizeof(int) * stats->map_capacity,
					GFP_KERNEL);
	if (!queue->link_map_cache) {
		kfree(queue->pool);
		queue->pool = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < capacity; ++i)
		list_add_tail(&queue->pool[i].head, &queue->free_pool);

	return 0;
}

/* Drop every queued and pending frame, invalidating outstanding packet ids
 * by bumping the queue generation; skbs are destroyed outside the lock. */
int cw1200_queue_clear(struct cw1200_queue *queue)
{
	int i;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item, *tmp;

	spin_lock_bh(&queue->lock);
	queue->generation++;
	list_splice_tail_init(&queue->queue, &queue->pending);
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		WARN_ON(!item->skb);
		cw1200_queue_register_post_gc(&gc_list, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}
	queue->num_queued = 0;
	queue->num_pending = 0;

	spin_lock_bh(&stats->lock);
	for (i = 0; i < stats->map_capacity; ++i) {
		stats->num_queued -= queue->link_map_cache[i];
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->lock);
	if (queue->overfull) {
		queue->overfull = false;
		__cw1200_queue_unlock(queue);
	}
	spin_unlock_bh(&queue->lock);
	wake_up(&stats->wait_link_id_empty);
	cw1200_queue_post_gc(stats, &gc_list);
	return 0;
}

/* Release the stats block's per-link counter array. */
void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
	kfree(stats->link_map_cache);
	stats->link_map_cache = NULL;
}

/* Tear down a queue: clear it, stop the gc timer and free its pools. */
void cw1200_queue_deinit(struct cw1200_queue *queue)
{
	cw1200_queue_clear(queue);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	kfree(queue->link_map_cache);
	queue->pool = NULL;
	queue->link_map_cache = NULL;
	queue->capacity = 0;
}

/* Count frames waiting (not yet pending) for the links selected by
 * @link_id_map; (u32)-1 selects all links. */
size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
				   u32 link_id_map)
{
	size_t ret;
	int i, bit;
	size_t map_capacity = queue->stats->map_capacity;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->lock);
	if (link_id_map == (u32)-1) {
		ret = queue->num_queued - queue->num_pending;
	} else {
		ret = 0;
		for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
			if (link_id_map & bit)
				ret += queue->link_map_cache[i];
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

/* Enqueue an skb: grab a free item, stamp it with a fresh packet id and
 * update the per-link accounting. Locks the queue (and arms immediate GC)
 * once it approaches capacity. Returns 0 or -ENOENT when the pool is
 * exhausted. */
int cw1200_queue_put(struct cw1200_queue *queue,
		     struct sk_buff *skb,
		     struct cw1200_txpriv *txpriv)
{
	int ret = 0;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!WARN_ON(list_empty(&queue->free_pool))) {
		struct cw1200_queue_item *item = list_first_entry(
			&queue->free_pool, struct cw1200_queue_item, head);
		BUG_ON(item->skb);

		list_move_tail(&item->head, &queue->queue);
		item->skb = skb;
		item->txpriv = *txpriv;
		item->generation = 0;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		item->queue_timestamp = jiffies;

		++queue->num_queued;
		++queue->link_map_cache[txpriv->link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[txpriv->link_id];
		spin_unlock_bh(&stats->lock);

		/* TX may happen in parallel sometimes.
		 * Leave extra queue slots so we don't overflow.
		 */
		if (queue->overfull == false &&
		    queue->num_queued >=
		    (queue->capacity - (num_present_cpus() - 1))) {
			queue->overfull = true;
			__cw1200_queue_lock(queue);
			mod_timer(&queue->gc, jiffies);
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

/* Pick the first queued frame whose link is in @link_id_map and move it to
 * the pending list, exposing its wsm_tx header, tx_info and txpriv to the
 * caller. Returns 0 or -ENOENT if nothing matches. */
int cw1200_queue_get(struct cw1200_queue *queue,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     const struct cw1200_txpriv **txpriv)
{
	int ret = -ENOENT;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->lock);
	list_for_each_entry(item, &queue->queue, head) {
		if (link_id_map & BIT(item->txpriv.link_id)) {
			ret = 0;
			break;
		}
	}

	if (!WARN_ON(ret)) {
		*tx = (struct wsm_tx *)item->skb->data;
		*tx_info = IEEE80211_SKB_CB(item->skb);
		*txpriv = &item->txpriv;
		(*tx)->packet_id = item->packet_id;
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		--queue->link_map_cache[item->txpriv.link_id];
		item->xmit_timestamp = jiffies;

		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
	}
	spin_unlock_bh(&queue->lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return ret;
}

/* Put a pending frame (identified by packet_id) back on the TX queue with a
 * bumped item generation, validating the id against queue and item
 * generations first. */
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		item->generation = ++item_generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
							    queue_id,
							    item_generation,
							    item_id);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

/* Move every pending frame back to the TX queue (newest first, preserving
 * original order at the head), each with a bumped generation. */
int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
	struct cw1200_queue_item *item, *tmp;
	struct cw1200_queue_stats *stats = queue->stats;
	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		++item->generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);

	return 0;
}

/* Complete a pending frame: validate the packet id, return the slot to the
 * free pool and destroy the skb outside the lock. May clear the overfull
 * state and wake the mac80211 queue. */
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	struct sk_buff *gc_skb = NULL;
	struct cw1200_txpriv gc_txpriv;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		gc_txpriv = item->txpriv;
		gc_skb = item->skb;
		item->skb = NULL;
		--queue->num_pending;
		--queue->num_queued;
		++queue->num_sent;
		++item->generation;
		/* Do not use list_move_tail here, but list_move:
		 * try to utilize cache row.
		 */
		list_move(&item->head, &queue->free_pool);

		if (queue->overfull &&
		    (queue->num_queued <= (queue->capacity >> 1))) {
			queue->overfull = false;
			__cw1200_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);

	if (gc_skb)
		stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

	return ret;
}

/* Look up the skb and txpriv of a pending frame by packet id, with the same
 * generation validation as requeue/remove. Does not change queue state. */
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
			 struct sk_buff **skb,
			 const struct cw1200_txpriv **txpriv)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		*skb = item->skb;
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

/* Public wrapper: take the queue spinlock around __cw1200_queue_lock(). */
void cw1200_queue_lock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_lock(queue);
	spin_unlock_bh(&queue->lock);
}

/* Public wrapper: take the queue spinlock around __cw1200_queue_unlock(). */
void cw1200_queue_unlock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_unlock(queue);
	spin_unlock_bh(&queue->lock);
}

/* Find the oldest xmit timestamp among pending frames, skipping the frame
 * with @pending_frame_id; *timestamp is only lowered. Returns true if any
 * frames are pending. */
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
				     unsigned long *timestamp,
				     u32 pending_frame_id)
{
	struct cw1200_queue_item *item;
	bool ret;

	spin_lock_bh(&queue->lock);
	ret = !list_empty(&queue->pending);
	if (ret) {
		list_for_each_entry(item, &queue->pending, head) {
			if (item->packet_id != pending_frame_id)
				if (time_before(item->xmit_timestamp,
						*timestamp))
					*timestamp = item->xmit_timestamp;
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

/* True when no frames are queued for the links in @link_id_map;
 * (u32)-1 checks the device-wide total. */
bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
				 u32 link_id_map)
{
	bool empty = true;

	spin_lock_bh(&stats->lock);
	if (link_id_map == (u32)-1) {
		empty = stats->num_queued == 0;
	} else {
		int i;
		for (i = 0; i < stats->map_capacity; ++i) {
			if (link_id_map & BIT(i)) {
				if (stats->link_map_cache[i]) {
					empty = false;
					break;
				}
			}
		}
	}
	spin_unlock_bh(&stats->lock);

	return empty;
}
/* * wm8988.c -- WM8988 ALSA SoC audio driver * * Copyright 2009 Wolfson Microelectronics plc * Copyright 2005 Openedhand Ltd. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/tlv.h> #include <sound/soc.h> #include <sound/initval.h> #include "wm8988.h" /* * wm8988 register cache * We can't read the WM8988 register space when we * are using 2 wire for device control, so we cache them instead. */ static const struct reg_default wm8988_reg_defaults[] = { { 0, 0x0097 }, { 1, 0x0097 }, { 2, 0x0079 }, { 3, 0x0079 }, { 5, 0x0008 }, { 7, 0x000a }, { 8, 0x0000 }, { 10, 0x00ff }, { 11, 0x00ff }, { 12, 0x000f }, { 13, 0x000f }, { 16, 0x0000 }, { 17, 0x007b }, { 18, 0x0000 }, { 19, 0x0032 }, { 20, 0x0000 }, { 21, 0x00c3 }, { 22, 0x00c3 }, { 23, 0x00c0 }, { 24, 0x0000 }, { 25, 0x0000 }, { 26, 0x0000 }, { 27, 0x0000 }, { 31, 0x0000 }, { 32, 0x0000 }, { 33, 0x0000 }, { 34, 0x0050 }, { 35, 0x0050 }, { 36, 0x0050 }, { 37, 0x0050 }, { 40, 0x0079 }, { 41, 0x0079 }, { 42, 0x0079 }, }; static bool wm8988_writeable(struct device *dev, unsigned int reg) { switch (reg) { case WM8988_LINVOL: case WM8988_RINVOL: case WM8988_LOUT1V: case WM8988_ROUT1V: case WM8988_ADCDAC: case WM8988_IFACE: case WM8988_SRATE: case WM8988_LDAC: case WM8988_RDAC: case WM8988_BASS: case WM8988_TREBLE: case WM8988_RESET: case WM8988_3D: case WM8988_ALC1: case WM8988_ALC2: case WM8988_ALC3: case WM8988_NGATE: case WM8988_LADC: case WM8988_RADC: case WM8988_ADCTL1: case WM8988_ADCTL2: case WM8988_PWR1: case WM8988_PWR2: case 
WM8988_ADCTL3: case WM8988_ADCIN: case WM8988_LADCIN: case WM8988_RADCIN: case WM8988_LOUTM1: case WM8988_LOUTM2: case WM8988_ROUTM1: case WM8988_ROUTM2: case WM8988_LOUT2V: case WM8988_ROUT2V: case WM8988_LPPB: return true; default: return false; } } /* codec private data */ struct wm8988_priv { struct regmap *regmap; unsigned int sysclk; const struct snd_pcm_hw_constraint_list *sysclk_constraints; }; #define wm8988_reset(c) snd_soc_write(c, WM8988_RESET, 0) /* * WM8988 Controls */ static const char *bass_boost_txt[] = {"Linear Control", "Adaptive Boost"}; static SOC_ENUM_SINGLE_DECL(bass_boost, WM8988_BASS, 7, bass_boost_txt); static const char *bass_filter_txt[] = { "130Hz @ 48kHz", "200Hz @ 48kHz" }; static SOC_ENUM_SINGLE_DECL(bass_filter, WM8988_BASS, 6, bass_filter_txt); static const char *treble_txt[] = {"8kHz", "4kHz"}; static SOC_ENUM_SINGLE_DECL(treble, WM8988_TREBLE, 6, treble_txt); static const char *stereo_3d_lc_txt[] = {"200Hz", "500Hz"}; static SOC_ENUM_SINGLE_DECL(stereo_3d_lc, WM8988_3D, 5, stereo_3d_lc_txt); static const char *stereo_3d_uc_txt[] = {"2.2kHz", "1.5kHz"}; static SOC_ENUM_SINGLE_DECL(stereo_3d_uc, WM8988_3D, 6, stereo_3d_uc_txt); static const char *stereo_3d_func_txt[] = {"Capture", "Playback"}; static SOC_ENUM_SINGLE_DECL(stereo_3d_func, WM8988_3D, 7, stereo_3d_func_txt); static const char *alc_func_txt[] = {"Off", "Right", "Left", "Stereo"}; static SOC_ENUM_SINGLE_DECL(alc_func, WM8988_ALC1, 7, alc_func_txt); static const char *ng_type_txt[] = {"Constant PGA Gain", "Mute ADC Output"}; static SOC_ENUM_SINGLE_DECL(ng_type, WM8988_NGATE, 1, ng_type_txt); static const char *deemph_txt[] = {"None", "32Khz", "44.1Khz", "48Khz"}; static SOC_ENUM_SINGLE_DECL(deemph, WM8988_ADCDAC, 1, deemph_txt); static const char *adcpol_txt[] = {"Normal", "L Invert", "R Invert", "L + R Invert"}; static SOC_ENUM_SINGLE_DECL(adcpol, WM8988_ADCDAC, 5, adcpol_txt); static const DECLARE_TLV_DB_SCALE(pga_tlv, -1725, 75, 0); static const 
DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1); static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1); static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1); static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0); static const struct snd_kcontrol_new wm8988_snd_controls[] = { SOC_ENUM("Bass Boost", bass_boost), SOC_ENUM("Bass Filter", bass_filter), SOC_SINGLE("Bass Volume", WM8988_BASS, 0, 15, 1), SOC_SINGLE("Treble Volume", WM8988_TREBLE, 0, 15, 0), SOC_ENUM("Treble Cut-off", treble), SOC_SINGLE("3D Switch", WM8988_3D, 0, 1, 0), SOC_SINGLE("3D Volume", WM8988_3D, 1, 15, 0), SOC_ENUM("3D Lower Cut-off", stereo_3d_lc), SOC_ENUM("3D Upper Cut-off", stereo_3d_uc), SOC_ENUM("3D Mode", stereo_3d_func), SOC_SINGLE("ALC Capture Target Volume", WM8988_ALC1, 0, 7, 0), SOC_SINGLE("ALC Capture Max Volume", WM8988_ALC1, 4, 7, 0), SOC_ENUM("ALC Capture Function", alc_func), SOC_SINGLE("ALC Capture ZC Switch", WM8988_ALC2, 7, 1, 0), SOC_SINGLE("ALC Capture Hold Time", WM8988_ALC2, 0, 15, 0), SOC_SINGLE("ALC Capture Decay Time", WM8988_ALC3, 4, 15, 0), SOC_SINGLE("ALC Capture Attack Time", WM8988_ALC3, 0, 15, 0), SOC_SINGLE("ALC Capture NG Threshold", WM8988_NGATE, 3, 31, 0), SOC_ENUM("ALC Capture NG Type", ng_type), SOC_SINGLE("ALC Capture NG Switch", WM8988_NGATE, 0, 1, 0), SOC_SINGLE("ZC Timeout Switch", WM8988_ADCTL1, 0, 1, 0), SOC_DOUBLE_R_TLV("Capture Digital Volume", WM8988_LADC, WM8988_RADC, 0, 255, 0, adc_tlv), SOC_DOUBLE_R_TLV("Capture Volume", WM8988_LINVOL, WM8988_RINVOL, 0, 63, 0, pga_tlv), SOC_DOUBLE_R("Capture ZC Switch", WM8988_LINVOL, WM8988_RINVOL, 6, 1, 0), SOC_DOUBLE_R("Capture Switch", WM8988_LINVOL, WM8988_RINVOL, 7, 1, 1), SOC_ENUM("Playback De-emphasis", deemph), SOC_ENUM("Capture Polarity", adcpol), SOC_SINGLE("Playback 6dB Attenuate", WM8988_ADCDAC, 7, 1, 0), SOC_SINGLE("Capture 6dB Attenuate", WM8988_ADCDAC, 8, 1, 0), SOC_DOUBLE_R_TLV("PCM Volume", WM8988_LDAC, WM8988_RDAC, 0, 255, 0, dac_tlv), SOC_SINGLE_TLV("Left Mixer Left Bypass Volume", 
WM8988_LOUTM1, 4, 7, 1, bypass_tlv), SOC_SINGLE_TLV("Left Mixer Right Bypass Volume", WM8988_LOUTM2, 4, 7, 1, bypass_tlv), SOC_SINGLE_TLV("Right Mixer Left Bypass Volume", WM8988_ROUTM1, 4, 7, 1, bypass_tlv), SOC_SINGLE_TLV("Right Mixer Right Bypass Volume", WM8988_ROUTM2, 4, 7, 1, bypass_tlv), SOC_DOUBLE_R("Output 1 Playback ZC Switch", WM8988_LOUT1V, WM8988_ROUT1V, 7, 1, 0), SOC_DOUBLE_R_TLV("Output 1 Playback Volume", WM8988_LOUT1V, WM8988_ROUT1V, 0, 127, 0, out_tlv), SOC_DOUBLE_R("Output 2 Playback ZC Switch", WM8988_LOUT2V, WM8988_ROUT2V, 7, 1, 0), SOC_DOUBLE_R_TLV("Output 2 Playback Volume", WM8988_LOUT2V, WM8988_ROUT2V, 0, 127, 0, out_tlv), }; /* * DAPM Controls */ static int wm8988_lrc_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = w->codec; u16 adctl2 = snd_soc_read(codec, WM8988_ADCTL2); /* Use the DAC to gate LRC if active, otherwise use ADC */ if (snd_soc_read(codec, WM8988_PWR2) & 0x180) adctl2 &= ~0x4; else adctl2 |= 0x4; return snd_soc_write(codec, WM8988_ADCTL2, adctl2); } static const char *wm8988_line_texts[] = { "Line 1", "Line 2", "PGA", "Differential"}; static const unsigned int wm8988_line_values[] = { 0, 1, 3, 4}; static const struct soc_enum wm8988_lline_enum = SOC_VALUE_ENUM_SINGLE(WM8988_LOUTM1, 0, 7, ARRAY_SIZE(wm8988_line_texts), wm8988_line_texts, wm8988_line_values); static const struct snd_kcontrol_new wm8988_left_line_controls = SOC_DAPM_ENUM("Route", wm8988_lline_enum); static const struct soc_enum wm8988_rline_enum = SOC_VALUE_ENUM_SINGLE(WM8988_ROUTM1, 0, 7, ARRAY_SIZE(wm8988_line_texts), wm8988_line_texts, wm8988_line_values); static const struct snd_kcontrol_new wm8988_right_line_controls = SOC_DAPM_ENUM("Route", wm8988_lline_enum); /* Left Mixer */ static const struct snd_kcontrol_new wm8988_left_mixer_controls[] = { SOC_DAPM_SINGLE("Playback Switch", WM8988_LOUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8988_LOUTM1, 7, 1, 0), SOC_DAPM_SINGLE("Right 
Playback Switch", WM8988_LOUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8988_LOUTM2, 7, 1, 0), }; /* Right Mixer */ static const struct snd_kcontrol_new wm8988_right_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", WM8988_ROUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8988_ROUTM1, 7, 1, 0), SOC_DAPM_SINGLE("Playback Switch", WM8988_ROUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8988_ROUTM2, 7, 1, 0), }; static const char *wm8988_pga_sel[] = {"Line 1", "Line 2", "Differential"}; static const unsigned int wm8988_pga_val[] = { 0, 1, 3 }; /* Left PGA Mux */ static const struct soc_enum wm8988_lpga_enum = SOC_VALUE_ENUM_SINGLE(WM8988_LADCIN, 6, 3, ARRAY_SIZE(wm8988_pga_sel), wm8988_pga_sel, wm8988_pga_val); static const struct snd_kcontrol_new wm8988_left_pga_controls = SOC_DAPM_ENUM("Route", wm8988_lpga_enum); /* Right PGA Mux */ static const struct soc_enum wm8988_rpga_enum = SOC_VALUE_ENUM_SINGLE(WM8988_RADCIN, 6, 3, ARRAY_SIZE(wm8988_pga_sel), wm8988_pga_sel, wm8988_pga_val); static const struct snd_kcontrol_new wm8988_right_pga_controls = SOC_DAPM_ENUM("Route", wm8988_rpga_enum); /* Differential Mux */ static const char *wm8988_diff_sel[] = {"Line 1", "Line 2"}; static SOC_ENUM_SINGLE_DECL(diffmux, WM8988_ADCIN, 8, wm8988_diff_sel); static const struct snd_kcontrol_new wm8988_diffmux_controls = SOC_DAPM_ENUM("Route", diffmux); /* Mono ADC Mux */ static const char *wm8988_mono_mux[] = {"Stereo", "Mono (Left)", "Mono (Right)", "Digital Mono"}; static SOC_ENUM_SINGLE_DECL(monomux, WM8988_ADCIN, 6, wm8988_mono_mux); static const struct snd_kcontrol_new wm8988_monomux_controls = SOC_DAPM_ENUM("Route", monomux); static const struct snd_soc_dapm_widget wm8988_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("Mic Bias", WM8988_PWR1, 1, 0, NULL, 0), SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0, &wm8988_diffmux_controls), SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0, &wm8988_monomux_controls), SND_SOC_DAPM_MUX("Right ADC 
Mux", SND_SOC_NOPM, 0, 0, &wm8988_monomux_controls), SND_SOC_DAPM_MUX("Left PGA Mux", WM8988_PWR1, 5, 0, &wm8988_left_pga_controls), SND_SOC_DAPM_MUX("Right PGA Mux", WM8988_PWR1, 4, 0, &wm8988_right_pga_controls), SND_SOC_DAPM_MUX("Left Line Mux", SND_SOC_NOPM, 0, 0, &wm8988_left_line_controls), SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0, &wm8988_right_line_controls), SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8988_PWR1, 2, 0), SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8988_PWR1, 3, 0), SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8988_PWR2, 7, 0), SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8988_PWR2, 8, 0), SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0, &wm8988_left_mixer_controls[0], ARRAY_SIZE(wm8988_left_mixer_controls)), SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0, &wm8988_right_mixer_controls[0], ARRAY_SIZE(wm8988_right_mixer_controls)), SND_SOC_DAPM_PGA("Right Out 2", WM8988_PWR2, 3, 0, NULL, 0), SND_SOC_DAPM_PGA("Left Out 2", WM8988_PWR2, 4, 0, NULL, 0), SND_SOC_DAPM_PGA("Right Out 1", WM8988_PWR2, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("Left Out 1", WM8988_PWR2, 6, 0, NULL, 0), SND_SOC_DAPM_POST("LRC control", wm8988_lrc_control), SND_SOC_DAPM_OUTPUT("LOUT1"), SND_SOC_DAPM_OUTPUT("ROUT1"), SND_SOC_DAPM_OUTPUT("LOUT2"), SND_SOC_DAPM_OUTPUT("ROUT2"), SND_SOC_DAPM_OUTPUT("VREF"), SND_SOC_DAPM_INPUT("LINPUT1"), SND_SOC_DAPM_INPUT("LINPUT2"), SND_SOC_DAPM_INPUT("RINPUT1"), SND_SOC_DAPM_INPUT("RINPUT2"), }; static const struct snd_soc_dapm_route wm8988_dapm_routes[] = { { "Left Line Mux", "Line 1", "LINPUT1" }, { "Left Line Mux", "Line 2", "LINPUT2" }, { "Left Line Mux", "PGA", "Left PGA Mux" }, { "Left Line Mux", "Differential", "Differential Mux" }, { "Right Line Mux", "Line 1", "RINPUT1" }, { "Right Line Mux", "Line 2", "RINPUT2" }, { "Right Line Mux", "PGA", "Right PGA Mux" }, { "Right Line Mux", "Differential", "Differential Mux" }, { "Left PGA Mux", "Line 1", "LINPUT1" }, { "Left PGA Mux", "Line 2", "LINPUT2" }, { 
"Left PGA Mux", "Differential", "Differential Mux" }, { "Right PGA Mux", "Line 1", "RINPUT1" }, { "Right PGA Mux", "Line 2", "RINPUT2" }, { "Right PGA Mux", "Differential", "Differential Mux" }, { "Differential Mux", "Line 1", "LINPUT1" }, { "Differential Mux", "Line 1", "RINPUT1" }, { "Differential Mux", "Line 2", "LINPUT2" }, { "Differential Mux", "Line 2", "RINPUT2" }, { "Left ADC Mux", "Stereo", "Left PGA Mux" }, { "Left ADC Mux", "Mono (Left)", "Left PGA Mux" }, { "Left ADC Mux", "Digital Mono", "Left PGA Mux" }, { "Right ADC Mux", "Stereo", "Right PGA Mux" }, { "Right ADC Mux", "Mono (Right)", "Right PGA Mux" }, { "Right ADC Mux", "Digital Mono", "Right PGA Mux" }, { "Left ADC", NULL, "Left ADC Mux" }, { "Right ADC", NULL, "Right ADC Mux" }, { "Left Line Mux", "Line 1", "LINPUT1" }, { "Left Line Mux", "Line 2", "LINPUT2" }, { "Left Line Mux", "PGA", "Left PGA Mux" }, { "Left Line Mux", "Differential", "Differential Mux" }, { "Right Line Mux", "Line 1", "RINPUT1" }, { "Right Line Mux", "Line 2", "RINPUT2" }, { "Right Line Mux", "PGA", "Right PGA Mux" }, { "Right Line Mux", "Differential", "Differential Mux" }, { "Left Mixer", "Playback Switch", "Left DAC" }, { "Left Mixer", "Left Bypass Switch", "Left Line Mux" }, { "Left Mixer", "Right Playback Switch", "Right DAC" }, { "Left Mixer", "Right Bypass Switch", "Right Line Mux" }, { "Right Mixer", "Left Playback Switch", "Left DAC" }, { "Right Mixer", "Left Bypass Switch", "Left Line Mux" }, { "Right Mixer", "Playback Switch", "Right DAC" }, { "Right Mixer", "Right Bypass Switch", "Right Line Mux" }, { "Left Out 1", NULL, "Left Mixer" }, { "LOUT1", NULL, "Left Out 1" }, { "Right Out 1", NULL, "Right Mixer" }, { "ROUT1", NULL, "Right Out 1" }, { "Left Out 2", NULL, "Left Mixer" }, { "LOUT2", NULL, "Left Out 2" }, { "Right Out 2", NULL, "Right Mixer" }, { "ROUT2", NULL, "Right Out 2" }, }; struct _coeff_div { u32 mclk; u32 rate; u16 fs; u8 sr:5; u8 usb:1; }; /* codec hifi mclk clock divider coefficients */ static 
const struct _coeff_div coeff_div[] = { /* 8k */ {12288000, 8000, 1536, 0x6, 0x0}, {11289600, 8000, 1408, 0x16, 0x0}, {18432000, 8000, 2304, 0x7, 0x0}, {16934400, 8000, 2112, 0x17, 0x0}, {12000000, 8000, 1500, 0x6, 0x1}, /* 11.025k */ {11289600, 11025, 1024, 0x18, 0x0}, {16934400, 11025, 1536, 0x19, 0x0}, {12000000, 11025, 1088, 0x19, 0x1}, /* 16k */ {12288000, 16000, 768, 0xa, 0x0}, {18432000, 16000, 1152, 0xb, 0x0}, {12000000, 16000, 750, 0xa, 0x1}, /* 22.05k */ {11289600, 22050, 512, 0x1a, 0x0}, {16934400, 22050, 768, 0x1b, 0x0}, {12000000, 22050, 544, 0x1b, 0x1}, /* 32k */ {12288000, 32000, 384, 0xc, 0x0}, {18432000, 32000, 576, 0xd, 0x0}, {12000000, 32000, 375, 0xa, 0x1}, /* 44.1k */ {11289600, 44100, 256, 0x10, 0x0}, {16934400, 44100, 384, 0x11, 0x0}, {12000000, 44100, 272, 0x11, 0x1}, /* 48k */ {12288000, 48000, 256, 0x0, 0x0}, {18432000, 48000, 384, 0x1, 0x0}, {12000000, 48000, 250, 0x0, 0x1}, /* 88.2k */ {11289600, 88200, 128, 0x1e, 0x0}, {16934400, 88200, 192, 0x1f, 0x0}, {12000000, 88200, 136, 0x1f, 0x1}, /* 96k */ {12288000, 96000, 128, 0xe, 0x0}, {18432000, 96000, 192, 0xf, 0x0}, {12000000, 96000, 125, 0xe, 0x1}, }; static inline int get_coeff(int mclk, int rate) { int i; for (i = 0; i < ARRAY_SIZE(coeff_div); i++) { if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk) return i; } return -EINVAL; } /* The set of rates we can generate from the above for each SYSCLK */ static const unsigned int rates_12288[] = { 8000, 12000, 16000, 24000, 24000, 32000, 48000, 96000, }; static const struct snd_pcm_hw_constraint_list constraints_12288 = { .count = ARRAY_SIZE(rates_12288), .list = rates_12288, }; static const unsigned int rates_112896[] = { 8000, 11025, 22050, 44100, }; static const struct snd_pcm_hw_constraint_list constraints_112896 = { .count = ARRAY_SIZE(rates_112896), .list = rates_112896, }; static const unsigned int rates_12[] = { 8000, 11025, 12000, 16000, 22050, 2400, 32000, 41100, 48000, 48000, 88235, 96000, }; static const struct 
snd_pcm_hw_constraint_list constraints_12 = { .count = ARRAY_SIZE(rates_12), .list = rates_12, }; /* * Note that this should be called from init rather than from hw_params. */ static int wm8988_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec); switch (freq) { case 11289600: case 18432000: case 22579200: case 36864000: wm8988->sysclk_constraints = &constraints_112896; wm8988->sysclk = freq; return 0; case 12288000: case 16934400: case 24576000: case 33868800: wm8988->sysclk_constraints = &constraints_12288; wm8988->sysclk = freq; return 0; case 12000000: case 24000000: wm8988->sysclk_constraints = &constraints_12; wm8988->sysclk = freq; return 0; } return -EINVAL; } static int wm8988_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 iface = 0; /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: iface = 0x0040; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: iface |= 0x0002; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: iface |= 0x0001; break; case SND_SOC_DAIFMT_DSP_A: iface |= 0x0003; break; case SND_SOC_DAIFMT_DSP_B: iface |= 0x0013; break; default: return -EINVAL; } /* clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: iface |= 0x0090; break; case SND_SOC_DAIFMT_IB_NF: iface |= 0x0080; break; case SND_SOC_DAIFMT_NB_IF: iface |= 0x0010; break; default: return -EINVAL; } snd_soc_write(codec, WM8988_IFACE, iface); return 0; } static int wm8988_pcm_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct wm8988_priv *wm8988 = 
snd_soc_codec_get_drvdata(codec); /* The set of sample rates that can be supported depends on the * MCLK supplied to the CODEC - enforce this. */ if (!wm8988->sysclk) { dev_err(codec->dev, "No MCLK configured, call set_sysclk() on init\n"); return -EINVAL; } snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, wm8988->sysclk_constraints); return 0; } static int wm8988_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec); u16 iface = snd_soc_read(codec, WM8988_IFACE) & 0x1f3; u16 srate = snd_soc_read(codec, WM8988_SRATE) & 0x180; int coeff; coeff = get_coeff(wm8988->sysclk, params_rate(params)); if (coeff < 0) { coeff = get_coeff(wm8988->sysclk / 2, params_rate(params)); srate |= 0x40; } if (coeff < 0) { dev_err(codec->dev, "Unable to configure sample rate %dHz with %dHz MCLK\n", params_rate(params), wm8988->sysclk); return coeff; } /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: iface |= 0x0004; break; case SNDRV_PCM_FORMAT_S24_LE: iface |= 0x0008; break; case SNDRV_PCM_FORMAT_S32_LE: iface |= 0x000c; break; } /* set iface & srate */ snd_soc_write(codec, WM8988_IFACE, iface); if (coeff >= 0) snd_soc_write(codec, WM8988_SRATE, srate | (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb); return 0; } static int wm8988_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u16 mute_reg = snd_soc_read(codec, WM8988_ADCDAC) & 0xfff7; if (mute) snd_soc_write(codec, WM8988_ADCDAC, mute_reg | 0x8); else snd_soc_write(codec, WM8988_ADCDAC, mute_reg); return 0; } static int wm8988_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec); u16 pwr_reg = snd_soc_read(codec, WM8988_PWR1) & ~0x1c1; switch (level) { case 
SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: /* VREF, VMID=2x50k, digital enabled */ snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x00c0); break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { regcache_sync(wm8988->regmap); /* VREF, VMID=2x5k */ snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x1c1); /* Charge caps */ msleep(100); } /* VREF, VMID=2*500k, digital stopped */ snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x0141); break; case SND_SOC_BIAS_OFF: snd_soc_write(codec, WM8988_PWR1, 0x0000); break; } codec->dapm.bias_level = level; return 0; } #define WM8988_RATES SNDRV_PCM_RATE_8000_96000 #define WM8988_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE) static const struct snd_soc_dai_ops wm8988_ops = { .startup = wm8988_pcm_startup, .hw_params = wm8988_pcm_hw_params, .set_fmt = wm8988_set_dai_fmt, .set_sysclk = wm8988_set_dai_sysclk, .digital_mute = wm8988_mute, }; static struct snd_soc_dai_driver wm8988_dai = { .name = "wm8988-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = WM8988_RATES, .formats = WM8988_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8988_RATES, .formats = WM8988_FORMATS, }, .ops = &wm8988_ops, .symmetric_rates = 1, }; static int wm8988_suspend(struct snd_soc_codec *codec) { struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec); wm8988_set_bias_level(codec, SND_SOC_BIAS_OFF); regcache_mark_dirty(wm8988->regmap); return 0; } static int wm8988_resume(struct snd_soc_codec *codec) { wm8988_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static int wm8988_probe(struct snd_soc_codec *codec) { int ret = 0; ret = wm8988_reset(codec); if (ret < 0) { dev_err(codec->dev, "Failed to issue reset\n"); return ret; } /* set the update bits (we always update left then right) */ snd_soc_update_bits(codec, WM8988_RADC, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8988_RDAC, 
0x0100, 0x0100); snd_soc_update_bits(codec, WM8988_ROUT1V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8988_ROUT2V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8988_RINVOL, 0x0100, 0x0100); wm8988_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static int wm8988_remove(struct snd_soc_codec *codec) { wm8988_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm8988 = { .probe = wm8988_probe, .remove = wm8988_remove, .suspend = wm8988_suspend, .resume = wm8988_resume, .set_bias_level = wm8988_set_bias_level, .controls = wm8988_snd_controls, .num_controls = ARRAY_SIZE(wm8988_snd_controls), .dapm_widgets = wm8988_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8988_dapm_widgets), .dapm_routes = wm8988_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm8988_dapm_routes), }; static struct regmap_config wm8988_regmap = { .reg_bits = 7, .val_bits = 9, .max_register = WM8988_LPPB, .writeable_reg = wm8988_writeable, .cache_type = REGCACHE_RBTREE, .reg_defaults = wm8988_reg_defaults, .num_reg_defaults = ARRAY_SIZE(wm8988_reg_defaults), }; #if defined(CONFIG_SPI_MASTER) static int wm8988_spi_probe(struct spi_device *spi) { struct wm8988_priv *wm8988; int ret; wm8988 = devm_kzalloc(&spi->dev, sizeof(struct wm8988_priv), GFP_KERNEL); if (wm8988 == NULL) return -ENOMEM; wm8988->regmap = devm_regmap_init_spi(spi, &wm8988_regmap); if (IS_ERR(wm8988->regmap)) { ret = PTR_ERR(wm8988->regmap); dev_err(&spi->dev, "Failed to init regmap: %d\n", ret); return ret; } spi_set_drvdata(spi, wm8988); ret = snd_soc_register_codec(&spi->dev, &soc_codec_dev_wm8988, &wm8988_dai, 1); return ret; } static int wm8988_spi_remove(struct spi_device *spi) { snd_soc_unregister_codec(&spi->dev); return 0; } static struct spi_driver wm8988_spi_driver = { .driver = { .name = "wm8988", .owner = THIS_MODULE, }, .probe = wm8988_spi_probe, .remove = wm8988_spi_remove, }; #endif /* CONFIG_SPI_MASTER */ #if IS_ENABLED(CONFIG_I2C) static int 
wm8988_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8988_priv *wm8988; int ret; wm8988 = devm_kzalloc(&i2c->dev, sizeof(struct wm8988_priv), GFP_KERNEL); if (wm8988 == NULL) return -ENOMEM; i2c_set_clientdata(i2c, wm8988); wm8988->regmap = devm_regmap_init_i2c(i2c, &wm8988_regmap); if (IS_ERR(wm8988->regmap)) { ret = PTR_ERR(wm8988->regmap); dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret); return ret; } ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8988, &wm8988_dai, 1); return ret; } static int wm8988_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct i2c_device_id wm8988_i2c_id[] = { { "wm8988", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8988_i2c_id); static struct i2c_driver wm8988_i2c_driver = { .driver = { .name = "wm8988", .owner = THIS_MODULE, }, .probe = wm8988_i2c_probe, .remove = wm8988_i2c_remove, .id_table = wm8988_i2c_id, }; #endif static int __init wm8988_modinit(void) { int ret = 0; #if IS_ENABLED(CONFIG_I2C) ret = i2c_add_driver(&wm8988_i2c_driver); if (ret != 0) { printk(KERN_ERR "Failed to register WM8988 I2C driver: %d\n", ret); } #endif #if defined(CONFIG_SPI_MASTER) ret = spi_register_driver(&wm8988_spi_driver); if (ret != 0) { printk(KERN_ERR "Failed to register WM8988 SPI driver: %d\n", ret); } #endif return ret; } module_init(wm8988_modinit); static void __exit wm8988_exit(void) { #if IS_ENABLED(CONFIG_I2C) i2c_del_driver(&wm8988_i2c_driver); #endif #if defined(CONFIG_SPI_MASTER) spi_unregister_driver(&wm8988_spi_driver); #endif } module_exit(wm8988_exit); MODULE_DESCRIPTION("ASoC WM8988 driver"); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_LICENSE("GPL");
/* * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. * Copyright (c) 2013 Red Hat, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_da_format.h" #include "xfs_da_btree.h" #include "xfs_inode.h" #include "xfs_dir2.h" #include "xfs_dir2_priv.h" #include "xfs_error.h" #include "xfs_trans.h" #include "xfs_buf_item.h" #include "xfs_cksum.h" /* * Check the consistency of the data block. * The input can also be a block-format directory. * Return 0 is the buffer is good, otherwise an error. 
*/ int __xfs_dir3_data_check( struct xfs_inode *dp, /* incore inode pointer */ struct xfs_buf *bp) /* data block's buffer */ { xfs_dir2_dataptr_t addr; /* addr for leaf lookup */ xfs_dir2_data_free_t *bf; /* bestfree table */ xfs_dir2_block_tail_t *btp=NULL; /* block tail */ int count; /* count of entries found */ xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_dir2_data_entry_t *dep; /* data entry */ xfs_dir2_data_free_t *dfp; /* bestfree entry */ xfs_dir2_data_unused_t *dup; /* unused entry */ char *endp; /* end of useful data */ int freeseen; /* mask of bestfrees seen */ xfs_dahash_t hash; /* hash of current name */ int i; /* leaf index */ int lastfree; /* last entry was unused */ xfs_dir2_leaf_entry_t *lep=NULL; /* block leaf entries */ xfs_mount_t *mp; /* filesystem mount point */ char *p; /* current data position */ int stale; /* count of stale leaves */ struct xfs_name name; const struct xfs_dir_ops *ops; mp = bp->b_target->bt_mount; /* * We can be passed a null dp here from a verifier, so we need to go the * hard way to get them. */ ops = xfs_dir_get_ops(mp, dp); hdr = bp->b_addr; p = (char *)ops->data_entry_p(hdr); switch (hdr->magic) { case cpu_to_be32(XFS_DIR3_BLOCK_MAGIC): case cpu_to_be32(XFS_DIR2_BLOCK_MAGIC): btp = xfs_dir2_block_tail_p(mp, hdr); lep = xfs_dir2_block_leaf_p(btp); endp = (char *)lep; /* * The number of leaf entries is limited by the size of the * block and the amount of space used by the data entries. * We don't know how much space is used by the data entries yet, * so just ensure that the count falls somewhere inside the * block right now. */ XFS_WANT_CORRUPTED_RETURN(be32_to_cpu(btp->count) < ((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry)); break; case cpu_to_be32(XFS_DIR3_DATA_MAGIC): case cpu_to_be32(XFS_DIR2_DATA_MAGIC): endp = (char *)hdr + mp->m_dirblksize; break; default: XFS_ERROR_REPORT("Bad Magic", XFS_ERRLEVEL_LOW, mp); return EFSCORRUPTED; } /* * Account for zero bestfree entries. 
*/ bf = ops->data_bestfree_p(hdr); count = lastfree = freeseen = 0; if (!bf[0].length) { XFS_WANT_CORRUPTED_RETURN(!bf[0].offset); freeseen |= 1 << 0; } if (!bf[1].length) { XFS_WANT_CORRUPTED_RETURN(!bf[1].offset); freeseen |= 1 << 1; } if (!bf[2].length) { XFS_WANT_CORRUPTED_RETURN(!bf[2].offset); freeseen |= 1 << 2; } XFS_WANT_CORRUPTED_RETURN(be16_to_cpu(bf[0].length) >= be16_to_cpu(bf[1].length)); XFS_WANT_CORRUPTED_RETURN(be16_to_cpu(bf[1].length) >= be16_to_cpu(bf[2].length)); /* * Loop over the data/unused entries. */ while (p < endp) { dup = (xfs_dir2_data_unused_t *)p; /* * If it's unused, look for the space in the bestfree table. * If we find it, account for that, else make sure it * doesn't need to be there. */ if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { XFS_WANT_CORRUPTED_RETURN(lastfree == 0); XFS_WANT_CORRUPTED_RETURN( be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) == (char *)dup - (char *)hdr); dfp = xfs_dir2_data_freefind(hdr, bf, dup); if (dfp) { i = (int)(dfp - bf); XFS_WANT_CORRUPTED_RETURN( (freeseen & (1 << i)) == 0); freeseen |= 1 << i; } else { XFS_WANT_CORRUPTED_RETURN( be16_to_cpu(dup->length) <= be16_to_cpu(bf[2].length)); } p += be16_to_cpu(dup->length); lastfree = 1; continue; } /* * It's a real entry. Validate the fields. * If this is a block directory then make sure it's * in the leaf section of the block. * The linear search is crude but this is DEBUG code. 
*/ dep = (xfs_dir2_data_entry_t *)p; XFS_WANT_CORRUPTED_RETURN(dep->namelen != 0); XFS_WANT_CORRUPTED_RETURN( !xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber))); XFS_WANT_CORRUPTED_RETURN( be16_to_cpu(*ops->data_entry_tag_p(dep)) == (char *)dep - (char *)hdr); XFS_WANT_CORRUPTED_RETURN( ops->data_get_ftype(dep) < XFS_DIR3_FT_MAX); count++; lastfree = 0; if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) { addr = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, (xfs_dir2_data_aoff_t) ((char *)dep - (char *)hdr)); name.name = dep->name; name.len = dep->namelen; hash = mp->m_dirnameops->hashname(&name); for (i = 0; i < be32_to_cpu(btp->count); i++) { if (be32_to_cpu(lep[i].address) == addr && be32_to_cpu(lep[i].hashval) == hash) break; } XFS_WANT_CORRUPTED_RETURN(i < be32_to_cpu(btp->count)); } p += ops->data_entsize(dep->namelen); } /* * Need to have seen all the entries and all the bestfree slots. */ XFS_WANT_CORRUPTED_RETURN(freeseen == 7); if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) { for (i = stale = 0; i < be32_to_cpu(btp->count); i++) { if (lep[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) stale++; if (i > 0) XFS_WANT_CORRUPTED_RETURN( be32_to_cpu(lep[i].hashval) >= be32_to_cpu(lep[i - 1].hashval)); } XFS_WANT_CORRUPTED_RETURN(count == be32_to_cpu(btp->count) - be32_to_cpu(btp->stale)); XFS_WANT_CORRUPTED_RETURN(stale == be32_to_cpu(btp->stale)); } return 0; } static bool xfs_dir3_data_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; if (xfs_sb_version_hascrc(&mp->m_sb)) { if (hdr3->magic != cpu_to_be32(XFS_DIR3_DATA_MAGIC)) return false; if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_uuid)) return false; if (be64_to_cpu(hdr3->blkno) != bp->b_bn) return false; } else { if (hdr3->magic != cpu_to_be32(XFS_DIR2_DATA_MAGIC)) return false; } if (__xfs_dir3_data_check(NULL, bp)) 
return false; return true; } /* * Readahead of the first block of the directory when it is opened is completely * oblivious to the format of the directory. Hence we can either get a block * format buffer or a data format buffer on readahead. */ static void xfs_dir3_data_reada_verify( struct xfs_buf *bp) { struct xfs_dir2_data_hdr *hdr = bp->b_addr; switch (hdr->magic) { case cpu_to_be32(XFS_DIR2_BLOCK_MAGIC): case cpu_to_be32(XFS_DIR3_BLOCK_MAGIC): bp->b_ops = &xfs_dir3_block_buf_ops; bp->b_ops->verify_read(bp); return; case cpu_to_be32(XFS_DIR2_DATA_MAGIC): case cpu_to_be32(XFS_DIR3_DATA_MAGIC): xfs_dir3_data_verify(bp); return; default: xfs_buf_ioerror(bp, EFSCORRUPTED); xfs_verifier_error(bp); break; } } static void xfs_dir3_data_read_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; if (xfs_sb_version_hascrc(&mp->m_sb) && !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF)) xfs_buf_ioerror(bp, EFSBADCRC); else if (!xfs_dir3_data_verify(bp)) xfs_buf_ioerror(bp, EFSCORRUPTED); if (bp->b_error) xfs_verifier_error(bp); } static void xfs_dir3_data_write_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_buf_log_item *bip = bp->b_fspriv; struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; if (!xfs_dir3_data_verify(bp)) { xfs_buf_ioerror(bp, EFSCORRUPTED); xfs_verifier_error(bp); return; } if (!xfs_sb_version_hascrc(&mp->m_sb)) return; if (bip) hdr3->lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_buf_update_cksum(bp, XFS_DIR3_DATA_CRC_OFF); } const struct xfs_buf_ops xfs_dir3_data_buf_ops = { .verify_read = xfs_dir3_data_read_verify, .verify_write = xfs_dir3_data_write_verify, }; static const struct xfs_buf_ops xfs_dir3_data_reada_buf_ops = { .verify_read = xfs_dir3_data_reada_verify, .verify_write = xfs_dir3_data_write_verify, }; int xfs_dir3_data_read( struct xfs_trans *tp, struct xfs_inode *dp, xfs_dablk_t bno, xfs_daddr_t mapped_bno, struct xfs_buf **bpp) { int err; err = xfs_da_read_buf(tp, dp, bno, mapped_bno, 
bpp, XFS_DATA_FORK, &xfs_dir3_data_buf_ops); if (!err && tp) xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_DATA_BUF); return err; } int xfs_dir3_data_readahead( struct xfs_trans *tp, struct xfs_inode *dp, xfs_dablk_t bno, xfs_daddr_t mapped_bno) { return xfs_da_reada_buf(tp, dp, bno, mapped_bno, XFS_DATA_FORK, &xfs_dir3_data_reada_buf_ops); } /* * Given a data block and an unused entry from that block, * return the bestfree entry if any that corresponds to it. */ xfs_dir2_data_free_t * xfs_dir2_data_freefind( struct xfs_dir2_data_hdr *hdr, /* data block header */ struct xfs_dir2_data_free *bf, /* bestfree table pointer */ struct xfs_dir2_data_unused *dup) /* unused space */ { xfs_dir2_data_free_t *dfp; /* bestfree entry */ xfs_dir2_data_aoff_t off; /* offset value needed */ #ifdef DEBUG int matched; /* matched the value */ int seenzero; /* saw a 0 bestfree entry */ #endif off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr); #ifdef DEBUG /* * Validate some consistency in the bestfree table. * Check order, non-overlapping entries, and if we find the * one we're looking for it has to be exact. 
*/ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)); for (dfp = &bf[0], seenzero = matched = 0; dfp < &bf[XFS_DIR2_DATA_FD_COUNT]; dfp++) { if (!dfp->offset) { ASSERT(!dfp->length); seenzero = 1; continue; } ASSERT(seenzero == 0); if (be16_to_cpu(dfp->offset) == off) { matched = 1; ASSERT(dfp->length == dup->length); } else if (off < be16_to_cpu(dfp->offset)) ASSERT(off + be16_to_cpu(dup->length) <= be16_to_cpu(dfp->offset)); else ASSERT(be16_to_cpu(dfp->offset) + be16_to_cpu(dfp->length) <= off); ASSERT(matched || be16_to_cpu(dfp->length) >= be16_to_cpu(dup->length)); if (dfp > &bf[0]) ASSERT(be16_to_cpu(dfp[-1].length) >= be16_to_cpu(dfp[0].length)); } #endif /* * If this is smaller than the smallest bestfree entry, * it can't be there since they're sorted. */ if (be16_to_cpu(dup->length) < be16_to_cpu(bf[XFS_DIR2_DATA_FD_COUNT - 1].length)) return NULL; /* * Look at the three bestfree entries for our guy. */ for (dfp = &bf[0]; dfp < &bf[XFS_DIR2_DATA_FD_COUNT]; dfp++) { if (!dfp->offset) return NULL; if (be16_to_cpu(dfp->offset) == off) return dfp; } /* * Didn't find it. This only happens if there are duplicate lengths. */ return NULL; } /* * Insert an unused-space entry into the bestfree table. 
 */
xfs_dir2_data_free_t *				/* entry inserted */
xfs_dir2_data_freeinsert(
	struct xfs_dir2_data_hdr *hdr,		/* data block pointer */
	struct xfs_dir2_data_free *dfp,		/* bestfree table pointer */
	struct xfs_dir2_data_unused *dup,	/* unused space */
	int			*loghead)	/* log the data header (out) */
{
	xfs_dir2_data_free_t	new;		/* new bestfree entry */

	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));

	/*
	 * Build the candidate entry: length copied as-is (already
	 * big-endian on disk), offset computed from the block header.
	 */
	new.length = dup->length;
	new.offset = cpu_to_be16((char *)dup - (char *)hdr);

	/*
	 * Insert at position 0, 1, or 2; or not at all.
	 * The table is kept sorted by descending length, so each case
	 * shifts the smaller entries down one slot (entry 2 falls off).
	 */
	if (be16_to_cpu(new.length) > be16_to_cpu(dfp[0].length)) {
		dfp[2] = dfp[1];
		dfp[1] = dfp[0];
		dfp[0] = new;
		*loghead = 1;
		return &dfp[0];
	}
	if (be16_to_cpu(new.length) > be16_to_cpu(dfp[1].length)) {
		dfp[2] = dfp[1];
		dfp[1] = new;
		*loghead = 1;
		return &dfp[1];
	}
	if (be16_to_cpu(new.length) > be16_to_cpu(dfp[2].length)) {
		dfp[2] = new;
		*loghead = 1;
		return &dfp[2];
	}
	/* Smaller than all three bestfree entries: not tracked. */
	return NULL;
}

/*
 * Remove a bestfree entry from the table.
 * The table stays dense and sorted: later entries slide up and the
 * last slot is zeroed.  Caller is told to log the header.
 */
STATIC void
xfs_dir2_data_freeremove(
	struct xfs_dir2_data_hdr *hdr,		/* data block header */
	struct xfs_dir2_data_free *bf,		/* bestfree table pointer */
	struct xfs_dir2_data_free *dfp,		/* bestfree entry pointer */
	int			*loghead)	/* out: log data header */
{

	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));

	/*
	 * It's the first entry, slide the next 2 up.
	 */
	if (dfp == &bf[0]) {
		bf[0] = bf[1];
		bf[1] = bf[2];
	}
	/*
	 * It's the second entry, slide the 3rd entry up.
	 */
	else if (dfp == &bf[1])
		bf[1] = bf[2];
	/*
	 * Must be the last entry.
	 */
	else
		ASSERT(dfp == &bf[2]);
	/*
	 * Clear the 3rd entry, must be zero now.
	 */
	bf[2].length = 0;
	bf[2].offset = 0;
	*loghead = 1;
}

/*
 * Given a data block, reconstruct its bestfree map.
*/ void xfs_dir2_data_freescan( struct xfs_inode *dp, struct xfs_dir2_data_hdr *hdr, int *loghead) { xfs_dir2_block_tail_t *btp; /* block tail */ xfs_dir2_data_entry_t *dep; /* active data entry */ xfs_dir2_data_unused_t *dup; /* unused data entry */ struct xfs_dir2_data_free *bf; char *endp; /* end of block's data */ char *p; /* current entry pointer */ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)); /* * Start by clearing the table. */ bf = dp->d_ops->data_bestfree_p(hdr); memset(bf, 0, sizeof(*bf) * XFS_DIR2_DATA_FD_COUNT); *loghead = 1; /* * Set up pointers. */ p = (char *)dp->d_ops->data_entry_p(hdr); if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) { btp = xfs_dir2_block_tail_p(dp->i_mount, hdr); endp = (char *)xfs_dir2_block_leaf_p(btp); } else endp = (char *)hdr + dp->i_mount->m_dirblksize; /* * Loop over the block's entries. */ while (p < endp) { dup = (xfs_dir2_data_unused_t *)p; /* * If it's a free entry, insert it. */ if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { ASSERT((char *)dup - (char *)hdr == be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup))); xfs_dir2_data_freeinsert(hdr, bf, dup, loghead); p += be16_to_cpu(dup->length); } /* * For active entries, check their tags and skip them. */ else { dep = (xfs_dir2_data_entry_t *)p; ASSERT((char *)dep - (char *)hdr == be16_to_cpu(*dp->d_ops->data_entry_tag_p(dep))); p += dp->d_ops->data_entsize(dep->namelen); } } } /* * Initialize a data block at the given block number in the directory. * Give back the buffer for the created block. 
 */
int						/* error */
xfs_dir3_data_init(
	xfs_da_args_t		*args,		/* directory operation args */
	xfs_dir2_db_t		blkno,		/* logical dir block number */
	struct xfs_buf		**bpp)		/* output block buffer */
{
	struct xfs_buf		*bp;		/* block buffer */
	xfs_dir2_data_hdr_t	*hdr;		/* data block header */
	xfs_inode_t		*dp;		/* incore directory inode */
	xfs_dir2_data_unused_t	*dup;		/* unused entry pointer */
	struct xfs_dir2_data_free *bf;
	int			error;		/* error return value */
	int			i;		/* bestfree index */
	xfs_mount_t		*mp;		/* filesystem mount point */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			t;		/* temp */

	dp = args->dp;
	mp = dp->i_mount;
	tp = args->trans;
	/*
	 * Get the buffer set up for the block.
	 */
	error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(mp, blkno), -1, &bp,
		XFS_DATA_FORK);
	if (error)
		return error;
	bp->b_ops = &xfs_dir3_data_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_DATA_BUF);

	/*
	 * Initialize the header.
	 * CRC-enabled (v5) filesystems get the larger dir3 header with
	 * self-describing metadata (blkno, owner inode, fs uuid); older
	 * filesystems just get the dir2 magic number.
	 */
	hdr = bp->b_addr;
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;

		memset(hdr3, 0, sizeof(*hdr3));
		hdr3->magic = cpu_to_be32(XFS_DIR3_DATA_MAGIC);
		hdr3->blkno = cpu_to_be64(bp->b_bn);
		hdr3->owner = cpu_to_be64(dp->i_ino);
		uuid_copy(&hdr3->uuid, &mp->m_sb.sb_uuid);

	} else
		hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);

	/*
	 * Seed the bestfree table: entry 0 describes the whole free body
	 * of the block (set below); entries 1 and 2 start empty.
	 */
	bf = dp->d_ops->data_bestfree_p(hdr);
	bf[0].offset = cpu_to_be16(dp->d_ops->data_entry_offset);
	for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) {
		bf[i].length = 0;
		bf[i].offset = 0;
	}

	/*
	 * Set up an unused entry for the block's body.
	 * The single unused entry spans everything after the header.
	 */
	dup = dp->d_ops->data_unused_p(hdr);
	dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);

	t = mp->m_dirblksize - (uint)dp->d_ops->data_entry_offset;
	bf[0].length = cpu_to_be16(t);
	dup->length = cpu_to_be16(t);
	*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)hdr);
	/*
	 * Log it and return it.
	 */
	xfs_dir2_data_log_header(tp, dp, bp);
	xfs_dir2_data_log_unused(tp, bp, dup);
	*bpp = bp;
	return 0;
}

/*
 * Log an active data entry from the block.
 */
void
xfs_dir2_data_log_entry(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	xfs_dir2_data_entry_t	*dep)		/* data entry pointer */
{
	struct xfs_dir2_data_hdr *hdr = bp->b_addr;

	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));

	/*
	 * Log the byte range from the entry's start through the end of
	 * its tag word (the tag sits at the end of the entry).
	 */
	xfs_trans_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr),
		(uint)((char *)(dp->d_ops->data_entry_tag_p(dep) + 1) -
		       (char *)hdr - 1));
}

/*
 * Log a data block header.
 */
void
xfs_dir2_data_log_header(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	struct xfs_buf		*bp)
{
#ifdef DEBUG
	struct xfs_dir2_data_hdr *hdr = bp->b_addr;

	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));
#endif

	/* Header runs from byte 0 up to the first entry offset. */
	xfs_trans_log_buf(tp, bp, 0, dp->d_ops->data_entry_offset - 1);
}

/*
 * Log a data unused entry.
 */
void
xfs_dir2_data_log_unused(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	xfs_dir2_data_unused_t	*dup)		/* data unused pointer */
{
	xfs_dir2_data_hdr_t	*hdr = bp->b_addr;

	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));

	/*
	 * Log the first part of the unused entry.
	 * (freetag + length at the front; the middle of a free region
	 * holds no meaningful data and is not logged.)
	 */
	xfs_trans_log_buf(tp, bp, (uint)((char *)dup - (char *)hdr),
		(uint)((char *)&dup->length + sizeof(dup->length) - 1 -
		       (char *)hdr));
	/*
	 * Log the end (tag) of the unused entry.
	 */
	xfs_trans_log_buf(tp, bp,
		(uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr),
		(uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr +
		       sizeof(xfs_dir2_data_off_t) - 1));
}

/*
 * Make a byte range in the data block unused.
 * Its current contents are unimportant.
*/ void xfs_dir2_data_make_free( struct xfs_trans *tp, struct xfs_inode *dp, struct xfs_buf *bp, xfs_dir2_data_aoff_t offset, /* starting byte offset */ xfs_dir2_data_aoff_t len, /* length in bytes */ int *needlogp, /* out: log header */ int *needscanp) /* out: regen bestfree */ { xfs_dir2_data_hdr_t *hdr; /* data block pointer */ xfs_dir2_data_free_t *dfp; /* bestfree pointer */ char *endptr; /* end of data area */ xfs_mount_t *mp; /* filesystem mount point */ int needscan; /* need to regen bestfree */ xfs_dir2_data_unused_t *newdup; /* new unused entry */ xfs_dir2_data_unused_t *postdup; /* unused entry after us */ xfs_dir2_data_unused_t *prevdup; /* unused entry before us */ struct xfs_dir2_data_free *bf; mp = tp->t_mountp; hdr = bp->b_addr; /* * Figure out where the end of the data area is. */ if (hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC)) endptr = (char *)hdr + mp->m_dirblksize; else { xfs_dir2_block_tail_t *btp; /* block tail */ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)); btp = xfs_dir2_block_tail_p(mp, hdr); endptr = (char *)xfs_dir2_block_leaf_p(btp); } /* * If this isn't the start of the block, then back up to * the previous entry and see if it's free. */ if (offset > dp->d_ops->data_entry_offset) { __be16 *tagp; /* tag just before us */ tagp = (__be16 *)((char *)hdr + offset) - 1; prevdup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp)); if (be16_to_cpu(prevdup->freetag) != XFS_DIR2_DATA_FREE_TAG) prevdup = NULL; } else prevdup = NULL; /* * If this isn't the end of the block, see if the entry after * us is free. 
*/ if ((char *)hdr + offset + len < endptr) { postdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset + len); if (be16_to_cpu(postdup->freetag) != XFS_DIR2_DATA_FREE_TAG) postdup = NULL; } else postdup = NULL; ASSERT(*needscanp == 0); needscan = 0; /* * Previous and following entries are both free, * merge everything into a single free entry. */ bf = dp->d_ops->data_bestfree_p(hdr); if (prevdup && postdup) { xfs_dir2_data_free_t *dfp2; /* another bestfree pointer */ /* * See if prevdup and/or postdup are in bestfree table. */ dfp = xfs_dir2_data_freefind(hdr, bf, prevdup); dfp2 = xfs_dir2_data_freefind(hdr, bf, postdup); /* * We need a rescan unless there are exactly 2 free entries * namely our two. Then we know what's happening, otherwise * since the third bestfree is there, there might be more * entries. */ needscan = (bf[2].length != 0); /* * Fix up the new big freespace. */ be16_add_cpu(&prevdup->length, len + be16_to_cpu(postdup->length)); *xfs_dir2_data_unused_tag_p(prevdup) = cpu_to_be16((char *)prevdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, prevdup); if (!needscan) { /* * Has to be the case that entries 0 and 1 are * dfp and dfp2 (don't know which is which), and * entry 2 is empty. * Remove entry 1 first then entry 0. */ ASSERT(dfp && dfp2); if (dfp == &bf[1]) { dfp = &bf[0]; ASSERT(dfp2 == dfp); dfp2 = &bf[1]; } xfs_dir2_data_freeremove(hdr, bf, dfp2, needlogp); xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp); /* * Now insert the new entry. */ dfp = xfs_dir2_data_freeinsert(hdr, bf, prevdup, needlogp); ASSERT(dfp == &bf[0]); ASSERT(dfp->length == prevdup->length); ASSERT(!dfp[1].length); ASSERT(!dfp[2].length); } } /* * The entry before us is free, merge with it. 
*/ else if (prevdup) { dfp = xfs_dir2_data_freefind(hdr, bf, prevdup); be16_add_cpu(&prevdup->length, len); *xfs_dir2_data_unused_tag_p(prevdup) = cpu_to_be16((char *)prevdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, prevdup); /* * If the previous entry was in the table, the new entry * is longer, so it will be in the table too. Remove * the old one and add the new one. */ if (dfp) { xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp); xfs_dir2_data_freeinsert(hdr, bf, prevdup, needlogp); } /* * Otherwise we need a scan if the new entry is big enough. */ else { needscan = be16_to_cpu(prevdup->length) > be16_to_cpu(bf[2].length); } } /* * The following entry is free, merge with it. */ else if (postdup) { dfp = xfs_dir2_data_freefind(hdr, bf, postdup); newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset); newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length)); *xfs_dir2_data_unused_tag_p(newdup) = cpu_to_be16((char *)newdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If the following entry was in the table, the new entry * is longer, so it will be in the table too. Remove * the old one and add the new one. */ if (dfp) { xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp); xfs_dir2_data_freeinsert(hdr, bf, newdup, needlogp); } /* * Otherwise we need a scan if the new entry is big enough. */ else { needscan = be16_to_cpu(newdup->length) > be16_to_cpu(bf[2].length); } } /* * Neither neighbor is free. Make a new entry. */ else { newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset); newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); newdup->length = cpu_to_be16(len); *xfs_dir2_data_unused_tag_p(newdup) = cpu_to_be16((char *)newdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, newdup); xfs_dir2_data_freeinsert(hdr, bf, newdup, needlogp); } *needscanp = needscan; } /* * Take a byte range out of an existing unused space and make it un-free. 
*/ void xfs_dir2_data_use_free( struct xfs_trans *tp, struct xfs_inode *dp, struct xfs_buf *bp, xfs_dir2_data_unused_t *dup, /* unused entry */ xfs_dir2_data_aoff_t offset, /* starting offset to use */ xfs_dir2_data_aoff_t len, /* length to use */ int *needlogp, /* out: need to log header */ int *needscanp) /* out: need regen bestfree */ { xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_dir2_data_free_t *dfp; /* bestfree pointer */ int matchback; /* matches end of freespace */ int matchfront; /* matches start of freespace */ int needscan; /* need to regen bestfree */ xfs_dir2_data_unused_t *newdup; /* new unused entry */ xfs_dir2_data_unused_t *newdup2; /* another new unused entry */ int oldlen; /* old unused entry's length */ struct xfs_dir2_data_free *bf; hdr = bp->b_addr; ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) || hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)); ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG); ASSERT(offset >= (char *)dup - (char *)hdr); ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)hdr); ASSERT((char *)dup - (char *)hdr == be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup))); /* * Look up the entry in the bestfree table. */ oldlen = be16_to_cpu(dup->length); bf = dp->d_ops->data_bestfree_p(hdr); dfp = xfs_dir2_data_freefind(hdr, bf, dup); ASSERT(dfp || oldlen <= be16_to_cpu(bf[2].length)); /* * Check for alignment with front and back of the entry. */ matchfront = (char *)dup - (char *)hdr == offset; matchback = (char *)dup + oldlen - (char *)hdr == offset + len; ASSERT(*needscanp == 0); needscan = 0; /* * If we matched it exactly we just need to get rid of it from * the bestfree table. */ if (matchfront && matchback) { if (dfp) { needscan = (bf[2].offset != 0); if (!needscan) xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp); } } /* * We match the first part of the entry. 
* Make a new entry with the remaining freespace. */ else if (matchfront) { newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset + len); newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); newdup->length = cpu_to_be16(oldlen - len); *xfs_dir2_data_unused_tag_p(newdup) = cpu_to_be16((char *)newdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If it was in the table, remove it and add the new one. */ if (dfp) { xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp); dfp = xfs_dir2_data_freeinsert(hdr, bf, newdup, needlogp); ASSERT(dfp != NULL); ASSERT(dfp->length == newdup->length); ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr); /* * If we got inserted at the last slot, * that means we don't know if there was a better * choice for the last slot, or not. Rescan. */ needscan = dfp == &bf[2]; } } /* * We match the last part of the entry. * Trim the allocated space off the tail of the entry. */ else if (matchback) { newdup = dup; newdup->length = cpu_to_be16(((char *)hdr + offset) - (char *)newdup); *xfs_dir2_data_unused_tag_p(newdup) = cpu_to_be16((char *)newdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If it was in the table, remove it and add the new one. */ if (dfp) { xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp); dfp = xfs_dir2_data_freeinsert(hdr, bf, newdup, needlogp); ASSERT(dfp != NULL); ASSERT(dfp->length == newdup->length); ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr); /* * If we got inserted at the last slot, * that means we don't know if there was a better * choice for the last slot, or not. Rescan. */ needscan = dfp == &bf[2]; } } /* * Poking out the middle of an entry. * Make two new entries. 
*/ else { newdup = dup; newdup->length = cpu_to_be16(((char *)hdr + offset) - (char *)newdup); *xfs_dir2_data_unused_tag_p(newdup) = cpu_to_be16((char *)newdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, newdup); newdup2 = (xfs_dir2_data_unused_t *)((char *)hdr + offset + len); newdup2->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); newdup2->length = cpu_to_be16(oldlen - len - be16_to_cpu(newdup->length)); *xfs_dir2_data_unused_tag_p(newdup2) = cpu_to_be16((char *)newdup2 - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, newdup2); /* * If the old entry was in the table, we need to scan * if the 3rd entry was valid, since these entries * are smaller than the old one. * If we don't need to scan that means there were 1 or 2 * entries in the table, and removing the old and adding * the 2 new will work. */ if (dfp) { needscan = (bf[2].length != 0); if (!needscan) { xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp); xfs_dir2_data_freeinsert(hdr, bf, newdup, needlogp); xfs_dir2_data_freeinsert(hdr, bf, newdup2, needlogp); } } } *needscanp = needscan; }
/*=============================================================================
    Copyright (c) 2011 Hartmut Kaiser

    Distributed under the Boost Software License, Version 1.0. (See accompanying
    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/

// Dispatch header: selects the pre-generated function_ptr binder header
// whose arity matches the configured BOOST_PHOENIX_LIMIT (rounded up to
// the next multiple of 10, max 50).
#if !defined(BOOST_PHOENIX_PREPROCESSED_BIND_DETAIL_FUNCTION_PTR_HPP)
#define BOOST_PHOENIX_PREPROCESSED_BIND_DETAIL_FUNCTION_PTR_HPP

#if BOOST_PHOENIX_LIMIT <= 10
#include <boost/phoenix/bind/detail/preprocessed/function_ptr_10.hpp>
#elif BOOST_PHOENIX_LIMIT <= 20
#include <boost/phoenix/bind/detail/preprocessed/function_ptr_20.hpp>
#elif BOOST_PHOENIX_LIMIT <= 30
#include <boost/phoenix/bind/detail/preprocessed/function_ptr_30.hpp>
#elif BOOST_PHOENIX_LIMIT <= 40
#include <boost/phoenix/bind/detail/preprocessed/function_ptr_40.hpp>
#elif BOOST_PHOENIX_LIMIT <= 50
#include <boost/phoenix/bind/detail/preprocessed/function_ptr_50.hpp>
#else
#error "BOOST_PHOENIX_LIMIT out of bounds for preprocessed headers"
#endif

#endif
#ifndef _XRC_URLINFO_H #define _XRC_URLINFO_H /******************************************************************************/ /* */ /* X r d C l i e n t U r l I n f o . h h */ /* */ /* Author: Fabrizio Furano (INFN Padova, 2004) */ /* Adapted from TXNetFile (root.cern.ch) originally done by */ /* Alvise Dorigo, Fabrizio Furano, INFN Padova, 2003 */ /* Revised by G. Ganis, CERN, June 2005 */ /* */ /* This file is part of the XRootD software suite. */ /* */ /* XRootD is free software: you can redistribute it and/or modify it under */ /* the terms of the GNU Lesser General Public License as published by the */ /* Free Software Foundation, either version 3 of the License, or (at your */ /* option) any later version. */ /* */ /* XRootD is distributed in the hope that it will be useful, but WITHOUT */ /* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or */ /* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public */ /* License for more details. */ /* */ /* You should have received a copy of the GNU Lesser General Public License */ /* along with XRootD in a file called COPYING.LESSER (LGPL license) and file */ /* COPYING (GPL license). If not, see <http://www.gnu.org/licenses/>. */ /* */ /* The copyright holder's institutional names and contributor's names may not */ /* be used to endorse or promote products derived from this software without */ /* specific prior written permission of the institution or contributor. 
 */
/******************************************************************************/

//////////////////////////////////////////////////////////////////////////
//                                                                      //
// Class handling information about an url                              //
// The purpose of this class is to allow:                               //
//   - parsing a string url into its components                         //
//   - reading/writing the single components                            //
//   - reading the modified full url                                    //
//                                                                      //
//////////////////////////////////////////////////////////////////////////

#include "XrdOuc/XrdOucString.hh"

//
// The information an url may contain
// Plus utilities for parsing and rebuilding an url
//

class XrdClientUrlInfo {
 public:
   XrdOucString Proto;       // protocol scheme (e.g. "root")
   XrdOucString Passwd;      // password component, if present in the url
   XrdOucString User;        // user name component
   XrdOucString Host;        // host name as given in the url
   int Port;                 // port number; negative marks an invalid url
   XrdOucString HostAddr;    // resolved host address (see SetAddrFromHost)
   XrdOucString HostWPort;   // "host:port" form of the endpoint
   XrdOucString File;        // path/file part of the url

   // Reset all components to their empty/default state.
   void Clear();
   // Parse the given url string into the individual components.
   void TakeUrl(XrdOucString url);
   // Rebuild and return the full url from the current components.
   XrdOucString GetUrl();

   XrdClientUrlInfo(const char *url);
   XrdClientUrlInfo(const XrdOucString &url);
   XrdClientUrlInfo(const XrdClientUrlInfo &url);
   XrdClientUrlInfo();

   // Fill HostAddr from Host (resolution details are in the implementation).
   void SetAddrFromHost();

   // An url is considered valid iff a non-negative port was parsed.
   inline bool IsValid() { return (Port >= 0); }

   XrdClientUrlInfo &operator=(const XrdOucString &url);
   XrdClientUrlInfo &operator=(const XrdClientUrlInfo &url);
};

#endif
#!/usr/bin/env python # # Copyright 2008 Jose Fonseca # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """Generate a dot graph from the output of several profilers.""" __author__ = "Jose Fonseca" __version__ = "1.0" import sys import math import os.path import re import textwrap import optparse try: # Debugging helper module import debug except ImportError: pass def percentage(p): return "%.02f%%" % (p*100.0,) def add(a, b): return a + b def equal(a, b): if a == b: return a else: return None def fail(a, b): assert False def ratio(numerator, denominator): numerator = float(numerator) denominator = float(denominator) assert 0.0 <= numerator assert numerator <= denominator try: return numerator/denominator except ZeroDivisionError: # 0/0 is undefined, but 1.0 yields more useful results return 1.0 class UndefinedEvent(Exception): """Raised when attempting to get an event which is undefined.""" def __init__(self, event): Exception.__init__(self) self.event = event def __str__(self): return 'unspecified event %s' % self.event.name class Event(object): """Describe a kind of event, and its basic operations.""" def __init__(self, name, null, aggregator, formatter = str): self.name = name self._null = null self._aggregator = aggregator self._formatter = formatter def __eq__(self, other): return self is other def __hash__(self): return id(self) def null(self): return self._null 
def aggregate(self, val1, val2): """Aggregate two event values.""" assert val1 is not None assert val2 is not None return self._aggregator(val1, val2) def format(self, val): """Format an event value.""" assert val is not None return self._formatter(val) MODULE = Event("Module", None, equal) PROCESS = Event("Process", None, equal) CALLS = Event("Calls", 0, add) SAMPLES = Event("Samples", 0, add) TIME = Event("Time", 0.0, add, lambda x: '(' + str(x) + ')') TIME_RATIO = Event("Time ratio", 0.0, add, lambda x: '(' + percentage(x) + ')') TOTAL_TIME = Event("Total time", 0.0, fail) TOTAL_TIME_RATIO = Event("Total time ratio", 0.0, fail, percentage) CALL_RATIO = Event("Call ratio", 0.0, add, percentage) PRUNE_RATIO = Event("Prune ratio", 0.0, add, percentage) class Object(object): """Base class for all objects in profile which can store events.""" def __init__(self, events=None): if events is None: self.events = {} else: self.events = events def __hash__(self): return id(self) def __eq__(self, other): return self is other def __contains__(self, event): return event in self.events def __getitem__(self, event): try: return self.events[event] except KeyError: raise UndefinedEvent(event) def __setitem__(self, event, value): if value is None: if event in self.events: del self.events[event] else: self.events[event] = value class Call(Object): """A call between functions. There should be at most one call object for every pair of functions. 
""" def __init__(self, callee_id): Object.__init__(self) self.callee_id = callee_id class Function(Object): """A function.""" def __init__(self, id, name): Object.__init__(self) self.id = id self.name = name self.calls = {} self.cycle = None def add_call(self, call): if call.callee_id in self.calls: sys.stderr.write('warning: overwriting call from function %s to %s\n' % (str(self.id), str(call.callee_id))) self.calls[call.callee_id] = call # TODO: write utility functions def __repr__(self): return self.name class Cycle(Object): """A cycle made from recursive function calls.""" def __init__(self): Object.__init__(self) # XXX: Do cycles need an id? self.functions = set() def add_function(self, function): assert function not in self.functions self.functions.add(function) # XXX: Aggregate events? if function.cycle is not None: for other in function.cycle.functions: if function not in self.functions: self.add_function(other) function.cycle = self class Profile(Object): """The whole profile.""" def __init__(self): Object.__init__(self) self.functions = {} self.cycles = [] def add_function(self, function): if function.id in self.functions: sys.stderr.write('warning: overwriting function %s (id %s)\n' % (function.name, str(function.id))) self.functions[function.id] = function def add_cycle(self, cycle): self.cycles.append(cycle) def validate(self): """Validate the edges.""" for function in self.functions.itervalues(): for callee_id in function.calls.keys(): assert function.calls[callee_id].callee_id == callee_id if callee_id not in self.functions: sys.stderr.write('warning: call to undefined function %s from function %s\n' % (str(callee_id), function.name)) del function.calls[callee_id] def find_cycles(self): """Find cycles using Tarjan's strongly connected components algorithm.""" # Apply the Tarjan's algorithm successively until all functions are visited visited = set() for function in self.functions.itervalues(): if function not in visited: self._tarjan(function, 0, [], 
{}, {}, visited) cycles = [] for function in self.functions.itervalues(): if function.cycle is not None and function.cycle not in cycles: cycles.append(function.cycle) self.cycles = cycles if 0: for cycle in cycles: sys.stderr.write("Cycle:\n") for member in cycle.functions: sys.stderr.write("\t%s\n" % member.name) def _tarjan(self, function, order, stack, orders, lowlinks, visited): """Tarjan's strongly connected components algorithm. See also: - http://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm """ visited.add(function) orders[function] = order lowlinks[function] = order order += 1 pos = len(stack) stack.append(function) for call in function.calls.itervalues(): callee = self.functions[call.callee_id] # TODO: use a set to optimize lookup if callee not in orders: order = self._tarjan(callee, order, stack, orders, lowlinks, visited) lowlinks[function] = min(lowlinks[function], lowlinks[callee]) elif callee in stack: lowlinks[function] = min(lowlinks[function], orders[callee]) if lowlinks[function] == orders[function]: # Strongly connected component found members = stack[pos:] del stack[pos:] if len(members) > 1: cycle = Cycle() for member in members: cycle.add_function(member) return order def call_ratios(self, event): # Aggregate for incoming calls cycle_totals = {} for cycle in self.cycles: cycle_totals[cycle] = 0.0 function_totals = {} for function in self.functions.itervalues(): function_totals[function] = 0.0 for function in self.functions.itervalues(): for call in function.calls.itervalues(): if call.callee_id != function.id: callee = self.functions[call.callee_id] function_totals[callee] += call[event] if callee.cycle is not None and callee.cycle is not function.cycle: cycle_totals[callee.cycle] += call[event] # Compute the ratios for function in self.functions.itervalues(): for call in function.calls.itervalues(): assert CALL_RATIO not in call if call.callee_id != function.id: callee = self.functions[call.callee_id] if 
callee.cycle is not None and callee.cycle is not function.cycle: total = cycle_totals[callee.cycle] else: total = function_totals[callee] call[CALL_RATIO] = ratio(call[event], total) def integrate(self, outevent, inevent): """Propagate function time ratio allong the function calls. Must be called after finding the cycles. See also: - http://citeseer.ist.psu.edu/graham82gprof.html """ # Sanity checking assert outevent not in self for function in self.functions.itervalues(): assert outevent not in function assert inevent in function for call in function.calls.itervalues(): assert outevent not in call if call.callee_id != function.id: assert CALL_RATIO in call # Aggregate the input for each cycle for cycle in self.cycles: total = inevent.null() for function in self.functions.itervalues(): total = inevent.aggregate(total, function[inevent]) self[inevent] = total # Integrate along the edges total = inevent.null() for function in self.functions.itervalues(): total = inevent.aggregate(total, function[inevent]) self._integrate_function(function, outevent, inevent) self[outevent] = total def _integrate_function(self, function, outevent, inevent): if function.cycle is not None: return self._integrate_cycle(function.cycle, outevent, inevent) else: if outevent not in function: total = function[inevent] for call in function.calls.itervalues(): if call.callee_id != function.id: total += self._integrate_call(call, outevent, inevent) function[outevent] = total return function[outevent] def _integrate_call(self, call, outevent, inevent): assert outevent not in call assert CALL_RATIO in call callee = self.functions[call.callee_id] subtotal = call[CALL_RATIO]*self._integrate_function(callee, outevent, inevent) call[outevent] = subtotal return subtotal def _integrate_cycle(self, cycle, outevent, inevent): if outevent not in cycle: total = inevent.null() for member in cycle.functions: subtotal = member[inevent] for call in member.calls.itervalues(): callee = 
self.functions[call.callee_id] if callee.cycle is not cycle: subtotal += self._integrate_call(call, outevent, inevent) total += subtotal cycle[outevent] = total callees = {} for function in self.functions.itervalues(): if function.cycle is not cycle: for call in function.calls.itervalues(): callee = self.functions[call.callee_id] if callee.cycle is cycle: try: callees[callee] += call[CALL_RATIO] except KeyError: callees[callee] = call[CALL_RATIO] for callee, call_ratio in callees.iteritems(): ranks = {} call_ratios = {} partials = {} self._rank_cycle_function(cycle, callee, 0, ranks) self._call_ratios_cycle(cycle, callee, ranks, call_ratios, set()) partial = self._integrate_cycle_function(cycle, callee, call_ratio, partials, ranks, call_ratios, outevent, inevent) assert partial == max(partials.values()) assert not total or abs(1.0 - partial/(call_ratio*total)) <= 0.001 return cycle[outevent] def _rank_cycle_function(self, cycle, function, rank, ranks): if function not in ranks or ranks[function] > rank: ranks[function] = rank for call in function.calls.itervalues(): if call.callee_id != function.id: callee = self.functions[call.callee_id] if callee.cycle is cycle: self._rank_cycle_function(cycle, callee, rank + 1, ranks) def _call_ratios_cycle(self, cycle, function, ranks, call_ratios, visited): if function not in visited: visited.add(function) for call in function.calls.itervalues(): if call.callee_id != function.id: callee = self.functions[call.callee_id] if callee.cycle is cycle: if ranks[callee] > ranks[function]: call_ratios[callee] = call_ratios.get(callee, 0.0) + call[CALL_RATIO] self._call_ratios_cycle(cycle, callee, ranks, call_ratios, visited) def _integrate_cycle_function(self, cycle, function, partial_ratio, partials, ranks, call_ratios, outevent, inevent): if function not in partials: partial = partial_ratio*function[inevent] for call in function.calls.itervalues(): if call.callee_id != function.id: callee = self.functions[call.callee_id] if 
callee.cycle is not cycle: assert outevent in call partial += partial_ratio*call[outevent] else: if ranks[callee] > ranks[function]: callee_partial = self._integrate_cycle_function(cycle, callee, partial_ratio, partials, ranks, call_ratios, outevent, inevent) call_ratio = ratio(call[CALL_RATIO], call_ratios[callee]) call_partial = call_ratio*callee_partial try: call[outevent] += call_partial except UndefinedEvent: call[outevent] = call_partial partial += call_partial partials[function] = partial try: function[outevent] += partial except UndefinedEvent: function[outevent] = partial return partials[function] def aggregate(self, event): """Aggregate an event for the whole profile.""" total = event.null() for function in self.functions.itervalues(): try: total = event.aggregate(total, function[event]) except UndefinedEvent: return self[event] = total def ratio(self, outevent, inevent): assert outevent not in self assert inevent in self for function in self.functions.itervalues(): assert outevent not in function assert inevent in function function[outevent] = ratio(function[inevent], self[inevent]) for call in function.calls.itervalues(): assert outevent not in call if inevent in call: call[outevent] = ratio(call[inevent], self[inevent]) self[outevent] = 1.0 def prune(self, node_thres, edge_thres): """Prune the profile""" # compute the prune ratios for function in self.functions.itervalues(): try: function[PRUNE_RATIO] = function[TOTAL_TIME_RATIO] except UndefinedEvent: pass for call in function.calls.itervalues(): callee = self.functions[call.callee_id] if TOTAL_TIME_RATIO in call: # handle exact cases first call[PRUNE_RATIO] = call[TOTAL_TIME_RATIO] else: try: # make a safe estimate call[PRUNE_RATIO] = min(function[TOTAL_TIME_RATIO], callee[TOTAL_TIME_RATIO]) except UndefinedEvent: pass # prune the nodes for function_id in self.functions.keys(): function = self.functions[function_id] try: if function[PRUNE_RATIO] < node_thres: del self.functions[function_id] except 
UndefinedEvent: pass # prune the egdes for function in self.functions.itervalues(): for callee_id in function.calls.keys(): call = function.calls[callee_id] try: if callee_id not in self.functions or call[PRUNE_RATIO] < edge_thres: del function.calls[callee_id] except UndefinedEvent: pass def dump(self): for function in self.functions.itervalues(): sys.stderr.write('Function %s:\n' % (function.name,)) self._dump_events(function.events) for call in function.calls.itervalues(): callee = self.functions[call.callee_id] sys.stderr.write(' Call %s:\n' % (callee.name,)) self._dump_events(call.events) def _dump_events(self, events): for event, value in events.iteritems(): sys.stderr.write(' %s: %s\n' % (event.name, event.format(value))) class Struct: """Masquerade a dictionary with a structure-like behavior.""" def __init__(self, attrs = None): if attrs is None: attrs = {} self.__dict__['_attrs'] = attrs def __getattr__(self, name): try: return self._attrs[name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): self._attrs[name] = value def __str__(self): return str(self._attrs) def __repr__(self): return repr(self._attrs) class ParseError(Exception): """Raised when parsing to signal mismatches.""" def __init__(self, msg, line): self.msg = msg # TODO: store more source line information self.line = line def __str__(self): return '%s: %r' % (self.msg, self.line) class Parser: """Parser interface.""" def __init__(self): pass def parse(self): raise NotImplementedError class LineParser(Parser): """Base class for parsers that read line-based formats.""" def __init__(self, file): Parser.__init__(self) self._file = file self.__line = None self.__eof = False def readline(self): line = self._file.readline() if not line: self.__line = '' self.__eof = True self.__line = line.rstrip('\r\n') def lookahead(self): assert self.__line is not None return self.__line def consume(self): assert self.__line is not None line = self.__line self.readline() return line 
def eof(self): assert self.__line is not None return self.__eof class GprofParser(Parser): """Parser for GNU gprof output. See also: - Chapter "Interpreting gprof's Output" from the GNU gprof manual http://sourceware.org/binutils/docs-2.18/gprof/Call-Graph.html#Call-Graph - File "cg_print.c" from the GNU gprof source code http://sourceware.org/cgi-bin/cvsweb.cgi/~checkout~/src/gprof/cg_print.c?rev=1.12&cvsroot=src """ def __init__(self, fp): Parser.__init__(self) self.fp = fp self.functions = {} self.cycles = {} def readline(self): line = self.fp.readline() if not line: sys.stderr.write('error: unexpected end of file\n') sys.exit(1) line = line.rstrip('\r\n') return line _int_re = re.compile(r'^\d+$') _float_re = re.compile(r'^\d+\.\d+$') def translate(self, mo): """Extract a structure from a match object, while translating the types in the process.""" attrs = {} groupdict = mo.groupdict() for name, value in groupdict.iteritems(): if value is None: value = None elif self._int_re.match(value): value = int(value) elif self._float_re.match(value): value = float(value) attrs[name] = (value) return Struct(attrs) _cg_header_re = re.compile( # original gprof header r'^\s+called/total\s+parents\s*$|' + r'^index\s+%time\s+self\s+descendents\s+called\+self\s+name\s+index\s*$|' + r'^\s+called/total\s+children\s*$|' + # GNU gprof header r'^index\s+%\s+time\s+self\s+children\s+called\s+name\s*$' ) _cg_ignore_re = re.compile( # spontaneous r'^\s+<spontaneous>\s*$|' # internal calls (such as "mcount") r'^.*\((\d+)\)$' ) _cg_primary_re = re.compile( r'^\[(?P<index>\d+)\]' + r'\s+(?P<percentage_time>\d+\.\d+)' + r'\s+(?P<self>\d+\.\d+)' + r'\s+(?P<descendants>\d+\.\d+)' + r'\s+(?:(?P<called>\d+)(?:\+(?P<called_self>\d+))?)?' + r'\s+(?P<name>\S.*?)' + r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' + r'\s\[(\d+)\]$' ) _cg_parent_re = re.compile( r'^\s+(?P<self>\d+\.\d+)?' + r'\s+(?P<descendants>\d+\.\d+)?' + r'\s+(?P<called>\d+)(?:/(?P<called_total>\d+))?' 
+ r'\s+(?P<name>\S.*?)' + r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' + r'\s\[(?P<index>\d+)\]$' ) _cg_child_re = _cg_parent_re _cg_cycle_header_re = re.compile( r'^\[(?P<index>\d+)\]' + r'\s+(?P<percentage_time>\d+\.\d+)' + r'\s+(?P<self>\d+\.\d+)' + r'\s+(?P<descendants>\d+\.\d+)' + r'\s+(?:(?P<called>\d+)(?:\+(?P<called_self>\d+))?)?' + r'\s+<cycle\s(?P<cycle>\d+)\sas\sa\swhole>' + r'\s\[(\d+)\]$' ) _cg_cycle_member_re = re.compile( r'^\s+(?P<self>\d+\.\d+)?' + r'\s+(?P<descendants>\d+\.\d+)?' + r'\s+(?P<called>\d+)(?:\+(?P<called_self>\d+))?' + r'\s+(?P<name>\S.*?)' + r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' + r'\s\[(?P<index>\d+)\]$' ) _cg_sep_re = re.compile(r'^--+$') def parse_function_entry(self, lines): parents = [] children = [] while True: if not lines: sys.stderr.write('warning: unexpected end of entry\n') line = lines.pop(0) if line.startswith('['): break # read function parent line mo = self._cg_parent_re.match(line) if not mo: if self._cg_ignore_re.match(line): continue sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line) else: parent = self.translate(mo) parents.append(parent) # read primary line mo = self._cg_primary_re.match(line) if not mo: sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line) return else: function = self.translate(mo) while lines: line = lines.pop(0) # read function subroutine line mo = self._cg_child_re.match(line) if not mo: if self._cg_ignore_re.match(line): continue sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line) else: child = self.translate(mo) children.append(child) function.parents = parents function.children = children self.functions[function.index] = function def parse_cycle_entry(self, lines): # read cycle header line line = lines[0] mo = self._cg_cycle_header_re.match(line) if not mo: sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line) return cycle = self.translate(mo) # read cycle member lines cycle.functions = [] for line in lines[1:]: mo = 
self._cg_cycle_member_re.match(line) if not mo: sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line) continue call = self.translate(mo) cycle.functions.append(call) self.cycles[cycle.cycle] = cycle def parse_cg_entry(self, lines): if lines[0].startswith("["): self.parse_cycle_entry(lines) else: self.parse_function_entry(lines) def parse_cg(self): """Parse the call graph.""" # skip call graph header while not self._cg_header_re.match(self.readline()): pass line = self.readline() while self._cg_header_re.match(line): line = self.readline() # process call graph entries entry_lines = [] while line != '\014': # form feed if line and not line.isspace(): if self._cg_sep_re.match(line): self.parse_cg_entry(entry_lines) entry_lines = [] else: entry_lines.append(line) line = self.readline() def parse(self): self.parse_cg() self.fp.close() profile = Profile() profile[TIME] = 0.0 cycles = {} for index in self.cycles.iterkeys(): cycles[index] = Cycle() for entry in self.functions.itervalues(): # populate the function function = Function(entry.index, entry.name) function[TIME] = entry.self if entry.called is not None: function[CALLS] = entry.called if entry.called_self is not None: call = Call(entry.index) call[CALLS] = entry.called_self function[CALLS] += entry.called_self # populate the function calls for child in entry.children: call = Call(child.index) assert child.called is not None call[CALLS] = child.called if child.index not in self.functions: # NOTE: functions that were never called but were discovered by gprof's # static call graph analysis dont have a call graph entry so we need # to add them here missing = Function(child.index, child.name) function[TIME] = 0.0 function[CALLS] = 0 profile.add_function(missing) function.add_call(call) profile.add_function(function) if entry.cycle is not None: cycles[entry.cycle].add_function(function) profile[TIME] = profile[TIME] + function[TIME] for cycle in cycles.itervalues(): profile.add_cycle(cycle) # Compute 
derived events profile.validate() profile.ratio(TIME_RATIO, TIME) profile.call_ratios(CALLS) profile.integrate(TOTAL_TIME, TIME) profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME) return profile class OprofileParser(LineParser): """Parser for oprofile callgraph output. See also: - http://oprofile.sourceforge.net/doc/opreport.html#opreport-callgraph """ _fields_re = { 'samples': r'(?P<samples>\d+)', '%': r'(?P<percentage>\S+)', 'linenr info': r'(?P<source>\(no location information\)|\S+:\d+)', 'image name': r'(?P<image>\S+(?:\s\(tgid:[^)]*\))?)', 'app name': r'(?P<application>\S+)', 'symbol name': r'(?P<symbol>\(no symbols\)|.+?)', } def __init__(self, infile): LineParser.__init__(self, infile) self.entries = {} self.entry_re = None def add_entry(self, callers, function, callees): try: entry = self.entries[function.id] except KeyError: self.entries[function.id] = (callers, function, callees) else: callers_total, function_total, callees_total = entry self.update_subentries_dict(callers_total, callers) function_total.samples += function.samples self.update_subentries_dict(callees_total, callees) def update_subentries_dict(self, totals, partials): for partial in partials.itervalues(): try: total = totals[partial.id] except KeyError: totals[partial.id] = partial else: total.samples += partial.samples def parse(self): # read lookahead self.readline() self.parse_header() while self.lookahead(): self.parse_entry() profile = Profile() reverse_call_samples = {} # populate the profile profile[SAMPLES] = 0 for _callers, _function, _callees in self.entries.itervalues(): function = Function(_function.id, _function.name) function[SAMPLES] = _function.samples profile.add_function(function) profile[SAMPLES] += _function.samples if _function.application: function[PROCESS] = os.path.basename(_function.application) if _function.image: function[MODULE] = os.path.basename(_function.image) total_callee_samples = 0 for _callee in _callees.itervalues(): total_callee_samples += _callee.samples for 
_callee in _callees.itervalues(): if not _callee.self: call = Call(_callee.id) call[SAMPLES] = _callee.samples function.add_call(call) # compute derived data profile.validate() profile.find_cycles() profile.ratio(TIME_RATIO, SAMPLES) profile.call_ratios(SAMPLES) profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO) return profile def parse_header(self): while not self.match_header(): self.consume() line = self.lookahead() fields = re.split(r'\s\s+', line) entry_re = r'^\s*' + r'\s+'.join([self._fields_re[field] for field in fields]) + r'(?P<self>\s+\[self\])?$' self.entry_re = re.compile(entry_re) self.skip_separator() def parse_entry(self): callers = self.parse_subentries() if self.match_primary(): function = self.parse_subentry() if function is not None: callees = self.parse_subentries() self.add_entry(callers, function, callees) self.skip_separator() def parse_subentries(self): subentries = {} while self.match_secondary(): subentry = self.parse_subentry() subentries[subentry.id] = subentry return subentries def parse_subentry(self): entry = Struct() line = self.consume() mo = self.entry_re.match(line) if not mo: raise ParseError('failed to parse', line) fields = mo.groupdict() entry.samples = int(fields.get('samples', 0)) entry.percentage = float(fields.get('percentage', 0.0)) if 'source' in fields and fields['source'] != '(no location information)': source = fields['source'] filename, lineno = source.split(':') entry.filename = filename entry.lineno = int(lineno) else: source = '' entry.filename = None entry.lineno = None entry.image = fields.get('image', '') entry.application = fields.get('application', '') if 'symbol' in fields and fields['symbol'] != '(no symbols)': entry.symbol = fields['symbol'] else: entry.symbol = '' if entry.symbol.startswith('"') and entry.symbol.endswith('"'): entry.symbol = entry.symbol[1:-1] entry.id = ':'.join((entry.application, entry.image, source, entry.symbol)) entry.self = fields.get('self', None) != None if entry.self: entry.id += 
':self' if entry.symbol: entry.name = entry.symbol else: entry.name = entry.image return entry def skip_separator(self): while not self.match_separator(): self.consume() self.consume() def match_header(self): line = self.lookahead() return line.startswith('samples') def match_separator(self): line = self.lookahead() return line == '-'*len(line) def match_primary(self): line = self.lookahead() return not line[:1].isspace() def match_secondary(self): line = self.lookahead() return line[:1].isspace() class SharkParser(LineParser): """Parser for MacOSX Shark output. Author: tom@dbservice.com """ def __init__(self, infile): LineParser.__init__(self, infile) self.stack = [] self.entries = {} def add_entry(self, function): try: entry = self.entries[function.id] except KeyError: self.entries[function.id] = (function, { }) else: function_total, callees_total = entry function_total.samples += function.samples def add_callee(self, function, callee): func, callees = self.entries[function.id] try: entry = callees[callee.id] except KeyError: callees[callee.id] = callee else: entry.samples += callee.samples def parse(self): self.readline() self.readline() self.readline() self.readline() match = re.compile(r'(?P<prefix>[|+ ]*)(?P<samples>\d+), (?P<symbol>[^,]+), (?P<image>.*)') while self.lookahead(): line = self.consume() mo = match.match(line) if not mo: raise ParseError('failed to parse', line) fields = mo.groupdict() prefix = len(fields.get('prefix', 0)) / 2 - 1 symbol = str(fields.get('symbol', 0)) image = str(fields.get('image', 0)) entry = Struct() entry.id = ':'.join([symbol, image]) entry.samples = int(fields.get('samples', 0)) entry.name = symbol entry.image = image # adjust the callstack if prefix < len(self.stack): del self.stack[prefix:] if prefix == len(self.stack): self.stack.append(entry) # if the callstack has had an entry, it's this functions caller if prefix > 0: self.add_callee(self.stack[prefix - 1], entry) self.add_entry(entry) profile = Profile() 
profile[SAMPLES] = 0 for _function, _callees in self.entries.itervalues(): function = Function(_function.id, _function.name) function[SAMPLES] = _function.samples profile.add_function(function) profile[SAMPLES] += _function.samples if _function.image: function[MODULE] = os.path.basename(_function.image) for _callee in _callees.itervalues(): call = Call(_callee.id) call[SAMPLES] = _callee.samples function.add_call(call) # compute derived data profile.validate() profile.find_cycles() profile.ratio(TIME_RATIO, SAMPLES) profile.call_ratios(SAMPLES) profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO) return profile class PstatsParser: """Parser python profiling statistics saved with te pstats module.""" def __init__(self, *filename): import pstats self.stats = pstats.Stats(*filename) self.profile = Profile() self.function_ids = {} def get_function_name(self, (filename, line, name)): module = os.path.splitext(filename)[0] module = os.path.basename(module) return "%s:%d:%s" % (module, line, name) def get_function(self, key): try: id = self.function_ids[key] except KeyError: id = len(self.function_ids) name = self.get_function_name(key) function = Function(id, name) self.profile.functions[id] = function self.function_ids[key] = id else: function = self.profile.functions[id] return function def parse(self): self.profile[TIME] = 0.0 self.profile[TOTAL_TIME] = self.stats.total_tt for fn, (cc, nc, tt, ct, callers) in self.stats.stats.iteritems(): callee = self.get_function(fn) callee[CALLS] = nc callee[TOTAL_TIME] = ct callee[TIME] = tt self.profile[TIME] += tt self.profile[TOTAL_TIME] = max(self.profile[TOTAL_TIME], ct) for fn, value in callers.iteritems(): caller = self.get_function(fn) call = Call(callee.id) if isinstance(value, tuple): for i in xrange(0, len(value), 4): nc, cc, tt, ct = value[i:i+4] if CALLS in call: call[CALLS] += cc else: call[CALLS] = cc if TOTAL_TIME in call: call[TOTAL_TIME] += ct else: call[TOTAL_TIME] = ct else: call[CALLS] = value call[TOTAL_TIME] = 
ratio(value, nc)*ct caller.add_call(call) #self.stats.print_stats() #self.stats.print_callees() # Compute derived events self.profile.validate() self.profile.ratio(TIME_RATIO, TIME) self.profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME) return self.profile class Theme: def __init__(self, bgcolor = (0.0, 0.0, 1.0), mincolor = (0.0, 0.0, 0.0), maxcolor = (0.0, 0.0, 1.0), fontname = "Arial", minfontsize = 10.0, maxfontsize = 10.0, minpenwidth = 0.5, maxpenwidth = 4.0, gamma = 2.2): self.bgcolor = bgcolor self.mincolor = mincolor self.maxcolor = maxcolor self.fontname = fontname self.minfontsize = minfontsize self.maxfontsize = maxfontsize self.minpenwidth = minpenwidth self.maxpenwidth = maxpenwidth self.gamma = gamma def graph_bgcolor(self): return self.hsl_to_rgb(*self.bgcolor) def graph_fontname(self): return self.fontname def graph_fontsize(self): return self.minfontsize def node_bgcolor(self, weight): return self.color(weight) def node_fgcolor(self, weight): return self.graph_bgcolor() def node_fontsize(self, weight): return self.fontsize(weight) def edge_color(self, weight): return self.color(weight) def edge_fontsize(self, weight): return self.fontsize(weight) def edge_penwidth(self, weight): return max(weight*self.maxpenwidth, self.minpenwidth) def edge_arrowsize(self, weight): return 0.5 * math.sqrt(self.edge_penwidth(weight)) def fontsize(self, weight): return max(weight**2 * self.maxfontsize, self.minfontsize) def color(self, weight): weight = min(max(weight, 0.0), 1.0) hmin, smin, lmin = self.mincolor hmax, smax, lmax = self.maxcolor h = hmin + weight*(hmax - hmin) s = smin + weight*(smax - smin) l = lmin + weight*(lmax - lmin) return self.hsl_to_rgb(h, s, l) def hsl_to_rgb(self, h, s, l): """Convert a color from HSL color-model to RGB. 
See also: - http://www.w3.org/TR/css3-color/#hsl-color """ h = h % 1.0 s = min(max(s, 0.0), 1.0) l = min(max(l, 0.0), 1.0) if l <= 0.5: m2 = l*(s + 1.0) else: m2 = l + s - l*s m1 = l*2.0 - m2 r = self._hue_to_rgb(m1, m2, h + 1.0/3.0) g = self._hue_to_rgb(m1, m2, h) b = self._hue_to_rgb(m1, m2, h - 1.0/3.0) # Apply gamma correction r **= self.gamma g **= self.gamma b **= self.gamma return (r, g, b) def _hue_to_rgb(self, m1, m2, h): if h < 0.0: h += 1.0 elif h > 1.0: h -= 1.0 if h*6 < 1.0: return m1 + (m2 - m1)*h*6.0 elif h*2 < 1.0: return m2 elif h*3 < 2.0: return m1 + (m2 - m1)*(2.0/3.0 - h)*6.0 else: return m1 TEMPERATURE_COLORMAP = Theme( mincolor = (2.0/3.0, 0.80, 0.25), # dark blue maxcolor = (0.0, 1.0, 0.5), # satured red gamma = 1.0 ) PINK_COLORMAP = Theme( mincolor = (0.0, 1.0, 0.90), # pink maxcolor = (0.0, 1.0, 0.5), # satured red ) GRAY_COLORMAP = Theme( mincolor = (0.0, 0.0, 0.85), # light gray maxcolor = (0.0, 0.0, 0.0), # black ) BW_COLORMAP = Theme( minfontsize = 8.0, maxfontsize = 24.0, mincolor = (0.0, 0.0, 0.0), # black maxcolor = (0.0, 0.0, 0.0), # black minpenwidth = 0.1, maxpenwidth = 8.0, ) class DotWriter: """Writer for the DOT language. 
See also: - "The DOT Language" specification http://www.graphviz.org/doc/info/lang.html """ def __init__(self, fp): self.fp = fp def graph(self, profile, theme): self.begin_graph() fontname = theme.graph_fontname() self.attr('graph', fontname=fontname, ranksep=0.25, nodesep=0.125) self.attr('node', fontname=fontname, shape="box", style="filled,rounded", fontcolor="white", width=0, height=0) self.attr('edge', fontname=fontname) for function in profile.functions.itervalues(): labels = [] for event in PROCESS, MODULE: if event in function.events: label = event.format(function[event]) labels.append(label) labels.append(function.name) for event in TOTAL_TIME_RATIO, TIME_RATIO, CALLS: if event in function.events: label = event.format(function[event]) labels.append(label) try: weight = function[PRUNE_RATIO] except UndefinedEvent: weight = 0.0 label = '\n'.join(labels) self.node(function.id, label = label, color = self.color(theme.node_bgcolor(weight)), fontcolor = self.color(theme.node_fgcolor(weight)), fontsize = "%.2f" % theme.node_fontsize(weight), ) for call in function.calls.itervalues(): callee = profile.functions[call.callee_id] labels = [] for event in TOTAL_TIME_RATIO, CALLS: if event in call.events: label = event.format(call[event]) labels.append(label) try: weight = call[PRUNE_RATIO] except UndefinedEvent: try: weight = callee[PRUNE_RATIO] except UndefinedEvent: weight = 0.0 label = '\n'.join(labels) self.edge(function.id, call.callee_id, label = label, color = self.color(theme.edge_color(weight)), fontcolor = self.color(theme.edge_color(weight)), fontsize = "%.2f" % theme.edge_fontsize(weight), penwidth = "%.2f" % theme.edge_penwidth(weight), labeldistance = "%.2f" % theme.edge_penwidth(weight), arrowsize = "%.2f" % theme.edge_arrowsize(weight), ) self.end_graph() def begin_graph(self): self.write('digraph {\n') def end_graph(self): self.write('}\n') def attr(self, what, **attrs): self.write("\t") self.write(what) self.attr_list(attrs) self.write(";\n") def 
node(self, node, **attrs): self.write("\t") self.id(node) self.attr_list(attrs) self.write(";\n") def edge(self, src, dst, **attrs): self.write("\t") self.id(src) self.write(" -> ") self.id(dst) self.attr_list(attrs) self.write(";\n") def attr_list(self, attrs): if not attrs: return self.write(' [') first = True for name, value in attrs.iteritems(): if first: first = False else: self.write(", ") self.id(name) self.write('=') self.id(value) self.write(']') def id(self, id): if isinstance(id, (int, float)): s = str(id) elif isinstance(id, str): if id.isalnum(): s = id else: s = self.escape(id) else: raise TypeError self.write(s) def color(self, (r, g, b)): def float2int(f): if f <= 0.0: return 0 if f >= 1.0: return 255 return int(255.0*f + 0.5) return "#" + "".join(["%02x" % float2int(c) for c in (r, g, b)]) def escape(self, s): s = s.encode('utf-8') s = s.replace('\\', r'\\') s = s.replace('\n', r'\n') s = s.replace('\t', r'\t') s = s.replace('"', r'\"') return '"' + s + '"' def write(self, s): self.fp.write(s) class Main: """Main program.""" themes = { "color": TEMPERATURE_COLORMAP, "pink": PINK_COLORMAP, "gray": GRAY_COLORMAP, "bw": BW_COLORMAP, } def main(self): """Main program.""" parser = optparse.OptionParser( usage="\n\t%prog [options] [file] ...", version="%%prog %s" % __version__) parser.add_option( '-o', '--output', metavar='FILE', type="string", dest="output", help="output filename [stdout]") parser.add_option( '-n', '--node-thres', metavar='PERCENTAGE', type="float", dest="node_thres", default=0.5, help="eliminate nodes below this threshold [default: %default]") parser.add_option( '-e', '--edge-thres', metavar='PERCENTAGE', type="float", dest="edge_thres", default=0.1, help="eliminate edges below this threshold [default: %default]") parser.add_option( '-f', '--format', type="choice", choices=('prof', 'oprofile', 'pstats', 'shark'), dest="format", default="prof", help="profile format: prof, oprofile, or pstats [default: %default]") parser.add_option( 
'-c', '--colormap', type="choice", choices=('color', 'pink', 'gray', 'bw'), dest="theme", default="color", help="color map: color, pink, gray, or bw [default: %default]") parser.add_option( '-s', '--strip', action="store_true", dest="strip", default=False, help="strip function parameters, template parameters, and const modifiers from demangled C++ function names") parser.add_option( '-w', '--wrap', action="store_true", dest="wrap", default=False, help="wrap function names") (self.options, self.args) = parser.parse_args(sys.argv[1:]) if len(self.args) > 1 and self.options.format != 'pstats': parser.error('incorrect number of arguments') try: self.theme = self.themes[self.options.theme] except KeyError: parser.error('invalid colormap \'%s\'' % self.options.theme) if self.options.format == 'prof': if not self.args: fp = sys.stdin else: fp = open(self.args[0], 'rt') parser = GprofParser(fp) elif self.options.format == 'oprofile': if not self.args: fp = sys.stdin else: fp = open(self.args[0], 'rt') parser = OprofileParser(fp) elif self.options.format == 'pstats': if not self.args: parser.error('at least a file must be specified for pstats input') parser = PstatsParser(*self.args) elif self.options.format == 'shark': if not self.args: fp = sys.stdin else: fp = open(self.args[0], 'rt') parser = SharkParser(fp) else: parser.error('invalid format \'%s\'' % self.options.format) self.profile = parser.parse() if self.options.output is None: self.output = sys.stdout else: self.output = open(self.options.output, 'wt') self.write_graph() _parenthesis_re = re.compile(r'\([^()]*\)') _angles_re = re.compile(r'<[^<>]*>') _const_re = re.compile(r'\s+const$') def strip_function_name(self, name): """Remove extraneous information from C++ demangled function names.""" # Strip function parameters from name by recursively removing paired parenthesis while True: name, n = self._parenthesis_re.subn('', name) if not n: break # Strip const qualifier name = self._const_re.sub('', name) # Strip 
template parameters from name by recursively removing paired angles while True: name, n = self._angles_re.subn('', name) if not n: break return name def wrap_function_name(self, name): """Split the function name on multiple lines.""" if len(name) > 32: ratio = 2.0/3.0 height = max(int(len(name)/(1.0 - ratio) + 0.5), 1) width = max(len(name)/height, 32) # TODO: break lines in symbols name = textwrap.fill(name, width, break_long_words=False) # Take away spaces name = name.replace(", ", ",") name = name.replace("> >", ">>") name = name.replace("> >", ">>") # catch consecutive return name def compress_function_name(self, name): """Compress function name according to the user preferences.""" if self.options.strip: name = self.strip_function_name(name) if self.options.wrap: name = self.wrap_function_name(name) # TODO: merge functions with same resulting name return name def write_graph(self): dot = DotWriter(self.output) profile = self.profile profile.prune(self.options.node_thres/100.0, self.options.edge_thres/100.0) for function in profile.functions.itervalues(): function.name = self.compress_function_name(function.name) dot.graph(profile, self.theme) if __name__ == '__main__': Main().main()
/* * Copyright (c) 2012, Freescale Semiconductor, Inc. * All rights reserved. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // File: i2c3_iomux_config.c /* ------------------------------------------------------------------------------ * <auto-generated> * This code was generated by a tool. * Runtime Version:3.4.0.0 * * Changes to this file may cause incorrect behavior and will be lost if * the code is regenerated. * </auto-generated> * ------------------------------------------------------------------------------ */ #include "iomux_config.h" #include "registers/regsiomuxc.h" // Function to configure IOMUXC for i2c3 module. void i2c3_iomux_config(void) { // Config i2c3.I2C3_SCL to pad GPIO03(R7) // HW_IOMUXC_SW_MUX_CTL_PAD_GPIO03_WR(0x00000012); // HW_IOMUXC_SW_PAD_CTL_PAD_GPIO03_WR(0x0001B0B0); // HW_IOMUXC_I2C3_SCL_IN_SELECT_INPUT_WR(0x00000001); // Mux Register: // IOMUXC_SW_MUX_CTL_PAD_GPIO03(0x020E0228) // SION [4] - Software Input On Field Reset: DISABLED // Force the selected mux mode Input path no matter of MUX_MODE functionality. // DISABLED (0) - Input Path is determined by functionality of the selected mux mode (regular). // ENABLED (1) - Force input path of pad. 
// MUX_MODE [2:0] - MUX Mode Select Field Reset: ALT5 // Select iomux modes to be used for pad. // ALT0 (0) - Select instance: esai signal: ESAI_RX_HF_CLK // ALT2 (2) - Select instance: i2c3 signal: I2C3_SCL // ALT3 (3) - Select instance: xtalosc signal: XTALOSC_REF_CLK_24M // ALT4 (4) - Select instance: ccm signal: CCM_CLKO2 // ALT5 (5) - Select instance: gpio1 signal: GPIO1_IO03 // ALT6 (6) - Select instance: usb signal: USB_H1_OC // ALT7 (7) - Select instance: mlb signal: MLB_CLK HW_IOMUXC_SW_MUX_CTL_PAD_GPIO03_WR( BF_IOMUXC_SW_MUX_CTL_PAD_GPIO03_SION_V(ENABLED) | BF_IOMUXC_SW_MUX_CTL_PAD_GPIO03_MUX_MODE_V(ALT2)); // Pad Control Register: // IOMUXC_SW_PAD_CTL_PAD_GPIO03(0x020E05F8) // HYS [16] - Hysteresis Enable Field Reset: ENABLED // DISABLED (0) - CMOS input // ENABLED (1) - Schmitt trigger input // PUS [15:14] - Pull Up / Down Config. Field Reset: 100K_OHM_PU // 100K_OHM_PD (0) - 100K Ohm Pull Down // 47K_OHM_PU (1) - 47K Ohm Pull Up // 100K_OHM_PU (2) - 100K Ohm Pull Up // 22K_OHM_PU (3) - 22K Ohm Pull Up // PUE [13] - Pull / Keep Select Field Reset: PULL // KEEP (0) - Keeper Enabled // PULL (1) - Pull Enabled // PKE [12] - Pull / Keep Enable Field Reset: ENABLED // DISABLED (0) - Pull/Keeper Disabled // ENABLED (1) - Pull/Keeper Enabled // ODE [11] - Open Drain Enable Field Reset: DISABLED // Enables open drain of the pin. // DISABLED (0) - Output is CMOS. // ENABLED (1) - Output is Open Drain. // SPEED [7:6] - Speed Field Reset: 100MHZ // RESERVED0 (0) - Reserved // 50MHZ (1) - Low (50 MHz) // 100MHZ (2) - Medium (100 MHz) // 200MHZ (3) - Maximum (200 MHz) // DSE [5:3] - Drive Strength Field Reset: 40_OHM // HIZ (0) - HI-Z // 240_OHM (1) - 240 Ohm // 120_OHM (2) - 120 Ohm // 80_OHM (3) - 80 Ohm // 60_OHM (4) - 60 Ohm // 48_OHM (5) - 48 Ohm // 40_OHM (6) - 40 Ohm // 34_OHM (7) - 34 Ohm // SRE [0] - Slew Rate Field Reset: SLOW // Slew rate control. 
// SLOW (0) - Slow Slew Rate // FAST (1) - Fast Slew Rate HW_IOMUXC_SW_PAD_CTL_PAD_GPIO03_WR( BF_IOMUXC_SW_PAD_CTL_PAD_GPIO03_HYS_V(ENABLED) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO03_PUS_V(100K_OHM_PU) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO03_PUE_V(PULL) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO03_PKE_V(ENABLED) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO03_ODE_V(DISABLED) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO03_SPEED_V(100MHZ) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO03_DSE_V(40_OHM) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO03_SRE_V(SLOW)); // Pad GPIO03 is involved in Daisy Chain. // Input Select Register: // IOMUXC_I2C3_SCL_IN_SELECT_INPUT(0x020E0878) // DAISY [1:0] - MUX Mode Select Field Reset: EIM_DATA17_ALT6 // Selecting Pads Involved in Daisy Chain. // EIM_DATA17_ALT6 (0) - Select signal i2c3 I2C3_SCL as input from pad EIM_DATA17(ALT6). // GPIO03_ALT2 (1) - Select signal i2c3 I2C3_SCL as input from pad GPIO03(ALT2). // GPIO05_ALT6 (2) - Select signal i2c3 I2C3_SCL as input from pad GPIO05(ALT6). HW_IOMUXC_I2C3_SCL_IN_SELECT_INPUT_WR( BF_IOMUXC_I2C3_SCL_IN_SELECT_INPUT_DAISY_V(GPIO03_ALT2)); // Config i2c3.I2C3_SDA to pad GPIO06(T3) // HW_IOMUXC_SW_MUX_CTL_PAD_GPIO06_WR(0x00000012); // HW_IOMUXC_SW_PAD_CTL_PAD_GPIO06_WR(0x0001B0B0); // HW_IOMUXC_I2C3_SDA_IN_SELECT_INPUT_WR(0x00000002); // Mux Register: // IOMUXC_SW_MUX_CTL_PAD_GPIO06(0x020E0234) // SION [4] - Software Input On Field Reset: DISABLED // Force the selected mux mode Input path no matter of MUX_MODE functionality. // DISABLED (0) - Input Path is determined by functionality of the selected mux mode (regular). // ENABLED (1) - Force input path of pad. // MUX_MODE [2:0] - MUX Mode Select Field Reset: ALT5 // Select iomux modes to be used for pad. 
// ALT0 (0) - Select instance: esai signal: ESAI_TX_CLK // ALT2 (2) - Select instance: i2c3 signal: I2C3_SDA // ALT5 (5) - Select instance: gpio1 signal: GPIO1_IO06 // ALT6 (6) - Select instance: usdhc2 signal: SD2_LCTL // ALT7 (7) - Select instance: mlb signal: MLB_SIG HW_IOMUXC_SW_MUX_CTL_PAD_GPIO06_WR( BF_IOMUXC_SW_MUX_CTL_PAD_GPIO06_SION_V(ENABLED) | BF_IOMUXC_SW_MUX_CTL_PAD_GPIO06_MUX_MODE_V(ALT2)); // Pad Control Register: // IOMUXC_SW_PAD_CTL_PAD_GPIO06(0x020E0604) // HYS [16] - Hysteresis Enable Field Reset: ENABLED // DISABLED (0) - CMOS input // ENABLED (1) - Schmitt trigger input // PUS [15:14] - Pull Up / Down Config. Field Reset: 100K_OHM_PU // 100K_OHM_PD (0) - 100K Ohm Pull Down // 47K_OHM_PU (1) - 47K Ohm Pull Up // 100K_OHM_PU (2) - 100K Ohm Pull Up // 22K_OHM_PU (3) - 22K Ohm Pull Up // PUE [13] - Pull / Keep Select Field Reset: PULL // KEEP (0) - Keeper Enabled // PULL (1) - Pull Enabled // PKE [12] - Pull / Keep Enable Field Reset: ENABLED // DISABLED (0) - Pull/Keeper Disabled // ENABLED (1) - Pull/Keeper Enabled // ODE [11] - Open Drain Enable Field Reset: DISABLED // Enables open drain of the pin. // DISABLED (0) - Output is CMOS. // ENABLED (1) - Output is Open Drain. // SPEED [7:6] - Speed Field Reset: 100MHZ // RESERVED0 (0) - Reserved // 50MHZ (1) - Low (50 MHz) // 100MHZ (2) - Medium (100 MHz) // 200MHZ (3) - Maximum (200 MHz) // DSE [5:3] - Drive Strength Field Reset: 40_OHM // HIZ (0) - HI-Z // 240_OHM (1) - 240 Ohm // 120_OHM (2) - 120 Ohm // 80_OHM (3) - 80 Ohm // 60_OHM (4) - 60 Ohm // 48_OHM (5) - 48 Ohm // 40_OHM (6) - 40 Ohm // 34_OHM (7) - 34 Ohm // SRE [0] - Slew Rate Field Reset: SLOW // Slew rate control. 
// SLOW (0) - Slow Slew Rate // FAST (1) - Fast Slew Rate HW_IOMUXC_SW_PAD_CTL_PAD_GPIO06_WR( BF_IOMUXC_SW_PAD_CTL_PAD_GPIO06_HYS_V(ENABLED) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO06_PUS_V(100K_OHM_PU) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO06_PUE_V(PULL) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO06_PKE_V(ENABLED) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO06_ODE_V(DISABLED) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO06_SPEED_V(100MHZ) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO06_DSE_V(40_OHM) | BF_IOMUXC_SW_PAD_CTL_PAD_GPIO06_SRE_V(SLOW)); // Pad GPIO06 is involved in Daisy Chain. // Input Select Register: // IOMUXC_I2C3_SDA_IN_SELECT_INPUT(0x020E087C) // DAISY [1:0] - MUX Mode Select Field Reset: EIM_DATA18_ALT6 // Selecting Pads Involved in Daisy Chain. // EIM_DATA18_ALT6 (0) - Select signal i2c3 I2C3_SDA as input from pad EIM_DATA18(ALT6). // GPIO16_ALT6 (1) - Select signal i2c3 I2C3_SDA as input from pad GPIO16(ALT6). // GPIO06_ALT2 (2) - Select signal i2c3 I2C3_SDA as input from pad GPIO06(ALT2). HW_IOMUXC_I2C3_SDA_IN_SELECT_INPUT_WR( BF_IOMUXC_I2C3_SDA_IN_SELECT_INPUT_DAISY_V(GPIO06_ALT2)); }
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
	stdjson "encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/spf13/pflag"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"gopkg.in/natefinch/lumberjack.v2"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
	"k8s.io/apiserver/pkg/server"
	v1 "k8s.io/client-go/tools/clientcmd/api/v1"
)

// TestAuditValidOptions verifies that valid audit option combinations pass
// Validate(), that flag registration does not alter the defaults, and that
// ApplyTo() wires up the expected backend chain (compared via the backend's
// string representation, e.g. "union[ignoreErrors<log>,buffered<webhook>]").
func TestAuditValidOptions(t *testing.T) {
	tmpDir := t.TempDir()
	auditPath := filepath.Join(tmpDir, "audit")

	webhookConfig := makeTmpWebhookConfig(t)
	defer os.Remove(webhookConfig)

	policy := makeTmpPolicy(t)
	defer os.Remove(policy)

	testCases := []struct {
		name    string
		options func() *AuditOptions
		// expected backend string; "" means no audit backend is configured
		expected string
	}{{
		name:    "default",
		options: NewAuditOptions,
	}, {
		name: "default log",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.LogOptions.Path = auditPath
			o.PolicyFile = policy
			return o
		},
		expected: "ignoreErrors<log>",
	}, {
		name: "stdout log",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.LogOptions.Path = "-"
			o.PolicyFile = policy
			return o
		},
		expected: "ignoreErrors<log>",
	}, {
		// A log path without a policy file configures no backend.
		name: "default log no policy",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.LogOptions.Path = auditPath
			return o
		},
		expected: "",
	}, {
		name: "default webhook",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.WebhookOptions.ConfigFile = webhookConfig
			o.PolicyFile = policy
			return o
		},
		expected: "buffered<webhook>",
	}, {
		name: "default webhook no policy",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.WebhookOptions.ConfigFile = webhookConfig
			return o
		},
		expected: "",
	}, {
		name: "strict webhook",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.WebhookOptions.ConfigFile = webhookConfig
			o.WebhookOptions.BatchOptions.Mode = ModeBlockingStrict
			o.PolicyFile = policy
			return o
		},
		expected: "webhook",
	}, {
		// Log and webhook together are combined into a union backend.
		name: "default union",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.LogOptions.Path = auditPath
			o.WebhookOptions.ConfigFile = webhookConfig
			o.PolicyFile = policy
			return o
		},
		expected: "union[ignoreErrors<log>,buffered<webhook>]",
	}, {
		name: "custom",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.LogOptions.BatchOptions.Mode = ModeBatch
			o.LogOptions.Path = auditPath
			o.WebhookOptions.BatchOptions.Mode = ModeBlocking
			o.WebhookOptions.ConfigFile = webhookConfig
			o.PolicyFile = policy
			return o
		},
		expected: "union[buffered<log>,ignoreErrors<webhook>]",
	}, {
		name: "default webhook with truncating",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.WebhookOptions.ConfigFile = webhookConfig
			o.WebhookOptions.TruncateOptions.Enabled = true
			o.PolicyFile = policy
			return o
		},
		expected: "truncate<buffered<webhook>>",
	},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			options := tc.options()
			require.NotNil(t, options)

			// Verify flags don't change defaults.
			fs := pflag.NewFlagSet("Test", pflag.PanicOnError)
			options.AddFlags(fs)
			require.NoError(t, fs.Parse(nil))
			assert.Equal(t, tc.options(), options, "Flag defaults should match default options.")

			assert.Empty(t, options.Validate(), "Options should be valid.")
			config := &server.Config{}
			require.NoError(t, options.ApplyTo(config))
			if tc.expected == "" {
				assert.Nil(t, config.AuditBackend)
			} else {
				assert.Equal(t, tc.expected, fmt.Sprintf("%s", config.AuditBackend))
			}

			w, err := options.LogOptions.getWriter()
			require.NoError(t, err, "Writer creation should not fail.")

			// Don't check writer if logging is disabled.
			if w == nil {
				return
			}

			// "-" means stdout; any other path must be backed by a
			// rotating lumberjack logger that created the file.
			if options.LogOptions.Path == "-" {
				assert.Equal(t, os.Stdout, w)
				assert.NoFileExists(t, options.LogOptions.Path)
			} else {
				assert.IsType(t, (*lumberjack.Logger)(nil), w)
				assert.FileExists(t, options.LogOptions.Path)
			}
		})
	}
}

// TestAuditInvalidOptions verifies that malformed audit option combinations
// are rejected by Validate().
func TestAuditInvalidOptions(t *testing.T) {
	tmpDir := t.TempDir()
	auditPath := filepath.Join(tmpDir, "audit")

	testCases := []struct {
		name    string
		options func() *AuditOptions
	}{{
		name: "invalid log format",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.LogOptions.Path = auditPath
			o.LogOptions.Format = "foo"
			return o
		},
	}, {
		name: "invalid log mode",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.LogOptions.Path = auditPath
			o.LogOptions.BatchOptions.Mode = "foo"
			return o
		},
	}, {
		name: "invalid log buffer size",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.LogOptions.Path = auditPath
			o.LogOptions.BatchOptions.Mode = "batch"
			o.LogOptions.BatchOptions.BatchConfig.BufferSize = -3
			return o
		},
	}, {
		name: "invalid webhook mode",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.WebhookOptions.ConfigFile = auditPath
			o.WebhookOptions.BatchOptions.Mode = "foo"
			return o
		},
	}, {
		name: "invalid webhook buffer throttle qps",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.WebhookOptions.ConfigFile = auditPath
			o.WebhookOptions.BatchOptions.Mode = "batch"
			o.WebhookOptions.BatchOptions.BatchConfig.ThrottleQPS = -1
			return o
		},
	}, {
		name: "invalid webhook truncate max event size",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.WebhookOptions.ConfigFile = auditPath
			o.WebhookOptions.TruncateOptions.Enabled = true
			o.WebhookOptions.TruncateOptions.TruncateConfig.MaxEventSize = -1
			return o
		},
	}, {
		// MaxBatchSize smaller than MaxEventSize is inconsistent.
		name: "invalid webhook truncate max batch size",
		options: func() *AuditOptions {
			o := NewAuditOptions()
			o.WebhookOptions.ConfigFile = auditPath
			o.WebhookOptions.TruncateOptions.Enabled = true
			o.WebhookOptions.TruncateOptions.TruncateConfig.MaxEventSize = 2
			o.WebhookOptions.TruncateOptions.TruncateConfig.MaxBatchSize = 1
			return o
		},
	},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			options := tc.options()
			require.NotNil(t, options)
			assert.NotEmpty(t, options.Validate(), "Options should be invalid.")
		})
	}
}

// makeTmpWebhookConfig writes a minimal kubeconfig for the audit webhook to a
// temporary file and returns its path; the caller is responsible for removal.
func makeTmpWebhookConfig(t *testing.T) string {
	config := v1.Config{
		Clusters: []v1.NamedCluster{
			{Cluster: v1.Cluster{Server: "localhost", InsecureSkipTLSVerify: true}},
		},
	}
	f, err := ioutil.TempFile("", "k8s_audit_webhook_test_")
	require.NoError(t, err, "creating temp file")
	require.NoError(t, stdjson.NewEncoder(f).Encode(config), "writing webhook kubeconfig")
	require.NoError(t, f.Close())
	return f.Name()
}

// makeTmpPolicy writes an audit policy that logs everything at
// RequestResponse level to a temporary file and returns its path; the caller
// is responsible for removal.
func makeTmpPolicy(t *testing.T) string {
	pol := auditv1.Policy{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "audit.k8s.io/v1",
		},
		Rules: []auditv1.PolicyRule{
			{
				Level: auditv1.LevelRequestResponse,
			},
		},
	}
	f, err := ioutil.TempFile("", "k8s_audit_policy_test_")
	require.NoError(t, err, "creating temp file")
	require.NoError(t, stdjson.NewEncoder(f).Encode(pol), "writing policy file")
	require.NoError(t, f.Close())
	return f.Name()
}
<?php
/**
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @since 1.25
 *
 * @file
 *
 * @license GNU GPL v2+
 */

/**
 * Dumps all sites known to a SiteLookup into a JSON cache file.
 */
class SitesCacheFileBuilder {

	/**
	 * @var SiteLookup
	 */
	private $siteLookup;

	/**
	 * @var string
	 */
	private $cacheFile;

	/**
	 * Sites fetched from the lookup during the last build() call.
	 * Declared explicitly so build() does not create a dynamic property
	 * (deprecated as of PHP 8.2).
	 *
	 * @var SiteList|null
	 */
	private $sites = null;

	/**
	 * @param SiteLookup $siteLookup Source of the sites to cache
	 * @param string $cacheFile Path of the JSON cache file to write
	 */
	public function __construct( SiteLookup $siteLookup, $cacheFile ) {
		$this->siteLookup = $siteLookup;
		$this->cacheFile = $cacheFile;
	}

	/**
	 * Fetches all sites from the lookup and writes them to the cache file.
	 */
	public function build() {
		$this->sites = $this->siteLookup->getSites();
		$this->cacheSites( $this->sites->getArrayCopy() );
	}

	/**
	 * Serializes the given sites to JSON, keyed by global id, and writes
	 * the result to the cache file.
	 *
	 * @param Site[] $sites
	 *
	 * @return bool Whether writing the cache file succeeded
	 */
	private function cacheSites( array $sites ) {
		$sitesArray = array();

		foreach ( $sites as $site ) {
			$globalId = $site->getGlobalId();
			$sitesArray[$globalId] = $this->getSiteAsArray( $site );
		}

		$json = json_encode( array(
			'sites' => $sitesArray
		) );

		$result = file_put_contents( $this->cacheFile, $json );

		return $result !== false;
	}

	/**
	 * @param Site $site
	 *
	 * @return array Site fields, with the local identifiers added under
	 *  the 'identifiers' key
	 */
	private function getSiteAsArray( Site $site ) {
		// Site::serialize() yields a serialized array of the site's fields.
		$siteEntry = unserialize( $site->serialize() );
		// buildLocalIdentifiers() already returns a freshly-built,
		// zero-indexed array, so no element-by-element copy is needed.
		$siteEntry['identifiers'] = $this->buildLocalIdentifiers( $site );

		return $siteEntry;
	}

	/**
	 * @param Site $site
	 *
	 * @return array Site local identifiers, one entry per id, each as
	 *  array( 'type' => ..., 'key' => ... )
	 */
	private function buildLocalIdentifiers( Site $site ) {
		$localIds = array();

		foreach ( $site->getLocalIds() as $idType => $ids ) {
			foreach ( $ids as $id ) {
				$localIds[] = array(
					'type' => $idType,
					'key' => $id
				);
			}
		}

		return $localIds;
	}

}
/* $Id: transport_loop_test.c 3553 2011-05-05 06:14:19Z nanang $ */
/*
 * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
 * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "test.h"
#include <pjsip.h>
#include <pjlib.h>

#define THIS_FILE   "transport_loop_test.c"

/*
 * Exercise the loopback datagram transport: basic attributes, single- and
 * multi-threaded round trips (with and without simulated network delay),
 * and reference-count balance. Returns 0 on success, a negative error
 * code on failure.
 */
static int datagram_loop_test(void)
{
    enum { LOOP = 8 };
    pjsip_transport *loop;
    int i, pkt_lost;
    pj_sockaddr_in addr;
    pj_status_t status;
    long ref_cnt;
    int rtt[LOOP], min_rtt;

    PJ_LOG(3,(THIS_FILE, "testing datagram loop transport"));

    /* The loop transport does not use the destination address, but zero
     * it so that no uninitialized stack bytes are passed to the API.
     */
    pj_bzero(&addr, sizeof(addr));

    /* Test acquire transport. */
    status = pjsip_endpt_acquire_transport( endpt, PJSIP_TRANSPORT_LOOP_DGRAM,
					    &addr, sizeof(addr), NULL, &loop);
    if (status != PJ_SUCCESS) {
	app_perror(" error: loop transport is not configured", status);
	return -20;
    }

    /* Get initial reference counter, to verify balance at the end. */
    ref_cnt = pj_atomic_get(loop->ref_cnt);

    /* Test basic transport attributes */
    status = generic_transport_test(loop);
    if (status != PJ_SUCCESS)
	return status;

    /* Basic transport's send/receive loopback test. */
    for (i=0; i<LOOP; ++i) {
	status = transport_send_recv_test(PJSIP_TRANSPORT_LOOP_DGRAM, loop,
					  "sip:bob@130.0.0.1;transport=loop-dgram",
					  &rtt[i]);
	if (status != 0)
	    return status;
    }

    /* Report the best (minimum) round-trip time observed. */
    min_rtt = 0xFFFFFFF;
    for (i=0; i<LOOP; ++i)
	if (rtt[i] < min_rtt)
	    min_rtt = rtt[i];

    report_ival("loop-rtt-usec", min_rtt, "usec",
		"Best Loopback transport round trip time, in microseconds "
		"(time from sending request until response is received. "
		"Tests were performed on local machine only)");

    /* Multi-threaded round-trip test. */
    status = transport_rt_test(PJSIP_TRANSPORT_LOOP_DGRAM, loop,
			       "sip:bob@130.0.0.1;transport=loop-dgram",
			       &pkt_lost);
    if (status != 0)
	return status;

    if (pkt_lost != 0) {
	PJ_LOG(3,(THIS_FILE, " error: %d packet(s) was lost", pkt_lost));
	return -40;
    }

    /* Put delay. */
    PJ_LOG(3,(THIS_FILE," setting network delay to 10 ms"));
    pjsip_loop_set_delay(loop, 10);

    /* Multi-threaded round-trip test, this time with simulated delay. */
    status = transport_rt_test(PJSIP_TRANSPORT_LOOP_DGRAM, loop,
			       "sip:bob@130.0.0.1;transport=loop-dgram",
			       &pkt_lost);
    if (status != 0)
	return status;

    if (pkt_lost != 0) {
	PJ_LOG(3,(THIS_FILE, " error: %d packet(s) was lost", pkt_lost));
	return -50;
    }

    /* Restore delay. */
    pjsip_loop_set_delay(loop, 0);

    /* Check reference counter.
     * ref_cnt is a long, so use %ld; the original "%d" is undefined
     * behavior on platforms where long is wider than int (e.g. LP64).
     */
    if (pj_atomic_get(loop->ref_cnt) != ref_cnt) {
	PJ_LOG(3,(THIS_FILE, " error: ref counter is not %ld (%ld)",
		  ref_cnt, (long)pj_atomic_get(loop->ref_cnt)));
	return -51;
    }

    /* Decrement reference. */
    pjsip_transport_dec_ref(loop);
    return 0;
}

/*
 * Entry point for the loop transport test suite.
 * Returns 0 on success, a negative error code on failure.
 */
int transport_loop_test(void)
{
    int status;

    status = datagram_loop_test();
    if (status != 0)
	return status;

    return 0;
}
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.component.ironmq;

import org.apache.camel.CamelContext;
import org.apache.camel.EndpointInject;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.mock.MockEndpoint;
import org.apache.camel.test.junit4.CamelTestSupport;
import org.junit.Assert;
import org.junit.Test;

/**
 * Verifies that message headers survive a round trip through an IronMQ queue
 * when the endpoint is configured with {@code preserveHeaders=true}.
 */
public class IronMQPreserveHeadersTest extends CamelTestSupport {

    // Endpoint under test; created in createCamelContext() and used by both routes.
    private IronMQEndpoint endpoint;

    @EndpointInject(uri = "mock:result")
    private MockEndpoint result;

    @Test
    public void testPreserveHeaders() throws Exception {
        MockEndpoint mock = getMockEndpoint("mock:result");
        mock.expectedMinimumMessageCount(1);
        mock.expectedBodiesReceived("some payload");
        // The custom header set by the producer must be visible to the consumer.
        mock.expectedHeaderReceived("MyHeader", "HeaderValue");

        template.sendBodyAndHeader("direct:start", "some payload", "MyHeader", "HeaderValue");

        assertMockEndpointsSatisfied();
        // The consumer must also populate the IronMQ message id header.
        String id = mock.getExchanges().get(0).getIn().getHeader(IronMQConstants.MESSAGE_ID, String.class);
        Assert.assertNotNull(id);
    }

    @Override
    protected CamelContext createCamelContext() throws Exception {
        CamelContext context = super.createCamelContext();
        IronMQComponent component = new IronMQComponent(context);
        endpoint = (IronMQEndpoint)component.createEndpoint("ironmq://TestQueue?projectId=xxx&token=yyy&preserveHeaders=true");
        // Swap in the mock client so the test never talks to the real IronMQ service.
        endpoint.setClient(new IronMQClientMock("dummy", "dummy"));
        context.addComponent("ironmq", component);
        return context;
    }

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            public void configure() {
                // Producer route: direct:start -> IronMQ queue.
                from("direct:start").to(endpoint);
                // Consumer route: IronMQ queue -> mock:result.
                from(endpoint).to("mock:result");
            }
        };
    }
}
# -*- test-case-name: twisted.test.test_twistd -*- # Copyright (c) 2001-2008 Twisted Matrix Laboratories. # See LICENSE for details. """ The Twisted Daemon: platform-independent interface. @author: Christopher Armstrong """ from twisted.application import app from twisted.python.runtime import platformType if platformType == "win32": from twisted.scripts._twistw import ServerOptions, \ WindowsApplicationRunner as _SomeApplicationRunner else: from twisted.scripts._twistd_unix import ServerOptions, \ UnixApplicationRunner as _SomeApplicationRunner def runApp(config): _SomeApplicationRunner(config).run() def run(): app.run(runApp, ServerOptions) __all__ = ['run', 'runApp']
<?php /* * This file is part of Twig. * * (c) 2009 Fabien Potencier * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ /** * Stores the Twig configuration. * * @package twig * @author Fabien Potencier <fabien@symfony.com> */ class Twig_Environment { const VERSION = '1.8.2'; protected $charset; protected $loader; protected $debug; protected $autoReload; protected $cache; protected $lexer; protected $parser; protected $compiler; protected $baseTemplateClass; protected $extensions; protected $parsers; protected $visitors; protected $filters; protected $tests; protected $functions; protected $globals; protected $runtimeInitialized; protected $loadedTemplates; protected $strictVariables; protected $unaryOperators; protected $binaryOperators; protected $templateClassPrefix = '__TwigTemplate_'; protected $functionCallbacks; protected $filterCallbacks; protected $staging; /** * Constructor. * * Available options: * * * debug: When set to true, it automatically set "auto_reload" to true as * well (default to false). * * * charset: The charset used by the templates (default to utf-8). * * * base_template_class: The base template class to use for generated * templates (default to Twig_Template). * * * cache: An absolute path where to store the compiled templates, or * false to disable compilation cache (default). * * * auto_reload: Whether to reload the template is the original source changed. * If you don't provide the auto_reload option, it will be * determined automatically base on the debug value. * * * strict_variables: Whether to ignore invalid variables in templates * (default to false). 
* * * autoescape: Whether to enable auto-escaping (default to html): * * false: disable auto-escaping * * true: equivalent to html * * html, js: set the autoescaping to one of the supported strategies * * PHP callback: a PHP callback that returns an escaping strategy based on the template "filename" * * * optimizations: A flag that indicates which optimizations to apply * (default to -1 which means that all optimizations are enabled; * set it to 0 to disable). * * @param Twig_LoaderInterface $loader A Twig_LoaderInterface instance * @param array $options An array of options */ public function __construct(Twig_LoaderInterface $loader = null, $options = array()) { if (null !== $loader) { $this->setLoader($loader); } $options = array_merge(array( 'debug' => false, 'charset' => 'UTF-8', 'base_template_class' => 'Twig_Template', 'strict_variables' => false, 'autoescape' => 'html', 'cache' => false, 'auto_reload' => null, 'optimizations' => -1, ), $options); $this->debug = (bool) $options['debug']; $this->charset = $options['charset']; $this->baseTemplateClass = $options['base_template_class']; $this->autoReload = null === $options['auto_reload'] ? $this->debug : (bool) $options['auto_reload']; $this->extensions = array( 'core' => new Twig_Extension_Core(), 'escaper' => new Twig_Extension_Escaper($options['autoescape']), 'optimizer' => new Twig_Extension_Optimizer($options['optimizations']), ); $this->strictVariables = (bool) $options['strict_variables']; $this->runtimeInitialized = false; $this->setCache($options['cache']); $this->functionCallbacks = array(); $this->filterCallbacks = array(); $this->staging = array( 'functions' => array(), 'filters' => array(), 'tests' => array(), 'token_parsers' => array(), 'visitors' => array(), 'globals' => array(), ); } /** * Gets the base template class for compiled templates. 
* * @return string The base template class name */ public function getBaseTemplateClass() { return $this->baseTemplateClass; } /** * Sets the base template class for compiled templates. * * @param string $class The base template class name */ public function setBaseTemplateClass($class) { $this->baseTemplateClass = $class; } /** * Enables debugging mode. */ public function enableDebug() { $this->debug = true; } /** * Disables debugging mode. */ public function disableDebug() { $this->debug = false; } /** * Checks if debug mode is enabled. * * @return Boolean true if debug mode is enabled, false otherwise */ public function isDebug() { return $this->debug; } /** * Enables the auto_reload option. */ public function enableAutoReload() { $this->autoReload = true; } /** * Disables the auto_reload option. */ public function disableAutoReload() { $this->autoReload = false; } /** * Checks if the auto_reload option is enabled. * * @return Boolean true if auto_reload is enabled, false otherwise */ public function isAutoReload() { return $this->autoReload; } /** * Enables the strict_variables option. */ public function enableStrictVariables() { $this->strictVariables = true; } /** * Disables the strict_variables option. */ public function disableStrictVariables() { $this->strictVariables = false; } /** * Checks if the strict_variables option is enabled. * * @return Boolean true if strict_variables is enabled, false otherwise */ public function isStrictVariables() { return $this->strictVariables; } /** * Gets the cache directory or false if cache is disabled. * * @return string|false */ public function getCache() { return $this->cache; } /** * Sets the cache directory or false if cache is disabled. * * @param string|false $cache The absolute path to the compiled templates, * or false to disable cache */ public function setCache($cache) { $this->cache = $cache ? $cache : false; } /** * Gets the cache filename for a given template. 
* * @param string $name The template name * * @return string The cache file name */ public function getCacheFilename($name) { if (false === $this->cache) { return false; } $class = substr($this->getTemplateClass($name), strlen($this->templateClassPrefix)); return $this->getCache().'/'.substr($class, 0, 2).'/'.substr($class, 2, 2).'/'.substr($class, 4).'.php'; } /** * Gets the template class associated with the given string. * * @param string $name The name for which to calculate the template class name * @param integer $index The index if it is an embedded template * * @return string The template class name */ public function getTemplateClass($name, $index = null) { return $this->templateClassPrefix.md5($this->loader->getCacheKey($name)).(null === $index ? '' : '_'.$index); } /** * Gets the template class prefix. * * @return string The template class prefix */ public function getTemplateClassPrefix() { return $this->templateClassPrefix; } /** * Renders a template. * * @param string $name The template name * @param array $context An array of parameters to pass to the template * * @return string The rendered template */ public function render($name, array $context = array()) { return $this->loadTemplate($name)->render($context); } /** * Displays a template. * * @param string $name The template name * @param array $context An array of parameters to pass to the template */ public function display($name, array $context = array()) { $this->loadTemplate($name)->display($context); } /** * Loads a template by name. 
* * @param string $name The template name * @param integer $index The index if it is an embedded template * * @return Twig_TemplateInterface A template instance representing the given template name */ public function loadTemplate($name, $index = null) { $cls = $this->getTemplateClass($name, $index); if (isset($this->loadedTemplates[$cls])) { return $this->loadedTemplates[$cls]; } if (!class_exists($cls, false)) { if (false === $cache = $this->getCacheFilename($name)) { eval('?>'.$this->compileSource($this->loader->getSource($name), $name)); } else { if (!is_file($cache) || ($this->isAutoReload() && !$this->isTemplateFresh($name, filemtime($cache)))) { $this->writeCacheFile($cache, $this->compileSource($this->loader->getSource($name), $name)); } require_once $cache; } } if (!$this->runtimeInitialized) { $this->initRuntime(); } return $this->loadedTemplates[$cls] = new $cls($this); } /** * Returns true if the template is still fresh. * * Besides checking the loader for freshness information, * this method also checks if the enabled extensions have * not changed. * * @param string $name The template name * @param timestamp $time The last modification time of the cached template * * @return Boolean true if the template is fresh, false otherwise */ public function isTemplateFresh($name, $time) { foreach ($this->extensions as $extension) { $r = new ReflectionObject($extension); if (filemtime($r->getFileName()) > $time) { return false; } } return $this->loader->isFresh($name, $time); } public function resolveTemplate($names) { if (!is_array($names)) { $names = array($names); } foreach ($names as $name) { if ($name instanceof Twig_Template) { return $name; } try { return $this->loadTemplate($name); } catch (Twig_Error_Loader $e) { } } if (1 === count($names)) { throw $e; } throw new Twig_Error_Loader(sprintf('Unable to find one of the following templates: "%s".', implode('", "', $names))); } /** * Clears the internal template cache. 
*/ public function clearTemplateCache() { $this->loadedTemplates = array(); } /** * Clears the template cache files on the filesystem. */ public function clearCacheFiles() { if (false === $this->cache) { return; } foreach (new RecursiveIteratorIterator(new RecursiveDirectoryIterator($this->cache), RecursiveIteratorIterator::LEAVES_ONLY) as $file) { if ($file->isFile()) { @unlink($file->getPathname()); } } } /** * Gets the Lexer instance. * * @return Twig_LexerInterface A Twig_LexerInterface instance */ public function getLexer() { if (null === $this->lexer) { $this->lexer = new Twig_Lexer($this); } return $this->lexer; } /** * Sets the Lexer instance. * * @param Twig_LexerInterface A Twig_LexerInterface instance */ public function setLexer(Twig_LexerInterface $lexer) { $this->lexer = $lexer; } /** * Tokenizes a source code. * * @param string $source The template source code * @param string $name The template name * * @return Twig_TokenStream A Twig_TokenStream instance */ public function tokenize($source, $name = null) { return $this->getLexer()->tokenize($source, $name); } /** * Gets the Parser instance. * * @return Twig_ParserInterface A Twig_ParserInterface instance */ public function getParser() { if (null === $this->parser) { $this->parser = new Twig_Parser($this); } return $this->parser; } /** * Sets the Parser instance. * * @param Twig_ParserInterface A Twig_ParserInterface instance */ public function setParser(Twig_ParserInterface $parser) { $this->parser = $parser; } /** * Parses a token stream. * * @param Twig_TokenStream $tokens A Twig_TokenStream instance * * @return Twig_Node_Module A Node tree */ public function parse(Twig_TokenStream $tokens) { return $this->getParser()->parse($tokens); } /** * Gets the Compiler instance. 
*
 * @return Twig_CompilerInterface A Twig_CompilerInterface instance
 */
public function getCompiler()
{
    // Lazily instantiate the default compiler on first use.
    if (null === $this->compiler) {
        $this->compiler = new Twig_Compiler($this);
    }

    return $this->compiler;
}

/**
 * Sets the Compiler instance.
 *
 * @param Twig_CompilerInterface $compiler A Twig_CompilerInterface instance
 */
public function setCompiler(Twig_CompilerInterface $compiler)
{
    $this->compiler = $compiler;
}

/**
 * Compiles a Node.
 *
 * @param Twig_NodeInterface $node A Twig_NodeInterface instance
 *
 * @return string The compiled PHP source code
 */
public function compile(Twig_NodeInterface $node)
{
    return $this->getCompiler()->compile($node)->getSource();
}

/**
 * Compiles a template source code.
 *
 * Runs the full pipeline: tokenize -> parse -> compile. Twig errors are
 * re-thrown with the template file name attached; any other exception is
 * wrapped in a Twig_Error_Runtime so callers always see a Twig error type.
 *
 * @param string $source The template source code
 * @param string $name   The template name
 *
 * @return string The compiled PHP source code
 */
public function compileSource($source, $name = null)
{
    try {
        return $this->compile($this->parse($this->tokenize($source, $name)));
    } catch (Twig_Error $e) {
        $e->setTemplateFile($name);
        throw $e;
    } catch (Exception $e) {
        throw new Twig_Error_Runtime(sprintf('An exception has been thrown during the compilation of a template ("%s").', $e->getMessage()), -1, $name, $e);
    }
}

/**
 * Sets the Loader instance.
 *
 * @param Twig_LoaderInterface $loader A Twig_LoaderInterface instance
 */
public function setLoader(Twig_LoaderInterface $loader)
{
    $this->loader = $loader;
}

/**
 * Gets the Loader instance.
 *
 * @return Twig_LoaderInterface A Twig_LoaderInterface instance
 */
public function getLoader()
{
    return $this->loader;
}

/**
 * Sets the default template charset.
 *
 * @param string $charset The default charset
 */
public function setCharset($charset)
{
    $this->charset = $charset;
}

/**
 * Gets the default template charset.
 *
 * @return string The default charset
 */
public function getCharset()
{
    return $this->charset;
}

/**
 * Initializes the runtime environment.
 *
 * Gives every registered extension a chance to set up runtime state.
 */
public function initRuntime()
{
    $this->runtimeInitialized = true;

    foreach ($this->getExtensions() as $extension) {
        $extension->initRuntime($this);
    }
}

/**
 * Returns true if the given extension is registered.
 *
 * @param string $name The extension name
 *
 * @return Boolean Whether the extension is registered or not
 */
public function hasExtension($name)
{
    return isset($this->extensions[$name]);
}

/**
 * Gets an extension by name.
 *
 * @param string $name The extension name
 *
 * @return Twig_ExtensionInterface A Twig_ExtensionInterface instance
 */
public function getExtension($name)
{
    if (!isset($this->extensions[$name])) {
        throw new Twig_Error_Runtime(sprintf('The "%s" extension is not enabled.', $name));
    }

    return $this->extensions[$name];
}

/**
 * Registers an extension.
 *
 * @param Twig_ExtensionInterface $extension A Twig_ExtensionInterface instance
 */
public function addExtension(Twig_ExtensionInterface $extension)
{
    $this->extensions[$extension->getName()] = $extension;
    // Invalidate every lazily-built cache so the new extension's
    // parsers/visitors/filters/tests/functions/globals are picked up.
    $this->parsers = null;
    $this->visitors = null;
    $this->filters = null;
    $this->tests = null;
    $this->functions = null;
    $this->globals = null;
}

/**
 * Removes an extension by name.
 *
 * @param string $name The extension name
 */
public function removeExtension($name)
{
    unset($this->extensions[$name]);
    // Same cache invalidation as addExtension().
    $this->parsers = null;
    $this->visitors = null;
    $this->filters = null;
    $this->tests = null;
    $this->functions = null;
    $this->globals = null;
}

/**
 * Registers an array of extensions.
 *
 * @param array $extensions An array of extensions
 */
public function setExtensions(array $extensions)
{
    foreach ($extensions as $extension) {
        $this->addExtension($extension);
    }
}

/**
 * Returns all registered extensions.
 *
 * @return array An array of extensions
 */
public function getExtensions()
{
    return $this->extensions;
}

/**
 * Registers a Token Parser.
 *
 * @param Twig_TokenParserInterface $parser A Twig_TokenParserInterface instance
 */
public function addTokenParser(Twig_TokenParserInterface $parser)
{
    $this->staging['token_parsers'][] = $parser;
    $this->parsers = null;
}

/**
 * Gets the registered Token Parsers.
 *
 * Builds the broker lazily from parsers registered directly on the
 * environment (staging) plus those contributed by extensions.
 *
 * @return Twig_TokenParserBrokerInterface A broker containing token parsers
 */
public function getTokenParsers()
{
    if (null === $this->parsers) {
        $this->parsers = new Twig_TokenParserBroker();

        if (isset($this->staging['token_parsers'])) {
            foreach ($this->staging['token_parsers'] as $parser) {
                $this->parsers->addTokenParser($parser);
            }
        }

        foreach ($this->getExtensions() as $extension) {
            $parsers = $extension->getTokenParsers();
            foreach ($parsers as $parser) {
                if ($parser instanceof Twig_TokenParserInterface) {
                    $this->parsers->addTokenParser($parser);
                } elseif ($parser instanceof Twig_TokenParserBrokerInterface) {
                    $this->parsers->addTokenParserBroker($parser);
                } else {
                    throw new Twig_Error_Runtime('getTokenParsers() must return an array of Twig_TokenParserInterface or Twig_TokenParserBrokerInterface instances');
                }
            }
        }
    }

    return $this->parsers;
}

/**
 * Gets registered tags.
 *
 * Be warned that this method cannot return tags defined by Twig_TokenParserBrokerInterface classes.
 *
 * @return Twig_TokenParserInterface[] An array of Twig_TokenParserInterface instances
 */
public function getTags()
{
    $tags = array();
    foreach ($this->getTokenParsers()->getParsers() as $parser) {
        if ($parser instanceof Twig_TokenParserInterface) {
            $tags[$parser->getTag()] = $parser;
        }
    }

    return $tags;
}

/**
 * Registers a Node Visitor.
 *
 * @param Twig_NodeVisitorInterface $visitor A Twig_NodeVisitorInterface instance
 */
public function addNodeVisitor(Twig_NodeVisitorInterface $visitor)
{
    $this->staging['visitors'][] = $visitor;
    $this->visitors = null;
}

/**
 * Gets the registered Node Visitors.
 *
 * NOTE(review): extension visitors are merged into the staging array via
 * addNodeVisitor(), so they become a permanent part of staging after the
 * first call; a later cache reset re-runs this loop over all extensions.
 *
 * @return Twig_NodeVisitorInterface[] An array of Twig_NodeVisitorInterface instances
 */
public function getNodeVisitors()
{
    if (null === $this->visitors) {
        foreach ($this->getExtensions() as $extension) {
            foreach ($extension->getNodeVisitors() as $visitor) {
                $this->addNodeVisitor($visitor);
            }
        }
        $this->visitors = $this->staging['visitors'];
    }

    return $this->visitors;
}

/**
 * Registers a Filter.
 *
 * @param string               $name   The filter name
 * @param Twig_FilterInterface $filter A Twig_FilterInterface instance
 */
public function addFilter($name, Twig_FilterInterface $filter)
{
    $this->staging['filters'][$name] = $filter;
    $this->filters = null;
}

/**
 * Get a filter by name.
 *
 * Subclasses may override this method and load filters differently;
 * so no list of filters is available.
 *
 * Lookup order: exact name, then registered wildcard patterns ('*' in a
 * filter name matches lazily and the captures become filter arguments),
 * then any undefined-filter callbacks.
 *
 * @param string $name The filter name
 *
 * @return Twig_Filter|false A Twig_Filter instance or false if the filter does not exists
 */
public function getFilter($name)
{
    if (null === $this->filters) {
        $this->getFilters();
    }

    if (isset($this->filters[$name])) {
        return $this->filters[$name];
    }

    foreach ($this->filters as $pattern => $filter) {
        // $count > 0 only when the registered name contained a '*' wildcard.
        $pattern = str_replace('\\*', '(.*?)', preg_quote($pattern, '#'), $count);

        if ($count) {
            if (preg_match('#^'.$pattern.'$#', $name, $matches)) {
                array_shift($matches);
                $filter->setArguments($matches);

                return $filter;
            }
        }
    }

    foreach ($this->filterCallbacks as $callback) {
        if (false !== $filter = call_user_func($callback, $name)) {
            return $filter;
        }
    }

    return false;
}

/**
 * Registers a callback invoked when an unknown filter name is looked up.
 *
 * @param callable $callable Receives the filter name; returns a filter or false
 */
public function registerUndefinedFilterCallback($callable)
{
    $this->filterCallbacks[] = $callable;
}

/**
 * Gets the registered Filters.
 *
 * Be warned that this method cannot return filters defined with registerUndefinedFunctionCallback.
 *
 * @return Twig_FilterInterface[] An array of Twig_FilterInterface instances
 *
 * @see registerUndefinedFilterCallback
 */
public function getFilters()
{
    if (null === $this->filters) {
        foreach ($this->getExtensions() as $extension) {
            foreach ($extension->getFilters() as $name => $filter) {
                $this->addFilter($name, $filter);
            }
        }
        $this->filters = $this->staging['filters'];
    }

    return $this->filters;
}

/**
 * Registers a Test.
 *
 * @param string             $name The test name
 * @param Twig_TestInterface $test A Twig_TestInterface instance
 */
public function addTest($name, Twig_TestInterface $test)
{
    $this->staging['tests'][$name] = $test;
    $this->tests = null;
}

/**
 * Gets the registered Tests.
 *
 * @return Twig_TestInterface[] An array of Twig_TestInterface instances
 */
public function getTests()
{
    if (null === $this->tests) {
        foreach ($this->getExtensions() as $extension) {
            foreach ($extension->getTests() as $name => $test) {
                $this->addTest($name, $test);
            }
        }
        $this->tests = $this->staging['tests'];
    }

    return $this->tests;
}

/**
 * Registers a Function.
 *
 * @param string                 $name     The function name
 * @param Twig_FunctionInterface $function A Twig_FunctionInterface instance
 */
public function addFunction($name, Twig_FunctionInterface $function)
{
    $this->staging['functions'][$name] = $function;
    $this->functions = null;
}

/**
 * Get a function by name.
 *
 * Subclasses may override this method and load functions differently;
 * so no list of functions is available.
 *
 * Same lookup order as getFilter(): exact name, wildcard patterns,
 * then undefined-function callbacks.
 *
 * @param string $name function name
 *
 * @return Twig_Function|false A Twig_Function instance or false if the function does not exists
 */
public function getFunction($name)
{
    if (null === $this->functions) {
        $this->getFunctions();
    }

    if (isset($this->functions[$name])) {
        return $this->functions[$name];
    }

    foreach ($this->functions as $pattern => $function) {
        // $count > 0 only when the registered name contained a '*' wildcard.
        $pattern = str_replace('\\*', '(.*?)', preg_quote($pattern, '#'), $count);

        if ($count) {
            if (preg_match('#^'.$pattern.'$#', $name, $matches)) {
                array_shift($matches);
                $function->setArguments($matches);

                return $function;
            }
        }
    }

    foreach ($this->functionCallbacks as $callback) {
        if (false !== $function = call_user_func($callback, $name)) {
            return $function;
        }
    }

    return false;
}

/**
 * Registers a callback invoked when an unknown function name is looked up.
 *
 * @param callable $callable Receives the function name; returns a function or false
 */
public function registerUndefinedFunctionCallback($callable)
{
    $this->functionCallbacks[] = $callable;
}

/**
 * Gets registered functions.
 *
 * Be warned that this method cannot return functions defined with registerUndefinedFunctionCallback.
 *
 * @return Twig_FunctionInterface[] An array of Twig_FunctionInterface instances
 *
 * @see registerUndefinedFunctionCallback
 */
public function getFunctions()
{
    if (null === $this->functions) {
        foreach ($this->getExtensions() as $extension) {
            foreach ($extension->getFunctions() as $name => $function) {
                $this->addFunction($name, $function);
            }
        }
        $this->functions = $this->staging['functions'];
    }

    return $this->functions;
}

/**
 * Registers a Global.
 *
 * @param string $name  The global name
 * @param mixed  $value The global value
 */
public function addGlobal($name, $value)
{
    $this->staging['globals'][$name] = $value;
    $this->globals = null;
}

/**
 * Gets the registered Globals.
 *
 * Extension globals override environment-level (staging) globals of the
 * same name, since array_merge takes later values.
 *
 * @return array An array of globals
 */
public function getGlobals()
{
    if (null === $this->globals) {
        $this->globals = isset($this->staging['globals']) ? $this->staging['globals'] : array();
        foreach ($this->getExtensions() as $extension) {
            $this->globals = array_merge($this->globals, $extension->getGlobals());
        }
    }

    return $this->globals;
}

/**
 * Merges a context with the defined globals.
 *
 * @param array $context An array representing the context
 *
 * @return array The context merged with the globals
 */
public function mergeGlobals(array $context)
{
    // we don't use array_merge as the context being generally
    // bigger than globals, this code is faster.
    foreach ($this->getGlobals() as $key => $value) {
        if (!array_key_exists($key, $context)) {
            $context[$key] = $value;
        }
    }

    return $context;
}

/**
 * Gets the registered unary Operators.
 *
 * @return array An array of unary operators
 */
public function getUnaryOperators()
{
    if (null === $this->unaryOperators) {
        $this->initOperators();
    }

    return $this->unaryOperators;
}

/**
 * Gets the registered binary Operators.
 *
 * @return array An array of binary operators
 */
public function getBinaryOperators()
{
    if (null === $this->binaryOperators) {
        $this->initOperators();
    }

    return $this->binaryOperators;
}

/**
 * Suggests close matches for a misspelled name (used in error messages).
 *
 * An item is an alternative when its Levenshtein distance to $name is at
 * most strlen($name)/3, or when it simply contains $name as a substring.
 * Results are sorted by ascending distance.
 *
 * @param string $name  The name that was not found
 * @param array  $items Candidate names
 *
 * @return array Candidate names ordered from closest to furthest
 */
public function computeAlternatives($name, $items)
{
    $alternatives = array();
    foreach ($items as $item) {
        $lev = levenshtein($name, $item);
        if ($lev <= strlen($name) / 3 || false !== strpos($item, $name)) {
            $alternatives[$item] = $lev;
        }
    }
    asort($alternatives);

    return array_keys($alternatives);
}

/**
 * Builds the unary/binary operator tables from all extensions.
 *
 * Each extension must return a two-element array: [unary, binary].
 */
protected function initOperators()
{
    $this->unaryOperators = array();
    $this->binaryOperators = array();
    foreach ($this->getExtensions() as $extension) {
        $operators = $extension->getOperators();

        if (!$operators) {
            continue;
        }

        if (2 !== count($operators)) {
            throw new InvalidArgumentException(sprintf('"%s::getOperators()" does not return a valid operators array.', get_class($extension)));
        }

        $this->unaryOperators = array_merge($this->unaryOperators, $operators[0]);
        $this->binaryOperators = array_merge($this->binaryOperators, $operators[1]);
    }
}

/**
 * Atomically writes a compiled template to the cache.
 *
 * Writes to a temp file in the target directory first, then renames it
 * into place (with a copy+unlink fallback for platforms where rename
 * cannot overwrite), so readers never observe a partially written file.
 *
 * @param string $file    Target cache file path
 * @param string $content Compiled PHP source to store
 */
protected function writeCacheFile($file, $content)
{
    $dir = dirname($file);
    if (!is_dir($dir)) {
        if (false === @mkdir($dir, 0777, true) && !is_dir($dir)) {
            throw new RuntimeException(sprintf("Unable to create the cache directory (%s).", $dir));
        }
    } elseif (!is_writable($dir)) {
        throw new RuntimeException(sprintf("Unable to write in the cache directory (%s).", $dir));
    }

    $tmpFile = tempnam(dirname($file), basename($file));
    if (false !== @file_put_contents($tmpFile, $content)) {
        // rename does not work on Win32 before 5.2.6
        if (@rename($tmpFile, $file) || (@copy($tmpFile, $file) && unlink($tmpFile))) {
            @chmod($file, 0644);

            return;
        }
    }

    throw new Twig_Error_Runtime(sprintf('Failed to write cache file "%s".', $file));
}
}
// Copyright 2015 The rkt Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package testutils import ( "crypto/sha1" "fmt" "io" "os" "path/filepath" "github.com/coreos/rkt/pkg/group" "github.com/hashicorp/errwrap" ) const casDbPerm = os.FileMode(0660) var ( // dirs relative to data directory dirs = map[string]os.FileMode{ ".": os.FileMode(0750 | os.ModeSetgid), "tmp": os.FileMode(0750 | os.ModeSetgid), // Cas directories. // Please keep in sync with dist/init/systemd/tmpfiles.d/rkt.conf // Make sure 'rkt' group can read/write some of the 'cas' // directories so that users in the group can fetch images "cas": os.FileMode(0770 | os.ModeSetgid), "cas/db": os.FileMode(0770 | os.ModeSetgid), "cas/imagelocks": os.FileMode(0770 | os.ModeSetgid), "cas/imageManifest": os.FileMode(0770 | os.ModeSetgid), "cas/blob": os.FileMode(0770 | os.ModeSetgid), "cas/tmp": os.FileMode(0770 | os.ModeSetgid), "cas/tree": os.FileMode(0700 | os.ModeSetgid), "cas/treestorelocks": os.FileMode(0700 | os.ModeSetgid), "locks": os.FileMode(0750 | os.ModeSetgid), // Pods directories. 
"pods": os.FileMode(0750 | os.ModeSetgid), "pods/embryo": os.FileMode(0750 | os.ModeSetgid), "pods/prepare": os.FileMode(0750 | os.ModeSetgid), "pods/prepared": os.FileMode(0750 | os.ModeSetgid), "pods/run": os.FileMode(0750 | os.ModeSetgid), "pods/exited-garbage": os.FileMode(0750 | os.ModeSetgid), "pods/garbage": os.FileMode(0750 | os.ModeSetgid), } ) func createFileWithPermissions(path string, uid int, gid int, perm os.FileMode) error { _, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666) if err != nil { if !os.IsExist(err) { return err } // file exists } return setPermissions(path, uid, gid, perm) } func setPermissions(path string, uid int, gid int, perm os.FileMode) error { if err := os.Chown(path, uid, gid); err != nil { return errwrap.Wrap(fmt.Errorf("error setting %q directory group", path), err) } if err := os.Chmod(path, perm); err != nil { return errwrap.Wrap(fmt.Errorf("error setting %q directory permissions", path), err) } return nil } func createDirStructure(dataDir string, gid int) error { for dir, perm := range dirs { path := filepath.Join(dataDir, dir) if err := os.MkdirAll(path, perm); err != nil { return errwrap.Wrap(fmt.Errorf("error creating %q directory", path), err) } if err := setPermissions(path, 0, gid, perm); err != nil { return err } } return nil } func setCasDbFilesPermissions(casDbPath string, gid int, perm os.FileMode) error { casDbWalker := func(path string, info os.FileInfo, err error) error { if err != nil { return err } if info.Mode().IsRegular() { if err := setPermissions(path, 0, gid, perm); err != nil { return err } } return nil } if err := filepath.Walk(casDbPath, casDbWalker); err != nil { return err } return nil } func createDbFiles(casDbPath string, gid int, perm os.FileMode) error { // HACK: to avoid some import cycles we don't use store.DbFilename DbFilename := "ql.db" dbPath := filepath.Join(casDbPath, DbFilename) if err := createFileWithPermissions(dbPath, 0, gid, perm); err != nil { return 
errwrap.Wrap(fmt.Errorf("error creating %s", dbPath), err) } // ql database uses a Write-Ahead Logging (WAL) file whose name is // generated from the sha1 hash of the database name h := sha1.New() io.WriteString(h, DbFilename) walFilename := fmt.Sprintf(".%x", h.Sum(nil)) walFilePath := filepath.Join(casDbPath, walFilename) if err := createFileWithPermissions(walFilePath, 0, gid, perm); err != nil { return errwrap.Wrap(fmt.Errorf("error creating %s", walFilename), err) } return nil } func setupDataDir(dataDir string) error { gid, err := group.LookupGid("rkt") if err != nil { return err } if err := createDirStructure(dataDir, gid); err != nil { return err } casDbPath := filepath.Join(dataDir, "cas", "db") if err := setCasDbFilesPermissions(casDbPath, gid, casDbPerm); err != nil { return err } if err := createDbFiles(casDbPath, gid, casDbPerm); err != nil { return err } return nil }
using System;
using System.Collections.Generic;
using System.Data;
using System.Runtime.CompilerServices;
using System.Xml;
using umbraco.DataLayer;
using umbraco.BusinessLogic;

namespace umbraco.cms.businesslogic.property
{
    /// <summary>
    /// A strongly typed collection of <see cref="Property"/> instances.
    /// Marker class only: it adds no members beyond what
    /// <see cref="List{T}"/> already provides.
    /// </summary>
    public class Properties : List<Property>
    {
    }
}
/* $Id: signal.c,v 1.110 2002/02/08 03:57:14 davem Exp $
 * linux/arch/sparc/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/smp.h>
#include <linux/binfmts.h>	/* do_coredump */
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/svr4.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>	/* flush_sig_insns */

/* Mask of signals a process may block (everything except KILL/STOP). */
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
		   void *fpqueue, unsigned long *fpqdepth);
extern void fpload(unsigned long *fpregs, unsigned long *fsr);

/* Signal frames: the original one (compatible with SunOS):
 *
 * Set up a signal frame... Make the stack look the way SunOS
 * expects it to look which is basically:
 *
 * ---------------------------------- <-- %sp at signal time
 * Struct sigcontext
 * Signal address
 * Ptr to sigcontext area above
 * Signal code
 * The signal number itself
 * One register window
 * ---------------------------------- <-- New %sp
 */
struct signal_sframe {
	struct reg_window sig_window;
	int sig_num;
	int sig_code;
	struct sigcontext __user *sig_scptr;
	int sig_address;
	struct sigcontext sig_context;
	unsigned int extramask[_NSIG_WORDS - 1];
};

/*
 * And the new one, intended to be used for Linux applications only
 * (we have enough in there to work with clone).
 * All the interesting bits are in the info field.
 */
struct new_signal_frame {
	struct sparc_stackf	ss;
	__siginfo_t		info;
	__siginfo_fpu_t __user	*fpu_save;
	unsigned long		insns[2] __attribute__ ((aligned (8)));
	unsigned int		extramask[_NSIG_WORDS - 1];
	unsigned int		extra_size; /* Should be 0 */
	__siginfo_fpu_t		fpu_state;
};

/* RT (siginfo-carrying) signal frame layout placed on the user stack. */
struct rt_signal_frame {
	struct sparc_stackf	ss;
	siginfo_t		info;
	struct pt_regs		regs;
	sigset_t		mask;
	__siginfo_fpu_t __user	*fpu_save;
	unsigned int		insns[2];
	stack_t			stack;
	unsigned int		extra_size; /* Should be 0 */
	__siginfo_fpu_t		fpu_state;
};

/* Align macros: round frame sizes up to an 8-byte boundary. */
#define SF_ALIGNEDSZ  (((sizeof(struct signal_sframe) + 7) & (~7)))
#define NF_ALIGNEDSZ  (((sizeof(struct new_signal_frame) + 7) & (~7)))
#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))

/* Shared body of sigpause/sigsuspend: atomically install the temporary
 * blocked mask, sleep until a signal arrives, and arrange for the saved
 * mask to be restored on the way out of the syscall.
 */
static int _sigpause_common(old_sigset_t set)
{
	set &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, set);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);

	return -ERESTARTNOHAND;
}

asmlinkage int sys_sigpause(unsigned int set)
{
	return _sigpause_common(set);
}

asmlinkage int sys_sigsuspend(old_sigset_t set)
{
	return _sigpause_common(set);
}

/* Reload the FPU context saved in a signal frame back into the thread
 * struct; also makes sure the live FPU state is discarded first so the
 * restored copy wins.  Returns non-zero on fault.
 */
static inline int
restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	int err;
#ifdef CONFIG_SMP
	if (test_tsk_thread_flag(current, TIF_USEDFPU))
		regs->psr &= ~PSR_EF;
#else
	if (current == last_task_used_math) {
		last_task_used_math = NULL;
		regs->psr &= ~PSR_EF;
	}
#endif
	set_used_math();
	clear_tsk_thread_flag(current, TIF_USEDFPU);

	if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu)))
		return -EFAULT;

	err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
			       (sizeof(unsigned long) * 32));
	err |= __get_user(current->thread.fsr, &fpu->si_fsr);
	err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
	if (current->thread.fpqdepth != 0)
		err |= __copy_from_user(&current->thread.fpqueue[0],
					&fpu->si_fpqueue[0],
					((sizeof(unsigned long) +
					(sizeof(unsigned long *)))*16));
	return err;
}

/* sigreturn path for the "new" (Linux-native) frame format: validate the
 * user frame, restore registers/FPU state/signal mask, SIGSEGV on any
 * inconsistency.
 */
static inline void do_new_sigreturn (struct pt_regs *regs)
{
	struct new_signal_frame __user *sf;
	unsigned long up_psr, pc, npc;
	sigset_t set;
	__siginfo_fpu_t __user *fpu_save;
	int err;

	sf = (struct new_signal_frame __user *) regs->u_regs[UREG_FP];

	/* 1. Make sure we are not getting garbage from the user */
	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
		goto segv_and_exit;

	if (((unsigned long) sf) & 3)
		goto segv_and_exit;

	err = __get_user(pc,  &sf->info.si_regs.pc);
	err |= __get_user(npc, &sf->info.si_regs.npc);

	if ((pc | npc) & 3)
		goto segv_and_exit;

	/* 2. Restore the state */
	up_psr = regs->psr;
	err |= __copy_from_user(regs, &sf->info.si_regs, sizeof(struct pt_regs));

	/* User can only change condition codes and FPU enabling in %psr. */
	regs->psr = (up_psr & ~(PSR_ICC | PSR_EF))
		  | (regs->psr & (PSR_ICC | PSR_EF));

	err |= __get_user(fpu_save, &sf->fpu_save);

	if (fpu_save)
		err |= restore_fpu_state(regs, fpu_save);

	/* This is pretty much atomic, no amount of locking would prevent
	 * the races which exist anyways.
	 */
	err |= __get_user(set.sig[0], &sf->info.si_mask);
	err |= __copy_from_user(&set.sig[1], &sf->extramask,
			        (_NSIG_WORDS-1) * sizeof(unsigned int));

	if (err)
		goto segv_and_exit;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;

segv_and_exit:
	force_sig(SIGSEGV, current);
}

/* Top-level sigreturn entry: dispatches to the new-format path for
 * Linux-native tasks, otherwise restores the SunOS-compatible sigcontext
 * frame pointed to by %i0.
 */
asmlinkage void do_sigreturn(struct pt_regs *regs)
{
	struct sigcontext __user *scptr;
	unsigned long pc, npc, psr;
	sigset_t set;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack();

	if (current->thread.new_signal) {
		do_new_sigreturn(regs);
		return;
	}

	scptr = (struct sigcontext __user *) regs->u_regs[UREG_I0];

	/* Check sanity of the user arg. */
	if (!access_ok(VERIFY_READ, scptr, sizeof(struct sigcontext)) ||
	    (((unsigned long) scptr) & 3))
		goto segv_and_exit;

	err = __get_user(pc, &scptr->sigc_pc);
	err |= __get_user(npc, &scptr->sigc_npc);

	if ((pc | npc) & 3)
		goto segv_and_exit;

	/* This is pretty much atomic, no amount of locking would prevent
	 * the races which exist anyways.
	 */
	err |= __get_user(set.sig[0], &scptr->sigc_mask);
	/* Note that scptr + 1 points to extramask */
	err |= __copy_from_user(&set.sig[1], scptr + 1,
				(_NSIG_WORDS - 1) * sizeof(unsigned int));

	if (err)
		goto segv_and_exit;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	regs->pc = pc;
	regs->npc = npc;

	err = __get_user(regs->u_regs[UREG_FP], &scptr->sigc_sp);
	err |= __get_user(regs->u_regs[UREG_I0], &scptr->sigc_o0);
	err |= __get_user(regs->u_regs[UREG_G1], &scptr->sigc_g1);

	/* User can only change condition codes in %psr. */
	err |= __get_user(psr, &scptr->sigc_psr);
	if (err)
		goto segv_and_exit;

	regs->psr &= ~(PSR_ICC);
	regs->psr |= (psr & PSR_ICC);
	return;

segv_and_exit:
	force_sig(SIGSEGV, current);
}

/* rt_sigreturn: restore the RT frame (registers, FPU state, full sigset,
 * and the sigaltstack settings saved at delivery time).
 */
asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned int psr, pc, npc;
	__siginfo_fpu_t __user *fpu_save;
	mm_segment_t old_fs;
	sigset_t set;
	stack_t st;
	int err;

	synchronize_user_stack();
	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
	    (((unsigned long) sf) & 0x03))
		goto segv;

	err = __get_user(pc, &sf->regs.pc);
	err |= __get_user(npc, &sf->regs.npc);
	err |= ((pc | npc) & 0x03);

	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(psr, &sf->regs.psr);

	err |= __copy_from_user(&regs->u_regs[UREG_G1],
				&sf->regs.u_regs[UREG_G1], 15 * sizeof(u32));

	regs->psr = (regs->psr & ~PSR_ICC) | (psr & PSR_ICC);

	err |= __get_user(fpu_save, &sf->fpu_save);

	if (fpu_save)
		err |= restore_fpu_state(regs, fpu_save);
	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));

	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));

	if (err)
		goto segv;

	regs->pc = pc;
	regs->npc = npc;

	/* It is more difficult to avoid calling this function than to
	 * call it and ignore errors.
	 */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
	set_fs(old_fs);

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;
segv:
	force_sig(SIGSEGV, current);
}

/* Checks if the fp is valid: must be 8-byte aligned, within the user
 * address space, and (on sun4/sun4c) outside the unmapped hole.
 */
static inline int invalid_frame_pointer(void __user *fp, int fplen)
{
	if ((((unsigned long) fp) & 7) ||
	    !__access_ok((unsigned long)fp, fplen) ||
	    ((sparc_cpu_model == sun4 || sparc_cpu_model == sun4c) &&
	     ((unsigned long) fp < 0xe0000000 && (unsigned long) fp >= 0x20000000)))
		return 1;

	return 0;
}

/* Pick the stack address for a new signal frame, honouring SA_ONSTACK
 * (switch to the alternate signal stack when not already on it).
 */
static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];

	/* This is the X/Open sanctioned signal stack switching. */
	if (sa->sa_flags & SA_ONSTACK) {
		if (!on_sig_stack(sp) && !((current->sas_ss_sp + current->sas_ss_size) & 7))
			sp = current->sas_ss_sp + current->sas_ss_size;
	}
	return (void __user *)(sp - framesize);
}

/* Build the old SunOS-compatible signal frame on the user stack and
 * redirect the task to the handler.  The big switch translates Linux
 * si_code values into SunOS SUBSIG_* codes.
 */
static inline void
setup_frame(struct sigaction *sa, struct pt_regs *regs,
	    int signr, sigset_t *oldset, siginfo_t *info)
{
	struct signal_sframe __user *sframep;
	struct sigcontext __user *sc;
	int window = 0, err;
	unsigned long pc = regs->pc;
	unsigned long npc = regs->npc;
	struct thread_info *tp = current_thread_info();
	void __user *sig_address;
	int sig_code;

	synchronize_user_stack();
	sframep = (struct signal_sframe __user *)
		get_sigframe(sa, regs, SF_ALIGNEDSZ);
	if (invalid_frame_pointer(sframep, sizeof(*sframep))){
		/* Don't change signal code and address, so that
		 * post mortem debuggers can have a look.
		 */
		goto sigill_and_return;
	}

	sc = &sframep->sig_context;

	/* We've already made sure frame pointer isn't in kernel space... */
	err = __put_user((sas_ss_flags(regs->u_regs[UREG_FP]) == SS_ONSTACK),
			 &sc->sigc_onstack);
	err |= __put_user(oldset->sig[0], &sc->sigc_mask);
	err |= __copy_to_user(sframep->extramask, &oldset->sig[1],
			      (_NSIG_WORDS - 1) * sizeof(unsigned int));
	err |= __put_user(regs->u_regs[UREG_FP], &sc->sigc_sp);
	err |= __put_user(pc, &sc->sigc_pc);
	err |= __put_user(npc, &sc->sigc_npc);
	err |= __put_user(regs->psr, &sc->sigc_psr);
	err |= __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
	err |= __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
	err |= __put_user(tp->w_saved, &sc->sigc_oswins);
	if (tp->w_saved)
		for (window = 0; window < tp->w_saved; window++) {
			put_user((char *)tp->rwbuf_stkptrs[window],
				 &sc->sigc_spbuf[window]);
			err |= __copy_to_user(&sc->sigc_wbuf[window],
					      &tp->reg_window[window],
					      sizeof(struct reg_window));
		}
	else
		err |= __copy_to_user(sframep, (char *) regs->u_regs[UREG_FP],
				      sizeof(struct reg_window));

	tp->w_saved = 0; /* So process is allowed to execute. */

	err |= __put_user(signr, &sframep->sig_num);
	sig_address = NULL;
	sig_code = 0;
	/* Translate kernel fault si_codes into SunOS sub-signal codes. */
	if (SI_FROMKERNEL (info) && (info->si_code & __SI_MASK) == __SI_FAULT) {
		sig_address = info->si_addr;
		switch (signr) {
		case SIGSEGV:
			switch (info->si_code) {
			case SEGV_MAPERR: sig_code = SUBSIG_NOMAPPING; break;
			default: sig_code = SUBSIG_PROTECTION; break;
			}
			break;
		case SIGILL:
			switch (info->si_code) {
			case ILL_ILLOPC: sig_code = SUBSIG_ILLINST; break;
			case ILL_PRVOPC: sig_code = SUBSIG_PRIVINST; break;
			case ILL_ILLTRP: sig_code = SUBSIG_BADTRAP(info->si_trapno); break;
			default: sig_code = SUBSIG_STACK; break;
			}
			break;
		case SIGFPE:
			switch (info->si_code) {
			case FPE_INTDIV: sig_code = SUBSIG_IDIVZERO; break;
			case FPE_INTOVF: sig_code = SUBSIG_FPINTOVFL; break;
			case FPE_FLTDIV: sig_code = SUBSIG_FPDIVZERO; break;
			case FPE_FLTOVF: sig_code = SUBSIG_FPOVFLOW; break;
			case FPE_FLTUND: sig_code = SUBSIG_FPUNFLOW; break;
			case FPE_FLTRES: sig_code = SUBSIG_FPINEXACT; break;
			case FPE_FLTINV: sig_code = SUBSIG_FPOPERROR; break;
			default: sig_code = SUBSIG_FPERROR; break;
			}
			break;
		case SIGBUS:
			switch (info->si_code) {
			case BUS_ADRALN: sig_code = SUBSIG_ALIGNMENT; break;
			case BUS_ADRERR: sig_code = SUBSIG_MISCERROR; break;
			default: sig_code = SUBSIG_BUSTIMEOUT; break;
			}
			break;
		case SIGEMT:
			switch (info->si_code) {
			case EMT_TAGOVF: sig_code = SUBSIG_TAG; break;
			}
			break;
		case SIGSYS:
			if (info->si_code == (__SI_FAULT|0x100)) {
				/* See sys_sunos.c */
				sig_code = info->si_trapno;
				break;
			}
			/* fallthrough */
		default:
			sig_address = NULL;
		}
	}
	err |= __put_user((unsigned long)sig_address, &sframep->sig_address);
	err |= __put_user(sig_code, &sframep->sig_code);
	err |= __put_user(sc, &sframep->sig_scptr);
	if (err)
		goto sigsegv;

	regs->u_regs[UREG_FP] = (unsigned long) sframep;
	regs->pc = (unsigned long) sa->sa_handler;
	regs->npc = (regs->pc + 4);
	return;

sigill_and_return:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signr, current);
}

/* Dump the live FPU context into a signal frame and mark the FPU unused
 * so the handler starts with a clean FPU.  Returns non-zero on fault.
 */
static inline int
save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	int err = 0;
#ifdef CONFIG_SMP
	if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		regs->psr &= ~(PSR_EF);
		clear_tsk_thread_flag(current, TIF_USEDFPU);
	}
#else
	if (current == last_task_used_math) {
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		last_task_used_math = NULL;
		regs->psr &= ~(PSR_EF);
	}
#endif
	err |= __copy_to_user(&fpu->si_float_regs[0],
			      &current->thread.float_regs[0],
			      (sizeof(unsigned long) * 32));
	err |= __put_user(current->thread.fsr, &fpu->si_fsr);
	err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
	if (current->thread.fpqdepth != 0)
		err |= __copy_to_user(&fpu->si_fpqueue[0],
				      &current->thread.fpqueue[0],
				      ((sizeof(unsigned long) +
				      (sizeof(unsigned long *)))*16));
	clear_used_math();
	return err;
}

/* Build the Linux-native signal frame (optionally with FPU state and an
 * on-stack sigreturn trampoline) and redirect the task to the handler.
 */
static inline void
new_setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
		int signo, sigset_t *oldset)
{
	struct new_signal_frame __user *sf;
	int sigframe_size, err;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();

	sigframe_size = NF_ALIGNEDSZ;
	if (!used_math())
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct new_signal_frame __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);

	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill_and_return;

	if (current_thread_info()->w_saved != 0)
		goto sigill_and_return;

	/* 2. Save the current process state */
	err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs));

	err |= __put_user(0, &sf->extra_size);

	if (used_math()) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user(&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	err |= __put_user(oldset->sig[0], &sf->info.si_mask);
	err |= __copy_to_user(sf->extramask, &oldset->sig[1],
			      (_NSIG_WORDS - 1) * sizeof(unsigned int));
	err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
			      sizeof(struct reg_window));
	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 4. signal handler */
	regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->npc = (regs->pc + 4);

	/* 5. return to kernel instructions */
	if (ka->ka_restorer)
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	else {
		regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);

		/* mov __NR_sigreturn, %g1 */
		err |= __put_user(0x821020d8, &sf->insns[0]);

		/* t 0x10 */
		err |= __put_user(0x91d02010, &sf->insns[1]);
		if (err)
			goto sigsegv;

		/* Flush instruction space. */
		flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
	}
	return;

sigill_and_return:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}

/* Build the RT signal frame (full siginfo, sigaltstack info, full sigset)
 * and redirect the task to the handler.
 */
static inline void
new_setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
		   int signo, sigset_t *oldset, siginfo_t *info)
{
	struct rt_signal_frame __user *sf;
	int sigframe_size;
	unsigned int psr;
	int err;

	synchronize_user_stack();
	sigframe_size = RT_ALIGNEDSZ;
	if (!used_math())
		sigframe_size -= sizeof(__siginfo_fpu_t);
	sf = (struct rt_signal_frame __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);
	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill;
	if (current_thread_info()->w_saved != 0)
		goto sigill;

	err  = __put_user(regs->pc, &sf->regs.pc);
	err |= __put_user(regs->npc, &sf->regs.npc);
	err |= __put_user(regs->y, &sf->regs.y);
	psr = regs->psr;
	if (used_math())
		psr |= PSR_EF;
	err |= __put_user(psr, &sf->regs.psr);
	err |= __copy_to_user(&sf->regs.u_regs, regs->u_regs, sizeof(regs->u_regs));
	err |= __put_user(0, &sf->extra_size);

	if (psr & PSR_EF) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user(&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}
	err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t));

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

	err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
			      sizeof(struct reg_window));

	err |= copy_siginfo_to_user(&sf->info, info);

	if (err)
		goto sigsegv;

	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;

	regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->npc = (regs->pc + 4);

	if (ka->ka_restorer)
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	else {
		regs->u_regs[UREG_I7] = (unsigned
long)(&(sf->insns[0]) - 2); /* mov __NR_sigreturn, %g1 */ err |= __put_user(0x821020d8, &sf->insns[0]); /* t 0x10 */ err |= __put_user(0x91d02010, &sf->insns[1]); if (err) goto sigsegv; /* Flush instruction space. */ flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); } return; sigill: do_exit(SIGILL); sigsegv: force_sigsegv(signo, current); } /* Setup a Solaris stack frame */ static inline void setup_svr4_frame(struct sigaction *sa, unsigned long pc, unsigned long npc, struct pt_regs *regs, int signr, sigset_t *oldset) { svr4_signal_frame_t __user *sfp; svr4_gregset_t __user *gr; svr4_siginfo_t __user *si; svr4_mcontext_t __user *mc; svr4_gwindows_t __user *gw; svr4_ucontext_t __user *uc; svr4_sigset_t setv; struct thread_info *tp = current_thread_info(); int window = 0, err; synchronize_user_stack(); sfp = (svr4_signal_frame_t __user *) get_sigframe(sa, regs, SVR4_SF_ALIGNED + sizeof(struct reg_window)); if (invalid_frame_pointer(sfp, sizeof(*sfp))) goto sigill_and_return; /* Start with a clean frame pointer and fill it */ err = __clear_user(sfp, sizeof(*sfp)); /* Setup convenience variables */ si = &sfp->si; uc = &sfp->uc; gw = &sfp->gw; mc = &uc->mcontext; gr = &mc->greg; /* FIXME: where am I supposed to put this? * sc->sigc_onstack = old_status; * anyways, it does not look like it is used for anything at all. 
*/ setv.sigbits[0] = oldset->sig[0]; setv.sigbits[1] = oldset->sig[1]; if (_NSIG_WORDS >= 4) { setv.sigbits[2] = oldset->sig[2]; setv.sigbits[3] = oldset->sig[3]; err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t)); } else err |= __copy_to_user(&uc->sigmask, &setv, 2 * sizeof(unsigned int)); /* Store registers */ err |= __put_user(regs->pc, &((*gr)[SVR4_PC])); err |= __put_user(regs->npc, &((*gr)[SVR4_NPC])); err |= __put_user(regs->psr, &((*gr)[SVR4_PSR])); err |= __put_user(regs->y, &((*gr)[SVR4_Y])); /* Copy g[1..7] and o[0..7] registers */ err |= __copy_to_user(&(*gr)[SVR4_G1], &regs->u_regs[UREG_G1], sizeof(long) * 7); err |= __copy_to_user(&(*gr)[SVR4_O0], &regs->u_regs[UREG_I0], sizeof(long) * 8); /* Setup sigaltstack */ err |= __put_user(current->sas_ss_sp, &uc->stack.sp); err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags); err |= __put_user(current->sas_ss_size, &uc->stack.size); /* Save the currently window file: */ /* 1. Link sfp->uc->gwins to our windows */ err |= __put_user(gw, &mc->gwin); /* 2. Number of windows to restore at setcontext(): */ err |= __put_user(tp->w_saved, &gw->count); /* 3. Save each valid window * Currently, it makes a copy of the windows from the kernel copy. * David's code for SunOS, makes the copy but keeps the pointer to * the kernel. My version makes the pointer point to a userland * copy of those. Mhm, I wonder if I shouldn't just ignore those * on setcontext and use those that are on the kernel, the signal * handler should not be modyfing those, mhm. * * These windows are just used in case synchronize_user_stack failed * to flush the user windows. */ for (window = 0; window < tp->w_saved; window++) { err |= __put_user((int __user *) &(gw->win[window]), &gw->winptr[window]); err |= __copy_to_user(&gw->win[window], &tp->reg_window[window], sizeof(svr4_rwindow_t)); err |= __put_user(0, gw->winptr[window]); } /* 4. 
We just pay attention to the gw->count field on setcontext */ tp->w_saved = 0; /* So process is allowed to execute. */ /* Setup the signal information. Solaris expects a bunch of * information to be passed to the signal handler, we don't provide * that much currently, should use siginfo. */ err |= __put_user(signr, &si->siginfo.signo); err |= __put_user(SVR4_SINOINFO, &si->siginfo.code); if (err) goto sigsegv; regs->u_regs[UREG_FP] = (unsigned long) sfp; regs->pc = (unsigned long) sa->sa_handler; regs->npc = (regs->pc + 4); /* Arguments passed to signal handler */ if (regs->u_regs[14]){ struct reg_window __user *rw = (struct reg_window __user *) regs->u_regs[14]; err |= __put_user(signr, &rw->ins[0]); err |= __put_user(si, &rw->ins[1]); err |= __put_user(uc, &rw->ins[2]); err |= __put_user(sfp, &rw->ins[6]); /* frame pointer */ if (err) goto sigsegv; regs->u_regs[UREG_I0] = signr; regs->u_regs[UREG_I1] = (unsigned long) si; regs->u_regs[UREG_I2] = (unsigned long) uc; } return; sigill_and_return: do_exit(SIGILL); sigsegv: force_sigsegv(signr, current); } asmlinkage int svr4_getcontext(svr4_ucontext_t __user *uc, struct pt_regs *regs) { svr4_gregset_t __user *gr; svr4_mcontext_t __user *mc; svr4_sigset_t setv; int err = 0; synchronize_user_stack(); if (current_thread_info()->w_saved) return -EFAULT; err = clear_user(uc, sizeof(*uc)); if (err) return -EFAULT; /* Setup convenience variables */ mc = &uc->mcontext; gr = &mc->greg; setv.sigbits[0] = current->blocked.sig[0]; setv.sigbits[1] = current->blocked.sig[1]; if (_NSIG_WORDS >= 4) { setv.sigbits[2] = current->blocked.sig[2]; setv.sigbits[3] = current->blocked.sig[3]; err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t)); } else err |= __copy_to_user(&uc->sigmask, &setv, 2 * sizeof(unsigned int)); /* Store registers */ err |= __put_user(regs->pc, &uc->mcontext.greg[SVR4_PC]); err |= __put_user(regs->npc, &uc->mcontext.greg[SVR4_NPC]); err |= __put_user(regs->psr, &uc->mcontext.greg[SVR4_PSR]); err |= 
__put_user(regs->y, &uc->mcontext.greg[SVR4_Y]); /* Copy g[1..7] and o[0..7] registers */ err |= __copy_to_user(&(*gr)[SVR4_G1], &regs->u_regs[UREG_G1], sizeof(uint) * 7); err |= __copy_to_user(&(*gr)[SVR4_O0], &regs->u_regs[UREG_I0], sizeof(uint) * 8); /* Setup sigaltstack */ err |= __put_user(current->sas_ss_sp, &uc->stack.sp); err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags); err |= __put_user(current->sas_ss_size, &uc->stack.size); /* The register file is not saved * we have already stuffed all of it with sync_user_stack */ return (err ? -EFAULT : 0); } /* Set the context for a svr4 application, this is Solaris way to sigreturn */ asmlinkage int svr4_setcontext(svr4_ucontext_t __user *c, struct pt_regs *regs) { svr4_gregset_t __user *gr; unsigned long pc, npc, psr; mm_segment_t old_fs; sigset_t set; svr4_sigset_t setv; int err; stack_t st; /* Fixme: restore windows, or is this already taken care of in * svr4_setup_frame when sync_user_windows is done? */ flush_user_windows(); if (current_thread_info()->w_saved) goto sigsegv_and_return; if (((unsigned long) c) & 3) goto sigsegv_and_return; if (!__access_ok((unsigned long)c, sizeof(*c))) goto sigsegv_and_return; /* Check for valid PC and nPC */ gr = &c->mcontext.greg; err = __get_user(pc, &((*gr)[SVR4_PC])); err |= __get_user(npc, &((*gr)[SVR4_NPC])); if ((pc | npc) & 3) goto sigsegv_and_return; /* Retrieve information from passed ucontext */ /* note that nPC is ored a 1, this is used to inform entry.S */ /* that we don't want it to mess with our PC and nPC */ /* This is pretty much atomic, no amount locking would prevent * the races which exist anyways. */ err |= __copy_from_user(&setv, &c->sigmask, sizeof(svr4_sigset_t)); err |= __get_user(st.ss_sp, &c->stack.sp); err |= __get_user(st.ss_flags, &c->stack.flags); err |= __get_user(st.ss_size, &c->stack.size); if (err) goto sigsegv_and_return; /* It is more difficult to avoid calling this function than to call it and ignore errors. 
*/ old_fs = get_fs(); set_fs(KERNEL_DS); do_sigaltstack((const stack_t __user *) &st, NULL, regs->u_regs[UREG_I6]); set_fs(old_fs); set.sig[0] = setv.sigbits[0]; set.sig[1] = setv.sigbits[1]; if (_NSIG_WORDS >= 4) { set.sig[2] = setv.sigbits[2]; set.sig[3] = setv.sigbits[3]; } sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); regs->pc = pc; regs->npc = npc | 1; err |= __get_user(regs->y, &((*gr)[SVR4_Y])); err |= __get_user(psr, &((*gr)[SVR4_PSR])); regs->psr &= ~(PSR_ICC); regs->psr |= (psr & PSR_ICC); /* Restore g[1..7] and o[0..7] registers */ err |= __copy_from_user(&regs->u_regs[UREG_G1], &(*gr)[SVR4_G1], sizeof(long) * 7); err |= __copy_from_user(&regs->u_regs[UREG_I0], &(*gr)[SVR4_O0], sizeof(long) * 8); return (err ? -EFAULT : 0); sigsegv_and_return: force_sig(SIGSEGV, current); return -EFAULT; } static inline void handle_signal(unsigned long signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs, int svr4_signal) { if (svr4_signal) setup_svr4_frame(&ka->sa, regs->pc, regs->npc, regs, signr, oldset); else { if (ka->sa.sa_flags & SA_SIGINFO) new_setup_rt_frame(ka, regs, signr, oldset, info); else if (current->thread.new_signal) new_setup_frame(ka, regs, signr, oldset); else setup_frame(&ka->sa, regs, signr, oldset, info); } spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NOMASK)) sigaddset(&current->blocked, signr); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); } static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, struct sigaction *sa) { switch(regs->u_regs[UREG_I0]) { case ERESTART_RESTARTBLOCK: case ERESTARTNOHAND: no_system_call_restart: regs->u_regs[UREG_I0] = EINTR; regs->psr |= PSR_C; break; case ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; /* 
fallthrough */ case ERESTARTNOINTR: regs->u_regs[UREG_I0] = orig_i0; regs->pc -= 4; regs->npc -= 4; } } /* Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ asmlinkage void do_signal(struct pt_regs * regs, unsigned long orig_i0, int restart_syscall) { siginfo_t info; struct sparc_deliver_cookie cookie; struct k_sigaction ka; int signr; sigset_t *oldset; /* * XXX Disable svr4 signal handling until solaris emulation works. * It is buggy - Anton */ #define SVR4_SIGNAL_BROKEN 1 #ifdef SVR4_SIGNAL_BROKEN int svr4_signal = 0; #else int svr4_signal = current->personality == PER_SVR4; #endif cookie.restart_syscall = restart_syscall; cookie.orig_i0 = orig_i0; if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, &cookie); if (signr > 0) { if (cookie.restart_syscall) syscall_restart(cookie.orig_i0, regs, &ka.sa); handle_signal(signr, &ka, &info, oldset, regs, svr4_signal); /* a signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply * clear the TIF_RESTORE_SIGMASK flag. 
*/ if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); return; } if (cookie.restart_syscall && (regs->u_regs[UREG_I0] == ERESTARTNOHAND || regs->u_regs[UREG_I0] == ERESTARTSYS || regs->u_regs[UREG_I0] == ERESTARTNOINTR)) { /* replay the system call when we are done */ regs->u_regs[UREG_I0] = cookie.orig_i0; regs->pc -= 4; regs->npc -= 4; } if (cookie.restart_syscall && regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->pc -= 4; regs->npc -= 4; } /* if there's no signal to deliver, we just put the saved sigmask * back */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } } asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr, struct sigstack __user *ossptr, unsigned long sp) { int ret = -EFAULT; /* First see if old state is wanted. */ if (ossptr) { if (put_user(current->sas_ss_sp + current->sas_ss_size, &ossptr->the_stack) || __put_user(on_sig_stack(sp), &ossptr->cur_status)) goto out; } /* Now see if we want to update the new state. */ if (ssptr) { char *ss_sp; if (get_user(ss_sp, &ssptr->the_stack)) goto out; /* If the current stack was set with sigaltstack, don't swap stacks while we are on it. */ ret = -EPERM; if (current->sas_ss_sp && on_sig_stack(sp)) goto out; /* Since we don't know the extent of the stack, and we don't track onstack-ness, but rather calculate it, we must presume a size. Ho hum this interface is lossy. 
	 */
		current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
		current->sas_ss_size = SIGSTKSZ;
	}
	ret = 0;
out:
	return ret;
}

/*
 * Undo/complete any pending system-call restart bookkeeping carried in
 * the sparc_deliver_cookie before a signal is delivered to a traced task.
 *
 * If the syscall returned one of the restartable error codes, rewind
 * pc/npc by one instruction (4 bytes) so the trap is re-executed, and
 * clear restart_syscall so the rewind happens at most once.  For
 * ERESTART_RESTARTBLOCK the syscall number in %g1 is additionally
 * redirected to sys_restart_syscall.
 */
void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
{
	struct sparc_deliver_cookie *cp = cookie;

	if (cp->restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = cp->orig_i0;
		regs->pc -= 4;
		regs->npc -= 4;
		cp->restart_syscall = 0;
	}

	if (cp->restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->pc -= 4;
		regs->npc -= 4;
		cp->restart_syscall = 0;
	}
}
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package registrytest provides tests for Registry implementations
// for storing Minions, Pods, Schedulers and Services.
package registrytest // import "k8s.io/kubernetes/pkg/registry/registrytest"
/*
 * timer.h
 *
 * Created on: 2012-4-25
 * Author: Benn Huang (benn@allwinnertech.com)
 */

#ifndef TIMER_H_
#define TIMER_H_

/*
 * Timer
 *
 * Register byte offsets from the timer block base.  The per-register
 * comments below expand the macro identifiers; NOTE(review): verify the
 * exact semantics against the Allwinner SoC user manual.
 */
#define AW_TMR_IRQ_EN_REG	0x0000	/* timer IRQ enable */
#define AW_TMR_IRQ_STA_REG	0x0004	/* timer IRQ status */
#define AW_TMR0_CTRL_REG	0x0010	/* timer 0 control */
#define AW_TMR0_INTV_VALUE_REG	0x0014	/* timer 0 interval value */
#define AW_TMR0_CUR_VALUE_REG	0x0018	/* timer 0 current value */

#define AW_AVS_CNT_CTL_REG	0x0080	/* AVS counter control */
#define AW_AVS_CNT0_REG		0x0084	/* AVS counter 0 */
#define AW_AVS_CNT1_REG		0x0088	/* AVS counter 1 */
#define AW_AVS_CNT_DIV_REG	0x008c	/* AVS counter divider */

#endif /* TIMER_H_ */
/*! UIkit 2.24.2 | http://www.getuikit.com | (c) 2014 YOOtheme | MIT License */
/* ========================================================================
   Component: Nestable
 ========================================================================== */
.uk-nestable {
  padding: 0;
  list-style: none;
}
/*
 * Disables the default callout shown when you touch and hold a touch target
 * Currently only works in Webkit
 */
.uk-nestable a,
.uk-nestable img {
  -webkit-touch-callout: none;
}
/* Sub-object `uk-nestable-list`
 ========================================================================== */
.uk-nestable-list {
  margin: 0;
  padding-left: 40px;
  list-style: none;
}
/* Sub-modifier `uk-nestable-item`
 ========================================================================== */
/*
 * 1. Deactivate browser touch actions in IE11
 */
.uk-nestable-item {
  /* 1 */
  touch-action: none;
}
.uk-nestable-item + .uk-nestable-item {
  margin-top: 10px;
}
.uk-nestable-list:not(.uk-nestable-dragged) > .uk-nestable-item:first-child {
  margin-top: 10px;
}
/* Sub-modifier `uk-nestable-dragged`
 ========================================================================== */
/*
 * 1. Reset style
 */
.uk-nestable-dragged {
  position: absolute;
  z-index: 1050;
  pointer-events: none;
  /* 1 */
  padding-left: 0;
}
/* Sub-modifier `uk-nestable-placeholder`
 ========================================================================== */
.uk-nestable-placeholder {
  position: relative;
}
.uk-nestable-placeholder > * {
  opacity: 0;
}
/* Dashed outline marking the drop target while dragging */
.uk-nestable-placeholder:after {
  content: '';
  position: absolute;
  top: 0;
  bottom: 0;
  left: 0;
  right: 0;
  border: 1px dashed #dddddd;
  opacity: 1;
}
/* Empty List
 ========================================================================== */
.uk-nestable-empty {
  min-height: 30px;
}
/* Sub-object `uk-nestable-handle`
 ========================================================================== */
/*
 * Deactivate browser touch actions in IE11
 */
.uk-nestable-handle {
  touch-action: none;
}
/* Hover */
.uk-nestable-handle:hover {
  cursor: move;
}
/* Sub-object `uk-nestable-moving`
 ========================================================================== */
.uk-nestable-moving,
.uk-nestable-moving * {
  cursor: move;
}
/* [data-nestable-action='toggle']
 ========================================================================== */
/*
 * 1. Makes text unselectable. Happens if double clicked by mistake
 */
[data-nestable-action='toggle'] {
  cursor: pointer;
  /* 1 */
  -moz-user-select: none;
  -webkit-user-select: none;
  -ms-user-select: none;
  user-select: none;
}
/* Sub-object `.uk-nestable-toggle`
 ========================================================================== */
.uk-nestable-toggle {
  display: inline-block;
  visibility: hidden;
}
/* FontAwesome glyphs for the expand/collapse indicator */
.uk-nestable-toggle:after {
  content: "\f147";
  font-family: FontAwesome;
}
.uk-parent > :not(.uk-nestable-list) .uk-nestable-toggle {
  visibility: visible;
}
/*
 * Collapsed
 */
.uk-collapsed .uk-nestable-list {
  display: none;
}
.uk-collapsed .uk-nestable-toggle:after {
  content: "\f196";
}
/* Sub-object `uk-nestable-panel`
 ========================================================================== */
.uk-nestable-panel {
  padding: 5px;
  background: #f5f5f5;
}
<?php
/**
 * Hybrid_Providers_Rhapsody - Rhapsody provider adapter based on OAuth2 protocol
 */
class Hybrid_Providers_Rhapsody extends Hybrid_Provider_Model_OAuth2
{
	/**
	 * Initializer: point the generic OAuth2 client at Rhapsody's endpoints.
	 */
	function initialize()
	{
		parent::initialize();

		$this->api->api_base_url  = 'https://api.rhapsody.com/';
		$this->api->authorize_url = 'https://api.rhapsody.com/oauth/authorize';
		$this->api->token_url     = 'https://api.rhapsody.com/oauth/access_token';

		$this->api->curl_authenticate_method = "POST";
		$this->api->curl_useragent = "CWM";
	}

	/**
	 * Begin login step
	 */
	function loginBegin()
	{
		// redirect the user to the provider authentication url
		Hybrid_Auth::redirect($this->api->authorizeUrl());
	}

	/**
	 * Perform a GET request against the Rhapsody v1 API with Bearer access_token auth.
	 *
	 * @param string     $url    path relative to the v1 API root (no leading slash)
	 * @param array|null $params optional query parameters
	 * @return array|null decoded JSON response, or null on transport/decode failure
	 */
	function request($url, $params = null)
	{
		$ch = curl_init();
		if ($params) {
			$url = $url . ( strpos( $url, '?' ) ? '&' : '?' ) . http_build_query($params, '', '&');
		}
		curl_setopt($ch, CURLOPT_HEADER, 0);
		curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);
		curl_setopt($ch, CURLOPT_USERAGENT, $this->api->curl_useragent);
		curl_setopt($ch, CURLOPT_URL, $this->api->api_base_url . 'v1/' . $url);
		$headers = array('Authorization: Bearer ' . $this->api->access_token);
		curl_setopt($ch, CURLOPT_HTTPHEADER, $headers);

		$data = curl_exec($ch);
		curl_close($ch);

		// curl_exec() returns false on transport errors; previously that false
		// was fed straight into json_decode(), which silently yielded null.
		// Make the failure path explicit (same observable result for callers).
		if ($data === false) {
			return null;
		}

		return json_decode($data, true);
	}

	/**
	 * Returns artists from the user's library.
	 * @return array|null
	 */
	function getArtists()
	{
		return $this->request('me/library/artists');
	}

	/**
	 * Returns albums from the user's library.
	 * @return array|null
	 */
	function getAlbums()
	{
		return $this->request('me/library/albums');
	}

	/**
	 * Returns the user's favourite tracks.
	 * @return array|null
	 */
	function getFavourites()
	{
		return $this->request('me/favorites');
	}

	/**
	 * Returns the current user's playlists.
	 * @return array|null
	 */
	function getPlaylists()
	{
		return $this->request('me/playlists');
	}

	/**
	 * Returns the tracks of one of the current user's playlists.
	 * @param string $playlist_id
	 * @return array|null
	 */
	function getPlaylistTracks($playlist_id)
	{
		return $this->request('me/playlists/' . $playlist_id . '/tracks');
	}
}
<?php

/*
 * This file is part of the Symfony package.
 *
 * (c) Fabien Potencier <fabien@symfony.com>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

namespace Symfony\Component\Validator\Constraints;

use Symfony\Component\Validator\Context\ExecutionContextInterface;
use Symfony\Component\Validator\Constraint;
use Symfony\Component\Validator\ConstraintValidator;
use Symfony\Component\Validator\Exception\UnexpectedTypeException;

/**
 * Validates that a value is of a given primitive type, ctype class, or class.
 *
 * @author Bernhard Schussek <bschussek@gmail.com>
 *
 * @api
 */
class TypeValidator extends ConstraintValidator
{
    /**
     * {@inheritdoc}
     */
    public function validate($value, Constraint $constraint)
    {
        if (!$constraint instanceof Type) {
            throw new UnexpectedTypeException($constraint, __NAMESPACE__.'\Type');
        }

        // null is handled by the NotNull / NotBlank constraints, never here.
        if (null === $value) {
            return;
        }

        $type = strtolower($constraint->type);
        // Map the "boolean" alias onto "bool" so is_bool()/ctype lookups work.
        // FIX: keep the lowercased $type here; the previous code assigned
        // $constraint->type back, discarding strtolower() — which only worked
        // by accident because PHP function-name lookup is case-insensitive.
        $type = $type == 'boolean' ? 'bool' : $type;
        $isFunction = 'is_'.$type;
        $ctypeFunction = 'ctype_'.$type;

        // Accept the value if any of the three checks matches:
        // a native is_*() predicate, a ctype_*() predicate, or instanceof
        // (instanceof intentionally uses the original, case-preserved type,
        // since it may be a class name).
        if (function_exists($isFunction) && $isFunction($value)) {
            return;
        } elseif (function_exists($ctypeFunction) && $ctypeFunction($value)) {
            return;
        } elseif ($value instanceof $constraint->type) {
            return;
        }

        // Build the violation through whichever context API is available
        // (the 2.5+ ExecutionContextInterface, or the legacy BC path).
        if ($this->context instanceof ExecutionContextInterface) {
            $this->context->buildViolation($constraint->message)
                ->setParameter('{{ value }}', $this->formatValue($value))
                ->setParameter('{{ type }}', $constraint->type)
                ->setCode(Type::INVALID_TYPE_ERROR)
                ->addViolation();
        } else {
            $this->buildViolation($constraint->message)
                ->setParameter('{{ value }}', $this->formatValue($value))
                ->setParameter('{{ type }}', $constraint->type)
                ->setCode(Type::INVALID_TYPE_ERROR)
                ->addViolation();
        }
    }
}
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
 * Copyright (c) 2005 INRIA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Mathieu Lacage <mathieu.lacage@sophia.inria.fr>
 */
#ifndef IPV4_END_POINT_DEMUX_H
#define IPV4_END_POINT_DEMUX_H

#include <stdint.h>
#include <list>
#include "ns3/ipv4-address.h"
#include "ipv4-interface.h"

namespace ns3 {

class Ipv4EndPoint;

/**
 * \brief Demultiplexes packets to various transport layer endpoints
 *
 * This class serves as a lookup table to match partial or full information
 * about a four-tuple to an ns3::Ipv4EndPoint.  It internally contains a list
 * of endpoints, and has APIs to add and find endpoints in this demux.  This
 * code is shared in common to TCP and UDP protocols in ns3.  This demux
 * sits between ns3's layer four and the socket layer
 */
class Ipv4EndPointDemux {
public:
  /** Container (and iterator) type for the stored endpoints. */
  typedef std::list<Ipv4EndPoint *> EndPoints;
  typedef std::list<Ipv4EndPoint *>::iterator EndPointsI;

  Ipv4EndPointDemux ();
  ~Ipv4EndPointDemux ();

  /** \return the registered endpoints */
  EndPoints GetAllEndPoints (void);

  /** \return true if \p port is already in use as a local port */
  bool LookupPortLocal (uint16_t port);

  /** \return true if the local (\p addr, \p port) pair is already in use */
  bool LookupLocal (Ipv4Address addr, uint16_t port);

  /**
   * Look up the endpoints matching the given four-tuple, taking the
   * incoming interface into account.
   */
  EndPoints Lookup (Ipv4Address daddr,
                    uint16_t dport,
                    Ipv4Address saddr,
                    uint16_t sport,
                    Ptr<Ipv4Interface> incomingInterface);

  /** Look up a single endpoint for the given four-tuple. */
  Ipv4EndPoint *SimpleLookup (Ipv4Address daddr,
                              uint16_t dport,
                              Ipv4Address saddr,
                              uint16_t sport);

  /*
   * Allocate a new endpoint, binding progressively more of the
   * four-tuple (nothing / local address / local port / local
   * address+port / full local+peer pair).
   */
  Ipv4EndPoint *Allocate (void);
  Ipv4EndPoint *Allocate (Ipv4Address address);
  Ipv4EndPoint *Allocate (uint16_t port);
  Ipv4EndPoint *Allocate (Ipv4Address address, uint16_t port);
  Ipv4EndPoint *Allocate (Ipv4Address localAddress,
                          uint16_t localPort,
                          Ipv4Address peerAddress,
                          uint16_t peerPort);

  /** Remove \p endPoint from the demux. */
  void DeAllocate (Ipv4EndPoint *endPoint);

private:
  /** Pick an unused port for an unbound Allocate() call — see the .cc. */
  uint16_t AllocateEphemeralPort (void);

  uint16_t m_ephemeral;  //!< NOTE(review): presumably the last/next ephemeral port tried — confirm in the .cc
  uint16_t m_portLast;   //!< NOTE(review): presumably the upper bound of the ephemeral range — confirm in the .cc
  uint16_t m_portFirst;  //!< NOTE(review): presumably the lower bound of the ephemeral range — confirm in the .cc
  EndPoints m_endPoints; //!< the registered endpoints

};

} // namespace ns3

/* Fixed stale guard comment: the macro defined above is
 * IPV4_END_POINT_DEMUX_H, not IPV4_END_POINTS_H. */
#endif /* IPV4_END_POINT_DEMUX_H */
<!doctype html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <title>Example - example-example77-debug</title>
  <script src="../../../angular.js"></script>
</head>
<body ng-app="">
  <!-- ng-init seeds count at 0; ng-keydown increments it on every key press -->
  <input ng-keydown="count = count + 1" ng-init="count=0">
  key down count: {{count}}
</body>
</html>
// Barrel file: re-export the package's public API from src/index.
export * from './src/index';
require File.expand_path('../../../shared/complex/abs', __FILE__)

# Only relevant on Ruby < 1.9, where Complex is provided by the
# 'complex' standard library rather than being built in.
ruby_version_is ""..."1.9" do
  require 'complex'

  describe "Complex#abs" do
    # Delegates to the shared :complex_abs examples, exercising #abs.
    it_behaves_like(:complex_abs, :abs)
  end
end
/* * jquery.inputmask.numeric.extensions.js * http://github.com/RobinHerbots/jquery.inputmask * Copyright (c) 2010 - 2014 Robin Herbots * Licensed under the MIT license (http://www.opensource.org/licenses/mit-license.php) * Version: 3.1.26 */ (function (factory) {if (typeof define === 'function' && define.amd) {define(["jquery","./jquery.inputmask"], factory);} else {factory(jQuery);}}/* Input Mask plugin extensions http://github.com/RobinHerbots/jquery.inputmask Copyright (c) 2010 - 2014 Robin Herbots Licensed under the MIT license (http://www.opensource.org/licenses/mit-license.php) Version: 0.0.0 Optional extensions on the jquery.inputmask base */ (function ($) { //number aliases $.extend($.inputmask.defaults.aliases, { 'numeric': { mask: function (opts) { if (opts.repeat !== 0 && isNaN(opts.integerDigits)) { opts.integerDigits = opts.repeat; } opts.repeat = 0; if (opts.groupSeparator == opts.radixPoint) { //treat equal separator and radixpoint if (opts.radixPoint == ".") opts.groupSeparator = ","; else if (opts.radixPoint == ",") opts.groupSeparator = "."; else opts.groupSeparator = ""; } if (opts.groupSeparator === " ") { //prevent conflict with default skipOptionalPartCharacter opts.skipOptionalPartCharacter = undefined; } opts.autoGroup = opts.autoGroup && opts.groupSeparator != ""; if (opts.autoGroup && isFinite(opts.integerDigits)) { var seps = Math.floor(opts.integerDigits / opts.groupSize); var mod = opts.integerDigits % opts.groupSize; opts.integerDigits += mod == 0 ? seps - 1 : seps; } opts.definitions[";"] = opts.definitions["~"]; //clone integer def for decimals var mask = opts.prefix; mask += "[+]"; mask += "~{1," + opts.integerDigits + "}"; if (opts.digits != undefined && (isNaN(opts.digits) || parseInt(opts.digits) > 0)) { if (opts.digitsOptional) mask += "[" + (opts.decimalProtect ? ":" : opts.radixPoint) + ";{" + opts.digits + "}]"; else mask += (opts.decimalProtect ? 
":" : opts.radixPoint) + ";{" + opts.digits + "}"; } mask += opts.suffix; return mask; }, placeholder: "", greedy: false, digits: "*", //number of fractionalDigits digitsOptional: true, groupSeparator: "",//",", // | "." radixPoint: ".", groupSize: 3, autoGroup: false, allowPlus: true, allowMinus: true, integerDigits: "+", //number of integerDigits prefix: "", suffix: "", rightAlign: true, decimalProtect: true, //do not allow assumption of decimals input without entering the radixpoint postFormat: function (buffer, pos, reformatOnly, opts) { //this needs to be removed // this is crap var needsRefresh = false, charAtPos = buffer[pos]; if (opts.groupSeparator == "" || ($.inArray(opts.radixPoint, buffer) != -1 && pos >= $.inArray(opts.radixPoint, buffer)) || new RegExp('[-\+]').test(charAtPos) ) return { pos: pos }; var cbuf = buffer.slice(); if (charAtPos == opts.groupSeparator) { cbuf.splice(pos--, 1); charAtPos = cbuf[pos]; } if (reformatOnly) cbuf[pos] = "?"; else cbuf.splice(pos, 0, "?"); //set position indicator var bufVal = cbuf.join(''); if (opts.autoGroup || (reformatOnly && bufVal.indexOf(opts.groupSeparator) != -1)) { var escapedGroupSeparator = $.inputmask.escapeRegex.call(this, opts.groupSeparator); needsRefresh = bufVal.indexOf(opts.groupSeparator) == 0; bufVal = bufVal.replace(new RegExp(escapedGroupSeparator, "g"), ''); var radixSplit = bufVal.split(opts.radixPoint); bufVal = radixSplit[0]; if (bufVal != (opts.prefix + "?0") && bufVal.length >= (opts.groupSize + opts.prefix.length)) { needsRefresh = true; var reg = new RegExp('([-\+]?[\\d\?]+)([\\d\?]{' + opts.groupSize + '})'); while (reg.test(bufVal)) { bufVal = bufVal.replace(reg, '$1' + opts.groupSeparator + '$2'); bufVal = bufVal.replace(opts.groupSeparator + opts.groupSeparator, opts.groupSeparator); } } if (radixSplit.length > 1) bufVal += opts.radixPoint + radixSplit[1]; } buffer.length = bufVal.length; //align the length for (var i = 0, l = bufVal.length; i < l; i++) { buffer[i] = 
bufVal.charAt(i); } var newPos = $.inArray("?", buffer); if (reformatOnly) buffer[newPos] = charAtPos; else buffer.splice(newPos, 1); return { pos: newPos, "refreshFromBuffer": needsRefresh }; }, onKeyDown: function (e, buffer, caretPos, opts) { if (e.keyCode == $.inputmask.keyCode.TAB && opts.placeholder.charAt(0) != "0") { var radixPosition = $.inArray(opts.radixPoint, buffer); if (radixPosition != -1 && isFinite(opts.digits)) { for (var i = 1; i <= opts.digits; i++) { if (buffer[radixPosition + i] == undefined || buffer[radixPosition + i] == opts.placeholder.charAt(0)) buffer[radixPosition + i] = "0"; } return { "refreshFromBuffer": { start: ++radixPosition, end: radixPosition + opts.digits } }; } } else if (opts.autoGroup && (e.keyCode == $.inputmask.keyCode.DELETE || e.keyCode == $.inputmask.keyCode.BACKSPACE)) { var rslt = opts.postFormat(buffer, caretPos - 1, true, opts); rslt.caret = rslt.pos + 1; return rslt; } }, onKeyPress: function (e, buffer, caretPos, opts) { if (opts.autoGroup /*&& String.fromCharCode(k) == opts.radixPoint*/) { var rslt = opts.postFormat(buffer, caretPos - 1, true, opts); rslt.caret = rslt.pos + 1; return rslt; } }, regex: { integerPart: function (opts) { return new RegExp('[-\+]?\\d+'); } }, negationhandler: function (chrs, buffer, pos, strict, opts) { if (!strict && opts.allowMinus && chrs === "-") { var matchRslt = buffer.join('').match(opts.regex.integerPart(opts)); if (matchRslt.length > 0) { if (buffer[matchRslt.index] == "+") { return { "pos": matchRslt.index, "c": "-", "remove": matchRslt.index, "caret": pos }; } else if (buffer[matchRslt.index] == "-") { return { "remove": matchRslt.index, "caret": pos - 1 }; } else { return { "pos": matchRslt.index, "c": "-", "caret": pos + 1 }; } } } return false; }, radixhandler: function (chrs, maskset, pos, strict, opts) { if (!strict && chrs === opts.radixPoint) { var radixPos = $.inArray(opts.radixPoint, maskset.buffer), integerValue = 
maskset.buffer.join('').match(opts.regex.integerPart(opts)); if (radixPos != -1) { if (maskset["validPositions"][radixPos - 1]) return { "caret": radixPos + 1 }; else return { "pos": integerValue.index, c: integerValue[0], "caret": radixPos + 1 }; } } return false; }, leadingZeroHandler: function (chrs, maskset, pos, strict, opts) { var matchRslt = maskset.buffer.join('').match(opts.regex.integerPart(opts)), radixPosition = $.inArray(opts.radixPoint, maskset.buffer); if (matchRslt && !strict && (radixPosition == -1 || matchRslt.index < radixPosition)) { if (matchRslt["0"].indexOf("0") == 0 && pos >= opts.prefix.length) { if (radixPosition == -1 || (pos <= radixPosition && maskset["validPositions"][radixPosition] == undefined)) { maskset.buffer.splice(matchRslt.index, 1); pos = pos > matchRslt.index ? pos - 1 : matchRslt.index; return { "pos": pos, "remove": matchRslt.index }; } else if (pos > matchRslt.index && pos <= radixPosition) { maskset.buffer.splice(matchRslt.index, 1); pos = pos > matchRslt.index ? pos - 1 : matchRslt.index; return { "pos": pos, "remove": matchRslt.index }; } } else if (chrs == "0" && pos <= matchRslt.index) { return false; } } return true; }, definitions: { '~': { validator: function (chrs, maskset, pos, strict, opts) { var isValid = opts.negationhandler(chrs, maskset.buffer, pos, strict, opts); if (!isValid) { isValid = opts.radixhandler(chrs, maskset, pos, strict, opts); if (!isValid) { isValid = strict ? 
new RegExp("[0-9" + $.inputmask.escapeRegex.call(this, opts.groupSeparator) + "]").test(chrs) : new RegExp("[0-9]").test(chrs); if (isValid === true) { isValid = opts.leadingZeroHandler(chrs, maskset, pos, strict, opts); if (isValid === true) { //handle overwrite when fixed precision var radixPosition = $.inArray(opts.radixPoint, maskset.buffer); if (opts.digitsOptional === false && pos > radixPosition && !strict) { return { "pos": pos, "remove": pos }; } else return { pos: pos }; } } } } return isValid; }, cardinality: 1, prevalidator: null }, '+': { validator: function (chrs, maskset, pos, strict, opts) { var signed = "["; if (opts.allowMinus === true) signed += "-"; if (opts.allowPlus === true) signed += "\+"; signed += "]"; return new RegExp(signed).test(chrs); }, cardinality: 1, prevalidator: null, placeholder: '' }, ':': { validator: function (chrs, maskset, pos, strict, opts) { var isValid = opts.negationhandler(chrs, maskset.buffer, pos, strict, opts); if (!isValid) { var radix = "[" + $.inputmask.escapeRegex.call(this, opts.radixPoint) + "]"; isValid = new RegExp(radix).test(chrs); if (isValid && maskset["validPositions"][pos] && maskset["validPositions"][pos]["match"].placeholder == opts.radixPoint) { isValid = { "pos": pos, "remove": pos }; } } return isValid; }, cardinality: 1, prevalidator: null, placeholder: function (opts) { return opts.radixPoint; } } }, insertMode: true, autoUnmask: false, onUnMask: function (maskedValue, unmaskedValue, opts) { var processValue = maskedValue.replace(opts.prefix, ""); processValue = processValue.replace(opts.suffix, ""); processValue = processValue.replace(new RegExp($.inputmask.escapeRegex.call(this, opts.groupSeparator), "g"), ""); //processValue = processValue.replace($.inputmask.escapeRegex.call(this, opts.radixPoint), "."); return processValue; }, isComplete: function (buffer, opts) { var maskedValue = buffer.join(''), bufClone = buffer.slice(); //verify separator positions opts.postFormat(bufClone, 0, true, 
opts); if (bufClone.join('') != maskedValue) return false; var processValue = maskedValue.replace(opts.prefix, ""); processValue = processValue.replace(opts.suffix, ""); processValue = processValue.replace(new RegExp($.inputmask.escapeRegex.call(this, opts.groupSeparator), "g"), ""); processValue = processValue.replace($.inputmask.escapeRegex.call(this, opts.radixPoint), "."); return isFinite(processValue); }, onBeforeMask: function (initialValue, opts) { if (isFinite(initialValue)) { return initialValue.toString().replace(".", opts.radixPoint); } else { var kommaMatches = initialValue.match(/,/g); var dotMatches = initialValue.match(/\./g); if (dotMatches && kommaMatches) { if (dotMatches.length > kommaMatches.length) { initialValue = initialValue.replace(/\./g, ""); initialValue = initialValue.replace(",", opts.radixPoint); } else if (kommaMatches.length > dotMatches.length) { initialValue = initialValue.replace(/,/g, ""); initialValue = initialValue.replace(".", opts.radixPoint); } } else { initialValue = initialValue.replace(new RegExp($.inputmask.escapeRegex.call(this, opts.groupSeparator), "g"), ""); } return initialValue; } } }, 'currency': { prefix: "$ ", groupSeparator: ",", radixPoint: ".", alias: "numeric", placeholder: "0", autoGroup: true, digits: 2, digitsOptional: false, clearMaskOnLostFocus: false, decimalProtect: true, }, 'decimal': { alias: "numeric" }, 'integer': { alias: "numeric", digits: "0" } }); return $.fn.inputmask; }));
<!DOCTYPE html><!-- This page is a placeholder for generated extensions api doc. Note: 1) The <head> information in this page is significant, should be uniform across api docs and should be edited only with knowledge of the templating mechanism. 2) All <body>.innerHTML is generated as a rendering step. If viewed in a browser, it will be re-generated from the template, json schema and authored overview content. 3) The <body>.innerHTML is also generated by an offline step so that this page may easily be indexed by search engines. --><html xmlns="http://www.w3.org/1999/xhtml"><head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <link href="css/ApiRefStyles.css" rel="stylesheet" type="text/css"> <link href="css/print.css" rel="stylesheet" type="text/css" media="print"> <script type="text/javascript" src="../../../third_party/jstemplate/jstemplate_compiled.js"> </script> <script type="text/javascript" src="js/api_page_generator.js"></script> <script type="text/javascript" src="js/bootstrap.js"></script> <script type="text/javascript" src="js/sidebar.js"></script> <title>Options - Google Chrome Extensions - Google Code</title></head> <body> <div id="gc-container" class="labs"> <div id="devModeWarning"> You are viewing extension docs in chrome via the 'file:' scheme: are you expecting to see local changes when you refresh? You'll need to run chrome with --allow-file-access-from-files. 
</div> <!-- SUBTEMPLATES: DO NOT MOVE FROM THIS LOCATION --> <!-- In particular, sub-templates that recurse, must be used by allowing jstemplate to make a copy of the template in this section which are not operated on by way of the jsskip="true" --> <div style="display:none"> <!-- VALUE --> <div id="valueTemplate"> <dt> <var>paramName</var> <em> <!-- TYPE --> <div style="display:inline"> ( <span class="optional">optional</span> <span class="enum">enumerated</span> <span id="typeTemplate"> <span> <a> Type</a> </span> <span> <span> array of <span><span></span></span> </span> <span>paramType</span> <span></span> </span> </span> ) </div> </em> </dt> <dd class="todo"> Undocumented. </dd> <dd> Description of this parameter from the json schema. </dd> <dd> This parameter was added in version <b><span></span></b>. You must omit this parameter in earlier versions, and you may omit it in any version. If you require this parameter, the manifest key <a href="manifest.html#minimum_chrome_version">minimum_chrome_version</a> can ensure that your extension won't be run in an earlier browser version. 
</dd> <!-- OBJECT PROPERTIES --> <dd> <dl> <div> <div> </div> </div> </dl> </dd> <!-- OBJECT METHODS --> <dd> <div></div> </dd> <!-- OBJECT EVENT FIELDS --> <dd> <div></div> </dd> <!-- FUNCTION PARAMETERS --> <dd> <div></div> </dd> </div> <!-- /VALUE --> <div id="functionParametersTemplate"> <h5>Parameters</h5> <dl> <div> <div> </div> </div> </dl> </div> </div> <!-- /SUBTEMPLATES --> <a id="top"></a> <div id="skipto"> <a href="#gc-pagecontent">Skip to page content</a> <a href="#gc-toc">Skip to main navigation</a> </div> <!-- API HEADER --> <table id="header" width="100%" cellspacing="0" border="0"> <tbody><tr> <td valign="middle"><a href="http://code.google.com/"><img src="images/code_labs_logo.gif" height="43" width="161" alt="Google Code Labs" style="border:0; margin:0;"></a></td> <td valign="middle" width="100%" style="padding-left:0.6em;"> <form action="http://www.google.com/cse" id="cse" style="margin-top:0.5em"> <div id="gsc-search-box"> <input type="hidden" name="cx" value="002967670403910741006:61_cvzfqtno"> <input type="hidden" name="ie" value="UTF-8"> <input type="text" name="q" value="" size="55"> <input class="gsc-search-button" type="submit" name="sa" value="Search"> <br> <span class="greytext">e.g. 
"page action" or "tabs"</span> </div> </form> <script type="text/javascript" src="http://www.google.com/jsapi"></script> <script type="text/javascript">google.load("elements", "1", {packages: "transliteration"});</script> <script type="text/javascript" src="http://www.google.com/coop/cse/t13n?form=cse&amp;t13n_langs=en"></script> <script type="text/javascript" src="http://www.google.com/coop/cse/brand?form=cse&amp;lang=en"></script> </td> </tr> </tbody></table> <div id="codesiteContent" class=""> <a id="gc-topnav-anchor"></a> <div id="gc-topnav"> <h1>Google Chrome Extensions (<a href="http://code.google.com/labs/">Labs</a>)</h1> <ul id="home" class="gc-topnav-tabs"> <li id="home_link"> <a href="index.html" title="Google Chrome Extensions home page">Home</a> </li> <li id="docs_link"> <a href="docs.html" title="Official Google Chrome Extensions documentation">Docs</a> </li> <li id="faq_link"> <a href="faq.html" title="Answers to frequently asked questions about Google Chrome Extensions">FAQ</a> </li> <li id="samples_link"> <a href="samples.html" title="Sample extensions (with source code)">Samples</a> </li> <li id="group_link"> <a href="http://groups.google.com/a/chromium.org/group/chromium-extensions" title="Google Chrome Extensions developer forum">Group</a> </li> </ul> </div> <!-- end gc-topnav --> <div class="g-section g-tpl-170"> <!-- SIDENAV --> <div class="g-unit g-first" id="gc-toc"> <ul> <li><a href="getstarted.html">Getting Started</a></li> <li><a href="overview.html">Overview</a></li> <li><a href="whats_new.html">What's New?</a></li> <li><h2><a href="devguide.html">Developer's Guide</a></h2> <ul> <li>Browser UI <ul> <li><a href="browserAction.html">Browser Actions</a></li> <li><a href="contextMenus.html">Context Menus</a></li> <li><a href="notifications.html">Desktop Notifications</a></li> <li><a href="omnibox.html">Omnibox</a></li> <li class="leftNavSelected">Options Pages</li> <li><a href="override.html">Override Pages</a></li> <li><a 
href="pageAction.html">Page Actions</a></li> </ul> </li> <li>Browser Interaction <ul> <li><a href="bookmarks.html">Bookmarks</a></li> <li><a href="cookies.html">Cookies</a></li> <li><a href="events.html">Events</a></li> <li><a href="history.html">History</a></li> <li><a href="management.html">Management</a></li> <li><a href="tabs.html">Tabs</a></li> <li><a href="windows.html">Windows</a></li> </ul> </li> <li>Implementation <ul> <li><a href="a11y.html">Accessibility</a></li> <li><a href="background_pages.html">Background Pages</a></li> <li><a href="content_scripts.html">Content Scripts</a></li> <li><a href="xhr.html">Cross-Origin XHR</a></li> <li><a href="idle.html">Idle</a></li> <li><a href="i18n.html">Internationalization</a></li> <li><a href="messaging.html">Message Passing</a></li> <li><a href="npapi.html">NPAPI Plugins</a></li> </ul> </li> <li>Finishing <ul> <li><a href="hosting.html">Hosting</a></li> <li><a href="external_extensions.html">Other Deployment Options</a></li> </ul> </li> </ul> </li> <li><h2><a href="apps.html">Packaged Apps</a></h2></li> <li><h2><a href="tutorials.html">Tutorials</a></h2> <ul> <li><a href="tut_debugging.html">Debugging</a></li> <li><a href="tut_analytics.html">Google Analytics</a></li> <li><a href="tut_oauth.html">OAuth</a></li> </ul> </li> <li><h2>Reference</h2> <ul> <li>Formats <ul> <li><a href="manifest.html">Manifest Files</a></li> <li><a href="match_patterns.html">Match Patterns</a></li> </ul> </li> <li><a href="permission_warnings.html">Permission Warnings</a></li> <li><a href="api_index.html">chrome.* APIs</a></li> <li><a href="api_other.html">Other APIs</a></li> </ul> </li> <li><h2><a href="samples.html">Samples</a></h2></li> <div class="line"> </div> <li><h2>More</h2> <ul> <li><a href="http://code.google.com/chrome/webstore/docs/index.html">Chrome Web Store</a></li> <li><a href="http://code.google.com/chrome/apps/docs/developers_guide.html">Hosted Apps</a></li> <li><a href="themes.html">Themes</a></li> </ul> </li> </ul> 
</div> <script> initToggles(); </script> <div class="g-unit" id="gc-pagecontent"> <div id="pageTitle"> <h1 class="page_title">Options</h1> </div> <!-- TABLE OF CONTENTS --> <div id="toc"> <h2>Contents</h2> <ol> <li> <a href="#H2-0">Step 1: Declare your options page in the manifest</a> <ol> <li style="display: none; "> <a>h3Name</a> </li> </ol> </li><li> <a href="#H2-1">Step 2: Write your options page</a> <ol> <li style="display: none; "> <a>h3Name</a> </li> </ol> </li><li> <a href="#H2-2">Important notes</a> <ol> <li style="display: none; "> <a>h3Name</a> </li> </ol> </li> <li style="display: none; "> <a href="#apiReference">API reference</a> <ol> <li> <a href="#properties">Properties</a> <ol> <li> <a href="#property-anchor">propertyName</a> </li> </ol> </li> <li> <a>Methods</a> <ol> <li> <a href="#method-anchor">methodName</a> </li> </ol> </li> <li> <a>Events</a> <ol> <li> <a href="#event-anchor">eventName</a> </li> </ol> </li> <li> <a href="#types">Types</a> <ol> <li> <a href="#id-anchor">id</a> </li> </ol> </li> </ol> </li> </ol> </div> <!-- /TABLE OF CONTENTS --> <!-- Standard content lead-in for experimental API pages --> <p id="classSummary" style="display: none; "> For information on how to use experimental APIs, see the <a href="experimental.html">chrome.experimental.* APIs</a> page. </p> <!-- STATIC CONTENT PLACEHOLDER --> <div id="static"><div id="pageData-name" class="pageData">Options</div> <div id="pageData-showTOC" class="pageData">true</div> <p>To allow users to customize the behavior of your extension, you may wish to provide an options page. If you do, a link to it will be provided from the extensions management page at chrome://extensions. Clicking the Options link opens a new tab pointing at your options page. </p><a name="H2-0"></a><h2>Step 1: Declare your options page in the manifest</h2> <pre>{ "name": "My extension", ... <b>"options_page": "options.html"</b>, ... 
}</pre> <a name="H2-1"></a><h2>Step 2: Write your options page</h2> Here is an example options page: <pre>&lt;html&gt; &lt;head&gt;&lt;title&gt;My Test Extension Options&lt;/title&gt;&lt;/head&gt; &lt;script type="text/javascript"&gt; // Saves options to localStorage. function save_options() { var select = document.getElementById("color"); var color = select.children[select.selectedIndex].value; localStorage["favorite_color"] = color; // Update status to let user know options were saved. var status = document.getElementById("status"); status.innerHTML = "Options Saved."; setTimeout(function() { status.innerHTML = ""; }, 750); } // Restores select box state to saved value from localStorage. function restore_options() { var favorite = localStorage["favorite_color"]; if (!favorite) { return; } var select = document.getElementById("color"); for (var i = 0; i &lt; select.children.length; i++) { var child = select.children[i]; if (child.value == favorite) { child.selected = "true"; break; } } } &lt;/script&gt; &lt;body onload="restore_options()"&gt; Favorite Color: &lt;select id="color"&gt; &lt;option value="red"&gt;red&lt;/option&gt; &lt;option value="green"&gt;green&lt;/option&gt; &lt;option value="blue"&gt;blue&lt;/option&gt; &lt;option value="yellow"&gt;yellow&lt;/option&gt; &lt;/select&gt; &lt;br&gt; &lt;button onclick="save_options()"&gt;Save&lt;/button&gt; &lt;/body&gt; &lt;/html&gt; </pre> <a name="H2-2"></a><h2>Important notes</h2> <ul> <li>This feature is checked in to the trunk and should land in official builds sometime <b>after</b> version 4.0.222.x.</li> <li>We plan on providing some default css styles to encourage a consistent look across different extensions' options pages. 
You can star <a href="http://crbug.com/25317">crbug.com/25317</a> to be notified of updates.</li> </ul> </div> <!-- API PAGE --> <div class="apiPage" style="display: none; "> <a name="apiReference"></a> <h2>API reference: chrome.apiname </h2> <!-- PROPERTIES --> <div class="apiGroup"> <a name="properties"></a> <h3 id="properties">Properties</h3> <div> <a></a> <h4>getLastError</h4> <div class="summary"> <!-- Note: intentionally longer 80 columns --> <span>chrome.extension</span><span>lastError</span> </div> <div> </div> </div> </div> <!-- /apiGroup --> <!-- METHODS --> <div id="methodsTemplate" class="apiGroup"> <a></a> <h3>Methods</h3> <!-- iterates over all functions --> <div class="apiItem"> <a></a> <!-- method-anchor --> <h4>method name</h4> <div class="summary"><span>void</span> <!-- Note: intentionally longer 80 columns --> <span>chrome.module.methodName</span>(<span><span>, </span><span></span> <var><span></span></var></span>)</div> <div class="description"> <p class="todo">Undocumented.</p> <p> A description from the json schema def of the function goes here. </p> <!-- PARAMETERS --> <h4>Parameters</h4> <dl> <div> <div> </div> </div> </dl> <!-- RETURNS --> <h4>Returns</h4> <dl> <div> <div> </div> </div> </dl> <!-- CALLBACK --> <div> <div> <h4>Callback function</h4> <p> The callback <em>parameter</em> should specify a function that looks like this: </p> <p> If you specify the <em>callback</em> parameter, it should specify a function that looks like this: </p> <!-- Note: intentionally longer 80 columns --> <pre>function(<span>Type param1, Type param2</span>) <span class="subdued">{...}</span>;</pre> <dl> <div> <div> </div> </div> </dl> </div> </div> <!-- MIN_VERSION --> <p> This function was added in version <b><span></span></b>. If you require this function, the manifest key <a href="manifest.html#minimum_chrome_version">minimum_chrome_version</a> can ensure that your extension won't be run in an earlier browser version. 
</p> </div> <!-- /description --> </div> <!-- /apiItem --> </div> <!-- /apiGroup --> <!-- EVENTS --> <div id="eventsTemplate" class="apiGroup"> <a></a> <h3>Events</h3> <!-- iterates over all events --> <div class="apiItem"> <a></a> <h4>event name</h4> <div class="summary"> <!-- Note: intentionally longer 80 columns --> <span class="subdued">chrome.bookmarks</span><span>onEvent</span><span class="subdued">.addListener</span>(function(<span>Type param1, Type param2</span>) <span class="subdued">{...}</span>); </div> <div class="description"> <p class="todo">Undocumented.</p> <p> A description from the json schema def of the event goes here. </p> <!-- PARAMETERS --> <div> <h4>Parameters</h4> <dl> <div> <div> </div> </div> </dl> </div> </div> <!-- /decription --> </div> <!-- /apiItem --> </div> <!-- /apiGroup --> <!-- TYPES --> <div class="apiGroup"> <a name="types"></a> <h3 id="types">Types</h3> <!-- iterates over all types --> <div class="apiItem"> <a></a> <h4>type name</h4> <div> </div> </div> <!-- /apiItem --> </div> <!-- /apiGroup --> </div> <!-- /apiPage --> </div> <!-- /gc-pagecontent --> </div> <!-- /g-section --> </div> <!-- /codesiteContent --> <div id="gc-footer" --=""> <div class="text"> <p> Except as otherwise <a href="http://code.google.com/policies.html#restrictions">noted</a>, the content of this page is licensed under the <a rel="license" href="http://creativecommons.org/licenses/by/3.0/">Creative Commons Attribution 3.0 License</a>, and code samples are licensed under the <a rel="license" href="http://code.google.com/google_bsd_license.html">BSD License</a>. 
</p> <p> ©2011 Google </p> <!-- begin analytics --> <script src="http://www.google-analytics.com/urchin.js" type="text/javascript"></script> <script src="http://www.google-analytics.com/ga.js" type="text/javascript"></script> <script type="text/javascript"> // chrome doc tracking try { var engdocs = _gat._getTracker("YT-10763712-2"); engdocs._trackPageview(); } catch(err) {} // code.google.com site-wide tracking try { _uacct="UA-18071-1"; _uanchor=1; _uff=0; urchinTracker(); } catch(e) {/* urchinTracker not available. */} </script> <!-- end analytics --> </div> </div> <!-- /gc-footer --> </div> <!-- /gc-container --> </body></html>
/* * Copyright (C) 2011 The Guava Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ // TODO(user): when things stabilize, flesh this out /** * Hash functions and related structures. * * <p>See the Guava User Guide article on <a href= * "http://code.google.com/p/guava-libraries/wiki/HashingExplained"> * hashing</a>. */ @ParametersAreNonnullByDefault package com.google.common.hash; import javax.annotation.ParametersAreNonnullByDefault;
// 
// Copyright (c) Microsoft and contributors. All rights reserved.
// 
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// 
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// 
// See the License for the specific language governing permissions and
// limitations under the License.
// 

// Warning: This code was generated by a tool.
// 
// Changes to this file may cause incorrect behavior and will be lost if the
// code is regenerated.

using System;
using System.Linq;
using Microsoft.Azure.Management.WebSites.Models;

namespace Microsoft.Azure.Management.WebSites.Models
{
    /// <summary>
    /// Describes a website.
    /// </summary>
    public partial class WebSiteCloneBase : ResourceBase
    {
        // Backing field for Properties. NOTE(review): this class is partial and
        // tool-generated; other parts of the partial class may reference this
        // private field, so it must not be renamed or converted to an
        // auto-property.
        private WebSiteCloneBaseProperties _properties;
        
        /// <summary>
        /// Optional. Represents the properties of the website.
        /// </summary>
        public WebSiteCloneBaseProperties Properties
        {
            get { return this._properties; }
            set { this._properties = value; }
        }
        
        /// <summary>
        /// Initializes a new instance of the WebSiteCloneBase class.
        /// </summary>
        public WebSiteCloneBase()
        {
        }
        
        /// <summary>
        /// Initializes a new instance of the WebSiteCloneBase class with
        /// required arguments.
        /// </summary>
        /// <param name="location">
        /// Required. Assigned to the inherited Location property; must not be
        /// null.
        /// </param>
        public WebSiteCloneBase(string location)
            : this()
        {
            if (location == null)
            {
                throw new ArgumentNullException("location");
            }
            this.Location = location;
        }
    }
}
// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// See net/disk_cache/disk_cache.h for the public interface.

#ifndef NET_DISK_CACHE_STORAGE_BLOCK_H__
#define NET_DISK_CACHE_STORAGE_BLOCK_H__
#pragma once

#include "net/disk_cache/addr.h"
#include "net/disk_cache/mapped_file.h"

namespace disk_cache {

// This class encapsulates common behavior of a single "block" of data that is
// stored on a block-file. It implements the FileBlock interface, so it can be
// serialized directly to the backing file.
// This object provides a memory buffer for the related data, and it can be used
// to actually share that memory with another instance of the class.
//
// The following example shows how to share storage with another object:
//    StorageBlock<TypeA> a(file, address);
//    StorageBlock<TypeB> b(file, address);
//    a.Load();
//    DoSomething(a.Data());
//    b.SetData(a.Data());
//    ModifySomething(b.Data());
//    // Data modified on the previous call will be saved by b's destructor.
//    b.set_modified();
template<typename T>
class StorageBlock : public FileBlock {
 public:
  StorageBlock(MappedFile* file, Addr address);
  virtual ~StorageBlock();

  // FileBlock interface.
  virtual void* buffer() const;
  virtual size_t size() const;
  virtual int offset() const;

  // Allows the override of dummy values passed on the constructor.
  bool LazyInit(MappedFile* file, Addr address);

  // Sets the internal storage to share the memory provided by other instance.
  void SetData(T* other);

  // Deletes the data, even if it was modified and not saved. This object must
  // own the memory buffer (it cannot be shared).
  void Discard();

  // Stops sharing the data with another object.
  void StopSharingData();

  // Sets the object to lazily save the in-memory data on destruction.
  void set_modified();

  // Gets a pointer to the internal storage (allocates storage if needed).
  T* Data();

  // Returns true if there is data associated with this object.
  bool HasData() const;

  // Returns true if this object owns the data buffer, false if it is shared.
  bool own_data() const;

  // Returns the on-disk address this block maps to.
  const Addr address() const;

  // Loads and stores the data to/from the backing file.
  bool Load();
  bool Store();

 private:
  void AllocateData();
  void DeleteData();

  T* data_;
  MappedFile* file_;
  Addr address_;
  bool modified_;
  bool own_data_;  // Is data_ owned by this object or shared with someone else.
  bool extended_;  // Used to store an entry of more than one block.

  DISALLOW_COPY_AND_ASSIGN(StorageBlock);
};

typedef StorageBlock<EntryStore> CacheEntryBlock;
typedef StorageBlock<RankingsNode> CacheRankingsBlock;

}  // namespace disk_cache

#endif  // NET_DISK_CACHE_STORAGE_BLOCK_H__
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package asn1 import ( "bytes" "errors" "fmt" "io" "math/big" "reflect" "time" "unicode/utf8" ) // A forkableWriter is an in-memory buffer that can be // 'forked' to create new forkableWriters that bracket the // original. After // pre, post := w.fork() // the overall sequence of bytes represented is logically w+pre+post. type forkableWriter struct { *bytes.Buffer pre, post *forkableWriter } func newForkableWriter() *forkableWriter { return &forkableWriter{new(bytes.Buffer), nil, nil} } func (f *forkableWriter) fork() (pre, post *forkableWriter) { if f.pre != nil || f.post != nil { panic("have already forked") } f.pre = newForkableWriter() f.post = newForkableWriter() return f.pre, f.post } func (f *forkableWriter) Len() (l int) { l += f.Buffer.Len() if f.pre != nil { l += f.pre.Len() } if f.post != nil { l += f.post.Len() } return } func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) { n, err = out.Write(f.Bytes()) if err != nil { return } var nn int if f.pre != nil { nn, err = f.pre.writeTo(out) n += nn if err != nil { return } } if f.post != nil { nn, err = f.post.writeTo(out) n += nn } return } func marshalBase128Int(out *forkableWriter, n int64) (err error) { if n == 0 { err = out.WriteByte(0) return } l := 0 for i := n; i > 0; i >>= 7 { l++ } for i := l - 1; i >= 0; i-- { o := byte(n >> uint(i*7)) o &= 0x7f if i != 0 { o |= 0x80 } err = out.WriteByte(o) if err != nil { return } } return nil } func marshalInt64(out *forkableWriter, i int64) (err error) { n := int64Length(i) for ; n > 0; n-- { err = out.WriteByte(byte(i >> uint((n-1)*8))) if err != nil { return } } return nil } func int64Length(i int64) (numBytes int) { numBytes = 1 for i > 127 { numBytes++ i >>= 8 } for i < -128 { numBytes++ i >>= 8 } return } func marshalBigInt(out *forkableWriter, n *big.Int) (err error) { if n.Sign() < 0 
{ // A negative number has to be converted to two's-complement // form. So we'll subtract 1 and invert. If the // most-significant-bit isn't set then we'll need to pad the // beginning with 0xff in order to keep the number negative. nMinus1 := new(big.Int).Neg(n) nMinus1.Sub(nMinus1, bigOne) bytes := nMinus1.Bytes() for i := range bytes { bytes[i] ^= 0xff } if len(bytes) == 0 || bytes[0]&0x80 == 0 { err = out.WriteByte(0xff) if err != nil { return } } _, err = out.Write(bytes) } else if n.Sign() == 0 { // Zero is written as a single 0 zero rather than no bytes. err = out.WriteByte(0x00) } else { bytes := n.Bytes() if len(bytes) > 0 && bytes[0]&0x80 != 0 { // We'll have to pad this with 0x00 in order to stop it // looking like a negative number. err = out.WriteByte(0) if err != nil { return } } _, err = out.Write(bytes) } return } func marshalLength(out *forkableWriter, i int) (err error) { n := lengthLength(i) for ; n > 0; n-- { err = out.WriteByte(byte(i >> uint((n-1)*8))) if err != nil { return } } return nil } func lengthLength(i int) (numBytes int) { numBytes = 1 for i > 255 { numBytes++ i >>= 8 } return } func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) { b := uint8(t.class) << 6 if t.isCompound { b |= 0x20 } if t.tag >= 31 { b |= 0x1f err = out.WriteByte(b) if err != nil { return } err = marshalBase128Int(out, int64(t.tag)) if err != nil { return } } else { b |= uint8(t.tag) err = out.WriteByte(b) if err != nil { return } } if t.length >= 128 { l := lengthLength(t.length) err = out.WriteByte(0x80 | byte(l)) if err != nil { return } err = marshalLength(out, t.length) if err != nil { return } } else { err = out.WriteByte(byte(t.length)) if err != nil { return } } return nil } func marshalBitString(out *forkableWriter, b BitString) (err error) { paddingBits := byte((8 - b.BitLength%8) % 8) err = out.WriteByte(paddingBits) if err != nil { return } _, err = out.Write(b.Bytes) return } func marshalObjectIdentifier(out *forkableWriter, oid 
[]int) (err error) {
	// (continuation of marshalObjectIdentifier — signature begins before this chunk)
	// DER encodes an OBJECT IDENTIFIER: per X.690 the first two components
	// are packed into one base-128 integer as oid[0]*40 + oid[1].
	if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
		return StructuralError{"invalid object identifier"}
	}
	err = marshalBase128Int(out, int64(oid[0]*40+oid[1]))
	if err != nil {
		return
	}
	for i := 2; i < len(oid); i++ {
		err = marshalBase128Int(out, int64(oid[i]))
		if err != nil {
			return
		}
	}
	return
}

// marshalPrintableString writes s as a PrintableString body, rejecting any
// byte outside the PrintableString character set (checked via isPrintable).
func marshalPrintableString(out *forkableWriter, s string) (err error) {
	b := []byte(s)
	for _, c := range b {
		if !isPrintable(c) {
			return StructuralError{"PrintableString contains invalid character"}
		}
	}
	_, err = out.Write(b)
	return
}

// marshalIA5String writes s as an IA5String body, rejecting bytes > 127.
func marshalIA5String(out *forkableWriter, s string) (err error) {
	b := []byte(s)
	for _, c := range b {
		if c > 127 {
			return StructuralError{"IA5String contains invalid character"}
		}
	}
	_, err = out.Write(b)
	return
}

// marshalUTF8String writes s unchecked as a UTF8String body.
func marshalUTF8String(out *forkableWriter, s string) (err error) {
	_, err = out.Write([]byte(s))
	return
}

// marshalTwoDigits writes v as exactly two ASCII decimal digits (v mod 100).
func marshalTwoDigits(out *forkableWriter, v int) (err error) {
	err = out.WriteByte(byte('0' + (v/10)%10))
	if err != nil {
		return
	}
	return out.WriteByte(byte('0' + v%10))
}

// marshalFourDigits writes v as exactly four ASCII decimal digits (v mod 10000).
func marshalFourDigits(out *forkableWriter, v int) (err error) {
	var bytes [4]byte
	for i := range bytes {
		bytes[3-i] = '0' + byte(v%10)
		v /= 10
	}
	_, err = out.Write(bytes[:])
	return
}

// outsideUTCRange reports whether t's year falls outside the [1950, 2050)
// window representable by a two-digit UTCTime year.
func outsideUTCRange(t time.Time) bool {
	year := t.Year()
	return year < 1950 || year >= 2050
}

// marshalUTCTime writes t as a UTCTime body: two-digit year (1950-2049)
// followed by the common date/time/zone suffix.
func marshalUTCTime(out *forkableWriter, t time.Time) (err error) {
	year := t.Year()

	switch {
	case 1950 <= year && year < 2000:
		err = marshalTwoDigits(out, int(year-1900))
	case 2000 <= year && year < 2050:
		err = marshalTwoDigits(out, int(year-2000))
	default:
		return StructuralError{"cannot represent time as UTCTime"}
	}
	if err != nil {
		return
	}

	return marshalTimeCommon(out, t)
}

// marshalGeneralizedTime writes t as a GeneralizedTime body: four-digit year
// (0-9999) followed by the common date/time/zone suffix.
func marshalGeneralizedTime(out *forkableWriter, t time.Time) (err error) {
	year := t.Year()
	if year < 0 || year > 9999 {
		return StructuralError{"cannot represent time as GeneralizedTime"}
	}
	if err = marshalFourDigits(out, year); err != nil {
		return
	}

	return marshalTimeCommon(out, t)
}

// marshalTimeCommon writes the MMDDHHMMSS portion shared by UTCTime and
// GeneralizedTime, then either 'Z' (UTC) or a +hhmm/-hhmm zone offset.
func marshalTimeCommon(out *forkableWriter, t time.Time) (err error) {
	_, month, day := t.Date()

	err = marshalTwoDigits(out, int(month))
	if err != nil {
		return
	}
	err = marshalTwoDigits(out, day)
	if err != nil {
		return
	}

	hour, min, sec := t.Clock()

	err = marshalTwoDigits(out, hour)
	if err != nil {
		return
	}
	err = marshalTwoDigits(out, min)
	if err != nil {
		return
	}
	err = marshalTwoDigits(out, sec)
	if err != nil {
		return
	}

	_, offset := t.Zone()

	switch {
	case offset/60 == 0:
		// Offsets under a minute collapse to UTC.
		err = out.WriteByte('Z')
		return
	case offset > 0:
		err = out.WriteByte('+')
	case offset < 0:
		err = out.WriteByte('-')
	}
	if err != nil {
		return
	}

	offsetMinutes := offset / 60
	if offsetMinutes < 0 {
		offsetMinutes = -offsetMinutes
	}
	err = marshalTwoDigits(out, offsetMinutes/60)
	if err != nil {
		return
	}
	err = marshalTwoDigits(out, offsetMinutes%60)
	return
}

// stripTagAndLength drops the leading tag-and-length header from in;
// if the header cannot be parsed, in is returned unmodified.
func stripTagAndLength(in []byte) []byte {
	_, offset, err := parseTagAndLength(in, 0)
	if err != nil {
		return in
	}
	return in[offset:]
}

// marshalBody writes the content octets (no tag/length) for value.
// Special ASN.1 types are dispatched by concrete type first, then by
// reflect.Kind for bool/int/struct/slice/string.
func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) {
	switch value.Type() {
	case flagType:
		// A Flag marshals to an empty body.
		return nil
	case timeType:
		t := value.Interface().(time.Time)
		if params.timeType == tagGeneralizedTime || outsideUTCRange(t) {
			return marshalGeneralizedTime(out, t)
		} else {
			return marshalUTCTime(out, t)
		}
	case bitStringType:
		return marshalBitString(out, value.Interface().(BitString))
	case objectIdentifierType:
		return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier))
	case bigIntType:
		return marshalBigInt(out, value.Interface().(*big.Int))
	}

	switch v := value; v.Kind() {
	case reflect.Bool:
		// DER booleans: TRUE is 0xff, FALSE is 0x00.
		if v.Bool() {
			return out.WriteByte(255)
		} else {
			return out.WriteByte(0)
		}
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return marshalInt64(out, int64(v.Int()))
	case reflect.Struct:
		t := v.Type()

		startingField := 0

		// If the first element of the structure is a non-empty
		// RawContents, then we don't bother serializing the rest.
		if t.NumField() > 0 && t.Field(0).Type == rawContentsType {
			s := v.Field(0)
			if s.Len() > 0 {
				bytes := make([]byte, s.Len())
				for i := 0; i < s.Len(); i++ {
					bytes[i] = uint8(s.Index(i).Uint())
				}
				/* The RawContents will contain the tag and
				 * length fields but we'll also be writing
				 * those ourselves, so we strip them out of
				 * bytes */
				_, err = out.Write(stripTagAndLength(bytes))
				return
			} else {
				startingField = 1
			}
		}

		// Each field gets its own fork so lengths can be computed
		// before the parent is flushed.
		for i := startingField; i < t.NumField(); i++ {
			var pre *forkableWriter
			pre, out = out.fork()
			err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1")))
			if err != nil {
				return
			}
		}
		return
	case reflect.Slice:
		sliceType := v.Type()
		if sliceType.Elem().Kind() == reflect.Uint8 {
			// []byte marshals as a raw OCTET STRING body.
			bytes := make([]byte, v.Len())
			for i := 0; i < v.Len(); i++ {
				bytes[i] = uint8(v.Index(i).Uint())
			}
			_, err = out.Write(bytes)
			return
		}

		// Other slices marshal element-by-element with default
		// (empty) field parameters.
		var fp fieldParameters
		for i := 0; i < v.Len(); i++ {
			var pre *forkableWriter
			pre, out = out.fork()
			err = marshalField(pre, v.Index(i), fp)
			if err != nil {
				return
			}
		}
		return
	case reflect.String:
		switch params.stringType {
		case tagIA5String:
			return marshalIA5String(out, v.String())
		case tagPrintableString:
			return marshalPrintableString(out, v.String())
		default:
			return marshalUTF8String(out, v.String())
		}
	}

	return StructuralError{"unknown Go type"}
}

// marshalField writes a complete TLV for v: it resolves the effective tag
// (string/time auto-selection, implicit/explicit tagging, SET handling),
// handles OPTIONAL/DEFAULT omission and RawValue passthrough, then emits
// tag, length, and body via forked writers.
func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) {
	if !v.IsValid() {
		return fmt.Errorf("asn1: cannot marshal nil value")
	}
	// If the field is an interface{} then recurse into it.
	if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
		return marshalField(out, v.Elem(), params)
	}

	if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty {
		return
	}

	// An OPTIONAL field equal to its declared DEFAULT is omitted entirely.
	if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) {
		defaultValue := reflect.New(v.Type()).Elem()
		defaultValue.SetInt(*params.defaultValue)

		if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) {
			return
		}
	}

	// If no default value is given then the zero value for the type is
	// assumed to be the default value. This isn't obviously the correct
	// behaviour, but it's what Go has traditionally done.
	if params.optional && params.defaultValue == nil {
		if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) {
			return
		}
	}

	if v.Type() == rawValueType {
		// RawValue: FullBytes wins if present; otherwise re-emit
		// header from the Class/Tag/IsCompound fields plus Bytes.
		rv := v.Interface().(RawValue)

		if len(rv.FullBytes) != 0 {
			_, err = out.Write(rv.FullBytes)
		} else {
			err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound})
			if err != nil {
				return
			}
			_, err = out.Write(rv.Bytes)
		}
		return
	}

	tag, isCompound, ok := getUniversalType(v.Type())
	if !ok {
		err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())}
		return
	}
	class := classUniversal

	if params.timeType != 0 && tag != tagUTCTime {
		return StructuralError{"explicit time type given to non-time member"}
	}

	if params.stringType != 0 && tag != tagPrintableString {
		return StructuralError{"explicit string type given to non-string member"}
	}

	switch tag {
	case tagPrintableString:
		if params.stringType == 0 {
			// This is a string without an explicit string type. We'll use
			// a PrintableString if the character set in the string is
			// sufficiently limited, otherwise we'll use a UTF8String.
			for _, r := range v.String() {
				if r >= utf8.RuneSelf || !isPrintable(byte(r)) {
					if !utf8.ValidString(v.String()) {
						return errors.New("asn1: string not valid UTF-8")
					}
					tag = tagUTF8String
					break
				}
			}
		} else {
			tag = params.stringType
		}
	case tagUTCTime:
		if params.timeType == tagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) {
			tag = tagGeneralizedTime
		}
	}

	if params.set {
		if tag != tagSequence {
			return StructuralError{"non sequence tagged as set"}
		}
		tag = tagSet
	}

	// Fork so the body length is known before the header is written.
	tags, body := out.fork()

	err = marshalBody(body, v, params)
	if err != nil {
		return
	}

	bodyLen := body.Len()

	var explicitTag *forkableWriter
	if params.explicit {
		explicitTag, tags = tags.fork()
	}

	if !params.explicit && params.tag != nil {
		// implicit tag.
		tag = *params.tag
		class = classContextSpecific
	}

	err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound})
	if err != nil {
		return
	}

	if params.explicit {
		// Explicit tagging wraps the inner TLV in an outer
		// context-specific constructed TLV.
		err = marshalTagAndLength(explicitTag, tagAndLength{
			class:      classContextSpecific,
			tag:        *params.tag,
			length:     bodyLen + tags.Len(),
			isCompound: true,
		})
	}

	return err
}

// Marshal returns the ASN.1 encoding of val.
//
// In addition to the struct tags recognised by Unmarshal, the following can be
// used:
//
//	ia5:		causes strings to be marshaled as ASN.1, IA5 strings
//	omitempty:	causes empty slices to be skipped
//	printable:	causes strings to be marshaled as ASN.1, PrintableString strings.
//	utf8:		causes strings to be marshaled as ASN.1, UTF8 strings
func Marshal(val interface{}) ([]byte, error) {
	var out bytes.Buffer
	v := reflect.ValueOf(val)
	f := newForkableWriter()
	err := marshalField(f, v, fieldParameters{})
	if err != nil {
		return nil, err
	}
	_, err = f.writeTo(&out)
	return out.Bytes(), err
}
/*! UIkit 2.19.0 | http://www.getuikit.com | (c) 2014 YOOtheme | MIT License */

/* Autocomplete: inline wrapper positioned as the dropdown anchor */
.uk-autocomplete {
    display: inline-block;
    position: relative;
    max-width: 100%;
    vertical-align: middle;
}

/* Nav items inside an autocomplete dropdown */
.uk-nav-autocomplete > li > a {
    color: #444;
}

/* Highlighted (keyboard-active) item */
.uk-nav-autocomplete > li.uk-active > a {
    background: #00a8e6;
    color: #fff;
    outline: 0;
}

/* Section header inside the dropdown */
.uk-nav-autocomplete .uk-nav-header {
    color: #999;
}

/* Divider line between groups */
.uk-nav-autocomplete .uk-nav-divider {
    border-top: 1px solid #ddd;
}
/* * tps65910.c -- TI TPS6591x * * Copyright 2010 Texas Instruments Inc. * * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/debugfs.h> #include <linux/gpio.h> #include <linux/mfd/tps65910.h> #define COMP 0 #define COMP1 1 #define COMP2 2 /* Comparator 1 voltage selection table in milivolts */ static const u16 COMP_VSEL_TABLE[] = { 0, 2500, 2500, 2500, 2500, 2550, 2600, 2650, 2700, 2750, 2800, 2850, 2900, 2950, 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400, 3450, 3500, }; struct comparator { const char *name; int reg; int uV_max; const u16 *vsel_table; }; static struct comparator tps_comparators[] = { { .name = "COMP1", .reg = TPS65911_VMBCH, .uV_max = 3500, .vsel_table = COMP_VSEL_TABLE, }, { .name = "COMP2", .reg = TPS65911_VMBCH2, .uV_max = 3500, .vsel_table = COMP_VSEL_TABLE, }, }; static int comp_threshold_set(struct tps65910 *tps65910, int id, int voltage) { struct comparator tps_comp = tps_comparators[id]; int curr_voltage = 0; int ret; u8 index = 0, val; if (id == COMP) return 0; while (curr_voltage < tps_comp.uV_max) { curr_voltage = tps_comp.vsel_table[index]; if (curr_voltage >= voltage) break; else if (curr_voltage < voltage) index ++; } if (curr_voltage > tps_comp.uV_max) return -EINVAL; val = index << 1; ret = tps65910->write(tps65910, tps_comp.reg, 1, &val); return ret; } static int comp_threshold_get(struct tps65910 *tps65910, int id) { struct comparator tps_comp = tps_comparators[id]; int ret; u8 val; if (id == COMP) return 0; ret = tps65910->read(tps65910, tps_comp.reg, 1, &val); if (ret < 0) return ret; val >>= 
1; return tps_comp.vsel_table[val]; } static ssize_t comp_threshold_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tps65910 *tps65910 = dev_get_drvdata(dev->parent); struct attribute comp_attr = attr->attr; int id, uVolt; if (!strcmp(comp_attr.name, "comp1_threshold")) id = COMP1; else if (!strcmp(comp_attr.name, "comp2_threshold")) id = COMP2; else return -EINVAL; uVolt = comp_threshold_get(tps65910, id); return sprintf(buf, "%d\n", uVolt); } static DEVICE_ATTR(comp1_threshold, S_IRUGO, comp_threshold_show, NULL); static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL); static __devinit int tps65911_comparator_probe(struct platform_device *pdev) { struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent); struct tps65910_board *pdata = dev_get_platdata(tps65910->dev); int ret; ret = comp_threshold_set(tps65910, COMP1, pdata->vmbch_threshold); if (ret < 0) { dev_err(&pdev->dev, "cannot set COMP1 threshold\n"); return ret; } ret = comp_threshold_set(tps65910, COMP2, pdata->vmbch2_threshold); if (ret < 0) { dev_err(&pdev->dev, "cannot set COMP2 theshold\n"); return ret; } /* Create sysfs entry */ ret = device_create_file(&pdev->dev, &dev_attr_comp1_threshold); if (ret < 0) dev_err(&pdev->dev, "failed to add COMP1 sysfs file\n"); ret = device_create_file(&pdev->dev, &dev_attr_comp2_threshold); if (ret < 0) dev_err(&pdev->dev, "failed to add COMP2 sysfs file\n"); return ret; } static __devexit int tps65911_comparator_remove(struct platform_device *pdev) { struct tps65910 *tps65910; tps65910 = dev_get_drvdata(pdev->dev.parent); device_remove_file(&pdev->dev, &dev_attr_comp2_threshold); device_remove_file(&pdev->dev, &dev_attr_comp1_threshold); return 0; } static struct platform_driver tps65911_comparator_driver = { .driver = { .name = "tps65911-comparator", .owner = THIS_MODULE, }, .probe = tps65911_comparator_probe, .remove = __devexit_p(tps65911_comparator_remove), }; static int __init tps65911_comparator_init(void) { 
return platform_driver_register(&tps65911_comparator_driver); } subsys_initcall(tps65911_comparator_init); static void __exit tps65911_comparator_exit(void) { platform_driver_unregister(&tps65911_comparator_driver); } module_exit(tps65911_comparator_exit); MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>"); MODULE_DESCRIPTION("TPS65911 comparator driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:tps65911-comparator");